author    Toni Uhlig <matzeton@googlemail.com>  2023-07-16 02:03:33 +0200
committer Toni Uhlig <matzeton@googlemail.com>  2023-07-16 02:03:33 +0200
commit    5a40295c4cf0af5ea8da9ced04a4ce7d3621a080 (patch)
tree      cb21506e7b04d10b45d6066a0ee1655563d5d52b
Squashed 'flatcc/' content from commit 473da2a
git-subtree-dir: flatcc
git-subtree-split: 473da2afa5ca435363f8c5e6569167aee6bc31c5
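The metadata above is what a `git subtree` squash import looks like. As a minimal sketch (the upstream URL and branch are assumptions; the `flatcc/` prefix and split commit come from the trailers above), such a commit is produced and later refreshed with:

```sh
# Initial import: squash the entire upstream history into a single commit
# whose message carries the git-subtree-dir and git-subtree-split trailers.
git subtree add --prefix=flatcc https://github.com/dvidelabs/flatcc.git master --squash

# Later updates: fetch newer upstream commits and squash them on top of the
# previously recorded split commit.
git subtree pull --prefix=flatcc https://github.com/dvidelabs/flatcc.git master --squash
```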
-rw-r--r--  .github/workflows/ci.yml  60
-rw-r--r--  .github/workflows/weekly.yml  191
-rw-r--r--  .gitignore  8
-rw-r--r--  .travis.yml  29
-rw-r--r--  CHANGELOG.md  562
-rw-r--r--  CMakeLists.txt  363
-rw-r--r--  CONTRIBUTING.md  17
-rw-r--r--  LICENSE  202
-rw-r--r--  NOTICE  22
-rw-r--r--  README.md  2620
-rw-r--r--  appveyor.yml  38
-rw-r--r--  config/config.h  477
-rw-r--r--  doc/Grammar-2015-07-23.md  42
-rw-r--r--  doc/Grammar.README.md  2
-rw-r--r--  doc/benchmarks.md  147
-rw-r--r--  doc/binary-format.md  1378
-rw-r--r--  doc/builder.md  1845
-rw-r--r--  doc/eclectic.fbs  12
-rw-r--r--  doc/flatcc-help.md  106
-rw-r--r--  doc/json_parser_design.md  138
-rw-r--r--  doc/security.md  272
-rw-r--r--  external/grisu3/.gitignore  1
-rw-r--r--  external/grisu3/LICENSE  14
-rw-r--r--  external/grisu3/README.md  9
-rw-r--r--  external/grisu3/grisu3_math.h  329
-rw-r--r--  external/grisu3/grisu3_parse.h  582
-rw-r--r--  external/grisu3/grisu3_print.h  265
-rw-r--r--  external/grisu3/grisu3_test.c  141
-rw-r--r--  external/grisu3/grisu3_test_dblcnv.c  482
-rwxr-xr-x  external/grisu3/test.sh  18
-rwxr-xr-x  external/grisu3/test_dblcnv.sh  15
-rw-r--r--  external/hash/.gitignore  1
-rw-r--r--  external/hash/CMakeLists.txt  38
-rw-r--r--  external/hash/LICENSE  28
-rw-r--r--  external/hash/PMurHash.c  334
-rw-r--r--  external/hash/PMurHash.h  64
-rw-r--r--  external/hash/README.md  158
-rw-r--r--  external/hash/cmetrohash.h  78
-rw-r--r--  external/hash/cmetrohash64.c  185
-rw-r--r--  external/hash/hash.h  115
-rw-r--r--  external/hash/hash_table.h  266
-rw-r--r--  external/hash/hash_table_def.h  154
-rw-r--r--  external/hash/hash_table_impl.h  233
-rw-r--r--  external/hash/hash_table_impl_rh.h  360
-rw-r--r--  external/hash/hash_test.c  419
-rw-r--r--  external/hash/ht32.c  47
-rw-r--r--  external/hash/ht32.h  36
-rw-r--r--  external/hash/ht32rh.c  47
-rw-r--r--  external/hash/ht32rh.h  36
-rw-r--r--  external/hash/ht64.c  47
-rw-r--r--  external/hash/ht64.h  36
-rw-r--r--  external/hash/ht64rh.c  47
-rw-r--r--  external/hash/ht64rh.h  36
-rw-r--r--  external/hash/ht_hash_function.h  258
-rw-r--r--  external/hash/ht_portable.h  9
-rw-r--r--  external/hash/ht_trace.h  59
-rwxr-xr-x  external/hash/initbuild.sh  5
-rwxr-xr-x  external/hash/initbuild_debug.sh  5
-rw-r--r--  external/hash/int_set.h  50
-rw-r--r--  external/hash/load_test.c  86
-rw-r--r--  external/hash/pstdint.h  898
-rw-r--r--  external/hash/ptr_set.c  60
-rw-r--r--  external/hash/ptr_set.h  19
-rw-r--r--  external/hash/str_set.c  61
-rw-r--r--  external/hash/str_set.h  32
-rw-r--r--  external/hash/token_map.c  54
-rw-r--r--  external/hash/token_map.h  39
-rw-r--r--  external/hash/unaligned.h  42
-rw-r--r--  external/lex/LICENSE  21
-rw-r--r--  external/lex/README.md  3
-rw-r--r--  external/lex/luthor.c  1509
-rw-r--r--  external/lex/luthor.h  472
-rw-r--r--  external/lex/tokens.h  554
-rw-r--r--  include/flatcc/flatcc.h  268
-rw-r--r--  include/flatcc/flatcc_accessors.h  101
-rw-r--r--  include/flatcc/flatcc_alloc.h  127
-rw-r--r--  include/flatcc/flatcc_assert.h  45
-rw-r--r--  include/flatcc/flatcc_builder.h  1911
-rw-r--r--  include/flatcc/flatcc_emitter.h  215
-rw-r--r--  include/flatcc/flatcc_endian.h  125
-rw-r--r--  include/flatcc/flatcc_epilogue.h  8
-rw-r--r--  include/flatcc/flatcc_flatbuffers.h  55
-rw-r--r--  include/flatcc/flatcc_identifier.h  148
-rw-r--r--  include/flatcc/flatcc_iov.h  31
-rw-r--r--  include/flatcc/flatcc_json_parser.h  908
-rw-r--r--  include/flatcc/flatcc_json_printer.h  788
-rw-r--r--  include/flatcc/flatcc_portable.h  14
-rw-r--r--  include/flatcc/flatcc_prologue.h  8
-rw-r--r--  include/flatcc/flatcc_refmap.h  144
-rw-r--r--  include/flatcc/flatcc_rtconfig.h  162
-rw-r--r--  include/flatcc/flatcc_types.h  97
-rw-r--r--  include/flatcc/flatcc_unaligned.h  16
-rw-r--r--  include/flatcc/flatcc_verifier.h  239
-rw-r--r--  include/flatcc/flatcc_version.h  14
-rw-r--r--  include/flatcc/portable/LICENSE  14
-rw-r--r--  include/flatcc/portable/README.md  57
-rw-r--r--  include/flatcc/portable/grisu3_math.h  329
-rw-r--r--  include/flatcc/portable/grisu3_parse.h  582
-rw-r--r--  include/flatcc/portable/grisu3_print.h  265
-rw-r--r--  include/flatcc/portable/include/README  4
-rw-r--r--  include/flatcc/portable/include/linux/endian.h  1
-rw-r--r--  include/flatcc/portable/include/std/inttypes.h  1
-rw-r--r--  include/flatcc/portable/include/std/stdalign.h  1
-rw-r--r--  include/flatcc/portable/include/std/stdbool.h  1
-rw-r--r--  include/flatcc/portable/include/std/stdint.h  1
-rw-r--r--  include/flatcc/portable/paligned_alloc.h  212
-rw-r--r--  include/flatcc/portable/pattributes.h  84
-rw-r--r--  include/flatcc/portable/pbase64.h  448
-rw-r--r--  include/flatcc/portable/pcrt.h  48
-rw-r--r--  include/flatcc/portable/pdiagnostic.h  85
-rw-r--r--  include/flatcc/portable/pdiagnostic_pop.h  20
-rw-r--r--  include/flatcc/portable/pdiagnostic_push.h  51
-rw-r--r--  include/flatcc/portable/pendian.h  206
-rw-r--r--  include/flatcc/portable/pendian_detect.h  118
-rw-r--r--  include/flatcc/portable/pinline.h  19
-rw-r--r--  include/flatcc/portable/pinttypes.h  52
-rw-r--r--  include/flatcc/portable/portable.h  2
-rw-r--r--  include/flatcc/portable/portable_basic.h  25
-rw-r--r--  include/flatcc/portable/pparsefp.h  226
-rw-r--r--  include/flatcc/portable/pparseint.h  374
-rw-r--r--  include/flatcc/portable/pprintfp.h  39
-rw-r--r--  include/flatcc/portable/pprintint.h  628
-rw-r--r--  include/flatcc/portable/pstatic_assert.h  67
-rw-r--r--  include/flatcc/portable/pstatic_assert_scope.h  280
-rw-r--r--  include/flatcc/portable/pstdalign.h  162
-rw-r--r--  include/flatcc/portable/pstdbool.h  37
-rw-r--r--  include/flatcc/portable/pstdint.h  898
-rw-r--r--  include/flatcc/portable/punaligned.h  190
-rw-r--r--  include/flatcc/portable/pversion.h  6
-rw-r--r--  include/flatcc/portable/pwarnings.h  52
-rw-r--r--  include/flatcc/reflection/README  19
-rw-r--r--  include/flatcc/reflection/flatbuffers_common_builder.h  685
-rw-r--r--  include/flatcc/reflection/flatbuffers_common_reader.h  578
-rw-r--r--  include/flatcc/reflection/reflection_builder.h  457
-rw-r--r--  include/flatcc/reflection/reflection_reader.h  411
-rw-r--r--  include/flatcc/reflection/reflection_verifier.h  308
-rw-r--r--  include/flatcc/support/README  1
-rw-r--r--  include/flatcc/support/cdump.h  38
-rw-r--r--  include/flatcc/support/elapsed.h  73
-rw-r--r--  include/flatcc/support/hexdump.h  47
-rw-r--r--  include/flatcc/support/readfile.h  66
-rw-r--r--  reflection/README.in  19
-rwxr-xr-x  reflection/generate_code.sh  13
-rw-r--r--  reflection/reflection.fbs  117
-rw-r--r--  samples/CMakeLists.txt  8
-rw-r--r--  samples/bugreport/.gitignore  1
-rwxr-xr-x  samples/bugreport/build.sh  22
-rw-r--r--  samples/bugreport/eclectic.fbs  11
-rw-r--r--  samples/bugreport/myissue.c  35
-rw-r--r--  samples/monster/CMakeLists.txt  22
-rwxr-xr-x  samples/monster/build.sh  27
-rw-r--r--  samples/monster/monster.c  353
-rw-r--r--  samples/monster/monster.fbs  32
-rw-r--r--  samples/reflection/CMakeLists.txt  31
-rw-r--r--  samples/reflection/bfbs2json.c  314
-rwxr-xr-x  samples/reflection/build.sh  27
-rw-r--r--  scripts/_user_build.in  32
-rwxr-xr-x  scripts/benchflatcc.sh  6
-rwxr-xr-x  scripts/benchmark.sh  6
-rwxr-xr-x  scripts/bfbs-sample.sh  15
-rw-r--r--  scripts/build.cfg.make  3
-rw-r--r--  scripts/build.cfg.make-32bit  3
-rw-r--r--  scripts/build.cfg.make-concurrent  3
-rw-r--r--  scripts/build.cfg.ninja  3
-rwxr-xr-x  scripts/build.sh  37
-rwxr-xr-x  scripts/cleanall.sh  20
-rwxr-xr-x  scripts/dev.sh  9
-rwxr-xr-x  scripts/flatcc-doc.sh  36
-rwxr-xr-x  scripts/initbuild.sh  40
-rwxr-xr-x  scripts/monster-doc.example.sh  6
-rwxr-xr-x  scripts/reflection-doc-example.sh  6
-rwxr-xr-x  scripts/release.sh  9
-rwxr-xr-x  scripts/setup.sh  112
-rwxr-xr-x  scripts/test.sh  38
-rw-r--r--  src/cli/CMakeLists.txt  20
-rw-r--r--  src/cli/flatcc_cli.c  505
-rw-r--r--  src/compiler/CMakeLists.txt  43
-rw-r--r--  src/compiler/catalog.h  217
-rw-r--r--  src/compiler/codegen.h  46
-rw-r--r--  src/compiler/codegen_c.c  285
-rw-r--r--  src/compiler/codegen_c.h  397
-rw-r--r--  src/compiler/codegen_c_builder.c  2159
-rw-r--r--  src/compiler/codegen_c_json_parser.c  1850
-rw-r--r--  src/compiler/codegen_c_json_printer.c  732
-rw-r--r--  src/compiler/codegen_c_reader.c  1928
-rw-r--r--  src/compiler/codegen_c_sort.c  171
-rw-r--r--  src/compiler/codegen_c_sort.h  9
-rw-r--r--  src/compiler/codegen_c_sorter.c  355
-rw-r--r--  src/compiler/codegen_c_verifier.c  327
-rw-r--r--  src/compiler/codegen_schema.c  581
-rw-r--r--  src/compiler/coerce.c  266
-rw-r--r--  src/compiler/coerce.h  13
-rw-r--r--  src/compiler/fileio.c  225
-rw-r--r--  src/compiler/fileio.h  86
-rw-r--r--  src/compiler/flatcc.c  511
-rw-r--r--  src/compiler/hash_tables/README.txt  2
-rw-r--r--  src/compiler/hash_tables/name_table.c  21
-rw-r--r--  src/compiler/hash_tables/schema_table.c  21
-rw-r--r--  src/compiler/hash_tables/scope_table.c  177
-rw-r--r--  src/compiler/hash_tables/symbol_table.c  22
-rw-r--r--  src/compiler/hash_tables/value_set.c  60
-rw-r--r--  src/compiler/keywords.h  56
-rw-r--r--  src/compiler/parser.c  1550
-rw-r--r--  src/compiler/parser.h  213
-rw-r--r--  src/compiler/pstrutil.h  58
-rw-r--r--  src/compiler/semantics.c  1962
-rw-r--r--  src/compiler/semantics.h  12
-rw-r--r--  src/compiler/symbols.h  457
-rw-r--r--  src/runtime/CMakeLists.txt  16
-rw-r--r--  src/runtime/builder.c  2035
-rw-r--r--  src/runtime/emitter.c  269
-rw-r--r--  src/runtime/json_parser.c  1297
-rw-r--r--  src/runtime/json_printer.c  1486
-rw-r--r--  src/runtime/refmap.c  248
-rw-r--r--  src/runtime/verifier.c  617
-rw-r--r--  test/CMakeLists.txt  27
-rw-r--r--  test/README.md  19
-rw-r--r--  test/benchmark/README.md  68
-rwxr-xr-x  test/benchmark/benchall.sh  16
-rw-r--r--  test/benchmark/benchflatc/benchflatc.cpp  70
-rw-r--r--  test/benchmark/benchflatc/flatbench_generated.h  166
-rw-r--r--  test/benchmark/benchflatc/flatbuffers/flatbuffers.h  1189
-rwxr-xr-x  test/benchmark/benchflatc/run.sh  23
-rw-r--r--  test/benchmark/benchflatcc/benchflatcc.c  98
-rwxr-xr-x  test/benchmark/benchflatcc/run.sh  24
-rw-r--r--  test/benchmark/benchflatccjson/benchflatccjson.c  182
-rwxr-xr-x  test/benchmark/benchflatccjson/run.sh  23
-rw-r--r--  test/benchmark/benchmain/benchmain.h  66
-rw-r--r--  test/benchmark/benchout-osx.txt  169
-rw-r--r--  test/benchmark/benchout-ubuntu.txt  169
-rw-r--r--  test/benchmark/benchraw/benchraw.c  117
-rwxr-xr-x  test/benchmark/benchraw/run.sh  21
-rw-r--r--  test/benchmark/schema/flatbench.fbs  37
-rw-r--r--  test/cgen_test/CMakeLists.txt  43
-rw-r--r--  test/cgen_test/cgen_test.c  163
-rwxr-xr-x  test/cgen_test/cgen_test.sh  22
-rwxr-xr-x  test/debug.sh  8
-rw-r--r--  test/emit_test/CMakeLists.txt  20
-rw-r--r--  test/emit_test/emit_test.c  137
-rw-r--r--  test/emit_test/emit_test.fbs  6
-rwxr-xr-x  test/emit_test/emit_test.sh  19
-rw-r--r--  test/flatc_compat/.gitattributes  2
-rw-r--r--  test/flatc_compat/CMakeLists.txt  21
-rw-r--r--  test/flatc_compat/README.md  10
-rw-r--r--  test/flatc_compat/flatc_compat.c  226
-rwxr-xr-x  test/flatc_compat/flatc_compat.sh  20
-rw-r--r--  test/flatc_compat/monsterdata_test.golden  48
-rwxr-xr-x  test/flatc_compat/monsterdata_test.json  51
-rw-r--r--  test/flatc_compat/monsterdata_test.mon  bin 0 -> 336 bytes
-rw-r--r--  test/json_test/CMakeLists.txt  64
-rw-r--r--  test/json_test/flatcc_golden.c  45
-rwxr-xr-x  test/json_test/json_test.sh  74
-rw-r--r--  test/json_test/test_basic_parse.c  291
-rw-r--r--  test/json_test/test_json.c  882
-rw-r--r--  test/json_test/test_json_parser.c  164
-rw-r--r--  test/json_test/test_json_printer.c  129
-rwxr-xr-x  test/leakcheck-full.sh  9
-rwxr-xr-x  test/leakcheck.sh  9
-rw-r--r--  test/load_test/CMakeLists.txt  20
-rw-r--r--  test/load_test/load_test.c  164
-rwxr-xr-x  test/load_test/load_test.sh  22
-rw-r--r--  test/monster_test/CMakeLists.txt  20
-rw-r--r--  test/monster_test/attributes.fbs  6
-rw-r--r--  test/monster_test/include_test1.fbs  5
-rw-r--r--  test/monster_test/include_test2.fbs  11
-rw-r--r--  test/monster_test/monster_test.c  2919
-rwxr-xr-x  test/monster_test/monster_test.fbs  365
-rw-r--r--  test/monster_test_concat/CMakeLists.txt  21
-rw-r--r--  test/monster_test_concat/README.txt  2
-rw-r--r--  test/monster_test_concat/monster_test_concat.c  24
-rw-r--r--  test/monster_test_cpp/CMakeLists.txt  24
-rw-r--r--  test/monster_test_cpp/monster_test.cpp  3
-rw-r--r--  test/monster_test_prefix/CMakeLists.txt  20
-rw-r--r--  test/monster_test_prefix/monster_test_prefix.c  24
-rw-r--r--  test/monster_test_solo/CMakeLists.txt  21
-rw-r--r--  test/monster_test_solo/monster_test_solo.c  24
-rw-r--r--  test/optional_scalars_test/CMakeLists.txt  19
-rw-r--r--  test/optional_scalars_test/optional_scalars_test.c  280
-rw-r--r--  test/optional_scalars_test/optional_scalars_test.fbs  71
-rw-r--r--  test/reflection_test/CMakeLists.txt  20
-rw-r--r--  test/reflection_test/reflection_test.c  196
-rwxr-xr-x  test/reflection_test/reflection_test.sh  24
-rwxr-xr-x  test/test.sh  103
-rw-r--r--  test/union_vector_test/union_vector.fbs  26
284 files changed, 63182 insertions, 0 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..42e227d
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,60 @@
+name: CI
+
+on: [push, pull_request]
+
+env:
+ CTEST_OUTPUT_ON_FAILURE: 1
+
+jobs:
+ ubuntu-ninja-clang:
+ name: Ubuntu (ninja, clang)
+ runs-on: ubuntu-22.04
+ steps:
+ - name: Prepare
+ run: |
+ sudo apt update
+ sudo apt install ninja-build
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ env:
+ CC: clang
+ CXX: clang++
+ run: |
+ scripts/test.sh
+
+ ubuntu-make-gcc:
+ name: Ubuntu (make, gcc)
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ env:
+ CC: gcc
+ CXX: g++
+ run: |
+ scripts/initbuild.sh make
+ scripts/test.sh
+
+ macos:
+ name: macOS
+ runs-on: macos-12
+ steps:
+ - name: Prepare
+ run: |
+ brew install cmake ninja
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ scripts/test.sh
+
+ windows:
+ name: Windows
+ runs-on: windows-2022
+ steps:
+ - uses: microsoft/setup-msbuild@v1.1
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ cmake .
+ msbuild FlatCC.sln /m /property:Configuration=Release
+ ctest -VV
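The Linux and macOS jobs above delegate to the repository's own scripts, so a CI run can be reproduced locally. A rough sketch, assuming cmake plus ninja or make and a C/C++ toolchain are already installed:

```sh
# Default ninja-based build and test run, as in the ubuntu-ninja-clang job.
CC=clang CXX=clang++ scripts/test.sh

# Or switch to a make-based build first, as in the ubuntu-make-gcc job,
# then build and run the full test suite.
scripts/initbuild.sh make
CC=gcc CXX=g++ scripts/test.sh
```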
diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml
new file mode 100644
index 0000000..a46f275
--- /dev/null
+++ b/.github/workflows/weekly.yml
@@ -0,0 +1,191 @@
+name: Weekly
+
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '0 10 * * 1' # Mon 10.00 UTC
+
+env:
+ CTEST_OUTPUT_ON_FAILURE: 1
+
+jobs:
+ clang:
+ name: Clang ${{ matrix.clang-version }}
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ clang-version: [5, 7, 9, 11, 13, 15]
+ steps:
+ - name: Setup Clang
+ uses: aminya/setup-cpp@v1
+ with:
+ llvm: ${{ matrix.clang-version }}
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ scripts/initbuild.sh make-concurrent
+ scripts/test.sh
+
+ clang-32bit:
+ name: Clang 32bit
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Prepare
+ run: |
+ sudo apt update
+ sudo apt install gcc-multilib g++-multilib
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ env:
+ CC: clang
+ CXX: clang++
+ run: |
+ scripts/initbuild.sh make-32bit
+ scripts/test.sh
+
+ gcc-old:
+ name: GCC 4.4
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Setup GCC
+ run: |
+ wget http://launchpadlibrarian.net/336269522/libmpfr4_3.1.6-1_amd64.deb
+ wget http://old-releases.ubuntu.com/ubuntu/pool/universe/g/gcc-4.4/gcc-4.4-base_4.4.7-8ubuntu1_amd64.deb
+ wget http://old-releases.ubuntu.com/ubuntu/pool/universe/g/gcc-4.4/cpp-4.4_4.4.7-8ubuntu1_amd64.deb
+ wget http://old-releases.ubuntu.com/ubuntu/pool/universe/g/gcc-4.4/gcc-4.4_4.4.7-8ubuntu1_amd64.deb
+ wget http://old-releases.ubuntu.com/ubuntu/pool/universe/g/gcc-4.4/libstdc++6-4.4-dev_4.4.7-8ubuntu1_amd64.deb
+ wget http://old-releases.ubuntu.com/ubuntu/pool/universe/g/gcc-4.4/g++-4.4_4.4.7-8ubuntu1_amd64.deb
+ sudo dpkg -i ./libmpfr4_3.1.6-1_amd64.deb
+ sudo dpkg -i ./gcc-4.4-base_4.4.7-8ubuntu1_amd64.deb
+ sudo dpkg -i ./cpp-4.4_4.4.7-8ubuntu1_amd64.deb
+ sudo dpkg -i ./gcc-4.4_4.4.7-8ubuntu1_amd64.deb
+ sudo dpkg -i ./libstdc++6-4.4-dev_4.4.7-8ubuntu1_amd64.deb ./g++-4.4_4.4.7-8ubuntu1_amd64.deb
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ env:
+ CC: gcc-4.4
+ CXX: g++-4.4
+ run: |
+ scripts/initbuild.sh make-concurrent
+ scripts/test.sh
+
+ gcc:
+ name: GCC ${{ matrix.gcc-version }}
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ gcc-version: [7, 9, 11]
+ steps:
+ - name: Setup GCC
+ uses: aminya/setup-cpp@v1
+ with:
+ gcc: ${{ matrix.gcc-version }}
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ scripts/initbuild.sh make-concurrent
+ scripts/test.sh
+
+ gcc-32bit:
+ name: GCC 32bit
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Prepare
+ run: |
+ sudo apt update
+ sudo apt install gcc-multilib g++-multilib
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ scripts/initbuild.sh make-32bit
+ scripts/test.sh
+
+ intel:
+ name: Intel ${{ matrix.compiler }}
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ compiler: [icc, icx]
+ steps:
+ - name: Prepare
+ run: |
+ wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB | sudo apt-key add -
+ echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list
+ sudo apt update
+ sudo apt install intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic-2021.4.0
+ - name: Setup Intel oneAPI
+ run: |
+ source /opt/intel/oneapi/setvars.sh
+ printenv >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ env:
+ CC: ${{ matrix.compiler }}
+ CXX: ${{ matrix.compiler }}
+ run: |
+ scripts/initbuild.sh make-concurrent
+ scripts/test.sh
+
+ macos-clang:
+ name: macOS Clang
+ runs-on: macos-11
+ steps:
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ scripts/initbuild.sh make-concurrent
+ scripts/test.sh
+
+ macos-gcc:
+ name: macOS GCC ${{ matrix.gcc-version }}
+ runs-on: macos-11
+ strategy:
+ fail-fast: false
+ matrix:
+ gcc-version: [9, 12]
+ steps:
+ - uses: actions/checkout@v3
+ - name: Prepare
+ run: |
+ brew install gcc@${{ matrix.gcc-version }}
+ - name: Build and run tests
+ env:
+ CC: gcc-${{ matrix.gcc-version }}
+ CXX: g++-${{ matrix.gcc-version }}
+ run: |
+ scripts/initbuild.sh make-concurrent
+ scripts/test.sh
+
+ windows:
+ name: Windows Visual Studio ${{ matrix.version }}
+ runs-on: windows-${{ matrix.version }}
+ strategy:
+ fail-fast: false
+ matrix:
+ version: [2019, 2022]
+ steps:
+ - uses: microsoft/setup-msbuild@v1.1
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ cmake .
+ msbuild FlatCC.sln /m /property:Configuration=Release
+ ctest -VV
+
+ cmake-minimum-required:
+ name: CMake 2.8.12 (min. required)
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Setup cmake
+ uses: jwlawson/actions-setup-cmake@v1
+ with:
+ cmake-version: 2.8.12
+ - uses: actions/checkout@v3
+ - name: Build and run tests
+ run: |
+ cmake --version
+ scripts/initbuild.sh make-concurrent
+ scripts/test.sh
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2d74832
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+build/*
+bin/*
+lib/*
+release/*
+scripts/build.cfg
+compile_flags.txt
+compile_commands.json
+.cache
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..30a4529
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,29 @@
+language: c
+
+dist: focal
+
+os:
+ - linux
+ - osx
+
+compiler:
+ - gcc
+ - clang
+
+addons:
+ apt:
+ packages:
+ - ninja-build
+
+# macos builds are too slow on travis, and now brew update doesn't work without additional configuration
+#before_install:
+# - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; fi
+#
+#install:
+# - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install ninja; fi
+
+script:
+ - if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then scripts/initbuild.sh ninja; fi
+ - if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then scripts/test.sh; fi
+ - scripts/initbuild.sh make-concurrent
+ - scripts/test.sh
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..7ff4a4a
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,562 @@
+# Change Log
+
+## [0.6.2-pre]
+
+- CMake: avoid assuming location of build dir during configuration.
+- Use untyped integer constants in place of enums for public interface flags to
+ allow for safe bit masking operations (PR #248).
+- Added experimental support for generating `compile_commands.json` via
+ `CMakeLists.txt` for use with clangd.
+- Remove `fallthrough` macro for improved portability (#247, #252).
+- Added `parse_float/double_compare`, `parse_float/double_is_equal` to
+ portable library, and added `parse_float/double_isnan` to mirror isinf.
+ This should help with GCC 32-bit double precision conversion issue.
+- Add Github Actions builds to replace stale Travis CI build. This also
+ includes source code fixes for some build variants. Although
+ Windows build is included, it only covers recent 64-bit Windows. More
+ work is needed for older Windows variants (#250).
+- Increase maximum allowed schema file size from 64 KiB to 1 MB (#256).
+- Fix seg fault in json parser while adding null characters to a too
+ short input string for a fixed length char array struct field (#257).
+
+## [0.6.1]
+
+- Add `flatcc_builder_alloc` and `flatcc_builder_free` to handle situations
+ where standard allocation has been redefined via macros so `free` is no longer
+ safe to use. These are similar to the existing `aligned_alloc/free` functions.
+- Fix a potential, but never seen, low level race condition in the builder when
+ writing a union field because the builder might reallocate between type
+ field and value field. Affects `flatbuffers_common_builder.h` but not `builder.c`.
+- Fix GCC 8.3 reversal on `__alignas_is_defined` for -std=c++11, affecting
+ pstdalign.h (#130).
+- Make C++ test optional via build flag and disable for pre GCC-4.7 (#134).
+- Avoid conflicts with table or struct fields named `identifier` (#99, #135)
+ by not generating `<table-name>_identifier` for the table (or structs) file
+ identifier, instead `<table-name>_file_identifier` is generated and the old
+ form is generated for backwards compatibility when there is no conflict.
+- DEPRECATED: `<table-name>_identifier` should now be
+ `<table-name>_file_identifier`.
+- `FLATCC_ASSERT` and `FLATCC_NO_ASSERT` provided for custom runtime assert
+ handling (#139).
+- Add guards in case bswap macros are already defined and use POSIX compliant
+ Linux detection (#129).
+- Fix precision loss of floating point default values in generated code (#140).
+- Fix anon union pedantic warning in internal builder struct (PR #143).
+- Remove warnings triggered by -Wsign-conversion.
+- BREAKING: some functions taking a string or buffer argument, and a separate
+ size argument, have changed the size type from `int` to `size_t` in an effort
+ to standardize and to remove sign conversion warnings.
+- Fixed size arrays renamed to fixed length arrays for consistency with Google's
+ project.
+- Support `aligned_alloc` for MingW (#155).
+- Disable config flag `FLATCC_ASCENDING_ENUM` now that Google's flatc compiler
+ also has support for reordered enums.
+- Fix cli options so common files can be generated without schema files
+ (PR #156).
+- Make build.sh work for non-bash shells (PR #159).
+- Fix parsing json struct as root (#157).
+- Add support for optional scalar values (#162).
+- Fix enum out of range (#176).
+- Add stdalign support for TCC compiler (#174).
+- Add RPC data to bfbs binary schema (#181).
+- Fix support for printing JSON enum vectors (#182).
+- Silence GCC 11 indentation warning (#183).
+- Fix type of code field on json test (#184).
+- Add `end_loc` to parser state of root json parsers (#186).
+- BREAKING: Add per table `<name>_file_extension` and fixed wrong per table
+ `<name>_file_identifier` assignment when multiple files are included due to
+ macro expansion conflict. Extensions are now specified without extra dot
+ prefix unless specified explicitly in the schema file. The default extension
+ is now 'bin' instead of '.bin' (#187).
+- Fix buffer overrun when parser reports error on large symbols (#188).
+- BREAKING: Print --version to stdout, not stderr.
+- Fix schema parser returning success on some failure modes (#193).
+- Fix larger integer literal types in JSON parser and printer (#194).
+- Add pattributes.h to portable library and replace GCC fallthrough comments
+ with fallthrough attribute to also silence clang warnings (#203).
+- Remove misguided include guards from `portable/pdiagnostic_push/pop.h` and fix
+ related and expected warnings in other code. NOTE: End user code might be
+ affected because warnings were disabled more broadly than intended. Also note
+ that warnings will still be disabled after pop if the compiler does not
+ support push/pop diagnostics (#205).
+- Fix verifier crash on malicious string length input (#221).
+- Fix potential crash parsing unterminated JSON (#223).
+- Allow 0 (and other unknown values) as schema default value for enums with
+ `bit_flags` attribute.
+- Disable -pedantic flag for GCC >= 8, it just keeps breaking perfectly valid
+ code (#227).
+
+## [0.6.0]
+
+- BREAKING: if there are multiple table fields with a key attribute, the
+ default key is now the field with the smaller id rather than the first
+ listed in case these differ (which is not often the case).
+- The attribute `primary_key` has been added to choose a specific keyed
+ field as the default key field for finding and sorting instead of the
+ field with the smallest id.
+- Add mutable types: `mutable_table_t`, `mutable_union_t`, `mutable_union_vec_t`
+ and casts: `mutable_union_cast`, `mutable_union_vec_cast`.
+- Disallow key attribute on deprecated fields.
+- Add 'sorted' attribute for scalar, string, table and struct vectors.
+ Tables and structs must have a key field defined.
+- Add recursive table and union `_sort` operation in `_reader.h` for
+ types that contain a sorted vector, either directly or indirectly.
+ NOTE: shared vectors in a DAG will be sorted multiple times.
+- Allow attributes to be declared multiple times, including known attributes.
+- Allow table and struct field names to be reserved names such as 'namespace'
+ or 'table'. This can be disabled in `config/config.h`.
+ Separately do the same for enum member. This change was motivated by
+ JSON fields that can have names such as "table" or "namespace".
+- Use `FLATCC_INSTALL_LIB` for install targets in addition to ordinary builds
+ (#109).
+- Add missing case break in flatcc compiler - no impact on behavior (#110).
+- Make `flatbuffers_not_found` and `flatbuffers_end` constant values both
+ because it is more correct, and to silence warnings on some systems.
+- Fix flatcc endless loop with out of order field ids (#112).
+- Add support for fixed length arrays as struct member fields, including
+ fixed length char arrays. NOTE: bfbs schema uses byte arrays instead of
+ char arrays since Google's flatc tool does not have char arrays.
+- Fix `aligned_free` when used with `FLATCC_USE_GENERIC_ALIGNED_ALLOC` (#118).
+- Fix potential buffer overrun when parsing JSON containing surrogate pairs
+ with a resulting UTF-8 code point of length 4 (bug introduced in 0.5.3).
+- BREAKING: empty structs are no longer supported. They were fully supported
+ and Google's flatc compiler also no longer supports them.
+- Fix incorrect sprintf arg when printing NaN with the grisu3 algorithm (#119).
+- Fix gcc 9 regression on `-fallthrough` comment syntax (#120).
+- Silence false positive gcc 9 warning on strncpy truncation
+ (-Wstringop-truncation) (#120).
+- Silence false positive gcc 9 warning on printf null string
+ (-Wno-format-overflow) (#120).
+- Silence GCC `-Wstrict-prototypes` warnings that incorrectly complain about
+ function definitions with empty instead of void arguments. Only for runtime
+ source (#122).
+
+## [0.5.3]
+- BREAKING: 0.5.3 changes behavior of builder create calls so arguments
+ are always ordered by field id when id attributes are being used, for
+ example `MyGame_Example_Monster_create()` in `monster_test.fbs` (#81).
+- Fix `refmap_clear` so that the refmap is always ready for reuse.
+- Remove C++ from CMake toolchain when not building tests (#94).
+- Add missing `_get` suffix in `flatbuffers_common_reader.h` needed when
+ used with flatcc -g option (#96).
+- Remove stray break statement that prevented generation of search
+ methods for scalar table fields with the key attribute set (#97).
+- Fix return value when creating a struct field fails which is unlikely
+ to happen, and hence has low impact (#98).
+- Fix file identifier string to type hash cast as implicit cast was not safe on
+ all platforms.
+- Fix reallocation bug when cloning vectors of non-scalar content (#102).
+- JSON parser: combine valid UTF-16 surrogate pairs such as "\uD834\uDD1E"
+ (U+1D11E) into single 4 byte UTF-8 sequence as per spec. Unmatched half
+ pairs are intentionally decoded into invalid UTF-8 as before.
+- Fix sorting tables by scalar keys. Sorting by integer key could lead to
+ undefined behavior (#104).
+- Add `FLATCC_INSTALL_LIB` configuration to CMake to change the
+ default <project>/lib path, for example `cmake -DFLATCC_INSTALL_LIB=lib64`.
+- Fix return code in `flatcc_parse_file` library function.
+
+## [0.5.2]
+
+- Handle union vectors in binary schema generation (.bfbs).
+- Handle mixed union types in binary schema (.bfbs).
+- Fix .bfbs bug failing to export fields of string type correctly.
+- Fix how vectors are printed in samples/reflection project.
+- Add support for KeyValue attributes in binary schema (.bfbs).
+- Added `__tmp` suffix to macro variables in `flatbuffers_common_reader.h`
+ to avoid potential name conflicts (#82).
+- Added `_get` suffix to all table and struct read accessors in
+ addition to existing accessors (`Monster_name()` vs `Monster_name_get()`)
+ (#82).
+- Added `-g` option flatcc commandline to only generate read accessors
+ with the `_get` suffix in order to avoid potential name conflicts (#82).
+- Fix stdalign.h not available in MSVC C++ in any known version.
+- Added test case for building flatcc project with C++ compiler (#79, #80).
+- Fix `flatbuffers_int8_vec_t` type which was incorrectly unsigned.
+- Added table, union, vector clone, and union vector operations. Table
+ fields now also have a `_pick` method taking a source table of same
+ type as argument which is roughly a combined get, clone and add
+ operation for a single field. `_pick` will pick a field even if it is
+ a default value and it will succeed as a no-operation if the source is
+ absent. `_clone` discards deprecated fields. Unknown union types are
+ also discarded along with unions of type NONE, even if present in
+ source. Warning: `_clone` will expand DAGs.
+- Added `_get_ptr` reader method on scalar struct and table member
+ fields which returns the equivalent of a single field struct `_get`
+ method. This is used to clone scalar values without endian
+ conversion. NOTE: scalars do not have assertions on `required`
+ attribute, so be careful with null return values. For structs it
+ is valid to apply `_get_ptr` to a null container struct such that
+ navigation can be done without additional checks.
+- Added `_push_clone` synonym for scalar and struct vector `_push_copy`.
+- Add `include/flatcc/flatcc_refmap.h` and `src/runtime/refmap.c` to
+ runtime library. The runtime library now depends on refmap.c but has
+ very low overhead when not explicitly enabled for use with cloning.
+- Add `flatbuffers_type_hash_from_string` to avoid gcc-8 strncpy warning
+ (#86).
+- Add long form flatcc options --common, --common-reader, --common-builder,
+ --reader, --builder, --verifier.
+- Remove flatcc deprecated option for schema namespace.
+- Add scripts/flatcc-doc.sh to help document generated code.
+- Remove unnecessary memory.h includes (#92)
+
+## [0.5.1]
+
+- Fix parent namespace lookup in the schema parser when the namespace
+ prefix is omitted.
+- Fix buffer overrun in JSON printer when exhausting flush buffer (#70).
+- More consistent name generation across verifier and json parsers
+ allowing for namespace wrapped parse/verify/print table functions.
+- Fix unhelpful error on precision loss from float/double conversion
+ in schema and JSON parser.
+- Align `monster_test.fbs` Monster table more closely with Google's flatc
+ version after they diverged a bit. (Subtables may differ).
+- Some C++ compatibility fixes so `include/{support/flatcc}` headers can
+ be included into C++ without `extern "C"`.
+- Fix missing null ptr check in fall-back `aligned_free`.
+- Enable `posix_memalign` as a default build option on recent GNU
+ systems because -std=c11 blocks automatic detection. This avoids
+ using a less efficient fallback implementation.
+- Add portable/include wrappers so build systems can add include paths
+ to ensure that <stdint.h>, <stdbool.h> etc. is available. Flatcc does
+ not currently rely on these.
+- Replace `flatcc/portable/pdiagnostic_push/pop.h` includes in generated
+ code with `flatcc/flatcc_prologue.h` and `flatcc/flatcc_epilogue.h` and add `__cplusplus extern
+ "C"` guards in those. This removes explicit references to the portable
+ headers in generated code and improves C++ compatibility (#72).
+- Change inconsistent `const void *` to `const char *` in JSON buffer
+ argument to generated `_as_root` parsers (#73).
+- Simplify use of json printers by auto-flushing and terminating buffers
+ when a root object has been printed (#74).
+- BREAKING: in extension of the changes in 0.5.0 for unions and union
+ vectors, the low-level methods and structs now consistently use the
+ terminology { type, value } for union types and values rather than {
+ type, member } or { types, members }. The binary builder interface
+ remains unchanged.
+- Silence (unjustified) uninitialized gcc warnings (#75).
+- Fix C++14 missing `__alignas_is_defined`.
+- Remove newlib stdalign conflict (#77).
+- Add `flatcc_json_printer_total`.
+- Add `flatcc_builder_table_add_union_vector`.
+
+## [0.5.0]
+- New schema type aliases: int8, uint8, int16, uint16, int32, uint32,
+ int64, uint64, float32, float64.
+- Low-level: access multiple user frames in builder via handles.
+- Support for `_is_known_type` and `_is_known_value` on union and enum
+ types.
+- More casts for C++ compatibility (#59).
+- Fix regressions in verifier fix in 0.4.3 that might report out of
+ bounds in rare cases (#60).
+- Silence gcc 7.x warnings about implicit fallthrough (#61).
+- Fix rare special case in JSON parser causing spurious unknown symbol.
+- Reading and writing union vectors. The C++ interface also supports
+ these types, but other languages likely won't for a while.
+- New `_union(t)` method for accessing a union field's type and member
+ table in a single call. The method also supports union vectors to
+ retrieve the type vector and member vector as a single object.
+- BREAKING: In generated builder code for union references of the form
+ `<union-name>_union_ref_t` the union members and the hidden `_member`
+ field have been replaced with a single `member` field. Union
+ constructors work as before: `Any_union_ref_t uref =
+ Any_as_weapon(weapon_ref)`. Otherwise use `.type` and `.member` fields
+ directly. This change was necessary to support the builder API's new
+ union vectors without hitting strict aliasing rules, for example as
+ argument to `flatcc_builder_union_vector_push`. Expected impact: low
+ or none. The physical struct layout remains unchanged.
+- BREAKING: `flatbuffers_generic_table_[vec_]t` has been renamed to
+ `flatbuffers_generic_[vec_]t`.
+- BREAKING: The verifier's runtime library interface has changed argument
+ order from `align, size` to `size, align` in order to be consistent
+ with the builder's interface so generated code must match library
+ version. No impact on user code calling generated verifier functions.
+- BREAKING: generated json table parser now calls `table_end` and
+ returns the reference in a new `pref` argument. Generated json struct
+ parsers are now renamed with an `_inline` suffix and the original name now
+ parses a non-inline struct similar to the table parsers. No impact to
+ user code that only calls the generated root parser.
+- Fix off-by-one indexing in `flatbuffers_generic_vec_at`. Impact
+ low since it was hardly relevant before union vectors were introduced
+ in this release.
+- Add document on security considerations (#63).
+- Add support for base64 and base64url attributes in JSON printing and
+ parsing of [ubyte] table fields.
+- Added `flatcc_builder_aligned_free` and `flatcc_builder_aligned_alloc`
+ to ensure `aligned_free` implementation matches allocation compiled
+ into the runtime library. Note that alignment and size arguments are
+ ordered opposite to most runtime library calls for consistency with
+ the C11 `aligned_alloc` prototype.
+- Support for struct and string types in unions.
+- Add missing `_create` method on table union member fields.
+- Add `_clone` and `_clone_as_[typed_]root[_with_size]` methods on structs.
+ `_clone` was already supported on structs inlined in table fields.
+- Fix harmless but space consuming overalignment of union types.
+- Add `flatbuffers_union_type_t` with `flatbuffers_union_type_vec` operations.
+- Fix scoping bug on union types in JSON parser: symbolic names of the form
+ `MyUnion.MyUnionMember` were not accepted on a union type field but
+ `MyNamespace.MyUnion.MyMember` and `MyMember` was supported. This has been
+ fixed so all forms are valid. Plain enums did not have this issue.
+- Place type identifiers early in generated `_reader.h` file to avoid
+ circular reference issue with nested buffers when nested buffer type
+ is placed after referencing table in schema.
+- Fix verify bug on struct buffers - and in test case - not affecting
+ ordinary buffers with table as root.
+
+## [0.4.3]
+- Fix issue with initbuild.sh for custom builds (#43)
+- Add casts to aid clean C++ builds (#47)
+- Add missing const specifier in generated `buffer_start` methods - removes C++
+ warnings (#48)
+- Update external/hash, removed buggy Sorted Robin Hood Hash that wasn't
+ faster anyway - no impact on flatcc.
+- Fix JSON parsing bug where some names are prefixes of others (#50).
+- A Table of Contents in documentation :-)
+- Move repetitive generated JSON string parsing into library.
+- Add tests for JSON runtime compiled with different flags such as
+ permitting unquoted keys.
+- Fix building nested buffers when the parent buffer has not yet emitted
+ any data (#51).
+- Fix building nested buffers using the _nest() call (#52).
+- Add `FLATCC_TRACE_VERIFY` as build option.
+- Allow more customization of allocation functions (#55).
+- Avoid dependency on PORTABLE_H include guard which is too common (#55).
+- (possibly breaking) Fix duplicate field check in flatcc_builder_table_add call.
+- Fix incorrect infinity result in grisu3 parser and double to float
+ overflow handling in parse_float in portable library (affects JSON
+ of abnormal numeric values).
+- Fix return value handling of parse_float, parse_double in JSON parser.
+- Fix verifier vector alignment check - affects vectors with element size 8+.
+- Fix missing static in generated enum and union functions in JSON
+ printer (#57).
+
+## [0.4.2]
+- Fix SIGNIFICANT bug miscalculating the number of builder frames in
+ use. Nesting 8 levels would cause memory corruption (#41).
+- Fix minor memory leak in flatcc compiler.
+- Reduce collisions in builder's vtable hash.
+- Remove broken dependency on `<mm_malloc.h>` for some GCC versions in
+ `paligned_alloc.h` (#40).
+- Allow C++ files to include `pstdalign.h` and `paligned_alloc.h` (#39).
+
+## [0.4.1]
+- Test for `posix_memalign` on GCC platforms and fix fallback
+ `aligned_alloc`.
+- Fix JSON parser handling of empty objects and tables.
+- Fix JSON parser - some fields would not be accepted as valid (#17).
+- Fix rare uncompilable doc comment in schema (#21).
+- Avoid crash on certain table parser error cases (#30).
+- Add support for scan similar to find in reader API, but for O(N)
+ unsorted search, or search by a secondary key, and in sub-ranges.
+- Optionally, and by default, allow scan by any field (#29), not just keys.
+- More compact code generation for reader (hiding scan methods).
+- Use __flatbuffers_utype_t for union type in reader instead of uint8_t.
+- Add unaligned write to punaligned for completeness.
+- Promote use of `flatcc_builder_finalize_aligned_buffer` in doc and
+ samples over `flatcc_builder_finalize_buffer`.
+- Add scope counter to pstatic_assert.h to avoid line number conflicts.
+- Fix compiler error/warning for negative enums in generated JSON parser (#35).
+- Fix potential compiler error/warnings for large enum/defaults in
+ generated reader/builder (#35).
+- Fix tab character in C++ style comments (#34)
+- Fix incorrect api usage in binary schema builder (#32)
+- Support hex constants in fbs schema (flatc also supports these now) (#33).
+
+
+## [0.4.0]
+- Fix Windows detection in flatcc/support/elapsed.h used by benchmark.
+- Fix #8 surplus integer literal suffix in portable byteswap fallback.
+- Fix `pstatic_assert.h` missing fallback case.
+- Fix #9 return values from allocation can be zero without being an error.
+- Fix #11 by avoiding dependency on -lm (libmath) by providing a cleaner
+ over/underflow function in `include/flatcc/portable/pparsefp.h`.
+- Fix #12 infinite loop during flatbuffer build operations caused by
+ rare vtable dedupe hash table collision chains.
+- Added `include/flatcc/support/cdump.h` tool for encoding buffers in C.
+- JSON code generators no longer use non-portable PRIszu print
+ modifiers. Fixes issue on IBM XLC AIX.
+- Deprecated support for PRIsz? print modifiers in
+ `include/flatcc/portable/pinttypes.h`, they would require much more
+ work to be portable.
+- Fix and clean up `__STDC__` version checks in portable library.
+- Improve IBM XLC support in `pstdalign.h`.
+- Always include `pstdalign.h` in `flatcc_flatbuffers.h` because some
+ C11 compilers fail to provide `stdalign.h`.
+- Buffer verifier used to mostly, but not always, verify buffer
+ alignment relative to buffer start. With size prefixed buffers it is
+ necessary to verify relative to the allocated buffer, which is also
+ safer and more consistent, but adds requirements to aligned allocation.
+- `monster_test` and `flatc_compat` test now uses aligned alloc.
+- Add `aligned_alloc` and `aligned_free` to `pstdalign.h`.
+- `flatcc_builder_finalize_aligned_buffer` now requires `aligned_free`
+ to be fully portable and no longer use unaligned malloc as fallback,
+ but still works with `free` on most platforms (not Windows).
+
+- BREAKING: The addition of size prefixed buffers requires a minor change
+ to the low-level flatcc builder library with a flag argument to create
+ and start buffer calls. This should not affect user code.
+
+
+Changes related to big endian support which do not affect little endian
+platforms with little endian wire format.
+
+- Support for big endian platforms, tested on IBM AIX Power PC.
+- Support for big endian encoded flatbuffers on both little and big
+ endian host platforms via `FLATBUFFERS_PROTOCOL_IS_LE/BE` in
+ `include/flatcc/flatcc_types.h`. Use `flatbuffers_is_native_pe()` to
+ see if the host native endian format matches the buffer protocol.
+ NOTE: file identifier at buffer offset 4 is always byteswapped.
+
+In more detail:
+
+- Fix vtable conversion to protocol endian format. This keeps cached
+ vtables entirely in native format and reduces hash collisions and only
+ converts when emitting the vtable to a buffer location.
+- Fix structs created with parameter list resulting in double endian
+ conversion back to native.
+- Fix string swap used in sort due to endian sensitive diff math.
+- Disable direct vector access test case when running on non-native
+ endian platform.
+- Update JSON printer test to handle `FLATBUFFERS_PROTOCOL_IS_BE`.
+- Fix emit test case. Incorrect assumption on acceptable null pointer
+ breaks with null pointer conversion. Also add binary check when
+ `FLATBUFFERS_PROTOCOL_IS_BE`.
+- Add binary test case to `json_test` when `FLATBUFFERS_PROTOCOL_IS_BE`.
+- Fix endian sensitive voffset access in json printer.
+- Update `flatc_compat` to reverse acceptance of 'golden' little endian
+ reference buffer when `FLATBUFFERS_PROTOCOL_IS_BE`.
+
+## [0.3.5a]
+- Fix regression introduced in 0.3.5 that caused double memory free on
+ input file buffer. See issue #7.
+
+## [0.3.5]
+
+- Allow flatcc cli options anywhere in the argument list.
+- Add --outfile option similar to --stdout, but to a file.
+- Add --depfile and --deptarget options for build dependencies.
+- Allow some test cases to accept arguments to avoid hardcoded paths.
+- Deprecate --schema-namespace=no option to disable namespace prefixes
+ in binary schema as Google flatbuffers now also includes namespaces
+ according to https://github.com/google/flatbuffers/pull/4025
+
+## [0.3.4]
+
+- Add `FLATCC_RTONLY` and `FLATCC_INSTALL` build options.
+- Fix issue4: when building a buffer and the first thing created is an
+ empty table, the builder wrongly assumed allocation failure. Affects
+ runtime library.
+- `scripts/setup.sh` now also links to debug libraries useful for bug
+ reporting.
+- Add ULL suffix to large printed constants in generated code which
+ would otherwise require --std=c99 to silence warnings.
+
+## [0.3.3]
+
+- BREAKING: `verify_as_root` no longer takes an identifier argument, use
+ `verify_as_root_with_identifier` which takes an identifier argument,
+ and variants exist for type identifiers. `myschema_verifier.h` now
+ includes `myschema_reader.h` to access the identifier.
+- Added scripts/setup.sh to quickly get started on small user projects.
+- Support `namespace ;` for reverting to global namespace in schema.
+- Enable block comments now that they are supported in flatc.
+- Parse and validate new `rpc_service` schema syntax, but with no
+ support for code generation.
+- Add type hash support (`create/verify_as_typed_root` etc.) to
+ optionally store and verify file identifiers based on hashed fully
+ qualified type names.
+- Fix potential issue with detection of valid file identifiers in
+ buffer.
+- Moved `include/support` into `include/flatcc/support`, renamed
+ `include/support/readfile.h` function `read_file` to `readfile`.
+- Make `FLATCC_TEST` build option skip building samples and test
+ files, as opposed to just skip running the tests.
+- `vec_at`, `vec_find`, etc. now use index type `size_t` instead of
+ `flatbuffers_uoffset_t`.
+- Removed `size_t` conversion warnings on Win64.
+
+## [0.3.2]
+
+- Move compiler warning handling from generated headers to portable
+ library.
+- Clean up warnings and errors for older gcc, clang and MSVC compilers.
+- CI builds.
+- Fix and improve portable version of `static_assert`.
+- Add integer parsing to portable library.
+
+## [0.3.1]
+
+- Add support for MSVC on Windows.
+- Allow FlatBuffer enums to be used in switch statements without warnings.
+- Remove warnings for 32-bit builds.
+- Fix runtime detection of endianness and add support for more
+ platforms with compile time detection of endianness.
+- Fix scope bug where global namespace symbols from included schema
+ would be invisible in parent schema.
+- Add missing `static` for generated union verifiers.
+- Fix bug in json printer unicode escape and hash table bug in
+ compiler.
+- Fix json parser under allocation bug.
+- Fix `emit_test` early dealloc bug.
+
+## [0.3.0]
+
+- Rename examples folder to samples folder.
+- Add samples/monster example.
+- BREAKING: added missing `_vec` infix on some operations related to
+ building vectors. For example `Weapon_push` -> `Weapon_vec_push`.
+- BREAKING: vector and string start functions no longer takes a
+ count/len argument as it proved tedious and not very useful.
+ The return value is now 0 on success rather than a buffer pointer.
+ Use `_extend` call after start when the length argument is non-zero.
+
+## [0.2.1]
+
+- Disallow unquoted symbolic list in JSON parser by default for Google
+ flatc compatibility.
+- Remove PRIVATE flags from CMake build files to support older CMake
+ versions.
+- Simplify switching between ninja and make build tools.
+- Fix incorrectly named unaligned read macros - impacts non-x86 targets.
+- Mirror grisu3 headers in portable library to avoid dependency on
+ `external/grisu3`.
+- Use correct grisu3 header for parsing, improving json parsing times.
+- Move `include/portable` to `include/flatcc/portable` to simplify
+ runtime distribution and to prevent potential name and versioning
+ conflicts.
+- Fix `is_union` in bfbs2json.c example.
+
+## [0.2.0]
+
+- BREAKING: flatcc verify functions now return a distinct error code.
+ This breaks existing code. Before non-zero was success, now `flatcc_verify_ok` == 0.
+ The error code can be converted to a string using `flatcc_verify_error_string(ret)`.
+- BREAKING, minor: Remove user state from builder interface - now
+ providing a user stack instead.
+- Fix verification of nested flatbuffers.
+- Fix certain header fields that were not endian encoded in builder.
+- MAJOR: Generate json printer and parser.
+- Added high performance integer printing to portable library
+ and fast floating point printing to runtime library (grisu3) for JSON.
+- Comparison against default value now prints float to generated source
+ with full precision ("%.17g").
+- MAJOR: read-only generated files no longer attempt to be independent
+ of files in the flatcc include dir. Instead they will use one
+ well-defined source of information for flatbuffer types and endian detection.
+- Always depend `portable/pendian.h` and `portable/pendian_detect.h`.
+ (The `include/portable` dir can be copied under `include/flatcc` if so desired).
+- Updates to set of include files in include/flatcc.
+- Upgrade to pstdint.h 0.1.15 to fix 64-bit printf issue on OS-X.
+- Support for portable unaligned reads.
+- Hide symbols that leak into namespace via parent include.
+- Suppress unused function and variable warnings for GCC (in addition to clang).
+
+## [0.1.1]
+
+- Rename libflatccbuilder.a to libflatccrt.a (flatcc runtime).
+- Add suffix to all generated files (`monster_test.h -> monster_test_reader.h`)
+- Add buffer verifiers (`monster_test_verifier.h`).
+- Assert on error in flatcc builder by default.
+- Fix -I include path regression in `flatcc` command.
+
+## [0.1.0]
+
+- Initial public release.
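Several command line related entries above (the long-form code generator options from 0.5.2 and the `--outfile`/`--depfile` options from 0.3.5) correspond to invocations along the following lines. This is only a sketch: the `mkdir`, the `-o` output directory and the chosen schema are illustrative assumptions, not text from the changelog:

```sh
# Generate the common files plus reader, builder and verifier headers
# using the long-form options added in 0.5.2.
mkdir -p generated
flatcc --common --reader --builder --verifier -o generated samples/monster/monster.fbs

# Emit a single concatenated header and a make-style dependency file
# using the --outfile and --depfile options added in 0.3.5.
flatcc --reader --outfile=generated/monster_gen.h \
    --depfile=generated/monster_gen.d samples/monster/monster.fbs
```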
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..492ec8b
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,363 @@
+# Ubuntu 14.04 (Trusty)
+cmake_minimum_required (VERSION 2.8.12.2)
+# Centos 7
+#cmake_minimum_required (VERSION 2.8.11)
+#cmake_minimum_required (VERSION 2.8)
+
+# Experimental for generating compile_commands.json so editors with
+# clangd language server support can use it. Symlink
+# build/Debug/compile_commands.json to project root where it is
+# gitignored.
+#set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
+
+# Disable build of tests and samples. Due to custom build step
+# dependency on flatcc tool, some custom build configurations may
+# experience issues, and this option can then help.
+option(FLATCC_TEST "enable tests" ON)
+
+# Only active if FLATCC_TEST is active. Used to ensure that C++ users
+# can include generated C source. Old GCC pre 4.7 won't compile C++ test
+# project.
+option(FLATCC_CXX_TEST "enable C++ tests" ON)
+
+# Conditionally set project languages based on FLATCC_TEST, as C++ is
+# only necessary if building the tests.
+if (FLATCC_TEST AND FLATCC_CXX_TEST)
+ project (FlatCC C CXX)
+else()
+ project (FlatCC C)
+endif()
+
+#
+# NOTE: when changing build options, clean the build using one of:
+#
+# scripts/cleanall.sh
+# scripts/test.sh
+#
+
+# Force use of portable shims such as providing `static_assert` and
+# `stdalign.h`. Otherwise this option is automatically enabled for some
+# known compiler configurations below.
+option (FLATCC_PORTABLE
+ "include extra headers for compilers that do not support certain C11 features" OFF)
+
+# It is not possible to detect posix_memalign when compiling with
+# -std=c11 but aligned_alloc is not always available either.
+# This option assumes that posix_memalign is then available.
+# Without C11, detection depends on _POSIX_C_SOURCE.
+option (FLATCC_GNU_POSIX_MEMALIGN
+ "use posix_memalign on gnu systems also when C11 is configured" ON)
+
+# Only build the runtime library - mostly intended in combination with
+# FLATCC_INSTALL for cross compiling targets.
+option(FLATCC_RTONLY "enable build of runtime library only" OFF)
+
+# Use with or without FLATCC_RTONLY to enable install targets.
+# Libraries are built statically by default, but CMake's
+# -DBUILD_SHARED_LIBS=ON option can override this.
+option(FLATCC_INSTALL "enable install targets" OFF)
+
+# Use only with a debug build with testing enabled. Enables generation
+# of coverage information during build and run. Adds target "coverage"
+# which collects data and makes an HTML report in the build directory.
+option(FLATCC_COVERAGE "enable coverage" OFF)
+
+# Affects the flatbuffer verify operation. Normally a verify should just
+# quickly reject invalid buffers but for troubleshooting, assertions can
+# be enabled. This requires rebuilding the runtime library and will likely
+# break test cases (those that test that an invalid buffer is invalid).
+option (FLATCC_DEBUG_VERIFY
+ "assert on verify failure in runtime lib" OFF)
+
+# Print detailed traces of binary buffer contents when calling verify.
+option (FLATCC_TRACE_VERIFY
+ "assert on verify failure in runtime lib" OFF)
+
+# Reflection is the compiler's ability to generate binary schema output
+# (.bfbs files). This requires using generated code from
+# `reflection.fbs`. During development it may not be possible to
+# compile with reflection enabled because a broken build could then
+# become impossible to fix. It may also be disabled simply because it
+# isn't needed.
+option (FLATCC_REFLECTION
+ "generation of binary flatbuffer schema files" ON)
+
+# FLATCC_NATIVE_OPTIM and FLATCC_FAST_DOUBLE affect json parsing,
+# especially if the content is pretty printed. But it is plenty
+# fast without these settings in most cases. Not recommended.
+option (FLATCC_NATIVE_OPTIM
+ "use machine native optimizations like SSE 4.2" OFF)
+
+# Fast grisu3 string/floating point conversion still depends on strtod
+# for about 1-2% of the conversions in order to produce an exact result.
+# By allowing a minor difference in the least significant bits, this
+# dependency can be avoided, and speed improved. Some strtod
+# implementations call strlen which is really slow on large JSON
+# buffers, and catastrophic on buffers that are not zero-terminated -
+# regardless of size. Most platforms have a decent strtod these days.
+option (FLATCC_FAST_DOUBLE
+ "faster but slightly incorrect floating point parser (json)" OFF)
+
+# -Werror is only set for some compiler versions that are believed to
+# not generate any warnings. If the assumption breaks, disable
+# this option if the warning is not significant.
+option (FLATCC_ALLOW_WERROR "allow -Werror to be configured" ON)
+
+# Experimental setting - sometimes the code branches on a constant
+# expression in order to select the best option for a given type size or
+# similar. Sometimes compilers don't like that. If this issue surfaces,
+# try using this option.
+option (FLATCC_IGNORE_CONST_COND "silence const condition warnings" OFF)
+
+if (FLATCC_RTONLY)
+ set(FLATCC_TEST off)
+endif()
+
+if (FLATCC_TEST)
+ enable_testing()
+endif()
+
+if (NOT FLATCC_TEST)
+ set(FLATCC_COVERAGE off)
+endif()
+
+if (NOT CMAKE_BUILD_TYPE MATCHES Debug)
+ set(FLATCC_COVERAGE off)
+endif()
+
+if (FLATCC_COVERAGE)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --coverage -DNDEBUG")
+endif()
+
+if (FLATCC_DEBUG_VERIFY)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFLATCC_DEBUG_VERIFY=1")
+endif()
+
+if (FLATCC_TRACE_VERIFY)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFLATCC_TRACE_VERIFY=1")
+endif()
+
+
+if (FLATCC_REFLECTION)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFLATCC_REFLECTION=1")
+else()
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFLATCC_REFLECTION=0")
+endif()
+
+
+if (FLATCC_NATIVE_OPTIM)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native -DFLATCC_USE_SSE4_2=1")
+endif()
+
+if (FLATCC_FAST_DOUBLE)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DGRISU3_PARSE_ALLOW_ERROR -DFLATCC_USE_GRISU3=1")
+endif()
+
+if (NOT DEFINED FLATCC_INSTALL_LIB)
+ set(lib_dir lib)
+else()
+ set(lib_dir ${FLATCC_INSTALL_LIB})
+endif()
+
+# The folder of this directory, as opposed to CMAKE_BINARY_DIR
+# which would usually be the build/Release and build/Debug paths
+set (dist_dir "${PROJECT_SOURCE_DIR}")
+# set (dist_dir "${CMAKE_BINARY_DIR}")
+
+message(STATUS "dist install dir ${dist_dir}")
+message(STATUS "lib install dir ${dist_dir}/${lib_dir}")
+
+# Note: for compiling generated C code, warnings of unused functions
+# and constants should be turned off - those are plentiful. They are
+# silenced for Clang, GCC and MSVC in generated headers.
+
+if (CMAKE_C_COMPILER_ID MATCHES "Clang" AND NOT "${CMAKE_CXX_SIMULATE_ID}" STREQUAL "MSVC")
+ # Clang or AppleClang
+ message(STATUS "Setting Clang compiler options")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wsign-conversion")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wconversion")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -pedantic -Wall -Wextra")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic -Wall -Wextra")
+ # Fix broken C++ alignas - either will do
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+ #set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DPORTABLE_PATCH_CPLUSPLUS_STDALIGN")
+ if (FLATCC_ALLOW_WERROR)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror")
+ endif()
+ if (FLATCC_IGNORE_CONST_COND)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-tautological-constant-out-of-range-compare")
+ endif()
+ # Suppress warning relaxed in clang-6, see https://reviews.llvm.org/D28148
+ if (CMAKE_C_COMPILER_VERSION VERSION_LESS 6)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-missing-field-initializers")
+ endif()
+
+ # To get assembly output
+ # set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -save-temps")
+
+elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion
+ OUTPUT_VARIABLE GCC_VERSION)
+ if (GCC_VERSION VERSION_LESS 4.7)
+ message(STATUS "Setting older GNU C compiler options with FLATCC_PORTABLE")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra")
+ # We need stdalign.h
+ set(FLATCC_PORTABLE true)
+ # Disable C++ test for old compilers known to break due to
+ # missing stdalign.h and incomplete stdint.h which is not a
+ # priority to fix in portable library for C++ use case.
+ # Note: we test the C compiler version not the C++ compiler
+ # version, but that is (hopefully) close enough.
+ if (FLATCC_CXX_TEST)
+ message(STATUS "Disabling C++ tests for GCC pre 4.7")
+ set(FLATCC_CXX_TEST false)
+ endif()
+ else()
+ message(STATUS "Setting GNU C compiler options with c11 and Posix")
+ if (GCC_VERSION VERSION_LESS 8.0)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -pedantic -Wall -Wextra")
+ elseif (NOT (GCC_VERSION VERSION_LESS 8.0))
+ # Disable some GCC checks:
+ # (warnings exist since 8.0, but are more aggressive in 9.0)
+ #
+ # -Wstringop-truncation:
+ # GCC 9 warns on truncated strncpy into char arrays in FlatBuffer
+ # structs, but these are valid as zero-padded, not zero terminated.
+ #
+ # -Wno-format-overflow:
+ # GCC 9 warns on mistakenly assumed NULL string when
+ # printing from a required FlatBuffer string field.
+ #
+ message(STATUS "Disabling -pedantic for GCC >= 8.0")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -Wall -Wextra")
+ message(STATUS "Disabling GNU C compiler warnings: -Wstringop-truncation -Wno-format-overflow")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-stringop-truncation -Wno-format-overflow")
+ endif()
+ if (NOT (GCC_VERSION VERSION_LESS 11.0))
+ # Disable warning on misleading indentation; it became more aggressive in 11.0.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-misleading-indentation")
+ endif()
+ if (FLATCC_GNU_POSIX_MEMALIGN)
+ # -std=c11 prevents detection of posix_memalign, and aligned_alloc might be missing
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DPORTABLE_POSIX_MEMALIGN=1")
+ endif()
+ if (FLATCC_ALLOW_WERROR)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror")
+ endif()
+ endif()
+ if (FLATCC_IGNORE_CONST_COND)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-type-limits")
+ endif()
+
+ # Too aggressive: e.g. main() without arguments is not permitted, and
+ # main with args then yields an unused argument warning.
+ # set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")
+
+ # In gcc 4.8 it is not possible to suppress this warning using
+ # #pragma GCC diagnostic ignored "-Wunused-function"
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function")
+ # set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-type-limits")
+
+ if (GCC_VERSION VERSION_LESS 4.8)
+ # -Wsign-conversion broken for GCC 4.7 conditional operator
+ else()
+ # Might be disabled if GCC keeps getting more aggressive.
+ # Incorrectly warns on explicit char to uint32_t casts.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wsign-conversion")
+
+ # Too aggressive, warns on `x = x + 1;` or `n = -n;`.
+ # set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wconversion")
+ endif()
+
+elseif (CMAKE_C_COMPILER_ID STREQUAL "Intel")
+ message(STATUS "Setting Intel C compiler options")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -Wall -Wextra")
+elseif (MSVC) # using STREQUAL here conflicts with string interpretation changes in CMake
+ message(STATUS "Setting MSVC C compiler options")
+ # -DFLATCC_PORTABLE also required, but set earlier
+ # -W3 is the highest warning level that is reasonable.
+ # See include/flatcc/portable/pwarnings.h for disabled warnings.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -W3 -D_CRT_SECURE_NO_WARNINGS")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W3 -D_CRT_SECURE_NO_WARNINGS")
+ # MSVC 2013 (1800) supports inline variable declarations
+ # while MSVC 2010 (1600) does not.
+ if (MSVC_VERSION STRLESS "1800")
+ # Disables monster sample build which uses C99 style variable decls.
+ set (FLATCC_NEED_C89_VAR_DECLS true)
+ endif()
+ set(FLATCC_PORTABLE true)
+ elseif (CMAKE_C_COMPILER_ID STREQUAL "XL")
+ # IBM's native XLC C compiler in extended C99 mode
+
+ message(STATUS "Setting IBM XL C compiler options")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -qlanglvl=extc99")
+else()
+ # Best effort
+ message(STATUS "Best effort settings for compiler: ${CMAKE_C_COMPILER_ID}")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
+ set(FLATCC_PORTABLE true)
+endif()
+
+if (FLATCC_PORTABLE)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFLATCC_PORTABLE")
+endif()
+
+if (CLANG_VERSION)
+ message(STATUS "CLANG_VERSION: ${CLANG_VERSION}")
+endif()
+if (GCC_VERSION)
+ message(STATUS "GCC_VERSION: ${GCC_VERSION}")
+endif()
+message(STATUS "Configured C_FLAGS: ${CMAKE_C_FLAGS}")
+
+set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/${lib_dir})
+
+set(CMAKE_DEBUG_POSTFIX "_d")
+
+if (CMAKE_BUILD_TYPE MATCHES "Debug")
+ set(CMAKE_EXECUTABLE_SUFFIX "_d${CMAKE_EXECUTABLE_SUFFIX}")
+endif()
+
+
+if (FLATCC_RTONLY)
+ # The targets we copy to bin and lib directories, i.e. not tests.
+ set(dist_targets
+ flatccrt
+ )
+ add_subdirectory(src/runtime)
+else()
+ # The targets we copy to bin and lib directories, i.e. not tests.
+ set(dist_targets
+ flatcc
+ flatccrt
+ flatcc_cli
+ )
+ add_subdirectory(src/runtime)
+ add_subdirectory(src/compiler)
+ add_subdirectory(src/cli)
+endif()
+
+# disabled by FLATCC_RTONLY
+if (FLATCC_TEST)
+ add_subdirectory(test)
+ add_subdirectory(samples)
+endif()
+
+if (FLATCC_COVERAGE)
+ add_custom_target(coverage
+ COMMAND lcov --capture --directory src --output-file coverage.info
+ COMMAND genhtml coverage.info --output-directory coverage)
+endif()
+
+set_target_properties(${dist_targets}
+ PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY "${dist_dir}/${lib_dir}"
+ LIBRARY_OUTPUT_DIRECTORY "${dist_dir}/${lib_dir}"
+ RUNTIME_OUTPUT_DIRECTORY "${dist_dir}/bin"
+)
+
+if (FLATCC_INSTALL)
+ install(DIRECTORY include/flatcc DESTINATION include)
+endif()
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..6cc59dc
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+
+By contributing to this project you agree to:
+
+- You are the original creator of the submission.
+- No third party has any claims to the submission or, if any such claims exist,
+ they are under a compatible license and clearly documented as such.
+- You grant a non-exclusive copyright license as per section 2. of the
+ Apache 2.0 license.
+
+Section 2. of Apache 2.0 license for reference:
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..71a3cac
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 Mikkel F. Jørgensen, dvide.com
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..85305c3
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,22 @@
+This file documents third party content that is not covered by the
+project license.
+
+Files in the `external` and `include/flatcc/portable` directories have
+separate licenses and copyrights. These have relaxed open source
+licenses - either MIT, BSD, Apache 2.0, or similar as documented in
+these folders.
+
+`include/flatcc/support` files are in part based on snippets posted on online
+forums and documented as such in the source.
+
+A minimal set of files has migrated from Google's `flatbuffers` project
+in the interest of interoperability: `reflection/reflection.fbs` and
+`test/monster_test/monster_test.{fbs,golden,json}`, and grammar syntax
+in the `doc` directory. These are covered by the Apache 2.0 license,
+copyright Google Inc.
+
+`test/benchmark/flatbench.fbs` and the test data set are based on Google's
+FlatBuffers benchmark code by the author's permission.
+
+The name `flatcc`, which non-coincidentally resembles the `flatc` tool
+in the Google FlatBuffers project, is used with the author's permission.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..95fe808
--- /dev/null
+++ b/README.md
@@ -0,0 +1,2620 @@
+Ubuntu, macOS and Windows: [![Build Status](https://github.com/dvidelabs/flatcc/actions/workflows/ci.yml/badge.svg)](https://github.com/dvidelabs/flatcc/actions/workflows/ci.yml)
+Windows: [![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/dvidelabs/flatcc?branch=master&svg=true)](https://ci.appveyor.com/project/dvidelabs/flatcc)
+Weekly: [![Build Status](https://github.com/dvidelabs/flatcc/actions/workflows/weekly.yml/badge.svg)](https://github.com/dvidelabs/flatcc/actions/workflows/weekly.yml)
+
+
+_The JSON parser may change the interface for parsing union vectors in a
+future release which requires code generation to match library
+versions._
+
+# FlatCC FlatBuffers in C for C
+
+`flatcc` has no external dependencies except for build and compiler
+tools, and the C runtime library. With concurrent Ninja builds, a small client
+project can build flatcc with libraries, generate schema code, link the project
+and execute a test case in a few seconds, produce binaries between 15K and 60K,
+read small buffers in 30ns, build FlatBuffers in about 600ns, and with a larger
+executable also handle optional json parsing or printing in less than 2 us for a
+10 field mixed type message.
+
+
+<!-- vim-markdown-toc GFM -->
+
+* [Online Forums](#online-forums)
+* [Introduction](#introduction)
+* [Project Details](#project-details)
+* [Poll on Meson Build](#poll-on-meson-build)
+* [Reporting Bugs](#reporting-bugs)
+* [Status](#status)
+ * [Main features supported as of 0.6.1](#main-features-supported-as-of-061)
+ * [Supported platforms (CI tested)](#supported-platforms-ci-tested)
+ * [Platforms reported to work by users](#platforms-reported-to-work-by-users)
+ * [Portability](#portability)
+* [Time / Space / Usability Tradeoff](#time--space--usability-tradeoff)
+* [Generated Files](#generated-files)
+ * [Use of Macros in Generated Code](#use-of-macros-in-generated-code)
+ * [Extracting Documentation](#extracting-documentation)
+* [Using flatcc](#using-flatcc)
+* [Trouble Shooting](#trouble-shooting)
+* [Quickstart](#quickstart)
+ * [Reading a Buffer](#reading-a-buffer)
+ * [Compiling for Read-Only](#compiling-for-read-only)
+ * [Building a Buffer](#building-a-buffer)
+ * [Verifying a Buffer](#verifying-a-buffer)
+ * [Potential Name Conflicts](#potential-name-conflicts)
+ * [Debugging a Buffer](#debugging-a-buffer)
+* [File and Type Identifiers](#file-and-type-identifiers)
+ * [File Identifiers](#file-identifiers)
+ * [Type Identifiers](#type-identifiers)
+* [JSON Parsing and Printing](#json-parsing-and-printing)
+ * [Base64 Encoding](#base64-encoding)
+ * [Fixed Length Arrays](#fixed-length-arrays)
+ * [Runtime Flags](#runtime-flags)
+ * [Generic Parsing and Printing.](#generic-parsing-and-printing)
+ * [Performance Notes](#performance-notes)
+* [Global Scope and Included Schema](#global-scope-and-included-schema)
+* [Required Fields and Duplicate Fields](#required-fields-and-duplicate-fields)
+* [Fast Buffers](#fast-buffers)
+* [Types](#types)
+* [Unions](#unions)
+ * [Union Scope Resolution](#union-scope-resolution)
+* [Fixed Length Arrays](#fixed-length-arrays-1)
+* [Optional Fields](#optional-fields)
+* [Endianness](#endianness)
+* [Pitfalls in Error Handling](#pitfalls-in-error-handling)
+* [Searching and Sorting](#searching-and-sorting)
+* [Null Values](#null-values)
+* [Portability Layer](#portability-layer)
+* [Building](#building)
+ * [Unix Build (OS-X, Linux, related)](#unix-build-os-x-linux-related)
+ * [Windows Build (MSVC)](#windows-build-msvc)
+ * [Docker](#docker)
+ * [Cross-compilation](#cross-compilation)
+ * [Custom Allocation](#custom-allocation)
+ * [Custom Asserts](#custom-asserts)
+ * [Shared Libraries](#shared-libraries)
+* [Distribution](#distribution)
+ * [Unix Files](#unix-files)
+ * [Windows Files](#windows-files)
+* [Running Tests on Unix](#running-tests-on-unix)
+* [Running Tests on Windows](#running-tests-on-windows)
+* [Configuration](#configuration)
+* [Using the Compiler and Builder library](#using-the-compiler-and-builder-library)
+* [FlatBuffers Binary Format](#flatbuffers-binary-format)
+* [Security Considerations](#security-considerations)
+* [Style Guide](#style-guide)
+* [Benchmarks](#benchmarks)
+
+<!-- vim-markdown-toc -->
+
+## Online Forums
+
+- [Google Groups - FlatBuffers](https://groups.google.com/forum/#!forum/flatbuffers)
+- [Discord - FlatBuffers](https://discord.gg/6qgKs3R)
+- [Gitter - FlatBuffers](https://gitter.im/google/flatbuffers)
+
+
+## Introduction
+
+This project builds flatcc, a compiler that generates FlatBuffers code for
+C given a FlatBuffer schema file. This introduction also creates a separate test
+project with the traditional monster example, here in a C version.
+
+For now assume a Unix like system although that is not a general requirement -
+see also [Building](#building). You will need git, cmake, bash, a C compiler,
+and either the ninja build system, or make.
+
+ git clone https://github.com/dvidelabs/flatcc.git
+ cd flatcc
+ # scripts/initbuild.sh ninja
+ scripts/initbuild.sh make
+ scripts/setup.sh -a ../mymonster
+ ls bin
+ ls lib
+ cd ../mymonster
+ ls src
+ scripts/build.sh
+ ls generated
+
+`scripts/initbuild.sh` is optional and chooses the build backend, which defaults
+to ninja.
+
+The setup script builds flatcc using CMake, then creates a test project
+directory with the monster example, and a build script which is just a small
+shell script. The headers and libraries are symbolically linked into the test
+project. You do not need CMake to build your own projects once flatcc is
+compiled.
+
+To create another test project named foobar, call `scripts/setup.sh -s -x
+../foobar`. This will avoid rebuilding the flatcc project from scratch.
+
+
+## Project Details
+
+NOTE: see
+[CHANGELOG](https://github.com/dvidelabs/flatcc/blob/master/CHANGELOG.md).
+There are occasionally minor breaking changes as API inconsistencies
+are discovered. Unless clearly stated, breaking changes will not affect
+the compiled runtime library, only the header files. In case of trouble,
+make sure the `flatcc` tool is the same version as the `include/flatcc`
+path.
+
+The project includes:
+
+- an executable `flatcc` FlatBuffers schema compiler for C and a
+ corresponding library `libflatcc.a`. The compiler generates C header
+ files or a binary flatbuffers schema.
+- a typeless runtime library `libflatccrt.a` for building and verifying
+ flatbuffers from C. Generated builder headers depend on this library.
+ It may also be useful for other language interfaces. The library
+ maintains a stack state to make it easy to build buffers from a parser
+ or similar.
+- a small `flatcc/portable` header only library for non-C11 compliant
+ compilers, and small helpers for all compilers including endian
+ handling and numeric printing and parsing.
+
+
+See also:
+
+- [Reporting Bugs](https://github.com/dvidelabs/flatcc#reporting-bugs)
+
+- [Google FlatBuffers](http://google.github.io/flatbuffers/)
+
+- [Build Instructions](https://github.com/dvidelabs/flatcc#building)
+
+- [Quickstart](https://github.com/dvidelabs/flatcc#quickstart)
+
+- [Builder Interface Reference]
+
+- [Benchmarks]
+
+The `flatcc` compiler is implemented as a standalone tool instead of
+extending Google's `flatc` compiler in order to have a pure portable C
+library implementation of the schema compiler that is designed to fail
+gracefully on abusive input in long running processes. It is also
+believed a C version may help provide schema parsing to other language
+interfaces that find interfacing with C easier than C++. The FlatBuffers
+team at Google's FPL lab has been very helpful in providing feedback and
+answering many questions to help ensure the best possible compatibility.
+Notice the name `flatcc` (FlatBuffers C Compiler) vs Google's `flatc`.
+
+The JSON format is compatible with Google's `flatc` tool. The `flatc`
+tool converts JSON from the command line using a schema and a buffer as
+input. `flatcc` generates schema specific code to read and write JSON
+at runtime. While the `flatcc` approach is likely much faster and also
+easier to deploy, the `flatc` approach is likely more convenient when
+manually working with JSON such as editing game scenes. Both tools have
+their place.
+
+**NOTE: Big-endian platforms are only supported as of release 0.4.0.**
+
+
+## Poll on Meson Build
+
+Adding support for the Meson build system is being considered, but it
+would be good to get some feedback on this via
+[issue #56](https://github.com/dvidelabs/flatcc/issues/56).
+
+
+## Reporting Bugs
+
+If possible, please provide a short reproducible schema and source file
+with a main program that returns 1 on error and 0 on success and a small
+build script. Preferably generate a hexdump and call the buffer verifier
+to ensure the input is valid and link with the debug library
+`flatccrt_d`.
+
+See also [Debugging a Buffer](#debugging-a-buffer), and [readfile.h],
+which is useful for reading an existing buffer for verification.
+
+Example:
+
+[samples/bugreport](samples/bugreport)
+
+eclectic.fbs :
+
+```c
+namespace Eclectic;
+
+enum Fruit : byte { Banana = -1, Orange = 42 }
+table FooBar {
+ meal : Fruit = Banana;
+ density : long (deprecated);
+ say : string;
+ height : short;
+}
+file_identifier "NOOB";
+root_type FooBar;
+```
+
+myissue.c :
+
+```c
+/* Minimal test with all headers generated into a single file. */
+#include "build/myissue_generated.h"
+#include "flatcc/support/hexdump.h"
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ void *buf;
+ size_t size;
+ flatcc_builder_t builder, *B;
+
+ (void)argc;
+ (void)argv;
+
+ B = &builder;
+ flatcc_builder_init(B);
+
+ Eclectic_FooBar_start_as_root(B);
+ Eclectic_FooBar_say_create_str(B, "hello");
+ Eclectic_FooBar_meal_add(B, Eclectic_Fruit_Orange);
+ Eclectic_FooBar_height_add(B, -8000);
+ Eclectic_FooBar_end_as_root(B);
+ buf = flatcc_builder_get_direct_buffer(B, &size);
+#if defined(PROVOKE_ERROR) || 0
+ /* Provoke error for testing. */
+ ((char*)buf)[0] = 42;
+#endif
+ ret = Eclectic_FooBar_verify_as_root(buf, size);
+ if (ret) {
+ hexdump("Eclectic.FooBar buffer for myissue", buf, size, stdout);
+ printf("could not verify Electic.FooBar table, got %s\n", flatcc_verify_error_string(ret));
+ }
+ flatcc_builder_clear(B);
+ return ret;
+}
+```
+build.sh :
+```sh
+#!/bin/sh
+cd $(dirname $0)
+
+FLATBUFFERS_DIR=../..
+NAME=myissue
+SCHEMA=eclectic.fbs
+OUT=build
+
+FLATCC_EXE=$FLATBUFFERS_DIR/bin/flatcc
+FLATCC_INCLUDE=$FLATBUFFERS_DIR/include
+FLATCC_LIB=$FLATBUFFERS_DIR/lib
+
+mkdir -p $OUT
+$FLATCC_EXE --outfile $OUT/${NAME}_generated.h -a $SCHEMA || exit 1
+cc -I$FLATCC_INCLUDE -g -o $OUT/$NAME $NAME.c -L$FLATCC_LIB -lflatccrt_d || exit 1
+echo "running $OUT/$NAME"
+if $OUT/$NAME; then
+ echo "success"
+else
+ echo "failed"
+ exit 1
+fi
+```
+
+## Status
+
+Release 0.6.1 contains primarily bug fixes and numerous contributions
+from the community to handle platform edge cases. Additionally,
+pedantic GCC warnings are disabled, relying instead on clang, since GCC
+is too aggressive, breaks builds frequently and works against
+portability. An existing C++ test case ensures that C code also works
+with common C++ compilers, but it can break some environments, so there
+is now a flag to disable that test without disabling all tests. Support
+for Optional Scalar Values in the FlatBuffer format has been added.
+There is also improved support for abstracting memory allocation on
+various platforms. `<table>_identifier` has been deprecated in favor of
+`<table>_file_identifier` in generated code due to `identifier` easily
+leading to name conflicts. The `file_extension` constant in generated code
+is now without a prefixed dot (.).
+
+Release 0.6.0 introduces a "primary" attribute to be used together with
+a key attribute to choose the default key for finding and sorting. If primary
+is absent, the key with the lowest id becomes primary. Tables and
+vectors can now be sorted recursively on primary keys. BREAKING:
+previously the first listed, not the lowest id, would be the primary
+key. Also introduces fixed length scalar arrays in struct fields (struct
+and enum elements are not supported). Structs support fixed length array
+fields, including char arrays. Empty structs never fully worked and are
+no longer supported; they are also no longer supported by flatc.
+NOTE: char arrays are not currently part of Google's flatc compiler -
+int8 arrays may be used instead. BREAKING: empty structs are no longer
+supported - they are also not valid in Google's flatc compiler. See
+CHANGELOG for additional changes. DEPRECATED: low-level `cast_to/from`
+functions in `flatcc_accessors.h` will be removed in favor of
+`read/write_from/to` because the cast interface breaks float conversion
+on some uncommon platforms. This should not affect normal use but
+remains valid in this release.
+
+Release 0.5.3 includes various bug fixes (see changelog) and one
+breaking but likely low impact change: BREAKING: 0.5.3 changes behavior
+of builder create calls so arguments are always ordered by field id when
+id attributes are being used, for example
+`MyGame_Example_Monster_create()` in `monster_test.fbs`
+([#81](https://github.com/dvidelabs/flatcc/issues/81)). Fixes undefined
+behavior when sorting tables by a numeric key field.
+
+Release 0.5.2 introduces an optional `_get` suffix to reader methods. By
+using `flatcc -g` only `_get` methods are valid. This removes potential
+name conflicts for some field names. 0.5.2 also introduces the long
+awaited clone operation for tables and vectors. A C++ smoketest was
+added to reduce the number of void pointer assignment errors that kept
+sneaking in. The runtime library now needs an extra file `refmap.c`.
+
+Release 0.5.1 fixes a buffer overrun in the JSON printer and improves
+the portable library's <stdalign.h> compatibility with C++ and the
+embedded `newlib` standard library. JSON printing and parsing has been
+made more consistent to help parse and print tables other than the
+schema root as seen in the test driver in [test_json.c]. The
+[monster_test.fbs] file has been reorganized to keep the Monster table
+more consistent with Google's flatc version and a minor schema namespace
+inconsistency has been resolved as a result. Explicit references to
+portable headers have been moved out of generated source. extern "C" C++
+guards added around generated headers. 0.5.1 also cleaned up the
+low-level union interface so the terms { type, value } are used
+consistently over { type, member } and { types, members }.
+
+
+### Main features supported as of 0.6.1
+
+- generated FlatBuffers reader and builder headers for C
+- generated FlatBuffers verifier headers for C
+- generated FlatBuffers JSON parser and printer for C
+- ability to concatenate all output into one file, or to stdout
+- robust dependency file generation for build systems
+- binary schema (.bfbs) generation
+- pre-generated reflection headers for handling .bfbs files
+- cli schema compiler and library for compiling schema
+- runtime library for builder, verifier and JSON support
+- thorough test cases
+- monster sample project
+- fast build times
+- support for big endian platforms (as of 0.4.0)
+- support for big endian encoded flatbuffers on both le and be platforms. Enabled on `be` branch.
+- size prefixed buffers - see also [Builder Interface Reference]
+- flexible configuration of malloc alternatives and runtime
+ aligned_alloc/free support in builder library.
+- feature parity with C++ FlatBuffers schema features added in 2017
+ adding support for union vectors and mixed type unions of strings,
+ structs, and tables, and type aliases for uint8, ..., float64.
+- base64(url) encoded binary data in JSON.
+- sort fields by primary key (as of 0.6.0)
+- char arrays (as of 0.6.0)
+- optional scalar values (as of 0.6.1)
+
+There are no plans to make frequent updates once the project becomes
+stable, but input from the community will always be welcome and included
+in releases where relevant, especially with respect to testing on
+different target platforms.
+
+
+### Supported platforms (CI tested)
+
+The ci-more branch tests additional compilers:
+
+- Ubuntu Trusty gcc 4.4, 4.6-4.9, 5, 6, 7 and clang 3.6, 3.8
+- OS-X current clang / gcc
+- Windows MSVC 2010, 2013, 2015, 2015 Win64, 2017, 2017 Win64
+- C++11/C++14 user code on the above platforms.
+
+C11/C++11 is the reference that is expected to always work.
+
+The GCC `--pedantic` compiler option is not supported as of GCC-8+
+because it forces non-portable code changes and because it tends to
+break the code base with each new GCC release.
+
+MSVC 2017 is not always tested because the CI environment then won't
+support MSVC 2010.
+
+Older/non-standard versions of C++ compilers cause problems because
+`static_assert` and `alignas` behave in strange ways where they are
+neither absent nor fully working as expected. There are often
+workarounds, but it is more reliable to use `-std=c++11` or
+`-std=c++14`.
+
+The portable library does not support GCC C++ pre 4.7 because the
+portable library does not work around C++ limitations in stdalign.h and
+stdint.h before GCC 4.7. This could be fixed but is not a priority.
+
+Some previously tested compiler versions may have been retired as the
+CI environment gets updated. See `.travis.yml` and `appveyor.yml` in
+the `ci-more` branch for the current configuration.
+
+The monster sample does not work with MSVC 2010 because it intentionally
+uses C99 style code to better follow the C++ version.
+
+The build option `FLATCC_TEST` can be used to disable all tests which
+might make flatcc compile on platforms that are otherwise problematic.
+The build option `FLATCC_CXX_TEST` can be disabled specifically for C++
+tests (a simple C++ file that includes generated C code).
+
+### Platforms reported to work by users
+
+- ESP32 SoC SDK with FreeRTOS and newlib has been reported to compile
+ cleanly with C++ 14 using flatcc generated JSON parsers, as of flatcc
+ 0.5.1.
+- FreeRTOS when using custom memory allocation methods.
+- Arduino (at least reading buffers)
+- IBM XLC on AIX big endian Power PC has been tested for release 0.4.0
+ but is not part of regular release tests.
+
+### Portability
+
+There is no reason why other or older compilers cannot be supported, but
+it may require some work in the build configuration and possibly
+updates to the portable library. The above is simply what has been
+tested and configured.
+
+The portability layer has some features that are generally important for
+things like endian handling, and others to provide compatibility for
+optional and missing C11 features. Together this should support most C
+compilers around, but relies on community feedback for maturity.
+
+The necessary size of the runtime include files can be reduced
+significantly by using -std=c11 and avoiding JSON (which needs a lot of
+numeric parsing support), and by removing `include/flatcc/reflection`
+which is present to support handling of binary schema files and can be
+generated from `reflection/reflection.fbs`, and removing
+`include/flatcc/support` which is only used for tests and samples. The
+exact set of required files may change from release to release, and it
+doesn't really matter with respect to the compiled code size.
+
+
+## Time / Space / Usability Tradeoff
+
+The priority has been to design an easy to use C builder interface that
+is reasonably fast, suitable for both servers and embedded devices, but
+with usability over absolute performance - still the small buffer output
+rate is measured in millions per second and read access at 10-100 million
+buffers per second as a rough estimate. Reading FlatBuffers is more
+than an order of magnitude faster than building them.
+
+For 100MB buffers with 1000 monsters, dynamically extended monster
+names, monster vector, and inventory vector, the bandwidth reaches about
+2.2GB/s and 45ms/buffer on a 2.2GHz Haswell Core i7 CPU. This includes
+reading back and validating all data. Reading only a few key fields
+increases bandwidth to 2.7GB/s and 37ms/op. For 10MB buffers bandwidth
+may be higher but eventually smaller buffers will be hit by call
+overhead and thus we get down to 300MB/s at about 150ns/op encoding
+small buffers. These numbers are just a rough guideline - they obviously
+depend on hardware, compiler, and data encoded. Measurements are
+excluding an initial warmup step.
+
+The generated JSON parsers are roughly 4 times slower than building a
+FlatBuffer directly in C or C++, or about 2200ns vs 600ns for a 700 byte
+JSON message. JSON parsing is thus roughly two orders of magnitude faster
+than reading the equivalent Protocol Buffer, as reported on the [Google
+FlatBuffers
+Benchmarks](http://google.github.io/flatbuffers/flatbuffers_benchmarks.html)
+page. LZ4 compression is estimated to roughly double the overall processing
+time of JSON parsing. JSON printing is faster than parsing but not very
+significantly so. JSON compresses to roughly half the size of compressed
+FlatBuffers on large buffers, but compresses worse on small buffers (not
+to mention when not compressing at all).
+
+It should be noted that FlatBuffer read performance excludes verification
+which JSON parsers and Protocol Buffers inherently include by their
+nature. Verification has not been benchmarked, but would presumably add
+less than 50% read overhead unless only a fraction of a large buffer is to
+be read.
+
+See also [Benchmarks].
+
+The client C code can avoid almost any kind of allocation to build
+buffers as a builder stack provides an extensible arena before
+committing objects - for example appending strings or vectors piecemeal.
+The stack is mostly bypassed when a complete object can be constructed
+directly such as a vector from integer array on little endian platforms.
+
+The reader interface should be pretty fast as is with less room for
+improvement performance wise. It is also much simpler than the builder.
+
+Usability has also been prioritized over smallest possible generated
+source code and compile time. It shouldn't affect the compiled size
+by much.
+
+The compiled binary output should be reasonably small for everything but
+the most restrictive microcontrollers. A 33K monster source test file
+(in addition to the generated headers and the builder library) results
+in a less than 50K optimized binary executable file including overhead
+for printf statements and other support logic, or a 30K object file
+excluding the builder library.
+
+Read-only binaries are smaller but not necessarily much smaller than
+builders considering they do less work: The compatibility test reads a
+pre-generated binary `monsterdata_test.golden` monster file and verifies
+that all content is as expected. This results in a 13K optimized binary
+executable or a 6K object file. The source for this check is 5K
+excluding header files. Readers do not need to link with a library.
+
+JSON parsers bloat the compiled C binary compared to pure Flatbuffer
+usage because they inline the parser decision tree. A JSON parser for
+monster.fbs may add roughly 100K to the executable binary, depending on
+optimization settings.
+
+
+## Generated Files
+
+The generated code for building flatbuffers,
+and for parsing and printing flatbuffers, all need access to
+`include/flatcc`. The reader does not rely on any library but all other
+generated files rely on the `libflatccrt.a` runtime library. Note that
+`libflatcc.a` is only required if the flatcc compiler itself is required
+as a library.
+
+The reader and builder rely on generated common reader and builder
+header files. These common files make it possible to change the global
+namespace and redefine basic types (`uoffset_t` etc.). In the future
+this _might_ move into library code and use macros for these
+abstractions and eventually have a set of predefined files for types
+beyond the standard 32-bit unsigned offset (`uoffset_t`). The runtime
+library is specific to one set of type definitions.
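+
+As a minimal sketch, using a hypothetical schema name `myschema`, a read-only
+consumer only needs the generated reader header, while building additionally
+requires the builder header and linking with `libflatccrt`:
+
+    /* Read-only access: no runtime library required. */
+    #include "myschema_reader.h"
+
+    /* Building also requires the generated flatbuffers_common_builder.h
+       (pulled in by the builder header) and linking with -lflatccrt. */
+    #include "myschema_builder.h"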
+
+Refer to [monster_test.c] and the generated files for detailed guidance
+on use. The monster schema used in this project is a slight adaptation
+of the original to test some additional edge cases.
+
+For building flatbuffers a separate builder header file is generated per
+schema. It requires a `flatbuffers_common_builder.h` file also generated
+by the compiler and a small runtime library `libflatccrt.a`. It is
+because of this requirement that the reader and builder generated code
+is kept separate. Typical uses can be seen in the [monster_test.c] file.
+The builder allows for repeated pushing of content to a vector or a
+string while a containing table is being updated which simplifies
+parsing of external formats. It is also possible to build nested buffers
+in-line - at first this may sound excessive but it is useful when
+wrapping a union of buffers in a network interface and it ensures proper
+alignment of all buffer levels.
+
+For verifying flatbuffers, a `myschema_verifier.h` is generated. It
+depends on the runtime library and the reader header.
+
+JSON parsers and printers generate one file per schema file, and included
+schemas have their own parsers and printers which the including parsers
+and printers will depend upon, rather similar to how builders work.
+
+Low level note: the builder generates all vtables at the end of the
+buffer instead of ad-hoc in front of each table but otherwise does the
+same deduplication of vtables. This makes it possible to cluster vtables
+in hot cache or to make sure all vtables are available when partially
+transmitting a buffer. This behavior can be disabled by a runtime flag.
+
+Because some use cases may include very constrained embedded devices,
+the builder library can be customized with an allocator object and a
+buffer emitter object. The separate emitter ensures a buffer can be
+constructed without requiring a full buffer to be present in memory at
+once, if so desired.
+
+The typeless builder library is documented in [flatcc_builder.h] and
+[flatcc_emitter.h] while the generated typed builder api for C is
+documented in [Builder Interface Reference].
+
+
+### Use of Macros in Generated Code
+
+Occasionally a concern is raised about the dense nature of the macros
+used in the generated code. These macros make it difficult to understand
+which functions are actually available. The [Builder Interface Reference]
+attempts to document the operations in general fashion. To get more
+detailed information, generated function prototypes can be extracted
+with the `scripts/flatcc-doc.sh` script.
+
+Some are also concerned with macros being "unsafe". Macros are not
+unsafe when used with FlatCC because they generate static or static
+inline functions. These will trigger compile time errors if used
+incorrectly to the same extent that they would in direct C code.
+
+Using macros compresses the generated output by more than a factor of 10,
+ensuring that code under source control does not explode and making it
+possible to compare versions of generated code in a meaningful manner
+and see if it matches the intended schema. The macros are also important
+for dealing with platform abstractions via the portable headers.
+
+Still, it is possible to see the generated output although not supported
+directly by the build system. As an example,
+`include/flatcc/reflection` contains pre-generated header files for the
+reflection schema. To see the expanded output using the `clang` compiler
+tool chain, run:
+
+ clang -E -DNDEBUG -I include \
+ include/flatcc/reflection/reflection_reader.h | \
+ clang-format
+
+Other similar commands are likely available on platforms not supporting
+clang.
+
+Note that the compiler will optimize out nearly all of the generated
+code and only use the logic actually referenced by end-user code because
+the functions are static or static inline. The remaining parts generally
+inline efficiently into the application code resulting in a reasonably
+small binary code size.
+
+More details can be found in
+[#88](https://github.com/dvidelabs/flatcc/issues/88)
+
+
+### Extracting Documentation
+
+The expansion of generated code can be used to get documentation for
+a specific object type.
+
+The following script automates this process:
+
+ scripts/flatcc-doc.sh <schema-file> <name-prefix> [<outdir>]
+
+writing function prototypes to `<outdir>/<name-prefix>.doc`.
+
+Note that the script requires the clang compiler and the clang-format
+tool, but the script could likely be adapted for other tool chains as well.
+
+The principle behind the script can be illustrated using the reflection
+schema as an example, where documentation for the Object table is
+extracted:
+
+ bin/flatcc reflection/reflection.fbs -a --json --stdout | \
+ clang - -E -DNDEBUG -I include | \
+ clang-format -style="WebKit" | \
+ grep "^static.* reflection_Object_\w*(" | \
+ cut -f 1 -d '{' | \
+ grep -v deprecated | \
+ grep -v ");" | \
+ sed 's/__tmp//g' | \
+ sed 's/)/);/g'
+
+The WebKit style of clang-format ensures that parameters and the return
+type are all placed on the same line. Grep extracts the function headers
+and cut strips function bodies starting on the same line. Sed strips
+`__tmp` suffix from parameter names used to avoid macro name conflicts.
+Grep strips `);` to remove redundant forward declarations and sed then
+adds ; to make each line a valid C prototype.
+
+The above is not guaranteed to always work as output may change, but it
+should go a long way.
+
+A small extract of the output, as of flatcc-v0.5.2
+
+ static inline size_t reflection_Object_vec_len(reflection_Object_vec_t vec);
+ static inline reflection_Object_table_t reflection_Object_vec_at(reflection_Object_vec_t vec, size_t i);
+ static inline reflection_Object_table_t reflection_Object_as_root_with_identifier(const void* buffer, const char* fid);
+ static inline reflection_Object_table_t reflection_Object_as_root_with_type_hash(const void* buffer, flatbuffers_thash_t thash);
+ static inline reflection_Object_table_t reflection_Object_as_root(const void* buffer);
+ static inline reflection_Object_table_t reflection_Object_as_typed_root(const void* buffer);
+ static inline flatbuffers_string_t reflection_Object_name_get(reflection_Object_table_t t);
+ static inline flatbuffers_string_t reflection_Object_name(reflection_Object_table_t t);
+ static inline int reflection_Object_name_is_present(reflection_Object_table_t t);
+ static inline size_t reflection_Object_vec_scan_by_name(reflection_Object_vec_t vec, const char* s);
+ static inline size_t reflection_Object_vec_scan_n_by_name(reflection_Object_vec_t vec, const char* s, int n);
+ ...
+
+
+Examples are provided in the following scripts using the reflection and monster schema:
+
+ scripts/reflection-doc-example.sh
+ scripts/monster-doc-example.sh
+
+The monster doc example essentially calls:
+
+ scripts/flatcc-doc.sh samples/monster/monster.fbs MyGame_Sample_Monster_
+
+resulting in the file `MyGame_Sample_Monster_.doc`:
+
+ static inline size_t MyGame_Sample_Monster_vec_len(MyGame_Sample_Monster_vec_t vec);
+ static inline MyGame_Sample_Monster_table_t MyGame_Sample_Monster_vec_at(MyGame_Sample_Monster_vec_t vec, size_t i);
+ static inline MyGame_Sample_Monster_table_t MyGame_Sample_Monster_as_root_with_identifier(const void* buffer, const char* fid);
+ static inline MyGame_Sample_Monster_table_t MyGame_Sample_Monster_as_root_with_type_hash(const void* buffer, flatbuffers_thash_t thash);
+ static inline MyGame_Sample_Monster_table_t MyGame_Sample_Monster_as_root(const void* buffer);
+ static inline MyGame_Sample_Monster_table_t MyGame_Sample_Monster_as_typed_root(const void* buffer);
+ static inline MyGame_Sample_Vec3_struct_t MyGame_Sample_Monster_pos_get(MyGame_Sample_Monster_table_t t);
+ static inline MyGame_Sample_Vec3_struct_t MyGame_Sample_Monster_pos(MyGame_Sample_Monster_table_t t);
+ static inline int MyGame_Sample_Monster_pos_is_present(MyGame_Sample_Monster_table_t t);
+ static inline int16_t MyGame_Sample_Monster_mana_get(MyGame_Sample_Monster_table_t t);
+ static inline int16_t MyGame_Sample_Monster_mana(MyGame_Sample_Monster_table_t t);
+ static inline const int16_t* MyGame_Sample_Monster_mana_get_ptr(MyGame_Sample_Monster_table_t t);
+ static inline int MyGame_Sample_Monster_mana_is_present(MyGame_Sample_Monster_table_t t);
+ static inline size_t MyGame_Sample_Monster_vec_scan_by_mana(MyGame_Sample_Monster_vec_t vec, int16_t key);
+ static inline size_t MyGame_Sample_Monster_vec_scan_ex_by_mana(MyGame_Sample_Monster_vec_t vec, size_t begin, size_t end, int16_t key);
+ ...
+
+
+FlatBuffer native types can also be extracted, for example string operations:
+
+ scripts/flatcc-doc.sh samples/monster/monster.fbs flatbuffers_string_
+
+resulting in `flatbuffers_string_.doc`:
+
+ static inline size_t flatbuffers_string_len(flatbuffers_string_t s);
+ static inline size_t flatbuffers_string_vec_len(flatbuffers_string_vec_t vec);
+ static inline flatbuffers_string_t flatbuffers_string_vec_at(flatbuffers_string_vec_t vec, size_t i);
+ static inline flatbuffers_string_t flatbuffers_string_cast_from_generic(const flatbuffers_generic_t p);
+ static inline flatbuffers_string_t flatbuffers_string_cast_from_union(const flatbuffers_union_t u);
+ static inline size_t flatbuffers_string_vec_find(flatbuffers_string_vec_t vec, const char* s);
+ static inline size_t flatbuffers_string_vec_find_n(flatbuffers_string_vec_t vec, const char* s, size_t n);
+ static inline size_t flatbuffers_string_vec_scan(flatbuffers_string_vec_t vec, const char* s);
+ static inline size_t flatbuffers_string_vec_scan_n(flatbuffers_string_vec_t vec, const char* s, size_t n);
+ static inline size_t flatbuffers_string_vec_scan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char* s);
+ ...
+
+## Using flatcc
+
+Refer to `flatcc -h` for details.
+
+An online version is listed here: [flatcc-help.md], but please use `flatcc
+-h` for an up to date reference.
+
+
+The compiler can either generate a single header file or headers for all
+included schema and a common file, with or without support for both
+reading (default) and writing (-w) flatbuffers. The simplest option is
+to use (-a) for all and include the `myschema_builder.h` file.
+
+The (-a) or (-v) also generates a verifier file.
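+
+For example, a typical invocation might look like the following, where the
+schema name and output directory are illustrative:
+
+    flatcc -a -o generated myschema.fbs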
+
+Make sure `flatcc` under the `include` folder is visible in the C
+compiler's include path when compiling flatbuffer builders.
+
+The `flatcc` (-I) include path will assume all schema files with the same
+base name (case insensitive) are identical and will only include the
+first. All generated files use the input basename and will land in
+working directory or the path set by (-o).
+
+Files can be generated to stdout using (--stdout). C headers will be
+ordered and concatenated, but are otherwise identical to the separate
+file output. Each include statement is guarded so this will not lead to
+missing include files.
+
+The generated code, especially with all output combined via --stdout, may
+appear large, but only the parts actually used will take up space in
+the final executable or object file. Modern compilers inline and include
+only necessary parts of the statically linked builder library.
+
+JSON printer and parser can be generated using the --json flag, or
+--json-printer or --json-parser if only one of them is required. There are
+certain runtime library compile time flags that can optimize out
+printing symbolic enums, but these can also be disabled at runtime.
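+
+As a sketch, generating JSON support alongside the regular headers might look
+like this (again with illustrative names):
+
+    flatcc -a --json -o generated myschema.fbs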
+
+## Trouble Shooting
+
+Make sure to link with `libflatccrt` (rt for runtime) and not `libflatcc` (the schema compiler), otherwise the builder will not be available. Also make sure to have the `include` directory of the flatcc project root in the include path.
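+
+A minimal link line might then look like this, assuming flatcc was built under
+`/path/to/flatcc` (all paths here are illustrative):
+
+    cc -I /path/to/flatcc/include myapp.c -L /path/to/flatcc/lib -lflatccrt -o myapp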
+
+Flatcc will by default expect a `file_identifier` in the buffer when reading or
+verifying a buffer.
+
+A buffer can have an unexpected 4-byte identifier at offset 4, or the identifier
+might be absent.
+
+Not all language interfaces support file identifiers in buffers, and if they do, they might not do so in an older version. Users have reported problems with both Python and Lua interfaces but this is easily resolved.
+
+Check the return value of the verifier:
+
+ int ret;
+ char *s;
+
+ ret = MyTable_verify_as_root(buf, size);
+ if (ret) {
+ s = flatcc_verify_error_string(ret);
+ printf("buffer failed: %s\n", s);
+ }
+
+To verify a buffer with no identifier, or to ignore a different identifier,
+use the `_with_identifier` version of the verifier with a null identifier:
+
+ char *identifier = 0;
+
+ MyTable_verify_as_root_with_identifier(buf, size, identifier);
+
+To read a buffer use:
+
+ MyTable_as_root_with_identifier(buf, 0);
+
+And to build a buffer without an identifier use:
+
+ MyTable_start_as_root_with_identifier(builder, 0);
+ ...
+ MyTable_end_as_root_with_identifier(builder, 0);
+
+Several other `as_root` calls have an `as_root_with_identifier` version,
+including JSON printing.
+
+## Quickstart
+
+After [building](https://github.com/dvidelabs/flatcc#building) the `flatcc` tool,
+binaries are located in the `bin` and `lib` directories under the
+`flatcc` source tree.
+
+You can either jump directly to the [monster
+example](https://github.com/dvidelabs/flatcc/tree/master/samples/monster)
+that follows
+[Google's FlatBuffers Tutorial](https://google.github.io/flatbuffers/flatbuffers_guide_tutorial.html), or you can read along with the quickstart guide below. If you follow
+the monster tutorial, you may want to clone and build flatcc and copy
+the source to a separate project directory as follows:
+
+ git clone https://github.com/dvidelabs/flatcc.git
+ flatcc/scripts/setup.sh -a mymonster
+ cd mymonster
+ scripts/build.sh
+ build/mymonster
+
+`scripts/setup.sh` will as a minimum link the library and tool into a
+custom directory, here `mymonster`. With (-a) it also adds a simple
+build script, copies the example, and updates `.gitignore` - see
+`scripts/setup.sh -h`. Setup can also build flatcc, but you still have
+to ensure the build environment is configured for your system.
+
+To write your own schema files please follow the main FlatBuffers
+project documentation on [writing schema
+files](https://google.github.io/flatbuffers/flatbuffers_guide_writing_schema.html).
+
+The [Builder Interface Reference] may be useful after studying the
+monster sample and quickstart below.
+
+When looking for advanced examples such as sorting vectors and finding
+elements by a key, you should find these in the
+[`test/monster_test`](https://github.com/dvidelabs/flatcc/tree/master/test/monster_test) project.
+
+The following quickstart guide is a broad simplification of the
+`test/monster_test` project - note that the schema is slightly different
+from the tutorial. Focus is on the C specific framework rather
+than general FlatBuffers concepts.
+
+You can still use the setup tool to create an empty project and
+follow along, but there are no assumptions about that in the text below.
+
+### Reading a Buffer
+
+Here we provide a quick example of read-only access to a Monster flatbuffer -
+it is an adapted extract of the [monster_test.c] file.
+
+First we compile the schema read-only with the common (-c) support header and
+add the recursive (-r) option because [monster_test.fbs] includes other files.
+
+ flatcc -cr --reader test/monster_test/monster_test.fbs
+
+For simplicity we assume you build an example project in the project
+root folder, but in practice you would want to change some paths, for
+example:
+
+ mkdir -p build/example
+ flatcc -cr --reader -o build/example test/monster_test/monster_test.fbs
+ cd build/example
+
+We get:
+
+ flatbuffers_common_reader.h
+ include_test1_reader.h
+ include_test2_reader.h
+ monster_test_reader.h
+
+(There is also the simpler `samples/monster/monster.fbs` but then you won't get
+included schema files).
+
+Namespaces can be long so we optionally use a macro to manage this.
+
+ #include "monster_test_reader.h"
+
+ #undef ns
+ #define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Example, x)
+
+ int verify_monster(void *buffer)
+ {
+ ns(Monster_table_t) monster;
+ /* This is a read-only reference to a flatbuffer encoded struct. */
+ ns(Vec3_struct_t) vec;
+ flatbuffers_string_t name;
+ size_t offset;
+
+ if (!(monster = ns(Monster_as_root(buffer)))) {
+ printf("Monster not available\n");
+ return -1;
+ }
+ if (ns(Monster_hp(monster)) != 80) {
+ printf("Health points are not as expected\n");
+ return -1;
+ }
+ if (!(vec = ns(Monster_pos(monster)))) {
+ printf("Position is absent\n");
+ return -1;
+ }
+
+ /* -3.2f is actually -3.20000005 and not -3.2 due to representation loss. */
+ if (ns(Vec3_z(vec)) != -3.2f) {
+ printf("Position failing on z coordinate\n");
+ return -1;
+ }
+
+ /* Verify force_align relative to buffer start. */
+ offset = (char *)vec - (char *)buffer;
+ if (offset & 15) {
+ printf("Force align of Vec3 struct not correct\n");
+ return -1;
+ }
+
+ /*
+ * If we retrieved the buffer using `flatcc_builder_finalize_aligned_buffer` or
+ * `flatcc_builder_get_direct_buffer` the struct should also
+ * be aligned without subtracting the buffer.
+ */
+    if ((size_t)vec & 15) {
+ printf("warning: buffer not aligned in memory\n");
+ }
+
+ /* ... */
+ return 0;
+ }
+ /* main() {...} */
+
+
+### Compiling for Read-Only
+
+Assuming our above file is `monster_example.c` the following are a few
+ways to compile the project for read-only - compilation with runtime
+library is shown later on.
+
+ cc -I include monster_example.c -o monster_example
+
+ cc -std=c11 -I include monster_example.c -o monster_example
+
+ cc -D FLATCC_PORTABLE -I include monster_example.c -o monster_example
+
+The include path or source path is likely different. Some files in
+`include/flatcc/portable` are always used, but the `-D FLATCC_PORTABLE`
+flag includes additional files to support compilers lacking c11
+features.
+
+NOTE: on some clang/gcc platforms it may be necessary to use -std=gnu99 or
+-std=gnu11 if the linker is unable to find `posix_memalign`, see also comments in
+[paligned_alloc.h].
+
+
+### Building a Buffer
+
+Here we provide a very limited example of how to build a buffer - only a few
+fields are updated. Please refer to [monster_test.c] and the doc directory
+for more information.
+
+First we must generate the files:
+
+ flatcc -a monster_test.fbs
+
+This produces:
+
+ flatbuffers_common_reader.h
+ flatbuffers_common_builder.h
+ include_test1_reader.h
+ include_test1_builder.h
+ include_test1_verifier.h
+ include_test2_reader.h
+ include_test2_builder.h
+ include_test2_verifier.h
+ monster_test_reader.h
+ monster_test_builder.h
+ monster_test_verifier.h
+
+Note: we wouldn't actually do the read-only generation shown earlier
+unless we only intend to read buffers - the builder generation always
+generates read access too.
+
+By including `"monster_test_builder.h"` all other files are included
+automatically. The C compiler needs the `-I include` directive to access
+`flatcc/flatcc_builder.h`, `flatcc/flatcc_verifier.h`, and other files
+depending on specifics, assuming the project root is the current
+directory.
+
+The verifiers are not required and just created because we lazily chose
+the -a option.
+
+The builder must be initialized first to set up the runtime environment
+we need for building buffers efficiently - the builder depends on an
+emitter object to construct the actual buffer - here we implicitly use
+the default. Once we have that, we can just consider the builder a
+handle and focus on the FlatBuffers generated API until we finalize the
+buffer (i.e. access the result). For non-trivial uses it is recommended
+to provide a custom emitter and for example emit pages over the network
+as soon as they complete rather than merging all pages into a single
+buffer using `flatcc_builder_finalize_buffer`, or the simplistic
+`flatcc_builder_get_direct_buffer` which returns null if the buffer is
+too large. See also documentation comments in [flatcc_builder.h] and
+[flatcc_emitter.h]. See also `flatcc_builder_finalize_aligned_buffer` in
+`builder.h` and the [Builder Interface Reference] when malloc aligned
+buffers are insufficient.
+
+
+ #include "monster_test_builder.h"
+
+ /* See [monster_test.c] for more advanced examples. */
+ void build_monster(flatcc_builder_t *B)
+ {
+ ns(Vec3_t *vec);
+
+ /* Here we use a table, but structs can also be roots. */
+ ns(Monster_start_as_root(B));
+
+ ns(Monster_hp_add(B, 80));
+        /* The vec struct is zero-initialized. */
+ vec = ns(Monster_pos_start(B));
+ /* Native endian. */
+ vec->x = 1, vec->y = 2, vec->z = -3.2f;
+ /* _end call converts to protocol endian format - for LE it is a nop. */
+ ns(Monster_pos_end(B));
+
+ /* Name is required, or we get an assertion in debug builds. */
+ ns(Monster_name_create_str(B, "MyMonster"));
+
+ ns(Monster_end_as_root(B));
+ }
+
+ #include "flatcc/support/hexdump.h"
+
+ int main(int argc, char *argv[])
+ {
+ flatcc_builder_t builder;
+ void *buffer;
+ size_t size;
+
+ flatcc_builder_init(&builder);
+
+ build_monster(&builder);
+ /* We could also use `flatcc_builder_finalize_buffer` and free the buffer later. */
+ buffer = flatcc_builder_get_direct_buffer(&builder, &size);
+ assert(buffer);
+ verify_monster(buffer);
+
+ /* Visualize what we got ... */
+ hexdump("monster example", buffer, size, stdout);
+
+ /*
+         * Here we can call `flatcc_builder_reset(&builder)` if
+ * we wish to build more buffers before deallocating
+ * internal memory with `flatcc_builder_clear`.
+ */
+
+ flatcc_builder_clear(&builder);
+ return 0;
+ }
+
+Compile the example project:
+
+ cc -std=c11 -I include monster_example.c lib/libflatccrt.a -o monster_example
+
+Note that the runtime library is required for building buffers, but not
+for reading them. If it is inconvenient to distribute the runtime library
+for a given target, source files may be used instead. Each feature has
+its own source file, so not all runtime files are needed for building a
+buffer:
+
+ cc -std=c11 -I include monster_example.c \
+ src/runtime/emitter.c src/runtime/builder.c \
+ -o monster_example
+
+Other features such as the verifier and the JSON printer and parser
+would each need a different file in src/runtime. Which file is needed
+should be obvious from the filenames, except that JSON parsing also
+requires the builder and emitter source files.
+
+
+### Verifying a Buffer
+
+A buffer can be verified to ensure it does not contain any ranges that
+point outside the given buffer size, that all data structures are
+aligned according to the flatbuffer principles, that strings are zero
+terminated, and that required fields are present.
+
+In the builder example above, we can apply a verifier to the output:
+
+ #include "monster_test_builder.h"
+ #include "monster_test_verifier.h"
+ int ret;
+ ...
+ ... finalize
+ if ((ret = ns(Monster_verify_as_root_with_identifier(buffer, size,
+ "MONS")))) {
+ printf("Monster buffer is invalid: %s\n",
+ flatcc_verify_error_string(ret));
+ }
+
+The [readfile.h] utility may also be helpful in reading an existing
+buffer for verification.
+
+Flatbuffers can optionally leave out the identifier, here "MONS". Use a
+null pointer as identifier argument to ignore any existing identifiers
+and allow for missing identifiers.
+
+Nested flatbuffers are always verified with a null identifier, but it
+may be checked later when accessing the buffer.
+
+The verifier does NOT verify that two data structures are not
+overlapping. Sometimes this is indeed valid, such as a DAG (directed
+acyclic graph) where for example two string references refer to the same
+string in the buffer. In other cases an attacker may maliciously
+construct overlapping data structures such that in-place updates may
+cause subsequent invalid buffers. Therefore an untrusted buffer should
+never be updated in-place without first rewriting it to a new buffer.
+
+The CMake build system has a build option to enable assertions in the
+verifier. This will break debug builds and is not usually what is desired,
+but it can be very useful when debugging why a buffer is invalid. Traces
+can also be enabled so the table offset and field id can be reported.
+
+See also `include/flatcc/flatcc_verifier.h`.
+
+When verifying buffers returned directly from the builder, it may be
+necessary to use the `flatcc_builder_finalize_aligned_buffer` to ensure
+proper alignment and use `aligned_free` to free the buffer (or as of
+v0.5.0 also `flatcc_builder_aligned_free`), see also the
+[Builder Interface Reference]. Buffers may also be copied into aligned
+memory via mmap or using the portable layers `paligned_alloc.h` feature
+which is available when including generated headers.
+`test/flatc_compat/flatc_compat.c` is an example of how this can be
+done. For the majority of use cases, standard allocation would be
+sufficient, but for example standard 32-bit Windows only allocates on an
+8-byte boundary and can break the monster schema because it has 16-byte
+aligned fields.
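+
+A minimal sketch of that approach, reusing the `build_monster` function from
+the builder example above - `flatcc_builder_finalize_aligned_buffer` and
+`flatcc_builder_aligned_free` are the calls mentioned in this section:
+
+    #include "monster_test_builder.h"
+
+    int build_aligned_example(void)
+    {
+        flatcc_builder_t builder;
+        void *buffer;
+        size_t size;
+
+        flatcc_builder_init(&builder);
+        build_monster(&builder);
+        /* Unlike the direct buffer, this returns an allocated, aligned copy. */
+        buffer = flatcc_builder_finalize_aligned_buffer(&builder, &size);
+        if (!buffer) {
+            flatcc_builder_clear(&builder);
+            return -1;
+        }
+        /* ... use the size bytes at buffer, e.g. verify or transmit them ... */
+        /* Aligned buffers must be freed with the matching aligned free call. */
+        flatcc_builder_aligned_free(buffer);
+        flatcc_builder_clear(&builder);
+        return 0;
+    }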
+
+
+### Potential Name Conflicts
+
+In unfortunate cases it is possible for a read accessor method to conflict
+with other generated methods and typenames. Usually a small change in
+the schema will resolve this issue.
+
+As of flatcc 0.5.2 read accessors are generated with and without a `_get`
+suffix so it is also possible to use `Monster_pos_get(monster)` instead
+of `Monster_pos(monster)`. When calling flatcc with option `-g` the
+read accessors will only be generated with the `_get` suffix. This avoids
+potential name conflicts. An example of a conflict is a field name
+like `pos_add` when there is also a `pos` field, because the builder
+interface generates the `_add` suffix. Using the -g option avoids this
+problem, but it is preferable to choose another name such as `added_pos`
+when the schema can be modified.
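+
+As a small sketch, both forms below read the same field and are equivalent
+unless flatcc was invoked with `-g` (the `monster` and `pos` names are taken
+from the read-only example earlier in this document):
+
+    ns(Vec3_struct_t) pos;
+
+    pos = ns(Monster_pos(monster));
+    pos = ns(Monster_pos_get(monster));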
+
+The `-g` option only changes the content of the
+`flatbuffers_common_reader.h` file, so it is technically possible to
+use different versions of this file if they are not mixed.
+
+If an external code generator depends on flatcc output, it should use
+the `_get` suffix because it will work with and without the -g option,
+but only as of version 0.5.2 or later. For human readable code it is
+probaly simpler to stick to the orignal naming convention without the
+`_get` suffix.
+
+Even with the above, it is still possible to have a conflict with the
+union type field. If a union field is named `foo`, an additional field
+is automatically generated - this field is named `foo_type` and holds,
+unsurprisingly, the type of the union.
+
+Namespaces can also cause conflicts. If a schema has the namespace
+Foo.Bar and a table named MyTable with a field named hello, then a
+read accessor will be named: `Foo_Bar_MyTable_hello_get`. It
+is also possible to have a table named `Bar_MyTable` because `_` is
+allowed in FlatBuffers schema names, but in this case we have a name
+conflict in the generated C code. FlatCC does not attempt to avoid
+such conflicts, so such schemas are considered invalid.
+
+Notably several users have experienced conflicts with a table or struct
+field named 'identifier' because `<table-name>_identifier` has been
+defined to be the file identifier to be used when creating a buffer with
+that table (or struct) as root. As of 0.6.1, the name is
+`<table-name>_file_identifier` to reduce the risk of conflicts. The old
+form is deprecated but still generated for tables without a field named
+'identifier' for backwards compatibility. Mostly this macro is used for
+higher level functions such as `mytable_create_as_root` which need to
+know what identifier to use.
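+
+As a small sketch, the macro can also be passed explicitly to the
+`_with_identifier` calls shown earlier - here using the verifier from the
+monster example:
+
+    ret = ns(Monster_verify_as_root_with_identifier(buf, size,
+            ns(Monster_file_identifier)));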
+
+
+### Debugging a Buffer
+
+When reading a FlatBuffer does not provide the expected results, the
+first line of defense is to ensure that the code being tested is linked
+against `flatccrt_d`, the debug build of the runtime library. This will
+raise an assertion if calls to the builder are not properly balanced or
+if required fields are not being set.
+
+To dig further into a buffer, call the buffer verifier and see if the
+buffer is actually valid with respect to the expected buffer type.
+
+Strings and tables will be returned as null pointers when their
+corresponding field is not set in the buffer. User code should test for
+this but it might also be helpful to temporarily or permanently set the
+`required` attribute in the schema. The builder will then detect missing fields
+when creating buffers and the verifier will detect their absence in
+an existing buffer.
+
+If the verifier rejects a buffer, the error can be printed (see
+[Verifying a Buffer](#verifying-a-buffer)), but it will not say exactly
+where the problem was found. To go further, the verifier can be made to
+assert where the problem is encountered so the buffer content can be
+analyzed. This is enabled with:
+
+ -DFLATCC_DEBUG_VERIFY=1
+
+Note that this will break test cases where a buffer is expected to fail
+verification.
+
+To dump detailed contents of a valid buffer, or the valid contents up to
+the point of failure, use:
+
+ -DFLATCC_TRACE_VERIFY=1
+
+Both of these options can be set as CMake options, or in the
+[flatcc_rtconfig.h] file.
+
+When reporting bugs, output from the above might also prove helpful.
+
+The JSON parser and printer can also be used to create and display
+buffers. The parser will use the builder API correctly or issue a syntax
+error or an error on a missing required field. This can rule out some
+uncertainty about using the API correctly. The [test_json.c] and
+[test_json_parser.c] files have test functions that can be adapted for
+custom tests.
+
+For advanced debugging the [hexdump.h] file can be used to dump the buffer
+contents. It is used in [test_json.c] and also in [monster_test.c].
+See also [FlatBuffers Binary Format].
+
+As of April 2022, Google's flatc tool has implemented an `--annotate` feature.
+This provides an annotated hex dump given a binary buffer and a schema. The
+output can be used to troubleshoot and rule out or confirm suspected encoding
+bugs in the buffer at hand. The eclectic example in the [FlatBuffers Binary
+Format] document contains a hand written annotated example which inspired the
+`--annotate` feature, but it is not the exact same output format. Note also that
+`flatc` generated buffers tend to have vtables before the table they are
+referenced by, while flatcc normally packs all vtables at the end of the buffer for
+better padding and cache efficiency.
+
+See also [flatc --annotate].
+
+Note: There is experimental support for text editors that support the
+clangd language server or similar. You can edit `CMakeLists.txt`
+to generate `build/Debug/compile_commands.json`, at least when
+using clang as a compiler, and copy or symlink it from the project root. Or
+come up with a better suggestion. There are `.gitignore` entries for
+`compile_flags.txt` and `compile_commands.json` in the project root.
+
+
+## File and Type Identifiers
+
+There are two ways to identify the content of a FlatBuffer. The first is
+to use file identifiers which are defined in the schema. The second is
+to use `type identifiers` which are calculated hashes based on each
+table's name prefixed with its namespace, if any. In either case the
+identifier is stored at offset 4 in binary FlatBuffers, when present.
+Type identifiers are not to be confused with union types.
+
+### File Identifiers
+
+The FlatBuffers schema language has the optional `file_identifier`
+declaration which accepts a 4 character ASCII string. It is intended to be
+human readable. When absent, the buffer potentially becomes 4 bytes
+shorter (depending on padding).
+
+The `file_identifier` is intended to match the `root_type` schema
+declaration, but this does not take into account that it is convenient
+to create FlatBuffers for other types as well. `flatcc` makes no special
+distinction for the `root_type` while Google's `flatc` JSON parser uses
+it to determine the JSON root object type.
+
+As a consequence, the file identifier is ambiguous. Included schema may
+have separate `file_identifier` declarations. To at least make sure each
+type is associated with its own schema's `file_identifier`, a symbol is
+defined for each type. If the schema has no such identifier, it will be
+defined as the null identifier.
+
+The generated code defines the identifiers for a given table:
+
+ #ifndef MyGame_Example_Monster_file_identifier
+ #define MyGame_Example_Monster_file_identifier "MONS"
+ #endif
+
+The user can now override the identifier for a given type, for example:
+
+ #define MyGame_Example_Vec3_file_identifier "VEC3"
+ #include "monster_test_builder.h"
+
+ ...
+ MyGame_Example_Vec3_create_as_root(B, ...);
+
+The `create_as_root` method uses the identifier for the type in question,
+and so do other `_as_root` methods.
+
+The `file_extension` is handled in a similar manner:
+
+ #ifndef MyGame_Example_Monster_file_extension
+ #define MyGame_Example_Monster_file_extension "mon"
+ #endif
+
+### Type Identifiers
+
+To better deal with the ambiguities of file identifiers, type
+identifiers have been introduced as an alternative 4 byte buffer
+identifier. The hash is standardized on FNV-1a for interoperability.
+
+The type identifier uses a type hash which maps a fully qualified type
+name into a 4 byte hash. The type hash is a 32-bit native value and the
+type identifier is a 4 character little endian encoded string of the
+same value.
+
+In this example the type hash is derived from the string
+"MyGame.Example.Monster" and is the same for all FlatBuffer code
+generators that support type hashes.
+
+The value 0 is used to indicate that one does not care about the
+identifier in the buffer.
+
+ ...
+ MyGame_Example_Monster_create_as_typed_root(B, ...);
+ buffer = flatcc_builder_get_direct_buffer(B);
+ MyGame_Example_Monster_verify_as_typed_root(buffer, size);
+ // read back
+ monster = MyGame_Example_Monster_as_typed_root(buffer);
+
+ switch (flatbuffers_get_type_hash(buffer)) {
+ case MyGame_Example_Monster_type_hash:
+ ...
+
+ }
+ ...
+ if (flatbuffers_get_type_hash(buffer) ==
+ flatbuffers_type_hash_from_name("Some.Old.Buffer")) {
+ printf("Buffer is the old version, not supported.\n");
+ }
+
+More API calls are available to naturally extend the existing API. See
+[monster_test.c] for more.
+
+The type identifiers are defined like:
+
+ #define MyGame_Example_Monster_type_hash ((flatbuffers_thash_t)0x330ef481)
+ #define MyGame_Example_Monster_type_identifier "\x81\xf4\x0e\x33"
+
+The `type_identifier` can be used anywhere the original 4 character
+file identifier would be used, but a buffer must choose which system, if any,
+to use. This will not affect the `file_extension`.
+
+NOTE: The generated `_type_identifier` strings should not normally be
+used when an identifier string is expected in the generated API because
+it may contain null bytes which will be zero padded after the first null
+before comparison. Use the API calls that take a type hash instead. The
+`type_identifier` can be used in low level [flatcc_builder.h] calls
+because it handles identifiers as a fixed byte array and handles type
+hashes and strings the same.
+
+NOTE: it is possible to compile the flatcc runtime to encode buffers in
+big endian format rather than the standard little endian format
+regardless of the host platform's endianness. If this is done, the
+identifier field in the buffer is always byte swapped regardless of the
+identifier method chosen. The API calls make this transparent, so "MONS"
+will be stored as "SNOM" but should still be verified as "MONS" in API
+calls. This safeguards against mixing little- and big-endian buffers.
+Likewise, type hashes are always tested in native (host) endian format.
+
+
+The
+[`flatcc/flatcc_identifier.h`](https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/flatcc_identifier.h)
+file contains an implementation of the FNV-1a hash used. The hash was
+chosen for simplicity, availability, and collision resistance. For
+better distribution, and for internal use only, a dispersion function is
+also provided, mostly to discourage use of alternative hashes in
+transmission since the type hash is normally good enough as is.
+
+_Note: there is a potential for collisions in the type hash values
+because the hash is only 4 bytes._
+
+
+## JSON Parsing and Printing
+
+JSON support files are generated with `flatcc --json`.
+
+This section is not a tutorial on JSON printing and parsing, it merely
+covers some non-obvious aspects. The best source to get started quickly
+is the test file:
+
+ test/json_test/json_test.c
+
+For detailed usage, please refer to:
+
+ test/json_test/test_json_printer.c
+ test/json_test/test_json_parser.c
+ test/json_test/json_test.c
+ test/benchmark/benchflatccjson
+
+See also the JSON parsing section in Google's FlatBuffers [schema
+documentation](https://google.github.io/flatbuffers/flatbuffers_guide_writing_schema.html).
+
+By using the flatbuffer schema it is possible to generate schema
+specific JSON printers and parsers. This differs for better and worse
+from Google's `flatc` tool which takes a binary schema as input and
+processes JSON input and output. Here the parser and printer only rely
+on the `flatcc` runtime library and are faster (probably significantly so),
+but require recompilation when new JSON formats are to be supported -
+this is not as bad as it sounds - it would for example not be difficult
+to create a Docker container to process a specific schema in a web
+server context.
+
+The parser always takes a text buffer as input and produces output
+according to how the builder object is initialized. The printer has
+different init functions: one for printing to a file pointer, including
+stdout, one for printing to a fixed length external buffer, and one for
+printing to a dynamically growing buffer. The dynamic buffer may be
+reused between prints via the reset function. See `flatcc_json_parser.h`
+for details.
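+
+As a rough sketch of the stdout case - the generated
+`monster_test_json_printer.h` header name and the `Monster_print_json_as_root`
+wrapper follow the usual generated naming patterns but are assumptions here,
+so check the generated headers and `flatcc_json_printer.h` for the exact
+names and signatures:
+
+    #include "monster_test_json_printer.h"
+
+    int print_monster_as_json(const void *buf, size_t size)
+    {
+        flatcc_json_printer_t printer;
+
+        /* A null file pointer is taken to mean stdout here (assumption). */
+        flatcc_json_printer_init(&printer, 0);
+        ns(Monster_print_json_as_root(&printer, buf, size, 0));
+        flatcc_json_printer_clear(&printer);
+        return 0;
+    }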
+
+The parser will accept unquoted names (not strings) and trailing commas,
+i.e. non-strict JSON, and also allows for hex escapes like `\x03` in strings.
+Strict mode must be enabled by a compile time flag. In addition the parser
+accepts schema specific symbolic enum values that can optionally be unquoted
+where a numeric value is expected:
+
+ color: Green
+ color: Color.Green
+ color: MyGame.Example.Color.Green
+ color: 2
+
+The symbolic values do not have to be quoted (unless required by runtime
+or compile time configuration), but can be, while numeric values cannot
+be quoted. If no namespace is provided, like `color: Green`, the symbol
+must match the receiving enum type. Any scalar value may receive a
+symbolic value either in a relative namespace like `hp: Color.Green`, or
+an absolute namespace like `hp: MyGame.Example.Color.Green`, but not
+`hp: Green` (since `hp` in the monster example schema is not an enum
+type with a `Green` value). A namespace is relative to the namespace of
+the receiving object.
+
+It is also possible to have multiple values, but these always have to be
+quoted in order to be compatible with Google's flatc tool for Flatbuffers
+1.1:
+
+ color: "Green Red"
+
+_Unquoted multi-valued enums can be enabled at compile time but this is
+deprecated because it is incompatible with both Google's flatc JSON and
+also with other possible future extensions: `color: Green Red`_
+
+These multi-valued expressions were originally intended for enums that
+have the bit flag attribute defined (which Color does have), but this is
+tricky to process, so therefore any symbolic value can be listed in a
+sequence with or without namespace as appropriate. Because this further
+causes problems with signed symbols, the exact definition is that all
+symbols are first coerced to the target type (or fail), then added to
+the accumulated value if not the first symbol. This results in:
+
+ color: "Green Blue Red Blue"
+ color: 19
+
+This is because Green is 2, Red is 1, and Blue is 8 and is counted twice.
+
+__NOTE__: Duplicate values should be considered implementation dependent
+as it cannot be guaranteed that all flatbuffer JSON parsers will handle
+this the same. It may also be that this implementation will change in
+the future, for example to use bitwise or when all members and target
+are of bit flag type.
+
+It is not valid to specify an empty set like:
+
+ color: ""
+
+because it might be understood as 0 or the default value, and it does
+not unquote very well.
+
+The printer will by default print valid json without any spaces and
+everything quoted. Use the non-strict formatting option (see headers and
+test examples) to produce pretty printing. It is possible to disable
+symbolic enum values using the `noenum` option.
+
+Only enums will print symbolic values, as there is no history of any
+parsed symbolic values at all. Furthermore, symbolic values are only
+printed if the stored value maps cleanly to one value, or in the case of
+bit-flags, cleanly to multiple values. For example if parsing `color: Green Red`
+it will print as `"color":"Red Green"` by default, while `color: Green
+Blue Red Blue` will print as `"color":19`.
+
+Both printer and parser are limited to roughly 100 table nesting levels
+and an additional 100 nested struct depths. This can be changed by
+configuration flags but must fit in the runtime stack since the
+operation is recursive descent. Exceeding the limits will result in an
+error.
+
+Numeric values are coerced to the receiving type. Integer types will
+fail if the assignment does not fit the target while floating point
+values may lose precision silently. Integer types never accept
+floating point values. Strings only accept strings.
+
+Nested flatbuffers may either be arrays of byte sized integers, or a
+table or a struct of the target type. See test cases for details.
+
+The parser will by default fail on unknown fields, but these can also be
+skipped silently with a runtime option.
+
+Unions are difficult to parse. A union is two json fields: a table as
+usual, and an enum to indicate the type which has the same name with a
+`_type` suffix and accepts a numeric or symbolic type code:
+
+ {
+ name: "Container Monster",
+ test_type: Monster,
+ test: { name: "Contained Monster" }
+ }
+
+based on the schema defined in [monster_test.fbs].
+
+Because other json processors may sort fields, it is possible to receive
+the type field after the test field. The parser does not store temporary
+data structures. It constructs a flatbuffer directly. This is not
+possible when the type is late. This is handled by parsing the field as
+a skipped field on a first pass, followed by a typed back-tracking
+second pass once the type is known (only the table is parsed twice, but
+for nested unions this can still expand). Needless to say this slows down
+parsing. It is an error to provide only the table field or the type
+field alone, except if the type is `NONE` or `0` in which case the table
+is not allowed to be present.
+
+Union vectors are supported as of v0.5.0. A union vector is represented
+as two vectors, one with a vector of tables and one with a vector of
+types, similar to ordinary unions. It is more efficient to place the
+type vector first because it avoids backtracking. Because a union of
+type NONE cannot be represented by absence of table field when dealing
+with vectors of unions, a table must have the value `null` if its type
+is NONE in the corresponding type vector. In other cases a table should
+be absent, and not null.
+
+Here is an example of JSON containing Monster root table with a union
+vector field named `manyany` which is a vector of `Any` unions in the
+[monster_test.fbs] schema:
+
+ {
+ "name": "Monster",
+ "manyany_type": [ "Monster", "NONE" ],
+ "manyany": [{"name": "Joe"}, null]
+ }
+
+### Base64 Encoding
+
+As of v0.5.0 it is possible to encode and decode a vector of type
+`[uint8]` (aka `[ubyte]`) as a base64 encoded string or a base64url
+encoded string as documented in RFC 4648. Any other type, notably the
+string type, does not handle base64 encoding.
+
+Limiting the support to `[uint8]` avoids introducing binary data into
+strings and also avoids dealing with sign and endian encoding of binary
+data of other types. Furthermore, array encoding of values larger than 8
+bits is not necessarily less efficient than base64.
+
+Base64 padding is always printed and is optional when parsed. Spaces,
+linebreaks, JSON string escape character '\\', or any other character
+not in the base64(url) alphabet are rejected as a parse error.
+
+The schema must add the attribute `(base64)` or `(base64url)` to the
+field holding the vector, for example:
+
+ table Monster {
+ name: string;
+ sprite: [uint8] (base64);
+ token: [uint8] (base64url);
+ }
+
+If more complex data needs to be encoded as base64 such as vectors of
+structs, this can be done via nested FlatBuffers which are also of type
+`[uint8]`.
+
+Note that for some use cases it might be desirable to read binary data as
+base64 into memory aligned to more than 8 bits. This is not currently
+possible, but it is recognized that a `(force_align: n)` attribute on
+`[ubyte]` vectors could be useful, but it can also be handled via nested
+flatbuffers which also align data.
+
+### Fixed Length Arrays
+
+Fixed length arrays introduced in 0.6.0 allow for structs containing arrays
+of fixed length scalars, structs and chars. Arrays are parsed like vectors
+of similar type but are zero padded if shorter than expected and fail
+if longer than expected. The flag `reject_array_underflow` will error if an
+array is shorter than expected instead of zero padding. The flag
+`skip_array_overflow` will allow overlong arrays and simply drop extra elements.
+
+Char arrays are parsed like strings and zero padded if shorter than expected, but
+they are not zero terminated. A string like "hello" will exactly fit into a
+field of type `[char:5]`. Trailing zero characters are not printed, but embedded
+zero characters are. This allows for loss-less roundtrips without having to zero
+pad strings. Note that other arrays are always printed in full. If the flag
+`skip_array_overflow` is set, a string might be truncated in the middle of a
+multi-byte character. This is not checked nor enforced by the verifier.
+
+### Runtime Flags
+
+Both the printer and the parser have the ability to accept runtime flags that
+modify their behavior. Please refer to header file comments for documentation
+and test cases for examples. Notably it is possible to print unquoted symbols
+and to ignore unknown fields when parsing instead of generating an error.
+
+Note that deprecated fields are considered unknown fields during parsing so in
+order to process JSON from an old schema version with deprecated fields present,
+unknown symbols must be skipped.
+
+### Generic Parsing and Printing
+
+As of v0.5.1 [test_json.c] demonstrates how a single parser driver can be used
+to parse different table types without changes to the driver or to the schema.
+
+For example, the following layout can be used to configure a generic parser or printer.
+
+ struct json_scope {
+ const char *identifier;
+ flatcc_json_parser_table_f *parser;
+ flatcc_json_printer_table_f *printer;
+ flatcc_table_verifier_f *verifier;
+ };
+
+ static const struct json_scope Monster = {
+        /* This is the schema global file identifier. */
+ ns(Monster_identifier),
+ ns(Monster_parse_json_table),
+ ns(Monster_print_json_table),
+ ns(Monster_verify_table)
+ };
+
+The `Monster` scope can now be used by a driver or replaced with a new scope as needed:
+
+ /* Abbreviated ... */
+    const struct json_scope *scope = &Monster;
+ flatcc_json_parser_table_as_root(B, &parser_ctx, json, strlen(json), parse_flags,
+ scope->identifier, scope->parser);
+ /* Printing and verifying works roughly the same. */
+
+The generated `MyGame_Example_Monster_parse_json_as_root` function is a thin
+convenience wrapper roughly implementing the above.
+
+The generated `monster_test_parse_json` is a higher level convenience wrapper named
+after the schema file itself, not any specific table. It parses the `root_type` configured
+in the schema. This is how the `test_json.c` test driver operated prior to v0.5.1, but
+it made it hard to test parsing and printing distinct table types.
+
+Note that verification is not really needed for JSON parsing because a
+generated JSON parser is supposed to build buffers that always verify (except
+for binary encoded nested buffers), but it is useful for testing.
+
+
+### Performance Notes
+
+Note that json parsing and printing is very fast reaching 500MB/s for
+printing and about 300 MB/s for parsing. Floating point parsing can
+significantly skew these numbers. The integer and floating point parsing
+and printing are handled via support functions in the portable library.
+In addition the floating point `include/flatcc/portable/grisu3_*` library
+is used unless explicitly disabled by a compile time flag. Disabling
+`grisu3` will revert to `sprintf` and `strtod`. Grisu3 will fall back to
+`strtod` and `sprintf` in some rare special cases. Due to the reliance on
+`strtod` and because `strtod` cannot efficiently handle
+non-zero-terminated buffers, it is recommended to zero terminate
+buffers. Alternatively, grisu3 can be compiled with a flag that allows
+errors in conversion. These errors are very small and still correct, but
+may break some checksums. Allowing for these errors can significantly
+improve parsing speed and moves the benchmark from below half a million
+parses to above half a million parses per second on a 700 byte json
+string, on a 2.2 GHz core-i7.
+
+While unquoted strings may sound more efficient due to the compact size,
+it is actually slower to process. Furthermore, large flatbuffer
+generated JSON files may compress by a factor 8 using gzip or a factor
+4 using LZ4 so this is probably the better place to optimize. For small
+buffers it may be more efficient to compress flatbuffer binaries, but
+for large files, json may actually compress significantly better due to
+the absence of pointers in the format.
+
+SSE 4.2 has been experimentally added, but the gains are limited
+because it works best when parsing space, and the space parsing is
+already fast without SSE 4.2, and because one might just leave out the
+spaces if in a hurry. For parsing strings, trivial use of SSE 4.2 string
+scanning doesn't work well because all the escape codes below ASCII 32
+must be detected rather than just searching for `\` and `"`. That is not
+to say there are no gains, they just don't seem worthwhile.
+
+The parser is heavily optimized for 64-bit because it implements an
+8-byte wide trie directly in code. It might work well for 32-bit
+compilers too, but this hasn't been tested. The large trie does put some
+strain on compile time. Optimizing beyond -O2 leads to too large
+binaries which offsets any speed gains.
+
+
+## Global Scope and Included Schema
+
+Attributes included in the schema are viewed in a global namespace and
+each include file adds to this namespace so a schema file can use
+included attributes without namespace prefixes.
+
+Each included schema will also add types to a global scope until it sees
+a `namespace` declaration. An included schema does not inherit the
+namespace of an including file or an earlier included file, so all
+schema files starts in the global scope. An included file can, however,
+see other types previously defined in the global scope. Because include
+statements always appear first in a schema, this can only be earlier
+included files, not types from a containing schema.
+
+The generated output for any included schema is independent of how it was
+included, but it might not compile without the earlier included files
+being present and included first. By including the toplevel `myschema.h`
+or `myschema_builder.h` all these dependencies are handled correctly.
+
+Note: `libflatcc.a` can only parse a single schema when the schema is
+given as a memory buffer, but can handle the above when given a
+filename. It is possible to concatenate schema files, but a `namespace;`
+declaration must be inserted as a separator to revert to global
+namespace at the start of each included file. This can lead to subtle
+errors because if one parent schema includes two child schema `a.fbs`
+and `b.fbs`, then `b.fbs` should not be able to see anything in `a.fbs`
+even if they share namespaces. This would rarely be a problem in practice,
+but it means that schema compilation from memory buffers cannot
+authoritatively validate a schema. The reason the schema must be isolated
+is that otherwise code generation for a given schema could change with
+how it is being used leading to very strange errors in user code.
+
+
+## Required Fields and Duplicate Fields
+
+If a field is required such as Monster.name, the table end call will
+assert in debug mode and create incorrect tables in non-debug builds.
+The assertion may not be easy to decipher as it happens in library code
+and it will not tell which field is missing.
+
+When reading the name, debug mode will again assert and non-debug builds
+will return a default value.
+
+Writing the same field twice will also trigger an assertion in debug
+builds.
+
+
+## Fast Buffers
+
+Buffers can be used for high speed communication by using the ability to
+create buffers with structs as root. In addition the default emitter
+supports `flatcc_emitter_direct_buffer` for small buffers so no extra copy
+step is required to get a linear buffer in memory. Preliminary
+measurements suggests there is a limit to how fast this can go (about
+6-7 mill. buffers/sec) because the builder object must be reset between
+buffers which involves zeroing allocated buffers. Small tables with a
+simple vector achieve roughly half that speed. For really high speed a
+dedicated builder for structs would be needed. See also
+[monster_test.c].
+
+
+## Types
+
+All types stored in a buffer have a type suffix such as `Monster_table_t`
+or `Vec3_struct_t` (and a namespace prefix which we leave out here). These
+types are read-only pointers into endian encoded data. Enum types are
+just constants easily grasped from the generated code. Tables are dense so
+they are never accessed directly.
+
+Enums support schema evolution meaning that more names can be added to
+the enumeration in a future schema version. As of v0.5.0 the function
+`_is_known_value` can be used to check if an enum value is known to the
+current schema version.
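+
+A small sketch of such a check, using the `color` field and `Color` enum from
+the monster example schema and the `ns()` wrapper macro from the earlier
+examples (the exact generated names are assumptions following the usual
+naming patterns):
+
+    ns(Color_enum_t) color = ns(Monster_color(monster));
+
+    if (!ns(Color_is_known_value(color))) {
+        /* Likely produced by a newer schema version. */
+        printf("unexpected color value\n");
+    }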
+
+Structs have a dual purpose because they are also valid types in native
+format, yet the native representation has a slightly different purpose.
+Thus the convention is that a const pointer to a struct encoded in a
+flatbuffer has the type `Vec3_struct_t` whereas a writeable pointer to
+a native struct has the type `Vec3_t *` or `struct Vec3 *`.
+
+All types have a `_vec_t` suffix which is a const pointer to the
+underlying type. For example `Monster_table_t` has the vector type
+`Monster_vec_t`. There is also a non-const variant with suffix
+`_mutable_vec_t` which is rarely used. However, it is possible to sort
+vectors in-place in a buffer, and for this to work, the vector must be
+cast to mutable first. A vector (or string) type points to the element
+with index 0 in the buffer, just after the length field, and it may be
+cast to a native type for direct access with attention to endian
+encoding. (Note that `table_t` types do point to the header field unlike
+vectors.) These types are all for the reader interface. Corresponding
+types with a `_ref_t` suffix such as `_vec_ref_t` are used during
+the construction of buffers.
+
+Native scalar types are mapped from the FlatBuffers schema type names
+such as ubyte to `uint8_t` and so forth. These types also have vector
+types provided in the common namespace (default `flatbuffers_`) so
+a `[ubyte]` vector has type `flatbuffers_uint8_vec_t` which is defined
+as `const uint8_t *`.
+
+The FlatBuffers boolean type is strictly 8 bits wide so we cannot use or
+emulate `<stdbool.h>` where `sizeof(bool)` is implementation dependent.
+Therefore `flatbuffers_bool_t` is defined as `uint8_t` and used to
+represent FlatBuffers boolean values and the constants of same type:
+`flatbuffers_true = 1` and `flatbuffers_false = 0`. Even so,
+`pstdbool.h` is available in the `include/flatcc/portable` directory if
+`bool`, `true`, and `false` are desired in user code and `<stdbool.h>`
+is unavailable.
+
+`flatbuffers_string_t` is `const char *` but implies that the returned pointer
+has a length prefix just before the pointer. `flatbuffers_string_vec_t`
+is a vector of strings. The `flatbuffers_string_t` type guarantees that a
+length field is present using `flatbuffers_string_len(s)` and that the
+string is zero terminated. It also suggests that it is in utf-8 format
+according to the FlatBuffers specification, but no checks are done and
+the `flatbuffers_create_string(B, s, n)` call explicitly allows for
+storing embedded null characters and other binary data.
+
+All vector types have operations defined as the typename with `_vec_t`
+replaced by `_vec_at` and `_vec_len`. For example
+`flatbuffers_uint8_vec_at(inv, 1)` or `Monster_vec_len(inv)`. The length
+or `_vec_len` will be 0 if the vector is missing whereas `_vec_at` will
+assert in debug or behave undefined in release builds following out of
+bounds access. This also applies to related string operations.
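+
+A small sketch of iterating a vector with these operations - the `inventory`
+field (a `[ubyte]` vector) and the `name` string field are taken from the
+monster example schema:
+
+    flatbuffers_string_t name = ns(Monster_name(monster));
+    flatbuffers_uint8_vec_t inv = ns(Monster_inventory(monster));
+    size_t i, n = flatbuffers_uint8_vec_len(inv);
+
+    printf("%s (name length %u) has %u inventory items\n",
+            name, (unsigned)flatbuffers_string_len(name), (unsigned)n);
+    /* A missing vector has length 0, so the loop is simply skipped. */
+    for (i = 0; i < n; ++i) {
+        printf("inventory[%u] = %u\n", (unsigned)i, flatbuffers_uint8_vec_at(inv, i));
+    }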
+
+The FlatBuffers schema uses the following scalar types: `ubyte`, `byte`,
+`ushort`, `short`, `uint`, `int`, `ulong`, and `long` to represent
+unsigned and signed integer types of length 8, 16, 32, and 64
+respectively. The schema syntax has been updated to also support the
+type aliases `uint8`, `int8`, `uint16`, `int16`, `uint32`, `int32`,
+`uint64`, `int64` to represent the same basic types. Likewise, the
+schema uses the types `float` and `double` to represent IEEE-754
+binary32 and binary64 floating point formats where the updated syntax
+also supports the type aliases `float32` and `float64`.
+
+The C interface uses the standard C types such as uint8_t and double to
+represent scalar types and this is unaffected by the schema type name
+used, so the schema vector type `[float64]` is represented as
+`flatbuffers_double_vec_t` the same as `[double]` would be.
+
+Note that the C standard does not guarantee that the C types `float` and
+`double` are represented by the IEEE-754 binary32 single precision
+format and the binary64 double precision format respectively, although
+they usually are. If this is not the case FlatCC cannot work correctly
+with FlatBuffers floating point values. (If someone really has this
+problem, it would be possible to fix).
+
+Unions are represented with two table fields, one holding the union value
+and one holding the union type. See the separate section on Unions. As of flatcc
+v0.5.0 union vectors are also supported.
+
+## Unions
+
+A union represents one of several possible tables. A table with a union
+field such as `Monster.equipped` in the samples schema will have two
+accessors: `MyGame_Sample_Monster_equipped(t)` of type
+`flatbuffers_generic_t` and `MyGame_Sample_Monster_equipped_type(t)` of
+type `MyGame_Sample_Equipment_union_type_t`. A generic type is just a
+const void pointer that can be assigned to the expected table type,
+struct type, or string type. The enumeration has a type code for each member
+of the union and also `MyGame_Sample_Equipment_NONE` which has the value
+0.
+
+The union interface was changed in 0.5.0 and 0.5.1 to use a consistent
+{ type, value } naming convention for both unions and union vectors
+in all interfaces and to support unions and union vectors of multiple
+types.
+
+A union can be accessed by its field name, like
+`MyGame_Sample_Monster_equipped(t)`, and its type is given by
+`MyGame_Sample_Monster_equipped_type(t)`, or a `flatbuffers_union_t` struct
+can be returned with `MyGame_Sample_Monster_equipped_union(t)` with the fields
+{ type, value }. A union vector is accessed in the same way but {
+type, value } represents a type vector and a vector of the given type,
+e.g. a vector of Monster tables or a vector of strings.
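+
+A small sketch of reading a union field with this convention - the `Weapon`
+member and its `damage` field are from the FlatBuffers samples schema, and
+the accessor and cast names are assumptions following the patterns above:
+
+    flatbuffers_union_t equipped = MyGame_Sample_Monster_equipped_union(monster);
+
+    switch (equipped.type) {
+    case MyGame_Sample_Equipment_Weapon:
+        /* The generic value must be cast to the actual member type. */
+        printf("weapon damage: %d\n",
+                (int)MyGame_Sample_Weapon_damage(
+                    (MyGame_Sample_Weapon_table_t)equipped.value));
+        break;
+    case MyGame_Sample_Equipment_NONE:
+    default:
+        break;
+    }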
+
+There is a test in [monster_test.c] covering union vectors and a
+separate test focusing on mixed type unions that also has union vectors.
+
+
+### Union Scope Resolution
+
+Google's `monster_test.fbs` schema has the union (details left out):
+
+ namespace MyGame.Example2;
+ table Monster{}
+
+ namespace MyGame.Example;
+ table Monster{}
+
+ union Any { Monster, MyGame.Example2.Monster }
+
+where the two Monster tables are defined in separate namespaces.
+
+`flatcc` rejects this schema due to a name conflict because it uses the
+basename of a union type, here `Monster`, to generate the union member names
+which are also used in JSON parsing. The conflict can be resolved by adding an
+explicit name such as `Monster2`:
+
+ union Any { Monster, Monster2: MyGame.Example2.Monster }
+
+This syntax is accepted by both `flatc` and `flatcc`.
+
+Both versions will implement the same union with the same type codes in the
+binary format but generated code will differ in how the types are referred to.
+
+In JSON the monster type values are now identified by
+`MyGame.Example.Any.Monster`, or just `Monster`, when assigning the first
+monster type to an Any union field, and `MyGame.Example.Any.Monster2`, or just
+`Monster2` when assigning the second monster type. C uses the usual enum
+namespace prefixed symbols like `MyGame_Example_Any_Monster2`.
+
+## Fixed Length Arrays
+
+Fixed Length Arrays is a late feature to the FlatBuffers format introduced in
+flatc and flatcc mid 2019. Currently only scalar arrays are supported, and only
+as struct fields. To use fixed length arrays as a table field wrap it in a
+struct first. It would make sense to support struct elements and enum elements,
+but that has not been implemented. Char arrays are more controversial due to
+verification and zero termination and are also not supported. Arrays are aligned
+to the size of the first field and are equivalent to repeating elements within
+the struct.
+
+The schema syntax is:
+
+```
+struct MyStruct {
+ my_array : [float:10];
+}
+```
+
+See `test_fixed_array` in [monster_test.c] for an example of how to work with
+these arrays.
+
+Flatcc opts to allow fixed length arrays of arbitrary length but limits the entire
+struct to 2^16-1 bytes. Tables cannot hold larger structs, and the C language
+does not guarantee support for larger structs. Other implementations might have
+different limits on maximum array size. Arrays of 0 length are not permitted.
+
+
+## Optional Fields
+
+Optional scalar table fields were introduced to FlatBuffers mid 2020 in order to
+better handle null values also for scalar data types, as is common in SQL
+databases. Before describing optional values, first understand how ordinary
+scalar values work in FlatBuffers:
+
+Imagine a FlatBuffer table with a `mana` field from the monster sample schema.
+Ordinarily a scalar table field has an implicit default value of 0 like `mana :
+uint8;`, or an explicit default value specified in the schema like `mana : uint8
+= 100;`. When a value is absent from a table field, the default value is
+returned, and when a value is added during buffer construction, it will not
+actually be stored if the value matches the default value, unless the
+`force_add` option is used to write a value even if it matches the default
+value. Likewise the `is_present` method can be used to test if a field was
+actually stored in the buffer when reading it.
+
+When a table has many fields, most of which just hold default settings,
+signficant space can be saved using default values, but it also means that an
+absent value does not indicate null. Field absence is essentially just a data
+compression technique, not a semantic change to the data. However, it is
+possible to use `force_add` and `is_present` to interpret values as null when
+not present, except that this is not a standardized technique. Optional fields
+represents a standardized way to achieve this.
+
+Scalar fields can be marked as optional by assigning `null` as a default
+value. For example, some objects might not have a meaningful `mana`
+value, so it could be represented as `lifeforce : uint8 = null`. Now the
+`lifeforce` field has become an optional field. In the FlatCC implementation
+this means that when the field is set, it will always be written, also if the
+value is 0 or any other representable value. It also means that the `force_add`
+method is not available for the field because `force_add` is essentially always
+in effect for the field.
+
+On the read side, optional scalar fields behave exactly as ordinary scalar
+fields that have not specified a default value, that is, if the field is
+absent, 0 will be returned and `is_present` will return false. In addition,
+optional scalar fields get a new accessor method with the suffix `_option()`
+which returns a struct with two fields: `{ is_null, value }` where
+`_option().is_null == !is_present()` and `_option().value` is the same value
+as the `_get()` method, which will be 0 if `is_null` is true. The option
+struct is named after the type similar to unions, for example
+`flatbuffers_uint8_option_t` or `MyGame_Example_Color_option_t`, and the
+option accessor method also works similar to unions. Note that `_get()` will
+also return 0 for optional enum values that are null (i.e. absent), even if
+the enum does not have an enumerated element with the value 0. Normally enums
+without a 0 element are not allowed in the schema unless a default value is
+specified, but in this case the default is null, and `_get()` needs some value
+to return.
+
+By keeping the original accessors, read logic can be made simpler and faster when it is not important whether a value is null or 0 and at the same time the option value can be returned and stored.
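+
+A small sketch of reading such a field - `lifeforce` is the hypothetical
+optional field from the schema fragment above, and the accessor and type
+names follow the patterns described in this section:
+
+    flatbuffers_uint8_option_t lifeforce = ns(Monster_lifeforce_option(monster));
+
+    if (lifeforce.is_null) {
+        printf("lifeforce was not stored in the buffer\n");
+    } else {
+        printf("lifeforce is %u\n", (unsigned)lifeforce.value);
+    }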
+
+Note that struct fields cannot be optional. Also note that non-scalar table
+fields are not declared optional because these types can already represent
+null via a null pointer or a NONE union type.
+
+JSON parsing and printing change behavior for optional scalar fields by treating
+absent fields differently according to the optional semantics. For example, parsing
+a missing field will not store a default value even if the parser is executed with
+a flag to force default values to be stored, and the printer will not print
+absent optional fields even if otherwise flagged to print default values.
+Currently the JSON printers and parsers do not print or parse JSON null and can
+only represent null by absence of a field.
+
+For an example of reading and writing optional scalar fields, as well as
+printing and parsing them as JSON, please refer to [optional_scalars_test.fbs]
+and [optional_scalars_test.c].
+
+
+## Endianness
+
+The `include/flatcc/portable/pendian_detect.h` file detects endianness
+for popular compilers and provides a runtime fallback detection for
+others. In most cases even the runtime detection will be optimized out
+at compile time in release builds.
+
+The `FLATBUFFERS_LITTLEENDIAN` flag is respected for compatibility with
+Googles `flatc` compiler, but it is recommended to avoid its use and
+work with the mostly standard flags defined and/or used in
+`pendian_detect.h`, or to provide for additional compiler support.
+
+As of flatcc 0.4.0 there is support for flatbuffers running natively on
+big endian hosts. This has been tested on IBM AIX. However, always run
+tests against the system of interest - the release process does not cover
+automated tests on any BE platform.
+
+As of flatcc 0.4.0 there is also support for compiling the flatbuffers
+runtime library with flatbuffers encoded in big endian format regardless
+of the host platform's endianness. Longer term this should probably be
+placed in a separate library with separate name prefixes or suffixes,
+but it is usable as is. Redefine `FLATBUFFERS_PROTOCOL_IS_LE/BE`
+accordingly in `include/flatcc/flatcc_types.h`. This is already done in
+the `be` branch. This branch is not maintained but the master branch can
+be merged into it as needed.
+
+Note that standard flatbuffers are always encoded in little endian but
+in situations where all buffer producers and consumers are big endian,
+the non standard big endian encoding may be faster, depending on
+intrinsic byteswap support. As a curiosity, the `load_test` actually
+runs faster with big endian buffers on a little endian MacOS platform
+for reasons only the optimizer will know, but read performance of small
+buffers drop to 40% while writing buffers generally drops to 80-90%
+performance. For platforms without compiler intrinsics for byteswapping,
+this can be much worse.
+
+Flatbuffers encoded in big endian will have the optional file identifier
+byteswapped. The interface should make this transparent, but details
+are still being worked out. For example, user code should always verify
+the monster buffer against the identifier "MONS", but internally the buffer
+will store the identifier as "SNOM" in big endian encoded buffers.
+
+Because buffers can be encoded in two ways, `flatcc` uses the terms
+`native` endianness and `protocol` endianness. `_pe` is a suffix used in
+various low level API calls to convert between native and protocol
+endianness without caring about whether host or buffer is little or big
+endian.
+
+If it is necessary to write application code that behaves differently if
+the native encoding differs from protocol encoding, use
+`flatbuffers_is_pe_native()`. This is a function, not a define, but for
+all practical purposes it will have the same efficiency while also
+supporting runtime endian detection where necessary.
+
+The flatbuffer environment only supports reading either big or little
+endian for the time being. To test which is supported, use the define
+`FLATBUFFERS_PROTOCOL_IS_LE` or `FLATBUFFERS_PROTOCOL_IS_BE`. They are
+defined as 1 and 0 respectively.
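+
+A small sketch using only the define and the function mentioned above,
+assuming a generated reader header (which pulls in the runtime headers) is
+included:
+
+    if (FLATBUFFERS_PROTOCOL_IS_BE) {
+        printf("runtime encodes buffers in big endian\n");
+    }
+    if (!flatbuffers_is_pe_native()) {
+        printf("byte swapping occurs between buffer and host byte order\n");
+    }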
+
+
+## Pitfalls in Error Handling
+
+The builder API often returns a reference or a pointer where null is
+considered an error or at least a missing object default. However, some
+operations do not have a meaningful object or value to return. These
+follow the convention of 0 for success and non-zero for failure.
+Also, if anything fails, it is not safe to proceed with building a
+buffer. However, to avoid overheads, there is no hand holding here. On
+the upside, failures only happen with incorrect use or allocation
+failure and since the allocator can be customized, it is possible to
+provide a central error state there or to guarantee no failure will
+happen depending on use case, assuming the API is otherwise used
+correctly. When error codes are not checked, the error handling logic also
+optimizes out for better performance.
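+
+To illustrate the two conventions, a minimal sketch using the low-level
+builder API; check `flatcc_builder.h` for the authoritative signatures:
+
+    #include "flatcc/flatcc_builder.h"
+
+    int build_something(flatcc_builder_t *B)
+    {
+        flatcc_builder_ref_t str;
+
+        /* Operations without a meaningful object return 0 on success. */
+        if (flatcc_builder_init(B)) {
+            return -1;
+        }
+        /* Operations returning a reference return 0 (null) on failure. */
+        str = flatcc_builder_create_string_str(B, "hello");
+        if (str == 0) {
+            flatcc_builder_clear(B);
+            return -1;
+        }
+        /* ... continue building; never proceed after a failure ... */
+        return 0;
+    }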
+
+
+## Searching and Sorting
+
+The builder API does not support sorting due to the complexity of
+customizable emitters, but the reader API does support sorting so a
+buffer can be sorted at a later stage. This requires casting a vector to
+mutable and calling the sort method available for fields with keys.
+
+The sort uses heap sort and can sort a vector in-place without using
+external memory or recursion. Due to the lack of external memory, the
+sort is not stable. The corresponding find operation returns the lowest
+index of any matching key, or `flatbuffers_not_found`.
+
+When configured in `config.h` (the default), the `flatcc` compiler
+allows multiple keyed fields unlike Google's `flatc` compiler. This works
+transparently by providing `<table_name>_vec_sort_by_<field_name>` and
+`<table_name>_vec_find_by_<field_name>` methods for all keyed fields.
+The first field maps to `<table_name>_vec_sort` and
+`<table_name>_vec_find`. Obviously the chosen find method must match
+the chosen sort method. The find operation is O(logN).
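+
+A sketch, assuming a schema with a `Monster` table that has a string `name`
+field marked with the `key` attribute; the generated names follow the
+patterns above, but the exact identifiers depend on the schema and namespace:
+
+    ns(Monster_vec_t) monsters;   /* e.g. obtained from a table field accessor */
+    size_t i;
+
+    /* Cast to mutable and sort in-place by the key field. */
+    ns(Monster_vec_sort_by_name)((ns(Monster_mutable_vec_t))monsters);
+
+    /* Binary search; requires the vector to be sorted by the same key. */
+    i = ns(Monster_vec_find_by_name)(monsters, "Gulliver");
+
+    /* Linear scan (see below) works without sorting. */
+    if (i == flatbuffers_not_found) {
+        i = ns(Monster_vec_scan_by_name)(monsters, "Gulliver");
+    }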
+
+As of v0.6.0 the default key used for find and sort without the `by_name`
+suffix is the field with the lowest id instead of the first listed in the
+schema, which is often but not always the same thing.
+
+v0.6.0 also introduces the `primary_key` attribute that can be used instead of
+the `key` attribute on at most one field. The two attributes are mutually
+exclusive. This can be used if a key field with a higher id should be the
+default key. There is no difference when only one field has a `key` or
+`primary_key` attribute, so in that case choose `key` for compatibility.
+Google's flatc compiler does not recognize the `primary_key` attribute.
+
+As of v0.6.0 a `sorted` attribute has been introduced together with the sort
+operations `<table_name>_sort` and `<union_name>_sort`. If a table or a union,
+directly or indirectly, contains a vector with the `sorted` attribute, then the
+sort operation is made available. The sort will recursively visit all children
+with vectors marked sorted. The sort operation will use the default (primary)
+key. A table or union must first be cast to mutable, for example
+`ns(Monster_sort)((ns(Monster_mutable_table_t))monster)`. The actual vector
+sort operations are the same as before, they are just called automatically.
+The `sorted` attribute can only be set on vectors that are not unions. The
+vector can be of scalar, string, struct, or table type. `sorted` is only valid
+for a struct or table vector if the struct or table has a field with a `key`
+or `primary_key` attribute. NOTE: A FlatBuffer can reference the same object
+multiple times. The sort operation will be repeated if this is the case.
+Sometimes that is OK, but if it is a concern, remove the `sorted` attribute
+and sort the vector manually. Note that sharing can also happen via a shared
+containing object. The sort operations are generated in `_reader.h` files
+and only for objects directly or indirectly affected by the `sorted` attribute.
+Unions have a new mutable case operator for use with sorting unions:
+`ns(Any_sort)(ns(Any_mutable_cast)(my_any_union))`. Usually unions will be
+sorted via a containing table which performs this cast automatically. See also
+`test_recursive_sort` in [monster_test.c].
+
+As of v0.4.1 `<table_name>_vec_scan_by_<field_name>` and the default
+`<table_name>_vec_scan` are also provided, similar to `find`, but as a
+linear search that does not require the vector to be sorted. This is
+especially useful for searching by a secondary key (multiple keys is a
+non-standard flatcc feature). `_scan_ex` searches a sub-range [a, b)
+where b is an exclusive index. `b = flatbuffers_end == flatbuffers_not_found
+== (size_t)-1` may be used when searching from a position to the end,
+and `b` can also conveniently be the result of a previous search.
+
+`rscan` searches in the opposite direction starting from the last
+element. `rscan_ex` accepts the same range arguments as `scan_ex`. If
+`a >= b or a >= len` the range is considered empty and
+`flatbuffers_not_found` is returned. `[r]scan[_ex]_n[_by_name]` is for
+length terminated string keys. See [monster_test.c] for examples.
+
+Note that `find` requires the `key` attribute in the schema. `scan` is also
+available on keyed fields. By default `flatcc` will also enable scan by
+any other field but this can be disabled by a compile time flag.
+
+Basic types such as `uint8_vec` also have search operations.
+
+See also [Builder Interface Reference] and [monster_test.c].
+
+
+## Null Values
+
+The FlatBuffers format does not fully distinguish between default values
+and missing or null values but it is possible to force values to be
+written to the buffer. This is discussed further in the
+[Builder Interface Reference]. For SQL data roundtrips this may be more
+important than having compact data.
+
+The `_is_present` suffix on table access methods can be used to detect if a
+value is present in the vtable, for example `Monster_hp_is_present`. Unions
+return true if the type field is present, even if it holds the value
+None.
+
+The `add` methods have corresponding `force_add` methods for scalar and enum
+values to force storing the value even if it is default and thus making
+it detectable by `is_present`.
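+
+A sketch, again assuming a `Monster` table with a scalar `hp` field; the
+generated names are illustrative and depend on the schema and namespace:
+
+    /* Writing: store hp even if it equals the schema default. */
+    ns(Monster_hp_force_add)(B, 100);
+
+    /* Reading: distinguish a stored default from an absent field. */
+    if (ns(Monster_hp_is_present)(monster)) {
+        /* hp was physically stored in the buffer. */
+    } else {
+        /* hp is absent; the accessor returns the schema default. */
+    }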
+
+
+## Portability Layer
+
+The portable library is placed under `include/flatcc/portable` and is
+required by flatcc, but isn't strictly part of the `flatcc` project. It
+is intended as an independent light-weight header-only library to deal
+with compiler and platform variations. It is placed under the flatcc
+include path to simplify flatcc runtime distribution and to avoid
+name and versioning conflicts if used by other projects.
+
+The license of portable is different from `flatcc`. It is mostly MIT or
+Apache depending on the original source of the various parts.
+
+A larger set of portable files is included if `FLATCC_PORTABLE` is
+defined by the user when building.
+
+ cc -D FLATCC_PORTABLE -I include monster_test.c -o monster_test
+
+Otherwise a targeted subset is included by `flatcc_flatbuffers.h` in order
+to deal with non-standard behavior of some C11 compilers.
+
+`pwarnings.h` is also always included so compiler specific warnings can
+be disabled where necessary.
+
+The portable library includes the essential parts of the grisu3 library
+found in `external/grisu3`, but excludes the test cases. The JSON
+printer and parser rely on fast portable numeric print and parse
+operations based mostly on grisu3.
+
+If a specific platform has been tested, feedback and possibly patches to
+the portability layer would be welcome so that support can be made
+available to other users.
+
+
+## Building
+
+### Unix Build (OS-X, Linux, related)
+
+To initialize and run the build (see required build tools below):
+
+ scripts/build.sh
+
+The `bin` and `lib` folders will be created with debug and release
+build products.
+
+The build depends on `CMake`. By default the `Ninja` build tool is also required,
+but alternatively `make` can be used.
+
+Optionally switch to a different build tool by choosing one of:
+
+ scripts/initbuild.sh make
+ scripts/initbuild.sh make-concurrent
+ scripts/initbuild.sh ninja
+
+where `ninja` is the default and `make-concurrent` is `make` with the `-j` flag.
+
+To enforce a 32-bit build on a 64-bit machine the following configuration
+can be used:
+
+ scripts/initbuild.sh make-32bit
+
+which uses `make` and provides the `-m32` flag to the compiler.
+A custom build configuration `X` can be added by adding a
+`scripts/build.cfg.X` file.
+
+`scripts/initbuild.sh` cleans the build if a specific build
+configuration is given as argument. Without arguments it only ensures
+that CMake is initialized and is therefore fast to run on subsequent
+calls. This is used by all test scripts.
+
+To install build tools on OS-X, and build:
+
+ brew update
+ brew install cmake ninja
+ git clone https://github.com/dvidelabs/flatcc.git
+ cd flatcc
+ scripts/build.sh
+
+To install build tools on Ubuntu, and build:
+
+ sudo apt-get update
+ sudo apt-get install cmake ninja-build
+ git clone https://github.com/dvidelabs/flatcc.git
+ cd flatcc
+ scripts/build.sh
+
+To install build tools on Centos, and build:
+
+ sudo yum group install "Development Tools"
+ sudo yum install cmake
+ git clone https://github.com/dvidelabs/flatcc.git
+ cd flatcc
+ scripts/initbuild.sh make # there is no ninja build tool
+ scripts/build.sh
+
+
+OS-X also has a Homebrew package:
+
+ brew update
+ brew install flatcc
+
+or for the bleeding edge:
+
+ brew update
+ brew install flatcc --HEAD
+
+
+### Windows Build (MSVC)
+
+Install CMake, MSVC, and git (tested with MSVC 14 2015).
+
+In PowerShell:
+
+ git clone https://github.com/dvidelabs/flatcc.git
+ cd flatcc
+ mkdir build\MSVC
+ cd build\MSVC
+ cmake -G "Visual Studio 14 2015" ..\..
+
+Optionally also build from the command line (in build\MSVC):
+
+    cmake --build . --config Debug
+    cmake --build . --config Release
+
+In Visual Studio:
+
+ open flatcc\build\MSVC\FlatCC.sln
+ build solution
+ choose Release build configuration menu
+ rebuild solution
+
+*Note that `flatcc\CMakeLists.txt` sets the `-DFLATCC_PORTABLE` flag and
+that `include\flatcc\portable\pwarnings.h` disables certain warnings for
+warning level W3.*
+
+### Docker
+
+Docker image:
+
+- <https://github.com/neomantra/docker-flatbuffers>
+
+
+### Cross-compilation
+
+Users have been reporting some degree of success using cross compiles
+from Linux x86 host to embedded ARM Linux devices.
+
+For this to work, the `FLATCC_TEST` option should be disabled, in part
+because cross-compilation cannot run the cross-compiled flatcc tool, and
+in part because there appear to be some issues with CMake custom build
+steps needed when building test and sample projects.
+
+The option `FLATCC_RTONLY` will disable tests and only build the runtime
+library.
+
+The following is not well tested, but may be a starting point:
+
+ mkdir -p build/xbuild
+ cd build/xbuild
+ cmake ../.. -DBUILD_SHARED_LIBS=on -DFLATCC_RTONLY=on \
+ -DCMAKE_BUILD_TYPE=Release
+
+Overall, it may be simpler to create a separate Makefile and just
+compile the few `src/runtime/*.c` into a library and distribute the
+headers as for other platforms, unless `flatcc` is also required for the
+target. Or to simply include the runtime source and header files in the user
+project.
+
+Note that no tests will be built nor run with `FLATCC_RTONLY` enabled.
+It is highly recommended to at least run the `tests/monster_test`
+project on a new platform.
+
+
+### Custom Allocation
+
+Some target systems will not work with Posix `malloc`, `realloc`, `free`
+and C11 `aligned_alloc`. Or they might, but more allocation control is
+desired. The best approach is to use `flatcc_builder_custom_init` to
+provide a custom allocator and emitter object, but for simpler cases or
+while piloting a new platform
+[flatcc_alloc.h](include/flatcc/flatcc_alloc.h) can be used to override
+runtime allocation functions. _Carefully_ read the comments in this file
+if doing so. There is a test case implementing a new emitter, and a
+custom allocator can be copied from the one embedded in the builder
+library source.
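+
+As a sketch only, assuming the hook names currently found in
+[flatcc_alloc.h](include/flatcc/flatcc_alloc.h) (`FLATCC_ALLOC`,
+`FLATCC_REALLOC`, `FLATCC_FREE`), the runtime allocation calls can be
+redirected when compiling the runtime source:
+
+    cc -DFLATCC_ALLOC=my_alloc -DFLATCC_REALLOC=my_realloc \
+       -DFLATCC_FREE=my_free -I include -c src/runtime/builder.c
+
+Here `my_alloc`, `my_realloc` and `my_free` are user supplied functions with
+`malloc`, `realloc` and `free` compatible signatures. Aligned allocation has
+separate hooks, so read the header carefully before relying on this.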
+
+
+### Custom Asserts
+
+On systems where the default POSIX `assert` call is unavailable, or when
+a different assert behaviour is desirable, it is possible to override
+the default behaviour in the runtime part of the flatcc library via logic defined
+in [flatcc_assert.h](include/flatcc/flatcc_assert.h).
+
+By default Posix `assert` is used. It can be changed by a preprocessor definition:
+
+ -DFLATCC_ASSERT=own_assert
+
+but it will not override assertions used in the portable library, notably the
+Grisu3 fast numerical conversion library used with JSON parsing.
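+
+A minimal sketch of what such an override could look like. The handler name
+and reporting channel are placeholders, and
+[flatcc_assert.h](include/flatcc/flatcc_assert.h) remains the authoritative
+reference for the expected contract:
+
+    /* own_assert.h - hypothetical header made visible wherever the
+       flatcc runtime is compiled, e.g. via a forced include. */
+    #include <stdlib.h>
+
+    void my_assert_report(const char *file, int line);   /* user supplied */
+
+    #define own_assert(x) \
+        ((x) ? (void)0 : (my_assert_report(__FILE__, __LINE__), abort()))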
+
+Runtime assertions can be disabled using:
+
+ -DFLATCC_NO_ASSERT
+
+This will also disable Grisu3 assertions. See
+[flatcc_assert.h](include/flatcc/flatcc_assert.h) for details.
+
+The `<assert.h>` file will in all cases remain a dependency for C11 style static
+assertions. Static assertions are needed to ensure the generated structs have
+the correct physical layout on all compilers. The portable library has a generic
+static assert implementation for older compilers.
+
+
+### Shared Libraries
+
+By default libraries are built statically.
+
+Occasionally there are requests
+[#42](https://github.com/dvidelabs/flatcc/pull/42) for also building shared
+libraries. It is not clear how to build both static and shared libraries
+at the same time without choosing some unconventional naming scheme that
+might affect install targets unexpectedly.
+
+CMake supports building shared libraries out of the box using the
+standard library name with the following option:
+
+ CMAKE ... -DBUILD_SHARED_LIBS=ON ...
+
+See also [CMake Gold: Static + shared](http://cgold.readthedocs.io/en/latest/tutorials/libraries/static-shared.html).
+
+
+## Distribution
+
+Install targets may be built with:
+
+ mkdir -p build/install
+ cd build/install
+ cmake ../.. -DBUILD_SHARED_LIBS=on -DFLATCC_RTONLY=on \
+ -DCMAKE_BUILD_TYPE=Release -DFLATCC_INSTALL=on
+ make install
+
+However, this is not well tested and should be seen as a starting point.
+The normal scripts/build.sh places files in bin and lib of the source tree.
+
+By default lib files are built into the `lib` subdirectory of the project. This
+can be changed, for example like `-DFLATCC_INSTALL_LIB=lib64`.
+
+
+### Unix Files
+
+To distribute the compiled binaries the following files are
+required:
+
+Compiler:
+
+ bin/flatcc (command line interface to schema compiler)
+ lib/libflatcc.a (optional, for linking with schema compiler)
+ include/flatcc/flatcc.h (optional, header and doc for libflatcc.a)
+
+Runtime:
+
+ include/flatcc/** (runtime header files)
+ include/flatcc/reflection (optional)
+ include/flatcc/support (optional, only used for test and samples)
+ lib/libflatccrt.a (runtime library)
+
+In addition the runtime library source files may be used instead of
+`libflatccrt.a`. This may be handy when packaging the runtime library
+along with schema specific generated files for a foreign target that is
+not binary compatible with the host system:
+
+ src/runtime/*.c
+
+### Windows Files
+
+The build products from MSVC are placed in the bin and lib subdirectories:
+
+ flatcc\bin\Debug\flatcc.exe
+ flatcc\lib\Debug\flatcc_d.lib
+ flatcc\lib\Debug\flatccrt_d.lib
+ flatcc\bin\Release\flatcc.exe
+ flatcc\lib\Release\flatcc.lib
+ flatcc\lib\Release\flatccrt.lib
+
+The runtime `include\flatcc` directory is distributed as on other platforms.
+
+
+## Running Tests on Unix
+
+Run
+
+ scripts/test.sh [--no-clean]
+
+**NOTE:** The test script will clean everything in the build directory before
+initializing CMake with the chosen or default build configuration, then
+build Debug and Release builds, and run tests for both.
+
+The script must end with `TEST PASSED`, or it didn't pass.
+
+To make sure everything works, also run the benchmarks:
+
+ scripts/benchmark.sh
+
+
+## Running Tests on Windows
+
+In Visual Studio the tests can be run as follows: first build the main
+project, then right-click the `RUN_TESTS` target and choose build. See
+the output window for test results.
+
+It is also possible to run tests from the command line after the project has
+been built:
+
+ cd build\MSVC
+ ctest
+
+Note that the monster example is disabled for MSVC 2010.
+
+Be aware that tests copy and generate certain files which are not
+automatically cleaned by Visual Studio. Close the solution and wipe the
+`MSVC` directory, and start over to get a guaranteed clean build.
+
+Please also observe that the file `.gitattributes` is used to prevent
+certain files from getting CRLF line endings. Using another source
+control systems might break tests, notably
+`test/flatc_compat/monsterdata_test.golden`.
+
+
+*Note: Benchmarks have not been ported to Windows.*
+
+
+## Configuration
+
+The configuration
+
+ config/config.h
+
+drives the permitted syntax and semantics of the schema compiler and
+code generator. These generally default to be compatible with
+Google's `flatc` compiler. It also sets things like permitted nesting
+depth of structs and tables.
+
+The runtime library has a separate configuration file
+
+ include/flatcc/flatcc_rtconfig.h
+
+This file can modify certain aspects of JSON parsing and printing such
+as disabling the Grisu3 library or requiring that all names in JSON are
+quoted.
+
+For most users, it should not be relevant to modify these configuration
+settings. If changes are required, they can be given in the build
+system - it is not necessary to edit the config files, for example
+to disable trailing comma in the JSON parser:
+
+ cc -DFLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA=0 ...
+
+
+## Using the Compiler and Builder library
+
+The compiler library `libflatcc.a` can compile schemas provided
+in a memory buffer or as a filename. When given as a buffer, the schema
+cannot contain include statements - these will cause a compile error.
+
+When given a filename the behavior is similar to the commandline
+`flatcc` interface, but with more options - see `flatcc.h` and
+`config/config.h`.
+
+`libflatcc.a` exports functions named `flatcc_...`. `reflection...` functions may
+also be available; these are simply the generated C interface for the
+binary schema. The builder library is also included. These last two
+interfaces are only present because the library supports binary schema
+generation.
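+
+A hedged sketch of driving the schema compiler from C. The names below are
+taken from `flatcc.h` as understood here and should be verified against that
+header before use:
+
+    #include "flatcc/flatcc.h"
+
+    int compile_schema(const char *filename)
+    {
+        flatcc_options_t opts;
+        flatcc_context_t ctx;
+        int ret = -1;
+
+        flatcc_init_options(&opts);
+        /* Members of opts mirror the command line options; adjust as needed. */
+        ctx = flatcc_create_context(&opts, filename, 0, 0);
+        if (!ctx) {
+            return -1;
+        }
+        if (flatcc_parse_file(ctx, filename) == 0) {
+            ret = flatcc_generate_files(ctx);
+        }
+        flatcc_destroy_context(ctx);
+        return ret;
+    }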
+
+The standalone runtime library `libflatccrt.a` is a collection of the
+`src/runtime/*.c` files. This supports the generated C headers for
+various features. It is also possible to distribute and compile with the
+source files directly. For debugging, it is useful to use the
+`libflatccrt_d.a` version because it catches a lot of incorrect API use
+in assertions.
+
+The runtime library may also be used by other languages. See comments
+in [flatcc_builder.h]. JSON parsing is one example of an
+alternative use of the builder library so it may help to inspect the
+generated JSON parser source and runtime source.
+
+## FlatBuffers Binary Format
+
+Mostly for implementers: [FlatBuffers Binary Format]
+
+
+## Security Considerations
+
+See [Security Considerations].
+
+
+## Style Guide
+
+FlatCC coding style is largely similar to the [WebKit Style], with the following notable exceptions:
+
+* Syntax requiring C99 or later is avoided, except `<stdint.h>` types are made available.
+* If conditions always use curly brackets, or single line statements without linebreak: `if (err) return -1;`.
+* NULL and nullptr are generally just represented as `0`.
+* Comments are old-school C-style (pre C99). Text is generally cased with punctuation: `/* A comment. */`
+* `true` and `false` keywords are not used (pre C99).
+* In code generation there is essentially no formatting to avoid excessive bloat.
+* Struct names and other types are lower case since this is C, not C++.
+* `snake_case` is used over `camelCase`.
+* Header guards are used over `#pragma once` because it is non-standard and not always reliable in filesystems with ambiguous paths.
+* Comma is not placed first in multi-line calls (but maybe that would be a good idea for diff stability).
+* `config.h` inclusion might be handled differently in that `flatbuffers.h` includes the config file.
+* `unsigned` is not used without `int` for historical reasons. Generally a type like `uint32_t` is preferred.
+* Use `TODO:` instead of `FIXME:` in comments for historical reasons.
+
+All the main source code in compiler and runtime aim to be C11 compatible and
+uses many C11 constructs. This is made possible through the included portable
+library such that older compilers can also function. Therefore any platform specific adaptations will be provided by updating
+the portable library rather than introducing compile time flags in the main
+source code.
+
+
+## Benchmarks
+
+See [Benchmarks]
+
+[Builder Interface Reference]: https://github.com/dvidelabs/flatcc/blob/master/doc/builder.md
+[FlatBuffers Binary Format]: https://github.com/dvidelabs/flatcc/blob/master/doc/binary-format.md
+[Benchmarks]: https://github.com/dvidelabs/flatcc/blob/master/doc/benchmarks.md
+[monster_test.c]: https://github.com/dvidelabs/flatcc/blob/master/test/monster_test/monster_test.c
+[monster_test.fbs]: https://github.com/dvidelabs/flatcc/blob/master/test/monster_test/monster_test.fbs
+[optional_scalars_test.fbs]: https://github.com/dvidelabs/flatcc/blob/optional/test/optional_scalars_test/optional_scalars_test.fbs
+[optional_scalars_test.c]: https://github.com/dvidelabs/flatcc/blob/optional/test/optional_scalars_test/optional_scalars_test.c
+[paligned_alloc.h]: https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/portable/paligned_alloc.h
+[test_json.c]: https://github.com/dvidelabs/flatcc/blob/master/test/json_test/test_json.c
+[test_json_parser.c]: https://github.com/dvidelabs/flatcc/blob/master/test/json_test/test_json_parser.c
+[flatcc_builder.h]: https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/flatcc_builder.h
+[flatcc_emitter.h]: https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/flatcc_emitter.h
+[flatcc-help.md]: https://github.com/dvidelabs/flatcc/blob/master/doc/flatcc-help.md
+[flatcc_rtconfig.h]: https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/flatcc_rtconfig.h
+[hexdump.h]: https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/support/hexdump.h
+[readfile.h]: include/flatcc/support/readfile.h
+[Security Considerations]: https://github.com/dvidelabs/flatcc/blob/master/doc/security.md
+[flatc --annotate]: https://github.com/google/flatbuffers/tree/master/tests/annotated_binary
+[WebKit Style]: https://webkit.org/code-style-guidelines/
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..87499a9
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,38 @@
+version: "win#{build}"
+
+image: Visual Studio 2015
+
+init:
+ - git config --global core.autocrlf input
+ - cmake --version
+ - msbuild /version
+
+shallow_clone: true
+
+clone_folder: c:\projects\flatcc
+
+platform:
+ - Win32
+ - x64
+
+configuration:
+ - Debug
+ - Release
+
+skip_tags: true
+
+before_build:
+ - echo Running cmake...
+ - cd c:\projects\flatcc
+ - mkdir build\MSVC
+ - cd build\MSVC
+ - if "%platform%"=="Win32" cmake -G "Visual Studio 14 2015" -DCMAKE_BUILD_TYPE=%configuration% ..\..
+ - if "%platform%"=="x64" cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=%configuration% ..\..
+
+build:
+ parallel: true # enable MSBuild parallel builds
+ project: c:\projects\flatcc\build\MSVC\FlatCC.sln
+
+test_script:
+ - ctest -C "%configuration%" -VV
+
diff --git a/config/config.h b/config/config.h
new file mode 100644
index 0000000..b07fa6c
--- /dev/null
+++ b/config/config.h
@@ -0,0 +1,477 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#include "flatcc/flatcc_version.h"
+
+/*
+ * This is an alternative implementation to Google's `flatc` compiler.
+ * This configuration enables or disables features which can be used
+ * to more closely match the flatc behavior. The current flatc version
+ * may evolve over time, so the comments on flatc behavior should
+ * be seen as how things were when this file was last updated.
+ *
+ * One notable, but convenient, difference is that structs and enums are
+ * not required to be defined in a specific order here (as of June 2020
+ * flatc supports out of order enums). The schema can be rearranged by
+ * the user to support the flatc compiler without changing the
+ * underlying format. This does put an extra restraint on struct depth as
+ * recursion must have a set maximum level, but this also applies to
+ * target C compilers, e.g. clang has a limit of about 100-200 nestings.
+ *
+ * Another more significant difference is that structs are allowed as
+ * root objects. This is important if a query should return a partial
+ * result from a stored buffer but it isn't currently supported by
+ * flatc, although the documentation hints that it might be possible.
+ *
+ * Value overflows and conversions of enums and default values are
+ * checked more strictly than by flatc, and this could break some schemas.
+ * However, hopefully it will not break any production schemas since the
+ * results would not be well-defined.
+ *
+ * Struct fields may be deprecated by padding. This isn't supported by
+ * flatc but was added because it seemed inconsistent to not have it
+ * when deprecated table fields are possible.
+ *
+ * Enums can be used in structs, but this is not supported by flatc.
+ * The reason might be either the additional work in the parser, or
+ * because it adds a burden to code generators, for example, C output
+ * must place enums before structs. However, they make perfect sense in
+ * the format.
+ */
+
+#define FLATCC_INFO_TEXT "FlatBuffers schema compiler for C by dvide.com"
+#define FLATCC_TITLE_TEXT "flatcc " FLATCC_INFO_TEXT
+#define FLATCC_GENERATED_BY "Generated by flatcc " FLATCC_VERSION_TEXT " " FLATCC_INFO_TEXT
+
+/* Include portability layer here since all other files depend on it. */
+#ifdef FLATCC_PORTABLE
+#include "flatcc/portable/portable.h"
+#endif
+
+/*
+ * Limit the input buffer or file size being parsed to prevent bringing
+ * the whole system down on invalid or malicious content. This limit
+ * covers the accumulated size of all included files. 0 is unlimited.
+ */
+#ifndef FLATCC_MAX_SCHEMA_SIZE
+#define FLATCC_MAX_SCHEMA_SIZE 1000000
+#endif
+
+/*
+ * Max nesting level of includes - this affects recursion on the call
+ * stack. 0 is unlimited.
+ */
+#ifndef FLATCC_MAX_INCLUDE_DEPTH
+#define FLATCC_MAX_INCLUDE_DEPTH 100
+#endif
+
+/*
+ * Max number of include files allowed regardless of how they are
+ * nested. This affects memory usage but it may be very large. It should
+ * not normally affect the number of open file handles since files are
+ * opened and closed individually, depending on the interfacing driver
+ * logic. 0 is unlimited.
+ */
+#ifndef FLATCC_MAX_INCLUDE_COUNT
+#define FLATCC_MAX_INCLUDE_COUNT 100
+#endif
+
+/*
+ * Limit parser recursion depth when analyzing dependent structs.
+ * The grammar itself does not contain recursion.
+ */
+#ifndef FLATCC_NESTING_MAX
+#define FLATCC_NESTING_MAX 100
+#endif
+
+#ifndef FLATCC_MAX_ERRORS
+#define FLATCC_MAX_ERRORS 10
+#endif
+
+/* Used when truncating some error messages. */
+#ifndef FLATCC_MAX_IDENT_SHOW
+#define FLATCC_MAX_IDENT_SHOW 50
+#endif
+
+/*
+ * Allow FlatBuffer schema reserved keywords
+ * to be used as field identifiers. This can
+ * be confusing, but also important for use with
+ * JSON where the field names are visible
+ * externally.
+ */
+#ifndef FLATCC_ALLOW_KW_FIELDS
+#define FLATCC_ALLOW_KW_FIELDS 1
+#endif
+
+/*
+ * Applies to the members of an enum. These
+ * names are visible in JSON. Using reserved
+ * names prevents them from being used in
+ * field default values.
+ */
+#ifndef FLATCC_ALLOW_KW_ENUMS
+#define FLATCC_ALLOW_KW_ENUMS 1
+#endif
+
+/*
+ * Artificial limit on grammar attributes to prevent abuse.
+ */
+#ifndef FLATCC_ATTR_MAX
+#define FLATCC_ATTR_MAX 100
+#endif
+
+#ifndef FLATCC_FORCE_ALIGN_MAX
+#define FLATCC_FORCE_ALIGN_MAX 256
+#endif
+
+/*
+ * sizeof(uoffset_t), sizeof(soffset_t) in FlatBuffers file format.
+ * Permissible values are 2, 4 (default), and 8.
+ */
+#ifndef FLATCC_OFFSET_SIZE
+#define FLATCC_OFFSET_SIZE 4
+#endif
+
+/*
+ * sizeof(voffset_t) in FlatBuffers file format.
+ * Permissible values are 2 (default), 4, and 8.
+ */
+#ifndef FLATCC_VOFFSET_SIZE
+#define FLATCC_VOFFSET_SIZE 2
+#endif
+
+/*
+ * DO NOT CHANGE
+ * The union type field type.
+ * This is hardcoded in many places and difficult
+ * to change since size = 1 does not require
+ * endian conversion. Config only placed here
+ * for consistency and clarity.
+ */
+#ifndef FLATCC_UTYPE_SIZE
+#define FLATCC_UTYPE_SIZE 1
+#endif
+
+/*
+ * DO NOT CHANGE, see also FLATCC_UTYPE_SIZE
+ */
+#ifndef FLATCC_BOOL_SIZE
+#define FLATCC_BOOL_SIZE 1
+#endif
+
+/*
+ * There are no hard limits on structs, but
+ * they cannot be effectively handled by tables
+ * if larger than 64K and 64K-1 allows us to store
+ * the size in short where needed. Internally
+ * we can handle 64 bits.
+ */
+#ifndef FLATCC_STRUCT_MAX_SIZE
+#define FLATCC_STRUCT_MAX_SIZE ((1 << 16) - 1)
+#endif
+
+#ifndef FLATCC_NAMESPACE_MAX
+#define FLATCC_NAMESPACE_MAX 100
+#endif
+
+/* Reserve space for trailing _ and \0. */
+#define FLATCC_NAMESPACE_BUFSIZ (FLATCC_NAMESPACE_MAX + 2)
+#define FLATCC_NAME_BUFSIZ (2 * FLATCC_NAMESPACE_BUFSIZ)
+
+#ifndef FLATCC_FILENAME_MAX
+#define FLATCC_FILENAME_MAX 256
+#endif
+
+/*
+ * If set, enum fields initialized with a numeric constant must match an
+ * existing enum value. This only applies to the schema initializers, not
+ * values assigned during buffer construction. It may be useful to allow
+ * non-existing values (and expensive to enforce), but for schema
+ * initializers, it could easily break future changes if inconsistent
+ * values are being used.
+ */
+#ifndef FLATCC_STRICT_ENUM_INIT
+#define FLATCC_STRICT_ENUM_INIT 1
+#endif
+
+/*
+ * flatc requires ascending enums, and it does simplify code generation
+ * to multiple languages, and name mapping of enums, so we require this
+ * by default, but C code generator handles both cases fine.
+ * As of June 2020 flatc supports out of order enums.
+ */
+#ifndef FLATCC_ASCENDING_ENUM
+#define FLATCC_ASCENDING_ENUM 0
+#endif
+
+/*
+ * The flatc tool accepts any integer as bool because it
+ * doesn't do a range check. By enabling this flag we get more compatible,
+ * but still only accept 1 or 0 as bool, and also allow assignment of
+ * bool to other types (useful in enums that are not bool because
+ * official flatc has no boolean enum type).
+ *
+ * However, there appears to be no strong reason for mixing boolean types
+ * with other types, but since `flatc` supports it, it is enabled by
+ * default.
+ */
+#ifndef FLATCC_ALLOW_BOOLEAN_CONVERSION
+#define FLATCC_ALLOW_BOOLEAN_CONVERSION 1
+#endif
+
+/* flatc does not recognize later defined enums, but we do by default. */
+#ifndef FLATCC_HIDE_LATER_ENUM
+#define FLATCC_HIDE_LATER_ENUM 0
+#endif
+
+/*
+ * The flatc parser does not recognize later structs.
+ * If disabled dependent structs will be topologically
+ * sorted to the set maximum recursion depth.
+ */
+#ifndef FLATCC_HIDE_LATER_STRUCT
+#define FLATCC_HIDE_LATER_STRUCT 0
+#endif
+
+/*
+ * In all cases we permit at most one root type per file.
+ * flatc allows none, or multiple, but apparently only generates
+ * accessors for the last seen.
+ */
+#ifndef FLATCC_REQUIRE_ROOT_TYPE
+#define FLATCC_REQUIRE_ROOT_TYPE 0
+#endif
+
+/*
+ * This also applies to nested_flatbuffers.
+ * flatc does not allow structs.
+ */
+#ifndef FLATCC_ALLOW_STRUCT_ROOT
+#define FLATCC_ALLOW_STRUCT_ROOT 1
+#endif
+
+/*
+ * pad deprecated struct fields with __depracedx[size] in C output.
+ * flatc does not permit deprecated struct fields.
+ */
+#ifndef FLATCC_ALLOW_STRUCT_FIELD_DEPRECATE
+#define FLATCC_ALLOW_STRUCT_FIELD_DEPRECATE 1
+#endif
+
+/*
+ * flatc only allows one key on table fields, which makes sense in
+ * C++ STL where a vector can have one comparison operator,
+ * but in C output we can easily have multiple <structname>_find_by_<fieldname>.
+ * Vectors can only be sorted by one field at a time, but e.g. an
+ * octree- or trie-like data-structure might choose to sort on different
+ * coordinates at different levels.
+ */
+#ifndef FLATCC_ALLOW_MULTIPLE_KEY_FIELDS
+#define FLATCC_ALLOW_MULTIPLE_KEY_FIELDS 1
+#endif
+
+/*
+ * flatc does not have a primary_key field attribute but it is useful
+ * when having multiple keys. Without a primary_key the field with the
+ * lowest id becomes the default, or primary, key. This key has a
+ * simpler find syntax and is also used by default when sorting.
+ * When there is only one keyed field on a table it is better to use
+ * the key attribute for compatibility reasons. It will automatically
+ * become primary.
+ */
+#ifndef FLATCC_ALLOW_PRIMARY_KEY
+#define FLATCC_ALLOW_PRIMARY_KEY 1
+#endif
+
+/*
+ * flatcc has scan functions which are equivalent to find except they
+ * don't require arrays to be sorted by field and have O(N) complexity.
+ * It makes sense to generate these functions by default for all fields,
+ * not just ones marked as key.
+ */
+
+#ifndef FLATCC_ALLOW_SCAN_FOR_ALL_FIELDS
+#define FLATCC_ALLOW_SCAN_FOR_ALL_FIELDS 1
+#endif
+
+/* flatc currently does not allow keys on structs, but it makes sense to have them. */
+#ifndef FLATCC_ALLOW_STRUCT_FIELD_KEY
+#define FLATCC_ALLOW_STRUCT_FIELD_KEY 1
+#endif
+
+/*
+ * Structs cannot have defaults. If an enum does not define a 0 value
+ * this is not considered an error when used with a struct.
+ * The flatc compiler does not accept enums on structs.
+ */
+#ifndef FLATCC_ALLOW_ENUM_STRUCT_FIELD
+#define FLATCC_ALLOW_ENUM_STRUCT_FIELD 1
+#endif
+
+/*
+ * flatc currently only allows scalars and strings as keys,
+ * but enums are really a kind of scalar and it makes sense to support
+ * them.
+ */
+#ifndef FLATCC_ALLOW_ENUM_KEY
+#define FLATCC_ALLOW_ENUM_KEY 1
+#endif
+
+/*
+ * String keys are allowed (and actively used in reflection.fbs) by
+ * flatc, but they are more tricky to support in code generation
+ * so allow them to be disabled.
+ */
+#ifndef FLATCC_ALLOW_STRING_KEY
+#define FLATCC_ALLOW_STRING_KEY 1
+#endif
+
+/* Code gen specific. */
+
+#ifndef FLATCC_DEFAULT_SCHEMA_EXT
+#define FLATCC_DEFAULT_SCHEMA_EXT ".fbs"
+#endif
+
+#ifndef FLATCC_DEFAULT_BIN_SCHEMA_EXT
+#define FLATCC_DEFAULT_BIN_SCHEMA_EXT ".bfbs"
+#endif
+
+/* Schema file extensions do not carry a dot by convention, do the same here. */
+#ifndef FLATCC_DEFAULT_BIN_EXT
+#define FLATCC_DEFAULT_BIN_EXT "bin"
+#endif
+
+#ifndef FLATCC_DEFAULT_DEP_EXT
+#define FLATCC_DEFAULT_DEP_EXT ".fbs.d"
+#endif
+
+/*
+ * A depends file sometimes needs a single file to hook
+ * the include file dependencies onto.
+ */
+#ifndef FLATCC_DEFAULT_DEP_TARGET_SUFFIX
+#define FLATCC_DEFAULT_DEP_TARGET_SUFFIX "_reader.h"
+#endif
+
+#ifndef FLATCC_DEFAULT_FILENAME
+#define FLATCC_DEFAULT_FILENAME "test"
+#endif
+
+#ifndef FLATCC_DEFAULT_NAMESPACE
+#define FLATCC_DEFAULT_NAMESPACE ""
+#endif
+
+#ifndef FLATCC_DEFAULT_NAMESPACE_COMMON
+#define FLATCC_DEFAULT_NAMESPACE_COMMON "flatbuffers"
+#endif
+
+/* rpc attributes aren't spec'ed at this point, so we flag the code. */
+/* Similar to attributes after table name (no runtime option). */
+#ifndef FLATCC_ALLOW_RPC_SERVICE_ATTRIBUTES
+#define FLATCC_ALLOW_RPC_ATTRIBUTES 1
+#endif
+
+/* Similar to attributes after a tables field name (no runtime option). */
+#ifndef FLATCC_ALLOW_RPC_METHOD_ATTRIBUTES
+#define FLATCC_ALLOW_RPC_METHOD_ATTRIBUTES 1
+#endif
+
+/* Only takes effect if rpc method attributes are allowed (no runtime option). */
+#ifndef FLATCC_ALLOW_DEPRECATED_RPC_METHOD
+#define FLATCC_ALLOW_DEPRECATED_RPC_METHOD 1
+#endif
+
+
+/*
+ * This inserts #pragma pack(1) and padding fields instead
+ * of using alignas on field members.
+ */
+#define FLATCC_CGEN_PAD 0
+
+
+/* ---- CGEN flags are specific to the C code generator. ---- */
+
+/*
+ * Generate support code for sorting arrays.
+ */
+#ifndef FLATCC_CGEN_SORT
+#define FLATCC_CGEN_SORT 1
+#endif
+
+/* Disable warnings for known compilers */
+#ifndef FLATCC_CGEN_PRAGMAS
+#define FLATCC_CGEN_PRAGMAS 1
+#endif
+
+/* Default spacing when generating auto indented code. */
+#ifndef FLATCC_CGEN_SPACING
+#define FLATCC_CGEN_SPACING 4
+#endif
+
+/*
+ * Field names risk conflict with other names when
+ * generated symbols do not all have a suffix.
+ * This can be avoided by only generating accessors
+ * with the `_get` suffix, rather than both with
+ * and without like:
+ * `Monster_name(monster)`
+ * `Monster_name_get(monster)`.
+ *
+ * Note: test code might break if enabled.
+ */
+#ifndef FLATCC_CGEN_NO_CONFLICTS
+#define FLATCC_CGEN_NO_CONFLICTS 0
+#endif
+
+/* ---- BGEN are flags specific to the binary schema generator. ---- */
+
+/*
+ * If disabled, no binary schema support is compiled in.
+ * This may be useful when generated reflection headers
+ * are no longer compatible with the current build. Disabling
+ * reflection makes it possible to still build the compiler and
+ * thus generate new reflection headers.
+ *
+ * This flag is enabled by the build system so files can also be
+ * excluded or included.
+ *
+ * #ifndef FLATCC_REFLECTION
+ * #define FLATCC_REFLECTION 0
+ * #endif
+*/
+
+/* Export option for --schema. */
+#ifndef FLATCC_BGEN_BFBS
+#define FLATCC_BGEN_BFBS 0
+#endif
+
+/*
+ * Add uoffset length field before buffer. This is not a valid buffer
+ * then but can be used to concatenate multiple files and it is
+ * compatible with the layout of nested buffers that have a similar
+ * prefix. The prefix is a uoffset_t field.
+ */
+#ifndef FLATCC_BGEN_LENGTH_PREFIX
+#define FLATCC_BGEN_LENGTH_PREFIX 0
+#endif
+
+/* Prefix type names with namespace during schema export.
+ * flatc does not include the namespace prefix. */
+#ifndef FLATCC_BGEN_QUALIFY_NAMES
+#define FLATCC_BGEN_QUALIFY_NAMES 1
+#endif
+
+
+/* ---- JSON related code generation. ---- */
+
+/*
+ * Translate enum values to symbolic names. Ditto union types.
+ * This is also a runtime option but disabling it here uses
+ * a faster code path.
+ */
+#ifndef FLATCC_JSON_PRINT_MAP_ENUMS
+#define FLATCC_JSON_PRINT_MAP_ENUMS 1
+#endif
+
+#endif /* CONFIG_H */
diff --git a/doc/Grammar-2015-07-23.md b/doc/Grammar-2015-07-23.md
new file mode 100644
index 0000000..cafc054
--- /dev/null
+++ b/doc/Grammar-2015-07-23.md
@@ -0,0 +1,42 @@
+# Formal Grammar of the schema language
+
+schema = include*
+ ( namespace\_decl | type\_decl | enum\_decl | root\_decl |
+ file_extension_decl | file_identifier_decl |
+ attribute\_decl | object )*
+
+include = `include` string\_constant `;`
+
+namespace\_decl = `namespace` ident ( `.` ident )* `;`
+
+attribute\_decl = `attribute` string\_constant `;`
+
+type\_decl = ( `table` | `struct` ) ident metadata `{` field\_decl+ `}`
+
+enum\_decl = ( `enum` | `union` ) ident [ `:` type ] metadata `{` commasep(
+enumval\_decl ) `}`
+
+root\_decl = `root_type` ident `;`
+
+field\_decl = ident `:` type [ `=` scalar ] metadata `;`
+
+type = `bool` | `byte` | `ubyte` | `short` | `ushort` | `int` | `uint` |
+`float` | `long` | `ulong` | `double`
+ | `string` | `[` type `]` | ident
+
+enumval\_decl = ident [ `=` integer\_constant ]
+
+metadata = [ `(` commasep( ident [ `:` scalar ] ) `)` ]
+
+scalar = integer\_constant | float\_constant | `true` | `false`
+
+object = { commasep( ident `:` value ) }
+
+value = scalar | object | string\_constant | `[` commasep( value ) `]`
+
+commasep(x) = [ x ( `,` x )\* ]
+
+file_extension_decl = `file_extension` string\_constant `;`
+
+file_identifier_decl = `file_identifier` string\_constant `;`
+
diff --git a/doc/Grammar.README.md b/doc/Grammar.README.md
new file mode 100644
index 0000000..15fdaac
--- /dev/null
+++ b/doc/Grammar.README.md
@@ -0,0 +1,2 @@
+Official flatbuffer grammar listed for reference.
+
diff --git a/doc/benchmarks.md b/doc/benchmarks.md
new file mode 100644
index 0000000..06863da
--- /dev/null
+++ b/doc/benchmarks.md
@@ -0,0 +1,147 @@
+# Benchmarks
+
+Benchmarks are defined for raw C structs, Google's `flatc` generated C++
+and the `flatcc` compiler's C output.
+
+These can be run with:
+
+ scripts/benchmark.sh
+
+and requires a C++ compiler installed - the benchmark for flatcc alone can be
+run with:
+
+ test/benchmark/benchflatcc/run.sh
+
+this only requires a system C compiler (cc) to be installed (and
+flatcc's build environment).
+
+A summary for OS-X 2.2 GHz Haswell core-i7 is found below. Generated
+files for OS-X and Ubuntu are found in the benchmark folder.
+
+The benchmarks use the same schema and dataset as Google FPL's
+FlatBuffers benchmark.
+
+In summary, 1 million iterations runs at about 500-540MB/s at 620-700
+ns/op encoding buffers and 29-34ns/op traversing buffers. `flatc` and
+`flatcc` are close enough in performance for it not to matter much.
+`flatcc` is a bit faster encoding but it is likely due to less memory
+allocation. Throughput and time per operation are of course very specific
+to this test case.
+
+Generated JSON parser/printer results are shown below, for flatcc only, on
+both OS-X and Linux.
+
+
+## operation: flatbench for raw C structs encode (optimized)
+ elapsed time: 0.055 (s)
+ iterations: 1000000
+ size: 312 (bytes)
+ bandwidth: 5665.517 (MB/s)
+ throughput in ops per sec: 18158707.100
+ throughput in 1M ops per sec: 18.159
+ time per op: 55.070 (ns)
+
+## operation: flatbench for raw C structs decode/traverse (optimized)
+ elapsed time: 0.012 (s)
+ iterations: 1000000
+ size: 312 (bytes)
+ bandwidth: 25978.351 (MB/s)
+ throughput in ops per sec: 83263946.711
+ throughput in 1M ops per sec: 83.264
+ time per op: 12.010 (ns)
+
+## operation: flatc for C++ encode (optimized)
+ elapsed time: 0.702 (s)
+ iterations: 1000000
+ size: 344 (bytes)
+ bandwidth: 490.304 (MB/s)
+ throughput in ops per sec: 1425301.380
+ throughput in 1M ops per sec: 1.425
+ time per op: 701.606 (ns)
+
+## operation: flatc for C++ decode/traverse (optimized)
+ elapsed time: 0.029 (s)
+ iterations: 1000000
+ size: 344 (bytes)
+ bandwidth: 11917.134 (MB/s)
+ throughput in ops per sec: 34642832.398
+ throughput in 1M ops per sec: 34.643
+ time per op: 28.866 (ns)
+
+
+## operation: flatcc for C encode (optimized)
+ elapsed time: 0.626 (s)
+ iterations: 1000000
+ size: 336 (bytes)
+ bandwidth: 536.678 (MB/s)
+ throughput in ops per sec: 1597255.277
+ throughput in 1M ops per sec: 1.597
+ time per op: 626.074 (ns)
+
+## operation: flatcc for C decode/traverse (optimized)
+ elapsed time: 0.029 (s)
+ iterations: 1000000
+ size: 336 (bytes)
+ bandwidth: 11726.930 (MB/s)
+ throughput in ops per sec: 34901577.551
+ throughput in 1M ops per sec: 34.902
+ time per op: 28.652 (ns)
+
+## JSON benchmark
+
+*Note: this benchmark is only available for `flatcc`. It uses the exact
+same data set as above.*
+
+The benchmark uses Grisu3 floating point parsing and printing algorithm
+with exact fallback to strtod/sprintf when the algorithm fails to be
+exact. Better performance can be gained by enabling inexact Grisu3 and
+SSE 4.2 in build options, but likely not worthwhile in practice.
+
+## operation: flatcc json parser and printer for C encode (optimized)
+
+(encode means printing from existing binary buffer to JSON)
+
+ elapsed time: 1.407 (s)
+ iterations: 1000000
+ size: 722 (bytes)
+ bandwidth: 513.068 (MB/s)
+ throughput in ops per sec: 710619.931
+ throughput in 1M ops per sec: 0.711
+ time per op: 1.407 (us)
+
+## operation: flatcc json parser and printer for C decode/traverse (optimized)
+
+(decode/traverse means parsing json to flatbuffer binary and calculating checksum)
+
+ elapsed time: 2.218 (s)
+ iterations: 1000000
+ size: 722 (bytes)
+ bandwidth: 325.448 (MB/s)
+ throughput in ops per sec: 450758.672
+ throughput in 1M ops per sec: 0.451
+ time per op: 2.218 (us)
+
+## JSON parsing and printing on same hardware in Virtual Box Ubuntu
+
+Numbers for Linux are included because parsing is significantly faster.
+
+## operation: flatcc json parser and printer for C encode (optimized)
+
+ elapsed time: 1.210 (s)
+ iterations: 1000000
+ size: 722 (bytes)
+ bandwidth: 596.609 (MB/s)
+ throughput in ops per sec: 826328.137
+ throughput in 1M ops per sec: 0.826
+ time per op: 1.210 (us)
+
+## operation: flatcc json parser and printer for C decode/traverse
+
+ elapsed time: 1.772 (s)
+ iterations: 1000000
+ size: 722 (bytes)
+ bandwidth: 407.372 (MB/s)
+ throughput in ops per sec: 564227.736
+ throughput in 1M ops per sec: 0.564
+ time per op: 1.772 (us)
+
diff --git a/doc/binary-format.md b/doc/binary-format.md
new file mode 100644
index 0000000..7c2f53e
--- /dev/null
+++ b/doc/binary-format.md
@@ -0,0 +1,1378 @@
+# FlatBuffers Binary Format
+
+
+<!-- vim-markdown-toc GFM -->
+
+* [Overview](#overview)
+* [Memory Blocks](#memory-blocks)
+* [Example](#example)
+* [Primitives](#primitives)
+ * [Numerics](#numerics)
+ * [Boolean](#boolean)
+ * [Format Internal Types](#format-internal-types)
+ * [Scalars](#scalars)
+ * [Structs](#structs)
+* [Internals](#internals)
+* [Type Hashes](#type-hashes)
+ * [Conflicts](#conflicts)
+ * [Type Hash Variants](#type-hash-variants)
+* [Unions](#unions)
+* [Optional Fields](#optional-fields)
+* [Alignment](#alignment)
+* [Default Values and Deprecated Values](#default-values-and-deprecated-values)
+* [Schema Evolution](#schema-evolution)
+* [Keys and Sorting](#keys-and-sorting)
+* [Size Limits](#size-limits)
+* [Verification](#verification)
+* [Risks](#risks)
+* [Nested FlatBuffers](#nested-flatbuffers)
+* [Fixed Length Arrays](#fixed-length-arrays)
+* [Big Endian FlatBuffers](#big-endian-flatbuffers)
+* [StructBuffers](#structbuffers)
+* [StreamBuffers](#streambuffers)
+* [Bidirectional Buffers](#bidirectional-buffers)
+* [Possible Future Features](#possible-future-features)
+ * [Force Align](#force-align)
+ * [Mixins](#mixins)
+
+<!-- vim-markdown-toc -->
+
+
+## Overview
+
+## Memory Blocks
+
+A FlatBuffers layout consists of the following types of memory blocks:
+
+- header
+- table
+- vector
+- string
+- vtable
+- (structs)
+
+Each of these has a contiguous memory layout. The header references the
+root table. Every table references a vtable and stores fields in a single
+block of memory. Some of these fields can be offsets to tables, strings
+and vectors. Vectors are one-dimensional blocks where each element is
+self-contained or stores a reference to a table or a string based on schema
+type information. A vtable decides which fields are present in a table and
+where they are stored. Vtables are the key to supporting schema evolution.
+A table has no special rules about field ordering except that fields must be
+properly aligned; if they are ordered by size they will pack more
+densely but may be more complicated to construct, and the schema may
+provide guidelines on the preferred order. Two vtables might mean
+different things but accidentally have the same structure. Such vtables
+can be shared between different tables. Vtable sharing is important when
+vectors store many similar tables. Structs are dense memory regions of
+scalar fields and smaller structs. They are mostly found embedded in
+tables but they are independent blocks when referenced from a union or
+union vector, or when used as a buffer root. Structs hold no references.
+
+Space between the above blocks is zero padded and present in order to
+ensure proper alignment. Structs must be packed as densely as possible
+according to the alignment rules that apply - this ensures that all
+implementations will agree on the layout. The blocks must not overlap in
+memory but two blocks may be shared if they represent the same data such
+as sharing a string.
+
+FlatBuffers are constructed back to front such that lower level objects
+such as sub-tables and strings are created first and stored last, while
+the root object is created last and stored early in the buffer. See also
+[Stream Buffers](#stream-buffers) for a proposed variation over this
+theme.
+
+All addressing in FlatBuffers is relative. The reason for this is that when
+navigating the buffer you don't need to store the buffer start or the
+buffer length, you can also find a new reference relative the reference
+you already have. vtables require the table location to find a field,
+but a table field referencing a string field only needs the table field
+location, not the table start in order to find the referenced string.
+This results in efficient navigation.
+
+## Example
+
+Uoffsets add their content to their address and are positive while a
+table's offset to its vtable is signed and is subtracted. A vtable
+element is added to the start of the table referring to the vtable.
+
+
+Schema ([eclectic.fbs]) :
+
+ namespace Eclectic;
+
+ enum Fruit : byte { Banana = -1, Orange = 42 }
+ table FooBar {
+ meal : Fruit = Banana;
+ density : long (deprecated);
+ say : string;
+ height : short;
+ }
+ file_identifier "NOOB";
+ root_type FooBar;
+
+JSON :
+
+ { "meal": "Orange", "say": "hello", "height": -8000 }
+
+Buffer :
+
+ header:
+
+ +0x0000 00 01 00 00 ; find root table at offset +0x00000100.
+ +0x0004 'N', 'O', 'O', 'B' ; possibly our file identifier
+
+ ...
+
+ table:
+
+ +0x0100 e0 ff ff ff ; 32-bit soffset to vtable location
+ ; two's complement: 2^32 - 0xffffffe0 = -0x20
+ ; effective address: +0x0100 - (-0x20) = +0x0120
+ +0x0104 00 01 00 00 ; 32-bit uoffset string field (FooBar.say)
+ ; find string +0x100 = 256 bytes _from_ here
+ ; = +0x0104 + 0x100 = +0x0204.
+ +0x0108 42d ; 8-bit (FooBar.meal)
+ +0x0109 0 ; 8-bit padding
+ +0x010a -8000d ; 16-bit (FooBar.height)
+ +0x010c ... ; (first byte after table end)
+
+ ...
+
+ vtable:
+
+ +0x0120 0c 00 ; vtable length = 12 bytes
+ +0x0122 0c 00 ; table length = 12 bytes
+ +0x0124 08 00 ; field id 0: +0x08 (meal)
+ +0x0126 00 00 ; field id 1: <missing> (density)
+ +0x0128 04 00 ; field id 2: +0004 (say)
+ +0x012a 0a 00 ; field id 3: +0x0a (height)
+
+ ...
+
+ string:
+
+ +0x0204 05 00 00 00 ; vector element count (5 ubyte elements)
+ +0x0208 'h' 'e' ; vector data
+ +0x020a 'l' 'l' ; vector data
+ +0x020c 'o' ; vector data
+ +0x020d 00 ; zero termination
+ ; special case for string vectors
+
+ ...
+
+
+Actual FlatCC generated buffer :
+
+ Eclectic.FooBar:
+ 0000 08 00 00 00 4e 4f 4f 42 e8 ff ff ff 08 00 00 00 ....NOOB........
+ 0010 2a 00 c0 e0 05 00 00 00 68 65 6c 6c 6f 00 00 00 *.......hello...
+ 0020 0c 00 0c 00 08 00 00 00 04 00 0a 00 ............
+
+
+Note that FlatCC often places vtables last resulting in `e0 ff ff ff`
+style vtable offsets, while Google's flatc builder typically places them
+before the table resulting in `20 00 00 00` style vtable offsets which
+might help understand why the soffset is subtracted from and not added
+to the table start. Both forms are equally valid.
+
+
+## Primitives
+
+Primitives are data used to build more complex structures such as
+strings, vectors, vtables and tables. Although structs are not strictly
+a primitive, it helps to view them as self-contained primitives.
+
+
+### Numerics
+
+FlatBuffers are based on the following primitives that are 8, 16, 32 and
+64 bits in size respectively (IEEE-754, two's complement, little endian):
+
+ uint8, uint16, uint32, uint64 (unsigned)
+ int8, int16, int32, int64 (two's complement)
+ float32, float64 (IEEE-754)
+
+
+### Boolean
+
+ flatbuffers_bool (uint8)
+ flatbuffers_true (flatbuffers_bool assign as 1, read as != 0)
+ flatbuffers_false (flatbuffers_bool = 0)
+
+Note that a C99 `bool` type has no defined size or sign so it is not an
+exact representation of a flatbuffers boolean encoding.
+
+When a stored value is interpreted as boolean it should not be assumed
+to be either 1 or 0 but rather as `not equal to 0`. When storing a
+boolean value or when converting a boolean value to integer before
+storing, the value should be 1 for true and 0 for false. In C this can
+be done using `!!x`.
+
+### Format Internal Types
+
+ flatbuffers_union_type_t (uint8, NONE = 0, 0 <= type <= 255)
+ flatbuffers_identifier_t (uint8[4])
+    flatbuffers_uoffset_t (uint32)
+    flatbuffers_soffset_t (int32)
+ flatbuffers_voffset_t (uint16)
+
+These types can change in FlatBuffer derived formats, but when we say
+FlatBuffers without further annotations, we mean the above sizes in
+little endian encoding. We always refer to specific field type and not a
+specific field size because this allows us to derive new formats easily.
+
+
+### Scalars
+
+To simplify discussion we use the term scalars to mean integers, boolean,
+floating point and enumerations. Enumerations always have an underlying
+signed or unsigned integer type used for its representation.
+
+Scalars are primitives that have a size between 1 and 8 bytes. Scalars
+are stored aligned to their own size.
+
+The format generally supports vectors and structs of any scalar type but
+some language bindings might not have support for all combinations such
+as arrays of booleans.
+
+A special case is the encoding of a union's type code which internally
+is an enumeration but it is not normally permitted in places where we
+otherwise allow for scalars.
+
+Another special case is enumerations of type boolean which may not be
+widely supported, but are possible. The binary format is not concerned with
+this distinction because a boolean is just an integer at this level.
+
+### Structs
+
+A struct is a fixed length block of a fixed number of fields in a specific order
+defined by a schema. A field is either a scalar, another struct or a fixed length
+array of these, or a fixed length char array. A struct cannot contain fields that
+contain itself directly or indirectly. A struct is self-contained and has no
+references. A struct cannot be empty.
+
+A schema cannot change the layout of a struct without breaking binary
+compatibility, unlike tables.
+
+When used as a table field, a struct is embedded within the table block
+unless it is a union value. A vector of structs is placed in a separate
+memory block, similar to vectors of scalars. A vector of unions that has
+a struct as member will reference the struct as an offset, and the
+struct is then an independent memory block like a table.
+
+FlatCC supports a struct as the root object of a FlatBuffer, but
+most implementations likely won't support this. Structs as root are very
+resource efficient.
+
+Structs cannot have vectors but they can have fixed length array fields. which
+are equivalent to stacking multiple non-array fields of the same type after each
+other in a compact notation with similar alignment rules. Additionally arrays
+can be of char type to have a kind of fixed length string. The char type is not
+used outside of char arrays. A fixed length array can contain a struct that
+contains one more fixed length arrays. If the char array type is not support, it
+can be assumed to be a byte array.
+
+
+## Internals
+
+All content is little endian and offsets are 4 bytes (`uoffset_t`).
+A new buffer location is found by adding a uoffset to the location where
+the offset is stored. The first location (offset 0) points to the root
+table. `uoffset_t` based references are to tables, vectors, and strings.
+References to vtables and to table fields within a table have
+different calculations as discussed below.
+
+A late addition to the format allow for adding a size prefix before the
+standard header. When this is done, the builder must know about it so it
+can align content according to the changed starting position. Receivers
+must also know about the size field just as they must know about the
+expected buffer type.
+
+The next 4 bytes (`sizeof(uoffset_t)`) might represent a 4 byte buffer
+identifier, or it might be absent but there is no obvious way to know
+which. The file identifier is typically ASCII characters from the
+schema's `file_identifier` field padded with 0 bytes but may also contain
+any custom binary identifier in little endian encoding. See
+[Type-Hashes](#type-hashes). The 0 identifier should be avoided because
+the buffer might accidentally contain zero padding when an identifier is
+absent and because 0 can be used by APIs to specify that no identifier
+should be stored.
+
+When reading a buffer, it should be checked that the length is at least
+8 bytes (2 * `sizeof(uoffset_t)`). Otherwise it is not safe to check the
+file identifier.
+
+The root table starts with a 4 byte vtable offset (`soffset_t`). The
+`soffset_t` has the same size as `uoffset_t` but is signed.
+
+The vtable is found by *subtracting* the signed 4 byte offset to the
+location where the vtable offset is stored. Note that the `FlatCC`
+builder typically stores vtables at the end of the buffer (clustered
+vtables) and therefore the vtable offset is normally negative. Other
+builders often store the vtable before the table unless reusing an
+existing vtable and this makes the soffset positive.
+(Nested FlatBuffers will not store vtables at the end because it would
+break compartmentalization).
+
+The vtable is a table of 2 byte offsets (`voffset_t`). The first two
+entries are the vtable size in bytes and the table size in bytes. The
+next offset is the vtable entry for table field 0. A vtable will always
+have as many entries as the largest stored field in the table, but it
+might skip entries that are not needed or known (version independence) -
+therefore the vtable length must be checked. The table length is only
+needed by verifiers. A vtable lookup of an out of range table id is the
+same as a vtable entry that has a zero entry and results in a default
+value for the expected type. Multiple tables may shared the same vtable,
+even if they have different types. This is done via deduplication during
+buffer construction. A table field id maps to a vtable offset using the
+formula `vtable-start + sizeof(voffset_t) * (field-id + 2)`, iff the
+result is within the vtable size.
+
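+To make the lookup concrete, the following sketch (assuming a
+little-endian host and an already verified buffer) shows how a reader can
+resolve a table field's location through the vtable:
+
+    #include <stdint.h>
+    #include <string.h>
+
+    /* Returns the field offset relative to the table start, or 0 if the
+     * field is absent (out of range or stored as zero in the vtable). */
+    static uint16_t field_offset(const uint8_t *table, uint16_t field_id)
+    {
+        int32_t soffset;
+        uint16_t vtable_size, voffset;
+        uint32_t pos;
+        const uint8_t *vtable;
+
+        memcpy(&soffset, table, 4);      /* soffset_t at the table start */
+        vtable = table - soffset;        /* vtable found by subtracting */
+        memcpy(&vtable_size, vtable, 2); /* first entry: vtable size in bytes */
+        pos = 2u * (uint32_t)(field_id + 2u);
+        if (pos + 2 > vtable_size) return 0; /* unknown field: default value */
+        memcpy(&voffset, vtable + pos, 2);
+        return voffset;                  /* 0 means the field is not stored */
+    }
+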
+The vtable entry stores a byte offset relative to the location where the
+`soffset_t` is stored at the start of the table. A table field is
+stored immediately after the `soffset_t` or may contain 0 padding. A
+table field is always aligned to at least its own size, or more if the
+schema demands it. Strings, sub-tables and vectors are stored as a
+4-byte `uoffset_t`. Such sub-elements are always found later in the
+buffer because `uoffset_t` is unsigned. A struct is stored in-place.
+Conceptually a struct is not different from an integer in this respect,
+just larger.
+
+If a sub-table or vector is absent, as opposed to merely empty (a table
+without fields or a vector of 0 length), the containing table field
+pointing to the sub-table or vector should itself be absent. Note that
+structs cannot contain sub-tables or vectors.
+
+A struct is always 0 padded up to its alignment. A struct's alignment
+is given as the largest size of any non-struct member, or the alignment
+of a struct member (but not necessarily the size of a struct member), or
+the schema specified alignment, whichever is larger.
+
+A struct's members are stored in-place so the struct fields are known
+via the schema, not via information in the binary format.
+
+An enum is represented as its underlying integer type and can be a member
+of structs, table fields and vectors just like integer and floating point
+scalars.
+
+A table is always aligned to `sizeof(uoffset_t)`, but may contain
+internal fields with larger alignment. That is, the table start or end
+are not affected by alignment requirements of field members, unlike
+structs.
+
+A string is a special case of a vector with the extra requirement that a
+0 byte must follow the counted content, so the following also applies to
+strings.
+
+A vector starts with a `uoffset_t` field that gives the length in element
+counts, not in bytes. Table fields point to the length field of a
+vector, not to the first element. A vector may have 0 length. Note that
+the FlatCC C binding will return a pointer to a vector's first element,
+which is different from the buffer internal reference.
+
+All elements of a vector have the same type, which is either a scalar
+type including enums, a struct, or a uoffset reference where all the
+referenced elements are tables of the same type, all strings, or,
+for union vectors, references to members of the same union. Because a
+union member can be a struct, it is possible to have vectors that
+reference structs instead of embedding them, but only via unions. It is
+not possible to have vectors of vectors other than string vectors,
+except indirectly via vectors containing tables.
+
+A vector's first element is aligned to the size of `uoffset_t` or the
+alignment of its element type, or the alignment required by the schema,
+whichever is larger. Note that the vector itself might be aligned to 4
+bytes while the first element is aligned to 8 bytes.
+
+(Currently the schema semantics do not support aligning vectors beyond
+their element size, but that might change and can be forced when
+building buffers via dedicated API calls).
+
+Strings are stored as vectors of type `ubyte_t`, i.e. 8-bit elements. In
+addition, a trailing zero is always stored. The trailing zero is not
+counted in the length field of the vector. Technically a string is
+supposed to be valid UTF-8, but in practice it is permitted that strings
+contain 0 bytes or other invalid sequences. It is recommended to
+explicitly check strings for UTF-8 conformance when this is required
+rather than to expect this to always be true. However, the ubyte vector
+should be preferred for binary data.
+
+A string, vector or table may be referenced by other tables and vectors.
+This is known as a directed acyclic graph (DAG). Because uoffsets are
+unsigned and because uoffsets are never stored with a zero value (except
+for null entries in union vectors), it is not possible for a buffer to
+contain cycles, which makes buffers safe to traverse without fear of
+excessive recursion. This also makes it possible to efficiently verify
+that a buffer does not contain references or content outside of its
+expected boundaries.
+
+A vector can also hold unions, but this is not supported by all
+implementations. A union vector is in reality two separate vectors: a
+type vector and an offset vector in place of a single union's type and
+value fields in a table. See [Unions](#unions).
+
+
+## Type Hashes
+
+A type hash is a 32-bit value defined as the fnv1a32 hash of a table's or
+a struct's fully qualified name. If the fnv1a32 hash returns 0 it should
+instead hash the empty string. 0 is used to indicate that a buffer
+should not store an identifier.
+
+Every table and struct has a name and optionally also a namespace of one
+or more levels. The fully qualified name is the optional namespace followed
+by the type name using '.' as a separator. For example
+"MyGame.Sample.Monster", or "Eclectic.FooBar".
+
+The type hash can be used as the buffer identifier instead of the schema
+provided `file_identifier`. The type hash makes it possible to
+distinguish between different root types from the same schema, and even
+across schema as long as the namespace is unique.
+
+Type hashes introduce no changes to the binary format, but the application
+interface must choose to support user defined identifiers or explicitly
+support type hashes. Alternatively an application can peek directly into
+the buffer at offset 4 (when `uoffset_t` is 4 bytes long).
+
+FlatCC generates the following identifier for the "MyGame.Sample.Monster"
+table:
+
+ #define MyGame_Sample_Monster_type_hash ((flatbuffers_thash_t)0xd5be61b)
+ #define MyGame_Sample_Monster_type_identifier "\x1b\xe6\x5b\x0d"
+
+But we can also
+[compute one online](https://www.tools4noobs.com/online_tools/hash/)
+for our example buffer:
+
+ fnv1a32("Eclectic.FooBar") = 0a604f58
+
+Thus we can open a hex editor and locate
+
+ +0x0000 00 01 00 00 ; find root table at offset +0x00000100.
+ +0x0004 'N', 'O', 'O', 'B' ; possibly our file identifier
+
+and replace it with
+
+ +0x0000 00 01 00 00 ; find root table at offset +0x00000100.
+    +0x0004 58 4f 60 0a ; very likely our file identifier
+
+or generate it with `flatcc`:
+
+ $ bin/flatcc --stdout doc/eclectic.fbs | grep FooBar_type
+ #define Eclectic_FooBar_type_hash ((flatbuffers_thash_t)0xa604f58)
+ #define Eclectic_FooBar_type_identifier "\x58\x4f\x60\x0a"
+
+
+The following snippet implements fnv1a32 and returns the empty string
+hash if the hash accidentally should return 0:
+
+
+ static inline flatbuffers_thash_t flatbuffers_type_hash_from_name(const char *name)
+ {
+ uint32_t hash = 2166136261UL;
+ while (*name) {
+ hash ^= (uint32_t)*name;
+ hash = hash * 16777619UL;
+ ++name;
+ }
+ if (hash == 0) {
+ hash = 2166136261UL;
+ }
+ return hash;
+ }
+
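+As a usage sketch, assuming the function above and a 32-bit
+`flatbuffers_thash_t`, the hash can be reproduced and compared against the
+identifier bytes shown earlier:
+
+    /* Assuming: typedef uint32_t flatbuffers_thash_t; */
+    flatbuffers_thash_t h = flatbuffers_type_hash_from_name("Eclectic.FooBar");
+    /* h == 0x0a604f58, stored little endian in the buffer as 58 4f 60 0a */
+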
+### Conflicts
+
+It is possible to have conflicts between two type hashes although the
+risk is small. Conflicts are not important as long as an application can
+distinguish between all the types it may encounter in actual use. A
+switch statement in C will error at compile time for two cases that have
+the same value, so the problem is easily detectable and fixable by
+modifying a name or a namespace.
+
+For global conflict resolution, a type should be identified by its fully
+qualified name using adequate namespaces. This obviously requires it to
+be stored separately from the buffer identifier due to size constraints.
+
+
+### Type Hash Variants
+
+If an alternative buffer format is used, the type hash should be
+modified. For example, if `uoffset_t` is defined as a 64-bit value, the
+fnv1a64 hash should be used instead. For big endian variants the hash
+remains unchanged but is byteswapped. The application will use the same
+id while the access layer will handle the translation.
+
+For buffers using structs as roots, the hash remains unchanged because
+the struct is a unique type in the schema. In this way a receiver that does
+not handle struct roots can avoid trying to read the root as a table.
+
+For further variations of the format, a format identifier can be inserted
+in front of the namespace when generating the hash. There is no formal
+approach to this, but as an example, let's say we want to use only 1 byte
+per vtable entry and identify these buffers with a type hash using the
+prefix "ebt:" for example buffer type. We then have the type hash:
+
+ #define type_hash_prefix "ebt:"
+
+ hash = fnv1a32(type_hash_prefix "Eclectic.FooBar");
+ hash = hash ? hash : fnv1a32(type_hash_prefix);
+
+If the hash returns 0 we hash the prefix.
+
+
+## Unions
+
+A union is a construction on top of the above primitives. It consists of
+a type and a value.
+
+In the schema a union type is a set of table types with each table name
+assigned a type enumeration starting from 1. 0 is the type NONE, meaning
+the union has no value assigned. The union type is represented as a
+ubyte enum type, or in the binary format as a value of type
+`union_type_t`, which for standard FlatBuffers is an 8-bit unsigned code
+with 0 indicating that the union stores no value and a non-zero value
+indicating the type of the stored union.
+
+A union is stored in a table as a normal sub-table reference with the
+only difference being that the offset does not always point to a table
+of the same type. The 8-bit union type is stored as a separate table
+field conventionally named the same as the union value field except for a
+`_type` suffix. The value field (storing the table offset) MUST have a field
+ID that is exactly one larger than that of the type field. If the value field
+is present the type field MUST also be present. If the type is NONE the
+value field MUST be absent and the type field MAY be absent because a
+union type always defaults to the value NONE.
+
+Vectors of unions are a late addition (mid 2017) to the FlatBuffers
+format. FlatCC supports union vectors as of v0.5.0.
+
+Vectors of unions have the same two fields as normal unions but they
+both store a vector and both vectors MUST have the same length or both
+be absent from the table. The type vector is a vector of 8-bit enums and
+the value vector is a vector of table offsets. Obviously each type
+vector element represents the type of the table in the corresponding
+value element. If an element is of type NONE the value offset must be
+stored as 0 which is a circular reference. This is the only offset that
+can have the value 0.
+
+A later addition (mid 2017) to the format allows for structs and strings
+to also be members of a union. A union value is always an offset to an
+independent memory block. For strings this is just the offset to the
+string. For tables it is the offset to the table, naturally, and for
+structs, it is an offset to a separate aligned memory block that holds
+a struct and not an offset to memory inside any other table or struct.
+FlatCC supports mixed type unions and vectors of these as of v0.5.0.
+
+## Optional Fields
+
+As of mid 2020 the FlatBuffers format introduced optional scalar table fields.
+There is no change to the binary schema, but the semantics have changed slightly
+compared to ordinary scalar fields (which remain supported as is): If an
+optional field is not stored in a table, it is considered to be a null value. An
+optional scalar field will have null as its default value, so any representable
+scalar value will always be stored in the buffer, unlike other scalar fields
+which by default do not store the field if the value matches the default numeric
+value. This was already possible before by using `force_add` semantics to force
+a value to be written even if it was matching the default value, and by
+providing an `is_present` test when reading a value so that it would be possible
+to distinguish between a value that happened to be a default value, and a value
+that was actually absent. However, such techniques were ad-hoc. Optional
+fields formalize the semantics of null values for scalars. Other field types
+already have meaningful null values. Only table fields can be optional so struct
+fields must always assign a value to all members.
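+
+For illustration only, the pre-existing FlatCC mechanisms mentioned above
+look roughly like the following sketch; the table, field and generated
+function names (`Monster_hp_force_add`, `Monster_hp_is_present`) are
+assumptions following the usual naming convention and may differ:
+
+    /* Writing: force storage of a value even when it equals the default. */
+    Monster_hp_force_add(B, 100);
+
+    /* Reading: distinguish a stored value from an absent field. */
+    if (Monster_hp_is_present(monster)) {
+        /* hp was explicitly stored, possibly equal to its default */
+    }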
+
+## Alignment
+
+All alignments are powers of two between 1 and 256. Large alignments are
+only possible via schema specified alignments. The format naturally has
+a maximum alignment of the largest scalar it stores, which is a 64-bit
+integer or floating point value. Because C malloc typically returns
+buffers aligned to at least 8 bytes, it is often safe to place buffers in
+heap allocated memory, but if the target system does not permit
+unaligned access, or is slow on unaligned access, a buffer should be
+placed in sufficiently aligned memory. Typically it is a good idea to
+place a buffer on a cache-line aligned boundary anyway.
+
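+A minimal sketch of such a check, where `align` is the buffer's required
+alignment (a power of two):
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* True when the memory block is adequately aligned for the buffer. */
+    static int is_sufficiently_aligned(const void *buf, size_t align)
+    {
+        return ((uintptr_t)buf & (align - 1)) == 0;
+    }
+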
+A buffer's alignment is the same as the largest alignment of any
+object or struct it contains.
+
+The buffer size is not guaranteed to be aligned to its own alignment,
+unlike structs. Google's `flatc` builder does this, at least when size
+prefixed. The `FlatCC` tool currently does not, but it might later add
+an option to pad up to alignment. This would make it simpler to stack
+similarly typed buffers in a file - but users can retrieve the buffer's
+alignment and do this manually. Thus, when stacking size prefixed
+buffers, each buffer should start aligned to its own size starting at
+the size field, and should also be zero padded up to its own alignment.
+
+
+## Default Values and Deprecated Values
+
+A table can omit storing any field that is not a required field. For
+strings, vectors and tables this results in returning a null value
+different from an empty value when reading the buffer. Struct fields not
+present in a table are also returned as null.
+
+All fields that can return null do not have a default value. Other
+values, which are integers, floats and enumerations, can all have
+default values. The default value is returned if not found in the table.
+
+If a default value is not specified, the default value is zero for the
+corresponding type. If an enumeration does not have a 0 value and no
+explicit default value, this is a schema error.
+
+When building a buffer the builder will compare a stored field to the
+known default value. If it matches, the field will simply be skipped.
+Some builder APIs make it possible to force a default value to be
+stored and to check if a field is missing when reading the buffer. This
+can be used to handle NULL values differently from default or missing
+values.
+
+A deprecated field is a schema construct. The binary format either stores
+a table field, or it does not.
+
+A deprecated field should be treated as not available, as in having no
+way to read the value, as opposed to returning a default value regardless
+of whether the field is present or not. If such fields for some reason are
+made accessible, the verifier must also understand and verify these fields.
+
+A deprecated field also cannot be written to a new buffer, although if
+it, against guidelines, remains possible to do so, it should be done as if
+the field was not deprecated.
+
+Structs cannot have default values and cannot have deprecated fields in
+standard FlatBuffers. FlatCC supports marking a struct field as
+deprecated. This implies the field will always be zeroed and with no
+trivial accessors. A struct can never change size without breaking
+support for schema evolution.
+
+FlatCC JSON parsers allow structs to only set some values. Remaining
+values will be implicitly zeroed. The C API for writing buffers does not
+have this concern because a struct can just be written as a C struct,
+so there is no control over which fields a user chooses to set or zero.
+However, structs should be zeroed and padded where data is not otherwise
+set. This makes it possible to hash and integrity check structs (though
+this is not an integral part of the format).
+
+
+## Schema Evolution
+
+A table has a known set of low-valued field identifiers. Any unused
+field id can be used in a future version. If a field (as is normal) is
+implicitly assigned an id, new fields can only be added at the end of
+the table. Internally this translates into new versions getting ever
+larger vtables. Note that vtables are only stored as large as needed for
+the actual content of a table, so a rarely used new field will not cause
+vtables to grow when unused.
+
+Enumerations may not change values in future versions. Unions may only
+add new table names to the end of the union list.
+
+Structs cannot change size nor content. They cannot evolve. FlatCC
+permits deprecating fields which means old fields will be zeroed.
+
+Names can be changed to the extent it makes sense to the applications already
+written around the schema, but it may still break applications relying
+on some reflective information. For example, a reader may provide the
+string representation of a numeric enum value.
+
+New types can be added. For example adding a new table is always safe
+as long as it does not conflict with any existing schemas using the same
+namespace.
+
+Required fields cannot stop being required and they cannot be deprecated.
+
+Various attributes and other changes form a gray area that will not make
+the binary format unsafe but may still break due to changes in code
+generation, serialization to JSON, or similar. For example, a generated
+constructor that creates a table from a list of positional arguments
+might break if the field order changes or grows or has fields
+deprecated. JSON parsers could cease to work on old formats if base64
+serialization is added subsequently, and so on.
+
+## Keys and Sorting
+
+Keys and sorting is a meta construct driven by the schema. The binary
+format has no special concept of keys and sorting and a vector can be
+sorted by one of several keys so it makes no sense to enforce a specific
+order.
+
+The basic FlatBuffers format only permits at most one key and generally
+sorts vectors by that key during buffer construction. FlatCC does not do
+this both because sorting is not practical while building the buffer and
+because FlatCC supports sorting by one of several keys. Thus, in general
+it is not safe to assume that a vector is sorted, but it can be sorted
+if needed.
+
+## Size Limits
+
+A buffer should never be larger than `2^(sizeof(soffset_t) * 8 - 1) - 1`
+or `2^31 - 1`, i.e. 2GB for standard FlatBuffers. Beyond this size it is
+not safe to represent vtable offsets and implementations can no longer
+use signed types to store `uoffset_t` values. This limit also ensures
+that all vectors can be represented safely with a signed 32-bit length
+type.
+
+The application interface is likely to use a native type for
+representing sizes and vector indices. If this type is smaller than
+`sizeof(soffset_t)` or equivalently `sizeof(uoffset_t)`, there is a risk
+of overflow. The simplest way to avoid any issues is to limit the
+accepted buffer size to the range of the native size type. For example,
+on some embedded microprocessor systems a C compiler might have a 16-bit
+int and `size_t` type, even if it supports `uint32_t` as well. In this
+case the safe assumption is to limit buffers to `2^15 - 1`, which is very
+likely more than sufficient on such systems.
+
+A builder API might also want to prevent vectors from being created when
+they cannot stay within the size type of the platform when the element
+size is multiplied by the element count. This is deceiving because the
+element count might easily be within range. Such issues will be rare in
+practice but they can be the source of magnificent vulnerabilities if not
+handled appropriately.
+
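+A sketch of how a builder might guard that multiplication on a platform
+with a small `size_t`:
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Returns -1 if count * elem_size would overflow size_t. */
+    static int vector_byte_size(size_t count, size_t elem_size, size_t *out)
+    {
+        if (elem_size != 0 && count > SIZE_MAX / elem_size) return -1;
+        *out = count * elem_size;
+        return 0;
+    }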
+
+## Verification
+
+Verification as discussed here is not just about implementing a
+verifier. It is as much requirements that any builder must fulfill.
+
+The objective of verification is to make it safe to read from an
+untrusted buffer, or a trusted buffer that accidentally has an
+unexpected type. The verifier does not guarantee that the type is indeed
+the expected type, except that it can read the buffer identifier if
+present, which is still no guarantee. The verifier cannot make it safe
+to modify buffers in-place because the cost of doing such a check would
+be prohibitive in the average case.
+
+A buffer verifier is expected to verify that all objects (strings,
+vectors and tables) do not have an end position beyond the externally
+specified buffer size and that all offsets are aligned relative to offset
+zero, and sometimes also relative to the actually specified buffer (but
+sometimes it is desirable to verify buffers that are not stored
+aligned, such as in network buffers).
+
+A verifier primarily checks that:
+
+- the buffer is at least `2 * sizeof(uoffset_t)` large such that the
+  root table offset and buffer identifier can be checked safely.
+- any offset being chased is inside the buffer and any data accessed
+  from the resulting location is entirely inside the buffer and aligned.
+  Notably the vtable's first entry must be valid before the vtable can
+  safely validate itself, but this also applies to table, string and
+  vector fields.
+- any uoffset has a size of at least `sizeof(uoffset_t)` to avoid
+  self-reference and is no larger than the largest positive `soffset_t`
+  value of the same size - this ensures that implementations can safely
+  add uoffset_t values even if converting to signed first. It also,
+  incidentally, ensures compatibility with StreamBuffers - see below.
+- vtable size is aligned and does not end outside the buffer.
+- vtable size is at least the two header fields
+  (`2 * sizeof(voffset_t)`).
+- required table fields are present.
+- recursively verify all known fields and ignore other fields. Unknown
+ fields are vtable entries after the largest known field ID of a table.
+ These should be ignored in order to support forward versioning.
+- deprecated fields are valid if accessors are available to access them,
+  or are ignored if there is no way to access the field by application code.
+- vectors end within the buffer.
+- strings end within the buffer and have a zero byte after the end which
+  is also within the buffer.
+- table fields are aligned relative to buffer start - both structs,
+ scalars, and offset types.
+- table field size is aligned relative to field start.
+- any table field does not end outside the tables size as given by the
+ vtable.
+- table end (without chasing offsets) is not outside buffer.
+- all data referenced by offsets are also valid within the buffer
+  according to the type given by the schema.
+- verify that recursion depth is limited to a configurable acceptable
+ level for the target system both to protect itself and such that
+ general recursive buffer operations need not be concerned with stack
+ overflow checks (a depth of 100 or so would normally do).
+- verify that if the union type is NONE the value (offset) field is absent and
+ if it is not NONE that the value field is present. If the union type
+ is known, the table should be verified. If the type is not known
+ the table field should be ignored. A reader using the same schema would
+ see the union as NONE. An unknown union is not an error in order to
+ support forward versioning.
+- verify the union value according to the type just like any other
+  field or element.
+- verify that a union vector always has type vector if the offset vector
+ is present and vice versa.
+
+A verifier may choose to reject unknown fields and union types, but this
+should only be a user selectable option, otherwise schema evolution
+will not be possible.
+
+A verifier needs to be very careful in how it deals with overflow and
+signs. Vector elements multiplied by element size can overflow. Adding
+an invalid offset might become valid due to overflow. In C, math on
+unsigned types yields predictable two's complement overflow while signed
+overflow is undefined behavior and can and will result in unpredictable
+values with modern optimizing compilers. The upside is that if the
+verifier handles all this correctly, the application reader logic can be
+much simpler while staying safe.
+
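+A classic example of such overflow-safe bounds checking (a sketch, not
+FlatCC's actual implementation):
+
+    #include <stdint.h>
+
+    /* Check that [offset, offset + size) lies inside a buffer of buf_size
+     * bytes without risking unsigned wrap-around. */
+    static int range_is_inside(uint32_t buf_size, uint32_t offset, uint32_t size)
+    {
+        return offset <= buf_size && size <= buf_size - offset;
+    }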
+
+A verifier does __not__ enforce that:
+
+- structs and other table fields are aligned relative to table start because
+ tables are only aligned to their soffset field. This means a table cannot be
+ copied naively into a new buffer even if it has no offset fields.
+- the order of individual fields within a table. Even if a schema says
+ something about ordering this should be considered advisory. A
+ verifier may additionally check for ordering for specific
+ applications. Table order does not affect safety nor general buffer
+ expectations. Ordering might affect size and performance.
+- sorting as specified by schema attributes. A table might be sorted in
+ different ways and an implementation might avoid sorting for
+ performance reasons and other practical reasons. A verifier may,
+ however, offer additional verification to ensure specific vectors or
+ all vectors are sorted according to schema or other guidelines. Lack
+ of sorting might affect expected behavior but will not make the buffer
+ unsafe to access.
+- that structures do not overlap. Overlap can result in vulnerabilities
+ if a buffer is modified, and no sane builder should create overlaps
+  other than proper DAGs except via a separate compression/decompression
+  stage of no interest to the verifier.
+- strings are UTF-8 compliant. Lack of compliance may affect expectations
+  or may make strings appear shorter or garbled. Worst case a naive
+  UTF-8 reader might reach beyond the end when observing a lead byte
+  suggesting data after the buffer end, but such a read should discover the
+ terminal 0 before things get out of hand. Being relaxed permits
+ specific use cases where UTF-8 is not suitable without giving up the
+ option to verify buffers. Implementations can add additional
+  verification for specific use cases for example by providing a
+ strict-UTF8 flag to a verifier or by verifying at the application
+ level. This also avoids unnecessary duplicate validation, for example
+ when an API first verifies the buffer then converts strings to an
+ internal heap representation where UTF-8 is validated anyway.
+- default values are not stored. It is common to force default values to
+ be stored. This may be used to implement NULL values as missing
+ values different from default values.
+- enum values are only among the enum values of its type. There are many
+  use cases where it is convenient to add enum values, for example flags
+  or enums as units e.g. `2 * kilo + 3 * ounce`. More generally ordinary
+  integers may have value range restrictions which are also out of scope
+  for a verifier. An application may provide additional verification when
+  requested.
+
+More generally we can say that a verifier is a basic fast assurance that
+the buffer is safe to access. Any additional verification is application
+specific. The verifier makes it safe to apply secondary validation.
+Secondary validation could be automated via schema attributes and may be
+very useful as such, but it is a separate problem and out of scope for a
+core binary format verifier.
+
+A buffer identifier is optional so the verifier should be informed
+whether an identifier must match a given id. It should check both ASCII
+text and zero padding, not just do a string compare. It is non-trivial to
+decide if the second buffer field is an identifier, or some other data,
+but if the field does not match the expected identifier, it certainly
+isn't what is expected.
+
+Note that it is not entirely trivial to check vector lengths because the
+element size must be multiplied by the stored element count. For large
+elements this can lead to overflows.
+
+
+## Risks
+
+Because a buffer can contain DAGs constructed to explode exponentially,
+it can be dangerous to print JSON or otherwise copy content blindly
+if there is no upper limit on the export size.
+
+In-place modification cannot be trusted because a standard buffer
+verifier will verify that reads are safe, but will not detect if two
+objects are overlapping. For example, a table could be stored inside
+another table. Modifying one table might cause access to the contained
+table to go out of bounds, for example by directing the vtable elsewhere.
+
+The platform native integer and size type might not be able to handle
+large FlatBuffers - see [Size Limits](#size-limits).
+
+Because FlatCC requires buffers to be sorted after building, there is a
+risk due to buffer modifications. It is not sufficient to verify buffers
+after sorting because sorting is done inline. Therefore buffers must be
+trusted or rewritten before sorting.
+
+
+## Nested FlatBuffers
+
+FlatBuffers can be nested inside other FlatBuffers. In concept this is
+very simple: a nested buffer is just a chunk of binary data stored in a
+`ubyte` vector, typically with some convenience methods generated to
+access a stored buffer. In practice it adds a lot of complexity. A
+nested buffer must be created strictly separately and copied in as
+binary data, but often users mix the two builder contexts, accidentally
+storing strings from one buffer inside another. And when done right, the
+containing ubyte vector might not be aligned appropriately, making it
+invalid to access the buffer without first copying it out of the containing
+buffer, except where unaligned access is permitted. Further, a nested
+FlatBuffer necessarily has a length prefix because any ubyte vector has
+a length field at its start. Therefore, size prefixed flatbuffers should
+not normally be stored as nested buffers, but sometimes it is necessary
+in order to have the buffer properly aligned after extraction.
+
+The FlatCC builder makes it possible to build nested FlatBuffers while
+the containing table of the parent buffer is still open. It is very
+careful to ensure alignment and to ensure that vtables are not shared
+between the two (or more) buffers, otherwise a buffer could not safely
+be copied out. Users can still make mistakes by storing references from
+one buffer in another.
+
+Still, this area is so complex that several bugs have been found.
+Thus, it is advisable to use nested FlatBuffers with some care.
+
+On the other hand, nested FlatBuffers make it possible to trivially
+extract parts of FlatBuffer data. Extracting a table would require
+chasing pointers and could potentially explode due to shared sub-graphs,
+if not handled carefully.
+
+## Fixed Length Arrays
+
+This feature can be seen as equivalent to repeating a field of the same type
+multiple times in a struct.
+
+Fixed length array struct fields were introduced mid 2019.
+
+A fixed length array is somewhat like a vector of fixed length containing inline
+fixed length elements with no stored size header. The element type can be
+scalars, enums and structs but not other fixed length arrays (without wrapping
+them in a struct).
+
+An array should not be mistaken for a vector as vectors are independent objects
+while arrays are not. Vectors cannot be fixed length. An array can store fixed
+size arrays inline by wrapping them in a struct and the same applies to unions.
+
+The binary format of a fixed length array of length `n` and type `t` can
+be precisely emulated by creating a struct that holds exactly `n` fields
+of type `t`, `n > 0`. This means that a fixed length array does not
+store any length information in a header and that it is stored inline within
+a struct. Alignment follows the struct's alignment rules with arrays having the
+same alignment as their elements and not their entire size.
+
+The maximum length is limited by the maximum struct size and / or an
+implementation imposed length limit. FlatCC accepts any array that will fit in
+a struct with a maximum size of 2^16-1 by default but can be compiled with a
+different setting. Google's flatc implementation currently enforces a maximum
+element count of 2^16-1.
+
+Assuming the schema compiler computes the correct alignment for the overall
+struct, there is no additional work in verifying a buffer containing a fixed
+length array because structs are verified based on the outermost struct's size
+and alignment without having to inspect its content.
+
+Fixed length arrays also support char arrays. The `char` type is similar to the
+`ubyte` or `uint8` type but a char can only exist as a char array like
+`x:[char:10]`. Chars cannot exist as a standalone struct or table field, and
+also not as a vector element. Char arrays are like strings, but they contain no
+length information and no zero terminator. They are expected to be endian
+neutral and to contain ASCII or UTF-8 encoded text zero padded up to the array
+size. Text can contain embedded nulls and other control characters. In JSON form
+the text is printed with embedded null characters but stripped of trailing
+null characters, and a parser will pad the missing null characters.
+
+
+The following example uses fixed length arrays. The example is followed by the
+equivalent representation without such arrays.
+
+ struct Basics {
+        a: int;
+        b: int;
+ }
+
+ struct MyStruct {
+ x: int;
+ z: [short:3];
+ y: float;
+ w: [Basics:2];
+ name: [char:4];
+ }
+
+ // NOT VALID - use a struct wrapper:
+ table MyTable {
+ t: [ubyte:2];
+ m: [MyStruct:2];
+ }
+
+Equivalent representation:
+
+ struct MyStructEquivalent {
+ x: int;
+ z1: short;
+ z2: short;
+ z3: short;
+ y: float;
+        wa1: Basics;
+        wa2: Basics;
+ name1: ubyte;
+ name2: ubyte;
+ name3: ubyte;
+ name4: ubyte;
+ }
+
+ struct MyStructArrayEquivalent {
+ s1: MyStructEquivalent;
+ s2: MyStructEquivalent;
+ }
+
+ struct tEquivalent {
+ t1: ubyte;
+ t2: ubyte;
+ }
+
+ table MyTableEquivalent {
+ t: tEquivalent;
+ m: MyStructArrayEquivalent;
+ }
+
+
+Note that forced zero-termination can be obtained by adding a trailing ubyte
+field since uninitialized struct fields should be zeroed:
+
+ struct Text {
+ str: [char:255];
+ zterm: ubyte;
+ }
+
+Likewise a length prefix field could be added if the applications involved know
+how to interpret such a field:
+
+ struct Text {
+ len: ubyte;
+ str: [char:254];
+ zterm: ubyte;
+ }
+
+The above is just an example and not part of the format as such.
+
+
+## Big Endian FlatBuffers
+
+FlatBuffers are formally always little endian and even on big-endian
+platforms they are reasonably efficient to access.
+
+However it is possible to compile FlatBuffers with native big endian
+support using the FlatCC tool. The procedure is out of scope for this
+text, but the binary format is discussed here:
+
+All fields have exactly the same type and size as the little endian
+format but all scalars including floating point values are stored
+byteswapped within their field size. Offset types are also byteswapped.
+
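+For example, a 32-bit scalar or offset is swapped within its own 4 byte
+field; a sketch of the conversion:
+
+    #include <stdint.h>
+
+    /* Swap a 32-bit value between little and big endian representation. */
+    static uint32_t swap_uint32(uint32_t v)
+    {
+        return (v >> 24) | ((v >> 8) & 0x0000ff00u)
+             | ((v << 8) & 0x00ff0000u) | (v << 24);
+    }
+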
+The buffer identifier is stored byte swapped if present. For example
+the 4-byte "MONS" identifier becomes "SNOM" in big endian format. It is
+therefore reasonably easy to avoid accidentally mixing little- and
+big-endian buffers. However, it is not trivial to handle identifiers that
+are not exactly 4-bytes. "HI\0\0" could be "IH\0\0" or "\0\0IH". It is
+recommended to always use 4-byte identifiers to avoid this problem. See
+the FlatCC release 0.4.0 big endian release for details.
+
+When using type hash identifiers the identifier is stored as a big
+endian encoded hash value. The user application will use the hash in its
+native form and accessor code will do the conversion as for other
+values.
+
+
+## StructBuffers
+
+_NOTE: the Google FlatBuffer project originally documented structs as
+valid root objects, but never actually implemented it, and has as of mid
+2020 changed the specification to disallow root structs as covered in
+this section. FlatCC for C has been supporting root structs for a long
+time, and they can provide significant speed advantages, so FlatCC will
+continue to support these._
+
+Unlike tables, structs are usually embedded in a fixed memory
+block representing a table, in a vector, or embedded inline in other
+structs, but they can also be independent when used in a union.
+
+The root object in FlatBuffers is conventionally expected to be a table,
+but it can also be a struct. FlatCC supports StructBuffers. Since structs
+do not contain references, such buffers are truly flat. Most
+implementations are not likely to support structs as root but even so
+they are very useful:
+
+It is possible to create very compact and very fast buffers this way.
+They can be used where one would otherwise consider using manual structs
+or memory blocks but with the advantage of a system and language
+independent schema.
+
+StructBuffers may be particularly interesting for the Big Endian
+variant of FlatBuffers for two reasons: the first being that performance
+likely matters when using such buffers and thus Big Endian platforms
+might want them. The second reason is that network byte order is
+traditionally big endian, and this has proven very difficult to change,
+even in new evolving IETF standards. StructBuffers can be used to manage
+non-trivial big endian encoded structs, especially structs containing
+other structs, even when the receiver does not understand FlatBuffers as
+a concept, since the header can just be dropped or trivially documented.
+
+
+## StreamBuffers
+
+StreamBuffers are so far only a concept although some implementations
+may already be able to read them. The format is documented to aid
+possible future implementations.
+
+StreamBuffers make it possible to store partially completed buffers,
+for example by writing directly to disk or by sending partial buffer
+data over a network. Traditional FlatBuffers require an extra copying
+step to make this possible, and if the writes are partial, each section
+written must also store the segment length to support reassembly.
+StreamBuffers avoid this problem.
+
+StreamBuffers treat `uoffset_t` the same as `soffset_t` with the special
+limitation that `uoffset_t` is always negative when viewed as a two's
+complement value.
+
+The implication is that everything a table or vector references must be
+stored earlier in the buffer, except vtables that can be stored freely.
+Existing reader implementations that treat `uoffset_t` as signed, such as
+JavaScript, will be able to read StreamBuffers with no modification.
+Other readers can easily be modified by casting the uoffset value to
+signed before adding it.
+
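+A sketch of a reader that works for both layouts by treating the offset
+as signed (assuming a 4 byte `uoffset_t` and a little-endian host):
+
+    #include <stdint.h>
+    #include <string.h>
+
+    /* Chase a reference stored at `field`; the offset is added as a signed
+     * value so it may point forward (FlatBuffers) or backward (StreamBuffers). */
+    static const uint8_t *deref(const uint8_t *field)
+    {
+        int32_t offset;
+        memcpy(&offset, field, 4);
+        return field + offset;
+    }
+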
+Verifiers must ensure that any buffer verified always stores all
+`uoffset_t` with the same sign. This ensures the DAG structure is
+preserved without cycles.
+
+The root table offset remains positive as an exception and points to the
+root table. A stream buffer root table will be the last table in the
+buffer.
+
+The root table offset may be replaced with a root table offset at the
+end of the buffer instead of the start. An implementation may also
+zero the initial offset and update it later. In either case the buffer
+should be aligned accordingly.
+
+Some may prefer the traditional FlatBuffer approach because the root
+table is stored first and it is somehow easier or faster to access, but
+modern CPU cache systems do not care much about the order of access as
+long as there is some aspect of locality of reference, and the same
+applies to disk access, while network access likely will have the end of
+the buffer in hot cache as this is the last part sent. The average
+distance between objects will be the same for both FlatBuffers and
+StreamBuffers.
+
+
+## Bidirectional Buffers
+
+Bidirectional Buffers are a generalization of StreamBuffers and FlatBuffers
+and so far only exist as an idea.
+
+FlatBuffers and StreamBuffers only allow one direction because it
+guarantees easy cycle rejection. Cycles are unwanted because it is
+expensive to verify buffers with cycles and because recursive navigation
+might never terminate.
+
+As it happens, we can also easily reject cycles in bidirectional
+buffers if we apply certain constraints that are fully backwards
+compatible with existing FlatBuffers that have a size below 2GB.
+
+The rule is that when an offset changes direction relative to a parent
+object's direction, the object creating the change of direction becomes a
+new start or end of buffer for anything reachable via that offset.
+
+The root table R is reached via forward reference from the buffer
+header. Its boundaries are itself and the buffer end. If this table
+has an offset pointing back, for example to new table X, then table X
+must see the buffer start and R as two boundaries. X must not directly
+or indirectly reach outside this region. Likewise, if the table R points
+to new table Y in the forward direction, then Y is bounded by itself and
+the buffer end.
+
+A lower bound is the end of a table or a vector, although we just say the
+table or vector is a boundary. An upper bound extends until the start of the
+table or vector, or the end of the buffer.
+
+When a buffer is verified, the boundaries are initially the buffer start
+and buffer end. When the references from the root table are followed,
+the new boundaries are either buffer start to root table, or root
+table to end, depending on the offsets direction. And so forth
+recursively.
+
+We can relax the requirement that simple vectors and vtables cannot
+cross these boundaries, except for the hard buffer limits, but it does
+complicate things and is likely not necessary.
+
+Nested FlatBuffers already follow similar boundary rules.
+
+The point of Bidirectional buffers is not that it would be interesting
+to store things in both directions, but to provide a single coherent
+buffer model for both ways of storing buffers without making a special
+case. This will allow StreamBuffers to co-exist with FlatBuffers without
+any change, and it will allow procedures building larger buffers to make
+their own choices on how to build their subsection of the buffer.
+
+We still need to make allowance for keeping the buffer header's root
+pointer implicit by context, at least as an option. Otherwise it is not,
+in general, possible to start streaming buffer content before the entire
+buffer is written.
+
+
+## Possible Future Features
+
+This is highly speculative, but documents some ideas that have been
+floating in order to avoid incompatible custom extensions on the same
+theme. Still these might never be implemented or end up being
+implemented differently. Be warned.
+
+### Force Align
+
+`force_align` attribute supported on fields of structs, scalars and
+vectors of fixed length elements.
+
+### Mixins
+
+A mixin is a table or a struct that is apparently expanded into the table
+or struct that contains it.
+
+Mixins add new accessors to make it appear as if the fields of the mixed
+in type are copied into the containing type although physically this
+isn't the case. Mixins can include types that themselves have mixins
+and these mixed in fields are also expanded.
+
+A `mixin` is an attribute applied to a table or a struct field when that
+field has a struct or a table type. The binary format is unchanged and
+the table or struct will continue to work as if it wasn't a mixin and
+is therefore fully backwards compatible.
+
+Example:
+
+ struct Position {
+ spawned: bool(required);
+ x: int;
+ y: int;
+ }
+
+ table Motion {
+ place: Position(mixin);
+ vx: int = 0;
+ vy: int = 0;
+ }
+
+ table Status {
+ title: string;
+ energy: int;
+ sleep: int;
+ }
+
+ table NPC {
+ npcid: int;
+ motion: Motion(mixin);
+ stat: Status(mixin);
+ }
+
+ table Rock {
+ here: Position(mixin);
+ color: uint32 = 0xa0a0a000;
+ }
+
+ table Main {
+ npc1: NPC;
+ rock1: Rock;
+ }
+
+ root_type Main;
+
+
+Here the tables NPC and Rock will appear with read accessors as if they have the fields:
+
+ table NPC {
+ npcid: int;
+ spawned: bool(required);
+ x: int;
+ y: int;
+ vx: int = 0;
+ vy: int = 0;
+ title: string;
+ energy: int;
+ sleep: int;
+ place: Position;
+ motion: Motion(required);
+ stat: Status;
+ }
+
+ table Rock {
+ spawned: bool(required);
+ x: int;
+ y: int;
+ here: Position(required);
+ color: uint32 = 0xa0a0a000;
+ }
+
+ table Main {
+ npc1: NPC;
+ rock1: Rock;
+ }
+
+ root_type Main;
+
+or in JSON:
+
+ {
+ "npc1": {
+ "npcid": 1,
+        "spawned": true,
+ "x": 2,
+ "y": 3,
+ "vx": -4,
+ "vy": 5,
+ "title": "Monster",
+ "energy": 6,
+ "sleep": 0
+ },
+ "rock1": {
+        "spawned": false,
+ "x": 0,
+ "y": 0,
+ "color": 0xa0a0a000
+ }
+ }
+
+
+
+Note that there is some redundancy here which makes it possible to
+access the mixed in fields in different ways and to perform type casts
+to a mixed in type.
+
+A cast can happen through a generated function along the lines of
+`npc.castToPosition()`, or via a field accessor `npc.getPlace()`.
+
+A JSON serializer excludes the intermediate container fields such as
+`place` and `motion` in the example.
+
+A builder may choose to only support the basic interface and require
+each mixed in table or struct to be created separately. A more advanced
+builder would alternatively accept adding fields directly, but would
+error if a field is set twice by mixing up the approaches.
+
+If a mixed in type has a required field, the required status propagates to
+the parent, but non-required siblings are not required as can be seen in
+the example above.
+
+Mixins also place some constraints on the involved types. It is not
+possible to mix in the same type twice because names would conflict and
+it would no longer be possible to trivially cast a table or struct
+to one of its kinds. An empty table could be mixed in to
+provide type information but such a type can also only be added once.
+
+Mixing in types introduces the risk of name conflicts. It is not valid
+to mix in a type directly or indirectly in a way that would lead to
+conflicting field names in a containing type.
+
+Note that in the example it is not possible to mix in the Position struct
+twice, otherwise we could have mixed in a Coord class twice to have
+position and velocity, but in that case it would be natural to
+have two fields of Coord type which are not mixed in.
+
+Schema evolution is fully supported because the vtable and field id's
+are kept separate. It is possible to add new fields to any type that
+is mixed in. However, adding fields could lead to name conflicts
+which are then reported by the schema compiler.
+
+
+[eclectic.fbs]: https://github.com/dvidelabs/flatcc/blob/master/doc/eclectic.fbs
diff --git a/doc/builder.md b/doc/builder.md
new file mode 100644
index 0000000..c953770
--- /dev/null
+++ b/doc/builder.md
@@ -0,0 +1,1845 @@
+# Builder Interface Reference
+
+<!-- vim-markdown-toc GFM -->
+
+* [Introduction](#introduction)
+* [Size Prefixed Buffers](#size-prefixed-buffers)
+* [Namespaces](#namespaces)
+* [Error Codes](#error-codes)
+* [Endianness](#endianness)
+ * [Deprecated](#deprecated)
+* [Buffers](#buffers)
+* [Tables](#tables)
+ * [Adding Fields](#adding-fields)
+ * [Nested Tables](#nested-tables)
+* [Packing tables](#packing-tables)
+* [Strings](#strings)
+* [Structs](#structs)
+ * [Fixed Length Arrays in Structs](#fixed-length-arrays-in-structs)
+* [Nested Buffers](#nested-buffers)
+* [Scalars and Enums](#scalars-and-enums)
+* [Vectors](#vectors)
+* [Unions](#unions)
+ * [Union Vectors](#union-vectors)
+ * [Unions of Strings and Structs](#unions-of-strings-and-structs)
+* [Error Handling](#error-handling)
+* [Type System Overview](#type-system-overview)
+* [Cloning](#cloning)
+* [Picking](#picking)
+* [Sorting Vectors](#sorting-vectors)
+ * [Dangers of Sorting](#dangers-of-sorting)
+ * [Scanning](#scanning)
+* [Example of different interface type users](#example-of-different-interface-type-users)
+* [Special Emitters](#special-emitters)
+
+<!-- vim-markdown-toc -->
+
+
+## Introduction
+
+We assume a separate read-only file and add extensions to this with
+support from a builder library and a builder object.
+
+The underlying builder library supports two modes of operation that mix
+together: `create` which sends data directly to the target buffer
+(emitter object) and a stack driven `start/end` approach which allocates
+objects and vectors on the stack. The code generator chooses the most
+efficient approach given the circumstances.
+
+Unlike most FlatBuffer language interfaces, tables and vectors are not
+created back to front: They are either created completely in one
+operation, or they are constructed on a stack front to back until they
+can be emitted. The final buffer is still constructed back to front.
+For big-endian platforms this may require temporary stack allocation of
+complete vectors where little endian platforms can emit directly.
+
+Tables and vectors stored in other tables or vectors must be completed
+before they can be stored, but unlike most language interfaces they can
+be constructed while a parent is also being constructed as long as
+nesting remains balanced. While this occasionally may require more
+stack, it may also avoid external temporary allocation.
+
+A builder object is required to start buffer construction. The builder
+must be initialized first and can be reset and reused between buffers,
+reusing stack allocation. The builder can have a customized emitter
+object but here we use the default. Finalizing the buffer depends on
+the emitter and we can use a default finalizer only because we use the
+default emitter - it allocates and populates a linear buffer from a
+paged emitter ring buffer.
+
+Note that in most cases `flatcc_builder_finalize_buffer` is sufficient,
+but to be strictly portable, use
+`flatcc_builder_finalize_aligned_buffer` and `aligned_free`.
+`aligned_free` is often implemented as `free` in `flatcc/portable` but
+not on all platforms. As of flatcc version 0.5.0
+`flatcc_builder_aligned_free` is provided to add robustness in case the
+application's `aligned_free` implementation might differ from the library
+version due to changes in compile time flags.
+
+Generally we use the monster example with various extensions, but to
+show a simple complete example we use a very simple schema (`myschema.fbs`):
+
+ table mytable { myfield1: int; myfield2: int; }
+
+ #include "myschema_builder.h"
+
+ void testfun() {
+
+ void *buffer;
+ size_t size;
+ flatcc_builder_t builder, *B;
+ mytable_table_t mt;
+ B = &builder;
+ flatcc_builder_init(B);
+
+ /* Construct a buffer specific to schema. */
+ mytable_create_as_root(B, 1, 2);
+
+ /* Retrieve buffer - see also `flatcc_builder_get_direct_buffer`. */
+ /* buffer = flatcc_builder_finalize_buffer(B, &size); */
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+
+ /* This is read-only buffer access. */
+ mt = mytable_as_root(buffer);
+ assert(mytable_myfield1(mt) == 1);
+ assert(mytable_myfield2(mt) == 2);
+
+ /* free(buffer); */
+ flatcc_builder_aligned_free(buffer);
+
+ /*
+ * Reset, but keep allocated stack etc.,
+ * or optionally reduce memory using `flatcc_builder_custom_reset`.
+ */
+ flatcc_builder_reset(B);
+
+ /* ... construct another a buffer */
+
+ /* Reclaim all memory. */
+ flatcc_builder_clear(B);
+ }
+
+Note that a compiled schema generates a `myschema_reader.h` file and
+optionally a `myschema_builder.h` and some common support files. When
+building a buffer the `myschema_builder.h` must be used but when only
+reading then the `myschema_reader.h` file should be used instead. Here
+we are only concerned with building. When building, it is necessary to
+link with `libflatccrt.a` runtime library but when reading, all
+necessary code is contained in the generated header files.
+
+The builder object only manages a stack of currently active objects and
+does not store an object that is complete. Instead it calls an emitter
+object with the partial data ready for emission, similar to a write
+function. A default emitter is provided which implements a ring buffer
+and the result may be written to a file, copied to a buffer, or
+finalized to an allocated buffer. The builder supports these methods
+directly for the default emitter, and only the default emitter, because
+emitters are otherwise defined by only one simple emit function - see
+`emit_test.c` for a simple example of a custom emitter.
+A custom allocator may be useful when working with small buffers in a
+constrained environment - the allocator handles temporary stacks,
+virtual table caches etc. but not the emitter.
+
+The allocator and emitter interface is documented in the builder library
+header [flatcc_builder.h] and the default implementation in
+[flatcc_emitter.h]. The default allocator is implemented as part of the
+flatcc_builder source.
+
+The builder can be reused between buffers using the `reset` operation.
+The default emitter can also be reused and will automatically reset
+when the buffer is. For custom emitters, any reset operation must be
+called manually. The same applies to clear. The reset operations
+maintain allocated memory but also reduce memory consumption across
+multiple resets heuristically.
+
+
+## Size Prefixed Buffers
+
+Buffers can be created with a size prefix of type `uoffset_t`. When
+doing this, the buffer is aligned relative to the size prefix such that
+buffers can be stacked in a file and for example be accessed via memory
+mapping.
+
+The usual `create_as_root` and `start_as_root` calls have variants called
+`create_as_root_with_size` and `start_as_root_with_size`.
+
+To read a buffer with a size prefix use:
+
+ size_t size;
+ buffer = flatbuffers_read_size_prefix(rawbuffer, &size);
+
+The size is the size of the buffer excluding the size prefix. When
+verifying buffers the buffer and size arguments should be used. See also
+[monster_test.c] for an example.
+
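+A minimal sketch that combines building and reading, reusing the
+`myschema.fbs` example from the introduction (error handling omitted; the
+`_with_size` name follows the generated naming convention):
+
+    flatcc_builder_t builder;
+    void *raw, *buffer;
+    size_t raw_size, size;
+
+    flatcc_builder_init(&builder);
+    mytable_create_as_root_with_size(&builder, 1, 2);
+    raw = flatcc_builder_finalize_aligned_buffer(&builder, &raw_size);
+
+    /* Skip the size prefix before accessing the root table. */
+    buffer = flatbuffers_read_size_prefix(raw, &size);
+    assert(mytable_myfield1(mytable_as_root(buffer)) == 1);
+
+    flatcc_builder_aligned_free(raw);
+    flatcc_builder_clear(&builder);
+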
+Note that the size prefix ensures internal alignment but does not
+guarantee that the next buffer in a file can be appended directly
+because the next buffer's alignment is unknown and because it potentially
+wastes padding bytes. The buffer size at offset 0 can be increased to the
+needed alignment as long as endianness is handled and the size of the
+size field is subtracted, and zeroes are appended as necessary.
+
+## Namespaces
+
+The generated code is typically wrapped in a custom namespace and
+functions and definitions that are library specific are usually mapped
+into the namespace. We often use an empty namespace for custom types and
+`flatbuffers_` for library names, but usually a `foo_` prefix could also
+be used in both cases, where `foo` is a custom namespace.
+
+Note that the name `flatcc_emitter` is only used with the default emitter
+and the name [flatcc_builder] is only used for buffer management but not
+for constructing content. Once a valid buffer is ready, the common
+namespace (`flatbuffers`) and the schema specific (or empty) namespace are
+used with schema specific operations.
+
+All schema specific content is prefixed with a namespace to avoid
+conflicts - although the namespace is empty if the schema doesn't
+specify any. Note that the same schema can have multiple
+namespaces. An example of a namespace prefixed operation:
+
+ MyGame_Example_Monster_create_as_root(B, ... lots of args);
+
+To simplify this we can use a macro to prefix a namespace. The use
+of the name `ns` is arbitrary and we can choose different names for
+different namespaces.
+
+ #undef ns
+ #define ns(x) MyGame_Example_ ## x
+
+But the above doesn't work with nested calls to ns such as
+
+    ns(Monster_color_add(B, ns(Color_Green)));
+
+it would have to be:
+
+    ns(Monster_color_add)(B, ns(Color_Green));
+
+Therefore we have a helper macro that does allow nesting:
+
+ #undef ns
+ #define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Example, x)
+
+The common namespace can also be wrapped for a more consistent
+appearance:
+
+ #undef nsc
+ #define nsc(x) FLATBUFFERS_WRAP_NAMESPACE(flatbuffers, x)
+
+ nsc(string_ref_t) s;
+ s = nsc(string_create_str(B, "hello, world!"));
+
+instead of
+
+ flatbuffers_string_ref_t s;
+    s = flatbuffers_string_create_str(B, "hello, world!");
+
+
+## Error Codes
+
+Function return values can be grouped roughly into 4 groups: functions
+returning pointers, references, `size_t` lengths, and `int` status codes.
+Pointers and references return 0 on error. Sizes do not signal errors.
+Status codes return 0 on success or an error code that is usually -1.
+Status codes may be checked with `flatbuffers_failed(...)`.
+
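+For example, a status returning call can be wrapped like this (the
+`mytable_myfield1_add` name is assumed to follow the generated naming
+convention from the introduction example):
+
+    if (flatbuffers_failed(mytable_myfield1_add(B, 42))) {
+        /* handle the error, e.g. abandon buffer construction */
+    }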
+
+## Endianness
+
+The function `flatbuffers_is_native_pe()` provides an efficient runtime
+check for endianness. Since FlatBuffers are little endian, the function
+returns true when the native endianness matches the protocol endianness
+which for FlatBuffers is little endian. We do not hardcode little endian
+because it enables us to support other protocols in the future - for
+example the struct conversions may be very useful for big endian network
+protocols.
+
+> As of flatcc 0.4.0 it is possible to compile flatcc with native
+> big-endian support which has been tested on AIX. More details in
+> [README Endianness](https://github.com/dvidelabs/flatcc#endianness)
+
+
+By testing `is_native_pe` instead of specific compile time flags, fragile
+dependencies on such flags can be avoided.
+
+During build, vectors and structs behave differently from tables: A
+table updates one field at a time, doing endian conversion along the
+way. A struct is either placed in a table, and is converted by the table
+specific operation, or it is placed in a vector. A vector only does the
+endian conversion when the vector is finished, so when a vector is not
+created atomically with a single `create` call, the elements are placed on a
+stack. By default this is in native format, but the user may choose to
+place buffer encoded structs or scalars in the vector and call
+`vec_end_pe`. The same `push` operation can be used to place a
+natively encoded struct and a buffer encoded struct in the vector
+because it does no conversion at that point. Therefore there is also no
+`push_pe` method that would mean to push an unconverted element onto
+the stack. Only for tables and entire vectors does the pe operation make
+sense. If a vector wishes to push a buffer encoded struct when the
+vector is otherwise constructed in native encoding or vice versa, the
+vector may be extended empty and then assigned using any of the
+`assign`, `assign_from_pe` or `assign_to_pe` calls.
+
+We did not mention that a struct can also be a standalone object
+as a buffer root, and for that it has a `end_pe` call that essentially
+works like a single element vector without a length prefix.
+
+The `clone` operation is a more user-friendly `pe` operation which takes
+an object or a vector from an existing buffer and places it in a new
+buffer without endian conversion.
+
+### Deprecated
+
+__NOTE: `FLATBUFFERS_LITTLEENDIAN` is deprecated and will be removed in
+a future version. It just complicates endian handling.__
+
+The header files try to define `FLATBUFFERS_LITTLEENDIAN` to 0 or 1
+based on system definitions but otherwise leaves the flag undefined.
+Simply testing for
+
+ #if FLATBUFFERS_LITTLEENDIAN
+ ...
+ #endif
+
+will not fail if the endianness is undetected but rather give the
+impression that the system is big endian, which is not necessarily true.
+`flatbuffers_is_native_pe()` relies on the detected or system provided
+conversion functions from a suitable `endian.h` file when the header's
+own detection gives up (e.g. `le16toh(1) == 1`).
+Therefore, it is better to use `flatbuffers_is_native_pe()` in most
+cases. It also avoids making assumptions on whether the protocol is
+little or big endian.
+
+## Buffers
+
+A buffer can most simply be created with the `create_as_root` call for
+a table or a struct as seen earlier. The `as_root` part is just a thin
+wrapper around buffer start and stop calls, and using these directly
+allows for more flexibility. The `as_root` variants also automatically
+use the defined file identifier, if any.
+
+The build process begins with starting a buffer. The buffer may contain
+a struct or table, so one of these should be constructed subsequently.
+Structs are generally created inline in tables, only at the buffer level
+is a struct created independently. The API actually permits other
+formats, but the result will not be a valid flatbuffer.
+
+ flatcc_builder_ref_t root;
+ flatcc_builder_init(B);
+ /* 0 indicates no file identifier. */
+ flatcc_builder_buffer_start(B, 0);
+ root = /* ... construct a table or a struct */
+ flatcc_builder_buffer_end(B, root);
+
+`buffer_start` takes a file identifier as second argument. If null or a
+string with null characters, the identifier is not stored in the buffer.
+
+Regardless of whether a struct or table is declared as root in the schema or
+not, there are methods to automatically start both the buffer and struct or buffer
+and table such as `Monster_start/end_as_root`. This is also valid for
+nested buffers. If the schema has a file identifier, it is used as
+identifier for the created object. The alternative
+`create_as_root_with_identifier` allows for explicitly setting an id or
+explicitly dropping an id by providing a null argument. The
+corresponding reader function `Monster_as_root(buffer)` also has a
+`Monster_as_root_with_identifier(buffer, id)`. Here the id is ignored if the id
+is null, and otherwise the operation returns null if the id does not match.
+For the most part ids are handled transparently by these defaults.
+
+The buffer can be started with block alignment and/or a custom
+identifier using the `flatcc_builder_buffer_start_aligned`:
+
+ flatcc_builder_buffer_start_aligned(B, "myid", 16);
+ ...
+ flatcc_builder_buffer_end(B, root);
+
+The alignment can be 0, in which case the minimum required alignment is
+used, derived from the operations between `start/end`. The alignment argument
+is called `block_align` and is useful if the emitter operates on blocks
+such as encryption, cache line isolation, or compression blocks where
+the final buffer should align with the blocks used during construction.
+This can lead to significant zero padding just after the block header,
+depending on block size.
+
+The schema specified identifier is given as:
+
+ flatbuffers_identifier
+
+and defaults to null. The schema specified extension is given as:
+
+ flatbuffers_extension
+
+and defaults to null. Note that `flatbuffers_` is replaced by whatever
+namespace is chosen. Each specific schema type also has a named file
+identifier reflecting the identifier active when the type was defined,
+for example:
+
+ MyGame_Example_Monster_file_identifier
+
+This define is used when `create_as_root` automatically sets a file
+identifier.
+
+NOTE: before flatcc 0.6.1, the identifier was named
+
+ MyGame_Example_Monster_identifier (DEPRECATED)
+
+but that would conflict with a table field named `identifier` which
+happened often enough to be a problem. This naming is now removed on
+conflict and will be completely removed in a future version.
+
+When the buffer is ended, nothing special happens, but only at this point
+does it really make sense to access the resulting buffer. The default
+emitter provides a copy method and a direct buffer access method. These
+are made available in the builder interface and will return null for
+other emitters. See also [flatcc_builder.h] and the default emitter in
+`flatcc_emitter.h`.
+
+
+## Tables
+
+### Adding Fields
+
+If `Monster` is a table, we can create a Monster buffer (after
+builder init) as follows:
+
+ Monster_start(B);
+    Monster_hp_add(B, 80);
+ ...
+ flatcc_builder_buffer_create(B, Monster_end(B));
+
+All scalars and enums are added similarly to the `Monster_hp_add` call. We
+will subsequently see how to deal with other types.
+
+A table can also be created in a single operation using `create`:
+
+ Monster_ref_t m;
+ m = Monster_create(B, 80, ...);
+
+The create arguments are those taken by the individual fields' `add`
+operations, which is either a scalar, an enum, or a reference returned by
+another create or end call. Note that unlike the C++ interface, unions
+only take a single argument that is also accepted by the `add` operation
+of a union field. Deprecated fields are not included in the argument
+list.
+
+As of v0.5.3 the arguments are given in field id order which is usually
+the same as the schema listed order, except when id attributes are
+given explicitly. Using id order ensures version stability. Note that
+since deprecated fields are omitted from the argument list, deprecating
+a field can still break existing code.
+
+BREAKING: Prior to flatcc v0.5.3 the create call would use the schema order
+also when fields have id attributes specifying a different order. This
+could break code across versions and did not match the C++ behavior.
+It was also documented that the `original_order` attribute affected create
+argument order, but that was incorrect.
+
+NOTE: If the `original_order` attribute is set on a table, the `create`
+implementation adds fields to the table in schema listed order,
+otherwise it adds fields in order of decreasing size to reduce alignment
+overhead. Generally there should be no need to use the `original_order`
+attribute. This doesn't affect the call argument order, although that
+was incorrectly documented prior to v0.5.3.
+
+NOTE: the `create` and `create_as_root` operations are not guaranteed to
+be available when the number of fields is sufficiently large because it
+might break some compilers. Currently there are no such restrictions.
+
+Scalars and enums do not store the value if it matches the default
+value which is by default 0 and otherwise defined in the schema. To
+override this behavior, use `force_add`. In the monster example, health
+points default to 100 (percent), so if we wish to force store it in the
+buffer we could use:
+
+ Monster_hp_force_add(B, 100);
+
+Only scalar fields and enums have a `force_add` operation since only these
+types have a default value, and other types have a meaningful
+interpretation of null. (It is not quite clear if empty tables separate
+from null/absent are valid in all implementations).
+
+`force_add` may be useful when roundtripping data from a database where it is
+relevant to distinguish between any valid value and null. Most readers will not
+be able to tell the difference, but it is possible to inspect a flatbuffer to
+see if a table field is present, present and default, or absent, meaning null.
+
+NOTE: As of mid 2020, FlatBuffers added optional scalar table fields,
+supported in flatcc 0.6.1. Optional fields automatically imply `force_add`
+so that absence can represent null; therefore they have no separate
+`force_add` method and no default value other than `null`, i.e. they are
+null if not added.
+
+If Monster is declared as root, the above may also be called as:
+
+ Monster_start_as_root(B);
+    Monster_hp_add(B, 80);
+ ...
+ Monster_end_as_root(B);
+
+(Calling `Monster_end` instead would require a subsequent `buffer_end`
+call, and is basically a violation of nesting.)
+
+### Nested Tables
+
+Tables can be nested, for example the `mini` field may have type
+Monster again (a recursive type):
+
+ buffer_start(B);
+ Monster_start(B);
+    Monster_hp_add(B, 80);
+ Monster_start(B);
+ Monster_hp_add(B, 81);
+ ...
+    Monster_mini_add(B, Monster_end(B));
+ ...
+ flatcc_builder_buffer_end(B, Monster_end(B));
+
+The child Monster table may be created before the parent or, as above,
+between the parent table's start and end. If created before, the
+reference must be stored until it can be added. The only requirement is
+that start and
+end are balanced, that the sub-table is ended before the parent, and
+that both are created in the same buffer (nested buffers can be created
+while the parent buffer is still being created, similar to sub-tables,
+so it is possible to mess this up):
+
+ Monster_ref_t root, mini;
+
+ buffer_start(B);
+ Monster_start(B);
+ Monster_hp_add(B, 81);
+ mini = Monster_end(B);
+
+ Monster_start(B);
+ Monster_hp_add(B, 80);
+ Monster_mini_add(B, mini);
+ root = Monster_end(B);
+
+    flatcc_builder_buffer_end(B, root);
+
+
+Rather than adding a child table explicitly, it can be started and ended
+as an operation on the field name, here with `Monster_mini_start/end`:
+
+ Monster_ref_t root;
+
+ Monster_start(B);
+    Monster_hp_add(B, 80);
+ Monster_mini_start(B);
+ Monster_hp_add(B, 81);
+ Monster_mini_end(B);
+ root = Monster_end(B);
+
+ flatcc_builder_buffer_end(B, root);
+
+We can repeat the table nesting as deep as we like, provided our
+builder is willing to allocate enough stack space.
+
+**Warning**: It is possible to use the wrong table type operations
+between `start/end` - don't do that. It is a tradeoff between usability
+and type safety.
+
+Note that vectors, strings and structs map several standard operations
+to a field name, for example `mytable_myfield_push(B, x)`. This is not the
+case with table fields which only map `start/end/create` in part because it
+would never terminate for recursive types and in part because each table
+is different making a generic mapping rather complex and with very long
+names.
+
+A table may be created with a constructor, but it requires all
+non-scalar objects to be references or pointers. Struct fields must be
+pointers to zero padded structs, and strings, vectors and tables must be
+references. The constructors are probably most useful for simple tables
+with mostly scalar values (here we use the original Monster fields and
+leave out any we have invented for the sake of illustration):
+
+IMPORTANT: objects can generally only be created within a buffer
+context, i.e. after `buffer_start`. For example calling
+`flatbuffers_uint8_vec_create` before `Monster_create_as_root`
+technically violates this rule because the create call also starts the
+buffer. It is, however, allowed at the top level. For nested buffers
+(see later) this must be avoided because the vector would end up in the
+wrong buffer.
+
+ Monster_ref_t m;
+ uint8_t invdata[4] = { 1, 2, 3, 4 };
+ Vec3_t vec;
+
+ flatbuffers_uint8_vec_ref_t inventory =
+ flatbuffers_uint8_vec_create(B, invdata, 4);
+ m = Monster_create(B, &vec, 150, 80, name, inventory,
+ Color_Red, Any_as_NONE());
+    flatcc_builder_buffer_create(B, m);
+
+or
+
+ Monster_create_as_root(B, &vec, 150, 80, name, inventory,
+ Color_Red, Any_as_NONE());
+
+## Packing tables
+
+By reordering the fields, the table may be packed better, or be better
+able to reuse an existing vtable. The `create` call already does this
+unless the attribute `original_order` has been set. Unions present a
+special problem since it is two fields treated as one and the type field
+will generally waste padding space if stored in order:
+
+To help pack unions better these can be added with the type
+separate from the value reference using `add_type(B, test.type)`,
+`add_value(B, test)` where the value is only added if the type is
+not `NONE`. The `add_type` should be called last since it is the
+smallest type.
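+
+As a sketch, assuming `test` is a union field of type `Any` and
+`weapon_ref` is a previously created `Weapon` table reference:
+
+    Any_union_ref_t test = Any_as_Weapon(weapon_ref);
+
+    Monster_test_add_value(B, test);     /* stored only if type is not NONE */
+    /* ... add other, larger fields here ... */
+    Monster_test_add_type(B, test.type); /* add the small type field last */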
+
+The same field should not be added more than once. Internal
+reservations that track offset fields may otherwise overflow. An
+assertion will fail in debug builds.
+
+Required table fields will be asserted in debug builds as part of the
+`end/create` call. Only offset fields can have a required attribute.
+
+The generated `monster_test_reader.h` from [monster_test.fbs] shows how
+the default packing takes place in generated `create` calls, see for
+example the typealias test. Note that for example vectors are stored
+together with integers like `uint32` because references to vectors have
+the same size as `uint32`.
+
+
+## Strings
+
+Strings can be added to tables with zero terminated strings as source:
+
+ Monster_start(B);
+ ...
+ Monster_name_create_str(B, "Mega Monster");
+ Monster_end(B);
+
+or strings potentially containing zeroes:
+
+ #define MONSTER "Mega\0Monster"
+ Monster_start(B);
+ ...
+ /* Includes embedded zero. */
+ Monster_name_create(B, MONSTER, sizeof(MONSTER));
+ Monster_end(B);
+
+or a zero terminated source up to at most `max_len` characters:
+
+ #define MONSTER "Mega\0Monster"
+ Monster_start(B);
+ ...
+ /* "Mega" */
+ Monster_name_create_strn(B, MONSTER, 12);
+ Monster_end(B);
+
+The `create_str` and `create_strn` versions find the string length via
+strlen and strnlen respectively. `string_append` also has `_str/_strn`
+versions.
+
+A string can also be created from an existing flatbuffer string in which
+case the length is expected to be stored 4 bytes before the pointer in
+little endian format, and aligned properly:
+
+ Monster_name_clone(B, mybufferstring);
+
+or, create a string at most 4 characters long starting at 0-based index
+10, if present:
+
+ Monster_name_slice(B, mybufferstring, 10, 4);
+
+If index or index + len goes beyond the source, the result is truncated
+accordingly, possibly resulting in an empty string.
+
+A string can also be created independently. The above are just shortcuts
+for that:
+
+ flatbuffers_string_ref_t monster_name;
+    monster_name = flatbuffers_string_create_str(B, "Mega Monster");
+ Monster_name_add(B, monster_name);
+
+Strings are generally expected to be UTF-8, but any binary data will be
+stored. Zero termination or embedded control codes are included as is.
+The string gets a final zero termination regardless, not counted in the
+string length (in compliance with the FlatBuffers format).
+
+A string can also be constructed from a more elaborate sequence of
+operations. A string can be extended, appended to, or truncated and
+reappended to, but it cannot be edited after other calls including calls
+to update the same string. This may be useful if stripping escape codes
+or parsed delimiters, etc., but here we just create the same "Mega
+Monster" string in a more convoluted way:
+
+ flatbuffers_string_ref_t name;
+ char *s;
+ #define N 20
+ Monster_start(B);
+ ...
+ flatbuffers_string_start(B);
+ flatbuffers_string_append(B, "Mega", 4);
+ flatbuffers_string_append(B, " ", 1);
+ s = flatbuffers_string_extend(B, N);
+ strncpy(s, "Monster", N);
+ flatbuffers_string_truncate(B, N - strlen(s));
+ name = flatbuffers_string_end(B);
+ Monster_name_add(B, name);
+ ...
+ Monster_end(B);
+
+`flatbuffers_string_create...` calls are also available when creating
+the string separate from adding it to a table, for example:
+
+    flatbuffers_string_ref_t name;
+ name = flatbuffers_string_create_str(B, "Mini Monster");
+
+It is guaranteed that any returned string buffer is zero filled and
+has an extra zero after the requested length such that strlen can be
+called on the content, but only the requested bytes may be updated.
+
+Every call only returns the substring being added to the string in that
+operation. It is also possible to call `flatbuffers_string_edit` to get a
+modifiable pointer to the start of the string.
+
+`flatbuffers_string_reserved_len(B)` returns the current string length
+including any embedded zeroes, but excluding final zero termination. It
+is only valid until `string_end` is called.
+
+See [flatcc_builder.h] for detailed documentation. Essentially `extend`
+reserves zeroed space on the stack and returns a buffer to the new
+space, and truncate reduces the overall size again, and the string is
+then given the final length and a zero termination at the end.
+
+There is no endian conversion (except internally for the string length),
+because UTF-8 strings are not sensitive to endianness.
+
+Like tables, the string may be created while a parent container is being
+constructed, or before.
+
+Strings can also be used as vector elements, but we will get to that when
+discussing vectors.
+
+## Structs
+
+Structs in tables can be added as:
+
+ Monster_pos_create(B, 1, 2, 3);
+
+The above essentially does the following:
+
+ Vec3_t *v;
+ v = Monster_pos_start(B);
+    Vec3_assign(v, 1, 2, 3);
+ Monster_pos_end(B);
+
+Some versions of the monster schema have extra test fields - these would
+break the assign approach above because there would be extra arguments.
+Instead we can rely on the zero initialization and assign known fields:
+
+ Vec3_t *v;
+ v = Monster_pos_start(B);
+ v->x = 1, v->y = 2, v->z = -3.2;
+ Monster_pos_end(B);
+
+`Monster_pos_end_pe(B)` can be used when the struct is known to be
+little endian (pe for protocol endian, meaning no conversion is necessary),
+for example copied from an existing buffer, but then `clone` is a better
+choice:
+
+ Monster_pos_clone(B, &v);
+
+When the struct is created alone for use as root:
+
+ Vec3_ref_t root;
+    root = Vec3_create(B, 1, 2, 3);
+ flatcc_builder_buffer_create(B, root);
+
+An existing struct can be added as:
+
+ Vec3_t v;
+ Vec3_assign(&v, 1, 2, 3);
+ /* v does not have to be zero padded. */
+ Monster_pos_add(B, &v);
+
+When adding a struct that is already little endian, presumably from an
+existing buffer, it can be cloned using:
+
+ Monster_pos_clone(B, &v);
+
+Clone assumes the source struct is both little endian and that padding
+is already zeroed (the example ignores error handling). It is equivalent
+to the following, where `end_pe` does no conversion:
+
+ *Monster_pos_start(B) = v;
+ Monster_pos_end_pe(B);
+
+There are several assignment types that convert between host (native)
+endianness and buffer endianness. We use `pe` to indicate
+`protocol_endian` rather than just `le` for `little endian` because it
+allows us to change endianness to big endian in the future and it
+more clearly states the intention. While big endian is not allowed in
+FlatBuffers, big endian structs may be useful in other network
+protocols - but it is not currently supported because it would force
+little endian platforms to support byte-swapping. The operations are:
+
+`assign_from_pe`, `assign_to_pe`, `copy`, `copy_from_pe`,
+`copy_to_pe`, `to_pe` and `from_pe`.
+
+All the copy operations take a const pointer as source, and
+`to/from_pe` is just copy with the same source and destination:
+
+ Vec3_t v, v2;
+ Vec3_assign_to_pe(&v2, 1, 2, 3);
+ Vec3_copy_from_pe(Vec3_clear(&v), &v2);
+ Vec3_to_pe(&v);
+
+`from_pe` means from little endian to native endian, and `to_pe`
+is the opposite. On little endian platforms all copy operations behave
+the same and only move fields, not padding. `to/from_pe` conversion
+will leave deprecated fields either as they were, or zero them, because
+the operation may be skipped entirely on protocol endian native platforms.
+
+While struct fields cannot be deprecated officially, they are supported
+if the schema compiler is flagged to accept them. The deprecated struct
+fields are renamed and assigned 0 when using assign or copy, and
+assign / create has no argument for them.
+
+Because padding can carry noise and unintended information, structs
+should be cleared before assignment - but if used as a source to copy,
+the padding is not copied, so only the destination needs to be zeroed.
+
+If a struct is nested, the assign operation includes all fields as if
+the struct was flattened:
+
+ typedef struct Plane Plane_t;
+ struct Plane {
+ Vec3_t direction;
+ Vec3_t normal;
+ };
+ Plane_t plane;
+ Plane_clear(&plane);
+ Plane_assign(&plane, 1, 2, 3, 7, 8, 9);
+
+Structs can also be created standalone, similar to tables and vectors,
+but FlatBuffers only support this when the struct is used as root.
+
+Assuming Vec3 is declared as root, a buffer only holding a Vec3 struct
+can be created using:
+
+ Vec3_create_as_root(B, 1, 2, 3);
+
+Important: do not store the above as a nested buffer - it would be
+missing the vector size field. If `Monster_playground` is a ubyte vector
+with `nested_flatbuffer` attribute, then
+`Monster_playground_start/end_as_root` may be used.
+
+Structs also support `start/end_as_root`. In this case `start` returns
+the struct pointer, and `end_pe_as_root` is supported:
+
+ Vec3_t *v;
+ v = Vec3_start_as_root(B);
+ v->x = 1, v->y = 2, v->z = 3;
+ Vec3_end_as_root(B);
+
+(Be careful with the different result codes since a table's `start_as_root`
+returns an integer result code where 0 is success, while a struct returns
+a pointer that is null on failure.)
+
+The following also creates a buffer at top-level, but it may also be
+added as a nested buffer because the stack frame detects the nesting:
+
+ Vec3_t *v;
+    flatcc_builder_buffer_start(B, 0);
+ v = Vec3_start(B);
+ v->x = 1, v->y = 2, v->z = 3;
+ flatcc_builder_buffer_end(B, Vec3_end(B));
+
+or
+
+    flatcc_builder_buffer_start(B, 0);
+ ...
+ Monster_start(B);
+    flatcc_builder_buffer_start(B, 0);
+ v = Vec3_start(B);
+ v->x = 1, v->y = 2, v->z = 3;
+ Monster_playground_add(B,
+ flatcc_builder_buffer_end(B, Vec3_end(B)));
+ flatcc_builder_buffer_end(B, Monster_end(B));
+
+or
+
+ flatcc_builder_buffer_ref_t nested_root;
+    flatcc_builder_buffer_start(B, 0);
+ nested_root = Vec3_create_as_root(B, 1, 2, 3);
+ Monster_start(B);
+ Monster_playground_add(B, nested_root);
+ flatcc_builder_buffer_end(B, Monster_end(B));
+
+A `buffer_ref_t` can be used as `uint8_vec_ref_t` when the
+buffer is nested, and otherwise the reference cannot be used
+for anything other than testing for failure. The buffer content
+should match the type declared in a `nested_flatbuffers` attribute
+but it isn't enforced, and a root can be stored in any field of
+[ubyte] type.
+
+When `Monster_playground` is declared as nested:
+
+ ...
+ Monster_start(B);
+ Monster_playground_create_as_root(B, 1, 2, 3);
+ flatcc_builder_buffer_end(B, Monster_end(B));
+ ...
+
+Be aware that `Vec3_t` is for native updates while `Vec3_struct_t` is a const
+pointer to an endian encoded struct used in the reader interface, and actually
+also as source type in the clone operation.
+
+### Fixed Length Arrays in Structs
+
+As of flatcc 0.6.0 it is possible to have fixed length arrays as struct
+members. A fixed length array is equivalent to having a struct field repeated
+one or more times. The schema syntax is `name : [type:count];` similar to an
+ordinary struct field `name : type;`. The type is any type that can be a
+valid struct field type, including enums and nested structs. The size
+cannot be 0 and the overall size is limited by the maximum size of the
+containing struct, which is typically 65535 (2^16-1).
+
+For example, given the schema:
+
+ struct MyStruct {
+ counters:[int:3];
+ // char is only valid as a fixed length array type
+ name:[char:6];
+ }
+ table MyTable {
+ mystruct:MyStruct;
+ }
+
+The table can be created with:
+
+ ns(MyStruct_t) *x;
+ ns(MyTable_start_as_root(B));
+ x = ns(MyTable_mystruct_start(B));
+ x->counters[0] = 1;
+ x->counters[1] = 2;
+ x->counters[2] = 3;
+ strncpy(x->name, "Kermit", sizeof(x->name));
+ ns(MyTable_mystruct_end(B));
+ ns(MyTable_end_as_root(B));
+
+Note that char arrays are not zero terminated but they are zero padded, so
+strncpy is exactly the right operation to use when assigning to char
+arrays, at least when they do not contain embedded nulls, which is also
+valid.
+Char arrays are expected to be ASCII or UTF-8, but an application may use
+other encodings if this is clear to all users.
+
+With assignment:
+
+ int data[3] = { 1, 2, 3 };
+ ns(MyStruct_t) *x;
+ ns(MyTable_start_as_root(B));
+ x = ns(MyTable_mystruct_start(B));
+ // Careful: the name argument does not use strncpy internally
+ // so the source must be at least the expected length
+ // like other array arguments. Strings can have embedded nulls.
+    ns(MyStruct_assign(x, data, "Kermit"));
+ ns(MyTable_mystruct_end(B));
+ ns(MyTable_end_as_root(B));
+
+To read a struct, the pointer to the struct is retrieved first:
+
+ int sum;
+ int i;
+ const char *name;
+ size_t name_len;
+ ns(MyTable_table_t) t;
+ ns(MyStruct_struct_t) x;
+
+ t = ns(MyTable_as_root(buf));
+ x = ns(MyTable_mystruct_get(t));
+    for (sum = 0, i = 0; i < ns(MyStruct_counters_get_len()); ++i) {
+        sum += ns(MyStruct_counters_get(x, i));
+    }
+    // char arrays are endian neutral, so we can use pointer access.
+    name = ns(MyStruct_name_get_ptr(x));
+    name_len = strnlen(name, ns(MyStruct_name_get_len()));
+    printf("Added %d counters from %.*s\n", sum, (int)name_len, name);
+    // char arrays can also be accessed one element at a time:
+    // ns(MyStruct_name_get(x, i));
+
+An alternative to `strnlen` is to strip trailing zeroes, which allows for
+char arrays with embedded zeroes, but there is no direct support for this.
+The JSON printer uses this approach to shorten the printed char array string.
+
+The `_get` suffix can be omitted in the above if the flatcc `-g` option
+has not been supplied to reduce the risk of name conflicts, but not for
+`_get_len` and `_get_ptr`.
+
+Note that it is not possible to have fixed length arrays as part of a table but
+it is possible to wrap such data in a struct, and it is also possible to have
+vectors of structs that contain fixed length arrays.
+
+
+## Nested Buffers
+
+These are discussed under the Structs and Tables sections, but it is worth
+noting that a nested buffer can also be added as a pe ubyte vector,
+which is probably the original intention with nested buffers. However,
+when doing so it can be difficult to ensure the buffer is correctly
+aligned. The untyped `flatcc_builder` has various options to deal with
+this, but with generated code it is better to create a nested buffer
+inline when suitable (with nested `buffer_start/end` or
+`mytable_myfield_create_as_root`) - for example a message wrapper with
+a union of tables holding a buffer for a specific message type. In other
+cases the buffer may truly be created independently of the current
+buffer and then it can be added with controlled alignment using either
+the `flatcc_builder` api for full control, or the `nest` operation on
+nested table and struct fields:
+
+To create and add a ubyte vector with a higher alignment than the single
+byte alignment of ubyte, the following operation is available as an
+operation on a nested buffer field:
+
+ Monster_playground_nest(B, void *data, size_t size, uint16_t align);
+
+If alignment is unknown, it can be set to 0, and it will default to 8
+for nested table types, and to the struct alignment for struct buffers.
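+
+For example (a sketch; `extbuf` and `extsize` refer to an independently
+built and finalized buffer):
+
+    Monster_playground_nest(B, extbuf, extsize, 0);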
+
+Block alignment is inherited from the parent buffer so the child buffer
+ends up in its own set of blocks, if block alignment is being used. If
+the nested buffer needs a different block alignment, the `flatcc_builder`
+api must be used.
+
+All structs and tables have a `start/end/create_as_root` even if they
+are not referenced by any `nested_flatbuffers` field, and they will
+create [ubyte] vectors containing a nested buffer, but only [ubyte]
+fields with the `nested_flatbuffers` attribute will have dedicated
+`start/end/create_as_root` operations on the field name. Structs also
+have `end_pe_as_root`.
+
+
+## Scalars and Enums
+
+Scalars keep their original type names `uint8_t`, `double`, etc, but
+they get some operations similar to structs. These are contained in a
+namespace which by default is `flatbuffers_`, for example:
+
+ uint16_t *flatbuffers_uint16_to_pe(uint16_t *p);
+ uint16_t *flatbuffers_uint16_from_pe(uint16_t *p);
+ flatbuffers_bool_t *flatbuffers_bool_to_pe(flatbuffers_bool_t *p);
+ flatbuffers_bool_t *flatbuffers_bool_from_pe(flatbuffers_bool_t *p);
+
+These may be used freely, but are primarily present as an interface to
+the vector operations also defined for structs.
+
+Enums have similar definitions which may be used to convert endianness
+without being concerned with the underlying integer type, for example:
+
+ Color_enum_t *Color_to_pe(Color_enum_t *p);
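+
+For example, a value can be converted in place (a minimal sketch):
+
+    uint16_t x = 1000;
+
+    flatbuffers_uint16_to_pe(&x);   /* native to protocol endian */
+    flatbuffers_uint16_from_pe(&x); /* and back to native */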
+
+## Vectors
+
+Vectors can be created independently, or directly when updating a table - the
+end result is the same. Builder vector operations always reference element
+values by pointer, or by reference for offset types like tables and strings.
+
+ uint8_t v;
+ Monster_inventory_start(B);
+ v = 1;
+ flatbuffers_uint8_vec_push(B, &v);
+ v = 2;
+ flatbuffers_uint8_vec_push(B, &v);
+ v = 3;
+ flatbuffers_uint8_vec_push(B, &v);
+ Monster_inventory_end(B);
+
+or
+
+ flatbuffers_uint8_vec_ref_t inv;
+ uint8_t v;
+ flatbuffers_uint8_vec_start(B);
+ v = 1;
+ flatbuffers_uint8_vec_push(B, &v);
+ v = 2;
+ flatbuffers_uint8_vec_push(B, &v);
+ v = 3;
+ flatbuffers_uint8_vec_push(B, &v);
+ inv = flatbuffers_uint8_vec_end(B);
+ Monster_inventory_add(B, inv);
+
+Because it can be tedious and error-prone to recall the exact field
+type, and because the operations are not type safe (any kind of push
+would be accepted), some vector operations are also mapped to the field
+name:
+
+ uint8_t v;
+ Monster_inventory_start(B);
+ v = 1;
+ Monster_inventory_push(B, &v);
+ v = 2;
+ Monster_inventory_push(B, &v);
+ v = 3;
+ Monster_inventory_push(B, &v);
+ Monster_inventory_end(B);
+
+Note: vector operations on a type use the `_vec_<operation>` syntax, for
+example `uint8_vec_push` or `Monster_vec_push`, while operations that are mapped
+onto table field names of vector type do not use the `_vec` infix because it is
+not a type name, for example `Monster_inventory_push`.
+
+A slightly faster operation preallocates the vector:
+
+ uint8_t *v;
+ Monster_inventory_start(B);
+ v = Monster_inventory_extend(B, 3);
+ v[0] = 1, v[1] = 2, v[2] = 3;
+ v = Monster_inventory_extend(B, 2);
+ v[0] = 4, v[1] = 5;
+ Monster_inventory_end(B);
+
+Push just extends one element at a time. Note that `extend` returns the
+pointer to the extended vector segment. The full vector can be accessed
+with `edit` and `reserved_len` between `start/end` (recalling that pointers
+cannot be reused across buffer calls):
+
+ uint8_t *v, i;
+ uint8_t data[] = { 1, 2 };
+ Monster_inventory_start(B);
+ Monster_inventory_push(B, &data[0]);
+ Monster_inventory_push(B, &data[1]);
+ v = Monster_inventory_edit(B);
+ for (i = 1; i < Monster_inventory_reserved_len(B); ++i) {
+ v[i] = v[i - 1] + v[i];
+ }
+ Monster_inventory_end(B);
+
+Note that the name `reserved_len` is to avoid confusion with
+`_vec_len` read operation. It also indicates that it is not the final
+size since it may change with `truncate/extend`.
+
+A vector can also contain structs. Let us extend the Monster example
+with a vector of positions, so we can have a breadcrumb trail:
+
+ Monster_breadcrumbs_start(B);
+ Vec3_vec_push_create(B, 1, 2, 3);
+ Vec3_vec_push_create(B, 3, 4, 5);
+ Monster_breadcrumbs_end(B);
+
+or
+
+ Monster_breadcrumbs_start(B);
+ Monster_breadcrumbs_push_create(B, 1, 2, 3);
+ Monster_breadcrumbs_push_create(B, 3, 4, 5);
+ Monster_breadcrumbs_end(B);
+
+or
+
+    Vec3_t *trails;
+    Monster_breadcrumbs_start(B);
+    trails = Monster_breadcrumbs_extend(B, 2);
+    Vec3_assign(&trails[0], 1, 2, 3);
+    Vec3_assign(&trails[1], 4, 5, 6);
+ Monster_breadcrumbs_end(B);
+
+The `vec_start/extend/end/end_pe/create/create_pe/clone/slice` are
+translated into similar calls prefixed with the field name instead of
+`vector` and except for `start`, the calls also add the vector to the
+table if successful, for example:
+
+ uint8_t data[] = { 1, 2, 3 };
+ Monster_inventory_create(B, data, 3);
+ Monster_breadcrumbs_slice(B, some_other_breadcrumbs, 0, 10);
+
+Vector operations that are allowed between `vec_start` and
+`vec_end(_pe)` are also mapped. These are
+`vec_extend/append/truncate/edit/reserved_len`, and `push/push_create/push_copy`.
+`push_copy` ensures only valid fields are copied, not zero padding (or
+the unofficial deprecated fields).
+
+A struct `push_clone` is the same as a `push_copy` operation
+because structs are stored inline in vectors - with the
+exception of union vectors which have `push_clone` that does the
+right thing.
+
+The `add` call adds a vector created independently from the table field,
+and this is what is going on under the surface in the other calls:
+
+ Vec3_t x;
+ Vec3_vec_ref_t inv;
+
+ /* Clear any padding in `x` because it is not allocated by builder. */
+ Vec3_assign(Vec3_clear(&x), 3, 4, 5);
+ Vec3_vec_start(B);
+ Vec3_vec_push_create(B, 1, 2, 3);
+    Vec3_vec_push(B, &x);
+ inv = Vec3_vec_end(B);
+
+ Monster_breadcrumbs_add(B, inv);
+
+As always, a reference such as `inv` may only be used at most once, and
+should be used once to avoid garbage.
+
+Note that `Vec3_vec_start` would create an independent struct instead of a
+vector of structs. Also note that `vec_ref_t` is a builder specific
+temporary type while `vec_t` is intended as a const pointer to the first
+element in an existing buffer in little endian encoding with a size
+prefix (to be used with clone, for example).
+
+An existing Vec3 struct can also be pushed with `Vec3_vec_push(B, &v)`. The
+argument must be zero padded. Because vectors are converted at the end,
+there is no `push_pe`, but a struct may be pushed in little endian format
+on all platforms if `vec_end_pe` is used at the end.
+
+A vector may also be created from an existing array:
+
+ uint8_t data[] = { 1, 2, 3 };
+ Monster_inventory_add(B, flatbuffers_uint8_vec_create(B, data, 3));
+
+This also applies to arrays of structs as long as they are properly zero
+padded. `create_pe` is similar but does not do any endian conversion,
+and is similar to `clone` except there is no header prefix.
+
+Likewise an existing vector with proper zero padding may be appended
+using the `extend` operation. The format must be native or little endian
+depending on whether `vec_end` or `vec_end_pe` is called at the end.
+
+All vectors are converted to little endian when the `end` command is
+called. `end_pe` prevents this from happening.
+
+`clone` and `slice` can be used to copy an entire, or a partial,
+array from an existing buffer. The pointer must be to the first vector
+element in little endian format, and it must have a size prefix and be
+aligned (like any flatbuffer vector). `slice` takes a base-0 index and
+a vector length where the result is truncated if the source is not
+large enough.
+
+ Monster_inventory_clone(B, v);
+
+or
+
+    Monster_inventory_add(B, flatbuffers_uint8_vec_clone(B, v));
+
+or
+
+    Monster_inventory_add(B, flatbuffers_uint8_vec_slice(B, v, 2, 4));
+
+or
+
+ Monster_inventory_slice(B, v, 2, 4);
+
+A vector of strings can be constructed as follows (`friends` is a string
+vector field that we just invented for the occasion):
+
+ flatbuffers_string_ref_t friend, *p;
+ Monster_friends_start(B);
+    friend = flatbuffers_string_create_str(B, "Peter Pan");
+ Monster_friends_push_create_str(B, "Shrek");
+ Monster_friends_push_create_str(B, "Pinnochio");
+ Monster_friends_push_create_str(B, "Pinnochio");
+ Monster_friends_push_create(B, "Hector", 6);
+    Monster_friends_push(B, friend);
+ p = Monster_friends_extend(B, 1);
+    *p = flatbuffers_string_create_str(B, "Cinderella");
+ Monster_friends_push_start(B);
+    flatbuffers_string_append(B, "The Little ", 11);
+    flatbuffers_string_append(B, "Mermaid", 7);
+ Monster_friends_push_end(B);
+ Monster_friends_end(B);
+
+Vectors and strings have a second argument to start, see also the `spawn` example
+below.
+
+Finally, vectors can contain tables. Table vectors are offset
+vectors just like string vectors. `push_start` pushes a new table and
+allows for updates until `push_end`. If we have a spawn vector of monsters in
+the Monster table, we can populate it like this:
+
+ Monster_spawn_start(B);
+ Monster_vec_push_start(B);
+    Monster_hp_add(B, 27);
+ Monster_vec_push_end(B);
+ Monster_vec_push_create(B,
+ /* Approximate argument list for illustration only. */
+        &vec, 150, 80, name, inventory, Color_Red, Any_as_NONE());
+ Monster_spawn_end(B);
+
+The push operation has constructors `push_start/end/create` for table,
+struct, and string elements. String elements also have
+`push_create_str/create_strn/clone/slice`. Structs also have
+`push_copy`. Between `push_start` and
+`push_end` the operations valid for the given table or string element can be
+used (typically `add` for tables, and `append` for strings).
+
+Instead of `Monster_vec_push_start` we can also use
+`Monster_spawn_push_start` etc. - in this case the child type is the
+same as the parent, but using the field specific `push_start` ensures we
+get the right table element type.
+
+`Monster_spawn_push_start(B)` takes no length argument because it is a
+table element, while `Monster_friends_push_start(B)` does because it is a
+string element (similar to a vector).
+
+`Monster_spawn_start(B)` should just be followed by push operations
+rather than following up with `Monster_spawn_extend(B, n)` because we
+risk leaving loose references that can lead to crashes. But handled
+carefully it is possible:
+
+    Monster_ref_t *mvec;
+ Monster_spawn_start(B);
+ mvec = Monster_spawn_extend(B, 2);
+ mvec[0] = Monster_create(B, ...);
+ mvec[1] = Monster_create(B, ...);
+ Monster_spawn_end(B);
+
+We can also push a reference to an independently created Monster table,
+all as seen before with strings.
+
+As of flatcc version 0.5.2 it is also possible to clone tables.
+Therefore we also have `push_clone` on vectors of tables.
+
+While the use of `extend` and `truncate` is possible with vectors of
+strings and tables, they should be used with care because the elements
+are references and will just end up as garbage if truncated. On the
+other hand, unused elements should be truncated since 0 (null) elements
+in an offset vector are not valid.
+
+A vector of tables or strings can be created using an externally built
+array of references, for example:
+
+ Monster_ref_t monsters[20];
+ Monster_vec_ref_t mvec;
+ monsters[0] = Monster_create(B, ...);
+ ...
+ mvec = Monster_vec_create(B, monsters, 20);
+
+By convention, create calls bypass the internal stack when the endian
+format is otherwise compatible, and thus feed the emitter directly.
+This is not possible with table and string vectors because the
+references in the source vectors must be translated into offsets.
+Therefore these create calls are similar to start, append, end calls.
+There is an internal, but unexposed `flatcc_builder` version
+`create_offset_vector_direct` which destroys the source vector instead
+of allocating a stack copy.
+
+## Unions
+
+Unlike the C++ FlatBuffers library, we do not expose a separate union
+type field except via a small struct with a union of typed references
+and a type field. This struct is given as the create argument, and when
+zero initialized it means the default NONE.
+
+Unions can be created with value specific `start/end/create` calls. The add
+call is not specialized since it takes a union reference:
+
+
+ Monster_test_Weapon_start(B);
+ Weapon_rounds_add(B, 50);
+ Monster_test_Weapon_end(B);
+
+or
+
+ Monster_test_Weapon_create(B, 50);
+
+or
+
+ Monster_test_Weapon_add(B, Weapon_create(B, 50));
+
+or
+
+ Monster_test_Pickup_start(B);
+ Pickup_location_create(B, 0, 0, 17);
+ Pickup_hint_create_str(B, "Jump High!");
+ Monster_test_Pickup_end(B);
+
+or
+
+ Pickup_ref_t test;
+ Pickup_start(B);
+ Pickup_location_create(B, 0, 0, 17);
+ test = Pickup_end(B);
+ Monster_test_add(B, Any_as_Pickup(test));
+
+or
+
+ Any_union_ref_t test;
+ Pickup_start(B);
+ Pickup_location_create(B, 0, 0, 17);
+ /* test.Pickup = Pickup_end(B); no longer possible as of v0.5.0 */
+ test.value = Pickup_end(B); /* As of v0.5.1. */
+ test.type = Any_Pickup;
+ Monster_test_add(B, test);
+
+The following is valid and will not return an error, but also has no effect:
+
+ Monster_test_add(B, Any_as_NONE());
+
+
+_Note: the union structure has been changed for v0.5.0, and v0.5.1.
+Both unions and union vectors are now represented by a struct with the
+fields { type, value } in the low level interfaces. Before 0.5.0 only
+unions of tables were supported._
+
+
+### Union Vectors
+
+The `monster_test.fbs` schema has a field named `manyany` in the Monster
+table. It is a vector of unions of type Any.
+
+We can create a vector using
+
+ Any_union_vec_ref_t anyvec_ref;
+
+ Any_vec_start(B);
+    Any_vec_push(B, Any_as_TestSimpleTableWithEnum(
+        TestSimpleTableWithEnum_create(B)));
+    anyvec_ref = Any_vec_end(B);
+    Monster_manyany_add(B, anyvec_ref);
+
+A union element can also be pushed with type specific `_push` or
+`_push_create` operations:
+
+ Monster_manyany_start(B);
+ Monster_manyany_push(B, Any_as_TestSimpleTableWithEnum(ref));
+ Monster_manyany_end(B);
+
+ Monster_manyany_start(B);
+ Monster_manyany_TestSimpleTableWithEnum_push(B, ref);
+ Monster_manyany_end(B);
+
+ Monster_manyany_start(B);
+ Monster_manyany_TestSimpleTableWithEnum_push_create(B, args);
+ Monster_manyany_end(B);
+
+and other similar operations, much like other vectors.
+
+Note that internally `anyvec_ref` is really two references, one to a type
+vector and one to a table vector. The vector is constructed as a single
+vector of unions and later split into two before final storage. If it is
+necessary to create a union vector from a vector of tables and types,
+the low level builder interface has a `direct` call to do this.
+
+Union vectors generally use more temporary stack space during
+construction because each element is a struct of type and reference
+which does not pack as densely as two separate vectors. In addition the
+separated type and table vectors must be constructed temporarily. The
+final buffer result is reasonably compact since the type vector does
+not use much space. Union vectors will also be somewhat slower to
+construct, but not unreasonably so.
+
+
+### Unions of Strings and Structs
+
+_Note: as of v0.5.0 unions can also contain strings and structs in
+addition to tables. Support for these types in other languages may vary,
+but C++ does support them too._
+
+All union values are stored by reference. Structs that are not unions
+are stored inline in tables and cannot be shared but unions of struct
+type are stored by reference and can be shared. A union value is
+therefore always a reference. This is mostly transparent because the
+generated table field methods have `create/start/end` calls for each union
+value type in addition to `add`.
+
+To illustrate the use of these variations we use the Movie table from
+[monster_test.fbs]:
+
+ namespace Fantasy;
+
+ table Attacker {
+ sword_attack_damage: int;
+ }
+
+ struct Rapunzel {
+ hair_length: uint16;
+ }
+
+ struct BookReader {
+ books_read: int;
+ }
+
+ union Character {
+ MuLan: Attacker = 2, // Can have name be different from type.
+ Rapunzel = 8, // Or just both the same, as before.
+ Belle: Fantasy.BookReader,
+ BookFan: BookReader,
+ Other: string,
+ Unused: string = 255
+ }
+
+ table Movie {
+ main_character: Character;
+ antagonist: Character;
+ side_kick: Character;
+ cameo: Character;
+ characters: [Character];
+ }
+
+
+and the mixed type test case from [monster_test.c]:
+
+
+ nsf(Character_union_ref_t) ut;
+ nsf(Rapunzel_ref_t) cameo_ref;
+ nsf(Attacker_ref_t) attacker_ref;
+ nsf(BookReader_ref_t) br_ref;
+ nsf(BookReader_t *) pbr;
+ nsf(Movie_table_t) mov;
+
+ nsf(Movie_start_as_root(B));
+ br_ref = nsf(BookReader_create(B, 10));
+ cameo_ref = nsf(Rapunzel_create(B, 22));
+ ut = nsf(Character_as_Rapunzel(cameo_ref));
+ nsf(Movie_main_character_Rapunzel_create(B, 19));
+ nsf(Movie_cameo_Rapunzel_add(B, cameo_ref));
+ attacker_ref = nsf(Attacker_create(B, 42));
+ nsf(Movie_antagonist_MuLan_add(B, attacker_ref));
+ nsf(Movie_side_kick_Other_create_str(B, "Nemo"));
+ nsf(Movie_characters_start(B));
+ nsf(Movie_characters_push(B, ut));
+ nsf(Movie_characters_MuLan_push(B, attacker_ref));
+ nsf(Movie_characters_MuLan_push_create(B, 1));
+ nsf(Character_vec_push(B, nsf(Character_as_Other(nsc(string_create_str(B, "other"))))));
+ nsf(Movie_characters_Belle_push(B, br_ref));
+ pbr = nsf(Movie_characters_Belle_push_start(B));
+ pbr->books_read = 3;
+ nsf(Movie_characters_Belle_push_end(B));
+ nsf(Movie_characters_Belle_push(B, nsf(BookReader_create(B, 1))));
+ nsf(Movie_characters_Belle_push_create(B, 2));
+ nsf(Movie_characters_Other_push(B, nsc(string_create_str(B, "another"))));
+ nsf(Movie_characters_Other_push_create_str(B, "yet another"));
+ nsf(Movie_characters_end(B));
+ nsf(Movie_end_as_root(B));
+
+Note that reading a union of string type requires a cast which can be
+seen in the full test case in [monster_test.c].
+
+## Error Handling
+
+The API generally expects all error codes to be checked but the
+following table and vector operations will accept and return an error:
+
+- `add` null reference to table, vector, or string.
+- `push` null reference to table or string.
+- `buffer_end/create` null reference to root.
+
+This can simplify pushing or adding atomically created objects, for
+example by adding a cloned vector to a table field.
+
+It is especially important to check start operations because the builder
+will not be in the expected stack frame context after failure and will
+not have reserved necessary internal memory, for example when adding a
+table field.
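+
+A sketch of a defensively written build sequence (status codes are 0 on
+success, references are 0 on failure):
+
+    flatcc_builder_ref_t root;
+
+    if (flatbuffers_failed(flatcc_builder_buffer_start(B, 0))) {
+        return -1;
+    }
+    if (flatbuffers_failed(Monster_start(B))) {
+        return -1; /* do not keep adding fields after a failed start */
+    }
+    Monster_hp_add(B, 80);
+    root = Monster_end(B);
+    if (!flatcc_builder_buffer_end(B, root)) {
+        return -1; /* buffer_end accepts a null root but returns an error */
+    }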
+
+On a server with a reasonable amount of memory using the default
+allocator, with an emitter that will not return errors, when it
+can be expected that inputs will not exceed the size constraints of the
+flatbuffer data types, and if the API is being used correctly, then there
+is no reason for failure and error handling may be skipped. However,
+it is sometimes desirable for servers to restrict a single client's
+memory usage, and then errors are very likely unless the source data is
+already limited. As an opposite example, an embedded device sending
+small network packages using a fixed but large enough allocation pool
+would be in total control and need not be concerned with any errors.
+
+
+
+## Type System Overview
+
+The generated methods for building buffers may look the same but
+have different semantics. For example `_clone` on a table field
+such as `Monster_enemy_clone` will actually create a table based
+on the content of a table in a another buffer, then add that
+table to the currently open table. But `Monster_clone` will
+create clone and just return a reference without adding the
+reference to any table. There is also `push_clone` which adds
+an element to an open vector. The same applies to many other
+operations.
+
+Basically there are the following different types of methods:
+
+- Methods on native flatbuffer types, such as
+  `flatbuffers_string_start`.
+- Methods on generated types such as `Monster_start`.
+- Methods on field members such as `Monster_enemy_start`.
+- Methods on vectors of the above such as
+  `flatbuffers_string_vec_start`, `Monster_vec_start`,
+  `Monster_inventory_vec_start`.
+- Slight adaptations for buffer roots and nested buffer roots.
+
+For unions and union vectors the story is more complex - and the
+API might need to be cleaned up further, but generally there are
+both union type fields, union value fields, and union fields
+representing both, and vectors of the same. In addition there
+are pseudo fields for each union member because `create` on a
+union does not make sense, but
+`Monster_myvariant_MyTable_create` does create a `MyTable`
+table and assigns it with the correct type to the fields
+`Monster_myvariant_type` and `Monster_myvariant`.
+
+
+## Cloning
+
+As of flatcc v0.5.2 it is also possible to clone tables, unions,
+vectors of tables, vectors of strings, and vectors of unions.
+Previously many operations did have a clone or a `push_clone`
+operator, but these were all raw byte copies. Table cloning and
+union cloning is significantly more involved since a simple copy
+will not work due to stored references, possible sharing of
+references, and because the required alignment of a table is hard
+to reason about without building a new table. Unions and union
+vectors are even more difficult.
+
+That said, cloning is now implemented for all relevant data
+types.
+
+All clone operations expect the content to originate from
+another finalized buffer. For scalars and structs there are
+copy operations that are almost the same as clone - they both
+avoid endian conversion.
+
+Structs have a special case with clone and copy: Whenever a
+struct is stored inline in the destination buffer, it behaves
+like copy. Whenever the destination is a buffer root, or a union
+member, the result is a reference to an independent memory
+block. When calling clone on a struct type the destination is
+unknown and an independent reference is created. If this is not
+the intention a `copy` operation can be used. When using field
+methods the destination type is known and the right thing will
+happen.
+
+Cloning a table will, by default, expand any shared references
+in the source into separate copies. This is also true when
+cloning string vectors, or any other data that holds references.
+Worst case this can blow up memory (which is also true when
+printing JSON from a buffer).
+
+It is possible to preserve the exact DAG structure when cloning.
+It may not be worthwhile for simple use cases, but it goes as
+follows:
+
+The builder has a pointer to a `flatcc_refmap_t` object. This is
+a fairly small stack allocated object that implements a
+hashtable. By default this pointer is null, and we have the
+above mentioned expansion. If it is not null, each newly cloned
+object will have its reference stored in the refmap. The next
+time the same object is cloned, the existing reference will be
+taken from the refmap instead. See source comments in
+`flatcc_refmap.h` and `flatcc_builder.h`, and `monster_test.c`
+clone tests.
+
+Note that, for example, it might be relevant to preserve DAG
+structure when cloning one object with all its sub-objects, but
+if it is cloned a second time, a new copy is desired still while
+preserving the inner DAG structure. This can be done by working
+with multiple refmaps and simply swapping them out via
+`flatcc_builder_set_refmap`. It is also possible to add
+references manually to a refmap before cloning.
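+
+A minimal sketch of DAG preserving cloning (here `src` is assumed to be
+a `Monster_table_t` read from a finalized source buffer, and the field
+names are illustrative):
+
+    flatcc_refmap_t refmap;
+
+    flatcc_refmap_init(&refmap);
+    flatcc_builder_set_refmap(B, &refmap);
+    Monster_start(B);
+    /* Shared references in the source are cloned only once. */
+    Monster_enemy_clone(B, Monster_enemy_get(src));
+    Monster_end(B);
+    flatcc_builder_set_refmap(B, 0);
+    flatcc_refmap_clear(&refmap);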
+
+Warning: the refmap MUST not hold any foreign references when
+starting a nested root clone or when cloning inside a nested
+buffer that has been started but not ended because it is
+invalid to share references between buffers and there are no
+safety checks for this.
+
+
+## Picking
+
+Picking is a method that is related to clone and also introduced
+with flatcc 0.5.2. A pick method is only defined on a table
+field or a struct field. Instead of taking a read reference
+of the same type as the field, it takes a reference to the same
+container type (table or struct). Essentially pick means: find
+myself in the other table, clone me, and add me to the new table
+which is currently open. So clone takes an entire table where
+pick takes a single field. Table cloning is implemented as a
+sequence of pick methods, one for each field, as can be seen in
+the generated builder source. A pick operation does nothing if
+the field is not set. Pick also works with refmaps because it
+does an internal clone operation. In the generated code, only
+clone on types will use the refmap, but other clone and pick
+operations do depend on these type clone methods.
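+
+A sketch of picking a single field from a table in another buffer
+(`src` is assumed to be a `Monster_table_t` from a finalized buffer):
+
+    Monster_start(B);
+    Monster_enemy_pick(B, src); /* clones src's enemy field, if set */
+    Monster_end(B);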
+
+
+## Sorting Vectors
+
+Vectors can be sorted, but not by the primary builder interface because:
+
+String and table elements cannot be accessed after they have been
+emitted. The emitter can do all sorts of async operations other than
+actually building a buffer, for example encrypting blocks and / or send
+partial buffers over the network. Scalars could be sorted, but the most
+efficient way of emitting vectors does not create a temporary vector but
+emits the source directly when endianness allows for it. Less
+significant, the buffer producer is likely busy processing content and /
+or on a resource constrained device. Altogether, it is much simpler to
+not support sorting at this interface level.
+
+To understand how sorting is implemented, let's first look at how an
+already sorted vector can be searched:
+
+Every vector of string, scalar and enum element types has a `find`
+operation in the reader interface that performs a binary search. Every
+vector of table and struct elements has a `find_by_<field_name>` iff
+there is a key attribute on at least one top-level scalar, enum or
+string field type. FlatBuffers do not officially allow for multiple key
+attributes, but if enabled, there will be a `find_by` operation for
+every keyed element field. In addition there is a `find` operation that
+maps to the first keyed field.
+
+The read interface returns a vector type, which is a const pointer, when
+accessing a table field of vector type. The find operation takes such a
+vector as first argument, and a key as second. Strings have variations
+to allow for keys with a given length (similar to strcmp vs strncmp).
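+
+For example, assuming the Monster table has a key attribute on the `Hp`
+field (a sketch):
+
+    MyGame_Example_Monster_vec_t monsters;
+    size_t index;
+
+    monsters = ...;
+    index = MyGame_Example_Monster_vec_find_by_Hp(monsters, 27);
+    if (index == flatbuffers_not_found) {
+        /* no monster with Hp == 27 */
+    }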
+
+This leads us to the sort interface:
+
+Every `find` and `find_by` operation has a matching `sort` and `sort_by`
+operation, and table and struct vectors map `sort` to the first keyed
+`sort_by` operation. The sort operation takes a non-const vector which
+has the type name suffix `_mutable_vec_t`. These vectors are not
+available via the reader interface and must be cast explicitly from
+`_vec_t` to `_mutable_vec_t`. When this is done, the vector can be
+sorted in-place in the buffer without any memory allocation and without
+any recursion.
+
+If the namespace is `flatbuffers`, a string vector is sorted by:
+
+    flatbuffers_string_vec_t vec;
+    vec = ...;
+    flatbuffers_string_vec_sort((flatbuffers_string_mutable_vec_t)vec);
+
+Scalar and enum vectors have similar inline sort operations, for
+example:
+
+    flatbuffers_uint8_vec_sort(flatbuffers_uint8_mutable_vec_t vec);
+
+For vectors of tables or structs the sort function is named by the key
+field. Assuming the Monster table has a key attribute on the `Hp` field,
+the following sort operation is available:
+
+ MyGame_Example_Monster_vec_t monsters;
+ monsters = ...;
+ MyGame_Example_Monster_vec_sort_by_Hp(
+ (MyGame_Example_Monster_mutable_vec_t)monsters);
+
+Note: this is the reader interface. Any kind of `ref_t` type used by the
+builder does not apply here. (Advanced: if an emitter builds a buffer, the
+ref type can be used to find the actual vector pointer and then it can
+be sorted by casting the pointer to a vector, even if the buffer isn't
+finished).
+
+Multiple keys per table or struct is an optional feature. Each key will
+have its own sort and find function similar to the above. The first key
+also has the shortcut:
+
+ MyGame_Example_Monster_vec_sort(m);
+
+The current implementation uses heap sort which is nearly as fast as
+quicksort and has a compact implementation that does not require
+recursion or external memory and is robust against DOS attacks by having
+worst case O(n log n). It is, however, not a stable sort. The sort
+assumes structs have a reasonable size so swap operations can be done
+efficiently. For large structs a dedicated sort operation building an
+external index vector would be better, but this is not supported.
+
+Note that a DAG is valid so there can be multiple vectors referring to
+the same table elements, and each can be sorted by a different key.
+
+The find operations are stable meaning they always return the lowest
+index of any matching key or `flatbuffers_not_found` which is larger
+than any other index.
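+
+For example, a hedged sketch of a keyed lookup using the `Hp` key from
+above (the exact generated names depend on the schema):
+
+    size_t i = MyGame_Example_Monster_vec_find_by_Hp(monsters, 42);
+    if (i == flatbuffers_not_found) {
+        /* no element with Hp == 42 */
+    } else {
+        MyGame_Example_Monster_table_t m = MyGame_Example_Monster_vec_at(monsters, i);
+        /* m is the element at the lowest matching index */
+    }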
+
+### Dangers of Sorting
+
+If a buffer was received over, say, an untrusted network the buffer
+should be verified before being accessed. But verification only makes it
+safe to read a buffer, not to modify a buffer because for example two
+vectors can be crafted to overlap each other without breaking any
+verification rules.
+
+Thus, sorting is intended to be done shortly after the buffer is
+constructed while it can still be trusted.
+
+Using find on a buffer that is supposed to be sorted, but isn't, can
+yield unexpected search results, but the result will always be an
+element in the vector being searched, not a buffer overrun.
+
+### Scanning
+
+Some vectors can be sorted by different keys depending on which version
+of `_sort_by` is being used. Obviously `_find_by` must match the
+sorted key.
+
+If we need to search for a key that is not sorted, or if we simply do
+not want to sort the vector, it is possible to use scanning operations
+instead by using `_scan` or `_scan_by`. Scanning is similar to find
+except that it does a linear search and it supports scanning from a
+given position.
+
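+Following the same naming pattern, a hedged sketch of a linear scan on an
+unsorted key (again assuming the `Hp` key from above; see the README below
+for the full set of scan variants, including scanning from a position):
+
+    size_t i = MyGame_Example_Monster_vec_scan_by_Hp(monsters, 42);
+    if (i != flatbuffers_not_found) {
+        /* first element with Hp == 42 in buffer order */
+    }
+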
+More information on scanning can be found in the
+[README](https://github.com/dvidelabs/flatcc#searching-and-sorting)
+file, and in the [monster_test.c] test file.
+
+
+## Example of different interface type users
+
+A resource constrained microcontroller is building flatbuffers from
+sensor data using an emitter that sends UDP packets of the flatbuffer
+as soon as enough data is ready. A server reassembles the packets or
+discards them if any UDP packet was lost. Once the buffer is assembled,
+the server sorts specific vectors such as temperature levels in the buffer
+before it sends the buffer upstream to a storage service through a
+TCP/IP connection. The analyzers perform tasks such as detecting
+abnormal temperature readings based on the sorted vector data.
+
+In the above example, the original sensor devices are not interested in
+the reader interface nor the sort interface. While the sort and find
+operations may be available, they are dead inline code that does not
+inflate the binary image size, but the `libflatccrt` library is
+linked in. The collecting server is not interested in the builder
+interface and does not link with the `libflatccrt` library but uses
+both the inline functions of the reader interface and the sort interface.
+The upstream data storage service uses no interface at all since it
+treats the buffers as binary blobs in a database indexed by device and
+time. The end users only use the read-only interface to visualize and
+analyze and have no need for the builder or the sort interface.
+
+
+## Special Emitters
+
+An emitter only needs to implement one function to replace or wrap the
+default emitter. See [flatcc_builder.h] on `flatcc_builder_emit_fun` for
+details, and also `emit_test.c` for a very simple custom emitter that
+just prints debug messages, and [flatcc_emitter.h].
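+
+As a rough sketch only - the authoritative signature of
+`flatcc_builder_emit_fun` is in [flatcc_builder.h] and the parameter types
+shown here are assumptions - a simple counting emitter could look like:
+
+    /* Counts emitted bytes; installed via the builder's custom init call. */
+    static int my_emit(void *emit_context, const flatcc_iovec_t *iov,
+            int iov_count, flatbuffers_soffset_t offset, size_t len)
+    {
+        size_t *total = emit_context;
+        int i;
+        (void)offset;
+        for (i = 0; i < iov_count; ++i) {
+            /* iov[i].iov_base / iov[i].iov_len is one fragment of the object */
+        }
+        *total += len;
+        return 0;
+    }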
+
+When adding padding, `flatcc_builder_padding_base` is used as base in iov
+entries and an emitter may detect this pointer and assume the entire
+content is just nulls. Usually padding is of limited size by its very
+nature so the benefit of handling this is also limited, but it, or a
+similar user provided constant, can be used for similar purposes:
+
+When creating a vector in a single operation from an external C-array,
+no copying takes place on the internal builder stack. Therefore it is
+valid to provide a null pointer or a valid array such as
+`flatcc_builder_padding_base` that is too small for the given length,
+provided that the emitter is aware of it. This in turn can be used to
+allocate space in the emitter's internal data structure so the vector can
+be filled after the fact if so desired. Pointer tagging may be another
+way to communicate special intent. Be aware that only `create` calls
+support this - any `append`, `start/end` or other dynamic operation will
+require valid input and will stack allocate temporary space.
+
+Emitters always receive a small table of iov entries that together form
+a single object including necessary headers and padding, for example a
+vector, a string, a nested buffer header, or a vtable. This is
+guaranteed by the api, but no details are provided about which build
+call produced the entries, in order to keep the interface simple and fast. If
+this is desired the user must hint the emitter out of band before
+calling the relevant build operation. This can also be done indirectly by
+setting `user_state` in the emitter and having the emitter inspect this
+setting.
+
+When adding vectors piecemeal using `append` or similar, as opposed to
+the zero-copy (or less-than-zero-copy) approach above, the memory cost is obviously
+higher, but unless the individual objects grow large, the stack will
+operate in hot CPU cache so the bandwidth from main memory to CPU and
+back will not necessarily double. If the stack grows large it may also
+be worthwhile trimming the stack with a custom allocator and custom
+builder reset between buffers to reduce stack size and initialization
+overhead.
+
+[monster_test.c]: https://github.com/dvidelabs/flatcc/blob/master/test/monster_test/monster_test.c
+[flatcc_builder.h]: https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/flatcc_builder.h
+[flatcc_emitter.h]: https://github.com/dvidelabs/flatcc/blob/master/include/flatcc/flatcc_emitter.h
+[monster_test.fbs]: https://github.com/dvidelabs/flatcc/blob/master/test/monster_test/monster_test.fbs
diff --git a/doc/eclectic.fbs b/doc/eclectic.fbs
new file mode 100644
index 0000000..26ecd85
--- /dev/null
+++ b/doc/eclectic.fbs
@@ -0,0 +1,12 @@
+ namespace Eclectic;
+
+ enum Fruit : byte { Banana = -1, Orange = 42 }
+ table FooBar {
+ meal : Fruit = Banana;
+ density : long (deprecated);
+ say : string;
+ height : short;
+ }
+ file_identifier "NOOB";
+ root_type FooBar;
+
diff --git a/doc/flatcc-help.md b/doc/flatcc-help.md
new file mode 100644
index 0000000..b9efc20
--- /dev/null
+++ b/doc/flatcc-help.md
@@ -0,0 +1,106 @@
+```
+flatcc FlatBuffers schema compiler for C by dvide.com
+version: 0.5.2-pre
+usage: flatcc [options] file [...]
+options:
+ --reader (default) Generate reader
+ -c, --common Generate common include header(s)
+ --common_reader Generate common reader include header(s)
+ --common_builder Generate common builder include header(s)
+ -w, --builder Generate builders (writable buffers)
+ -v, --verifier Generate verifier
+ -r, --recursive Recursively generate included schema files
+ -a Generate all (like -cwvr)
+ -g Use _get suffix only to avoid conflicts
+ -d Dependency file like gcc -MMD
+ -I<inpath> Search path for include files (multiple allowed)
+ -o<outpath> Write files relative to this path (dir must exist)
+ --stdout Concatenate all output to stdout
+ --outfile=<file> Like --stdout, but to a file.
+ --depfile=<file> Dependency file like gcc -MF.
+ --deptarget=<file> Override --depfile target like gcc -MT.
+ --prefix=<prefix> Add prefix to all generated names (no _ added)
+ --common-prefix=<prefix> Replace 'flatbuffers' prefix in common files
+ --schema Generate binary schema (.bfbs)
+ --schema-length=no Add length prefix to binary schema
+ --verifier Generate verifier for schema
+ --json-parser Generate json parser for schema
+ --json-printer Generate json printer for schema
+ --json Generate both json parser and printer for schema
+ --version Show version
+ -h | --help Help message
+
+This is a flatbuffer compatible compiler implemented in C generating C
+source. It is largely compatible with the flatc compiler provided by
+Google Fun Propulsion Lab but does not support JSON objects or binary
+schema.
+
+By example 'flatcc monster.fbs' generates a 'monster.h' file which
+provides functions to read a flatbuffer. A common include header is also
+required. The common file is generated with the -c option. The reader
+has no external dependencies.
+
+The -w (--builder) option enables code generation to build buffers:
+`flatbuffers -w monster.fbs` will generate `monster.h` and
+`monster_builder.h`, and also a builder specific common file with the
+-cw option. The builder must link with the extern `flatbuilder` library.
+
+-v (--verifier) generates a verifier file per schema. It depends on the
+runtime library but not on other generated files, except other included
+verifiers.
+
+-r (--recursive) generates all schema included recursively.
+
+--reader is the default option to generate reader output but can be used
+explicitly together with other options that would otherwise disable it.
+
+All C output can be concated to a single file using --stdout or
+--outfile with content produced in dependency order. The outfile is
+relative to cwd.
+
+-g Only add '_get' suffix to read accessors such that, for example,
+only 'Monster_name_get(monster)` will be generated and not also
+'Monster_name(monster)'. This avoids potential conflicts with
+other generated symbols when a schema change is impractical.
+
+-d generates a dependency file, e.g. 'monster.fbs.d' in the output dir.
+
+--depfile implies -d but accepts an explicit filename with a path
+relative to cwd. The dependency files content is a gnu make rule with a
+target followed by the included schema files The target must match how
+it is seen by the rest of the build system and defaults to e.g.
+'monster_reader.h' or 'monster.bfbs' paths relative to the working
+directory.
+
+--deptarget overrides the default target for --depfile, simiar to gcc -MT.
+
+--schema will generate a binary .bfbs file for each top-level schema file.
+Can be used with --stdout if no C output is specified. When used with multiple
+files --schema-length=yes is recommend.
+
+--schema-length adds a length prefix of type uoffset_t to binary schema so
+they can be concatenated - the aligned buffer starts after the prefix.
+
+--json-parser generates a file that implements a fast typed json parser for
+the schema. It depends on some flatcc headers and the runtime library but
+not on other generated files except other parsers from included schema.
+
+--json-printer generates a file that implements json printers for the schema
+and has dependencies similar to --json-parser.
+
+--json is generates both printer and parser.
+
+The generated source can redefine offset sizes by including a modified
+`flatcc_types.h` file. The flatbuilder library must then be compiled with the
+same `flatcc_types.h` file. In this case --prefix and --common-prefix options
+may be helpful to avoid conflict with standard offset sizes.
+
+The output size may seem bulky, but most content is rarely used inline
+functions and macros. The compiled binary need not be large.
+
+The generated source assumes C11 functionality for alignment, compile
+time assertions and inline functions but an optional set of portability
+headers can be included to work with most any compiler. The portability
+layer is not throughly tested so a platform specific test is required
+before production use. Upstream patches are welcome.
+```
diff --git a/doc/json_parser_design.md b/doc/json_parser_design.md
new file mode 100644
index 0000000..3d7cc6a
--- /dev/null
+++ b/doc/json_parser_design.md
@@ -0,0 +1,138 @@
+# JSON Parser Design
+
+The overall principle of the json parser is as follows:
+
+`flatcc/flatcc_json.h` contains functions to parse json primitives
+and type conversions, and a generic json parser to skip unrecognized
+fields.
+
+For each table all known fields are sorted and a trie is constructed.
+After recognizing a json object, each member name is read partially
+and matched against the trie, and more of the field name is read as
+demanded by the trie and a match or rejection is decided.
+
+The member name is read as a single 64 bit word read in big endian
+format (zero padded at the end of file). Big endian makes the first
+character in the name most significant and allows the word to contain
+trailing "garbage". The efficiency depends on unaligned 64-bit reads
+with a fast byteswapping operation, but there is a fall-back that
+works in all cases via an unaligned read support function.
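+
+As an illustrative sketch (not the actual implementation), the word load
+could be done portably like this:
+
+    #include <stdint.h>
+
+    /* Load up to 8 name bytes as a big endian word, zero padded past `end`,
+     * so the first character ends up in the most significant byte. */
+    static uint64_t read_name_word(const char *p, const char *end)
+    {
+        uint64_t w = 0;
+        int i;
+        for (i = 0; i < 8 && p + i < end; ++i) {
+            w |= (uint64_t)(unsigned char)p[i] << (56 - 8 * i);
+        }
+        return w;
+    }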
+
+The trie is constructed by sorting all field names and taking a
+median split. If the split name has a preceding name which is a strict
+prefix, that name is chosen as split instead. This is important
+because words are read with trailing "garbage". If the median name is
+longer than a 64 bit word, it is treated specially where a match
+triggers reading more data and repeating the process.
+
+The median is tested for less than, but not equality. Each side of
+the choice uses a new median like above, until there is only one
+option left. This option is then tested for exact match by masking
+out any trailing data. If the match succeeds the input member name
+must be checked for closing `"` or other valid termination if
+allowing unquoted names.
+
+Note that this match only visits data once, unlike a hash table where
+a lookup still requires matching the name. Since we expect the number
+of field names per table to be reasonable, this approach is likely
+faster than a hash table.
+
+If the match fails, then an error can be generated with an "unknown
+field" error, or the field can be consumed by the generic json
+parser.
+
+When a member name is successfully matched against a known field, the
+member value is expected to be of a primitive type such as a string
+or an integer; the value is parsed using a json support parser and
+type converter which may issue overflow and format errors. For
+example, integers won't accept decimal points and ubyte fields won't
+accept integers of value 256 or higher. If the parse was successful
+the member is added to the current flatbuffer table opened at the start
+of the json object. In fact, the value is parsed directly into the
+flatbuffer builder's entry for the field.
+
+If a scalar field fails to parse its value, a second attempt is
+done parsing the value as a symbolic value. This parse is similar
+to parsing member names but uses a global set of known constants
+from enumerations. If this also fails, the field is invalid and
+an error is generated.
+
+When the field is parsed, a comma is detected and the process is
+repeated. It may fail on duplicate fields because the builder will
+complain.
+
+Once a closing bracket is matched, the table is closed in the
+builder.
+
+In the above we discussed tables, but structs work the same with the
+exception that any fields not present are simply set to zero and it
+is not checked. Nor are duplicate fields checked. This is to avoid
+allocating extra space for something that isn't very important.
+
+Complex member types require more consideration. We wish to avoid a
+recursive descent parser because it imposes limits on nesting depth,
+but we want to have a composable parser. This leads to solutions such
+as a trampoline, returning a parser function for the matched field
+which is called by a driver function, before resuming the current
+parser. We wish to avoid this as it adds overhead, especially in
+inlining.
+
+To avoid trampolines we compile all tables and structs into one large
+function where each type can be reached by a goto label or a switch
+statement. We can then use a variant of Simon Tatham's famous
+coroutine-by-Duff's-device technique to abort and resume the current parse. We
+still need a stack to track return state.
+<http://www.chiark.greenend.org.uk/~sgtatham/coroutines.html>
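+
+The general pattern, illustrated here generically and not as the actual
+flatcc state machine, stores a resume label in the parser state and
+re-enters a switch to continue where it left off:
+
+    /* Generic coroutine-by-switch illustration. */
+    static int parse_step(int *state)
+    {
+        switch (*state) {
+        case 0:
+            /* ... parse until more input is needed ... */
+            *state = 1;
+            return 0; /* suspend */
+        case 1:
+            /* ... resume here on the next call ... */
+            break;
+        }
+        return 1; /* done */
+    }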
+
+Outside the giant state machine we provide wrappers so we can parse
+each type from a clean call interface, for example the root type.
+
+The above solution has one major problem: we apparently have to
+include types from included schema as well. But it turns out we don't
+have to: each included schema is closed and can never have a member
+type from our current schema. Therefore mutual recursion is not
+possible. This means we can afford to use function call recursion
+into included parsers with recursion depth bounded by the number of
+included schema.
+
+Spaces are optimized by first testing to see if there is a character
+above space and exiting immediately if so. Otherwise, if unaligned
+access is valid and the buffer holds at least 16 bytes, we descend
+hierarchically to test ever smaller unaligned loads against a word of
+spaces. This loops as long as 2x64 bit words can be matched to a word
+of pure spaces (ASCII 0x20). If there is any control character, or
+if we reach near the end, or if unaligned access is not available,
+we bail out to a standard character parsing loop that also does line
+counting.
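+
+A simplified sketch of the idea (without the hierarchical descent and the
+line counting of the real implementation):
+
+    #include <stdint.h>
+    #include <string.h>
+
+    /* Skip 8 bytes at a time while they are all ASCII spaces (0x20). */
+    static const char *skip_spaces(const char *p, const char *end)
+    {
+        const uint64_t spaces = 0x2020202020202020ULL;
+        uint64_t w;
+        while (end - p >= 8) {
+            memcpy(&w, p, 8); /* portable unaligned load */
+            if (w != spaces) break;
+            p += 8;
+        }
+        while (p != end && *p == ' ') ++p;
+        return p;
+    }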
+
+A final, and major issue, is how to handle unions:
+
+Google's `flatc` compiler makes the assumption that the union type
+field appears before the union table field. However, this order cannot
+be trusted in standard JSON. If we were to sort the fields first
+or prescan for the type, it would completely ruin our parsing strategy.
+
+To handle unions efficiently we therefore require either that union
+fields appear before the union table, and that the associated
+union table is present before another union's type field, or we
+require that the member name is tagged with its type. We prefer
+the tagged approach, but it is not compliant with `flatc v1.2`.
+
+By using a tag such as `"test_as_Monster"` instead of just "test",
+the parser can treat the field as any other table field. Of course
+only one instance is allowed, so `"test_as_Weapon"`
+in the same object would not be allowed. To the parser it just
+triggers a duplicate field error.
+
+In addition, the `"test_type"` field is allowed to appear anywhere,
+or to be absent. If it is present, it must have a type that
+matches the tagged member, unless it is of type NONE. NONE may
+also be represented as `"test_as_NONE": null`.
+
+While the tagged approach has no use of the type field, it may still
+be very useful to other JSON consumers so they can know what tagged
+field to look up in the JSON object.
+
+The parser handles the type field by setting the type field in the
+flatcc builder table and checking if it is already set when
+seeing a tagged or untagged union table field.
diff --git a/doc/security.md b/doc/security.md
new file mode 100644
index 0000000..52e0515
--- /dev/null
+++ b/doc/security.md
@@ -0,0 +1,272 @@
+# Security Considerations
+
+This section covers questions such as when is it safe to access a
+buffer without risking access violation, buffer overruns, or denial of
+service attacks, but cannot possibly cover all security aspects.
+
+## Reading Buffers
+
+When reading a buffer you have to know the schema type before
+reading and it is preferable to know the size of the buffer although not
+strictly required. If the type is not known, the `file_type`, aka buffer
+type, can be checked. This is no guarantee due to collisions with other
+data formats and because the identifier field may be absent or
+misleading. The identifier therefore works best on buffers that can be
+trusted.
+
+If a buffer cannot be trusted, such as when receiving it over a public
+network, it may be the case that the buffer type is known, but it is not
+known if someone uses an incorrect implementation of FlatBuffers, or if
+the buffer has somehow been corrupted in transit, or someone
+intentionally tampered with the buffer. In this case the buffer can be
+verified. A verification does not prove that the buffer has the correct
+type, but it does prove that it is safe to read (not write) from the
+buffer. The buffer size must be known in order to verify the buffer. If
+the buffer has a wrong type, but still (unlikely but possible) passes
+verification, then unexpected data may be read from the buffer, but it
+will not cause any crashes when using the API correctly.
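+
+As a hedged sketch, assuming the common Monster example schema and its
+generated verifier (the exact names are defined by the generated
+`_verifier.h` and by `flatcc_verifier.h`):
+
+    int ret = MyGame_Example_Monster_verify_as_root(buf, size);
+    if (ret != flatcc_verify_ok) {
+        /* reject the buffer before reading it */
+    }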
+
+It is preferable to know the required alignment of a buffer, which isn't
+trivially available unless retrieved from the builder when the buffer is
+created. The buffer alignment can be deduced from the schema.
+
+On many systems there is no real problem in accessing a buffer
+unaligned, but for systems where it matters, care must be taken because
+unaligned access can result in slow performance or access violations.
+Even on systems where alignment matters, a standard malloc operation is
+often sufficient because it normally aligns to the largest word that
+could cause access violations when unaligned. For special use cases such
+as GPU memory access, more alignment may be needed and FlatBuffers
+supports higher alignments in the schema. Portable `aligned_alloc` and
+`aligned_free` support methods are available to help allocate memory with
+sufficient alignment. Because compile time flags can change between
+compilation of the runtime library and the application,
+`flatcc_builder_aligned_free` ensures a consistent deallocation method
+for aligned buffers allocated by the runtime library.
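+
+For example, a sketch using the aligned finalize and free support calls,
+assuming a finished builder `B` (see flatcc_builder.h for the exact
+interface):
+
+    size_t size;
+    void *buf = flatcc_builder_finalize_aligned_buffer(B, &size);
+    /* ... use or transmit the aligned buffer ... */
+    flatcc_builder_aligned_free(buf);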
+
+A verifier for C requires the buffer to be placed in aligned memory and it
+will fail if the buffer content is not properly aligned relative to the
+buffer or to an absolute memory address regardless of whether the
+current system requires alignment or not. Therefore a buffer verified
+on one system is safe to use on all systems. One could use this fact to
+sign a buffer, but this is beyond the scope of FlatBuffers itself, and
+verifying a signature is likely much slower than re-verifying a buffer
+when a verifier is available.
+
+Note: it would be helpful if the verifier allowed verification only
+relative to the buffer start instead of requiring the absolute addresses
+to be aligned. This would allow verification of buffers before copying
+them out of unaligned locations in network buffers and also allow
+subsequent reading of such buffers without copying iff the system
+supports unaligned access. However, the verifier does not currently
+support this.
+
+It is not always safe to verify a buffer. A buffer can be constructed to
+trigger deep nesting. The FlatCC verifier has a hard coded non-exact
+limit of about 100 levels. This is to protect against deep stack recursion. If the
+limit is exceeded, the verifier will safely fail. The limit can be
+changed with a compile time flag. If the limit is too permissive a
+system may run into stack overflow, but it is unlikely on most systems
+today. Typical application code may have similar recursive access
+functions. Therefore it is likely that recursion is safe if the verifier
+succeeds but it depends on the application.
+
+A buffer can point to the same data from multiple places. This is known
+as a DAG. The verifier rejects cycles that could lead to infinite loops
+during application traversal but does permit DAGs. For normal use DAGs
+are safe but it is possible to maliciously construct a buffer with a
+long vector where all elements point to a table that also has a vector
+of a similar nature. After a few levels, this can lead to a finite but
+exponentially large number of places to visit. The current FlatCC
+verifier does not protect against this but Google's flatc compiler has a
+verifier that limits the number of visited tables.
+
+When reading a buffer in C no memory allocation takes place after the
+buffer has initially been placed in memory. For example, strings can be
+read directly as C strings and strings are 0-terminated. A string might
+contain embedded 0 bytes which is not strictly correct but permitted.
+This will result in a shorter string if used naively as a C string
+without reading the string length via the API but it will still be safe.
+Other languages might have to allocate memory for objects such as
+strings though.
+
+A field can generally be absent. Scalar fields are always safe to
+access, even if they are absent, because they have a default value that
+will be returned. Tables, Vectors, Strings, and Structs may return null
+when a field is absent. This is perfectly valid but if the application
+does not check for null, this can lead to an access violation.
+
+A field can be marked 'required' in the schema. If it is required, it will
+never return null unless the buffer is invalid. A verifier will detect
+this. On a practical note some FlatBuffer builders might not enforce the
+required field and readers do not always verify buffers before access
+(nor should they have to) - therefore an application is advised to check
+return values even on required fields unless the buffer is entirely
+trusted.
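+
+A minimal sketch of such a check, assuming the Monster example schema and
+its generated reader:
+
+    MyGame_Example_Monster_table_t m = MyGame_Example_Monster_as_root(buf);
+    if (m == 0) {
+        /* not a valid root */
+    } else {
+        flatbuffers_string_t name = MyGame_Example_Monster_name(m);
+        if (name == 0) {
+            /* field absent - handle instead of dereferencing */
+        }
+    }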
+
+If a buffer is verified, it is safe to access all vector elements up to
+its size. Access of elements via API calls does not necessarily check for
+out of bounds access but some calls might.
+
+A buffer may also be encoded in big endian format. This is not standard,
+but FlatCC supports it for systems that are primarily big endian. The
+buffer identifier will usually detect the difference because the
+identifier will be byte swapped. A reader therefore needs to be aware of
+this possibility, but most often this is not a concern since standard
+FlatBuffers are always little endian. The verifier will likely fail on an
+unexpected endian encoding but at least make it safe to access.
+
+
+## Thread Safety
+
+There is no thread safety on the FlatBuffers API but read access does
+not mutate any state. Every read location is a temporary variable so as
+long as the application code is otherwise sane, it is safe to read a buffer
+from multiple threads and if the buffer is placed on cache line
+alignment (typically 64 or 128 bytes) it is also efficient without false
+sharing.
+
+A verifier is also safe to use because it only reads from a buffer.
+
+A builder is inherently NOT safe for multithreaded access. However, with
+proper synchronization there is nothing preventing one thread from doing
+the grunt work and another putting the high level pieces together as
+long as only one thread at a time is accessing the builder object, or the
+associated allocator and emitter objects. From a performance perspective
+this doesn't make much sense, but it might from an architectural
+perspective.
+
+A builder object can be cleared and reused after a buffer is constructed
+or abandoned. The clear operation can optionally reduce the amount of
+memory or keep all the memory from the previous operation. In either
+case it is safe for a new thread to use the builder after it is cleared
+but two threads cannot use the builder at the same time.
+
+It is fairly cheap to create a new builder object, but of course cheaper
+to reuse existing memory. Often the best option is for each thread to
+have its own builder and own memory and defer any sharing to the point
+where the buffer is finished and readable.
+
+
+## Schema Evolution
+
+Accessing a buffer that was created by a more recent version of a FlatBuffers
+schema is safe iff the new version of the schema was created according to
+the guidelines for schema evolution - notably no change of field
+identifiers or existing enum values and no removal or deprecation of
+required fields. Google's flatc tool can check if a new schema version is
+safe.
+
+Fields that are not required but deprecated in a new version will still
+be safe to access by the old version but they will likely read default or
+null values and readers should be prepared for this.
+
+
+## Copying or Printing Buffer Content
+
+Even if it is safe to read a buffer, it is not safe to copy or even
+print a buffer because a DAG can unfold to consume much more output
+space than the given input. In data compression this is known as a Zip
+Bomb but even without malicious intent, users need to be aware of the
+potential expansion. This is also a concern when printing JSON.
+
+A table also cannot be trivially copied based on memory content because
+it has offsets to other content. This is not an issue when using any
+official API but may be an issue if a new buffer is constructed
+Frankenstein-style from parts of existing buffers.
+
+Nested buffers are not allowed to share any content with their parents,
+siblings or child buffers for similar reasons.
+
+The verifier should complain if a buffer goes out of bounds.
+
+
+## Modifying Buffers
+
+It is not safe to modify buffers in-place unless the buffer is
+absolutely trusted. Verifying a buffer is not enough. FlatCC does not
+provide any means to modify a buffer in-place but it is not hard to
+achieve this if so desired. It is especially easy to do this with
+structs, so if this is needed this is the way to do it.
+
+Modifying a buffer is unsafe because it is possible to place one table
+inside another table, as an example, even if this is not valid. Such
+overlaps are too expensive to verify by a standard verifier. As long as
+the buffer is not modified this does not pose any real problem, but if a
+field is modified in one table, it might cause a field of another table
+to point out of bounds. This is so obvious an attack vector that anyone
+wanting to hack a system is likely to use this approach. Therefore
+in-place modification should be avoided unless on a trusted platform.
+For example, a trusted network might bump a time-to-live counter when
+passing buffers around.
+
+Even if it is safe to modify buffers, this will not work on platforms
+that require endian conversion unless extra precautions are taken. This
+is usually big endian platforms, but it is also possible to compile
+flatbuffers with native big endian format. FlatCC has a lot of
+low-level `to/from_pe` calls that perform the proper conversion to and
+from the protocol endian format.
+
+
+## Building Buffers
+
+A buffer can be constructed incorrectly in a large number of ways that
+are not efficient to detect at runtime.
+
+When a buffer is constructed with a debug library then assertions will
+sometimes find the most obvious problems such as closing a table after
+opening a vector. FlatCC is quite permissive in the order of object
+creation but the nesting order must be respected, and it cannot type
+check all data. Other FlatBuffer builders typically require that child
+objects are completely created before a parent object is started. FlatCC
+does not require this but will internally organize objects in a
+compatible way. This removes a number of potential mistakes, but not all.
+
+Notably a table from a parent object or any other external reference
+should not be used in a nested buffer.
+
+It is a good idea to run a verifier on a constructed buffer at least
+until some confidence has been gained in the code building buffers.
+
+If a buffer needs to be constructed with sorted keys this cannot be done
+during construction, unlike the C++ API, because the builder allocates as
+little memory as possible. Instead the reader interface supports a
+mutable cast for use with a sort operation. This sort operation must
+only be used on absolutely trusted buffers and verification is not
+sufficient if malicious overlaps can be expected.
+
+The builder will normally consume very little memory. It operates a few
+stacks and a small hash table in addition to a circular buffer to
+consume temporary buffer output. It is not possible to access constructed
+buffer objects before the buffer is complete because data may span
+multiple buffers. Once a buffer is complete the content can be
+copied out, or a support function can be used to allocate new memory with
+the final buffer as content.
+
+The internal memory can grow large when the buffer grows large,
+naturally. In addition the temporary stacks may grow large if there
+are large tables or notably large vectors that cannot be copied
+directly to the output buffers. This creates a potential for memory
+allocation errors, especially on constrained systems.
+
+The builder returns error codes, but it is tedious to check these. It is
+not necessary to check return codes if the API is used correctly and if
+there are no allocation errors. It is possible to provide a custom
+allocator and a custom emitter. These can detect memory failures early
+making it potentially safe to use the builder API without any per
+operation checks.
+
+The generated JSON parser checks all return codes and can be used to
+construct a buffer safely, especially since the buffer is naturally
+bounded by the size of the JSON input. JSON printing, on the other hand,
+can potentially explode, as discussed earlier.
+
+FlatCC generated create calls such as `MyGame_Example_Monster_create()`
+will not be compatible across versions if there are deprecated fields
+even if the schema change otherwise respects schema evolution rules.
+This is mostly a concern if new fields are added because compilation
+will otherwise break on argument count mismatch. Prior to flatcc-0.5.3
+argument order could change if the field (id: x) attribute was used
+which could lead to buffers with unexpected content. JSON parsers that
+support constructors (objects given as an array of create arguments)
+have similar concerns but here trailing arguments can be optional.
diff --git a/external/grisu3/.gitignore b/external/grisu3/.gitignore
new file mode 100644
index 0000000..567609b
--- /dev/null
+++ b/external/grisu3/.gitignore
@@ -0,0 +1 @@
+build/
diff --git a/external/grisu3/LICENSE b/external/grisu3/LICENSE
new file mode 100644
index 0000000..bb7ca57
--- /dev/null
+++ b/external/grisu3/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+Some files also Copyright author of MathGeoLib (https://github.com/juj)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
diff --git a/external/grisu3/README.md b/external/grisu3/README.md
new file mode 100644
index 0000000..5f5c62e
--- /dev/null
+++ b/external/grisu3/README.md
@@ -0,0 +1,9 @@
+Implements the grisu3 floating point printing and parsing algorithm
+based on earlier work:
+
+- <http://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf>
+- <https://github.com/google/double-conversion>
+- <https://github.com/juj/MathGeoLib/blob/master/src/Math/grisu3.c>
+- <http://www.exploringbinary.com/quick-and-dirty-floating-point-to-decimal-conversion/>
+
+
diff --git a/external/grisu3/grisu3_math.h b/external/grisu3/grisu3_math.h
new file mode 100644
index 0000000..cff6e8c
--- /dev/null
+++ b/external/grisu3/grisu3_math.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/* 2016-02-02: Updated by mikkelfj
+ *
+ * Extracted from MatGeoLib grisu3.c, Apache 2.0 license, and extended.
+ *
+ * This file is usually include via grisu3_print.h or grisu3_parse.h.
+ *
+ * The original MatGeoLib dtoa_grisu3 implementation is largely
+ * unchanged except for the uint64 to double cast. The remaining changes
+ * are file structure, name changes, and new additions for parsing:
+ *
+ * - Split into header files only:
+ * grisu3_math.h, grisu3_print.h, (added grisu3_parse.h)
+ *
+ * - names prefixed with grisu3_, grisu3_diy_fp_, GRISU3_.
+ * - added static to all functions.
+ * - disabled clang unused function warnings.
+ * - guarded <stdint.h> to allow for alternative impl.
+ * - added extra numeric constants needed for parsing.
+ * - added dec_pow, cast_double_from_diy_fp.
+ * - changed some function names for consistency.
+ * - moved printing specific grisu3 functions to grisu3_print.h.
+ * - changed double to uint64 cast to avoid aliasing.
+ * - added new grisu3_parse.h for parsing doubles.
+ * - grisu3_print_double (dtoa_grisu3) format .1 as 0.1 needed for valid JSON output
+ * and grisu3_parse_double wouldn't consume it.
+ * - grsu3_print_double changed formatting to prefer 0.012 over 1.2e-2.
+ *
+ * These changes make it possible to include the files as headers only
+ * in other software libraries without risking name conflicts, and to
+ * extend the implementation with a port of Googles Double Conversion
+ * strtod functionality for parsing doubles.
+ *
+ * Extracted from: rev. 915501a / Dec 22, 2015
+ * <https://github.com/juj/MathGeoLib/blob/master/src/Math/grisu3.c>
+ * MathGeoLib License: http://www.apache.org/licenses/LICENSE-2.0.html
+ */
+
+#ifndef GRISU3_MATH_H
+#define GRISU3_MATH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Guarded to allow inclusion of pstdint.h first, if stdint.h is not supported. */
+#ifndef UINT8_MAX
+#include <stdint.h> /* uint64_t etc. */
+#endif
+
+#ifdef GRISU3_NO_ASSERT
+#undef GRISU3_ASSERT
+#define GRISU3_ASSERT(x) ((void)0)
+#endif
+
+#ifndef GRISU3_ASSERT
+#include <assert.h> /* assert */
+#define GRISU3_ASSERT(x) assert(x)
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer */
+#endif
+
+#define GRISU3_D64_SIGN 0x8000000000000000ULL
+#define GRISU3_D64_EXP_MASK 0x7FF0000000000000ULL
+#define GRISU3_D64_FRACT_MASK 0x000FFFFFFFFFFFFFULL
+#define GRISU3_D64_IMPLICIT_ONE 0x0010000000000000ULL
+#define GRISU3_D64_EXP_POS 52
+#define GRISU3_D64_EXP_BIAS 1075
+#define GRISU3_D64_DENORM_EXP (-GRISU3_D64_EXP_BIAS + 1)
+#define GRISU3_DIY_FP_FRACT_SIZE 64
+#define GRISU3_D_1_LOG2_10 0.30102999566398114 /* 1 / lg(10) */
+#define GRISU3_MIN_TARGET_EXP -60
+#define GRISU3_MASK32 0xFFFFFFFFULL
+#define GRISU3_MIN_CACHED_EXP -348
+#define GRISU3_MAX_CACHED_EXP 340
+#define GRISU3_CACHED_EXP_STEP 8
+#define GRISU3_D64_MAX_DEC_EXP 309
+#define GRISU3_D64_MIN_DEC_EXP -324
+#define GRISU3_D64_INF GRISU3_D64_EXP_MASK
+
+#define GRISU3_MIN(x,y) ((x) <= (y) ? (x) : (y))
+#define GRISU3_MAX(x,y) ((x) >= (y) ? (x) : (y))
+
+
+typedef struct grisu3_diy_fp
+{
+ uint64_t f;
+ int e;
+} grisu3_diy_fp_t;
+
+typedef struct grisu3_diy_fp_power
+{
+ uint64_t fract;
+ int16_t b_exp, d_exp;
+} grisu3_diy_fp_power_t;
+
+typedef union {
+ uint64_t u64;
+ double d64;
+} grisu3_cast_double_t;
+
+static uint64_t grisu3_cast_uint64_from_double(double d)
+{
+ grisu3_cast_double_t cd;
+ cd.d64 = d;
+ return cd.u64;
+}
+
+static double grisu3_cast_double_from_uint64(uint64_t u)
+{
+ grisu3_cast_double_t cd;
+ cd.u64 = u;
+ return cd.d64;
+}
+
+#define grisu3_double_infinity grisu3_cast_double_from_uint64(GRISU3_D64_INF)
+#define grisu3_double_nan grisu3_cast_double_from_uint64(GRISU3_D64_INF + 1)
+
+static const grisu3_diy_fp_power_t grisu3_diy_fp_pow_cache[] =
+{
+ { 0xfa8fd5a0081c0288ULL, -1220, -348 },
+ { 0xbaaee17fa23ebf76ULL, -1193, -340 },
+ { 0x8b16fb203055ac76ULL, -1166, -332 },
+ { 0xcf42894a5dce35eaULL, -1140, -324 },
+ { 0x9a6bb0aa55653b2dULL, -1113, -316 },
+ { 0xe61acf033d1a45dfULL, -1087, -308 },
+ { 0xab70fe17c79ac6caULL, -1060, -300 },
+ { 0xff77b1fcbebcdc4fULL, -1034, -292 },
+ { 0xbe5691ef416bd60cULL, -1007, -284 },
+ { 0x8dd01fad907ffc3cULL, -980, -276 },
+ { 0xd3515c2831559a83ULL, -954, -268 },
+ { 0x9d71ac8fada6c9b5ULL, -927, -260 },
+ { 0xea9c227723ee8bcbULL, -901, -252 },
+ { 0xaecc49914078536dULL, -874, -244 },
+ { 0x823c12795db6ce57ULL, -847, -236 },
+ { 0xc21094364dfb5637ULL, -821, -228 },
+ { 0x9096ea6f3848984fULL, -794, -220 },
+ { 0xd77485cb25823ac7ULL, -768, -212 },
+ { 0xa086cfcd97bf97f4ULL, -741, -204 },
+ { 0xef340a98172aace5ULL, -715, -196 },
+ { 0xb23867fb2a35b28eULL, -688, -188 },
+ { 0x84c8d4dfd2c63f3bULL, -661, -180 },
+ { 0xc5dd44271ad3cdbaULL, -635, -172 },
+ { 0x936b9fcebb25c996ULL, -608, -164 },
+ { 0xdbac6c247d62a584ULL, -582, -156 },
+ { 0xa3ab66580d5fdaf6ULL, -555, -148 },
+ { 0xf3e2f893dec3f126ULL, -529, -140 },
+ { 0xb5b5ada8aaff80b8ULL, -502, -132 },
+ { 0x87625f056c7c4a8bULL, -475, -124 },
+ { 0xc9bcff6034c13053ULL, -449, -116 },
+ { 0x964e858c91ba2655ULL, -422, -108 },
+ { 0xdff9772470297ebdULL, -396, -100 },
+ { 0xa6dfbd9fb8e5b88fULL, -369, -92 },
+ { 0xf8a95fcf88747d94ULL, -343, -84 },
+ { 0xb94470938fa89bcfULL, -316, -76 },
+ { 0x8a08f0f8bf0f156bULL, -289, -68 },
+ { 0xcdb02555653131b6ULL, -263, -60 },
+ { 0x993fe2c6d07b7facULL, -236, -52 },
+ { 0xe45c10c42a2b3b06ULL, -210, -44 },
+ { 0xaa242499697392d3ULL, -183, -36 },
+ { 0xfd87b5f28300ca0eULL, -157, -28 },
+ { 0xbce5086492111aebULL, -130, -20 },
+ { 0x8cbccc096f5088ccULL, -103, -12 },
+ { 0xd1b71758e219652cULL, -77, -4 },
+ { 0x9c40000000000000ULL, -50, 4 },
+ { 0xe8d4a51000000000ULL, -24, 12 },
+ { 0xad78ebc5ac620000ULL, 3, 20 },
+ { 0x813f3978f8940984ULL, 30, 28 },
+ { 0xc097ce7bc90715b3ULL, 56, 36 },
+ { 0x8f7e32ce7bea5c70ULL, 83, 44 },
+ { 0xd5d238a4abe98068ULL, 109, 52 },
+ { 0x9f4f2726179a2245ULL, 136, 60 },
+ { 0xed63a231d4c4fb27ULL, 162, 68 },
+ { 0xb0de65388cc8ada8ULL, 189, 76 },
+ { 0x83c7088e1aab65dbULL, 216, 84 },
+ { 0xc45d1df942711d9aULL, 242, 92 },
+ { 0x924d692ca61be758ULL, 269, 100 },
+ { 0xda01ee641a708deaULL, 295, 108 },
+ { 0xa26da3999aef774aULL, 322, 116 },
+ { 0xf209787bb47d6b85ULL, 348, 124 },
+ { 0xb454e4a179dd1877ULL, 375, 132 },
+ { 0x865b86925b9bc5c2ULL, 402, 140 },
+ { 0xc83553c5c8965d3dULL, 428, 148 },
+ { 0x952ab45cfa97a0b3ULL, 455, 156 },
+ { 0xde469fbd99a05fe3ULL, 481, 164 },
+ { 0xa59bc234db398c25ULL, 508, 172 },
+ { 0xf6c69a72a3989f5cULL, 534, 180 },
+ { 0xb7dcbf5354e9beceULL, 561, 188 },
+ { 0x88fcf317f22241e2ULL, 588, 196 },
+ { 0xcc20ce9bd35c78a5ULL, 614, 204 },
+ { 0x98165af37b2153dfULL, 641, 212 },
+ { 0xe2a0b5dc971f303aULL, 667, 220 },
+ { 0xa8d9d1535ce3b396ULL, 694, 228 },
+ { 0xfb9b7cd9a4a7443cULL, 720, 236 },
+ { 0xbb764c4ca7a44410ULL, 747, 244 },
+ { 0x8bab8eefb6409c1aULL, 774, 252 },
+ { 0xd01fef10a657842cULL, 800, 260 },
+ { 0x9b10a4e5e9913129ULL, 827, 268 },
+ { 0xe7109bfba19c0c9dULL, 853, 276 },
+ { 0xac2820d9623bf429ULL, 880, 284 },
+ { 0x80444b5e7aa7cf85ULL, 907, 292 },
+ { 0xbf21e44003acdd2dULL, 933, 300 },
+ { 0x8e679c2f5e44ff8fULL, 960, 308 },
+ { 0xd433179d9c8cb841ULL, 986, 316 },
+ { 0x9e19db92b4e31ba9ULL, 1013, 324 },
+ { 0xeb96bf6ebadf77d9ULL, 1039, 332 },
+ { 0xaf87023b9bf0ee6bULL, 1066, 340 }
+};
+
+/* Avoid dependence on lib math to get (int)ceil(v) */
+static int grisu3_iceil(double v)
+{
+ int k = (int)v;
+ if (v < 0) return k;
+ return v - k == 0 ? k : k + 1;
+}
+
+static int grisu3_diy_fp_cached_pow(int exp, grisu3_diy_fp_t *p)
+{
+ int k = grisu3_iceil((exp+GRISU3_DIY_FP_FRACT_SIZE-1) * GRISU3_D_1_LOG2_10);
+ int i = (k-GRISU3_MIN_CACHED_EXP-1) / GRISU3_CACHED_EXP_STEP + 1;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+ return grisu3_diy_fp_pow_cache[i].d_exp;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_minus(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
+ grisu3_diy_fp_t d; d.f = x.f - y.f; d.e = x.e;
+ GRISU3_ASSERT(x.e == y.e && x.f >= y.f);
+ return d;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_multiply(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
+ uint64_t a, b, c, d, ac, bc, ad, bd, tmp;
+ grisu3_diy_fp_t r;
+ a = x.f >> 32; b = x.f & GRISU3_MASK32;
+ c = y.f >> 32; d = y.f & GRISU3_MASK32;
+ ac = a*c; bc = b*c;
+ ad = a*d; bd = b*d;
+ tmp = (bd >> 32) + (ad & GRISU3_MASK32) + (bc & GRISU3_MASK32);
+ tmp += 1U << 31; /* round */
+ r.f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ r.e = x.e + y.e + 64;
+ return r;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_normalize(grisu3_diy_fp_t n)
+{
+ GRISU3_ASSERT(n.f != 0);
+ while(!(n.f & 0xFFC0000000000000ULL)) { n.f <<= 10; n.e -= 10; }
+ while(!(n.f & GRISU3_D64_SIGN)) { n.f <<= 1; --n.e; }
+ return n;
+}
+
+static grisu3_diy_fp_t grisu3_cast_diy_fp_from_double(double d)
+{
+ grisu3_diy_fp_t fp;
+ uint64_t u64 = grisu3_cast_uint64_from_double(d);
+ if (!(u64 & GRISU3_D64_EXP_MASK)) { fp.f = u64 & GRISU3_D64_FRACT_MASK; fp.e = 1 - GRISU3_D64_EXP_BIAS; }
+ else { fp.f = (u64 & GRISU3_D64_FRACT_MASK) + GRISU3_D64_IMPLICIT_ONE; fp.e = (int)((u64 & GRISU3_D64_EXP_MASK) >> GRISU3_D64_EXP_POS) - GRISU3_D64_EXP_BIAS; }
+ return fp;
+}
+
+static double grisu3_cast_double_from_diy_fp(grisu3_diy_fp_t n)
+{
+ const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
+ const uint64_t frac_mask = GRISU3_D64_FRACT_MASK;
+ const int denorm_exp = GRISU3_D64_DENORM_EXP;
+ const int exp_bias = GRISU3_D64_EXP_BIAS;
+ const int exp_pos = GRISU3_D64_EXP_POS;
+
+ grisu3_diy_fp_t v = n;
+ uint64_t e_biased;
+
+ while (v.f > hidden_bit + frac_mask) {
+ v.f >>= 1;
+ ++v.e;
+ }
+ if (v.e < denorm_exp) {
+ return 0.0;
+ }
+ while (v.e > denorm_exp && (v.f & hidden_bit) == 0) {
+ v.f <<= 1;
+ --v.e;
+ }
+ if (v.e == denorm_exp && (v.f & hidden_bit) == 0) {
+ e_biased = 0;
+ } else {
+ e_biased = (uint64_t)(v.e + exp_bias);
+ }
+ return grisu3_cast_double_from_uint64((v.f & frac_mask) | (e_biased << exp_pos));
+}
+
+/* pow10_cache[i] = 10^(i-1) */
+static const unsigned int grisu3_pow10_cache[] = { 0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
+
+static int grisu3_largest_pow10(uint32_t n, int n_bits, uint32_t *power)
+{
+ int guess = ((n_bits + 1) * 1233 >> 12) + 1/*skip first entry*/;
+ if (n < grisu3_pow10_cache[guess]) --guess; /* We don't have any guarantees that 2^n_bits <= n. */
+ *power = grisu3_pow10_cache[guess];
+ return guess;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_MATH_H */
diff --git a/external/grisu3/grisu3_parse.h b/external/grisu3/grisu3_parse.h
new file mode 100644
index 0000000..3d67c9a
--- /dev/null
+++ b/external/grisu3/grisu3_parse.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Port of parts of Google Double Conversion strtod functionality
+ * but with fallback to strtod instead of a bignum implementation.
+ *
+ * Based on grisu3 math from MathGeoLib.
+ *
+ * See also grisu3_math.h comments.
+ */
+
+#ifndef GRISU3_PARSE_H
+#define GRISU3_PARSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include <stdlib.h>
+#include <limits.h>
+
+#include "grisu3_math.h"
+
+
+/*
+ * The maximum number characters a valid number may contain. The parse
+ * fails if the input length is longer but the character after max len
+ * was part of the number.
+ *
+ * The length should not be set too high because it protects against
+ * overflow in the exponent part derived from the input length.
+ */
+#define GRISU3_NUM_MAX_LEN 1000
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_parse_double_is_defined 1
+
+/*
+ * Disable to compare performance and to test diy_fp algorithm in
+ * broader range.
+ */
+#define GRISU3_PARSE_FAST_CASE
+
+/* May result in a one off error, otherwise when uncertain, fall back to strtod. */
+//#define GRISU3_PARSE_ALLOW_ERROR
+
+
+/*
+ * The dec output exponent jumps in 8, so the result is offset at most
+ * by 7 when the input is within range.
+ */
+static int grisu3_diy_fp_cached_dec_pow(int d_exp, grisu3_diy_fp_t *p)
+{
+ const int cached_offset = -GRISU3_MIN_CACHED_EXP;
+ const int d_exp_dist = GRISU3_CACHED_EXP_STEP;
+ int i, a_exp;
+
+ GRISU3_ASSERT(GRISU3_MIN_CACHED_EXP <= d_exp);
+ GRISU3_ASSERT(d_exp < GRISU3_MAX_CACHED_EXP + d_exp_dist);
+
+ i = (d_exp + cached_offset) / d_exp_dist;
+ a_exp = grisu3_diy_fp_pow_cache[i].d_exp;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+
+ GRISU3_ASSERT(a_exp <= d_exp);
+ GRISU3_ASSERT(d_exp < a_exp + d_exp_dist);
+
+ return a_exp;
+}
+
+/*
+ * Ported from google double conversion strtod using
+ * MathGeoLibs diy_fp functions for grisu3 in C.
+ *
+ * ulp_half_error is set if it was needed to truncate non-zero trailing
+ * characters.
+ *
+ * The actual value we need to encode is:
+ *
+ * (sign ? -1 : 1) * fraction * 2 ^ (exponent - fraction_exp)
+ * where exponent is the base 10 exponent assuming the decimal point is
+ * after the first digit. fraction_exp is the base 10 magnitude of the
+ * fraction or number of significant digits - 1.
+ *
+ * If the exponent is between 0 and 22 and the fraction is encoded in
+ * the lower 53 bits (the largest bit is implicit in a double, but not
+ * in this fraction), then the value can be trivially converted to
+ * double without loss of precision. If the fraction was in fact
+ * multiplied by trailing zeroes that we didn't convert to exponent,
+ * there are larger values than the 53 bits that can also be encoded
+ * trivially - but then it is better to handle this during parsing
+ * if it is worthwhile. We do not optimize for this here, because it
+ * can be done in a simple check before calling, and because it might
+ * not be worthwhile to do at all since it very likely will fail for
+ * numbers printed to be convertible back to double without loss.
+ *
+ * Returns 0 if conversion was not exact. In that case the value is
+ * either one smaller than the correct one, or the correct one.
+ *
+ * Exponents must be range protected before calling otherwise cached
+ * powers will blow up.
+ *
+ * Google Double Conversion seems to prefer the following notion:
+ *
+ * x >= 10^309 => +Inf
+ * x <= 10^-324 => 0,
+ *
+ * max double: HUGE_VAL = 1.7976931348623157 * 10^308
+ * min double: 4.9406564584124654 * 10^-324
+ *
+ * Values just below or above min/max representable number
+ * may round towards large/small non-Inf/non-neg values.
+ *
+ * but `strtod` seems to return +/-HUGE_VAL on overflow?
+ */
+static int grisu3_diy_fp_encode_double(uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
+{
+ /*
+     * Error is measured in fractions of integers, so we scale up to get
+ * some resolution to represent error expressions.
+ */
+ const int log2_error_one = 3;
+ const int error_one = 1 << log2_error_one;
+ const int denorm_exp = GRISU3_D64_DENORM_EXP;
+ const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
+ const int diy_size = GRISU3_DIY_FP_FRACT_SIZE;
+ const int max_digits = 19;
+
+ int error = ulp_half_error ? error_one / 2 : 0;
+ int d_exp = (exponent - fraction_exp);
+ int a_exp;
+ int o_exp;
+ grisu3_diy_fp_t v = { fraction, 0 };
+ grisu3_diy_fp_t cp;
+ grisu3_diy_fp_t rounded;
+ int mag;
+ int prec;
+ int prec_bits;
+ int half_way;
+
+ /* When fractions in a double aren't stored with implicit msb fraction bit. */
+
+ /* Shift fraction to msb. */
+ v = grisu3_diy_fp_normalize(v);
+ /* The half point error moves up while the exponent moves down. */
+ error <<= -v.e;
+
+ a_exp = grisu3_diy_fp_cached_dec_pow(d_exp, &cp);
+
+ /* Interpolate between cached powers at distance 8. */
+ if (a_exp != d_exp) {
+ int adj_exp = d_exp - a_exp - 1;
+ static grisu3_diy_fp_t cp_10_lut[] = {
+ { 0xa000000000000000ULL, -60 },
+ { 0xc800000000000000ULL, -57 },
+ { 0xfa00000000000000ULL, -54 },
+ { 0x9c40000000000000ULL, -50 },
+ { 0xc350000000000000ULL, -47 },
+ { 0xf424000000000000ULL, -44 },
+ { 0x9896800000000000ULL, -40 },
+ };
+ GRISU3_ASSERT(adj_exp >= 0 && adj_exp < 7);
+ v = grisu3_diy_fp_multiply(v, cp_10_lut[adj_exp]);
+
+ /* 20 decimal digits won't always fit in 64 bit.
+ * (`fraction_exp` is one less than significant decimal
+ * digits in fraction, e.g. 1 * 10e0).
+ * If we cannot fit, introduce 1/2 ulp error
+ * (says double conversion reference impl.) */
+ if (1 + fraction_exp + adj_exp > max_digits) {
+ error += error_one / 2;
+ }
+ }
+
+ v = grisu3_diy_fp_multiply(v, cp);
+ /*
+ * Google double conversion claims that:
+ *
+ * The error introduced by a multiplication of a*b equals
+ * error_a + error_b + error_a*error_b/2^64 + 0.5
+ * Substituting a with 'input' and b with 'cached_power' we have
+ * error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
+ * error_ab = 0 or 1 / error_oner > error_a*error_b/ 2^64
+ *
+ * which in our encoding becomes:
+ * error_a = error_one/2
+ * error_ab = 1 / error_one (rounds up to 1 if error != 0, or 0 * otherwise)
+ * fixed_error = error_one/2
+ *
+ * error += error_a + fixed_error + (error ? 1 : 0)
+ *
+ * (this isn't entirely clear, but that is as close as we get).
+ */
+ error += error_one + (error ? 1 : 0);
+
+ o_exp = v.e;
+ v = grisu3_diy_fp_normalize(v);
+ /* Again, if we shift the significant bits, the error moves along. */
+ error <<= o_exp - v.e;
+
+ /*
+ * The value `v` is bounded by 2^mag which is 64 + v.e. because we
+ * just normalized it by shifting towards msb.
+ */
+ mag = diy_size + v.e;
+
+ /* The effective magnitude of the IEEE double representation. */
+ mag = mag >= diy_size + denorm_exp ? diy_size : mag <= denorm_exp ? 0 : mag - denorm_exp;
+ prec = diy_size - mag;
+ if (prec + log2_error_one >= diy_size) {
+ int e_scale = prec + log2_error_one - diy_size - 1;
+ v.f >>= e_scale;
+ v.e += e_scale;
+ error = (error >> e_scale) + 1 + error_one;
+ prec -= e_scale;
+ }
+ rounded.f = v.f >> prec;
+ rounded.e = v.e + prec;
+ prec_bits = (int)(v.f & ((uint64_t)1 << (prec - 1))) * error_one;
+ half_way = (int)((uint64_t)1 << (prec - 1)) * error_one;
+ if (prec >= half_way + error) {
+ rounded.f++;
+ /* Prevent overflow. */
+ if (rounded.f & (hidden_bit << 1)) {
+ rounded.f >>= 1;
+ rounded.e += 1;
+ }
+ }
+ *result = grisu3_cast_double_from_diy_fp(rounded);
+ return half_way - error >= prec_bits || prec_bits >= half_way + error;
+}
+
+/*
+ * `end` is unchanged if number is handled natively, or it is the result
+ * of strtod parsing in case of fallback.
+ */
+static const char *grisu3_encode_double(const char *buf, const char *end, int sign, uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
+{
+ const int max_d_exp = GRISU3_D64_MAX_DEC_EXP;
+ const int min_d_exp = GRISU3_D64_MIN_DEC_EXP;
+
+ char *v_end;
+
+ /* Both for user experience, and to protect internal power table lookups. */
+ if (fraction == 0 || exponent < min_d_exp) {
+ *result = 0.0;
+ goto done;
+ }
+ if (exponent - 1 > max_d_exp) {
+ *result = grisu3_double_infinity;
+ goto done;
+ }
+
+ /*
+ * `exponent` is the normalized value, fraction_exp is the size of
+ * the representation in the `fraction value`, or one less than
+ * number of significant digits.
+ *
+ * If the final value can be kept in 53 bits and we can avoid
+ * division, then we can convert to double quite fast.
+ *
+ * ulf_half_error only happens when fraction is maxed out, so
+ * fraction_exp > 22 by definition.
+ *
+ * fraction_exp >= 0 always.
+ *
+ * http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
+ */
+
+
+#ifdef GRISU3_PARSE_FAST_CASE
+ if (fraction < (1ULL << 53) && exponent >= 0 && exponent <= 22) {
+ double v = (double)fraction;
+ /* Multiplying by 1e-k instead of dividing by 1ek results in rounding error. */
+ switch (exponent - fraction_exp) {
+ case -22: v /= 1e22; break;
+ case -21: v /= 1e21; break;
+ case -20: v /= 1e20; break;
+ case -19: v /= 1e19; break;
+ case -18: v /= 1e18; break;
+ case -17: v /= 1e17; break;
+ case -16: v /= 1e16; break;
+ case -15: v /= 1e15; break;
+ case -14: v /= 1e14; break;
+ case -13: v /= 1e13; break;
+ case -12: v /= 1e12; break;
+ case -11: v /= 1e11; break;
+ case -10: v /= 1e10; break;
+ case -9: v /= 1e9; break;
+ case -8: v /= 1e8; break;
+ case -7: v /= 1e7; break;
+ case -6: v /= 1e6; break;
+ case -5: v /= 1e5; break;
+ case -4: v /= 1e4; break;
+ case -3: v /= 1e3; break;
+ case -2: v /= 1e2; break;
+ case -1: v /= 1e1; break;
+ case 0: break;
+ case 1: v *= 1e1; break;
+ case 2: v *= 1e2; break;
+ case 3: v *= 1e3; break;
+ case 4: v *= 1e4; break;
+ case 5: v *= 1e5; break;
+ case 6: v *= 1e6; break;
+ case 7: v *= 1e7; break;
+ case 8: v *= 1e8; break;
+ case 9: v *= 1e9; break;
+ case 10: v *= 1e10; break;
+ case 11: v *= 1e11; break;
+ case 12: v *= 1e12; break;
+ case 13: v *= 1e13; break;
+ case 14: v *= 1e14; break;
+ case 15: v *= 1e15; break;
+ case 16: v *= 1e16; break;
+ case 17: v *= 1e17; break;
+ case 18: v *= 1e18; break;
+ case 19: v *= 1e19; break;
+ case 20: v *= 1e20; break;
+ case 21: v *= 1e21; break;
+ case 22: v *= 1e22; break;
+ }
+ *result = v;
+ goto done;
+ }
+#endif
+
+ if (grisu3_diy_fp_encode_double(fraction, exponent, fraction_exp, ulp_half_error, result)) {
+ goto done;
+ }
+#ifdef GRISU3_PARSE_ALLOW_ERROR
+ goto done;
+#endif
+ *result = strtod(buf, &v_end);
+ if (v_end < end) {
+ return v_end;
+ }
+ return end;
+done:
+ if (sign) {
+ *result = -*result;
+ }
+ return end;
+}
+
+/*
+ * Returns buf if number wasn't matched, or null if number starts ok
+ * but contains invalid content.
+ */
+static const char *grisu3_parse_hex_fp(const char *buf, const char *end, int sign, double *result)
+{
+ (void)buf;
+ (void)end;
+ (void)sign;
+ *result = 0.0;
+ /* Not currently supported. */
+ return buf;
+}
+
+/*
+ * Returns end pointer on success, or null, or buf if start is not a number.
+ * Sets result to 0.0 on error.
+ * Reads up to len + 1 bytes from the buffer; the byte at buf + len must
+ * not be a valid part of a number (e.g. a terminating '\0'), but the
+ * number itself may end before buf + len. Leading whitespace is NOT valid.
+ * Very small numbers are truncated to +/-0.0 and numerically very large
+ * numbers are returned as +/-infinity.
+ *
+ * A value must not end or begin with '.' (like JSON), but can have
+ * leading zeroes (unlike JSON). A single leading zero followed by
+ * an encoding symbol may or may not be interpreted as a non-decimal
+ * encoding prefix, e.g. 0x, but a leading zero followed by a digit is
+ * NOT interpreted as octal.
+ * A single leading negative sign may appear before digits, but positive
+ * sign is not allowed and space after the sign is not allowed.
+ * At most the first 1000 characters of the input are considered.
+ */
+static const char *grisu3_parse_double(const char *buf, size_t len, double *result)
+{
+ const char *mark, *k, *end;
+ int sign = 0, esign = 0;
+ uint64_t fraction = 0;
+ int exponent = 0;
+ int ee = 0;
+ int fraction_exp = 0;
+ int ulp_half_error = 0;
+
+ *result = 0.0;
+
+ end = buf + len + 1;
+
+ /* Failsafe for exponent overflow. */
+ if (len > GRISU3_NUM_MAX_LEN) {
+ end = buf + GRISU3_NUM_MAX_LEN + 1;
+ }
+
+ if (buf == end) {
+ return buf;
+ }
+ mark = buf;
+ if (*buf == '-') {
+ ++buf;
+ sign = 1;
+ if (buf == end) {
+ return 0;
+ }
+ }
+ if (*buf == '0') {
+ ++buf;
+ /* | 0x20 is lower case ASCII. */
+ if (buf != end && (*buf | 0x20) == 'x') {
+ k = grisu3_parse_hex_fp(buf, end, sign, result);
+ if (k == buf) {
+ return mark;
+ }
+ return k;
+ }
+ /* Not worthwhile, except for getting the scale of integer part. */
+ while (buf != end && *buf == '0') {
+ ++buf;
+ }
+ } else {
+ if (*buf < '1' || *buf > '9') {
+ /*
+ * If we didn't see a sign, just don't recognize it as
+ * number, otherwise make it an error.
+ */
+ return sign ? 0 : mark;
+ }
+ fraction = (uint64_t)(*buf++ - '0');
+ }
+ k = buf;
+ /*
+ * We do not catch trailing zeroes when there is no decimal point.
+ * This misses an opportunity for moving the exponent down into the
+ * fast case. But it is unlikely to be worthwhile as it complicates
+ * parsing.
+ */
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ if (fraction >= UINT64_MAX / 10) {
+ fraction += *buf >= '5';
+ ulp_half_error = 1;
+ break;
+ }
+ fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
+ }
+ fraction_exp = (int)(buf - k);
+ /* Skip surplus digits. Trailing zero does not introduce error. */
+ while (buf != end && *buf == '0') {
+ ++exponent;
+ ++buf;
+ }
+ if (buf != end && *buf >= '1' && *buf <= '9') {
+ ulp_half_error = 1;
+ ++exponent;
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++exponent;
+ ++buf;
+ }
+ }
+ if (buf != end && *buf == '.') {
+ ++buf;
+ k = buf;
+ if (*buf < '0' || *buf > '9') {
+ /* We don't accept numbers without leading or trailing digit. */
+ return 0;
+ }
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ if (fraction >= UINT64_MAX / 10) {
+ if (!ulp_half_error) {
+ fraction += *buf >= '5';
+ ulp_half_error = 1;
+ }
+ break;
+ }
+ fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
+ --exponent;
+ }
+ fraction_exp += (int)(buf - k);
+ while (buf != end && *buf == '0') {
+ ++exponent;
+ ++buf;
+ }
+ if (buf != end && *buf >= '1' && *buf <= '9') {
+ ulp_half_error = 1;
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ }
+ /*
+ * Normalized exponent e.g: 1.23434e3 with fraction = 123434,
+ * fraction_exp = 5, exponent = 3.
+ * So value = fraction * 10^(exponent - fraction_exp)
+ */
+ exponent += fraction_exp;
+ if (buf != end && (*buf | 0x20) == 'e') {
+ if (end - buf < 2) {
+ return 0;
+ }
+ ++buf;
+ if (*buf == '+') {
+ ++buf;
+ if (buf == end) {
+ return 0;
+ }
+ } else if (*buf == '-') {
+ esign = 1;
+ ++buf;
+ if (buf == end) {
+ return 0;
+ }
+ }
+ if (*buf < '0' || *buf > '9') {
+ return 0;
+ }
+ ee = *buf++ - '0';
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ /*
+ * This test impacts performance and we do not need an exact
+ * value, just one large enough to dominate the fraction_exp.
+ * Subsequent handling maps large absolute ee to 0 or infinity.
+ */
+ if (ee <= 0x7fff) {
+ ee = ee * 10 + *buf - '0';
+ }
+ ++buf;
+ }
+ }
+ exponent = exponent + (esign ? -ee : ee);
+
+ /*
+ * Exponent is now a base 10 normalized exponent so the absolute value
+ * is less than 10^(exponent + 1) for positive exponents. For
+ * denormalized doubles (using 11 bit exponent 0 with a fraction
+ * shifted down), extra small numbers can be represented.
+ *
+ * https://en.wikipedia.org/wiki/Double-precision_floating-point_format
+ *
+ * 10^-324 holds the smallest normalized exponent (but not value) and
+ * 10^308 holds the largest exponent. Internally our lookup table is
+ * only safe to use within a range slightly larger than this.
+ * Externally, a slightly larger/smaller value represents NaNs which
+ * are technically also possible to store as a number.
+ *
+ */
+
+ /* This also protects strtod fallback parsing. */
+ if (buf == end) {
+ return 0;
+ }
+ return grisu3_encode_double(mark, buf, sign, fraction, exponent, fraction_exp, ulp_half_error, result);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PARSE_H */
diff --git a/external/grisu3/grisu3_print.h b/external/grisu3/grisu3_print.h
new file mode 100644
index 0000000..d748408
--- /dev/null
+++ b/external/grisu3/grisu3_print.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Extracted from MathGeoLib.
+ *
+ * mikkelfj:
+ * - Fixed final output when printing single digit negative exponent to
+ * have leading zero (important for JSON).
+ * - Changed formatting to prefer 0.012 over 1.2e-2.
+ *
+ * Large portions of the original grisu3.c file have been moved to
+ * grisu3_math.h, the rest is placed here.
+ *
+ * See also comments in grisu3_math.h.
+ *
+ * MathGeoLib grisu3.c comment:
+ *
+ * This file is part of an implementation of the "grisu3" double to string
+ * conversion algorithm described in the research paper
+ *
+ * "Printing Floating-Point Numbers Quickly And Accurately with Integers"
+ * by Florian Loitsch, available at
+ * http://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf
+ */
+
+#ifndef GRISU3_PRINT_H
+#define GRISU3_PRINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h> /* sprintf, only needed for fallback printing */
+#include <assert.h> /* assert */
+
+#include "grisu3_math.h"
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_print_double_is_defined 1
+
+/*
+ * We do not have an exact definition, but empirically the output is up
+ * to 23 characters. There is some math ensuring it stays bounded,
+ * roughly 18 digits plus an exponent.
+ * This max should be a safe buffer size for printing, including the zero term.
+ */
+#define GRISU3_PRINT_MAX 30
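+
+/*
+ * As a rough sanity check (not an exact bound): a sign, up to 17
+ * significant digits, a decimal point and a five character exponent such
+ * as e-308 come to about 25 bytes including the zero terminator, and the
+ * NaN(<16 hex digits>) form below is 22 bytes, so 30 leaves some slack.
+ */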
+
+static int grisu3_round_weed(char *buffer, int len, uint64_t wp_W, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t ulp)
+{
+ uint64_t wp_Wup = wp_W - ulp;
+ uint64_t wp_Wdown = wp_W + ulp;
+ while(rest < wp_Wup && delta - rest >= ten_kappa
+ && (rest + ten_kappa < wp_Wup || wp_Wup - rest >= rest + ten_kappa - wp_Wup))
+ {
+ --buffer[len-1];
+ rest += ten_kappa;
+ }
+ if (rest < wp_Wdown && delta - rest >= ten_kappa
+ && (rest + ten_kappa < wp_Wdown || wp_Wdown - rest > rest + ten_kappa - wp_Wdown))
+ return 0;
+
+ return 2*ulp <= rest && rest <= delta - 4*ulp;
+}
+
+static int grisu3_digit_gen(grisu3_diy_fp_t low, grisu3_diy_fp_t w, grisu3_diy_fp_t high, char *buffer, int *length, int *kappa)
+{
+ uint64_t unit = 1;
+ grisu3_diy_fp_t too_low = { low.f - unit, low.e };
+ grisu3_diy_fp_t too_high = { high.f + unit, high.e };
+ grisu3_diy_fp_t unsafe_interval = grisu3_diy_fp_minus(too_high, too_low);
+ grisu3_diy_fp_t one = { 1ULL << -w.e, w.e };
+ uint32_t p1 = (uint32_t)(too_high.f >> -one.e);
+ uint64_t p2 = too_high.f & (one.f - 1);
+ uint32_t div;
+ *kappa = grisu3_largest_pow10(p1, GRISU3_DIY_FP_FRACT_SIZE + one.e, &div);
+ *length = 0;
+
+ while(*kappa > 0)
+ {
+ uint64_t rest;
+ char digit = (char)(p1 / div);
+ buffer[*length] = '0' + digit;
+ ++*length;
+ p1 %= div;
+ --*kappa;
+ rest = ((uint64_t)p1 << -one.e) + p2;
+ if (rest < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f, unsafe_interval.f, rest, (uint64_t)div << -one.e, unit);
+ div /= 10;
+ }
+
+ for(;;)
+ {
+ char digit;
+ p2 *= 10;
+ unit *= 10;
+ unsafe_interval.f *= 10;
+ /* Integer division by one. */
+ digit = (char)(p2 >> -one.e);
+ buffer[*length] = '0' + digit;
+ ++*length;
+ p2 &= one.f - 1; /* Modulo by one. */
+ --*kappa;
+ if (p2 < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f * unit, unsafe_interval.f, p2, one.f, unit);
+ }
+}
+
+static int grisu3(double v, char *buffer, int *length, int *d_exp)
+{
+ int mk, kappa, success;
+ grisu3_diy_fp_t dfp = grisu3_cast_diy_fp_from_double(v);
+ grisu3_diy_fp_t w = grisu3_diy_fp_normalize(dfp);
+
+ /* normalize boundaries */
+ grisu3_diy_fp_t t = { (dfp.f << 1) + 1, dfp.e - 1 };
+ grisu3_diy_fp_t b_plus = grisu3_diy_fp_normalize(t);
+ grisu3_diy_fp_t b_minus;
+ grisu3_diy_fp_t c_mk; /* Cached power of ten: 10^-k */
+ uint64_t u64 = grisu3_cast_uint64_from_double(v);
+ assert(v > 0 && v <= 1.7976931348623157e308); /* Grisu only handles strictly positive finite numbers. */
+ if (!(u64 & GRISU3_D64_FRACT_MASK) && (u64 & GRISU3_D64_EXP_MASK) != 0) { b_minus.f = (dfp.f << 2) - 1; b_minus.e = dfp.e - 2;} /* lower boundary is closer? */
+ else { b_minus.f = (dfp.f << 1) - 1; b_minus.e = dfp.e - 1; }
+ b_minus.f = b_minus.f << (b_minus.e - b_plus.e);
+ b_minus.e = b_plus.e;
+
+ mk = grisu3_diy_fp_cached_pow(GRISU3_MIN_TARGET_EXP - GRISU3_DIY_FP_FRACT_SIZE - w.e, &c_mk);
+
+ w = grisu3_diy_fp_multiply(w, c_mk);
+ b_minus = grisu3_diy_fp_multiply(b_minus, c_mk);
+ b_plus = grisu3_diy_fp_multiply(b_plus, c_mk);
+
+ success = grisu3_digit_gen(b_minus, w, b_plus, buffer, length, &kappa);
+ *d_exp = kappa - mk;
+ return success;
+}
+
+static int grisu3_i_to_str(int val, char *str)
+{
+ int len, i;
+ char *s;
+ char *begin = str;
+ if (val < 0) { *str++ = '-'; val = -val; }
+ s = str;
+
+ for(;;)
+ {
+ int ni = val / 10;
+ int digit = val - ni*10;
+ *s++ = (char)('0' + digit);
+ if (ni == 0)
+ break;
+ val = ni;
+ }
+ *s = '\0';
+ len = (int)(s - str);
+ for(i = 0; i < len/2; ++i)
+ {
+ char ch = str[i];
+ str[i] = str[len-1-i];
+ str[len-1-i] = ch;
+ }
+
+ return (int)(s - begin);
+}
+
+static int grisu3_print_nan(uint64_t v, char *dst)
+{
+ static char hexdigits[16] = "0123456789ABCDEF";
+ int i = 0;
+
+ dst[0] = 'N';
+ dst[1] = 'a';
+ dst[2] = 'N';
+ dst[3] = '(';
+ dst[20] = ')';
+ dst[21] = '\0';
+ dst += 4;
+ for (i = 15; i >= 0; --i) {
+ dst[i] = hexdigits[v & 0x0F];
+ v >>= 4;
+ }
+ return 21;
+}
+
+static int grisu3_print_double(double v, char *dst)
+{
+ int d_exp, len, success, decimals, i;
+ uint64_t u64 = grisu3_cast_uint64_from_double(v);
+ char *s2 = dst;
+ assert(dst);
+
+ /* Prehandle NaNs */
+ if ((u64 << 1) > 0xFFE0000000000000ULL) return grisu3_print_nan(u64, dst);
+ /* Prehandle negative values. */
+ if ((u64 & GRISU3_D64_SIGN) != 0) { *s2++ = '-'; v = -v; u64 ^= GRISU3_D64_SIGN; }
+ /* Prehandle zero. */
+ if (!u64) { *s2++ = '0'; *s2 = '\0'; return (int)(s2 - dst); }
+ /* Prehandle infinity. */
+ if (u64 == GRISU3_D64_EXP_MASK) { *s2++ = 'i'; *s2++ = 'n'; *s2++ = 'f'; *s2 = '\0'; return (int)(s2 - dst); }
+
+ success = grisu3(v, s2, &len, &d_exp);
+ /* If grisu3 was not able to convert the number to a string, then use old sprintf (suboptimal). */
+ if (!success) return sprintf(s2, "%.17g", v) + (int)(s2 - dst);
+
+ /* We now have an integer string of form "151324135" and a base-10 exponent for that number. */
+ /* Next, decide the best presentation for that string by whether to use a decimal point, or the scientific exponent notation 'e'. */
+ /* We don't pick the absolute shortest representation, but pick a balance between readability and shortness, e.g. */
+ /* 1.545056189557677e-308 could be represented in a shorter form */
+ /* 1545056189557677e-323 but that would be somewhat unreadable. */
+ decimals = GRISU3_MIN(-d_exp, GRISU3_MAX(1, len-1));
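+ /*
+ * Illustrative example: for 1234.34 grisu3 typically yields the digits
+ * "123434" with d_exp = -2; decimals then becomes 2, and the decimal
+ * point branch below rewrites the buffer as "1234.34".
+ */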
+
+ /* mikkelfj:
+ * fix zero prefix .1 => 0.1, important for JSON export.
+ * prefer unscientific notation at same length:
+ * -1.2345e-4 over -1.00012345,
+ * -1.0012345 over -1.2345e-3
+ */
+ if (d_exp < 0 && (len + d_exp) > -3 && len <= -d_exp)
+ {
+ /* mikkelfj: fix zero prefix .1 => 0.1, and short exponents 1.3e-2 => 0.013. */
+ memmove(s2 + 2 - d_exp - len, s2, (size_t)len);
+ s2[0] = '0';
+ s2[1] = '.';
+ for (i = 2; i < 2-d_exp-len; ++i) s2[i] = '0';
+ len += i;
+ }
+ else if (d_exp < 0 && len > 1) /* Add decimal point? */
+ {
+ for(i = 0; i < decimals; ++i) s2[len-i] = s2[len-i-1];
+ s2[len++ - decimals] = '.';
+ d_exp += decimals;
+ /* Need scientific notation as well? */
+ if (d_exp != 0) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
+ }
+ /* Add scientific notation? */
+ else if (d_exp < 0 || d_exp > 2) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
+ /* Add zeroes instead of scientific notation? */
+ else if (d_exp > 0) { while(d_exp-- > 0) s2[len++] = '0'; }
+ s2[len] = '\0'; /* grisu3 doesn't null terminate, so ensure termination. */
+ return (int)(s2+len-dst);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PRINT_H */
diff --git a/external/grisu3/grisu3_test.c b/external/grisu3/grisu3_test.c
new file mode 100644
index 0000000..930e027
--- /dev/null
+++ b/external/grisu3/grisu3_test.c
@@ -0,0 +1,141 @@
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "grisu3_parse.h"
+#include "grisu3_print.h"
+
+#define TEST(x, s) do { \
+ if (!(x)) { \
+ fprintf(stderr, \
+ "fail: %s\n" \
+ " input: %s\n" \
+ " expected: %.17g\n" \
+ " got: %.17g\n" \
+ " binary xor: 0x%016"PRId64"\n", \
+ s, buf, expect, v, (a ^ b)); \
+ return 1; \
+ } \
+ } while (0)
+
+static int test_parse_double(char *buf)
+{
+ const char *k, *end;
+ double v, expect;
+ uint64_t a = 0, b = 0;
+ int len = strlen(buf);
+
+ end = buf + len;
+
+ expect = strtod(buf, 0);
+ /* Include '\0' in bytes being parsed to make strtod safe. */
+ k = grisu3_parse_double(buf, len, &v);
+
+ /* Make sure we parsed and accepted everything. */
+ TEST(k == end, "didn't parse to end");
+
+ a = grisu3_cast_uint64_from_double(expect);
+ b = grisu3_cast_uint64_from_double(v);
+
+#ifdef GRISU3_PARSE_ALLOW_ERROR
+ /*
+ * Just where exponent wraps, this assumption will be incorrect.
+ * TODO: need next higher double function.
+ */
+ TEST(a - b <= 1, "binary representation differs by more than lsb");
+#else
+ /* Binary comparison should match. */
+ TEST(expect == v, "double representation differs");
+ TEST(a == b, "binary representation differs");
+#endif
+
+#if 0
+ /* This will print the test data also when correct. */
+ TEST(0, "test case passed, just debugging");
+#endif
+
+ return 0;
+}
+
+/*
+ * We currently do not test grisu3_print_double because
+ * it is a direct port of dtoa_grisu3 from grisu3.c
+ * which presumably has been tested in MathGeoLib.
+ *
+ * grisu3_parse_double is a new implementation.
+ */
+int test_suite()
+{
+ char buf[50];
+ int fail = 0;
+
+ fail += test_parse_double("1.23434");
+ fail += test_parse_double("1234.34");
+ fail += test_parse_double("1234.34e4");
+ fail += test_parse_double("1234.34e-4");
+ fail += test_parse_double("1.23434E+4");
+ fail += test_parse_double("3.2897984798741413E+194");
+ fail += test_parse_double("-3.2897984798741413E-194");
+
+ sprintf(buf, "3289798479874141.314124124128497098e109");
+ fail += test_parse_double(buf);
+ sprintf(buf, "3289798479874141.314124124128497098e209");
+ fail += test_parse_double(buf);
+ sprintf(buf, "-3289798479874141.314124124128497098e209");
+ fail += test_parse_double(buf);
+ sprintf(buf, "3289798479874141.314124124128497098e+209");
+ fail += test_parse_double(buf);
+ sprintf(buf, "-3289798479874141.314124124128497098e-209");
+ fail += test_parse_double(buf);
+
+ return fail;
+}
+
+void example()
+{
+ double v;
+ const char *buf = "1234.34e-4";
+ const char *x, *end;
+ char result_buf[50];
+ int len;
+
+ fprintf(stderr, "grisu3_parse_double example:\n parsing '%s' as double\n", buf);
+ /* A non-numeric terminator (e.g. '\0') is required to ensure strtod fallback is safe. */
+ len = strlen(buf);
+ end = buf + len;
+ x = grisu3_parse_double(buf, len, &v);
+ if (x == 0) {
+ fprintf(stderr, "syntax or range error\n");
+ } else if (x == buf) {
+ fprintf(stderr, "parse double failed\n");
+ } else if (x != end) {
+ fprintf(stderr, "parse double did not read everything\n");
+ } else {
+ fprintf(stderr, "got: %.17g\n", v);
+ }
+ /*
+ * TODO: with the current example: the input "0.123434" is printed
+ * as "1.23434e-1" which is sub-optimal and different from sprintf.
+ *
+ * This is not the grisu3 algorithm but a post formatting step
+ * in grisu3_print_double (originally dtoa_grisu) and may be a bug
+ * in the logic choosing the best print format.
+ * sprintf "%.17g" and "%g" both print as "0.123434"
+ */
+ fprintf(stderr, "grisu3_print_double example:\n printing %g\n", v);
+ grisu3_print_double(v, result_buf);
+ fprintf(stderr, "got: %s\n", result_buf);
+}
+
+int main()
+{
+ example();
+ fprintf(stderr, "running tests\n");
+ if (test_suite()) {
+ fprintf(stderr, "GRISU3 PARSE TEST FAILED\n");
+ return -1;
+ } else {
+ fprintf(stderr, "GRISU3 PARSE TEST PASSED\n");
+ return 0;
+ }
+}
diff --git a/external/grisu3/grisu3_test_dblcnv.c b/external/grisu3/grisu3_test_dblcnv.c
new file mode 100644
index 0000000..f0e98cc
--- /dev/null
+++ b/external/grisu3/grisu3_test_dblcnv.c
@@ -0,0 +1,482 @@
+/*
+ * Test cases from Google's Double Conversion library
+ *
+ * https://github.com/google/double-conversion/blob/master/test/cctest/test-strtod.cc
+ *
+ * Added extra tests for grisu parse print roundtrip and negative sign.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+
+#include "grisu3_print.h"
+#include "grisu3_parse.h"
+
+#define BEGIN_TEST(name) int test_ ## name() { \
+ int fail = 0; char *id = #name; double v; char *vector; \
+ char buf[1001];
+
+#define END_TEST() return fail; }
+
+
+void check_double(double x1, double x2, char *id, int line, int *fail)
+{
+ char tmp[50];
+ const char *k;
+ int n;
+ int failed = 0;
+ double v;
+
+ if (x1 != x2) {
+ failed = 1;
+ fprintf(stderr, "%d: fail (%s): %.17g != %.17g\n",
+ line, id, x1, x2);
+ } else {
+#if 1
+ n = grisu3_print_double(x1, tmp);
+ if (n >= GRISU3_PRINT_MAX) { /* Leave space for zterm. */
+ failed = 1;
+ fprintf(stderr, "%d: fail (%s): print length exceeded max: %d, input: %.17g\n",
+ line, id, n, x1);
+ } else if ((int)strlen(tmp) != n) {
+ failed = 1;
+ fprintf(stderr, "%d: fail (%s): print length does not match strlen of output, input: %.17g, got: %s\n",
+ line, id, x1, tmp);
+ } else if (!isinf(x1)) {
+ /* We do expect print/parse to handle inf. */
+ k = grisu3_parse_double(tmp, n, &v);
+ if (k == 0 || k == tmp) {
+ failed = 1;
+ fprintf(stderr, "%d: fail (%s): roundtrip parse failed "
+ "input: %g, printed value %s\n",
+ line, id, x1, tmp);
+ } else if (x1 != v) {
+ failed = 1;
+ fprintf(stderr, "%d: fail (%s): print/parse roundtrip mismatch for "
+ "input: %.17g, got %.17g\n",
+ line, id, x1, v);
+ }
+ }
+#endif
+ }
+ *fail += failed;
+}
+
+#define CHECK_EQ(v1, v2) check_double((v1), (v2), id, __LINE__, &fail)
+
+#define StringToVector(f) f
+
+#define Strtod(f, e) (sprintf(buf, "%se%d", f, e), \
+ grisu3_parse_double(buf, strlen(buf), &v), v)
+
+#define StrtodChar(f, e) (sprintf(buf, "%se%d", f, e), \
+ grisu3_parse_double(buf, strlen(buf), &v), v)
+
+#define double_infinity grisu3_double_infinity
+
+BEGIN_TEST(Strtod)
+ vector = StringToVector("0");
+ CHECK_EQ(0.0, Strtod(vector, 1));
+ CHECK_EQ(0.0, Strtod(vector, 2));
+ CHECK_EQ(0.0, Strtod(vector, -2));
+ CHECK_EQ(0.0, Strtod(vector, -999));
+ CHECK_EQ(0.0, Strtod(vector, +999));
+
+ vector = StringToVector("1");
+ CHECK_EQ(1.0, Strtod(vector, 0));
+ CHECK_EQ(10.0, Strtod(vector, 1));
+ CHECK_EQ(100.0, Strtod(vector, 2));
+ CHECK_EQ(1e20, Strtod(vector, 20));
+ CHECK_EQ(1e22, Strtod(vector, 22));
+ CHECK_EQ(1e23, Strtod(vector, 23));
+
+ CHECK_EQ(1e35, Strtod(vector, 35));
+ CHECK_EQ(1e36, Strtod(vector, 36));
+ CHECK_EQ(1e37, Strtod(vector, 37));
+ CHECK_EQ(1e-1, Strtod(vector, -1));
+ CHECK_EQ(1e-2, Strtod(vector, -2));
+ CHECK_EQ(1e-5, Strtod(vector, -5));
+ CHECK_EQ(1e-20, Strtod(vector, -20));
+ CHECK_EQ(1e-22, Strtod(vector, -22));
+ CHECK_EQ(1e-23, Strtod(vector, -23));
+ CHECK_EQ(1e-25, Strtod(vector, -25));
+ CHECK_EQ(1e-39, Strtod(vector, -39));
+
+ vector = StringToVector("2");
+ CHECK_EQ(2.0, Strtod(vector, 0));
+ CHECK_EQ(20.0, Strtod(vector, 1));
+ CHECK_EQ(200.0, Strtod(vector, 2));
+ CHECK_EQ(2e20, Strtod(vector, 20));
+ CHECK_EQ(2e22, Strtod(vector, 22));
+ CHECK_EQ(2e23, Strtod(vector, 23));
+ CHECK_EQ(2e35, Strtod(vector, 35));
+ CHECK_EQ(2e36, Strtod(vector, 36));
+ CHECK_EQ(2e37, Strtod(vector, 37));
+ CHECK_EQ(2e-1, Strtod(vector, -1));
+ CHECK_EQ(2e-2, Strtod(vector, -2));
+ CHECK_EQ(2e-5, Strtod(vector, -5));
+ CHECK_EQ(2e-20, Strtod(vector, -20));
+ CHECK_EQ(2e-22, Strtod(vector, -22));
+ CHECK_EQ(2e-23, Strtod(vector, -23));
+ CHECK_EQ(2e-25, Strtod(vector, -25));
+ CHECK_EQ(2e-39, Strtod(vector, -39));
+
+ vector = StringToVector("9");
+ CHECK_EQ(9.0, Strtod(vector, 0));
+ CHECK_EQ(90.0, Strtod(vector, 1));
+ CHECK_EQ(900.0, Strtod(vector, 2));
+ CHECK_EQ(9e20, Strtod(vector, 20));
+ CHECK_EQ(9e22, Strtod(vector, 22));
+ CHECK_EQ(9e23, Strtod(vector, 23));
+ CHECK_EQ(9e35, Strtod(vector, 35));
+ CHECK_EQ(9e36, Strtod(vector, 36));
+ CHECK_EQ(9e37, Strtod(vector, 37));
+ CHECK_EQ(9e-1, Strtod(vector, -1));
+ CHECK_EQ(9e-2, Strtod(vector, -2));
+ CHECK_EQ(9e-5, Strtod(vector, -5));
+ CHECK_EQ(9e-20, Strtod(vector, -20));
+ CHECK_EQ(9e-22, Strtod(vector, -22));
+ CHECK_EQ(9e-23, Strtod(vector, -23));
+ CHECK_EQ(9e-25, Strtod(vector, -25));
+ CHECK_EQ(9e-39, Strtod(vector, -39));
+
+ vector = StringToVector("12345");
+ CHECK_EQ(12345.0, Strtod(vector, 0));
+ CHECK_EQ(123450.0, Strtod(vector, 1));
+ CHECK_EQ(1234500.0, Strtod(vector, 2));
+ CHECK_EQ(12345e20, Strtod(vector, 20));
+ CHECK_EQ(12345e22, Strtod(vector, 22));
+ CHECK_EQ(12345e23, Strtod(vector, 23));
+ CHECK_EQ(12345e30, Strtod(vector, 30));
+ CHECK_EQ(12345e31, Strtod(vector, 31));
+ CHECK_EQ(12345e32, Strtod(vector, 32));
+ CHECK_EQ(12345e35, Strtod(vector, 35));
+ CHECK_EQ(12345e36, Strtod(vector, 36));
+ CHECK_EQ(12345e37, Strtod(vector, 37));
+ CHECK_EQ(12345e-1, Strtod(vector, -1));
+ CHECK_EQ(12345e-2, Strtod(vector, -2));
+ CHECK_EQ(12345e-5, Strtod(vector, -5));
+ CHECK_EQ(12345e-20, Strtod(vector, -20));
+ CHECK_EQ(12345e-22, Strtod(vector, -22));
+ CHECK_EQ(12345e-23, Strtod(vector, -23));
+ CHECK_EQ(12345e-25, Strtod(vector, -25));
+ CHECK_EQ(12345e-39, Strtod(vector, -39));
+
+ vector = StringToVector("12345678901234");
+ CHECK_EQ(12345678901234.0, Strtod(vector, 0));
+ CHECK_EQ(123456789012340.0, Strtod(vector, 1));
+ CHECK_EQ(1234567890123400.0, Strtod(vector, 2));
+ CHECK_EQ(12345678901234e20, Strtod(vector, 20));
+ CHECK_EQ(12345678901234e22, Strtod(vector, 22));
+ CHECK_EQ(12345678901234e23, Strtod(vector, 23));
+ CHECK_EQ(12345678901234e30, Strtod(vector, 30));
+ CHECK_EQ(12345678901234e31, Strtod(vector, 31));
+ CHECK_EQ(12345678901234e32, Strtod(vector, 32));
+ CHECK_EQ(12345678901234e35, Strtod(vector, 35));
+ CHECK_EQ(12345678901234e36, Strtod(vector, 36));
+ CHECK_EQ(12345678901234e37, Strtod(vector, 37));
+ CHECK_EQ(12345678901234e-1, Strtod(vector, -1));
+ CHECK_EQ(12345678901234e-2, Strtod(vector, -2));
+ CHECK_EQ(12345678901234e-5, Strtod(vector, -5));
+ CHECK_EQ(12345678901234e-20, Strtod(vector, -20));
+ CHECK_EQ(12345678901234e-22, Strtod(vector, -22));
+ CHECK_EQ(12345678901234e-23, Strtod(vector, -23));
+ CHECK_EQ(12345678901234e-25, Strtod(vector, -25));
+ CHECK_EQ(12345678901234e-39, Strtod(vector, -39));
+
+ vector = StringToVector("123456789012345");
+ CHECK_EQ(123456789012345.0, Strtod(vector, 0));
+ CHECK_EQ(1234567890123450.0, Strtod(vector, 1));
+ CHECK_EQ(12345678901234500.0, Strtod(vector, 2));
+ CHECK_EQ(123456789012345e20, Strtod(vector, 20));
+ CHECK_EQ(123456789012345e22, Strtod(vector, 22));
+ CHECK_EQ(123456789012345e23, Strtod(vector, 23));
+ CHECK_EQ(123456789012345e35, Strtod(vector, 35));
+ CHECK_EQ(123456789012345e36, Strtod(vector, 36));
+ CHECK_EQ(123456789012345e37, Strtod(vector, 37));
+ CHECK_EQ(123456789012345e39, Strtod(vector, 39));
+ CHECK_EQ(123456789012345e-1, Strtod(vector, -1));
+ CHECK_EQ(123456789012345e-2, Strtod(vector, -2));
+ CHECK_EQ(123456789012345e-5, Strtod(vector, -5));
+ CHECK_EQ(123456789012345e-20, Strtod(vector, -20));
+ CHECK_EQ(123456789012345e-22, Strtod(vector, -22));
+ CHECK_EQ(123456789012345e-23, Strtod(vector, -23));
+ CHECK_EQ(123456789012345e-25, Strtod(vector, -25));
+ CHECK_EQ(123456789012345e-39, Strtod(vector, -39));
+ CHECK_EQ(0.0, StrtodChar("0", 12345));
+
+ CHECK_EQ(0.0, StrtodChar("", 1324));
+ CHECK_EQ(0.0, StrtodChar("000000000", 123));
+ CHECK_EQ(0.0, StrtodChar("2", -324));
+ CHECK_EQ(4e-324, StrtodChar("3", -324));
+
+ // It would be more readable to put non-zero literals on the left side (i.e.
+ // CHECK_EQ(1e-325, StrtodChar("1", -325))), but then Gcc complains that
+ // they are truncated to zero.
+ CHECK_EQ(0.0, StrtodChar("1", -325));
+ CHECK_EQ(0.0, StrtodChar("1", -325));
+ CHECK_EQ(0.0, StrtodChar("20000", -328));
+ CHECK_EQ(40000e-328, StrtodChar("30000", -328));
+ CHECK_EQ(0.0, StrtodChar("10000", -329));
+ CHECK_EQ(0.0, StrtodChar("90000", -329));
+ CHECK_EQ(0.0, StrtodChar("000000001", -325));
+ CHECK_EQ(0.0, StrtodChar("000000001", -325));
+ CHECK_EQ(0.0, StrtodChar("0000000020000", -328));
+ CHECK_EQ(40000e-328, StrtodChar("00000030000", -328));
+ CHECK_EQ(0.0, StrtodChar("0000000010000", -329));
+ CHECK_EQ(0.0, StrtodChar("0000000090000", -329));
+
+
+ // It would be more readable to put the literals (and not double_infinity)
+ // on the left side (i.e. CHECK_EQ(1e309, StrtodChar("1", 309))), but then Gcc
+ // complains that the floating constant exceeds range of 'double'.
+
+ CHECK_EQ(double_infinity, StrtodChar("1", 309));
+
+ CHECK_EQ(1e308, StrtodChar("1", 308));
+ CHECK_EQ(1234e305, StrtodChar("1234", 305));
+ CHECK_EQ(1234e304, StrtodChar("1234", 304));
+
+ CHECK_EQ(double_infinity, StrtodChar("18", 307));
+ CHECK_EQ(17e307, StrtodChar("17", 307));
+
+ CHECK_EQ(double_infinity, StrtodChar("0000001", 309));
+
+ CHECK_EQ(1e308, StrtodChar("00000001", 308));
+
+ CHECK_EQ(1234e305, StrtodChar("00000001234", 305));
+ CHECK_EQ(1234e304, StrtodChar("000000001234", 304));
+ CHECK_EQ(double_infinity, StrtodChar("0000000018", 307));
+ CHECK_EQ(17e307, StrtodChar("0000000017", 307));
+ CHECK_EQ(double_infinity, StrtodChar("1000000", 303));
+ CHECK_EQ(1e308, StrtodChar("100000", 303));
+ CHECK_EQ(1234e305, StrtodChar("123400000", 300));
+ CHECK_EQ(1234e304, StrtodChar("123400000", 299));
+ CHECK_EQ(double_infinity, StrtodChar("180000000", 300));
+ CHECK_EQ(17e307, StrtodChar("170000000", 300));
+ CHECK_EQ(double_infinity, StrtodChar("00000001000000", 303));
+ CHECK_EQ(1e308, StrtodChar("000000000000100000", 303));
+ CHECK_EQ(1234e305, StrtodChar("00000000123400000", 300));
+ CHECK_EQ(1234e304, StrtodChar("0000000123400000", 299));
+ CHECK_EQ(double_infinity, StrtodChar("00000000180000000", 300));
+ CHECK_EQ(17e307, StrtodChar("00000000170000000", 300));
+ CHECK_EQ(1.7976931348623157E+308, StrtodChar("17976931348623157", 292));
+ CHECK_EQ(1.7976931348623158E+308, StrtodChar("17976931348623158", 292));
+ CHECK_EQ(double_infinity, StrtodChar("17976931348623159", 292));
+
+ // The following number is the result of 89255.0/1e-22. Both floating-point
+ // numbers can be accurately represented with doubles. However on Linux,x86
+ // the floating-point stack is set to 80bits and the double-rounding
+ // introduces an error.
+ CHECK_EQ(89255e-22, StrtodChar("89255", -22));
+
+ // Some random values.
+ CHECK_EQ(358416272e-33, StrtodChar("358416272", -33));
+ CHECK_EQ(104110013277974872254e-225,
+ StrtodChar("104110013277974872254", -225));
+
+ CHECK_EQ(123456789e108, StrtodChar("123456789", 108));
+ CHECK_EQ(123456789e109, StrtodChar("123456789", 109));
+ CHECK_EQ(123456789e110, StrtodChar("123456789", 110));
+ CHECK_EQ(123456789e111, StrtodChar("123456789", 111));
+ CHECK_EQ(123456789e112, StrtodChar("123456789", 112));
+ CHECK_EQ(123456789e113, StrtodChar("123456789", 113));
+ CHECK_EQ(123456789e114, StrtodChar("123456789", 114));
+ CHECK_EQ(123456789e115, StrtodChar("123456789", 115));
+
+ CHECK_EQ(1234567890123456789012345e108,
+ StrtodChar("1234567890123456789012345", 108));
+ CHECK_EQ(1234567890123456789012345e109,
+ StrtodChar("1234567890123456789012345", 109));
+ CHECK_EQ(1234567890123456789012345e110,
+ StrtodChar("1234567890123456789012345", 110));
+ CHECK_EQ(1234567890123456789012345e111,
+ StrtodChar("1234567890123456789012345", 111));
+ CHECK_EQ(1234567890123456789012345e112,
+ StrtodChar("1234567890123456789012345", 112));
+ CHECK_EQ(1234567890123456789012345e113,
+ StrtodChar("1234567890123456789012345", 113));
+ CHECK_EQ(1234567890123456789012345e114,
+ StrtodChar("1234567890123456789012345", 114));
+ CHECK_EQ(1234567890123456789012345e115,
+ StrtodChar("1234567890123456789012345", 115));
+ CHECK_EQ(1234567890123456789052345e108,
+ StrtodChar("1234567890123456789052345", 108));
+ CHECK_EQ(1234567890123456789052345e109,
+ StrtodChar("1234567890123456789052345", 109));
+ CHECK_EQ(1234567890123456789052345e110,
+ StrtodChar("1234567890123456789052345", 110));
+ CHECK_EQ(1234567890123456789052345e111,
+ StrtodChar("1234567890123456789052345", 111));
+ CHECK_EQ(1234567890123456789052345e112,
+ StrtodChar("1234567890123456789052345", 112));
+ CHECK_EQ(1234567890123456789052345e113,
+ StrtodChar("1234567890123456789052345", 113));
+ CHECK_EQ(1234567890123456789052345e114,
+ StrtodChar("1234567890123456789052345", 114));
+ CHECK_EQ(1234567890123456789052345e115,
+ StrtodChar("1234567890123456789052345", 115));
+ CHECK_EQ(5.445618932859895e-255,
+ StrtodChar("5445618932859895362967233318697132813618813095743952975"
+ "4392982234069699615600475529427176366709107287468930197"
+ "8628345413991790019316974825934906752493984055268219809"
+ "5012176093045431437495773903922425632551857520884625114"
+ "6241265881735209066709685420744388526014389929047617597"
+ "0302268848374508109029268898695825171158085457567481507"
+ "4162979705098246243690189880319928315307816832576838178"
+ "2563074014542859888710209237525873301724479666744537857"
+ "9026553346649664045621387124193095870305991178772256504"
+ "4368663670643970181259143319016472430928902201239474588"
+ "1392338901353291306607057623202353588698746085415097902"
+ "6640064319118728664842287477491068264828851624402189317"
+ "2769161449825765517353755844373640588822904791244190695"
+ "2998382932630754670573838138825217065450843010498555058"
+ "88186560731", -1035));
+
+ // Boundary cases. Boundaries themselves should round to even.
+ //
+ // 0x1FFFFFFFFFFFF * 2^3 = 72057594037927928
+ // next: 72057594037927936
+ // boundary: 72057594037927932 should round up.
+ CHECK_EQ(72057594037927928.0, StrtodChar("72057594037927928", 0));
+ CHECK_EQ(72057594037927936.0, StrtodChar("72057594037927936", 0));
+ CHECK_EQ(72057594037927936.0, StrtodChar("72057594037927932", 0));
+ CHECK_EQ(72057594037927928.0, StrtodChar("7205759403792793199999", -5));
+ CHECK_EQ(72057594037927936.0, StrtodChar("7205759403792793200001", -5));
+
+ // 0x1FFFFFFFFFFFF * 2^10 = 9223372036854774784
+ // next: 9223372036854775808
+ // boundary: 9223372036854775296 should round up.
+ CHECK_EQ(9223372036854774784.0, StrtodChar("9223372036854774784", 0));
+ CHECK_EQ(9223372036854775808.0, StrtodChar("9223372036854775808", 0));
+ CHECK_EQ(9223372036854775808.0, StrtodChar("9223372036854775296", 0));
+
+ CHECK_EQ(9223372036854774784.0, StrtodChar("922337203685477529599999", -5));
+ CHECK_EQ(9223372036854775808.0, StrtodChar("922337203685477529600001", -5));
+
+ // 0x1FFFFFFFFFFFF * 2^50 = 10141204801825834086073718800384
+ // next: 10141204801825835211973625643008
+ // boundary: 10141204801825834649023672221696 should round up.
+ //
+ CHECK_EQ(10141204801825834086073718800384.0,
+ StrtodChar("10141204801825834086073718800384", 0));
+ CHECK_EQ(10141204801825835211973625643008.0,
+ StrtodChar("10141204801825835211973625643008", 0));
+ CHECK_EQ(10141204801825835211973625643008.0,
+ StrtodChar("10141204801825834649023672221696", 0));
+ CHECK_EQ(10141204801825834086073718800384.0,
+ StrtodChar("1014120480182583464902367222169599999", -5));
+ CHECK_EQ(10141204801825835211973625643008.0,
+ StrtodChar("1014120480182583464902367222169600001", -5));
+ // 0x1FFFFFFFFFFFF * 2^99 = 5708990770823838890407843763683279797179383808
+ // next: 5708990770823839524233143877797980545530986496
+ // boundary: 5708990770823839207320493820740630171355185152
+ // The boundary should round up.
+ CHECK_EQ(5708990770823838890407843763683279797179383808.0,
+ StrtodChar("5708990770823838890407843763683279797179383808", 0));
+ CHECK_EQ(5708990770823839524233143877797980545530986496.0,
+ StrtodChar("5708990770823839524233143877797980545530986496", 0));
+ CHECK_EQ(5708990770823839524233143877797980545530986496.0,
+ StrtodChar("5708990770823839207320493820740630171355185152", 0));
+ CHECK_EQ(5708990770823838890407843763683279797179383808.0,
+ StrtodChar("5708990770823839207320493820740630171355185151999", -3));
+ CHECK_EQ(5708990770823839524233143877797980545530986496.0,
+ StrtodChar("5708990770823839207320493820740630171355185152001", -3));
+
+ // The following test-cases got some public attention in early 2011 when they
+ // sent Java and PHP into an infinite loop.
+ CHECK_EQ(2.225073858507201e-308, StrtodChar("22250738585072011", -324));
+ CHECK_EQ(2.22507385850720138309e-308,
+ StrtodChar("22250738585072011360574097967091319759348195463516456480"
+ "23426109724822222021076945516529523908135087914149158913"
+ "03962110687008643869459464552765720740782062174337998814"
+ "10632673292535522868813721490129811224514518898490572223"
+ "07285255133155755015914397476397983411801999323962548289"
+ "01710708185069063066665599493827577257201576306269066333"
+ "26475653000092458883164330377797918696120494973903778297"
+ "04905051080609940730262937128958950003583799967207254304"
+ "36028407889577179615094551674824347103070260914462157228"
+ "98802581825451803257070188608721131280795122334262883686"
+ "22321503775666622503982534335974568884423900265498198385"
+ "48794829220689472168983109969836584681402285424333066033"
+ "98508864458040010349339704275671864433837704860378616227"
+ "71738545623065874679014086723327636718751", -1076));
+END_TEST()
+
+
+/* Non-google test */
+BEGIN_TEST(grisu3_print_double)
+ vector = "13";
+ CHECK_EQ(13e-2, Strtod(vector, -2));
+ CHECK_EQ(13e-3, Strtod(vector, -3));
+
+ vector = "-13";
+ CHECK_EQ(-13e-2, Strtod(vector, -2));
+ CHECK_EQ(-13e-3, Strtod(vector, -3));
+ vector = "-1";
+ CHECK_EQ(-1e-2, Strtod(vector, -2));
+ CHECK_EQ(-1e-3, Strtod(vector, -3));
+
+ CHECK_EQ(-1e1, StrtodChar("-1", 1));
+ CHECK_EQ(-1e+1, StrtodChar("-1", 1));
+ CHECK_EQ(-1e-0, StrtodChar("-1", -0));
+ CHECK_EQ(-1e-1, StrtodChar("-1", -1));
+ CHECK_EQ(-1e-2, StrtodChar("-1", -2));
+ CHECK_EQ(-1e-3, StrtodChar("-1", -3));
+ CHECK_EQ(-1e-4, StrtodChar("-1", -4));
+
+ CHECK_EQ(-12e1, StrtodChar("-12", 1));
+ CHECK_EQ(-12e+1, StrtodChar("-12", 1));
+ CHECK_EQ(-12e-0, StrtodChar("-12", -0));
+ CHECK_EQ(-12e-1, StrtodChar("-12", -1));
+ CHECK_EQ(-12e-2, StrtodChar("-12", -2));
+ CHECK_EQ(-12e-3, StrtodChar("-12", -3));
+ CHECK_EQ(-12e-4, StrtodChar("-12", -4));
+
+ CHECK_EQ(-123e1, StrtodChar("-123", 1));
+ CHECK_EQ(-123e+1, StrtodChar("-123", 1));
+ CHECK_EQ(-123e-0, StrtodChar("-123", -0));
+ CHECK_EQ(-123e-1, StrtodChar("-123", -1));
+ CHECK_EQ(-123e-2, StrtodChar("-123", -2));
+ CHECK_EQ(-123e-3, StrtodChar("-123", -3));
+ CHECK_EQ(-123e-4, StrtodChar("-123", -4));
+
+ CHECK_EQ(-1234e1, StrtodChar("-1234", 1));
+ CHECK_EQ(-1234e+1, StrtodChar("-1234", 1));
+ CHECK_EQ(-1234e-0, StrtodChar("-1234", -0));
+ CHECK_EQ(-1234e-1, StrtodChar("-1234", -1));
+ CHECK_EQ(-1234e-2, StrtodChar("-1234", -2));
+ CHECK_EQ(-1234e-3, StrtodChar("-1234", -3));
+ CHECK_EQ(-1234e-4, StrtodChar("-1234", -4));
+
+ CHECK_EQ(-12345e1, StrtodChar("-12345", 1));
+ CHECK_EQ(-12345e+1, StrtodChar("-12345", 1));
+ CHECK_EQ(-12345e-0, StrtodChar("-12345", -0));
+ CHECK_EQ(-12345e-1, StrtodChar("-12345", -1));
+ CHECK_EQ(-12345e-2, StrtodChar("-12345", -2));
+ CHECK_EQ(-12345e-3, StrtodChar("-12345", -3));
+ CHECK_EQ(-12345e-4, StrtodChar("-12345", -4));
+
+ CHECK_EQ(-12345e-5, StrtodChar("-12345", -5));
+ CHECK_EQ(-12345e-6, StrtodChar("-12345", -6));
+ CHECK_EQ(-12345e-7, StrtodChar("-12345", -7));
+ CHECK_EQ(-12345e-8, StrtodChar("-12345", -8));
+ CHECK_EQ(-12345e-9, StrtodChar("-12345", -9));
+ CHECK_EQ(-12345e-10, StrtodChar("-12345", -10));
+END_TEST()
+
+int main()
+{
+ int fail = 0;
+
+ fail += test_Strtod();
+ fail += test_grisu3_print_double();
+
+ if (fail) {
+ fprintf(stderr, "FAILURE\n");
+ return -1;
+ }
+ fprintf(stderr, "SUCCESS\n");
+ return 0;
+}
diff --git a/external/grisu3/test.sh b/external/grisu3/test.sh
new file mode 100755
index 0000000..1794fbb
--- /dev/null
+++ b/external/grisu3/test.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+mkdir -p build
+
+CC=cc
+
+$CC -g -Wall -Wextra $INCLUDE -I.. grisu3_test.c -lm -o build/grisu3_test_d
+$CC -DNDEBUG -Wall -Wextra -O2 $INCLUDE -I.. grisu3_test.c -lm -o build/grisu3_test
+echo "DEBUG:"
+build/grisu3_test_d
+echo "OPTIMIZED:"
+build/grisu3_test
+
+echo "running double conversion tests"
+./test_dblcnv.sh
diff --git a/external/grisu3/test_dblcnv.sh b/external/grisu3/test_dblcnv.sh
new file mode 100755
index 0000000..89f58f4
--- /dev/null
+++ b/external/grisu3/test_dblcnv.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+mkdir -p build
+
+CC=cc
+
+$CC -g -Wall -Wextra $INCLUDE -I.. grisu3_test_dblcnv.c -o build/grisu3_test_dblcnv_d
+$CC -DNDEBUG -Wall -Wextra -O2 $INCLUDE -I.. grisu3_test_dblcnv.c -o build/grisu3_test_dblcnv
+echo "DEBUG:"
+build/grisu3_test_dblcnv_d
+echo "OPTIMIZED:"
+build/grisu3_test_dblcnv
diff --git a/external/hash/.gitignore b/external/hash/.gitignore
new file mode 100644
index 0000000..a007fea
--- /dev/null
+++ b/external/hash/.gitignore
@@ -0,0 +1 @@
+build/*
diff --git a/external/hash/CMakeLists.txt b/external/hash/CMakeLists.txt
new file mode 100644
index 0000000..7b7d990
--- /dev/null
+++ b/external/hash/CMakeLists.txt
@@ -0,0 +1,38 @@
+cmake_minimum_required (VERSION 3.0.2)
+
+project (HashTest)
+
+SET(CMAKE_C_FLAGS_DEBUG "-g")
+SET(CMAKE_C_FLAGS_RELEASE "-O3 -DNDEBUG")
+
+add_executable (hash_test hash_test.c str_set.c token_map.c ht32.c ht64.c ht32rh.c ht64rh.c cmetrohash64.c)
+add_executable (hash_test_32 hash_test.c str_set.c token_map.c ht32.c ht64.c ht32rh.c ht64rh.c PMurHash.c)
+add_executable (hash_test_rh hash_test.c str_set.c token_map.c ht32.c ht64.c ht32rh.c ht64rh.c cmetrohash64.c)
+
+target_compile_definitions(hash_test_32 PRIVATE
+ -DHT_HASH_32)
+target_compile_definitions(hash_test_rh PRIVATE
+ -DSTR_SET_RH -DTOKEN_MAP_RH)
+
+add_executable (load_test load_test.c ptr_set.c)
+# robin hood hash table
+add_executable (load_test_rh load_test.c ptr_set.c)
+
+target_compile_definitions(load_test PRIVATE
+ -DPTR_SET_INT_HASH)
+target_compile_definitions(load_test_rh PRIVATE
+ -DPTR_SET_RH -DPTR_SET_INT_HASH)
+
+# default hash function
+add_executable (load_test_d load_test.c ptr_set.c cmetrohash64.c)
+add_executable (load_test_d_rh load_test.c ptr_set.c cmetrohash64.c)
+target_compile_definitions(load_test_d_rh PRIVATE
+ -DPTR_SET_RH)
+
+add_test(hash_test hash_test)
+add_test(hash_test_32 hash_test_32)
+add_test(hash_test_rh hash_test_rh)
+add_test(load_test load_test)
+add_test(load_test_rh load_test_rh)
+
+enable_testing()
diff --git a/external/hash/LICENSE b/external/hash/LICENSE
new file mode 100644
index 0000000..a561b5f
--- /dev/null
+++ b/external/hash/LICENSE
@@ -0,0 +1,28 @@
+This license applies to the content of the current directory.
+
+Some sources are externally provided - see respective file headers.
+All source is MIT or public domain with varying copyright.
+
+Unless otherwise stated, the following license applies:
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/external/hash/PMurHash.c b/external/hash/PMurHash.c
new file mode 100644
index 0000000..7284434
--- /dev/null
+++ b/external/hash/PMurHash.c
@@ -0,0 +1,334 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+/*-----------------------------------------------------------------------------
+
+If you want to understand the MurmurHash algorithm you would be much better
+off reading the original source. Just point your browser at:
+http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
+
+
+What this version provides?
+
+1. Progressive data feeding. Useful when the entire payload to be hashed
+does not fit in memory or when the data is streamed through the application.
+Also useful when hashing a number of strings with a common prefix. A partial
+hash of a prefix string can be generated and reused for each suffix string.
+
+2. Portability. Plain old C so that it should compile on any old compiler.
+Both CPU endian and access-alignment neutral, but avoiding inefficient code
+when possible depending on CPU capabilities.
+
+3. Drop in. I personally like nice self contained public domain code, making it
+easy to pilfer without loads of refactoring to work properly in the existing
+application code & makefile structure and mucking around with licence files.
+Just copy PMurHash.h and PMurHash.c and you're ready to go.
+
+
+How does it work?
+
+We can only process entire 32 bit chunks of input, except for the very end
+that may be shorter. So along with the partial hash we need to give back to
+the caller a carry containing up to 3 bytes that we were unable to process.
+This carry also needs to record the number of bytes the carry holds. I use
+the low 2 bits as a count (0..3) and the carry bytes are shifted into the
+high byte in stream order.
+
+To handle endianess I simply use a macro that reads a uint32_t and define
+that macro to be a direct read on little endian machines, a read and swap
+on big endian machines, or a byte-by-byte read if the endianess is unknown.
+
+-----------------------------------------------------------------------------*/
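+
+/*
+ * Usage sketch: feeding the payload in chunks should produce the same
+ * result as the all-at-once helper, e.g.
+ *
+ *   MH_UINT32 h = 0, carry = 0;  (h may be initialised to a seed)
+ *   PMurHash32_Process(&h, &carry, "hello", 5);
+ *   PMurHash32_Process(&h, &carry, "world", 5);
+ *   h = PMurHash32_Result(h, carry, 10);
+ *
+ * which should equal PMurHash32(0, "helloworld", 10).
+ */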
+
+
+#include "PMurHash.h"
+
+/* I used ugly type names in the header to avoid potential conflicts with
+ * application or system typedefs & defines. Since I'm not including any more
+ * headers below here I can rename these so that the code reads like C99 */
+#undef uint32_t
+#define uint32_t MH_UINT32
+#undef uint8_t
+#define uint8_t MH_UINT8
+
+/* MSVC warnings we choose to ignore */
+#if defined(_MSC_VER)
+ #pragma warning(disable: 4127) /* conditional expression is constant */
+#endif
+
+/*-----------------------------------------------------------------------------
+ * Endianess, misalignment capabilities and util macros
+ *
+ * The following 3 macros are defined in this section. The other macros defined
+ * are only needed to help derive these 3.
+ *
+ * READ_UINT32(x) Read a little endian unsigned 32-bit int
+ * UNALIGNED_SAFE Defined if READ_UINT32 works on non-word boundaries
+ * ROTL32(x,r) Rotate x left by r bits
+ */
+
+/* Convention is to define __BYTE_ORDER == to one of these values */
+#if !defined(__BIG_ENDIAN)
+ #define __BIG_ENDIAN 4321
+#endif
+#if !defined(__LITTLE_ENDIAN)
+ #define __LITTLE_ENDIAN 1234
+#endif
+
+/* I386 */
+#if defined(_M_IX86) || defined(__i386__) || defined(__i386) || defined(i386)
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #define UNALIGNED_SAFE
+#endif
+
+/* gcc 'may' define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ to 1 (Note the trailing __),
+ * or even _LITTLE_ENDIAN or _BIG_ENDIAN (Note the single _ prefix) */
+#if !defined(__BYTE_ORDER)
+ #if defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__==1 || defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN==1
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1 || defined(_BIG_ENDIAN) && _BIG_ENDIAN==1
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #endif
+#endif
+
+/* gcc (usually) defines xEL/EB macros for ARM and MIPS endianess */
+#if !defined(__BYTE_ORDER)
+ #if defined(__ARMEL__) || defined(__MIPSEL__)
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #endif
+ #if defined(__ARMEB__) || defined(__MIPSEB__)
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #endif
+#endif
+
+/* Now find best way we can to READ_UINT32 */
+#if __BYTE_ORDER==__LITTLE_ENDIAN
+ /* CPU endian matches murmurhash algorithm, so read 32-bit word directly */
+ #define READ_UINT32(ptr) (*((uint32_t*)(ptr)))
+#elif __BYTE_ORDER==__BIG_ENDIAN
+ /* TODO: Add additional cases below where a compiler provided bswap32 is available */
+ #if defined(__GNUC__) && (__GNUC__>4 || (__GNUC__==4 && __GNUC_MINOR__>=3))
+ #define READ_UINT32(ptr) (__builtin_bswap32(*((uint32_t*)(ptr))))
+ #else
+ /* Without a known fast bswap32 we're just as well off doing this */
+ #define READ_UINT32(ptr) (ptr[0]|ptr[1]<<8|ptr[2]<<16|ptr[3]<<24)
+ #define UNALIGNED_SAFE
+ #endif
+#else
+ /* Unknown endianess so last resort is to read individual bytes */
+ #define READ_UINT32(ptr) (ptr[0]|ptr[1]<<8|ptr[2]<<16|ptr[3]<<24)
+
+ /* Since we're not doing word-reads we can skip the messing about with realignment */
+ #define UNALIGNED_SAFE
+#endif
+
+/* Find best way to ROTL32 */
+#if defined(_MSC_VER)
+ #include <stdlib.h> /* Microsoft put _rotl declaration in here */
+ #define ROTL32(x,r) _rotl(x,r)
+#else
+ /* gcc recognises this code and generates a rotate instruction for CPUs with one */
+ #define ROTL32(x,r) (((uint32_t)x << r) | ((uint32_t)x >> (32 - r)))
+#endif
+
+
+/*-----------------------------------------------------------------------------
+ * Core murmurhash algorithm macros */
+
+#define C1 (0xcc9e2d51)
+#define C2 (0x1b873593)
+
+/* This is the main processing body of the algorithm. It operates
+ * on each full 32-bits of input. */
+#define DOBLOCK(h1, k1) do{ \
+ k1 *= C1; \
+ k1 = ROTL32(k1,15); \
+ k1 *= C2; \
+ \
+ h1 ^= k1; \
+ h1 = ROTL32(h1,13); \
+ h1 = h1*5+0xe6546b64; \
+ }while(0)
+
+
+/* Append unaligned bytes to carry, forcing hash churn if we have 4 bytes */
+/* cnt=bytes to process, h1=name of h1 var, c=carry, n=bytes in c, ptr/len=payload */
+#define DOBYTES(cnt, h1, c, n, ptr, len) do{ \
+ int _i = cnt; \
+ while(_i--) { \
+ c = c>>8 | *ptr++<<24; \
+ n++; len--; \
+ if(n==4) { \
+ DOBLOCK(h1, c); \
+ n = 0; \
+ } \
+ } }while(0)
+
+/*---------------------------------------------------------------------------*/
+
+/* Main hashing function. Initialise carry to 0 and h1 to 0 or an initial seed
+ * if wanted. Both ph1 and pcarry are required arguments. */
+void PMurHash32_Process(uint32_t *ph1, uint32_t *pcarry, const void *key, int len)
+{
+ uint32_t h1 = *ph1;
+ uint32_t c = *pcarry;
+
+ const uint8_t *ptr = (uint8_t*)key;
+ const uint8_t *end;
+
+ /* Extract carry count from low 2 bits of c value */
+ int n = c & 3;
+
+#if defined(UNALIGNED_SAFE)
+ /* This CPU handles unaligned word access */
+
+ /* Consume any carry bytes */
+ int i = (4-n) & 3;
+ if(i && i <= len) {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* Process 32-bit chunks */
+ end = ptr + len/4*4;
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+
+#else /*UNALIGNED_SAFE*/
+ /* This CPU does not handle unaligned word access */
+
+ /* Consume enough so that the next data byte is word aligned */
+ int i = -(long)ptr & 3;
+ if(i && i <= len) {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* We're now aligned. Process in aligned blocks. Specialise for each possible carry count */
+ end = ptr + len/4*4;
+ switch(n) { /* how many bytes in c */
+ case 0: /* c=[----] w=[3210] b=[3210]=w c'=[----] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 1: /* c=[0---] w=[4321] b=[3210]=c>>24|w<<8 c'=[4---] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = c>>24;
+ c = READ_UINT32(ptr);
+ k1 |= c<<8;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 2: /* c=[10--] w=[5432] b=[3210]=c>>16|w<<16 c'=[54--] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = c>>16;
+ c = READ_UINT32(ptr);
+ k1 |= c<<16;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 3: /* c=[210-] w=[6543] b=[3210]=c>>8|w<<24 c'=[654-] */
+ for( ; ptr < end ; ptr+=4) {
+ uint32_t k1 = c>>8;
+ c = READ_UINT32(ptr);
+ k1 |= c<<24;
+ DOBLOCK(h1, k1);
+ }
+ }
+#endif /*UNALIGNED_SAFE*/
+
+ /* Advance over whole 32-bit chunks, possibly leaving 1..3 bytes */
+ len -= len/4*4;
+
+ /* Append any remaining bytes into carry */
+ DOBYTES(len, h1, c, n, ptr, len);
+
+ /* Copy out new running hash and carry */
+ *ph1 = h1;
+ *pcarry = (c & ~0xff) | n;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Finalize a hash. To match the original Murmur3A the total_length must be provided */
+uint32_t PMurHash32_Result(uint32_t h, uint32_t carry, uint32_t total_length)
+{
+ uint32_t k1;
+ int n = carry & 3;
+ if(n) {
+ k1 = carry >> (4-n)*8;
+ k1 *= C1; k1 = ROTL32(k1,15); k1 *= C2; h ^= k1;
+ }
+ h ^= total_length;
+
+ /* fmix */
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Murmur3A compatible all-at-once */
+uint32_t PMurHash32(uint32_t seed, const void *key, int len)
+{
+ uint32_t h1=seed, carry=0;
+ PMurHash32_Process(&h1, &carry, key, len);
+ return PMurHash32_Result(h1, carry, len);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Provide an API suitable for smhasher */
+void PMurHash32_test(const void *key, int len, uint32_t seed, void *out)
+{
+ uint32_t h1=seed, carry=0;
+ const uint8_t *ptr = (uint8_t*)key;
+ const uint8_t *end = ptr + len;
+
+#if 0 /* Exercise the progressive processing */
+ while(ptr < end) {
+ //const uint8_t *mid = ptr + rand()%(end-ptr)+1;
+ const uint8_t *mid = ptr + (rand()&0xF);
+ mid = mid<end?mid:end;
+ PMurHash32_Process(&h1, &carry, ptr, mid-ptr);
+ ptr = mid;
+ }
+#else
+ PMurHash32_Process(&h1, &carry, ptr, (int)(end-ptr));
+#endif
+ h1 = PMurHash32_Result(h1, carry, len);
+ *(uint32_t*)out = h1;
+}
+
+/*---------------------------------------------------------------------------*/
+#ifdef TEST
+int main() {
+ // http://www.cprover.org/cbmc/
+ // cbmc PMurHash.c --function PMurHash32 --unwind 255 --bounds-check --pointer-check
+ //=> seed=308736u (00000000000001001011011000000000)
+ // key=INVALID-128 (1000000011111111111111111111111111111111111111111111110101100111)
+ // len=640
+ // Violated property:
+ //file PMurHash.c line 201 function PMurHash32_Process
+ //dereference failure: object bounds
+ //!(POINTER_OFFSET(ptr) < 0) && OBJECT_SIZE(ptr) >= 1 + POINTER_OFFSET(ptr) || DYNAMIC_OBJECT(ptr)
+
+ uint32_t seed = 308736;
+ unsigned long long key = 0x80fffffffffffd67ULL;
+ PMurHash32(seed, &key, sizeof(key));
+}
+#endif
diff --git a/external/hash/PMurHash.h b/external/hash/PMurHash.h
new file mode 100644
index 0000000..28ead00
--- /dev/null
+++ b/external/hash/PMurHash.h
@@ -0,0 +1,64 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+/* ------------------------------------------------------------------------- */
+/* Determine what native type to use for uint32_t */
+
+/* We can't use the name 'uint32_t' here because it will conflict with
+ * any version provided by the system headers or application. */
+
+/* First look for special cases */
+#if defined(_MSC_VER)
+ #define MH_UINT32 unsigned long
+#endif
+
+/* If the compiler says it's C99 then take its word for it */
+#if !defined(MH_UINT32) && ( \
+ defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L )
+ #include <stdint.h>
+ #define MH_UINT32 uint32_t
+#endif
+
+/* Otherwise try testing against max value macros from limit.h */
+#if !defined(MH_UINT32)
+ #include <limits.h>
+ #if (USHRT_MAX == 0xffffffffUL)
+ #define MH_UINT32 unsigned short
+ #elif (UINT_MAX == 0xffffffffUL)
+ #define MH_UINT32 unsigned int
+ #elif (ULONG_MAX == 0xffffffffUL)
+ #define MH_UINT32 unsigned long
+ #endif
+#endif
+
+#if !defined(MH_UINT32)
+ #error Unable to determine type name for unsigned 32-bit int
+#endif
+
+/* I'm yet to work on a platform where 'unsigned char' is not 8 bits */
+#define MH_UINT8 unsigned char
+
+
+/* ------------------------------------------------------------------------- */
+/* Prototypes */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void PMurHash32_Process(MH_UINT32 *ph1, MH_UINT32 *pcarry, const void *key, int len);
+MH_UINT32 PMurHash32_Result(MH_UINT32 h1, MH_UINT32 carry, MH_UINT32 total_length);
+MH_UINT32 PMurHash32(MH_UINT32 seed, const void *key, int len);
+
+void PMurHash32_test(const void *key, int len, MH_UINT32 seed, void *out);
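+
+/*
+ * Illustrative progressive use (buf1, buf2, len1, len2 are hypothetical):
+ *
+ *   MH_UINT32 h1 = seed, carry = 0;
+ *   PMurHash32_Process(&h1, &carry, buf1, len1);
+ *   PMurHash32_Process(&h1, &carry, buf2, len2);
+ *   h1 = PMurHash32_Result(h1, carry, len1 + len2);
+ */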
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/external/hash/README.md b/external/hash/README.md
new file mode 100644
index 0000000..d0f03e8
--- /dev/null
+++ b/external/hash/README.md
@@ -0,0 +1,158 @@
+Generic hash table implementation with focus on being minimally
+invasive on existing items to be indexed.
+
+The key is stored arbitrarily in the referenced item. A custom match
+function `HT_MATCH` provides the necessary abstraction. Items are
+NOT allocated by the hash table.
+
+Removed items are replaced with a sentinel value (`HT_DELETED`) to
+preserve chaining.
+
+See the example implementations `str_set.h` and `token_map.h`, and the
+tests in `hash_test.c`.
+
+The hash function can also be customized, see the default below.
+
+In all cases the key is assumed to be a char string that is not
+(necessarily) zero terminated. The length is given separately. Keys
+can therefore be binary values of arbitrary length.
+
+Instead of initializing the hash table, it may simply be zeroed. In that
+case the bucket count defaults to 4 upon first insert, meaning it can
+hold up to 4 items before resizing, or fewer depending on the load
+factor. By zeroing memory, hash tables use no memory until actually used.
+
+For increased portability we do not rely upon `stdint.h` outside the
+default hash function.
+
+Build
+-----
+
+There are no special build requirements.
+
+CMakeLists.txt simply links the appropriate hash function with the test
+files, but CMake is not required. For example:
+
+ cc load_test.c ptr_set.c cmetrohash64.c -O4 -DNDEBUG -o load_test
+
+There are several significant flags that can be set; see
+`CMakeLists.txt`, `hash_test.c`, and `load_test.c` for details.
+
+`initbuild.sh` is an easy way to create a CMake Ninja build for
+platforms that support it.
+
+Usage
+-----
+
+The hash table is implemented in a generic form with a static (private)
+interface. The macro `DECLARE_HASH_TABLE(name, item)` declares the public
+prototypes for the specialized type, and `DEFINE_HASH_TABLE(name)` defines
+wrapper functions that access the generic implementation. This avoids
+creating all the code as macros, which are painful to develop and debug.
+
+See `token_map.h` and `token_map.c`, which are used in `hash_test.c`.
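+
+As a rough sketch (the names below are made up for illustration; the
+actual `token_map` and `str_set` sources are the authoritative
+reference), a string set specialization might look like this:
+
+    /* my_set.h */
+    #include "hash_table.h"
+    DECLARE_HASH_TABLE(my_set, char *)
+
+    /* my_set.c */
+    #include <string.h>
+    #include "my_set.h"
+    #include "hash_table_def.h"
+    DEFINE_HASH_TABLE(my_set)
+
+    /* User supplied accessors referenced by the generic implementation. */
+    static inline int ht_match(const void *key, size_t len, ht_item_t item)
+    {
+        return len == strlen(item) && memcmp(key, item, len) == 0;
+    }
+    static inline const void *ht_key(ht_item_t item) { return item; }
+    static inline size_t ht_key_len(ht_item_t item) { return strlen(item); }
+
+    #include "hash_table_impl.h"
+
+If the default hash function is used, link the specialization together
+with a hash implementation such as `cmetrohash64.c` (see Build above).
+A zero initialized `my_set_t set = {0};` can be used directly; allocation
+is postponed until the first insert.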
+
+If the datatype is only needed in one file, the implementation such as
+`token_map.c` can be included after defining `HT_PRIVATE`. This gives
+the compiler better optimization opportunities and hides the interface
+from other compilation units.
+
+The basic datatype `hash_table_t` is a small struct that can be embedded
+anywhere and used as the instance of any hash table derived type.
+
+
+Note on algorithmic choice
+--------------------------
+
+We use linear or quadratic probing hash tables because this allows for
+many small hash tables. We over-allocate the hash table by a factor of 2
+(default) but only store a single pointer per item. This probing does
+not allow for dense tables by itself, but because the hash table only
+stores a single pointer per bucket, we can afford a larger table.
+Advanced schemes such as hopscotch hashing can pack much more densely,
+but hopscotch needs to store a bitmask, thus already doubling the
+size of the table. Hopscotch is probably good, but it is more complex and
+depends on optimized bit scan instructions; furthermore, when the use
+case is many small tables such as symbol table scopes, cache locality
+is less relevant. Chained hashing with a 50% load factor is a good
+choice, but it requires intrusive links and cannot trivially hash string
+sets without extra allocation. There is some evidence that linear
+probing may be faster than quadratic probing due to cache effects, as
+long as we do not pack too densely - however, the traditional quadratic
+probing (k + i * i) modulo a prime does not cover all buckets. We use
+(k + i * (i + 1) / 2) modulo a power of 2, which covers all buckets, so
+without experimentation it is unclear whether linear probing or
+quadratic probing is best.
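+
+For example, with 8 buckets the probe offsets i * (i + 1) / 2 for
+i = 0..7 are 0, 1, 3, 6, 10, 15, 21, 28, which reduce modulo 8 to
+0, 1, 3, 6, 2, 7, 5, 4, visiting every bucket exactly once.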
+
+The use of open addressing leads to more key comparisons than chained
+hashing. The fact that we store the keys indirectly in the stored item is
+also not ideal, except when the item is directly the key. If we use the
+saved space for larger hash tables, we suspect this will still perform
+well, also considering external factors such as not having to allocate
+and copy a key from e.g. a text buffer being parsed.
+
+It is generally understood that linear probing degrades significantly
+with a load factor above 0.7. In this light, it is interesting to note
+that Emmanuel Goossaert tested hopscotch hashing and found that bucket
+swaps only take place to a significant degree above a load factor of 0.7.
+A commenter on Goossaert's blog also found that neighbourhoods rarely
+exceed 64 even when allowed to grow on demand. Without deep analysis
+it would appear that linear probing and hopscotch are pretty similar
+at a load factor of 0.5, especially if tombstones are not present.
+Because hopscotch requires extra data (e.g. the hash key, a bitmap,
+or a linked list), this confirms our intuition that lower load factors
+and smaller buckets serve us better than more advanced algorithms.
+Furthermore, hopscotch insertion degrades badly when it needs to search
+for empty buckets at high load factors. Of course, for on-disk storage
+it is a different matter, and this is why Goossaert is interested
+in caching hash keys in buckets.
+
+Robin Hood hashing is mostly interesting when there are many deletions
+to clean up and when the load factor increases. In our implementation we
+try to keep the per-bucket size small: a pointer and an 8-bit offset, or
+just a pointer for the linear and quadratic probing implementations.
+This makes a lower load factor affordable.
+
+This Robin Hood variation stores the offset from the hashed bucket to
+where the first entry is stored. This means we can avoid sampling any
+bucket not indexed by the current hash key, and it also means that we
+avoid having to store or calculate the hash key when updating.
+
+A sorted Robin Hood hashing implementation was also made, but it proved
+to be error prone with many special cases and slower than regular Robin
+Hood hashing. It would conceivably protect against hash collision
+attacks through exponential search, but insertions and deletions would
+still need to move memory in linear time, making this point moot.
+Therefore the sorted Robin Hood variant has been removed.
+
+
+Section 4.5:
+<http://codecapsule.com/2014/05/07/implementing-a-key-value-store-part-6-open-addressing-hash-tables/>
+
+<http://codecapsule.com/2013/08/11/hopscotch-hashing/>
+
+Source file references
+----------------------
+
+<http://www.jandrewrogers.com/2015/05/27/metrohash/>
+
+downloaded from
+
+ <https://github.com/rurban/smhasher>
+ <https://github.com/rurban/smhasher/commit/00a4e5ab6bfb7b25bd3c7cf915f68984d4910cfd>
+
+ <https://raw.githubusercontent.com/rurban/smhasher/master/cmetrohash64.c>
+ <https://raw.githubusercontent.com/rurban/smhasher/master/cmetrohash.h>
+ <https://raw.githubusercontent.com/rurban/smhasher/master/PMurHash.c>
+ <https://raw.githubusercontent.com/rurban/smhasher/master/PMurHash.h>
+
+As of July 2015, for 64-bit hashes, the C port of the 64-bit MetroHash
+is a good trade-off between speed and simplicity. For a 32-bit C hash
+function, the ported MurmurHash3 is safe and easy to use in this
+environment, but xxHash32 may also be worth considering.
+
+See also <http://www.strchr.com/hash_functions>
+
diff --git a/external/hash/cmetrohash.h b/external/hash/cmetrohash.h
new file mode 100644
index 0000000..b2c869a
--- /dev/null
+++ b/external/hash/cmetrohash.h
@@ -0,0 +1,78 @@
+// metrohash.h
+//
+// The MIT License (MIT)
+//
+// Copyright (c) 2015 J. Andrew Rogers
+//
+// Updated Nov. 2015 to use safe unaligned reads and platform neutral
+// hash. This WILL change hashes on big endian platforms. / mikkelfj
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+
+#ifndef CMETROHASH_METROHASH_H
+#define CMETROHASH_METROHASH_H
+
+#include "ht_portable.h"
+#include "unaligned.h"
+
+#pragma once
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <string.h>
+
+// MetroHash 64-bit hash functions
+void cmetrohash64_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out);
+void cmetrohash64_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out);
+
+
+/* rotate right idiom recognized by the compiler */
+inline static uint64_t crotate_right(uint64_t v, unsigned k)
+{
+ return (v >> k) | (v << (64 - k));
+}
+
+inline static uint64_t cread_u64(const void * const ptr)
+{
+ return (uint64_t)unaligned_read_le64toh(ptr);
+}
+
+inline static uint64_t cread_u32(const void * const ptr)
+{
+ return (uint64_t)unaligned_read_le32toh(ptr);
+}
+
+inline static uint64_t cread_u16(const void * const ptr)
+{
+ return (uint64_t)unaligned_read_le16toh(ptr);
+}
+
+inline static uint64_t cread_u8 (const void * const ptr)
+{
+ return * (uint8_t *) ptr;
+}
+
+#if defined (__cplusplus)
+}
+#endif
+#endif // #ifndef CMETROHASH_METROHASH_H
diff --git a/external/hash/cmetrohash64.c b/external/hash/cmetrohash64.c
new file mode 100644
index 0000000..2923958
--- /dev/null
+++ b/external/hash/cmetrohash64.c
@@ -0,0 +1,185 @@
+// metrohash64.cpp
+//
+// The MIT License (MIT)
+//
+// Copyright (c) 2015 J. Andrew Rogers
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+
+#include "cmetrohash.h"
+
+
+void cmetrohash64_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out)
+{
+ static const uint64_t k0 = 0xC83A91E1;
+ static const uint64_t k1 = 0x8648DBDB;
+ static const uint64_t k2 = 0x7BDEC03B;
+ static const uint64_t k3 = 0x2F5870A5;
+
+ const uint8_t * ptr = key;
+ const uint8_t * const end = ptr + len;
+
+ uint64_t hash = ((((uint64_t) seed) + k2) * k0) + len;
+
+ if (len >= 32)
+ {
+ uint64_t v[4];
+ v[0] = hash;
+ v[1] = hash;
+ v[2] = hash;
+ v[3] = hash;
+
+ do
+ {
+ v[0] += cread_u64(ptr) * k0; ptr += 8; v[0] = crotate_right(v[0],29) + v[2];
+ v[1] += cread_u64(ptr) * k1; ptr += 8; v[1] = crotate_right(v[1],29) + v[3];
+ v[2] += cread_u64(ptr) * k2; ptr += 8; v[2] = crotate_right(v[2],29) + v[0];
+ v[3] += cread_u64(ptr) * k3; ptr += 8; v[3] = crotate_right(v[3],29) + v[1];
+ }
+ while (ptr <= (end - 32));
+
+ v[2] ^= crotate_right(((v[0] + v[3]) * k0) + v[1], 33) * k1;
+ v[3] ^= crotate_right(((v[1] + v[2]) * k1) + v[0], 33) * k0;
+ v[0] ^= crotate_right(((v[0] + v[2]) * k0) + v[3], 33) * k1;
+ v[1] ^= crotate_right(((v[1] + v[3]) * k1) + v[2], 33) * k0;
+ hash += v[0] ^ v[1];
+ }
+
+ if ((end - ptr) >= 16)
+ {
+ uint64_t v0, v1;
+ v0 = hash + (cread_u64(ptr) * k0); ptr += 8; v0 = crotate_right(v0,33) * k1;
+ v1 = hash + (cread_u64(ptr) * k1); ptr += 8; v1 = crotate_right(v1,33) * k2;
+ v0 ^= crotate_right(v0 * k0, 35) + v1;
+ v1 ^= crotate_right(v1 * k3, 35) + v0;
+ hash += v1;
+ }
+
+ if ((end - ptr) >= 8)
+ {
+ hash += cread_u64(ptr) * k3; ptr += 8;
+ hash ^= crotate_right(hash, 33) * k1;
+
+ }
+
+ if ((end - ptr) >= 4)
+ {
+ hash += cread_u32(ptr) * k3; ptr += 4;
+ hash ^= crotate_right(hash, 15) * k1;
+ }
+
+ if ((end - ptr) >= 2)
+ {
+ hash += cread_u16(ptr) * k3; ptr += 2;
+ hash ^= crotate_right(hash, 13) * k1;
+ }
+
+ if ((end - ptr) >= 1)
+ {
+ hash += cread_u8 (ptr) * k3;
+ hash ^= crotate_right(hash, 25) * k1;
+ }
+
+ hash ^= crotate_right(hash, 33);
+ hash *= k0;
+ hash ^= crotate_right(hash, 33);
+
+ memcpy(out, &hash, 8);
+}
+
+
+void cmetrohash64_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * out)
+{
+ static const uint64_t k0 = 0xD6D018F5;
+ static const uint64_t k1 = 0xA2AA033B;
+ static const uint64_t k2 = 0x62992FC1;
+ static const uint64_t k3 = 0x30BC5B29;
+
+ const uint8_t * ptr = key;
+ const uint8_t * const end = ptr + len;
+
+ uint64_t hash = ((((uint64_t) seed) + k2) * k0) + len;
+
+ if (len >= 32)
+ {
+ uint64_t v[4];
+ v[0] = hash;
+ v[1] = hash;
+ v[2] = hash;
+ v[3] = hash;
+
+ do
+ {
+ v[0] += cread_u64(ptr) * k0; ptr += 8; v[0] = crotate_right(v[0],29) + v[2];
+ v[1] += cread_u64(ptr) * k1; ptr += 8; v[1] = crotate_right(v[1],29) + v[3];
+ v[2] += cread_u64(ptr) * k2; ptr += 8; v[2] = crotate_right(v[2],29) + v[0];
+ v[3] += cread_u64(ptr) * k3; ptr += 8; v[3] = crotate_right(v[3],29) + v[1];
+ }
+ while (ptr <= (end - 32));
+
+ v[2] ^= crotate_right(((v[0] + v[3]) * k0) + v[1], 30) * k1;
+ v[3] ^= crotate_right(((v[1] + v[2]) * k1) + v[0], 30) * k0;
+ v[0] ^= crotate_right(((v[0] + v[2]) * k0) + v[3], 30) * k1;
+ v[1] ^= crotate_right(((v[1] + v[3]) * k1) + v[2], 30) * k0;
+ hash += v[0] ^ v[1];
+ }
+
+ if ((end - ptr) >= 16)
+ {
+ uint64_t v0, v1;
+ v0 = hash + (cread_u64(ptr) * k2); ptr += 8; v0 = crotate_right(v0,29) * k3;
+ v1 = hash + (cread_u64(ptr) * k2); ptr += 8; v1 = crotate_right(v1,29) * k3;
+ v0 ^= crotate_right(v0 * k0, 34) + v1;
+ v1 ^= crotate_right(v1 * k3, 34) + v0;
+ hash += v1;
+ }
+
+ if ((end - ptr) >= 8)
+ {
+ hash += cread_u64(ptr) * k3; ptr += 8;
+ hash ^= crotate_right(hash, 36) * k1;
+ }
+
+ if ((end - ptr) >= 4)
+ {
+ hash += cread_u32(ptr) * k3; ptr += 4;
+ hash ^= crotate_right(hash, 15) * k1;
+ }
+
+ if ((end - ptr) >= 2)
+ {
+ hash += cread_u16(ptr) * k3; ptr += 2;
+ hash ^= crotate_right(hash, 15) * k1;
+ }
+
+ if ((end - ptr) >= 1)
+ {
+ hash += cread_u8 (ptr) * k3;
+ hash ^= crotate_right(hash, 23) * k1;
+ }
+
+ hash ^= crotate_right(hash, 28);
+ hash *= k0;
+ hash ^= crotate_right(hash, 29);
+
+ memcpy(out, &hash, 8);
+}
+
+
diff --git a/external/hash/hash.h b/external/hash/hash.h
new file mode 100644
index 0000000..c5a6fc6
--- /dev/null
+++ b/external/hash/hash.h
@@ -0,0 +1,115 @@
+#ifndef HASH_H
+#define HASH_H
+
+/* Misc. hash functions that do not conform to a specific interface. */
+
+#include <stdlib.h>
+
+#ifdef _MSC_VER
+/* `inline` only advisory anyway. */
+#pragma warning(disable: 4710) /* function not inlined */
+#endif
+
+static inline uint32_t hash_fnv1a32_update(uint32_t seed, uint8_t *buf, size_t len)
+{
+ uint8_t *p = buf;
+#ifndef FNV1A_NOMUL
+ const uint64_t prime = UINT32_C(0x1000193);
+#endif
+ uint64_t hash = seed;
+
+ while (len--) {
+ hash ^= (uint64_t)*p++;
+#ifndef FNV1A_NOMUL
+ hash *= prime;
+#else
+ hash += (hash << 1) + (hash << 4) + (hash << 7) +
+ (hash << 8) + (hash << 24);
+#endif
+ }
+ return hash;
+}
+
+static inline uint32_t hash_fnv1a32(uint8_t *buf, size_t len)
+{
+ return hash_fnv1a32_update(UINT32_C(0x811c9dc5), buf, len);
+}
+
+static inline uint64_t hash_fnv1a64_update(uint64_t v, uint8_t *buf, size_t len)
+{
+ uint8_t *p = buf;
+#ifndef FNV1A_NOMUL
+ const uint64_t prime = UINT64_C(0x100000001b3);
+#endif
+ uint64_t hash = v;
+
+ while (len--) {
+ hash ^= (uint64_t)*p++;
+#ifndef FNV1A_NOMUL
+ hash *= prime;
+#else
+ hash += (hash << 1) + (hash << 4) + (hash << 5) +
+ (hash << 7) + (hash << 8) + (hash << 40);
+#endif
+ }
+ return hash;
+}
+
+static inline uint64_t hash_fnv1a64(uint8_t *buf, size_t len)
+{
+ return hash_fnv1a64_update(UINT64_C(0xcbf29ce484222325), buf, len);
+}
+
+/*
+ * MurmurHash 3 final mix with seed to handle 0.
+ *
+ * Width is number of bits of the value to return.
+ * http://stackoverflow.com/a/12996028
+ */
+static inline uint32_t hash_bucket32(uint32_t v, size_t width)
+{
+ uint32_t x = v + UINT32_C(0x2f693b52);
+
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x);
+ return x >> (32 - width);
+}
+
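+/*
+ * Illustrative use (added for exposition): hash a string with FNV-1a
+ * and reduce it to an index for a 256 bucket (8-bit wide) table:
+ *
+ *   uint32_t h = hash_fnv1a32((uint8_t *)"some key", 8);
+ *   uint32_t i = hash_bucket32(h, 8);
+ *
+ * which yields an index in the range 0..255.
+ */
+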
+/*
+ * SplitMix64 - can be used to disperse fnv1a hash, to hash
+ * an integer, or as a simple non-cryptographic prng.
+ *
+ * Width is number of bits of the value to return.
+ * http://stackoverflow.com/a/12996028
+ */
+static inline uint64_t hash_bucket64(uint64_t v, size_t width)
+{
+ uint64_t x = v + UINT64_C(0x9e3779b97f4a7c15);
+
+ x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
+ x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
+ x = x ^ (x >> 31);
+ return x >> (64 - width);
+}
+
+static inline uint64_t hash_random64(uint64_t *state)
+{
+ uint64_t x;
+
+ x = hash_bucket64(*state, 64);
+ *state = x;
+ return x;
+}
+
+/*
+ * Faster, less random hash bucket compared to hash_bucket32, but works
+ * for smaller integers.
+ */
+static inline uint32_t hash_mult32(uint32_t v, size_t width)
+{
+ /* Knuth's multiplicative hash. */
+ return (v * UINT32_C(2654435761)) >> (32 - width);
+}
+
+#endif /* HASH_H */
diff --git a/external/hash/hash_table.h b/external/hash/hash_table.h
new file mode 100644
index 0000000..5c3e9cd
--- /dev/null
+++ b/external/hash/hash_table.h
@@ -0,0 +1,266 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HASH_TABLE_H
+#define HASH_TABLE_H
+
+#include "ht_portable.h"
+#include <stddef.h>
+
+/*
+ * Define HT_PRIVATE to make all name wrapping interface functions static
+ * inline when including hash implementation directly in user code. This
+ * can increase performance significantly (3x) on small hash tables with
+ * fast hash functions because the compiler can better optimize static
+ * functions. Some compiler optimizations will get the same speed
+ * with external linkage (clang 4.2 -O4 but not -O3).
+ *
+ * Can also be used to simply hide the interface from global
+ * linkage to avoid name clashes.
+ */
+#ifndef HT_PRIVATE
+#define HT_PRIV
+#else
+#define HT_PRIV static inline
+#endif
+
+/*
+ * Generic hash table type. This makes it possible to use hash tables
+ * in datastructures and header files that do not have access to
+ * the specific hash table implementation. Call to init is optional
+ * if the structure is zeroed.
+ *
+ * Offsets are only used with Robin Hood hashing to segment each chain.
+ *
+ * Keys and values are both stored in the same item pointer. There are
+ * downsides to this over a key / value representation, but since we also
+ * use less space we can afford to lower the load factor and we can have
+ * more complex key representations. The smaller bucket size also helps
+ * when ordering Robin Hood hash chains.
+ */
+typedef struct hash_table hash_table_t;
+struct hash_table {
+ void *table;
+ char *offsets;
+ size_t count;
+ /* May be stored as a direct count, or log2. */
+ size_t buckets;
+};
+
+enum hash_table_insert_mode {
+ ht_replace = 0,
+ ht_keep = 1,
+ ht_unique = 2,
+ ht_multi = 3,
+};
+
+/*
+ * This macro defines the prototypes of the hash table that user code
+ * needs for linkage.
+ *
+ * See also "hash_table_def.h" which builds wrapper functions to a
+ * generic hash table implementation so each specialization gets its own
+ * set of named functions.
+ *
+ * HT_ITEM is normally a pointer type, and the hash table does not
+ * store any significant information internally. Customizations map
+ * the item value to a key. Certain values can be reserved, for
+ * example 0 indicates a missing value, and sometimes 1 and 2 are
+ * reserved for reporting allocation failure and internal tombstones.
+ */
+#define DECLARE_HASH_TABLE(HT_NAME, HT_ITEM) \
+ \
+typedef hash_table_t HT_NAME##_t; \
+typedef HT_ITEM HT_NAME##_item_t; \
+ \
+/* Prototype for user supplied callback when visiting all elements. */ \
+typedef void HT_NAME##_visitor_f(void *context, HT_ITEM item); \
+ \
+extern const HT_NAME##_item_t HT_NAME##_missing; \
+extern const HT_NAME##_item_t HT_NAME##_nomem; \
+extern const HT_NAME##_item_t HT_NAME##_deleted; \
+ \
+static inline int HT_NAME##_is_valid(HT_ITEM item) \
+{ \
+ return \
+ item != HT_NAME##_missing && \
+ item != HT_NAME##_nomem && \
+ item != HT_NAME##_deleted; \
+} \
+ \
+static inline int HT_NAME##_is_missing(HT_ITEM item) \
+{ \
+ return item == HT_NAME##_missing; \
+} \
+ \
+static inline int HT_NAME##_is_nomem(HT_ITEM item) \
+{ \
+ return item == HT_NAME##_nomem; \
+} \
+ \
+static inline int HT_NAME##_is_deleted(HT_ITEM item) \
+{ \
+ return item == HT_NAME##_deleted; \
+} \
+ \
+/* \
+ * Allocates enough buckets to represent count elements without resizing. \
+ * The actual number of allocated buckets depends on the load factor \
+ * given as a macro argument in the implementation. The bucket number \
+ * rounds up to the nearest power of 2. \
+ * \
+ * `ht` should not be initialized beforehand, otherwise use resize. \
+ * Alternatively, it is also valid to zero initialize the table by \
+ * other means - this will postpone allocation until needed. \
+ * \
+ * The load factor (template argument) should be positive and at most \
+ * 100%, otherwise insertion and resize cannot succeed. The recommended \
+ * load factor is between 25% and 75%. \
+ * \
+ * Returns 0 on success, -1 on allocation failure or invalid load factor. \
+ */ \
+HT_PRIV int HT_NAME##_init(HT_NAME##_t *ht, size_t count); \
+ \
+/* \
+ * Clears the allocated memory. Optionally takes a destructor \
+ * that will visit all items. \
+ * The table struct may be reused after being destroyed. \
+ * May also be called on a zero initialised hash table. \
+ * \
+ * Can be called in place of clear for more control. \
+ */ \
+HT_PRIV void HT_NAME##_destroy(HT_NAME##_t *ht, \
+ HT_NAME##_visitor_f *destructor, void *context); \
+ \
+/* \
+ * Clears the allocated memory, but does not manage memory or state of any \
+ * stored items. It is a simpler version of destroy. \
+ */ \
+HT_PRIV void HT_NAME##_clear(HT_NAME##_t *ht); \
+ \
+/* \
+ * Resizes the hash table to hold at least `count` elements. \
+ * The actual number of allocated buckets is a strictly larger power of \
+ * two. If `count` is smaller than the current number of elements, \
+ * that number is used instead of count. Thus, resize(ht, 0) may be \
+ * used to reduce the table size after a spike. \
+ * The function is called automatically as elements are inserted, \
+ * but shrinking the table should be done manually. \
+ * \
+ * If resizing to same size, table is still reallocated but will then \
+ * clean up old tombstones from excessive deletion. \
+ * \
+ * Returns 0 on success, -1 on allocation failure. \
+ */ \
+HT_PRIV int HT_NAME##_resize(HT_NAME##_t *ht, size_t count); \
+ \
+/* \
+ * Inserts an item pointer in one of the following modes: \
+ * \
+ * ht_keep: \
+ * If the key exists, the stored item is kept and returned, \
+ * otherwise it is inserted and null is returned. \
+ * \
+ * ht_replace: \
+ * If the key exists, the stored item is replaced and the old \
+ * item is returned, otherwise the item is inserted and null \
+ * is returned. \
+ * \
+ * ht_unique: \
+ *    Inserts an item without checking if a key exists. Always returns \
+ *    null. This is faster when it is known that the key does not exist. \
+ * \
+ * ht_multi: \
+ * Same as ht_unique but with the intention that a duplicate key \
+ * might exist. This should not be abused because not all hash table \
+ *    implementations work well with too many collisions. Robin Hood \
+ *    hashing might reallocate aggressively to keep the chain length \
+ *    down. Linear and quadratic probing do handle this, albeit slowly. \
+ * \
+ * The inserted item cannot have the value HT_MISSING and, depending on \
+ * the implementation, also not HT_DELETED or HT_NOMEM, but the \
+ * definitions are type specific. \
+ */ \
+HT_PRIV HT_ITEM HT_NAME##_insert(HT_NAME##_t *ht, \
+ const void *key, size_t len, HT_ITEM item, int mode); \
+ \
+/* Similar to insert, but derives key from item. */ \
+HT_PRIV HT_ITEM HT_NAME##_insert_item(HT_NAME##_t *ht, \
+ HT_ITEM item, int mode); \
+ \
+/* \
+ * Finds the first matching item if any, or returns null. \
+ * If there are duplicate keys, the first inserted is returned. \
+ */ \
+HT_PRIV HT_ITEM HT_NAME##_find(HT_NAME##_t *ht, \
+ const void *key, size_t len); \
+ \
+/* \
+ * Removes the first inserted item that matches the given key, if any. \
+ * Returns the removed item if any, otherwise null. \
+ */ \
+HT_PRIV HT_ITEM HT_NAME##_remove(HT_NAME##_t *ht, \
+ const void *key, size_t len); \
+ \
+/* \
+ * Finds an item that compares equal to the given item. It is \
+ * not necessarily the same item if the given item isn't stored, or if \
+ * there are duplicates in the table. \
+ */ \
+HT_PRIV HT_ITEM HT_NAME##_find_item(HT_NAME##_t *ht, HT_ITEM item); \
+ \
+/* \
+ * This removes the first item that matches the given item, not \
+ * necessarily the item itself, and the item need not be present \
+ * in the table. Even if the item is in fact removed, it may still \
+ * be present if stored multiple times through abuse of the \
+ * insert_unique function. \
+ */ \
+HT_PRIV HT_ITEM HT_NAME##_remove_item(HT_NAME##_t *ht, HT_ITEM item); \
+ \
+/* \
+ * Calls a function for every item in the hash table. This may be \
+ * used for destructing items, provided the table is not accessed \
+ * subsequently. In fact, the hash_table_clear function takes an \
+ * optional visitor that does exactly that. \
+ * \
+ * The function is linear in the allocated hash table size, so it will be \
+ * inefficient if the hash table was resized much larger than the number \
+ * of stored items. In that case it is better to store links in the \
+ * items. For the default resizing, the function is reasonably fast \
+ * because for cache reasons it is very fast to exclude empty elements. \
+ */ \
+HT_PRIV void HT_NAME##_visit(HT_NAME##_t *ht, \
+ HT_NAME##_visitor_f *visitor, void *context); \
+ \
+/* \
+ * Returns the number of elements in the table (not necessarily the number \
+ * of unique keys). \
+ */ \
+static inline size_t HT_NAME##_count(HT_NAME##_t *ht) \
+{ \
+ return ht->count; \
+} \
+
+#endif /* HASH_TABLE_H */
diff --git a/external/hash/hash_table_def.h b/external/hash/hash_table_def.h
new file mode 100644
index 0000000..5362d47
--- /dev/null
+++ b/external/hash/hash_table_def.h
@@ -0,0 +1,154 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HASH_TABLE_DEF_H
+#define HASH_TABLE_DEF_H
+
+#include "ht_hash_function.h"
+#ifndef HT_HASH_FUNCTION
+/*
+ * If the default hash function is used, make sure to link with the
+ * appropriate hash implementation file.
+ */
+#define HT_HASH_FUNCTION ht_default_hash_function
+#endif
+
+#ifndef HT_LOAD_FACTOR
+#define HT_LOAD_FACTOR 0.7
+#endif
+
+#define HT_LOAD_FACTOR_FRAC ((size_t)((float)(HT_LOAD_FACTOR)*256))
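+
+/*
+ * Illustrative (added note): a different load factor can be chosen per
+ * specialization by defining it before this header is included, e.g.
+ *
+ *   #define HT_LOAD_FACTOR 0.5
+ *   #include "hash_table_def.h"
+ */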
+
+#ifndef HT_PANIC
+#include <stdio.h>
+#define HT_PANIC(s) { fprintf(stderr, "aborting on panic: %s\n", s); exit(1); }
+#endif
+
+#ifndef HT_MISSING
+#define HT_MISSING ((ht_item_t)0)
+#endif
+
+#ifndef HT_NOMEM
+#define HT_NOMEM ((ht_item_t)1)
+#endif
+
+#ifndef HT_DELETED
+#define HT_DELETED ((ht_item_t)2)
+#endif
+
+#define DEFINE_HASH_TABLE(HT_NAME) \
+ \
+typedef HT_NAME##_item_t ht_item_t; \
+typedef HT_NAME##_visitor_f ht_visitor_f; \
+ \
+/* User supplied. */ \
+static inline int ht_match(const void *key, size_t len, ht_item_t item); \
+static inline const void *ht_key(ht_item_t item); \
+static inline size_t ht_key_len(ht_item_t item); \
+ \
+/* Implementation supplied. */ \
+static ht_item_t ht_insert(hash_table_t *ht, \
+ const void *key, size_t len, ht_item_t new_item, int mode); \
+static ht_item_t ht_find(hash_table_t *ht, const void *key, size_t len); \
+static ht_item_t ht_remove(hash_table_t *ht, const void *key, size_t len); \
+static int ht_init(hash_table_t *ht, size_t count); \
+static int ht_resize(hash_table_t *ht, size_t count); \
+static void ht_clear(hash_table_t *ht); \
+static void ht_visit(hash_table_t *ht, \
+ ht_visitor_f *visitor, void *context); \
+ \
+const ht_item_t HT_NAME##_missing = HT_MISSING; \
+const ht_item_t HT_NAME##_nomem = HT_NOMEM; \
+const ht_item_t HT_NAME##_deleted = HT_DELETED; \
+ \
+HT_PRIV void HT_NAME##_clear(HT_NAME##_t *ht) \
+{ \
+ ht_clear(ht); \
+} \
+ \
+HT_PRIV void HT_NAME##_destroy(HT_NAME##_t *ht, \
+ HT_NAME##_visitor_f *destructor, void *context) \
+{ \
+ if (destructor) { \
+ ht_visit(ht, destructor, context); \
+ } \
+ ht_clear(ht); \
+} \
+ \
+HT_PRIV int HT_NAME##_init(HT_NAME##_t *ht, size_t count) \
+{ \
+ return ht_init(ht, count); \
+} \
+ \
+HT_PRIV int HT_NAME##_resize(HT_NAME##_t *ht, size_t count) \
+{ \
+ return ht_resize(ht, count); \
+} \
+ \
+HT_PRIV ht_item_t HT_NAME##_insert(HT_NAME##_t *ht, \
+ const void *key, size_t len, ht_item_t new_item, int mode) \
+{ \
+ return ht_insert(ht, key, len, new_item, mode); \
+} \
+ \
+HT_PRIV ht_item_t HT_NAME##_insert_item(HT_NAME##_t *ht, \
+ ht_item_t item, int mode) \
+{ \
+ return ht_insert(ht, \
+ ht_key(item), \
+ ht_key_len(item), \
+ item, mode); \
+} \
+ \
+HT_PRIV ht_item_t HT_NAME##_find(HT_NAME##_t *ht, \
+ const void *key, size_t len) \
+{ \
+ return ht_find(ht, key, len); \
+} \
+ \
+HT_PRIV ht_item_t HT_NAME##_find_item(HT_NAME##_t *ht, ht_item_t item) \
+{ \
+ return ht_find(ht, \
+ ht_key(item), \
+ ht_key_len(item)); \
+} \
+ \
+HT_PRIV ht_item_t HT_NAME##_remove(HT_NAME##_t *ht, \
+ const void *key, size_t len) \
+{ \
+ return ht_remove(ht, key, len); \
+} \
+ \
+HT_PRIV ht_item_t HT_NAME##_remove_item(HT_NAME##_t *ht, ht_item_t item) \
+{ \
+ return ht_remove(ht, ht_key(item), ht_key_len(item)); \
+} \
+ \
+HT_PRIV void HT_NAME##_visit(HT_NAME##_t *ht, \
+ HT_NAME##_visitor_f *visitor, void *context) \
+{ \
+ ht_visit(ht, visitor, context); \
+} \
+
+#endif /* HASH_TABLE_DEF_H */
diff --git a/external/hash/hash_table_impl.h b/external/hash/hash_table_impl.h
new file mode 100644
index 0000000..94fc9b8
--- /dev/null
+++ b/external/hash/hash_table_impl.h
@@ -0,0 +1,233 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+/*
+ * This file implements a generic hash interface such that different
+ * instances use the same local names but are hidden from each other.
+ * The interface maps the local names to a public specific type.
+ *
+ * This implementation provides a hash table with linear or quadratic
+ * probing.
+ */
+
+#ifdef HASH_TABLE_IMPL
+#error "cannot have multiple implementations in same compilation unit"
+#endif
+#define HASH_TABLE_IMPL
+/* Open Addressing */
+#define HT_OA
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4127) /* conditional expression is constant */
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#ifndef HT_PROBE
+#ifdef HT_PROBE_QUADRATIC
+#define HT_PROBE(k, i, N) ((k + (i + i * i) / 2) & N)
+#else
+#define HT_PROBE(k, i, N) ((k + i) & N)
+#endif
+#endif
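+
+/*
+ * Linear probing is the default. Define HT_PROBE_QUADRATIC, or supply a
+ * custom HT_PROBE, before including this file to use triangular-number
+ * quadratic probing instead; both cover all buckets for power of 2 sizes.
+ */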
+
+static int ht_init(hash_table_t *ht, size_t count)
+{
+ size_t buckets = 4;
+
+ if ((HT_LOAD_FACTOR_FRAC) > 256 || (HT_LOAD_FACTOR_FRAC) < 1) {
+ /*
+         * 100% is bad but still the user's choice.
+         * 101% will never terminate insertion.
+ */
+ HT_PANIC("hash table failed with impossible load factor");
+ return -1;
+ }
+ while (count > buckets * (HT_LOAD_FACTOR_FRAC) / 256) {
+ buckets *= 2;
+ }
+ ht->table = calloc(buckets, sizeof(ht_item_t));
+ if (ht->table == 0) {
+ return -1;
+ }
+ ht->offsets = 0;
+ ht->buckets = buckets;
+ ht->count = 0;
+ return 0;
+}
+
+static int ht_resize(hash_table_t *ht, size_t count)
+{
+ size_t i;
+ hash_table_t ht2;
+ ht_item_t *T = ht->table;
+ void *item;
+
+ if (count < ht->count) {
+ count = ht->count;
+ }
+ if (ht_init(&ht2, count)) {
+ return -1;
+ }
+ for (i = 0; i < ht->buckets; ++i) {
+ item = T[i];
+ if ((item && item != HT_DELETED)) {
+ ht_insert(&ht2, ht_key(item), ht_key_len(item), item, ht_multi);
+ }
+ }
+ ht_clear(ht);
+ memcpy(ht, &ht2, sizeof(*ht));
+ return 0;
+}
+
+static ht_item_t ht_insert(hash_table_t *ht,
+ const void *key, size_t len, ht_item_t new_item, int mode)
+{
+ ht_item_t *T;
+ size_t N, i, j, k;
+ ht_item_t item, *vacant = 0;
+
+ assert(new_item != HT_MISSING);
+ assert(new_item != HT_DELETED);
+ assert(new_item != HT_NOMEM);
+
+ if (ht->count >= ht->buckets * (HT_LOAD_FACTOR_FRAC) / 256) {
+ if (ht_resize(ht, ht->count * 2)) {
+ HT_PANIC("hash table failed to allocate memory during resize");
+ return HT_NOMEM;
+ }
+ }
+ T = ht->table;
+ N = ht->buckets - 1;
+ k = HT_HASH_FUNCTION(key, len);
+ i = 0;
+ j = HT_PROBE(k, i, N);
+ if (mode == ht_unique || mode == ht_multi) {
+ ++ht->count;
+ while (T[j] && T[j] != HT_DELETED) {
+ ++i;
+ j = HT_PROBE(k, i, N);
+ }
+ T[j] = new_item;
+ return 0;
+ }
+ while ((item = T[j])) {
+ if (item == HT_DELETED) {
+ if (vacant == 0) {
+ /*
+ * If a tombstone was found, use the first available,
+ * but continue search for possible match.
+ */
+ vacant = &T[j];
+ }
+ } else if (ht_match(key, len, item)) {
+ if (mode == ht_replace) {
+ T[j] = new_item;
+ }
+ return item;
+ }
+ ++i;
+ j = HT_PROBE(k, i, N);
+ }
+ if (vacant == 0) {
+ vacant = &T[j];
+ }
+ ++ht->count;
+ *vacant = new_item;
+ return 0;
+}
+
+static ht_item_t ht_find(hash_table_t *ht, const void *key, size_t len)
+{
+ ht_item_t *T = ht->table;
+ size_t N, i, j, k;
+ ht_item_t item;
+
+ if (T == 0) {
+ return 0;
+ }
+ N = ht->buckets - 1;
+ k = HT_HASH_FUNCTION(key, len);
+ i = 0;
+ j = HT_PROBE(k, i, N);
+ while ((item = T[j])) {
+ if ((item != HT_DELETED) &&
+ ht_match(key, len, item)) {
+ return item;
+ }
+ ++i;
+ j = HT_PROBE(k, i, N);
+ }
+ return 0;
+}
+
+static ht_item_t ht_remove(hash_table_t *ht, const void *key, size_t len)
+{
+ ht_item_t *T = ht->table;
+ size_t N, i, j, k;
+ ht_item_t item;
+
+ if (T == 0) {
+ return 0;
+ }
+ N = ht->buckets - 1;
+ k = HT_HASH_FUNCTION(key, len);
+ i = 0;
+ j = HT_PROBE(k, i, N);
+ while ((item = T[j])) {
+ if (item != HT_DELETED &&
+ ht_match(key, len, item)) {
+ T[j] = HT_DELETED;
+ --ht->count;
+ return item;
+ }
+ ++i;
+ j = HT_PROBE(k, i, N);
+ }
+ return 0;
+}
+
+static void ht_visit(hash_table_t *ht, ht_visitor_f *visitor, void *context)
+{
+ size_t i;
+ ht_item_t *T = ht->table;
+ ht_item_t item;
+
+ for (i = 0; i < ht->buckets; ++i) {
+ item = T[i];
+ if (item && item != HT_DELETED) {
+ visitor(context, item);
+ }
+ }
+}
+
+static void ht_clear(hash_table_t *ht)
+{
+ if (ht->table) {
+ free(ht->table);
+ }
+ memset(ht, 0, sizeof(*ht));
+}
diff --git a/external/hash/hash_table_impl_rh.h b/external/hash/hash_table_impl_rh.h
new file mode 100644
index 0000000..b4cabae
--- /dev/null
+++ b/external/hash/hash_table_impl_rh.h
@@ -0,0 +1,360 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* We use the same define for all implementations */
+#ifdef HASH_TABLE_IMPL
+#error "cannot have multiple implementations in same compilation unit"
+#endif
+#define HASH_TABLE_IMPL
+/* Robin Hood (with offset table) */
+#define HT_RH
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4127) /* conditional expression is constant */
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+/*
+ * A variation of Robin Hashing:
+ * We do not calculate distance from buckets, nor do we cache
+ * hash keys. Instead we maintain a 7-bit offset that points
+ * to where the first entry of a bucket is stored. In Robin Hood hashing
+ * all entries conceptually chained to the same bucket are stored
+ * immediately after each other in order of insertion. The offset of
+ * the next bucket is naturally the end of the previous bucket, off by
+ * one. This breaks down when the bucket offset is 0 and the bucket is
+ * empty because it suggests there is an element. We cannot distinguish
+ * between a single used and unused entry, except by looking at the
+ * content or otherwise tag the information on. This is not a problem,
+ * just a special case to deal with.
+ *
+ * The offsets are stored separately which might lead to more cache line
+ * traffic, but the alternative is not very elegant - either wasting
+ * space or trying to pack offsets on a per cache line basis. We only
+ * need 8 bits for offsets. If the offset overflows, bit 7 will be set
+ * which we can easily detect. During insertion, offsets are incremented
+ * on all affected buckets, and likewise decremented on remove. In
+ * principle we can use bit parallel increments to update most offsets
+ * in a single operation, but it is hardly worthwhile due to setup
+ * cost. The approach bears some resemblance to hopscotch hashing which
+ * uses local offsets for chaining, but we prefer the simpler Robin
+ * Hood approach.
+ *
+ * If the offset overflows, the table is resized. We expect the packed
+ * chains to behave like a special case of a hopscotch layout and
+ * consequently have the same bounds, meaning we are unlikely to have
+ * either long offsets or long chains as long as we resize well before the
+ * table is completely full, so resizing on an offset of 128 should be ok.
+ *
+ * Our main motivation for this hashing is actually to get rid of
+ * tombstones in quadratic and linear probing. Avoiding tombstones
+ * is much simpler when sorting chains Robin Hood style, and we avoid
+ * checking for tombstones. We lose this benefit by having to inspect
+ * offsets, but then we also avoid checking keys before and after the
+ * chain because we can zero in on exactly the entries belonging to the
+ * bucket.
+ *
+ * Unlike traditional Robin Hood, we can find a missing key very quickly
+ * without any heuristics: we only need to inspect exactly the number
+ * of entries in the bucket (or at most 1 if the bucket is empty).
+ *
+ * Find operations start exactly at an entry with a matching hash key
+ * unlike normal Robin Hood which must scan past a few earlier entries
+ * on average, or guestimate where to start and seek both ways.
+ *
+ * We can also very quickly insert a key that is known to be unique
+ * because we can add it directly to the end (but possibly requiring
+ * a shift of later entries Robin Hood style).
+ *
+ * Whether these benefits outweighs the cost of a separate offset
+ * lookup is unclear, but the reduced memory consumption certainly
+ * allows for a lower load factor, which also helps a lot.
+ *
+ * Traditional Robin Hood Hashing actually permits a chain to become
+ * very long. We do not permit this, in line with hopscotch hashing.
+ * This is a drawback from a security perspective because worst case
+ * this can trigger resizing ad infinitum iff the hash function can
+ * be hacked or massive duplicate key insertion can be triggered. By
+ * using the provided hash functions and seeding them randomly at
+ * startup, and avoiding the multi key feature, it is very unlikely to
+ * be a problem with what is known about hash table attacks so far.
+ *
+ * Values and keys are not stored, only item pointers. Custom macros
+ * or inline functions provide access to key data from the item. We
+ * could add a separate value array and treat the item strictly as a
+ * key, but we can have a smaller load factor instead, and can more
+ * easily avoid copying complex key structures, such as start and end
+ * pointers to token data for a parser.
+ *
+ * A typical hash table has: key pointer or key value, value pointer
+ * or value, a cached hash key or bitmap (for Robin Hood or Hopscotch)
+ * which on 64 bit platforms easily amounts to 20 bytes or more per
+ * bucket. We use 9 bytes on 64 bit platforms and 5 bytes on 32 bit.
+ * This gets us down to a max load of 0.5 and on average about 0.37.
+ * This should make it very likely that the first bucket inspected is
+ * a direct hit negating the benefit of caching hash keys. In addition,
+ * when it is not a direct hit, we get pointers loaded in a cache line
+ * to inspect, all known to have the same hash key.
+ */
+
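+/*
+ * Illustrative example (added for exposition, not from the original
+ * sources): with 8 buckets, insert items A and B hashing to bucket 2,
+ * then C hashing to bucket 3. The table and offsets end up as:
+ *
+ *   slot:    0  1  2  3  4  5  6  7
+ *   item:    -  -  A  B  C  -  -  -
+ *   offset:  0  0  0  1  1  0  0  0
+ *
+ * Bucket 2's chain starts at slot 2 + offsets[2] = 2 with length
+ * offsets[3] - offsets[2] + 1 = 2 (A, B), and bucket 3's chain starts at
+ * slot 3 + offsets[3] = 4 with length offsets[4] - offsets[3] + 1 = 1 (C).
+ */
+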
+int ht_init(hash_table_t *ht, size_t count)
+{
+ size_t buckets = 4;
+
+ if ((HT_LOAD_FACTOR_FRAC) > 256 || (HT_LOAD_FACTOR_FRAC) < 1) {
+ /*
+ * 101% will never terminate insertion.
+ * 0% will never terminate resize.
+ */
+ HT_PANIC("robin hood hash table failed with impossible load factor");
+ return -1;
+ }
+ while (count > buckets * (HT_LOAD_FACTOR_FRAC) / 256) {
+ buckets *= 2;
+ }
+ ht->table = calloc(buckets, sizeof(ht_item_t));
+ if (ht->table == 0) {
+ return -1;
+ }
+ ht->offsets = calloc(buckets, sizeof(char));
+ if (ht->offsets == 0) {
+ free(ht->table);
+ ht->table = 0;
+ return -1;
+ }
+ ht->buckets = buckets;
+ ht->count = 0;
+ return 0;
+}
+
+int ht_resize(hash_table_t *ht, size_t count)
+{
+ size_t i;
+ hash_table_t ht2;
+ ht_item_t *T = ht->table;
+ ht_item_t item;
+
+ if (count < ht->count) {
+ count = ht->count;
+ }
+ if (ht_init(&ht2, count)) {
+ return -1;
+ }
+ for (i = 0; i < ht->buckets; ++i) {
+ item = T[i];
+ if (item > (ht_item_t)1) {
+ ht_insert(&ht2, ht_key(item), ht_key_len(item), item, ht_multi);
+ }
+ }
+ ht_clear(ht);
+ memcpy(ht, &ht2, sizeof(*ht));
+ return 0;
+}
+
+ht_item_t ht_insert(hash_table_t *ht,
+ const void *key, size_t len, ht_item_t item, int mode)
+{
+ ht_item_t *T;
+ size_t N, n, j, k, offset;
+ ht_item_t new_item;
+ char overflow = 0;
+
+ new_item = item;
+ if (ht->count >= ht->buckets * (HT_LOAD_FACTOR_FRAC) / 256) {
+ if (ht_resize(ht, ht->count * 2)) {
+ HT_PANIC("robin hood hash table failed to allocate memory during resize");
+ return HT_NOMEM;
+ }
+ }
+ T = ht->table;
+ N = ht->buckets - 1;
+ k = HT_HASH_FUNCTION(key, len) & N;
+ offset = ht->offsets[k];
+ j = (k + offset) & N;
+ /*
+ * T[j] == 0 is a special case because we cannot count
+ * zero probe length, and because we should not increment
+ * the offset at insertion point in this case.
+ *
+ * T[j] == 0 implies offset == 0, but this way we avoid
+ * hitting memory that we don't need.
+ */
+ if (offset == 0 && T[j] == 0) {
+ ++ht->count;
+ T[j] = new_item;
+ return 0;
+ }
+ n = ht->offsets[(k + 1) & N] - offset + 1;
+ if (mode == ht_multi) {
+ /* Don't search for match before inserting. */
+ j = (j + n) & N;
+ n = 0;
+ }
+ while (n--) {
+ item = T[j];
+ if (ht_match(key, len, item)) {
+ if (mode == ht_replace) {
+ T[j] = new_item;
+ }
+ return item;
+ }
+ j = (j + 1) & N;
+ }
+ ++ht->count;
+ while (k != j) {
+ /* Only increment buckets after own bucket. */
+ k = (k + 1) & N;
+ overflow |= ++ht->offsets[k];
+ }
+ while ((item = T[j])) {
+ T[j] = new_item;
+ new_item = item;
+ j = (j + 1) & N;
+ overflow |= ++ht->offsets[j];
+ }
+ T[j] = new_item;
+
+    if (overflow & 0x80) {
+ /*
+ * At least one offset overflowed, so we need to
+ * resize the table.
+ */
+ if (ht->count * 10 < ht->buckets) {
+ HT_PANIC("FATAL: hash table resize on low utilization would explode\n"\
+ " possible collision DoS or bad hash function");
+ return HT_NOMEM;
+ }
+ if (ht_resize(ht, ht->count * 2)) {
+        HT_PANIC("FATAL: hash table resize failed and left hash table inconsistent");
+ /*
+ * This renders the hash table in a bad state
+ * because we have updated to an inconsistent
+ * state.
+ */
+ return HT_NOMEM;
+ }
+ }
+ return item;
+}
+
+ht_item_t ht_find(hash_table_t *ht, const void *key, size_t len)
+{
+ ht_item_t *T = ht->table;
+ size_t N, n, j, k, offset;
+ ht_item_t item;
+
+ if (T == 0) {
+ return 0;
+ }
+ N = ht->buckets - 1;
+ k = HT_HASH_FUNCTION(key, len) & N;
+ offset = ht->offsets[k];
+ j = (k + offset) & N;
+ if (offset == 0 && T[j] == 0) {
+ /* Special case because we cannot count zero probe length. */
+ return 0;
+ }
+ n = ht->offsets[(k + 1) & N] - offset + 1;
+ while (n--) {
+ item = T[j];
+ if (ht_match(key, len, item)) {
+ return item;
+ }
+ j = (j + 1) & N;
+ }
+ return 0;
+}
+
+ht_item_t ht_remove(hash_table_t *ht, const void *key, size_t len)
+{
+ ht_item_t *T = ht->table;
+ size_t N, n, j, k, offset;
+ ht_item_t item, *next_item;
+
+ if (T == 0) {
+ return 0;
+ }
+ N = ht->buckets - 1;
+ k = HT_HASH_FUNCTION(key, len) & N;
+ offset = ht->offsets[k];
+ j = (k + offset) & N;
+ if (offset == 0 && T[j] == 0) {
+ return 0;
+ }
+ n = ht->offsets[(k + 1) & N] - offset + 1;
+ while (n) {
+ item = T[j];
+ if (ht_match(key, len, item)) {
+ break;
+ }
+ j = (j + 1) & N;
+ --n;
+ }
+ if (n == 0) {
+ return 0;
+ }
+ --ht->count;
+ while (k != j) {
+ /* Do not update the offset of the bucket that we own. */
+ k = (k + 1) & N;
+ --ht->offsets[k];
+ }
+ for (;;) {
+ j = (j + 1) & N;
+ if (ht->offsets[j] == 0) {
+ T[k] = 0;
+ return item;
+ }
+ --ht->offsets[j];
+ T[k] = T[j];
+ k = j;
+ }
+}
+
+void ht_visit(hash_table_t *ht, ht_visitor_f *visitor, void *context)
+{
+ size_t i;
+ ht_item_t *T = ht->table;
+ ht_item_t item;
+
+ for (i = 0; i < ht->buckets; ++i) {
+ item = T[i];
+ if (item > (ht_item_t)1) {
+ visitor(context, item);
+ }
+ }
+}
+
+void ht_clear(hash_table_t *ht)
+{
+ if (ht->table) {
+ free(ht->table);
+ }
+ if (ht->offsets) {
+ free(ht->offsets);
+ }
+ memset(ht, 0, sizeof(*ht));
+}
diff --git a/external/hash/hash_test.c b/external/hash/hash_test.c
new file mode 100644
index 0000000..d54cc07
--- /dev/null
+++ b/external/hash/hash_test.c
@@ -0,0 +1,419 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+
+/* Not used here, just included to catch compiler errors and warnings. */
+#include "hash.h"
+
+#include "str_set.h"
+#include "token_map.h"
+#include "ht64.h"
+#include "ht32.h"
+#include "ht64rh.h"
+#include "ht32rh.h"
+
+#include "ht_trace.h"
+
+#define test_assert(x) if (!(x)) { printf("Test failed at %s:%d\n", __FILE__, __LINE__); assert(0); exit(1); }
+
+
+str_set_t S;
+token_map_t TM;
+
+char *keys[] = {
+ "foo",
+ "bar",
+ "baz",
+ "gimli",
+ "bofur"
+};
+
+struct token tokens[5];
+
+void free_key(void *context, char *key) {
+ free(key);
+}
+
+void test_str_set()
+{
+ int i;
+ char *s, *s0, *s1;
+ unsigned int n = sizeof(keys)/sizeof(keys[0]);
+
+ /* We rely on zero initialization here. */
+ test_assert(str_set_count(&S) == 0);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ /* We don't have to use strdup, but we test the
+ * allocation management and item replacement. */
+ s = str_set_insert(&S, s, strlen(s), strdup(s), ht_keep);
+ test_assert(str_set_count(&S) == i + 1);
+ test_assert(s == 0);
+ }
+ test_assert(n == 5);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ s = str_set_find(&S, s, strlen(s));
+ test_assert(strcmp(s, keys[i]) == 0);
+ }
+ s = str_set_remove(&S, "gimlibofur", 5);
+ test_assert(strcmp(s, "gimli") == 0);
+ free(s);
+ test_assert(str_set_count(&S) == n - 1);
+ s = str_set_remove(&S, "gimlibofur", 5);
+ test_assert(s == 0);
+ test_assert(str_set_count(&S) == n - 1);
+ s = str_set_insert(&S, "foobarbaz", 6,
+ (s0 = strndup("foobarbaz", 6)), ht_keep);
+ test_assert(s == 0);
+ test_assert(str_set_count(&S) == n);
+ s = str_set_insert(&S, "foobarbaz", 6,
+ (s1 = strndup("foobarbaz", 6)), ht_keep);
+ test_assert(s == s0);
+ free(s1);
+ test_assert(str_set_count(&S) == n);
+ s = str_set_find(&S, "foobar", 6);
+ test_assert(s == s0);
+ s = str_set_insert(&S, "foobarbaz", 6,
+ (s1 = strndup("foobarbaz", 6)), ht_replace);
+ test_assert(s == s0);
+ free(s);
+ s = str_set_find(&S, "foobar", 6);
+ test_assert(s == s1);
+ s = str_set_find(&S, "foobarbaz", 9);
+ test_assert(s == 0);
+ str_set_destroy(&S, free_key, 0);
+ s = str_set_find(&S, "foobar", 6);
+ test_assert(s == 0);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ s = str_set_find(&S, s, strlen(s));
+ test_assert(s == 0);
+ }
+}
+
+void test_str_set2()
+{
+ int i;
+ char *s, *s1;
+ unsigned int n = sizeof(keys)/sizeof(keys[0]);
+
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ str_set_insert(&S, s, strlen(s), s, ht_unique);
+ }
+ test_assert(str_set_count(&S) == n);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ /*
+ * Unique and multi are the same logically, but different
+ * intentionally.
+ */
+ str_set_insert(&S, s, strlen(s), s, ht_multi);
+ }
+ test_assert(str_set_count(&S) == 2 * n);
+ ht_trace_buckets(&S, "after double insert", 0, 8);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ s1 = str_set_find(&S, s, strlen(s));
+ test_assert(strcmp(s, s1) == 0);
+ }
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ s1 = str_set_remove(&S, s, strlen(s));
+ test_assert(strcmp(s, s1) == 0);
+ test_assert(str_set_count(&S) == 2 * n - i - 1);
+ ht_trace_buckets(&S, "after single", 8, 8);
+ }
+ ht_trace_buckets(&S, "after first remove", 0, 8);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ s1 = str_set_remove(&S, s, strlen(s));
+ test_assert(strcmp(s, s1) == 0);
+ test_assert(str_set_count(&S) == n - i - 1);
+ }
+    ht_trace_buckets(&S, "after second remove", 0, 8);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ s1 = str_set_remove(&S, s, strlen(s));
+ test_assert(s1 == 0);
+ test_assert(str_set_count(&S) == 0);
+ }
+ str_set_clear(&S);
+}
+
+void test_str_set3()
+{
+ int i;
+ char *s, *s1;
+ unsigned int n = sizeof(keys)/sizeof(keys[0]);
+
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ str_set_insert_item(&S, s, ht_unique);
+ }
+ test_assert(str_set_count(&S) == n);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ str_set_insert_item(&S, s, ht_keep);
+ }
+ test_assert(str_set_count(&S) == n);
+ for (i = 0; i < n; ++i) {
+ s = keys[i];
+ s1 = str_set_find_item(&S, s);
+ test_assert(strcmp(s, s1) == 0);
+ }
+ s = keys[1];
+ s1 = str_set_remove_item(&S, s);
+ /*
+ * This doesn't always hold, but here we
+ * are sure because of how we inserted data.
+ */
+ test_assert(s == s1);
+ s1 = str_set_find_item(&S, s);
+ test_assert(s1 == 0);
+ str_set_clear(&S);
+}
+
+void test_str_set4()
+{
+ char *s, *s1;
+
+ s = "dumble";
+ str_set_insert_item(&S, "dumble", ht_keep);
+ s1 = str_set_find_item(&S, s);
+    /* Insert without replace. */
+ str_set_insert_item(&S, "2dumble" + 1, ht_keep);
+ test_assert(s == s1);
+ s1 = str_set_find_item(&S, s);
+ test_assert(s == s1);
+ /* Insert with replace. */
+ s1 = str_set_insert_item(&S, "2dumble" + 1, ht_replace);
+ /* Old value still returned. */
+ test_assert(s == s1);
+ s1 = str_set_find_item(&S, s);
+ test_assert(s != s1);
+ /* New item returned. */
+ test_assert(strcmp(s1 - 1, "2dumble") == 0);
+ str_set_clear(&S);
+}
+
+void visit_item_set(void *context, token_map_item_t item)
+{
+ int *count = context;
+ ++*count;
+}
+
+void test_token_map()
+{
+ int i, count;
+ token_map_item_t item;
+ unsigned int n = sizeof(keys)/sizeof(keys[0]);
+
+ test_assert(sizeof(tokens)/sizeof(item[0]) == n);
+
+ for (i = 0; i < n; ++i) {
+ tokens[i].token = keys[i];
+ tokens[i].len = strlen(keys[i]);
+ }
+ for (i = 0; i < n; ++i) {
+ item = &tokens[i];
+ token_map_insert(&TM, item->token, item->len, item, ht_unique);
+ }
+ count = 0;
+ token_map_visit(&TM, visit_item_set, &count);
+ test_assert(count == n);
+
+ for (i = 0; i < n; ++i) {
+ item = token_map_find(&TM, keys[i], strlen(keys[i]));
+ test_assert(item->type == 0);
+ item->type = 1;
+ }
+ for (i = 0; i < n; ++i) {
+ item = token_map_find_item(&TM, &tokens[i]);
+ test_assert(item->type == 1);
+ item->type = 2;
+ }
+}
+
+void test_ht32()
+{
+ uint32_t keys[100];
+ int i, j;
+ ht32_t ht;
+ uint32_t *x, *y;
+
+ ht32_init(&ht, 10);
+ for (i = 0; i < 100; ++i) {
+ keys[i] = i + 3398;
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht32_insert_item(&ht, &keys[i], ht_unique);
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht32_find_item(&ht, &keys[i]);
+ test_assert(x != 0);
+ test_assert(*x == i + 3398);
+ }
+ for (i = 0; i < 100; ++i) {
+ y = ht32_remove_item(&ht, &keys[i]);
+ test_assert(y != ht32_missing);
+ for (j = 0; j < 100; ++j) {
+ x = ht32_find_item(&ht, &keys[j]);
+ if (j > i) {
+ test_assert(x != ht32_missing);
+ test_assert(*x == j + 3398);
+ } else {
+ test_assert(x == ht32_missing);
+ }
+ }
+ }
+ ht32_clear(&ht);
+}
+
+void test_ht64()
+{
+ uint64_t keys[100];
+ int i, j;
+ ht64_t ht;
+ uint64_t *x, *y;
+
+ ht64_init(&ht, 10);
+ for (i = 0; i < 100; ++i) {
+ keys[i] = i + 3398;
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht64_insert_item(&ht, &keys[i], ht_unique);
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht64_find_item(&ht, &keys[i]);
+ test_assert(x != 0);
+ test_assert(*x == i + 3398);
+ }
+ for (i = 0; i < 100; ++i) {
+ y = ht64_remove_item(&ht, &keys[i]);
+ test_assert(y != ht64_missing);
+ for (j = 0; j < 100; ++j) {
+ x = ht64_find_item(&ht, &keys[j]);
+ if (j > i) {
+ test_assert(x != ht64_missing);
+ test_assert(*x == j + 3398);
+ } else {
+ test_assert(x == ht64_missing);
+ }
+ }
+ }
+ ht64_clear(&ht);
+}
+
+void test_ht32rh()
+{
+ uint32_t keys[100];
+ int i, j;
+ ht32rh_t ht;
+ uint32_t *x, *y;
+
+ ht32rh_init(&ht, 10);
+ for (i = 0; i < 100; ++i) {
+ keys[i] = i + 3398;
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht32rh_insert_item(&ht, &keys[i], ht_unique);
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht32rh_find_item(&ht, &keys[i]);
+ test_assert(x != 0);
+ test_assert(*x == i + 3398);
+ }
+ for (i = 0; i < 100; ++i) {
+ y = ht32rh_remove_item(&ht, &keys[i]);
+ test_assert(y != ht32rh_missing);
+ for (j = 0; j < 100; ++j) {
+ x = ht32rh_find_item(&ht, &keys[j]);
+ if (j > i) {
+ test_assert(x != ht32rh_missing);
+ test_assert(*x == j + 3398);
+ } else {
+ test_assert(x == ht32rh_missing);
+ }
+ }
+ }
+ ht32rh_clear(&ht);
+}
+
+void test_ht64rh()
+{
+ uint64_t keys[100];
+ int i, j;
+ ht64rh_t ht;
+ uint64_t *x, *y;
+
+ ht64rh_init(&ht, 10);
+ for (i = 0; i < 100; ++i) {
+ keys[i] = i + 3398;
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht64rh_insert_item(&ht, &keys[i], ht_unique);
+ }
+ for (i = 0; i < 100; ++i) {
+ x = ht64rh_find_item(&ht, &keys[i]);
+ test_assert(x != 0);
+ test_assert(*x == i + 3398);
+ }
+ for (i = 0; i < 100; ++i) {
+ y = ht64rh_remove_item(&ht, &keys[i]);
+ test_assert(y != ht64rh_missing);
+ for (j = 0; j < 100; ++j) {
+ x = ht64rh_find_item(&ht, &keys[j]);
+ if (j > i) {
+ test_assert(x != ht64rh_missing);
+ test_assert(*x == j + 3398);
+ } else {
+ test_assert(x == ht64rh_missing);
+ }
+ }
+ }
+ ht64rh_clear(&ht);
+}
+
+int main(int argc, char *argv[])
+{
+ test_str_set();
+ test_str_set2();
+ test_str_set3();
+ test_str_set4();
+ test_token_map();
+ test_ht32();
+ test_ht64();
+ test_ht32rh();
+ test_ht64rh();
+
+ printf("all tests passed\n");
+
+ return 0;
+}
diff --git a/external/hash/ht32.c b/external/hash/ht32.c
new file mode 100644
index 0000000..9954bde
--- /dev/null
+++ b/external/hash/ht32.c
@@ -0,0 +1,47 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ht32.h"
+#define HT_HASH_FUNCTION ht_uint32_hash_function
+
+#include "hash_table_def.h"
+DEFINE_HASH_TABLE(ht32)
+
+#include "hash_table_impl.h"
+
+
+static inline int ht_match(const void *key, size_t len, const ht32_item_t item)
+{
+ return *(const ht32_item_t)key == *item;
+}
+
+static inline const void *ht_key(const ht32_item_t item)
+{
+ return (const void *)item;
+}
+
+static inline size_t ht_key_len(const ht32_item_t item)
+{
+ return sizeof(*item);
+}
diff --git a/external/hash/ht32.h b/external/hash/ht32.h
new file mode 100644
index 0000000..dab9ffb
--- /dev/null
+++ b/external/hash/ht32.h
@@ -0,0 +1,36 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HT32_H
+#define HT32_H
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "hash_table.h"
+
+DECLARE_HASH_TABLE(ht32, uint32_t *)
+
+#endif /* HT32_H */
diff --git a/external/hash/ht32rh.c b/external/hash/ht32rh.c
new file mode 100644
index 0000000..de6dae2
--- /dev/null
+++ b/external/hash/ht32rh.c
@@ -0,0 +1,47 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ht32rh.h"
+#define HT_HASH_FUNCTION ht_uint32_hash_function
+
+#include "hash_table_def.h"
+DEFINE_HASH_TABLE(ht32rh)
+
+#include "hash_table_impl_rh.h"
+
+
+static inline int ht_match(const void *key, size_t len, const ht32rh_item_t item)
+{
+ return *(const ht32rh_item_t)key == *item;
+}
+
+static inline const void *ht_key(const ht32rh_item_t item)
+{
+ return (const void *)item;
+}
+
+static inline size_t ht_key_len(const ht32rh_item_t item)
+{
+ return sizeof(*item);
+}
diff --git a/external/hash/ht32rh.h b/external/hash/ht32rh.h
new file mode 100644
index 0000000..061328e
--- /dev/null
+++ b/external/hash/ht32rh.h
@@ -0,0 +1,36 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HT32RH_H
+#define HT32RH_H
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "hash_table.h"
+
+DECLARE_HASH_TABLE(ht32rh, uint32_t *)
+
+#endif /* HT32RH_H */
diff --git a/external/hash/ht64.c b/external/hash/ht64.c
new file mode 100644
index 0000000..eaebbc5
--- /dev/null
+++ b/external/hash/ht64.c
@@ -0,0 +1,47 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ht64.h"
+#define HT_HASH_FUNCTION ht_uint64_hash_function
+
+#include "hash_table_def.h"
+DEFINE_HASH_TABLE(ht64)
+
+#include "hash_table_impl.h"
+
+
+static inline int ht_match(const void *key, size_t len, const ht64_item_t item)
+{
+ return *(const ht64_item_t)key == *item;
+}
+
+static inline const void *ht_key(const ht64_item_t item)
+{
+ return (const void *)item;
+}
+
+static inline size_t ht_key_len(const ht64_item_t item)
+{
+ return sizeof(*item);
+}
diff --git a/external/hash/ht64.h b/external/hash/ht64.h
new file mode 100644
index 0000000..b9f9fbe
--- /dev/null
+++ b/external/hash/ht64.h
@@ -0,0 +1,36 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HT64_H
+#define HT64_H
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "hash_table.h"
+
+DECLARE_HASH_TABLE(ht64, uint64_t *)
+
+#endif /* HT64_H */
diff --git a/external/hash/ht64rh.c b/external/hash/ht64rh.c
new file mode 100644
index 0000000..bfde550
--- /dev/null
+++ b/external/hash/ht64rh.c
@@ -0,0 +1,47 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ht64rh.h"
+#define HT_HASH_FUNCTION ht_uint64_hash_function
+
+#include "hash_table_def.h"
+DEFINE_HASH_TABLE(ht64rh)
+
+#include "hash_table_impl_rh.h"
+
+
+static inline int ht_match(const void *key, size_t len, const ht64rh_item_t item)
+{
+ return *(const ht64rh_item_t)key == *item;
+}
+
+static inline const void *ht_key(const ht64rh_item_t item)
+{
+ return (const void *)item;
+}
+
+static inline size_t ht_key_len(const ht64rh_item_t item)
+{
+ return sizeof(*item);
+}
diff --git a/external/hash/ht64rh.h b/external/hash/ht64rh.h
new file mode 100644
index 0000000..5b3d454
--- /dev/null
+++ b/external/hash/ht64rh.h
@@ -0,0 +1,36 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HT64RH_H
+#define HT64RH_H
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "hash_table.h"
+
+DECLARE_HASH_TABLE(ht64rh, uint64_t *)
+
+#endif /* HT64RH_H */
diff --git a/external/hash/ht_hash_function.h b/external/hash/ht_hash_function.h
new file mode 100644
index 0000000..1f65ee5
--- /dev/null
+++ b/external/hash/ht_hash_function.h
@@ -0,0 +1,258 @@
+#ifndef HT_HASH_FUNCTION_H
+#define HT_HASH_FUNCTION_H
+
+#include <stddef.h>
+
+#ifdef _MSC_VER
+/* `inline` only advisory anyway. */
+#pragma warning(disable: 4710) /* function not inlined */
+#endif
+
+/* Avoid the 0 special case in hash functions and allow configuration with an unguessable seed. */
+#ifndef HT_HASH_SEED
+#define HT_HASH_SEED UINT32_C(0x2f693b52)
+#endif
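+
+/*
+ * Sketch of how a build might override the seed (the value below is a
+ * hypothetical example, not a recommendation):
+ *
+ *     cc -DHT_HASH_SEED=0x1b873593 -c my_module.c
+ *
+ * or in a wrapper header included before this one:
+ *
+ *     #define HT_HASH_SEED UINT32_C(0x1b873593)
+ */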
+
+#ifndef HT_HASH_32
+
+#include "cmetrohash.h"
+
+static inline size_t ht_default_hash_function(const void *key, size_t len)
+{
+ uint64_t out;
+
+ cmetrohash64_1((const uint8_t *)key, len, HT_HASH_SEED, (uint8_t *)&out);
+ return (unsigned int)out;
+}
+
+/* When using the pointer directly as a hash key. */
+static inline size_t ht_ptr_hash_function(const void *key, size_t len)
+{
+ /* MurmurHash3 64-bit finalizer */
+ uint64_t x;
+
+ (void)len;
+
+ x = ((uint64_t)(size_t)key) ^ (HT_HASH_SEED);
+
+ x ^= x >> 33;
+ x *= 0xff51afd7ed558ccdULL;
+ x ^= x >> 33;
+ x *= 0xc4ceb9fe1a85ec53ULL;
+ x ^= x >> 33;
+ return (size_t)x;
+}
+
+#else
+
+#include "PMurHash.h"
+
+static inline size_t ht_default_hash_function(const void *key, size_t len)
+{
+ return (size_t)PMurHash32((HT_HASH_SEED), key, (int)len);
+}
+
+/* When using the pointer directly as a hash key. */
+static inline size_t ht_ptr_hash_function(const void *key, size_t len)
+{
+ /* http://stackoverflow.com/a/12996028 */
+ size_t x;
+
+ x = (size_t)key ^ (HT_HASH_SEED);
+
+ x = ((x >> 16) ^ x) * 0x45d9f3bUL;
+ x = ((x >> 16) ^ x) * 0x45d9f3bUL;
+ x = ((x >> 16) ^ x);
+ return x;
+}
+
+#endif /* HT_HASH_32 */
+
+
+/* This assumes the key points to a 32-bit aligned random value that is its own hash function. */
+static inline size_t ht_uint32_identity_hash_function(const void *key, size_t len)
+{
+ (void)len;
+ return (size_t)*(uint32_t *)key;
+}
+
+/* This assumes the key points to a 64-bit aligned random value that is its own hash function. */
+static inline size_t ht_uint64_identity_hash_function(const void *key, size_t len)
+{
+ (void)len;
+ return (size_t)*(uint64_t *)key;
+}
+
+/* This assumes the key points to a 32-bit aligned value. */
+static inline size_t ht_uint32_hash_function(const void *key, size_t len)
+{
+ uint32_t x = *(uint32_t *)key + (uint32_t)(HT_HASH_SEED);
+
+ (void)len;
+
+ /* http://stackoverflow.com/a/12996028 */
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x);
+ return x;
+}
+
+/* This assumes the key points to a 64-bit aligned value. */
+static inline size_t ht_uint64_hash_function(const void *key, size_t len)
+{
+ uint64_t x = *(uint64_t *)key + UINT64_C(0x9e3779b97f4a7c15) + (uint64_t)(HT_HASH_SEED);
+
+ (void)len;
+
+ x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
+ x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
+ return (size_t)(x ^ (x >> 31));
+}
+
+/*
+ * Suited for set operations of low-valued integers where the stored
+ * hash pointer is the key and the value.
+ *
+ * This function is especially useful for small hash tables (<1000)
+ * where collisions are cheap due to caching but also works for integer
+ * sets up to at least 1,000,000.
+ *
+ * NOTE: The multiplicative hash function by Knuth requires the modulo
+ * to table size be done by shifting the upper bits down, since this is
+ * where the quality bits reside. This yields significantly fewer
+ * collisions which is important for e.g. chained hashing. However, our
+ * interface does not provide the required information.
+ *
+ * When used in open hashing with load factors below 0.7 where the
+ * stored pointer is also the key, collision checking is very cheap and
+ * this pays off in a large range of table sizes where a more
+ * complicated hash simply doesn't pay off.
+ *
+ * When used with a pointer set where the pointer is also the key, it is
+ * not likely to work as well because the pointer acts as a large
+ * integer which works against the design of the hash function. Here a
+ * better mix function is probably worthwhile - therefore we also have
+ * ht_ptr_hash_function.
+ */
+static inline size_t ht_int_hash_function(const void *key, size_t len)
+{
+ (void)len;
+ return ((size_t)key ^ (HT_HASH_SEED)) * 2654435761UL;
+}
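+
+/*
+ * Usage sketch: a concrete table selects its hash function by defining
+ * HT_HASH_FUNCTION before including hash_table_def.h, as the ht32/ht64
+ * translation units in this directory do, e.g.
+ *
+ *     #define HT_HASH_FUNCTION ht_int_hash_function
+ *
+ * followed by the DEFINE_HASH_TABLE / hash_table_impl boilerplate and
+ * the ht_match/ht_key/ht_key_len helpers (see ht32.c for the pattern).
+ */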
+
+/* Bernstein's hash function; assumes the string is zero-terminated, len is ignored. */
+static inline size_t ht_str_hash_function(const void *key, size_t len)
+{
+ const unsigned char *str = key;
+ size_t hash = 5381 ^ (HT_HASH_SEED);
+ size_t c;
+
+ (void)len;
+
+ while ((c = (size_t)*str++))
+ hash = ((hash << 5) + hash) ^ c; /* (hash * 33) xor c */
+
+ return hash;
+}
+
+/* Hashes at most len characters or until zero termination. */
+static inline size_t ht_strn_hash_function(const void *key, size_t len)
+{
+ const unsigned char *str = key;
+ size_t hash = 5381 ^ (HT_HASH_SEED);
+ size_t c;
+
+ while (--len && (c = (size_t)*str++))
+ hash = ((hash << 5) + hash) ^ c; /* (hash * 33) xor c */
+
+ return hash;
+}
+
+static inline uint32_t ht_fnv1a32_hash_function(const void *key, size_t len)
+{
+#ifndef FNV1A_NOMUL
+ const uint32_t prime = UINT32_C(0x1000193);
+#endif
+ uint32_t hash = UINT32_C(0x811c9dc5);
+ const uint8_t *p = key;
+
+ while (len--) {
+ hash ^= (uint64_t)*p++;
+#ifndef FNV1A_NOMUL
+ hash *= prime;
+#else
+ hash += (hash << 1) + (hash << 4) + (hash << 7) +
+ (hash << 8) + (hash << 24);
+#endif
+ }
+ return hash;
+}
+
+static inline uint64_t ht_fnv1a64_hash_function(const void *key, size_t len)
+{
+#ifndef FNV1A_NOMUL
+ const uint64_t prime = UINT64_C(0x100000001b3);
+#endif
+ uint64_t hash = UINT64_C(0xcbf29ce484222325);
+ const uint8_t *p = key;
+
+ while (len--) {
+ hash ^= (uint64_t)*p++;
+#ifndef FNV1A_NOMUL
+ hash *= prime;
+#else
+ hash += (hash << 1) + (hash << 4) + (hash << 5) +
+ (hash << 7) + (hash << 8) + (hash << 40);
+#endif
+ }
+ return hash;
+}
+
+/* Hashes until string termination and ignores length argument. */
+static inline uint32_t ht_fnv1a32_str_hash_function(const void *key, size_t len)
+{
+#ifndef FNV1A_NOMUL
+ const uint32_t prime = UINT32_C(0x1000193);
+#endif
+ uint32_t hash = UINT32_C(0x811c9dc5);
+ const uint8_t *p = key;
+
+ (void)len;
+
+ while (*p) {
+ hash ^= (uint64_t)*p++;
+#ifndef FNV1A_NOMUL
+ hash *= prime;
+#else
+ hash += (hash << 1) + (hash << 4) + (hash << 7) +
+ (hash << 8) + (hash << 24);
+#endif
+ }
+ return hash;
+}
+
+/* Hashes until string termination and ignores length argument. */
+static inline uint64_t ht_fnv1a64_str_hash_function(const void *key, size_t len)
+{
+#ifndef FNV1A_NOMUL
+ const uint64_t prime = UINT64_C(0x100000001b3);
+#endif
+ uint64_t hash = UINT64_C(0xcbf29ce484222325);
+ const uint8_t *p = key;
+
+ (void)len;
+
+ while (*p) {
+ hash ^= (uint64_t)*p++;
+#ifndef FNV1A_NOMUL
+ hash *= prime;
+#else
+ hash += (hash << 1) + (hash << 4) + (hash << 5) +
+ (hash << 7) + (hash << 8) + (hash << 40);
+#endif
+ }
+ return hash;
+}
+
+
+#endif /* HT_HASH_FUNCTION_H */
diff --git a/external/hash/ht_portable.h b/external/hash/ht_portable.h
new file mode 100644
index 0000000..3affc1d
--- /dev/null
+++ b/external/hash/ht_portable.h
@@ -0,0 +1,9 @@
+#ifndef HT_PORTABLE_H
+#define HT_PORTABLE_H
+
+#if defined(_MSC_VER) && !defined(inline)
+#define inline __inline
+#endif
+#include "pstdint.h"
+
+#endif
diff --git a/external/hash/ht_trace.h b/external/hash/ht_trace.h
new file mode 100644
index 0000000..63af4a8
--- /dev/null
+++ b/external/hash/ht_trace.h
@@ -0,0 +1,59 @@
+#ifndef HT_TRACE_H
+#define HT_TRACE_H
+
+#ifdef HT_TRACE_ON
+#ifndef HT_TRACE_OUT
+#define HT_TRACE_OUT stderr
+#endif
+
+#include <stdio.h>
+#define ht_trace(s) fprintf(HT_TRACE_OUT, "trace: %s\n", s)
+#define ht_tracei(s, i) fprintf(HT_TRACE_OUT, "trace: %s: %d\n", s, (int)i)
+#define ht_tracex(s, x) fprintf(HT_TRACE_OUT, "trace: %s: 0x%lx\n", s, (long)x)
+#define ht_traces(s, s2, len) fprintf(HT_TRACE_OUT, "trace: %s: %.*s\n", s, (int)len, s2)
+
+static void ht_trace_buckets(hash_table_t *ht, char *msg, int first, int count)
+{
+ int i, j, N, n;
+
+ n = ht->buckets;
+ N = n - 1;
+
+ if (count == 0) {
+ count = 32;
+ }
+ if (count > n) {
+ count = n;
+ }
+
+ first = first & N;
+ fprintf(HT_TRACE_OUT, "bucket trace: %s\n", msg);
+ if (n > count) {
+ n = count;
+ }
+ fprintf(HT_TRACE_OUT, "item count: %ld, bucket count %ld, utilization: %0.1f%%\n",
+ ht->count, ht->buckets, (double)ht->count / ht->buckets * 100);
+
+ if (ht->offsets) {
+ for (i = 0; i < n; ++i) {
+ j = (first + i) & N;
+ fprintf(HT_TRACE_OUT, "%03d:%08x:[%02d]\n",
+ j, (unsigned int)((void **)ht->table)[j], (unsigned int)ht->offsets[j]);
+ }
+ } else {
+ for (i = 0; i < n; ++i) {
+ j = (first + i) & N;
+ fprintf(HT_TRACE_OUT, "%03d:%08x\n", j, (unsigned int)((void **)ht->table)[j]);
+ }
+ }
+ fprintf(HT_TRACE_OUT, "--\n");
+}
+#else
+#define ht_trace(arg1) ((void)0)
+#define ht_tracei(arg1, arg2) ((void)0)
+#define ht_tracex(arg1, arg2) ((void)0)
+#define ht_traces(arg1, arg2, arg3) ((void)0)
+#define ht_trace_buckets(arg1, arg2, arg3, arg4) ((void)0)
+#endif
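+
+/*
+ * Tracing is compiled out unless HT_TRACE_ON is defined before this
+ * header is included; HT_TRACE_OUT defaults to stderr. A minimal sketch:
+ *
+ *     #define HT_TRACE_ON
+ *     #include "ht_trace.h"
+ *
+ *     ht_trace_buckets(&my_table, "after insert", 0, 8);
+ *
+ * where my_table is a hypothetical hash_table_t instance.
+ */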
+
+#endif /* HT_TRACE_H */
diff --git a/external/hash/initbuild.sh b/external/hash/initbuild.sh
new file mode 100755
index 0000000..34a3fc0
--- /dev/null
+++ b/external/hash/initbuild.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+cd `dirname $0`
+mkdir -p "build/release"
+cd build/release && cmake -GNinja ../.. -DCMAKE_BUILD_TYPE=Release && ninja
diff --git a/external/hash/initbuild_debug.sh b/external/hash/initbuild_debug.sh
new file mode 100755
index 0000000..d190139
--- /dev/null
+++ b/external/hash/initbuild_debug.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+cd `dirname $0`
+mkdir -p "build/debug"
+cd build/debug && cmake -GNinja ../.. -DCMAKE_BUILD_TYPE=Debug && ninja
diff --git a/external/hash/int_set.h b/external/hash/int_set.h
new file mode 100644
index 0000000..b873ef9
--- /dev/null
+++ b/external/hash/int_set.h
@@ -0,0 +1,50 @@
+#ifndef INT_SET_H
+#define INT_SET_H
+
+#include "ptr_set.h"
+
+/*
+ * The values 0, 1, and 2 are reserved by the underlying ptr_set, so
+ * we offset integers before casting them to void *.
+ *
+ * As a consequence, the largest positive integers are disallowed
+ * instead.
+ *
+ * This is specific to the implementation of ptr_set, so if it
+ * changes, we may have to change this mapping as well.
+ */
+
+#define HT_INT_SET_OFFSET ((1 << (8 * sizeof(int) - 1)) - 2)
+#define HT_INT_TO_PTR(x) ((void *)(size_t)((x) - HT_INT_SET_OFFSET))
+#define HT_PTR_TO_INT(x) ((int)(size_t)(x) + HT_INT_SET_OFFSET)
+
+/* Return value helpers. */
+#define INT_SET_IS_MISSING(x) (HT_PTR_SET_MISSING(HT_INT_TO_PTR(x)))
+#define INT_SET_IS_ERROR(x) (HT_PTR_SET_IS_ERROR(HT_INT_TO_PTR(x)))
+#define INT_SET_IS_VALID(x) (HT_PTR_SET_IS_VALID(HT_INT_TO_PTR(x)))
+
+typedef ptr_set_t int_set_t;
+
+/* Returns 1 if already present, 0 otherwise. */
+static inline int int_set_add(int_set_t *S, int x)
+{
+ return ptr_set_insert_item(S, HT_INT_TO_PTR(x), ht_keep) != 0;
+}
+
+/* Returns 1 if removed, 0 otherwise. */
+static inline int int_set_remove(int_set_t *S, int x)
+{
+ return ptr_set_remove_item(S, HT_INT_TO_PTR(x)) != 0;
+}
+
+static inline int int_set_count(int_set_t *S)
+{
+ return ptr_set_count(S);
+}
+
+/* Returns 1 if present, 0 otherwise. */
+static inline int int_set_exists(int_set_t *S, int x)
+{
+ return ptr_set_exists(S, HT_INT_TO_PTR(x));
+}
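+
+/*
+ * Usage sketch (load_test.c exercises the same calls at scale):
+ *
+ *     int_set_t S = {0};
+ *
+ *     int_set_add(&S, 42);
+ *     assert(int_set_exists(&S, 42));
+ *     int_set_remove(&S, 42);
+ *     assert(int_set_count(&S) == 0);
+ *
+ * Note that the largest positive int values are not representable
+ * because of the offset mapping above.
+ */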
+
+#endif /* INT_SET_H */
diff --git a/external/hash/load_test.c b/external/hash/load_test.c
new file mode 100644
index 0000000..1c3d0e7
--- /dev/null
+++ b/external/hash/load_test.c
@@ -0,0 +1,86 @@
+#include <assert.h>
+#include <sys/time.h>
+#include <stdio.h>
+
+//#define INT_SET_PRIVATE
+#ifdef INT_SET_PRIVATE
+/* Make all hash functions private to this module for better
+ * performance. This may not be necessary depending on compiler
+ * optimizations. clang 4.2 -O3 benefits, while -O4 figures it out and
+ * gets the same speed with external linkage. */
+#define HT_PRIVATE
+#include "int_set.h"
+#include "ptr_set.c"
+#undef HT_PRIVATE
+#else
+/* Use external linkage. Link with ptr_set.c which int_set depends upon. */
+#include "int_set.h"
+#endif
+
+struct timeval time_diff(struct timeval start, struct timeval end)
+{
+ struct timeval temp;
+ if ((end.tv_usec-start.tv_usec)<0) {
+ temp.tv_sec = end.tv_sec-start.tv_sec-1;
+ temp.tv_usec = 1000000+end.tv_usec-start.tv_usec;
+ } else {
+ temp.tv_sec = end.tv_sec-start.tv_sec;
+ temp.tv_usec = end.tv_usec-start.tv_usec;
+ }
+ return temp;
+}
+
+double elapsed_ms(struct timeval td)
+{
+ return (double)td.tv_sec * 1000 + (double)td.tv_usec / 1000;
+}
+
+void test_int_set()
+{
+ int i, x;
+ const int N = 1000000;
+ //const int N = 1000;
+ int_set_t ht = {0};
+ int_set_t *S = &ht;
+ double ms, nsop, opms;
+ struct timeval t1, t2, td;
+
+ for (i = 1; i <= N; ++i) {
+ int_set_add(S, i);
+ assert(int_set_exists(S, i));
+ }
+ assert(int_set_count(S) == N);
+
+ for (i = 1; i <= N; ++i) {
+ assert(int_set_exists(S, i));
+ }
+
+ gettimeofday(&t1, 0);
+ for (x = 0, i = 1; i <= N; ++i) {
+ x += int_set_exists(S, i);
+ }
+ gettimeofday(&t2, 0);
+
+ td = time_diff(t1, t2);
+ ms = elapsed_ms(td);
+
+ nsop = ms * 1000000 / x;
+ opms = (double)x / ms;
+ printf("%d out of %d keys found in time %0.03f ms or %0.01f ns per op\n",
+ x, N, ms, nsop);
+ printf("ops / ms: %0.0f\n", opms);
+
+ for (i = 1; i <= N; ++i) {
+ assert(int_set_count(S) == N - i + 1);
+ assert(int_set_exists(S, i));
+ int_set_remove(S, i);
+ assert(!int_set_exists(S, i));
+ }
+ assert(int_set_count(S) == 0);
+}
+
+int main(int argc, char *argv[])
+{
+ test_int_set();
+ return 0;
+}
diff --git a/external/hash/pstdint.h b/external/hash/pstdint.h
new file mode 100644
index 0000000..14444aa
--- /dev/null
+++ b/external/hash/pstdint.h
@@ -0,0 +1,898 @@
+/* A portable stdint.h
+ ****************************************************************************
+ * BSD License:
+ ****************************************************************************
+ *
+ * Copyright (c) 2005-2016 Paul Hsieh
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************
+ *
+ * Version 0.1.15.2
+ *
+ * The ANSI C standard committee, for the C99 standard, specified the
+ * inclusion of a new standard include file called stdint.h. This is
+ * a very useful and long desired include file which contains several
+ * very precise definitions for integer scalar types that are
+ * critically important for making portable several classes of
+ * applications including cryptography, hashing, variable length
+ * integer libraries and so on. But for most developers it's likely
+ * useful just for programming sanity.
+ *
+ * The problem is that some compiler vendors chose to ignore the C99
+ * standard and some older compilers have no opportunity to be updated.
+ * Because of this situation, simply including stdint.h in your code
+ * makes it unportable.
+ *
+ * So that's what this file is all about. It's an attempt to build a
+ * single universal include file that works on as many platforms as
+ * possible to deliver what stdint.h is supposed to. Even compilers
+ * that already come with stdint.h can use this file instead without
+ * any loss of functionality. A few things that should be noted about
+ * this file:
+ *
+ * 1) It is not guaranteed to be portable and/or present an identical
+ * interface on all platforms. The extreme variability of the
+ * ANSI C standard makes this an impossibility right from the
+ * very get-go. It's really only meant to be useful for the vast
+ * majority of platforms that possess the capability of
+ * implementing usefully and precisely defined, standard sized
+ * integer scalars. Systems which are not intrinsically 2s
+ * complement may produce invalid constants.
+ *
+ * 2) There is an unavoidable use of non-reserved symbols.
+ *
+ * 3) Other standard include files are invoked.
+ *
+ * 4) This file may come in conflict with future platforms that do
+ * include stdint.h. The hope is that one or the other can be
+ * used with no real difference.
+ *
+ * 5) In the current version, if your platform can't represent
+ * int32_t, int16_t and int8_t, it just dumps out with a compiler
+ * error.
+ *
+ * 6) 64 bit integers may or may not be defined. Test for their
+ * presence with the test: #ifdef INT64_MAX or #ifdef UINT64_MAX.
+ * Note that this is different from the C99 specification which
+ * requires the existence of 64 bit support in the compiler. If
+ * this is not defined for your platform, yet it is capable of
+ * dealing with 64 bits then it is because this file has not yet
+ * been extended to cover all of your system's capabilities.
+ *
+ * 7) (u)intptr_t may or may not be defined. Test for its presence
+ * with the test: #ifdef PTRDIFF_MAX. If this is not defined
+ * for your platform, then it is because this file has not yet
+ * been extended to cover all of your system's capabilities, not
+ * because it's optional.
+ *
+ * 8) The following might not be defined even if your platform is
+ * capable of defining it:
+ *
+ * WCHAR_MIN
+ * WCHAR_MAX
+ * (u)int64_t
+ * PTRDIFF_MIN
+ * PTRDIFF_MAX
+ * (u)intptr_t
+ *
+ * 9) The following have not been defined:
+ *
+ * WINT_MIN
+ * WINT_MAX
+ *
+ * 10) The criteria for defining (u)int_least(*)_t isn't clear,
+ * except for systems which don't have a type that precisely
+ * defined 8, 16, or 32 bit types (which this include file does
+ * not support anyway). Default definitions have been given.
+ *
+ * 11) The criteria for defining (u)int_fast(*)_t isn't something I
+ * would trust to any particular compiler vendor or the ANSI C
+ * committee. It is well known that "compatible systems" are
+ * commonly created that have very different performance
+ * characteristics from the systems they are compatible with,
+ * especially those whose vendors make both the compiler and the
+ * system. Default definitions have been given, but it's strongly
+ * recommended that users never use these definitions for any
+ * reason (they do *NOT* deliver any serious guarantee of
+ * improved performance -- not in this file, nor any vendor's
+ * stdint.h).
+ *
+ * 12) The following macros:
+ *
+ * PRINTF_INTMAX_MODIFIER
+ * PRINTF_INT64_MODIFIER
+ * PRINTF_INT32_MODIFIER
+ * PRINTF_INT16_MODIFIER
+ * PRINTF_LEAST64_MODIFIER
+ * PRINTF_LEAST32_MODIFIER
+ * PRINTF_LEAST16_MODIFIER
+ * PRINTF_INTPTR_MODIFIER
+ *
+ * are strings which have been defined as the modifiers required
+ * for the "d", "u" and "x" printf formats to correctly output
+ * (u)intmax_t, (u)int64_t, (u)int32_t, (u)int16_t, (u)least64_t,
+ * (u)least32_t, (u)least16_t and (u)intptr_t types respectively.
+ * PRINTF_INTPTR_MODIFIER is not defined for some systems which
+ * provide their own stdint.h. PRINTF_INT64_MODIFIER is not
+ * defined if INT64_MAX is not defined. These are an extension
+ * beyond what C99 specifies must be in stdint.h.
+ *
+ * In addition, the following macros are defined:
+ *
+ * PRINTF_INTMAX_HEX_WIDTH
+ * PRINTF_INT64_HEX_WIDTH
+ * PRINTF_INT32_HEX_WIDTH
+ * PRINTF_INT16_HEX_WIDTH
+ * PRINTF_INT8_HEX_WIDTH
+ * PRINTF_INTMAX_DEC_WIDTH
+ * PRINTF_INT64_DEC_WIDTH
+ * PRINTF_INT32_DEC_WIDTH
+ * PRINTF_INT16_DEC_WIDTH
+ * PRINTF_UINT8_DEC_WIDTH
+ * PRINTF_UINTMAX_DEC_WIDTH
+ * PRINTF_UINT64_DEC_WIDTH
+ * PRINTF_UINT32_DEC_WIDTH
+ * PRINTF_UINT16_DEC_WIDTH
+ * PRINTF_UINT8_DEC_WIDTH
+ *
+ * Which specifies the maximum number of characters required to
+ * print the number of that type in either hexadecimal or decimal.
+ * These are an extension beyond what C99 specifies must be in
+ * stdint.h.
+ *
+ * Compilers tested (all with 0 warnings at their highest respective
+ * settings): Borland Turbo C 2.0, WATCOM C/C++ 11.0 (16 bits and 32
+ * bits), Microsoft Visual C++ 6.0 (32 bit), Microsoft Visual Studio
+ * .net (VC7), Intel C++ 4.0, GNU gcc v3.3.3
+ *
+ * This file should be considered a work in progress. Suggestions for
+ * improvements, especially those which increase coverage are strongly
+ * encouraged.
+ *
+ * Acknowledgements
+ *
+ * The following people have made significant contributions to the
+ * development and testing of this file:
+ *
+ * Chris Howie
+ * John Steele Scott
+ * Dave Thorup
+ * John Dill
+ * Florian Wobbe
+ * Christopher Sean Morrison
+ * Mikkel Fahnoe Jorgensen
+ *
+ */
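+
+/*
+ * Example use of the PRINTF_*_MODIFIER macros described above, assuming
+ * 64-bit support is available (the self-test at the end of this file
+ * performs the same kind of checks):
+ *
+ *     printf("%" PRINTF_INT64_MODIFIER "d\n", INT64_C(-1));
+ *     printf("%" PRINTF_INT32_MODIFIER "u\n", UINT32_C(42));
+ */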
+
+#include <stddef.h>
+#include <limits.h>
+#include <signal.h>
+
+/*
+ * For gcc with _STDINT_H, fill in the PRINTF_INT*_MODIFIER macros, and
+ * do nothing else. On the Mac OS X version of gcc this is _STDINT_H_.
+ */
+
+#if ((defined(_MSC_VER) && _MSC_VER >= 1600) || (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (defined (__WATCOMC__) && (defined (_STDINT_H_INCLUDED) || __WATCOMC__ >= 1250)) || (defined(__GNUC__) && (__GNUC__ > 3 || defined(_STDINT_H) || defined(_STDINT_H_) || defined (__UINT_FAST64_TYPE__)) )) && !defined (_PSTDINT_H_INCLUDED)
+#include <stdint.h>
+#define _PSTDINT_H_INCLUDED
+# if defined(__GNUC__) && (defined(__x86_64__) || defined(__ppc64__)) && !(defined(__APPLE__) && defined(__MACH__))
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "l"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# else
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# if (UINT_MAX == UINT32_MAX)
+# define PRINTF_INT32_MODIFIER ""
+# else
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+# endif
+# endif
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_UINT64_HEX_WIDTH
+# define PRINTF_UINT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_UINT32_HEX_WIDTH
+# define PRINTF_UINT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_UINT16_HEX_WIDTH
+# define PRINTF_UINT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_UINT8_HEX_WIDTH
+# define PRINTF_UINT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+# endif
+# ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+# endif
+# ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_HEX_WIDTH
+# define PRINTF_UINTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_DEC_WIDTH
+# define PRINTF_UINTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+
+/*
+ * Something really weird is going on with Open Watcom. Just pull some of
+ * these duplicated definitions from Open Watcom's stdint.h file for now.
+ */
+
+# if defined (__WATCOMC__) && __WATCOMC__ >= 1250
+# if !defined (INT64_C)
+# define INT64_C(x) (x + (INT64_MAX - INT64_MAX))
+# endif
+# if !defined (UINT64_C)
+# define UINT64_C(x) (x + (UINT64_MAX - UINT64_MAX))
+# endif
+# if !defined (INT32_C)
+# define INT32_C(x) (x + (INT32_MAX - INT32_MAX))
+# endif
+# if !defined (UINT32_C)
+# define UINT32_C(x) (x + (UINT32_MAX - UINT32_MAX))
+# endif
+# if !defined (INT16_C)
+# define INT16_C(x) (x)
+# endif
+# if !defined (UINT16_C)
+# define UINT16_C(x) (x)
+# endif
+# if !defined (INT8_C)
+# define INT8_C(x) (x)
+# endif
+# if !defined (UINT8_C)
+# define UINT8_C(x) (x)
+# endif
+# if !defined (UINT64_MAX)
+# define UINT64_MAX 18446744073709551615ULL
+# endif
+# if !defined (INT64_MAX)
+# define INT64_MAX 9223372036854775807LL
+# endif
+# if !defined (UINT32_MAX)
+# define UINT32_MAX 4294967295UL
+# endif
+# if !defined (INT32_MAX)
+# define INT32_MAX 2147483647L
+# endif
+# if !defined (INTMAX_MAX)
+# define INTMAX_MAX INT64_MAX
+# endif
+# if !defined (INTMAX_MIN)
+# define INTMAX_MIN INT64_MIN
+# endif
+# endif
+#endif
+
+#ifndef _PSTDINT_H_INCLUDED
+#define _PSTDINT_H_INCLUDED
+
+#ifndef SIZE_MAX
+# define SIZE_MAX (~(size_t)0)
+#endif
+
+/*
+ * Deduce the type assignments from limits.h under the assumption that
+ * integer sizes in bits are powers of 2, and follow the ANSI
+ * definitions.
+ */
+
+#ifndef UINT8_MAX
+# define UINT8_MAX 0xff
+#endif
+#if !defined(uint8_t) && !defined(_UINT8_T)
+# if (UCHAR_MAX == UINT8_MAX) || defined (S_SPLINT_S)
+ typedef unsigned char uint8_t;
+# define UINT8_C(v) ((uint8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef INT8_MAX
+# define INT8_MAX 0x7f
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN INT8_C(0x80)
+#endif
+#if !defined(int8_t) && !defined(_INT8_T)
+# if (SCHAR_MAX == INT8_MAX) || defined (S_SPLINT_S)
+ typedef signed char int8_t;
+# define INT8_C(v) ((int8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef UINT16_MAX
+# define UINT16_MAX 0xffff
+#endif
+#if !defined(uint16_t) && !defined(_UINT16_T)
+#if (UINT_MAX == UINT16_MAX) || defined (S_SPLINT_S)
+ typedef unsigned int uint16_t;
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+# define UINT16_C(v) ((uint16_t) (v))
+#elif (USHRT_MAX == UINT16_MAX)
+ typedef unsigned short uint16_t;
+# define UINT16_C(v) ((uint16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT16_MAX
+# define INT16_MAX 0x7fff
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN INT16_C(0x8000)
+#endif
+#if !defined(int16_t) && !defined(_INT16_T)
+#if (INT_MAX == INT16_MAX) || defined (S_SPLINT_S)
+ typedef signed int int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT16_MAX)
+ typedef signed short int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef UINT32_MAX
+# define UINT32_MAX (0xffffffffUL)
+#endif
+#if !defined(uint32_t) && !defined(_UINT32_T)
+#if (ULONG_MAX == UINT32_MAX) || defined (S_SPLINT_S)
+ typedef unsigned long uint32_t;
+# define UINT32_C(v) v ## UL
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (UINT_MAX == UINT32_MAX)
+ typedef unsigned int uint32_t;
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# define UINT32_C(v) v ## U
+#elif (USHRT_MAX == UINT32_MAX)
+ typedef unsigned short uint32_t;
+# define UINT32_C(v) ((unsigned short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT32_MAX
+# define INT32_MAX (0x7fffffffL)
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN INT32_C(0x80000000)
+#endif
+#if !defined(int32_t) && !defined(_INT32_T)
+#if (LONG_MAX == INT32_MAX) || defined (S_SPLINT_S)
+ typedef signed long int32_t;
+# define INT32_C(v) v ## L
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (INT_MAX == INT32_MAX)
+ typedef signed int int32_t;
+# define INT32_C(v) v
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT32_MAX)
+ typedef signed short int32_t;
+# define INT32_C(v) ((short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+/*
+ * The macro stdint_int64_defined is temporarily used to record
+ * whether or not 64 integer support is available. It must be
+ * defined for any 64 integer extensions for new platforms that are
+ * added.
+ */
+
+#undef stdint_int64_defined
+#if (defined(__STDC__) && defined(__STDC_VERSION__)) || defined (S_SPLINT_S)
+# if (__STDC__ && __STDC_VERSION__ >= 199901L) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# endif
+#endif
+
+#if !defined (stdint_int64_defined)
+# if defined(__GNUC__)
+# define stdint_int64_defined
+ __extension__ typedef long long int64_t;
+ __extension__ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif defined(__MWERKS__) || defined (__SUNPRO_C) || defined (__SUNPRO_CC) || defined (__APPLE_CC__) || defined (_LONG_LONG) || defined (_CRAYC) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif (defined(__WATCOMC__) && defined(__WATCOM_INT64__)) || (defined(_MSC_VER) && _INTEGRAL_MAX_BITS >= 64) || (defined (__BORLANDC__) && __BORLANDC__ > 0x460) || defined (__alpha) || defined (__DECC)
+# define stdint_int64_defined
+ typedef __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+# define UINT64_C(v) v ## UI64
+# define INT64_C(v) v ## I64
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "I64"
+# endif
+# endif
+#endif
+
+#if !defined (LONG_LONG_MAX) && defined (INT64_C)
+# define LONG_LONG_MAX INT64_C (9223372036854775807)
+#endif
+#ifndef ULONG_LONG_MAX
+# define ULONG_LONG_MAX UINT64_C (18446744073709551615)
+#endif
+
+#if !defined (INT64_MAX) && defined (INT64_C)
+# define INT64_MAX INT64_C (9223372036854775807)
+#endif
+#if !defined (INT64_MIN) && defined (INT64_C)
+# define INT64_MIN INT64_C (-9223372036854775808)
+#endif
+#if !defined (UINT64_MAX) && defined (INT64_C)
+# define UINT64_MAX UINT64_C (18446744073709551615)
+#endif
+
+/*
+ * Width of hexadecimal for number field.
+ */
+
+#ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+#endif
+#ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+#endif
+#ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+#endif
+#ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+#endif
+#ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+#endif
+#ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+#endif
+#ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+#endif
+#ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+#endif
+
+/*
+ * OK, let's not worry about 128 bit integers for now. Moore's law says
+ * we don't need to worry about that until about 2040 at which point
+ * we'll have bigger things to worry about.
+ */
+
+#ifdef stdint_int64_defined
+ typedef int64_t intmax_t;
+ typedef uint64_t uintmax_t;
+# define INTMAX_MAX INT64_MAX
+# define INTMAX_MIN INT64_MIN
+# define UINTMAX_MAX UINT64_MAX
+# define UINTMAX_C(v) UINT64_C(v)
+# define INTMAX_C(v) INT64_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT64_DEC_WIDTH
+# endif
+#else
+ typedef int32_t intmax_t;
+ typedef uint32_t uintmax_t;
+# define INTMAX_MAX INT32_MAX
+# define UINTMAX_MAX UINT32_MAX
+# define UINTMAX_C(v) UINT32_C(v)
+# define INTMAX_C(v) INT32_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT32_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT32_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT32_DEC_WIDTH
+# endif
+#endif
+
+/*
+ * Because this file currently only supports platforms which have
+ * precise powers of 2 as bit sizes for the default integers, the
+ * least definitions are all trivial. It's possible that a future
+ * version of this file could have different definitions.
+ */
+
+#ifndef stdint_least_defined
+ typedef int8_t int_least8_t;
+ typedef uint8_t uint_least8_t;
+ typedef int16_t int_least16_t;
+ typedef uint16_t uint_least16_t;
+ typedef int32_t int_least32_t;
+ typedef uint32_t uint_least32_t;
+# define PRINTF_LEAST32_MODIFIER PRINTF_INT32_MODIFIER
+# define PRINTF_LEAST16_MODIFIER PRINTF_INT16_MODIFIER
+# define UINT_LEAST8_MAX UINT8_MAX
+# define INT_LEAST8_MAX INT8_MAX
+# define UINT_LEAST16_MAX UINT16_MAX
+# define INT_LEAST16_MAX INT16_MAX
+# define UINT_LEAST32_MAX UINT32_MAX
+# define INT_LEAST32_MAX INT32_MAX
+# define INT_LEAST8_MIN INT8_MIN
+# define INT_LEAST16_MIN INT16_MIN
+# define INT_LEAST32_MIN INT32_MIN
+# ifdef stdint_int64_defined
+ typedef int64_t int_least64_t;
+ typedef uint64_t uint_least64_t;
+# define PRINTF_LEAST64_MODIFIER PRINTF_INT64_MODIFIER
+# define UINT_LEAST64_MAX UINT64_MAX
+# define INT_LEAST64_MAX INT64_MAX
+# define INT_LEAST64_MIN INT64_MIN
+# endif
+#endif
+#undef stdint_least_defined
+
+/*
+ * The ANSI C committee pretending to know or specify anything about
+ * performance is the epitome of misguided arrogance. The mandate of
+ * this file is to *ONLY* ever support that absolute minimum
+ * definition of the fast integer types, for compatibility purposes.
+ * No extensions, and no attempt to suggest what may or may not be a
+ * faster integer type will ever be made in this file. Developers are
+ * warned to stay away from these types when using this or any other
+ * stdint.h.
+ */
+
+typedef int_least8_t int_fast8_t;
+typedef uint_least8_t uint_fast8_t;
+typedef int_least16_t int_fast16_t;
+typedef uint_least16_t uint_fast16_t;
+typedef int_least32_t int_fast32_t;
+typedef uint_least32_t uint_fast32_t;
+#define UINT_FAST8_MAX UINT_LEAST8_MAX
+#define INT_FAST8_MAX INT_LEAST8_MAX
+#define UINT_FAST16_MAX UINT_LEAST16_MAX
+#define INT_FAST16_MAX INT_LEAST16_MAX
+#define UINT_FAST32_MAX UINT_LEAST32_MAX
+#define INT_FAST32_MAX INT_LEAST32_MAX
+#define INT_FAST8_MIN INT_LEAST8_MIN
+#define INT_FAST16_MIN INT_LEAST16_MIN
+#define INT_FAST32_MIN INT_LEAST32_MIN
+#ifdef stdint_int64_defined
+ typedef int_least64_t int_fast64_t;
+ typedef uint_least64_t uint_fast64_t;
+# define UINT_FAST64_MAX UINT_LEAST64_MAX
+# define INT_FAST64_MAX INT_LEAST64_MAX
+# define INT_FAST64_MIN INT_LEAST64_MIN
+#endif
+
+#undef stdint_int64_defined
+
+/*
+ * Whatever piecemeal, per compiler thing we can do about the wchar_t
+ * type limits.
+ */
+
+#if defined(__WATCOMC__) || defined(_MSC_VER) || defined (__GNUC__)
+# include <wchar.h>
+# ifndef WCHAR_MIN
+# define WCHAR_MIN 0
+# endif
+# ifndef WCHAR_MAX
+# define WCHAR_MAX ((wchar_t)-1)
+# endif
+#endif
+
+/*
+ * Whatever piecemeal, per compiler/platform thing we can do about the
+ * (u)intptr_t types and limits.
+ */
+
+#if (defined (_MSC_VER) && defined (_UINTPTR_T_DEFINED)) || defined (_UINTPTR_T)
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+#ifndef STDINT_H_UINTPTR_T_DEFINED
+# if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) || defined (_WIN64) || defined (__ppc64__)
+# define stdint_intptr_bits 64
+# elif defined (__WATCOMC__) || defined (__TURBOC__)
+# if defined(__TINY__) || defined(__SMALL__) || defined(__MEDIUM__)
+# define stdint_intptr_bits 16
+# else
+# define stdint_intptr_bits 32
+# endif
+# elif defined (__i386__) || defined (_WIN32) || defined (WIN32) || defined (__ppc64__)
+# define stdint_intptr_bits 32
+# elif defined (__INTEL_COMPILER)
+/* TODO -- what did Intel do about x86-64? */
+# else
+/* #error "This platform might not be supported yet" */
+# endif
+
+# ifdef stdint_intptr_bits
+# define stdint_intptr_glue3_i(a,b,c) a##b##c
+# define stdint_intptr_glue3(a,b,c) stdint_intptr_glue3_i(a,b,c)
+# ifndef PRINTF_INTPTR_MODIFIER
+# define PRINTF_INTPTR_MODIFIER stdint_intptr_glue3(PRINTF_INT,stdint_intptr_bits,_MODIFIER)
+# endif
+# ifndef PTRDIFF_MAX
+# define PTRDIFF_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef PTRDIFF_MIN
+# define PTRDIFF_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef UINTPTR_MAX
+# define UINTPTR_MAX stdint_intptr_glue3(UINT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MAX
+# define INTPTR_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MIN
+# define INTPTR_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef INTPTR_C
+# define INTPTR_C(x) stdint_intptr_glue3(INT,stdint_intptr_bits,_C)(x)
+# endif
+# ifndef UINTPTR_C
+# define UINTPTR_C(x) stdint_intptr_glue3(UINT,stdint_intptr_bits,_C)(x)
+# endif
+ typedef stdint_intptr_glue3(uint,stdint_intptr_bits,_t) uintptr_t;
+ typedef stdint_intptr_glue3( int,stdint_intptr_bits,_t) intptr_t;
+# else
+/* TODO -- The following is likely wrong for some platforms, and does
+ nothing for the definition of uintptr_t. */
+ typedef ptrdiff_t intptr_t;
+# endif
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+/*
+ * Assumes sig_atomic_t is signed and we have a two's complement machine.
+ */
+
+#ifndef SIG_ATOMIC_MAX
+# define SIG_ATOMIC_MAX ((((sig_atomic_t) 1) << (sizeof (sig_atomic_t)*CHAR_BIT-1)) - 1)
+#endif
+
+#endif
+
+#if defined (__TEST_PSTDINT_FOR_CORRECTNESS)
+
+/*
+ * Please compile with the maximum warning settings to make sure macros are
+ * not defined more than once.
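+ *
+ * For example, with GCC or Clang the self test can be built directly
+ * from this header (illustrative command line):
+ *
+ *     cc -Wall -Wextra -D__TEST_PSTDINT_FOR_CORRECTNESS -x c pstdint.h -o pstdint_test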
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#define glue3_aux(x,y,z) x ## y ## z
+#define glue3(x,y,z) glue3_aux(x,y,z)
+
+#define DECLU(bits) glue3(uint,bits,_t) glue3(u,bits,) = glue3(UINT,bits,_C) (0);
+#define DECLI(bits) glue3(int,bits,_t) glue3(i,bits,) = glue3(INT,bits,_C) (0);
+
+#define DECL(us,bits) glue3(DECL,us,) (bits)
+
+#define TESTUMAX(bits) glue3(u,bits,) = ~glue3(u,bits,); if (glue3(UINT,bits,_MAX) != glue3(u,bits,)) printf ("Something wrong with UINT%d_MAX\n", bits)
+
+#define REPORTERROR(msg) { err_n++; if (err_first <= 0) err_first = __LINE__; printf msg; }
+
+int main () {
+ int err_n = 0;
+ int err_first = 0;
+ DECL(I,8)
+ DECL(U,8)
+ DECL(I,16)
+ DECL(U,16)
+ DECL(I,32)
+ DECL(U,32)
+#ifdef INT64_MAX
+ DECL(I,64)
+ DECL(U,64)
+#endif
+ intmax_t imax = INTMAX_C(0);
+ uintmax_t umax = UINTMAX_C(0);
+ char str0[256], str1[256];
+
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "d", INT32_C(2147483647));
+ if (0 != strcmp (str0, "2147483647")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_INT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_INT32_DEC_WIDTH : %s\n", PRINTF_INT32_DEC_WIDTH));
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "u", UINT32_C(4294967295));
+ if (0 != strcmp (str0, "4294967295")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_UINT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_UINT32_DEC_WIDTH : %s\n", PRINTF_UINT32_DEC_WIDTH));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d", INT64_C(9223372036854775807));
+ if (0 != strcmp (str1, "9223372036854775807")) REPORTERROR (("Something wrong with PRINTF_INT64_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_INT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_INT64_DEC_WIDTH : %s, %d\n", PRINTF_INT64_DEC_WIDTH, (int) strlen(str1)));
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "u", UINT64_C(18446744073709550591));
+ if (0 != strcmp (str1, "18446744073709550591")) REPORTERROR (("Something wrong with PRINTF_INT64_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_UINT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_UINT64_DEC_WIDTH : %s, %d\n", PRINTF_UINT64_DEC_WIDTH, (int) strlen(str1)));
+#endif
+
+ sprintf (str0, "%d %x\n", 0, ~0);
+
+ sprintf (str1, "%d %x\n", i8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i8 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u8 : %s\n", str1));
+ sprintf (str1, "%d %x\n", i16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i16 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u16 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "d %x\n", i32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i32 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "u %x\n", u32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u32 : %s\n", str1));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d %x\n", i64, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i64 : %s\n", str1));
+#endif
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "d %x\n", imax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with imax : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "u %x\n", umax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with umax : %s\n", str1));
+
+ TESTUMAX(8);
+ TESTUMAX(16);
+ TESTUMAX(32);
+#ifdef INT64_MAX
+ TESTUMAX(64);
+#endif
+
+#define STR(v) #v
+#define Q(v) printf ("sizeof " STR(v) " = %u\n", (unsigned) sizeof (v));
+ if (err_n) {
+ printf ("pstdint.h is not correct. Please use sizes below to correct it:\n");
+ }
+
+ Q(int)
+ Q(unsigned)
+ Q(long int)
+ Q(short int)
+ Q(int8_t)
+ Q(int16_t)
+ Q(int32_t)
+#ifdef INT64_MAX
+ Q(int64_t)
+#endif
+
+ return EXIT_SUCCESS;
+}
+
+#endif
diff --git a/external/hash/ptr_set.c b/external/hash/ptr_set.c
new file mode 100644
index 0000000..ab12ddf
--- /dev/null
+++ b/external/hash/ptr_set.c
@@ -0,0 +1,60 @@
+/*
+ * Creates a set of stored pointers by using the pointer itself as key.
+ *
+ * (void *)0 (HT_MISSING) cannot be stored.
+ * (void *)1 (HT_DELETED) also cannot be stored.
+ *
+ * ht_item, ht_key, ht_key_len, and ht_match are required.
+ *
+ * In this case HT_HASH_FUNCTION is also required because
+ * we do not read the content of the key but use the pointer
+ * itself as a key. The default behavior would crash.
+ *
+ * Only one hash table can be defined in a single compilation unit
+ * because of static function names in the generic implementation.
+ */
+
+#include "ptr_set.h"
+
+static inline size_t ptr_set_hash_function(const void *s, size_t len);
+#define HT_HASH_FUNCTION ptr_set_hash_function
+
+#define HT_LOAD_FACTOR 0.7
+#include "hash_table_def.h"
+DEFINE_HASH_TABLE(ptr_set)
+
+#if defined(PTR_SET_RH)
+#include "hash_table_impl_rh.h"
+#else
+#include "hash_table_impl.h"
+#endif
+
+static inline const void *ht_key(ht_item_t x)
+{
+ return (const void *)x;
+}
+
+static inline size_t ht_key_len(ht_item_t x)
+{
+ return sizeof(x);
+}
+
+static inline int ht_match(const void *key, size_t len, ht_item_t x)
+{
+ (void)len;
+ return (size_t)key == (size_t)x;
+}
+
+static inline size_t ptr_set_hash_function(const void *s, size_t len)
+{
+#if defined (PTR_SET_PTR_HASH)
+ /* Murmur hash like finalization step. */
+ return ht_ptr_hash_function(s, len);
+#elif defined (PTR_SET_INT_HASH)
+ /* Knuths multiplication. */
+ return ht_int_hash_function(s, len);
+#else
+ (void)len;
+ return ht_default_hash_function(&s, sizeof(char *));
+#endif
+}
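+
+/*
+ * Build-time selection (illustrative): compiling this unit with
+ * -DPTR_SET_PTR_HASH selects the Murmur-style pointer hash above, and
+ * -DPTR_SET_INT_HASH selects the Knuth multiplicative hash; with
+ * neither flag the default hash function is applied to the pointer
+ * bytes themselves.
+ */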
diff --git a/external/hash/ptr_set.h b/external/hash/ptr_set.h
new file mode 100644
index 0000000..f66e70e
--- /dev/null
+++ b/external/hash/ptr_set.h
@@ -0,0 +1,19 @@
+#ifndef HT_PTR_SET_H
+#define HT_PTR_SET_H
+
+#include "hash_table.h"
+
+DECLARE_HASH_TABLE(ptr_set, void *)
+
+/* Return value helpers - these are specific to the implementation. */
+#define PTR_SET_IS_MISSING(x) ((void *)x == (void *)0)
+#define PTR_SET_IS_ERROR(x) ((void *)x == (void *)2)
+#define PTR_SET_IS_VALID(x) ((void *)x > (void *)2)
+
+/* Extensions to std. interface. */
+static inline int ptr_set_exists(ptr_set_t *S, void *p)
+{
+ return ptr_set_find_item(S, p) != (void *)0;
+}
+
+#endif /* HT_PTR_SET_H */
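+
+/*
+ * Usage sketch for the helpers above (only names visible in this header
+ * are used; creating and populating the set goes through the generic
+ * interface declared in hash_table.h):
+ *
+ *     void *hit = ptr_set_find_item(S, p);
+ *     if (PTR_SET_IS_VALID(hit)) {
+ *         ... p is stored in the set ...
+ *     } else if (PTR_SET_IS_MISSING(hit)) {
+ *         ... p is not stored ...
+ *     }
+ *
+ * or simply: if (ptr_set_exists(S, p)) { ... }
+ */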
diff --git a/external/hash/str_set.c b/external/hash/str_set.c
new file mode 100644
index 0000000..87a3766
--- /dev/null
+++ b/external/hash/str_set.c
@@ -0,0 +1,61 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "str_set.h"
+#include "hash_table_def.h"
+DEFINE_HASH_TABLE(str_set)
+#if defined(STR_SET_RH)
+#include "hash_table_impl_rh.h"
+#else
+#include "hash_table_impl.h"
+#endif
+
+/*
+ * Simple default implementation of a hash set. The stored items are
+ * zero-terminated strings. The hash table does not manage the
+ * allocation of the strings, just as it doesn't manage any stored items.
+ * However, if items are created with, say, strndup, a destructor can be
+ * provided to free each item when clearing the table. The remove
+ * operation also returns the removed item so it can be deallocated by
+ * the caller.
+ *
+ * In general, the key and the item are different, but here they are the
+ * same. Normally the key would be referenced by the item.
+ */
+static inline int ht_match(const void *key, size_t len, str_set_item_t item)
+{
+ return strncmp(key, item, len) == 0;
+}
+
+static inline const void *ht_key(str_set_item_t item)
+{
+ return (const void *)item;
+}
+
+static inline size_t ht_key_len(str_set_item_t item)
+{
+ return strlen(item);
+}
diff --git a/external/hash/str_set.h b/external/hash/str_set.h
new file mode 100644
index 0000000..df5d1c7
--- /dev/null
+++ b/external/hash/str_set.h
@@ -0,0 +1,32 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef STR_SET_H
+#define STR_SET_H
+
+#include "hash_table.h"
+
+DECLARE_HASH_TABLE(str_set, char *)
+
+#endif /* STR_SET_H */
diff --git a/external/hash/token_map.c b/external/hash/token_map.c
new file mode 100644
index 0000000..9bf85df
--- /dev/null
+++ b/external/hash/token_map.c
@@ -0,0 +1,54 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <string.h>
+
+/* These are just example settings. */
+
+#include "token_map.h"
+#define HT_LOAD_FACTOR 0.85
+/* Quadratic probing is ignored with Robin Hood hashing. */
+#define HT_PROBE_QUADRATIC
+#include "hash_table_def.h"
+DEFINE_HASH_TABLE(token_map)
+#if defined(TOKEN_MAP_RH)
+#include "hash_table_impl_rh.h"
+#else
+#include "hash_table_impl.h"
+#endif
+
+static inline const void *ht_key(ht_item_t item)
+{
+ return item->token;
+}
+
+static inline size_t ht_key_len(ht_item_t item)
+{
+ return item->len;
+}
+
+static inline int ht_match(const void *key, size_t len, ht_item_t item)
+{
+ return len == item->len && memcmp(key, item->token, len) == 0;
+}
diff --git a/external/hash/token_map.h b/external/hash/token_map.h
new file mode 100644
index 0000000..700c60e
--- /dev/null
+++ b/external/hash/token_map.h
@@ -0,0 +1,39 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef TOKEN_MAP_H
+#define TOKEN_MAP_H
+
+#include "hash_table.h"
+
+struct token {
+ char *token;
+ size_t len;
+ int type;
+ void *data;
+};
+
+DECLARE_HASH_TABLE(token_map, struct token *)
+
+#endif /* TOKEN_MAP_H */
diff --git a/external/hash/unaligned.h b/external/hash/unaligned.h
new file mode 100644
index 0000000..0431f96
--- /dev/null
+++ b/external/hash/unaligned.h
@@ -0,0 +1,42 @@
+#ifndef UNALIGNED_H
+#define UNALIGNED_H
+
+/*
+ * This is a simplified version of portable/punaligned.h that does not depend on
+ * endian detection, but which assumes x86 is always little endian.
+ * Include the portable version for better precision.
+ */
+
+#ifndef unaligned_read_le16toh
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+
+#define unaligned_read_le16toh(p) (*(uint16_t*)(p))
+#define unaligned_read_le32toh(p) (*(uint32_t*)(p))
+#define unaligned_read_le64toh(p) (*(uint64_t*)(p))
+
+#else
+
+#define unaligned_read_le16toh(p) ( \
+ (((uint16_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint16_t)(((uint8_t *)(p))[1])) << 8))
+
+#define unaligned_read_le32toh(p) ( \
+ (((uint32_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint32_t)(((uint8_t *)(p))[1])) << 8) | \
+ (((uint32_t)(((uint8_t *)(p))[2])) << 16) | \
+ (((uint32_t)(((uint8_t *)(p))[3])) << 24))
+
+#define unaligned_read_le64toh(p) ( \
+ (((uint64_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint64_t)(((uint8_t *)(p))[1])) << 8) | \
+ (((uint64_t)(((uint8_t *)(p))[2])) << 16) | \
+ (((uint64_t)(((uint8_t *)(p))[3])) << 24) | \
+ (((uint64_t)(((uint8_t *)(p))[4])) << 32) | \
+ (((uint64_t)(((uint8_t *)(p))[5])) << 40) | \
+ (((uint64_t)(((uint8_t *)(p))[6])) << 48) | \
+ (((uint64_t)(((uint8_t *)(p))[7])) << 56))
+#endif
+#endif
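+
+/*
+ * Example: both branches above produce the same value for a buffer
+ * holding a little-endian encoded integer, e.g.
+ *
+ *     uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };
+ *     uint32_t v = unaligned_read_le32toh(buf);
+ *
+ * yields v == 0x12345678 regardless of host endianness or alignment.
+ */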
+
+#endif /* UNALIGNED_H */
diff --git a/external/lex/LICENSE b/external/lex/LICENSE
new file mode 100644
index 0000000..8e84a48
--- /dev/null
+++ b/external/lex/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Mikkel F. Jørgensen, dvide.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/external/lex/README.md b/external/lex/README.md
new file mode 100644
index 0000000..3144091
--- /dev/null
+++ b/external/lex/README.md
@@ -0,0 +1,3 @@
+Essential files extracted from the luthor scanner - a generic scanner
+similar to a handwritten scanner, but covering many common cases by
+default.
diff --git a/external/lex/luthor.c b/external/lex/luthor.c
new file mode 100644
index 0000000..fc81985
--- /dev/null
+++ b/external/lex/luthor.c
@@ -0,0 +1,1509 @@
+/*
+ * Designed to be included in other C files which define emitter
+ * operations. The same source may thus be used to parse different
+ * grammars.
+ *
+ * The operators cover the most common operators in the C family. Each
+ * operator does not have a name; it is represented by a long token code
+ * with up to 4 ASCII characters embedded literally. This avoids any
+ * semantic meaning at the lexer level. Emitter macros can redefine
+ * this behavior.
+ *
+ * No real harm is done in accepting a superset, but the source is
+ * intended to be modified: have things flagged or removed, other things
+ * added. The real complexity is in numbers, identifiers, and comments,
+ * which should be fairly complete with flagging as is.
+ *
+ * Keyword handling is done via macros, and described elsewhere, but for
+ * identifier compatible keywords, this is quite efficient to handle on
+ * a per language basis without modifying this source.
+ *
+ * The Lisp language family is somewhat different and not directly
+ * suited for this lexer, although it can easily be modified to suit.
+ * The main reasons are ';' for comments, operators used as part of
+ * the identifier symbol set, no need for operator classification,
+ * and different handling of single character symbols.
+ *
+ * So overall, we more or less have one efficient unified lexer that can
+ * manage many languages - this is good, because it is a pain to write a
+ * new lexer by hand, and lexer tools are what they are.
+ */
+
+#include "luthor.h"
+
+#ifdef LEX_C99_NUMERIC
+#define LEX_C_NUMERIC
+#define LEX_HEX_FLOAT_NUMERIC
+#define LEX_BINARY_NUMERIC
+#endif
+
+#ifdef LEX_C_NUMERIC
+#define LEX_C_OCTAL_NUMERIC
+#define LEX_HEX_NUMERIC
+#endif
+
+#ifdef LEX_JULIA_NUMERIC
+#ifdef LEX_C_OCTAL_NUMERIC
+/*
+ * LEX_JULIA_OCTAL_NUMERIC and LEX_C_OCTAL_NUMERIC can technically
+ * coexist, but leading zeroes give C style leading zero numbers
+ * which can lead to incorrect values depending on expectations.
+ * Therefore the full LEX_JULIA_NUMERIC flag is designed to not allow this.
+ */
+#error "LEX_C_OCTAL_NUMERIC conflicts with LEX_JULIA_NUMERIC leading zero integers"
+#endif
+
+/*
+ * Julia v0.3 insists on lower case, and has a different meaning for
+ * upper case.
+ */
+#define LEX_LOWER_CASE_NUMERIC_PREFIX
+#define LEX_JULIA_OCTAL_NUMERIC
+#define LEX_HEX_FLOAT_NUMERIC
+#define LEX_BINARY_NUMERIC
+
+#endif
+
+#ifdef LEX_HEX_FLOAT_NUMERIC
+#define LEX_HEX_NUMERIC
+#endif
+
+/*
+ * Numeric and string constants do not accept prefixes such as u, l, L,
+ * U, ll, LL, f, or F in C, or various others in Julia strings. Use the
+ * parser to detect juxtaposition between identifier and constant. In
+ * Julia a numeric suffix means multiplication, in C it is a type
+ * qualifier. Signs, such as those defined in JSON, are also not
+ * accepted - they must be operators. See the source for various flags
+ * to enable different token types.
+ */
+
+/*
+ * Includes '_' in identifiers by default. Defines follow characters in
+ * identifiers but not the lead character - it must be defined in switch
+ * cases. If the identifier allows for dash '-', it is probably better
+ * to handle it as an operator and flag surrounding space in the parser.
+ */
+#ifndef lex_isalnum
+
+/*
+ * NOTE: isalnum and isalpha are locale dependent. We only want to
+ * consider the ASCII-7 subset and treat everything else as utf-8.
+ * This table is not for leading identifiers, as it contains 0..9.
+ *
+ * For more correct handling of UTF-8, see:
+ * https://theantlrguy.atlassian.net/wiki/display/ANTLR4/Grammar+Lexicon
+ * based on Java Ident = NameStartChar NameChar*
+ *
+ * While the following is UTF-16, it can be adapted to UTF-8 easily.
+
+
+ fragment
+ NameChar
+ : NameStartChar
+ | '0'..'9'
+ | '_'
+ | '\u00B7'
+ | '\u0300'..'\u036F'
+ | '\u203F'..'\u2040'
+ ;
+ fragment
+ NameStartChar
+ : 'A'..'Z' | 'a'..'z'
+ | '\u00C0'..'\u00D6'
+ | '\u00D8'..'\u00F6'
+ | '\u00F8'..'\u02FF'
+ | '\u0370'..'\u037D'
+ | '\u037F'..'\u1FFF'
+ | '\u200C'..'\u200D'
+ | '\u2070'..'\u218F'
+ | '\u2C00'..'\u2FEF'
+ | '\u3001'..'\uD7FF'
+ | '\uF900'..'\uFDCF'
+ | '\uFDF0'..'\uFFFD'
+ ;
+ */
+
+static const char lex_alnum[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0..9 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ /* A..O */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* P..Z, _ */
+#ifdef LEX_ID_WITHOUT_UNDERSCORE
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+#else
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+#endif
+ /* a..o */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* p..z */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+#ifdef LEX_ID_WITH_UTF8
+ /* utf-8 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+#else
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+#endif
+};
+
+#define lex_isalnum(c) (lex_alnum[(unsigned char)(c)])
+#endif
+
+#ifndef lex_isbindigit
+#define lex_isbindigit(c) ((c) == '0' || (c) == '1')
+#endif
+
+#ifndef lex_isoctdigit
+#define lex_isoctdigit(c) ((unsigned)((c) - '0') < 8)
+#endif
+
+#ifndef lex_isdigit
+#define lex_isdigit(c) ((c) >= '0' && (c) <= '9')
+#endif
+
+#ifndef lex_ishexdigit
+#define lex_ishexdigit(c) (((c) >= '0' && (c) <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'))
+#endif
+
+#ifndef lex_isctrl
+#include <ctype.h>
+#define lex_isctrl(c) ((c) < 0x20 || (c) == 0x7f)
+#endif
+
+#ifndef lex_isblank
+#define lex_isblank(c) ((c) == ' ' || (c) == '\t')
+#endif
+
+#ifndef lex_iszterm
+#define lex_iszterm(c) ((c) == '\0')
+#endif
+
+/*
+ * If ZTERM is disabled, zero will be a LEX_CTRL token
+ * and allowed to be embedded in comments and strings, or
+ * elsewhere, as long as the parser accepts the token.
+ */
+#ifdef LEX_DISABLE_ZTERM
+#undef lex_iszterm
+#define lex_iszterm(c) (0)
+#endif
+
+/*
+ * The mode is normally LEX_MODE_NORMAL = 0 initially, or the returned
+ * mode from a previous call, unless LEX_MODE_INVALID = 1 was returned.
+ * If a buffer stopped in the middle of a string or a comment, the mode
+ * will reflect that. In all cases some amount of recovery is needed
+ * before starting a new buffer - see detailed comments in header file.
+ * If only a single buffer is used, special handling is still needed if
+ * the last line contains a single line comment because it will not be
+ * terminated, but it amounts to replace the emitted unterminated
+ * comment token with an end of comment token.
+ *
+ * Instead of 0, the mode can initially also be LEX_MODE_BOM - it will
+ * strip an optional BOM before moving to normal mode. Currently only
+ * UTF-8 BOM is supported, and this is unlikely to change.
+ *
+ * The context variable is user-defined and available to emitter macros.
+ * It may be null if unused.
+ *
+ */
+static int lex(const char *buf, size_t len, int mode, void *context)
+{
+ const char *p, *q, *s, *d;
+#if 0
+ /* TODO: old, remove this */
+ , *z, *f;
+#endif
+
+ p = buf; /* next char */
+ q = p + len; /* end of buffer */
+ s = p; /* start of token */
+ d = p; /* end of integer part */
+
+#if 0
+ /* TODO: old, remove this */
+
+ /* Used for float and leading zero detection in numerics. */
+ z = p;
+ f = p;
+#endif
+
+ /*
+ * Handle mid string and mid comment for reentering across
+ * buffer boundaries. Strip embedded counter from mode.
+ */
+ switch(mode & (LEX_MODE_COUNT_BASE - 1)) {
+
+ case LEX_MODE_NORMAL:
+ goto lex_mode_normal;
+
+ case LEX_MODE_BOM:
+ goto lex_mode_bom;
+
+#ifdef LEX_C_STRING
+ case LEX_MODE_C_STRING:
+ goto lex_mode_c_string;
+#endif
+#ifdef LEX_PYTHON_BLOCK_STRING
+ case LEX_MODE_PYTHON_BLOCK_STRING:
+ goto lex_mode_python_block_string;
+#endif
+#ifdef LEX_C_STRING_SQ
+ case LEX_MODE_C_STRING_SQ:
+ goto lex_mode_c_string_sq;
+#endif
+#ifdef LEX_PYTHON_BLOCK_STRING_SQ
+ case LEX_MODE_PYTHON_BLOCK_STRING_SQ:
+ goto lex_mode_python_block_string_sq;
+#endif
+#ifdef LEX_C_BLOCK_COMMENT
+ case LEX_MODE_C_BLOCK_COMMENT:
+ goto lex_mode_c_block_comment;
+#endif
+#if defined(LEX_SHELL_LINE_COMMENT) || defined(LEX_C99_LINE_COMMENT)
+ case LEX_MODE_LINE_COMMENT:
+ goto lex_mode_line_comment;
+#endif
+#ifdef LEX_JULIA_NESTED_COMMENT
+ case LEX_MODE_JULIA_NESTED_COMMENT:
+ goto lex_mode_julia_nested_comment;
+#endif
+
+ default:
+ /*
+ * This is mostly to kill unused label warning when comments
+ * are disabled.
+ */
+ goto lex_mode_exit;
+ }
+
+lex_mode_bom:
+
+ mode = LEX_MODE_BOM;
+
+ /*
+ * Special entry mode to consume utf-8 bom if present. We don't
+ * support other boms, but we would use the same token if we did.
+ *
+ * We generally expect no bom present, but it is here if needed
+ * without requiring ugly hacks elsewhere.
+ */
+ if (p + 3 < q && p[0] == '\xef' && p[1] == '\xbb' && p[2] == '\xbf') {
+ p += 3;
+ lex_emit_bom(s, p);
+ }
+ goto lex_mode_normal;
+
+/* If source is updated, also update LEX_C_STRING_SQ accordingly. */
+#ifdef LEX_C_STRING
+lex_mode_c_string:
+
+ mode = LEX_MODE_C_STRING;
+
+ for (;;) {
+ --p;
+ /* We do not allow blanks that are also control characters, such as \t. */
+ while (++p != q && *p != '\\' && *p != '\"' && !lex_isctrl(*p)) {
+ }
+ if (s != p) {
+ lex_emit_string_part(s, p);
+ s = p;
+ }
+ if (*p == '\"') {
+ ++p;
+ lex_emit_string_end(s, p);
+ goto lex_mode_normal;
+ }
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\\') {
+ ++p;
+            /* Escape is only itself, whatever is escaped follows separately. */
+ lex_emit_string_escape(s, p);
+ s = p;
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\\' || *p == '\"') {
+ ++p;
+ continue;
+ }
+ /*
+ * Flag only relevant for single line strings, as it
+ * controls whether we fail on unterminated string at line
+ * ending with '\'.
+ *
+ * Julia does not support line continuation in strings
+ * (or elsewhere). C, Python, and Javascript do.
+ */
+#ifndef LEX_DISABLE_STRING_CONT
+ if (*p == '\n') {
+ if (++p != q && *p == '\r') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (*p == '\r') {
+ if (++p != q && *p == '\n') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+#endif
+ }
+ if (*p == '\n' || *p == '\r') {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ ++p;
+ lex_emit_string_ctrl(s);
+ s = p;
+ }
+#endif
+
+/*
+ * This is a copy of LEX_C_STRING with single quote. It's not DRY, but
+ * there is no reason to parameterize the inner loops just for this.
+ * Changes made above should be recopied here.
+ *
+ * Even if single quote is only used for CHAR types, it makes sense to
+ * parse as a full string since there can be all sorts of unicode
+ * escapes and line continuations, newlines to report and unexpected
+ * control characters to deal with.
+ */
+#ifdef LEX_C_STRING_SQ
+lex_mode_c_string_sq:
+
+ mode = LEX_MODE_C_STRING_SQ;
+
+ for (;;) {
+ --p;
+ while (++p != q && *p != '\\' && *p != '\'' && !lex_isctrl(*p)) {
+ }
+ if (s != p) {
+ lex_emit_string_part(s, p);
+ s = p;
+ }
+ if (*p == '\'') {
+ ++p;
+ lex_emit_string_end(s, p);
+ goto lex_mode_normal;
+ }
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\\') {
+ ++p;
+            /* Escape is only itself, whatever is escaped follows separately. */
+ lex_emit_string_escape(s, p);
+ s = p;
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\\' || *p == '\'') {
+ ++p;
+ continue;
+ }
+ /*
+ * Flag only relevant for single line strings, as it
+ * controls whether we fail on unterminated string at line
+ * ending with '\'.
+ *
+ * Julia does not support line continuation in strings
+ * (or elsewhere). C, Python, and Javascript do.
+ */
+#ifndef LEX_DISABLE_STRING_CONT
+ if (*p == '\n') {
+ if (++p != q && *p == '\r') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (*p == '\r') {
+ if (++p != q && *p == '\n') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+#endif
+ }
+ if (*p == '\n' || *p == '\r') {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ ++p;
+ lex_emit_string_ctrl(s);
+ s = p;
+ }
+#endif
+
+/*
+ * """ Triple quoted Python block strings. """
+ * Single quoted version (''') is a direct copy, update both places
+ * if a change is needed.
+ *
+ * Note: there is no point in disabling line continuation
+ * for block strings, since it only affects unterminated
+ * string errors at newline. It all comes down to how
+ * escaped newline is interpreted by the parser.
+ */
+#ifdef LEX_PYTHON_BLOCK_STRING
+lex_mode_python_block_string:
+
+ mode = LEX_MODE_PYTHON_BLOCK_STRING;
+
+ for (;;) {
+ --p;
+ while (++p != q && *p != '\\' && !lex_isctrl(*p)) {
+ if (*p == '\"' && p + 2 < q && p[1] == '\"' && p[2] == '\"') {
+ break;
+ }
+ }
+ if (s != p) {
+ lex_emit_string_part(s, p);
+ s = p;
+ }
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\"') {
+ p += 3;
+ lex_emit_string_end(s, p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\\') {
+ /* Escape is only itself, allowing parser to interpret and validate. */
+ ++p;
+ lex_emit_string_escape(s, p);
+ s = p;
+ if (p + 1 != q && (*p == '\\' || *p == '\"')) {
+ ++p;
+ }
+ continue;
+ }
+ if (*p == '\n') {
+ if (++p != q && *p == '\r') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (*p == '\r') {
+ if (++p != q && *p == '\n') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+ ++p;
+ lex_emit_string_ctrl(s);
+ s = p;
+ }
+#endif
+
+/*
+ * Python ''' style strings.
+ * Direct copy of """ quote version, update both if changed.
+ */
+#ifdef LEX_PYTHON_BLOCK_STRING_SQ
+lex_mode_python_block_string_sq:
+
+ mode = LEX_MODE_PYTHON_BLOCK_STRING_SQ;
+
+ for (;;) {
+ --p;
+ while (++p != q && *p != '\\' && !lex_isctrl(*p)) {
+ if (*p == '\'' && p + 2 < q && p[1] == '\'' && p[2] == '\'') {
+ break;
+ }
+ }
+ if (s != p) {
+ lex_emit_string_part(s, p);
+ s = p;
+ }
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_string_unterminated(p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\'') {
+ p += 3;
+ lex_emit_string_end(s, p);
+ goto lex_mode_normal;
+ }
+ if (*p == '\\') {
+ /* Escape is only itself, allowing parser to interpret and validate. */
+ ++p;
+ lex_emit_string_escape(s, p);
+ s = p;
+ if (p + 1 != q && (*p == '\\' || *p == '\'')) {
+ ++p;
+ }
+ continue;
+ }
+ if (*p == '\n') {
+ if (++p != q && *p == '\r') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (*p == '\r') {
+ if (++p != q && *p == '\n') {
+ ++p;
+ }
+ lex_emit_string_newline(s, p);
+ s = p;
+ continue;
+ }
+ ++p;
+ lex_emit_string_ctrl(s);
+ s = p;
+ }
+#endif
+
+/*
+ * We don't really care if it is a shell style comment, a C99 comment,
+ * or any other line oriented comment, as the termination is
+ * the same.
+ */
+#if defined(LEX_SHELL_LINE_COMMENT) || defined(LEX_C99_LINE_COMMENT)
+lex_mode_line_comment:
+
+ mode = LEX_MODE_LINE_COMMENT;
+
+ for (;;) {
+ --p;
+ while (++p != q && (!lex_isctrl(*p))) {
+ }
+ if (s != p) {
+ lex_emit_comment_part(s, p);
+ s = p;
+ }
+ if (p == q || lex_iszterm(*p)) {
+ /*
+ * Unterminated comment here is not necessarily true,
+ * not even likely, nor possible, but we do this to
+ * handle buffer switch consistently: any non-normal
+ * mode exit will have an unterminated token to fix up.
+ * Here it would be conversion to end of comment, which
+ * we cannot know yet, since the line might continue in
+ * the next buffer. This is a zero length token.
+ */
+ lex_emit_comment_unterminated(p);
+ goto lex_mode_exit;
+ }
+ if (*p == '\n' || *p == '\r') {
+ lex_emit_comment_end(s, p);
+ goto lex_mode_normal;
+ }
+ ++p;
+ lex_emit_comment_ctrl(s);
+ s = p;
+ }
+#endif
+
+#ifdef LEX_C_BLOCK_COMMENT
+lex_mode_c_block_comment:
+
+ mode = LEX_MODE_C_BLOCK_COMMENT;
+
+ for (;;) {
+ --p;
+ while (++p != q && (!lex_isctrl(*p))) {
+ if (*p == '/' && p[-1] == '*') {
+ --p;
+ break;
+ }
+ }
+ if (s != p) {
+ lex_emit_comment_part(s, p);
+ s = p;
+ }
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_comment_unterminated(p);
+ goto lex_mode_exit;
+ }
+ if (*p == '\n') {
+ if (++p != q && *p == '\r') {
+ ++p;
+ }
+ lex_emit_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (*p == '\r') {
+ if (++p != q && *p == '\n') {
+ ++p;
+ }
+ lex_emit_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (lex_isctrl(*p)) {
+ ++p;
+ lex_emit_comment_ctrl(s);
+ s = p;
+ continue;
+ }
+ p += 2;
+ lex_emit_comment_end(s, p);
+ s = p;
+ goto lex_mode_normal;
+ }
+#endif
+
+ /* Julia nests block comments as #= ... #= ...=# ... =# across multiple lines. */
+#ifdef LEX_JULIA_NESTED_COMMENT
+lex_mode_julia_nested_comment:
+
+ /* Preserve nesting level on re-entrance. */
+ if ((mode & (LEX_MODE_COUNT_BASE - 1)) != LEX_MODE_JULIA_NESTED_COMMENT) {
+ mode = LEX_MODE_JULIA_NESTED_COMMENT;
+ }
+ /* We have already entered. */
+ mode += LEX_MODE_COUNT_BASE;
+
+ for (;;) {
+ --p;
+ while (++p != q && !lex_isctrl(*p)) {
+ if (*p == '#') {
+ if (p[-1] == '=') {
+ --p;
+ break;
+ }
+ if (p + 1 != q && p[1] == '=') {
+ break;
+ }
+ }
+ }
+ if (s != p) {
+ lex_emit_comment_part(s, p);
+ s = p;
+ }
+ if (p == q || lex_iszterm(*p)) {
+ lex_emit_comment_unterminated(p);
+ goto lex_mode_exit;
+ }
+ if (*p == '\n') {
+ if (++p != q && *p == '\r') {
+ ++p;
+ }
+ lex_emit_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (*p == '\r') {
+ if (++p != q && *p == '\n') {
+ ++p;
+ }
+ lex_emit_newline(s, p);
+ s = p;
+ continue;
+ }
+ if (lex_isctrl(*p)) {
+ ++p;
+ lex_emit_comment_ctrl(s);
+ s = p;
+ continue;
+ }
+ if (*p == '=') {
+ p += 2;
+ lex_emit_comment_end(s, p);
+ s = p;
+ mode -= LEX_MODE_COUNT_BASE;
+ if (mode / LEX_MODE_COUNT_BASE > 0) {
+ continue;
+ }
+ goto lex_mode_normal;
+ }
+ /* The upper bits are used as counter. */
+ mode += LEX_MODE_COUNT_BASE;
+ p += 2;
+ lex_emit_comment_begin(s, p, 0);
+ s = p;
+ if (mode / LEX_MODE_COUNT_BASE > LEX_MAX_NESTING_LEVELS) {
+ /* Prevent malicious input from overflowing counter. */
+ lex_emit_comment_deeply_nested(p);
+ lex_emit_abort(p);
+ return mode;
+ }
+ }
+#endif
+
+/* Unlike other modes, we can always jump here without updating token start `s` first. */
+lex_mode_normal:
+
+ mode = LEX_MODE_NORMAL;
+
+ while (p != q) {
+ s = p;
+
+ switch(*p) {
+
+#ifndef LEX_DISABLE_ZTERM
+ case '\0':
+ lex_emit_eos(s, p);
+ return mode;
+#endif
+
+ /* \v, \f etc. are covered by the CTRL token, don't put it here. */
+ case '\t': case ' ':
+ while (++p != q && lex_isblank(*p)) {
+ }
+ lex_emit_blank(s, p);
+ continue;
+
+ /*
+         * Newline should be emitted in all constructs, also comments
+ * and strings which have their own newline handling.
+ * Only one line is emitted at a time permitting simple line
+ * counting.
+ */
+ case '\n':
+ if (++p != q && *p == '\r') {
+ ++p;
+ }
+ lex_emit_newline(s, p);
+ continue;
+
+ case '\r':
+ if (++p != q && *p == '\n') {
+ ++p;
+ }
+ lex_emit_newline(s, p);
+ continue;
+
+ /*
+ * C-style string, and Python style triple double quote
+ * delimited multi-line string. Prefix and suffix symbols
+ * should be parsed separately, e.g. L"hello" are two
+ * tokens.
+ */
+#if defined(LEX_C_STRING) || defined(LEX_PYTHON_BLOCK_STRING)
+ case '\"':
+#ifdef LEX_PYTHON_BLOCK_STRING
+ if (p + 2 < q && p[1] == '\"' && p[2] == '\"') {
+ p += 3;
+ lex_emit_string_begin(s, p);
+ s = p;
+ goto lex_mode_python_block_string;
+ }
+#endif
+#ifdef LEX_C_STRING
+ ++p;
+ lex_emit_string_begin(s, p);
+ s = p;
+ goto lex_mode_c_string;
+#endif
+#endif
+
+ /*
+ * Single quoted version of strings, otherwise identical
+ * behavior. Can also be used for char constants if checked
+ * by parser subsequently.
+ */
+#if defined(LEX_C_STRING_SQ) || defined(LEX_PYTHON_BLOCK_STRING_SQ)
+ case '\'':
+#ifdef LEX_PYTHON_BLOCK_STRING_SQ
+ if (p + 2 < q && p[1] == '\'' && p[2] == '\'') {
+ p += 3;
+ lex_emit_string_begin(s, p);
+ s = p;
+ goto lex_mode_python_block_string_sq;
+ }
+#endif
+#ifdef LEX_C_STRING_SQ
+ ++p;
+ lex_emit_string_begin(s, p);
+ s = p;
+ goto lex_mode_c_string_sq;
+#endif
+#endif
+
+#if defined(LEX_SHELL_LINE_COMMENT) || defined(LEX_JULIA_NESTED_COMMENT)
+ /*
+ * Line comment excluding terminal line break.
+ *
+ * See also C99 line comment `//`.
+ *
+ * Julia uses `#=` and `=#` for nested block comments.
+         * (According to Julia developers, `#=` is motivated by `=`
+         * not being likely to start anything that you would put a
+         * comment around, unlike `#{`, `}#` or `#(`, `)#`.)
+ *
+ * Some known doc comment formats are identified and
+ * included in the comment_begin token.
+ */
+ case '#':
+ ++p;
+#ifdef LEX_JULIA_NESTED_COMMENT
+ if (p != q && *p == '=') {
+ ++p;
+ lex_emit_comment_begin(s, p, 0);
+ s = p;
+ goto lex_mode_julia_nested_comment;
+ }
+#endif
+ lex_emit_comment_begin(s, p, 0);
+ s = p;
+ goto lex_mode_line_comment;
+#endif
+
+ case '/':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+#ifdef LEX_C99_LINE_COMMENT
+ case '/':
+ ++p;
+ p += p != q && (*p == '/' || *p == '!');
+ lex_emit_comment_begin(s, p, (p - s == 3));
+ s = p;
+ goto lex_mode_line_comment;
+#endif
+#ifdef LEX_C_BLOCK_COMMENT
+ case '*':
+ ++p;
+ p += p != q && (*p == '*' || *p == '!');
+ lex_emit_comment_begin(s, p, (p - s == 3));
+ s = p;
+ goto lex_mode_c_block_comment;
+#endif
+ case '=':
+ ++p;
+ lex_emit_compound_op('/', '=', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('/', s, p);
+ continue;
+
+ case '(': case ')': case '[': case ']': case '{': case '}':
+ case ',': case ';': case '\\': case '?':
+ ++p;
+ lex_emit_op(*s, s, p);
+ continue;
+
+ case '%': case '!': case '~': case '^':
+ ++p;
+ if (p != q && *p == '=') {
+ ++p;
+ lex_emit_compound_op(*s, '=', s, p);
+ continue;
+ }
+ lex_emit_op(*s, s, p);
+ continue;
+
+ case '|':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_compound_op('|', '=', s, p);
+ continue;
+ case '|':
+ ++p;
+ lex_emit_compound_op('|', '|', s, p);
+ break;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('|', s, p);
+ continue;
+
+ case '&':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_compound_op('&', '=', s, p);
+ continue;
+ case '&':
+ ++p;
+ lex_emit_compound_op('&', '&', s, p);
+ break;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('&', s, p);
+ continue;
+
+ case '=':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '>':
+ ++p;
+ lex_emit_compound_op('=', '>', s, p);
+ continue;
+ case '=':
+ ++p;
+ if (p != q && *p == '=') {
+ ++p;
+ lex_emit_tricompound_op('=', '=', '=', s, p);
+ continue;
+ }
+ lex_emit_compound_op('=', '=', s, p);
+ break;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('=', s, p);
+ continue;
+
+ case ':':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_compound_op(':', '=', s, p);
+ continue;
+ case ':':
+ ++p;
+ if (p != q && *p == '=') {
+ ++p;
+ lex_emit_tricompound_op(':', ':', '=', s, p);
+ continue;
+ }
+ lex_emit_compound_op(':', ':', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op(':', s, p);
+ continue;
+
+ case '*':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+                case '=':
+                    ++p;
+                    lex_emit_compound_op('*', '=', s, p);
+                    continue;
+                case '*':
+                    /* **= hardly used anywhere? */
+                    ++p;
+                    lex_emit_compound_op('*', '*', s, p);
+                    continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('*', s, p);
+ continue;
+
+ case '<':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '-':
+ ++p;
+ lex_emit_compound_op('<', '-', s, p);
+ continue;
+ case '=':
+ ++p;
+ lex_emit_compound_op('<', '=', s, p);
+ continue;
+ case '<':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_tricompound_op('<', '<', '=', s, p);
+ continue;
+ case '<':
+ ++p;
+ if (p != q && *p == '=') {
+ ++p;
+ lex_emit_quadcompound_op('<', '<', '<', '=', s, p);
+ continue;
+ }
+ lex_emit_tricompound_op('<', '<', '<', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_compound_op('<', '<', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('<', s, p);
+ continue;
+
+ case '>':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_compound_op('>', '=', s, p);
+ continue;
+ case '>':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_tricompound_op('>', '>', '=', s, p);
+ continue;
+ case '>':
+ ++p;
+ if (p != q && *p == '=') {
+ ++p;
+ lex_emit_quadcompound_op('>', '>', '>', '=', s, p);
+ continue;
+ }
+ lex_emit_tricompound_op('>', '>', '>', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_compound_op('>', '>', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('>', s, p);
+ continue;
+
+ case '-':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_compound_op('-', '=', s, p);
+ continue;
+ case '-':
+ ++p;
+ lex_emit_compound_op('-', '-', s, p);
+ continue;
+ case '>':
+ ++p;
+ lex_emit_compound_op('-', '>', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('-', s, p);
+ continue;
+
+ case '+':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '=':
+ ++p;
+ lex_emit_compound_op('+', '=', s, p);
+ continue;
+
+ case '+':
+ ++p;
+ lex_emit_compound_op('+', '+', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('+', s, p);
+ continue;
+
+ case '.':
+ ++p;
+ if (p != q) {
+ switch (*p) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ d = s;
+ goto lex_dot_to_fraction_part;
+ case '.':
+ ++p;
+ if (p != q && *p == '.') {
+ ++p;
+ lex_emit_tricompound_op('.', '.', '.', s, p);
+ continue;
+ }
+ lex_emit_compound_op('.', '.', s, p);
+ continue;
+ default:
+ break;
+ }
+ }
+ lex_emit_op('.', s, p);
+ continue;
+
+ case '0':
+ if (++p != q) {
+ switch (*p) {
+#ifdef LEX_C_OCTAL_NUMERIC
+
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7':
+ while (++p != q && lex_isoctdigit(*p)) {
+ }
+ d = p;
+ if (p != q) {
+ /*
+ * Leading zeroes like 00.10 are valid C
+ * floating point constants.
+ */
+ if (*p == '.') {
+ goto lex_c_octal_to_fraction_part;
+ }
+ if (*p == 'e' || *p == 'E') {
+ goto lex_c_octal_to_exponent_part;
+ }
+ }
+ lex_emit_octal(s, p);
+ /*
+ * If we have a number like 0079, it becomes
+ * 007(octal), 9(decimal). The parser should
+ * deal with this.
+ *
+                         * To add to the confusion, i64 is a C integer
+                         * suffix like in 007i64, but 2+2i is a Go complex
+                         * constant. (Not specific to octals.)
+                         *
+                         * This can all be handled by having the parser
+                         * inspect the following identifier or numeric
+                         * token, parser here meaning a lexer post
+                         * processing step, not necessarily the parser
+                         * itself.
+ */
+
+ continue;
+#else
+ /*
+ * All integers reach default and enter
+ * integer part. As a result, leading zeroes are
+ * mapped to floats and integers which matches
+ * Julia behavior. Other languages should decide
+ * if leading zero is valid or not. JSON
+ * disallows leading zero.
+ */
+#endif
+
+#ifdef LEX_JULIA_OCTAL_NUMERIC
+ /*
+                     * This style of octal is not 100% Julia
+                     * compatible. Also define the Julia numeric flag to
+                     * enforce lower case.
+ */
+#ifndef LEX_LOWER_CASE_NUMERIC_PREFIX
+ /* See also hex 0X. Julia v.0.3 uses lower case only here. */
+ case 'O':
+#endif
+ /*
+ * Julia accepts 0o700 as octal and 0b100 as
+ * binary, and 0xa00 as hex, and 0100 as
+ * integer, and 1e2 as 64 bit float and 1f2 as
+ * 32 bit float. Julia 0.3 does not support
+ * octal and binary fractions.
+ */
+ case 'o':
+ while (++p != q && lex_isoctdigit(*p)) {
+ }
+ lex_emit_octal(s, p);
+ /* Avoid hitting int fall through. */
+ continue;
+#endif
+#ifdef LEX_BINARY_NUMERIC
+ /* Binary in C++14. */
+ case 'b':
+#ifndef LEX_LOWER_CASE_NUMERIC_PREFIX
+ /* See also hex 0X. Julia v.0.3 uses lower case only here. */
+ case 'B':
+#endif
+ while (++p != q && lex_isbindigit(*p)) {
+ }
+ lex_emit_binary(s, p);
+ /* Avoid hitting int fall through. */
+ continue;
+#endif
+#ifdef LEX_HEX_NUMERIC
+ case 'x':
+#ifndef LEX_LOWER_CASE_NUMERIC_PREFIX
+ /*
+                     * Julia v0.3 does not allow this; it thinks 0X1 is
+                     * 0 * X1, X1 being an identifier,
+                     * while 0x1 is a hex value due to precedence.
+ *
+ * TODO: This might change.
+ */
+
+ case 'X':
+#endif
+ while (++p != q && lex_ishexdigit(*p)) {
+ }
+#ifdef LEX_HEX_FLOAT_NUMERIC
+ /*
+                     * Most hexadecimal floating point conversion
+                     * functions, including Python's
+                     * float.fromhex("0x1.0"), Julia's parse
+                     * function, and C strtod on
+                     * supporting platforms, will parse without an
+                     * exponent. The same languages do not support
+                     * literal constants without the p exponent.
+                     * First, it is named p because e is a hex digit;
+                     * second, the float suffix f is also a hex
+                     * digit: 0x1.f is ambiguous in C without that
+                     * rule. Conversions have no such ambiguity.
+                     * In Julia, juxtaposition means that 0x1.f
+                     * could mean 0x1p0 * f or 0x1.fp0.
+                     *
+                     * Since we are not doing conversion here but
+                     * lexing a stream, we opt to require the p
+                     * suffix because making it optional could end
+                     * up consuming parts of the next token.
+                     *
+                     * But we also provide a flag to make the exponent
+                     * optional anyway. It could be used for better
+                     * error reporting than just consuming the hex
+                     * part since we likely should accept the ambiguous
+                     * syntax either way.
+ */
+ d = p;
+ if (p != q && *p == '.') {
+ while (++p != q && lex_ishexdigit(*p)) {
+ }
+ }
+ if (p != q && (*p == 'p' || *p == 'P')) {
+ if (++p != q && *p != '+' && *p != '-') {
+ --p;
+ }
+ /* The exponent is a decimal power of 2. */
+ while (++p != q && lex_isdigit(*p)) {
+ }
+ lex_emit_hex_float(s, p);
+ continue;
+ }
+#ifdef LEX_HEX_FLOAT_OPTIONAL_EXPONENT
+ if (d != p) {
+ lex_emit_hex_float(s, p);
+ continue;
+ }
+#else
+ /*
+ * Backtrack to decimal point. We require p to
+ * be present because we could otherwise consume
+ * part of the next token.
+ */
+ p = d;
+#endif
+#endif /* LEX_HEX_FLOAT_NUMERIC */
+ lex_emit_hex(s, p);
+ continue;
+#endif /* LEX_HEX_NUMERIC */
+
+ default:
+ /*
+                         * This means leading zeroes like 001 or 001.0 are
+                         * treated like int and float respectively,
+                         * iff C octals are flagged out. Otherwise they
+                         * become 001(octal), and 001(octal),.0(float)
+                         * which should be treated as an error because
+                         * future extensions might allow octal floats.
+                         * (Not likely, but the interpretation is ambiguous.)
+ */
+ break;
+ } /* Switch under '0' case. */
+
+ /*
+ * Pure single digit '0' is an octal number in the C
+ * spec. We have the option to treat it as an integer,
+ * or as an octal. For strict C behavior, this can be
+ * flagged in, but is disabled by default. It only
+ * applies to single digit 0. Thus, with C octal
+ * enabled, leading zeroes always go octal.
+ */
+ } /* If condition around switch under '0' case. */
+ --p;
+ goto lex_fallthrough_1; /* silence warning */
+
+ lex_fallthrough_1:
+ /* Leading integer digit in C integers. */
+ case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+            while (++p != q && lex_isdigit(*p)) {
+            }
+            d = p;
+            if (p != q && *p == '.') {
+/* Silence unused label warnings when features are disabled. */
+#ifdef LEX_C_OCTAL_NUMERIC
+lex_c_octal_to_fraction_part:
+#endif
+lex_dot_to_fraction_part:
+ while (++p != q && lex_isdigit(*p)) {
+ }
+ }
+ if (p != q && (*p == 'e' || *p == 'E')) {
+/* Silence unused label warnings when features are disabled. */
+#ifdef LEX_C_OCTAL_NUMERIC
+lex_c_octal_to_exponent_part:
+#endif
+ if (++p != q && *p != '+' && *p != '-') {
+ --p;
+ }
+ while (++p != q && lex_isdigit(*p)) {
+ }
+ }
+ if (d != p) {
+ lex_emit_float(s, p);
+ } else {
+#ifdef LEX_C_OCTAL_NUMERIC
+ if (*s == '0') {
+ lex_emit_octal(s, p);
+ continue;
+ }
+#endif
+ lex_emit_int(s, p);
+ }
+ continue;
+
+#ifndef LEX_ID_WITHOUT_UNDERSCORE
+ case '_':
+#endif
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'L': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+
+ /*
+ * We do not try to ensure utf-8 is terminated correctly nor
+ * that any unicode character above ASCII is a character
+ * suitable for identifiers.
+ *
+ * tag is calculated for keyword lookup, and we assume these
+ * are always ASCII-7bit. It has the form: length, first
+ * char, second, char, last char in lsb to msb order. If the
+ * second char is missing, it becomes '\0'. The tag is not
+ * entirely unique, but suitable for fast lookup.
+ *
+ * If utf-8 appears in tag, the tag is undefined except the
+ * length is valid or overflows (meaning longer than any
+ * keyword and thus safe to compare against if tag matches).
+ *
+             * If the grammar is case insensitive, the tag can be
+             * downcased trivially by or'ing with 0x20202000 which
+             * preserves the length field (clever design by the ASCII
+             * designers). After tag matching, a case insensitive
+             * compare is obviously also needed against the full lexeme.
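+             *
+             * For example, the identifier "int" produces the tag
+             * '3' | ('i' << 8) | ('n' << 16) | ('t' << 24): the length
+             * character '3' in the low byte, then the first, second,
+             * and last characters in the higher bytes.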
+ */
+
+ {
+ unsigned long tag;
+
+ tag = (unsigned long)*p << 8;
+ if (++p != q && lex_isalnum(*p)) {
+ tag |= (unsigned long)*p << 16;
+ while (++p != q && lex_isalnum(*p)) {
+ }
+ }
+ tag |= (unsigned long)p[-1] << 24;
+ tag |= (unsigned char)(p - s) + (unsigned long)'0';
+ lex_emit_id(s, p, tag);
+ continue;
+ }
+
+ default:
+
+#ifdef LEX_ID_WITH_UTF8
+ /*
+ * Identifier again, in case it starts with a utf-8 lead
+ * character. This time we can ignore the tag, except the
+ * length char must be valid to avoid buffer overruns
+ * on potential kw check upstream.
+ */
+ if (*p & '\x80') {
+ unsigned long tag;
+
+ while (++p != q && lex_isalnum(*p)) {
+ }
+ tag = (unsigned char)(p - s) + '0';
+ lex_emit_id(s, p, tag);
+ continue;
+ }
+#endif
+ ++p;
+ /* normally 0x7f DEL and 0x00..0x1f incl. */
+ if (lex_isctrl(*s) && !lex_isblank(*s)) {
+ lex_emit_ctrl(s);
+ } else {
+ lex_emit_symbol(*s, s, p);
+ }
+ continue;
+ } /* Main switch in normal mode. */
+ } /* Main while loop in normal mode. */
+
+lex_mode_exit:
+ if (mode == LEX_MODE_INVALID) {
+ return mode;
+ }
+
+#ifndef LEX_DISABLE_ZTERM
+ if (p != q && lex_iszterm(*p)) {
+ lex_emit_eos(s, p);
+ return mode;
+ }
+#endif
+ lex_emit_eob(p);
+ return mode;
+}
+
diff --git a/external/lex/luthor.h b/external/lex/luthor.h
new file mode 100644
index 0000000..6ca373d
--- /dev/null
+++ b/external/lex/luthor.h
@@ -0,0 +1,472 @@
+/*
+ * Mostly generic lexer that can be hacked to suit specific syntax. See
+ * more detailed comments further down in this file.
+ *
+ * Normally include luthor.c instead of luthor.h so emitter functions
+ * can be custom defined, and optionally also fast keyword definitions.
+ *
+ * At the very minimum, define lex_emit which other emitters default to.
+ *
+ * Create a wrapper function to drive the lex function in said file.
+ *
+ * Use this header in separate parser logic to access the token values
+ * if relevant.
+ */
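+
+/*
+ * A minimal driver sketch (illustrative only; my_handle_token and scan
+ * are placeholder names, and LEX_MODE_NORMAL is assumed to come from
+ * tokens.h):
+ *
+ *     static void my_handle_token(long token, const char *first, const char *last);
+ *
+ *     #define lex_emit(token, first, last) \
+ *         my_handle_token((token), (first), (last))
+ *     #include "luthor.c"
+ *
+ *     static void scan(const char *buf, size_t len)
+ *     {
+ *         (void)lex(buf, len, LEX_MODE_NORMAL, 0);
+ *     }
+ */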
+
+#ifndef LUTHOR_H
+#define LUTHOR_H
+
+#ifdef LEX_KEYWORDS
+#include <string.h> /* memcmp for kw match */
+#endif
+
+#include "tokens.h"
+
+#ifndef lex_emit
+#define lex_emit(token, first, last) ((void)0)
+#endif
+
+/*
+ * Default for comments, bom, and other things that are not necessarily
+ * of interest to the parser, but may be to buffer wrap handling,
+ * debugging, and pretty printers.
+ */
+#ifndef lex_emit_other
+#define lex_emit_other(token, first, last) ((void)0)
+#endif
+
+#ifndef lex_emit_eof
+#define lex_emit_eof(pos) lex_emit(LEX_TOK_EOF, pos, pos)
+#endif
+
+#ifndef lex_emit_abort
+#define lex_emit_abort(pos) lex_emit(LEX_TOK_ABORT, pos, pos)
+#endif
+
+#ifndef lex_emit_eob
+#define lex_emit_eob(pos) lex_emit(LEX_TOK_EOB, pos, pos)
+#endif
+
+#ifndef lex_emit_eos
+#define lex_emit_eos(first, last) lex_emit(LEX_TOK_EOS, first, last)
+#endif
+
+#ifndef lex_emit_bom
+#define lex_emit_bom(first, last) lex_emit_other(LEX_TOK_BOM, first, last)
+#endif
+
+#ifndef lex_emit_id
+#ifdef LEX_KEYWORDS
+/* LEX_KW_TABLE_BEGIN .. LEX_KEYWORD_TABLE_END defines lex_match_kw. */
+#define lex_emit_id(first, last, tag) lex_emit(lex_match_kw(tag, first), first, last)
+#else
+#define lex_emit_id(first, last, tag) lex_emit(LEX_TOK_ID, first, last)
+#endif
+#endif
+
+/*
+ * This is a default for unknown symbols. It may be treated as an error,
+ * or it can be processed further by the parser instead of customizing
+ * the lexer. It ensures that there is always a token for every part of
+ * the input stream.
+ */
+#ifndef lex_emit_symbol
+#define lex_emit_symbol(token, first, last) lex_emit(LEX_TOK_SYMBOL, first, last)
+#endif
+
+/*
+ * Control characters 0x01 .. 0x1f, 0x7f(DEL), excluding \0\r\n\t which have
+ * separate tokens.
+ *
+ * Control characters in strings and comments are passed on as body
+ * elements, except \0\r\n which breaks the string up.
+ */
+#ifndef lex_emit_ctrl
+#define lex_emit_ctrl(pos) lex_emit(LEX_TOK_CTRL, pos, pos + 1)
+#endif
+
+#ifndef lex_emit_string_ctrl
+#define lex_emit_string_ctrl(pos) lex_emit(LEX_TOK_STRING_CTRL, pos, pos + 1)
+#endif
+
+#ifndef lex_emit_comment_ctrl
+#define lex_emit_comment_ctrl(pos) lex_emit_other(LEX_TOK_COMMENT_CTRL, pos, pos + 1)
+#endif
+
+/*
+ * This enables the user both to count lines and to calculate the
+ * character offset for subsequent lexemes. A new line starts a lexeme;
+ * the line break symbol is located at the start of that lexeme and has
+ * length 2 for a \r\n or \n\r break, and 1 otherwise.
+ */
+#ifndef lex_emit_newline
+#define lex_emit_newline(first, last) lex_emit(LEX_TOK_NEWLINE, first, last)
+#endif
+
+#ifndef lex_emit_string_newline
+#define lex_emit_string_newline(first, last) lex_emit(LEX_TOK_STRING_NEWLINE, first, last)
+#endif
+
+#ifndef lex_emit_int
+#define lex_emit_int(first, last) lex_emit(LEX_TOK_INT, first, last)
+#endif
+
+#ifndef lex_emit_float
+#define lex_emit_float(first, last) lex_emit(LEX_TOK_FLOAT, first, last)
+#endif
+
+#ifndef lex_emit_int_suffix
+#define lex_emit_int_suffix(first, last) lex_emit(LEX_TOK_INT_SUFFIX, first, last)
+#endif
+
+#ifndef lex_emit_float_suffix
+#define lex_emit_float_suffix(first, last) lex_emit(LEX_TOK_FLOAT_SUFFIX, first, last)
+#endif
+
+#ifndef lex_emit_binary
+#define lex_emit_binary(first, last) lex_emit(LEX_TOK_BINARY, first, last)
+#endif
+
+#ifndef lex_emit_octal
+#define lex_emit_octal(first, last) lex_emit(LEX_TOK_OCTAL, first, last)
+#endif
+
+#ifndef lex_emit_hex
+#define lex_emit_hex(first, last) lex_emit(LEX_TOK_HEX, first, last)
+#endif
+
+#ifndef lex_emit_hex_float
+#define lex_emit_hex_float(first, last) lex_emit(LEX_TOK_HEX_FLOAT, first, last)
+#endif
+
+/*
+ * The comment token can be used to aid backtracking during buffer
+ * switch.
+ */
+#ifndef lex_emit_comment_begin
+#define lex_emit_comment_begin(first, last, is_doc) \
+ lex_emit_other(LEX_TOK_COMMENT_BEGIN, first, last)
+#endif
+
+#ifndef lex_emit_comment_part
+#define lex_emit_comment_part(first, last) lex_emit_other(LEX_TOK_COMMENT_PART, first, last)
+#endif
+
+#ifndef lex_emit_comment_end
+#define lex_emit_comment_end(first, last) lex_emit_other(LEX_TOK_COMMENT_END, first, last)
+#endif
+
+#ifndef lex_emit_comment_unterminated
+#define lex_emit_comment_unterminated(pos) \
+ lex_emit_other(LEX_TOK_COMMENT_UNTERMINATED, pos, pos)
+#endif
+
+#ifndef lex_emit_comment_deeply_nested
+#define lex_emit_comment_deeply_nested(pos) \
+ lex_emit_other(LEX_TOK_COMMENT_DEEPLY_NESTED, pos, pos)
+#endif
+
+#ifndef lex_emit_string_begin
+#define lex_emit_string_begin(first, last) lex_emit(LEX_TOK_STRING_BEGIN, first, last)
+#endif
+
+#ifndef lex_emit_string_part
+#define lex_emit_string_part(first, last) lex_emit(LEX_TOK_STRING_PART, first, last)
+#endif
+
+#ifndef lex_emit_string_end
+#define lex_emit_string_end(first, last) lex_emit(LEX_TOK_STRING_END, first, last)
+#endif
+
+#ifndef lex_emit_string_escape
+#define lex_emit_string_escape(first, last) lex_emit(LEX_TOK_STRING_ESCAPE, first, last)
+#endif
+
+#ifndef lex_emit_string_unterminated
+#define lex_emit_string_unterminated(pos) \
+ lex_emit(LEX_TOK_STRING_UNTERMINATED, pos, pos)
+#endif
+
+#ifndef lex_emit_blank
+#define lex_emit_blank(first, last) \
+ lex_emit_other(LEX_TOK_BLANK, first, last)
+#endif
+
+#ifndef lex_emit_op
+#define lex_emit_op(op, first, last) lex_emit((long)(op), first, last)
+#endif
+
+#ifndef lex_emit_compound_op
+#define lex_emit_compound_op(op1, op2, first, last) \
+ lex_emit(((long)(op1) | ((long)(op2) << 8)), first, last)
+#endif
+
+#ifndef lex_emit_tricompound_op
+#define lex_emit_tricompound_op(op1, op2, op3, first, last) \
+ lex_emit(((long)(op1) | ((long)(op2) << 8)) | \
+ ((long)(op3)<<16), first, last)
+#endif
+
+#ifndef lex_emit_quadcompound_op
+#define lex_emit_quadcompound_op(op1, op2, op3, op4, first, last) \
+ lex_emit(((long)(op1) | ((long)(op2) << 8)) | \
+ ((long)(op3) << 16) | ((long)(op4) << 24), first, last)
+#endif
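+
+/*
+ * Example (sketch): with the default emitters above, a compound operator
+ * such as "==" (assuming the lexer recognizes it as compound) is emitted
+ * as a single token whose value is ('=' | ('=' << 8)) = 0x3d3d, and "<<="
+ * as ('<' | ('<' << 8) | ('=' << 16)). A parser can switch on these values
+ * directly, or remap them to a denser range if it prefers compact tables.
+ */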
+
+/* Used to limit number of nested comment level. */
+#ifndef LEX_MAX_NESTING_LEVELS
+#define LEX_MAX_NESTING_LEVELS 100
+#endif
+
+
+/* Keyword handling macros, see `keywords.c` for an example usage. */
+#ifdef LEX_KEYWORDS
+
+/*
+ * This implements a switch statement branching on the 4 character
+ * keyword tag (unsigned long value) which is produced by the lexer's id
+ * recognizer. A final check is needed to ensure an exact
+ * match with a given id. Two keywords rarely conflict, but it is
+ * possible, and therefore kw_begin kw_match kw_match ... kw_end is used
+ * to cover this.
+ *
+ * See example usage elsewhere for details.
+ *
+ * The first element x0 is the length '0'..'9' and ensures comparisons will
+ * not overrun the buffer where the lexeme is stored during string
+ * comparison, provided the keywords report the length correctly.
+ *
+ * The next elements in the tag are the first, second, and last
+ * character of lexeme / keyword, replacing second character with '\0'
+ * on single length keywords, so keyword 'e' is tagged '1', 'e', '\0', 'e',
+ * and 'while' is tagged '5', 'w', 'h', 'e', where the length is the lsb
+ * and the last character is the msb.
+ *
+ * An enum with tok_kw_<name> elements is expected to provide return
+ * values on match. These should start at LEX_TOK_KW_BASE and are
+ * negative.
+ *
+ */
+#define lex_kw_begin(x0, x1, x2, x3) \
+ case \
+ ((unsigned long)(x0) | \
+ ((unsigned long)(x1) << 8) | \
+ ((unsigned long)(x2) << 16) | \
+ ((unsigned long)(x3) << 24)) :
+
+#define lex_kw_match(kw) \
+ if (memcmp(#kw, lexeme, sizeof(#kw) - 1) == 0) \
+ return tok_kw_##kw;
+
+#define lex_kw_end() \
+ break;
+
+#define lex_kw(kw, x0, x1, x2, x3) \
+ lex_kw_begin(x0, x1, x2, x3) \
+ lex_kw_match(kw) \
+ lex_kw_end()
+
+static long lex_match_kw(unsigned long tag, const char *lexeme);
+
+/* Static so multiple grammars are possible in a single program. */
+#define LEX_KW_TABLE_BEGIN \
+static long lex_match_kw(unsigned long tag, const char *lexeme) \
+{ \
+ switch (tag) { \
+
+#define LEX_KW_TABLE_END \
+ default: \
+ break; \
+ } \
+ return LEX_TOK_KW_NOT_FOUND; \
+}
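+
+/*
+ * Example (sketch): a keyword table for the keywords `if` and `while`
+ * could look like the following. The tok_kw_<name> enum follows the
+ * naming expected by lex_kw_match, and the tag characters follow the
+ * (length)(first)(second)(last) format described above.
+ *
+ *     enum {
+ *         tok_kw_if = LEX_TOK_KW_BASE,
+ *         tok_kw_while,
+ *     };
+ *
+ *     LEX_KW_TABLE_BEGIN
+ *         lex_kw(if, '2', 'i', 'f', 'f')
+ *         lex_kw(while, '5', 'w', 'h', 'e')
+ *     LEX_KW_TABLE_END
+ */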
+
+#else
+
+/* Allow flagging in and out without unused warning or missing macros */
+#define lex_kw_begin(x0, x1, x2, x3)
+#define lex_kw_match(kw)
+#define lex_kw_end()
+#define lex_kw(kw, x0, x1, x2, x3)
+#define LEX_KW_TABLE_BEGIN
+#define LEX_KW_TABLE_END
+
+#endif /* LEX_KEYWORDS */
+
+
+
+/*
+ * Modes used for recovery when switching to a new buffer and handling
+ * internal state changes for strings and comments.
+ */
+enum {
+ /* Always 0, is initial lexer state. */
+ LEX_MODE_NORMAL = 0,
+
+ /* Returned if lex is given unsupported mode. */
+ LEX_MODE_INVALID = 1,
+
+ /*
+ * Can be used in place of normal mode to consume optional bom
+ * marker at buffer start. Only utf-8 bom is supported.
+ */
+ LEX_MODE_BOM,
+
+ /*
+ * Returned at end of buffer if mid string or mid comment, may also
+ * be larger for nested comments as nesting level is encoded.
+ */
+ LEX_MODE_C_STRING,
+ LEX_MODE_C_STRING_SQ,
+ LEX_MODE_PYTHON_BLOCK_STRING,
+ LEX_MODE_PYTHON_BLOCK_STRING_SQ,
+ LEX_MODE_C_BLOCK_COMMENT,
+ LEX_MODE_LINE_COMMENT,
+ LEX_MODE_JULIA_NESTED_COMMENT,
+
+
+ /* Counter embedded in mode. */
+ LEX_MODE_COUNT_BASE = 16,
+};
+
+
+
+/* ON CALLING AND USING LEX FUNCTION
+ *
+ * If a utf-8 BOM is possible, detect it before calling the lexer and
+ * advance the buffer. JSON explicitly disallows BOM, but recommends
+ * consuming it if present. If some other Unicode BOM is found, convert
+ * the buffer first. The lexer assumes ALL non-ascii characters are
+ * valid trailing identifier characters, which mostly works well. Strings
+ * with broken utf-8 are passed on as is. utf-8 identifiers must be
+ * enabled with #define LEX_ENABLE_UTF8_ID.
+ *
+ * If required, postprocess identifiers and strings for valid utf-8. It
+ * is assumed that all keywords are at most 9 characters long and always
+ * ASCII. Otherwise post process them in a hash table on identifier
+ * event. This enables a fast compiled trie lookup of keywords.
+ *
+ * Newline and control characters are always emitted, also inside
+ * strings and comments. The exceptions are \r, \n, \t, and \0, which are
+ * handled specially, or if the lexer is adapted to handle certain
+ * control characters specially.
+ *
+ * Each token is not guaranteed correct, only to be delimited correctly,
+ * if it is indeed correct. Only very few tokens can be zero length; for
+ * example, the parser can rely on the string part token not being empty,
+ * which is important in dealing with line continuation. The end of
+ * buffer token is empty, and so is the unterminated string token, and
+ * also the comment end token for single line comments, but not the
+ * multi-line version. There is a token for every part of the input
+ * stream, but the parser can easily define some to be ignored and have
+ * them optimized out.
+ *
+ * Strings have a start token, and optionally sequences of part, control,
+ * escape, and newline tokens, followed by either a string end token or
+ * a string unterminated token. String delimiters can be one
+ * (single-line) or three double quotes (multi-line, like Python, but
+ * cannot be single quotes, unlike Python). Python, C and Javascript
+ * string continuation is handled by having the parser observe a string
+ * escape followed by a newline token. Escape is always a single
+ * character '\' token, and the parser is responsible for consuming the
+ * following content. If string syntax with a double delimiter is used to
+ * define an escaped delimiter, this will occur as two separate strings
+ * with no space between. The parser can handle this on its own; if, in
+ * such strings, '\"' does not mean escaped delimiter, the string will
+ * not terminate correctly, and the lexer must be adapted. An unterminated
+ * string may happen at end of buffer; the same goes for single line comments.
+ * This is because the string might continue in a new buffer. The parser
+ * should deal with this.
+ *
+ * Comments always start with a start token, followed by zero or more
+ * comment part tokens interleaved with control and newline tokens,
+ * terminated by either comment end token, or unterminated comment
+ * token. If the comment is single-line, the unterminated comment token may
+ * appear at the last line instead of the expected end of comment token
+ * because the comment might continue in a new buffer. The parser
+ * should deal with this. Escapes and line continuations have no effect
+ * in comments, unlike strings.
+ *
+ * The lexer only carries one state variable: the mode. The mode can be
+ * normal (default and equals zero), or one of the single or multi-line
+ * string or comment modes. These modes are used to recover after switching
+ * buffers as discussed below.
+ *
+ * The lexer can run to completion without involving the parser and
+ * could be used to pipeline tokens into another thread for concurrent
+ * parsing which is safe since the input buffer is considered read-only.
+ *
+ *
+ * KEYWORDS
+ *
+ * Keywords are treated as identifiers by default. By including a
+ * keyword table the `lex_emit_id` macro will check if the id is a
+ * keyword and translate the token if it is. Using the provided keyword
+ * table macros is just one way to do it. This is better explained by
+ * looking at an example. Keyword lookup based on the precomputed keyword
+ * tag provided to the lookup function is limited to 9 characters, but a
+ * custom lookup function need not use it and then the tag precomputation
+ * will be optimized out.
+ *
+ * Keywords are defined by the lookup function and should be negative
+ * starting at LEX_TOK_KW_BASE to avoid conflicts with other token types.
+ *
+ *
+ * WRAPPING MULTIPLE BUFFERS
+ *
+ * The user may need to deal with multiple buffers because data may
+ * arrive asynchronously over the network, and there may be many concurrent
+ * lexing jobs. The emitter part is not difficult since a ring buffer
+ * can grow, or the parser can be called directly (except queuing a few
+ * tokens for backtracking as we shall see).
+ *
+ * If the lexer were an explicit state machine as in Flex, we could get
+ * a yywrap event to fill buffers, but our state is on the stack and in
+ * registers for optimization. We could use co-routines, but that doesn't
+ * cover all issues, and, as it turns out, is not necessary with the
+ * following restrictions on syntax:
+ *
+ * All variable length tokens such as numerics and identifiers are
+ * limited in length. Strings and comments are not, but are broken into
+ * zero, one, or several body tokens per line. ANSI-C limits line length
+ * to 509 characters (allowing for continuation and two byte linebreaks
+ * in a 512 byte buffer). But JSON has no line continuation for strings
+ * and may (and often does) store everything on a single line. Whitespace
+ * can also extend beyond a given limit.
+ *
+ * If we ignore whitespace, strings and comments, we can discard the
+ * last token (or the last two in case there are paired tokens, such as a
+ * leading zero followed by a numeric). Parsing can then resume in a new
+ * buffer where the first 512 bytes (or similar) are duplicated from the
+ * previous buffer. The lexer is then restarted at the last token (pair)
+ * start which may turn out to change the length or even introduce a
+ * different result such as introducing a leading zero. The lexer needs no
+ * specific state to do this.
+ *
+ * For strings and comments, we need a flag to allow entering the lexer
+ * mid string or mid comment. The newline and line continuation tokens
+ * need to be dropped, and the last body may need to be truncated as it
+ * can embed a partial delimiter. The simplest way to deal with this is
+ * to backtrack tokens until the last token begins at a safe position,
+ * about 3-6 characters earlier, truncating body segments that span
+ * this barrier. Whitespace can also be truncated.
+ *
+ * We can generalize this further by going at least K bytes back in an N
+ * overlap buffer region and require non-strings (and non-comments) to
+ * not exceed N-K bytes, where K and N are specific to the syntax and
+ * the I/O topology.
+ *
+ * We can add flags to tokens that can help decide how to enter
+ * backtracking mode without covering every possible scanner loop - i.e.
+ * are we mid string, mid comment, single-line or multi-line.
+ *
+ * All the lexer needs to do then, is to receive the backtracking mode
+ * flags. A wrapping driver can deal with backtrack logic, which is
+ * specific to how tokens are emitted. Whitespace needs no recovery mode,
+ * but perhaps new whitespace should extend existing whitespace to simplify
+ * parsing.
+ */
+
+
+#endif /* LUTHOR_H */
+
diff --git a/external/lex/tokens.h b/external/lex/tokens.h
new file mode 100644
index 0000000..2bdbd7c
--- /dev/null
+++ b/external/lex/tokens.h
@@ -0,0 +1,554 @@
+#ifndef LEX_TOKENS_H
+#define LEX_TOKENS_H
+
+/* Define LEX_DEBUG to enable token printing and describing functions. */
+
+
+enum {
+
+ /*
+ * EOF is not emitted by lexer, but may be used by driver after
+ * last buffer is processed.
+ */
+ LEX_TOK_EOF = 0,
+
+ /*
+ * Either EOB or EOS is emitted as the last token before exit,
+ * or also ABORT in some lexers. Unterminated string or comment
+     * will be emitted immediately before one of these when relevant.
+ *
+ * It may be useful to redefine lex_emit_eos and lex_emit_eob to
+ * produce LEX_TOK_EOF or error directly for simple string lexing.
+ */
+ LEX_TOK_EOB = 1,
+ LEX_TOK_EOS = 2,
+
+ /*
+ * ABORT can be used for early exit by some lexers while other
+ * lexers may choose to run to buffer end regardless of input (with
+ * the exception of deeply nested comments).
+ */
+ LEX_TOK_ABORT = 3,
+
+ /*
+     * Byte order marker. Only happens if the lexer was started in bom mode
+ * and the input stream contains a leading bom marker.
+ * The token can only be the first token in the stream. Utf-8 is the
+ * only supported bom, but the lexeme may be checked in case other
+ * boms are added later. Normally it is routed to lex_emit_other
+ * along with comments so it just ignores the bom if present. It is
+ * generally recommended to consume utf-8 bom for interoperability,
+ * but also to not store it for the same reason.
+ */
+ LEX_TOK_BOM,
+
+ /*
+ * Any control character that is not newline or blank will be
+     * emitted as a single character token here. This token is discussed
+     * in several comments below. For strings and comments, blank
+     * control characters will also be emitted since they are usually
+     * unexpected and not desired there.
+ */
+ LEX_TOK_CTRL,
+ LEX_TOK_STRING_CTRL,
+ LEX_TOK_COMMENT_CTRL,
+
+ /*
+ * Any printable ASCII character that is not otherwise consumed will
+ * be issued as a single length symbol token. Further discussion
+ * below. The symbol and CTRL tokens ensure that the entire input
+     * stream is covered by tokens. If utf-8 identifiers have not been
+     * flagged, utf-8 leading characters may also end up here, and so
+     * may utf-8 characters in general that are not viewed as valid
+     * identifiers (depending on configuration).
+ */
+ LEX_TOK_SYMBOL,
+
+ /*
+ * Variable length identifier starting with (_A-Za-z) by default and
+ * followed by zero or more (_A-Za-z0-9) characters. (_) can be
+     * flagged out. utf-8 can be flagged in. By default any non-ASCII
+     * character (0x80 and above) is treated as part of an identifier
+ * for simplicity and speed, but this may be redefined. Any broken
+ * utf-8 is not sanitized, thus 0x80 would be a valid identifier
+ * token with utf-8 identifiers enabled, and otherwise it would be a
+ * symbol token.
+ *
+ * The ID does a magic trick: It maps the lexeme to a very simple
+ * and fast 32 bit hash code called a tag. The tag is emitted with
+ * the id token and can be used for fast keyword lookup. The
+ * hash tag is:
+ *
+ * (length)(first char)(second char)(last char)
+ *
+     * where length is ASCII '0' .. '9' and any length overflow is an
+     * arbitrary value, but such that the length is never longer than
+     * the lexeme. The last char is the last char regardless of length.
+     * For short identifiers, the second char may be the first char
+     * duplicated, and the last char may be the first char.
+ *
+ * This code is very simple to write by hand: "5whe" means while,
+ * and can be used in a case switch before a strcmp with "while".
+ * Conflicts are possible, but then several keywords are tested like
+ * any other hash conflict. This keyword lookup is user driven, but
+ * can follow example code quite straightforward.
+ *
+ * The lex_emit_id macro can be implemented to provide the above
+ * lookup and inject a keyword token instead. By convention such
+ * tokens have negative values to avoid conflicts with lexer
+ * generated tokens.
+ *
+ * The ID also has a special role in prefixes and suffixes: C string
+ * literals like (L"hello") and numeric literals like (42f) are
+ * lexed as two tokens, one of which is an ID. The parser must
+ * process this and observe absence of whitespace where such syntax
+ * is relevant.
+ *
+     * While not specific to ID, the emitter macros can be designed to
+ * keep track of start of lines and end of whitespace and attach
+ * state flags to each token (at line start, after whitespace). The
+ * whitespace tokens can then be dropped. This might help parsing
+ * things like suffixes efficiently.
+ */
+ LEX_TOK_ID,
+
+ /*
+ * C-int :: pos-dec-digit dec-digit *
+ * Julia-int ::= dec-digit+
+ *
+ * pos-dec-digit ::= '1'..'9'
+ * dec-digit ::= '0'..'9'
+ *
+ * Floating point numbers take precedence when possible so 00.10 is
+     * always a decimal floating point value when decimal floats are
+ * enabled.
+ *
+ * The C-int is automatically enabled if C-octals are enabled, and
+ * disabled otherwise. There is no specific Julia-int type - we just
+ * use the terminology to represent integers with leading zeroes.
+ *
+ * Julia style integers accept leading zeroes. C style integers with
+ * leading zeroes are consumed as C style octal numbers, so 0019 is
+ * parsed as either 0019(Julia-int), or 001(C-octal), 9(C-int).
+ *
+ * Single digit '0' maps to octal when C-octals are enabled and to
+ * Julia-int otherwise. (Yes, integers are not that simple, it
+ * seems).
+ *
+ * Both C and Julia octal numbers (see octal token) can be active
+ * simultaneously. This can be used to control leading zero
+ * behavior, even if C-octal numbers are not part of the grammar
+ * being parsed. For example, a language might use 0o777 octal
+ * numbers and disallow 0777 integers. Enabling C-octals makes this
+ * easy to detect (but should accept octal 0).
+ *
+     * There is no distinction between the styles in the int token, but
+ * leading zeroes are easily detected in the lexeme.
+ *
+ * Constant suffixes like 1L are treated as 1(INT), and L(ID). The
+ * same goes for other numeric values.
+ *
+ * Parser should check for leading zeroes and decide if it is valid,
+ * a warning, or an error (it is in JSON). This also goes for float.
+ *
+ * Numericals, not limited to INT, may appear shorter than they are
+ * due to buffer splits. Special recovery is required, but will only
+ * happen just before EOS or EOB tokens (i.e. buffer split events).
+ */
+ LEX_TOK_INT,
+
+ /*
+ * float ::= (int ['.' dec-digits*] dec-exponent)
+ * | ([int] '.' dec-digits* [dec-exponent])
+ * dec-exponents ::= ('e' | 'E') ['+' | '-'] dec-digits*
+ * dec-digits ::= '0'..'9'
+ * int ::= dec-digits*
+ *
+ * Consumes a superset of C float representation without suffix.
+ * Some invalid tokens such as 0.E are accepted. Valid tokens such
+ * as 00.10 take precedence over octal numbers even if it is a
+ * prefix, and the same is obviously true with respect to decimal
+ * integers.
+ *
+ * JSON does not allow leading zeroes, and also not leading '.'.
+ * This can easily be checked in the lexeme.
+ *
+ * The octal notation affecting integer leading zeroes is not
+ * relevant to floats because floats take precedence over octal and
+ * decimal int when containing '.', 'e' or 'E'.
+ */
+ LEX_TOK_FLOAT,
+
+ /*
+ * binary ::= (0b | 0B) ('0' | '1')*
+ *
+ * 0b100 or just 0b, parser must check that digits are present,
+ * otherwise it may be interpreted as zero, just like octal zero
+ * in C.
+ *
+ * Like 0X hex, 0B can be flagged out because Julia v0.3 does not
+ * support uppercase 0B.
+ */
+ LEX_TOK_BINARY,
+
+ /*
+ * C-octal ::= 0 octal-digit*
+ * octal-digits ::= '0'..'7'
+ *
+ * Julia-octal ::= 0o octal-digits*
+ * octal-digits ::= '0'..'7'
+ *
+ * 0777 for C style octal numbers, or 0o777 for Julia syntax. Julia
+ * v.0.3 does not allow uppercase 0O777, it would mean 0 * O777.
+ *
+ * When enabled, decimal floating points take precedence: 00.10 is
+ * parsed as 00.10(decimal float), as per C standard.
+ *
+ * NOTE: It is possible for both styles to be active simultaneously.
+ * This may be relevant in order to control handling of leading
+ * zeroes in decimal integers.
+ *
+ * If C-octal numbers are flagged out, leading zeroes are mapped to
+ * integers and the numerical value may change. Julia behaves this
+ * way. Nothing prevents support of both C and Julia octal numbers,
+ * but leading zeroes will then be interpreted the C way - it is not
+ * recommended to do this.
+ */
+ LEX_TOK_OCTAL,
+
+ /*
+ * hex ::= hex-int
+     *     hex-digits ::= 'a'..'f' | 'A'..'F' | '0'..'9'
+     *     hex-int ::= (0x | 0X) hex-digits*
+     *
+     * where hex-digits are customizable (e.g. all lower case), and hex
+ * prefix 0x can be flagged to be lower case only (as in Julia).
+ *
+ * If hex floats are enabled, they take precedence:
+ * 0x1.0(hex-float), if not, 0x1.0 will parse as: 0x1(hex) followed
+ * by .0(decimal float).
+ *
+     * The lead prefix 0x may be flagged to be lower case only because
+ * this is required by Julia v0.3 where 0X means 0 * X. Julia
+ * accepts uppercase in the remaining hex digits (and exponent for
+ * floats). This could possibly change in future versions.
+ *
+ * The zero length sequence (0x | 0X) is accepted and left to the
+ * parser since the lexer emits a token for everything it sees.
+ * Conceptually it may be interpreted as zero, equivalent to 0 being
+ * both octal prefix and numeric 0 in C style octal representation.
+ * Or it may be an error.
+ */
+ LEX_TOK_HEX,
+
+ /*
+     *     hex-float ::= hex-int ['.' hex-digits*] hex-exponent
+ * hex-exponent ::= ('p' | 'P') ['+' | '-'] decimal-digit*
+ * decimal-digit ::= '0'..'9'
+ *
+ * A superset of IEEE-754-2008 Hexadecimal Floating Point notation.
+ *
+     * We require the exponent to be present, but do not ensure the
+     * value is otherwise complete, e.g. 0x1p+ would be accepted. The p
+     * is needed because otherwise 0x1.f could be accepted, and f is a
+     * float suffix in C, and a juxtaposition factor (0x1. * f) in Julia,
+     * at least, that is one possible interpretation.
+     *
+     * The exponent can be flagged optional in which case 0x1.f will be
+     * consumed as a single hex float token.
+ * This may either simply be accepted in some grammars, or used to
+ * provide an error message. If the exponent is required, 0x1.f will
+ * be lexed as three tokens:
+ *
+ * <'0x1'(hex int), '.'(op), 'f'(id)>.
+ *
+ * Thus it may be a good idea to allow the exponent to be optional
+ * anyway and issue an error message or warning if the p is absent
+ * later in the parsing stage.
+ *
+ * Note that, as per IEEE-754, the exponent is a decimal power of
+ * two. In other words, the number of bits to shift the
+ * (hexa)decimal point. Also note that it is p and not e because e
+ * is a hex digit.
+ */
+ LEX_TOK_HEX_FLOAT,
+
+ /*
+ * blank ::= ('\t' | '\x20')+
+ *
+ * Longest run in buffer holding only '\t' and '\x20' (space).
+ *
+ * buffer splits may generate adjacent blanks depending on recovery
+ * processing. (The same goes for other line oriented runs such as
+ * string parts and comment parts).
+ */
+ LEX_TOK_BLANK,
+
+ /* newline ::= '\r' | '\n' | '\r\n' | '\n\r'
+ *
+ * Will always appear, also inside strings and comments. Can be used
+ * to track line starts and counts reliably as only one newline is
+ * issued at a time, and it is issued everywhere, also in strings
+ * and comments.
+ *
+     * May be preceded by string escape token inside strings. This can
+ * be interpreted as line continuation within strings specifically,
+ * as is the case in Python and Javascript (and in C via
+ * pre-processor).
+ *
+ * The LEX_TOK_STRING_NEWLINE is emitted inside strings so the ordinary
+ * newline may be ignored in comments and other non-string content.
+ */
+ LEX_TOK_NEWLINE,
+ LEX_TOK_STRING_NEWLINE,
+
+ /*
+ * string ::= string_start
+ * (string_part | string_escape |
+ * string_ctrl | string_newline)*
+ * (string_end | string_unterminated)
+ *
+ * There are several optional string styles. They all start with
+     * this token. The length and content provide the details. Python
+ * may start with """ or ''' and this token will then have length
+ * 3 and three quotes as lexeme content. If the lexer exits before
+ * string end token, the returned lexer mode will remember the
+ * state and can be used for reentry - this also goes for comments.
+ *
+ * Strings can only contain part, escape, newline, and control
+ * tokens, and either string unterminated or string end token
+ * at last.
+ */
+ LEX_TOK_STRING_BEGIN,
+
+ /* Longest run without control characters, without (\), without
+ * newline, and without the relevant end delimiter. The run may be
+ * shortened due to buffer splits. The part may, as an exception,
+ * begin with an end delimiter character or a (\) if it was
+     * preceded by a string escape token. The escape character is
+     * always (\). Strings that use "" or '' as escape will be treated
+     * as start and end of separate strings. Strings that do not support
+ * (\) should just treat escape as a part of the string.
+ */
+ LEX_TOK_STRING_PART,
+
+ /*
+ * This is always a single character token (\) and only happens
+ * inside strings. See also string part token.
+ */
+ LEX_TOK_STRING_ESCAPE,
+
+    /* This token is similar to string start. It may be absent at buffer
+     * splits, but then an unterminated string token will be used
+     * just before the split event token.
+     */
+ LEX_TOK_STRING_END,
+
+ /*
+ * This is emitted before the buffer ends, or before unescaped
+ * newlines for line oriented string types (the usual strings).
+ * At buffer splits, recovery should clean it up. The returned
+ * mode allow parsing to continue in a new buffer with a slight
+ * content overlap.
+ *
+     * If a string like ("hello, world!") in C reaches end of line, it
+     * may be continued ("hello, \)newline(world!"). If this line
+ * continuation is flagged out, this will lead to string
+ * unterminated, even if not at end of buffer. For block strings
+ * like """hello""", this only happens at end of buffer.
+ */
+ LEX_TOK_STRING_UNTERMINATED,
+
+ /*
+ *
+ * comment ::= comment_start
+ * (comment_part | ctrl | newline)*
+ * (comment_end | comment_unterminated)
+ *
+ *
+ * Comments work like strings in most respects. They emit parts, and
+ * control characters, but not escape characters, and cannot be
+ * continued at end of line. Block comments are like python block
+ * strings (''').
+ *
+ * Julia supports nested comments (#= ... #= =# =#). In this case
+ * a new start token can be emitted before an end token. If the
+ * parser exits due to buffer split, the mode has the nesting level
+     * encoded so it can be resumed in a new buffer.
+ *
+ * Line comments will have their end token just before newline, or
+ * unterminated comment just before buffer split token (EOB or EOS).
+ * (\) characters are consumed by the comment part tokens and do not
+ * affect the end of any comment.
+ *
+ * Comment begin may include extra characters when a doc comment is
+ * recognized. The emitter flags this. End comments are unaffected.
+ */
+ LEX_TOK_COMMENT_BEGIN,
+ LEX_TOK_COMMENT_PART,
+ LEX_TOK_COMMENT_END,
+ LEX_TOK_COMMENT_UNTERMINATED,
+
+ /*
+ * Issued before ABORT token if nesting level is above a predefined
+ * level. This is to protect against malicious and misguided
+ * content, otherwise the nesting level counter could wrap and
+ * generate a different interpretation, which could be bad. The
+ * parser would probably do similar things with nested tokens.
+ */
+ LEX_TOK_COMMENT_DEEPLY_NESTED,
+
+
+ /* Operators are all recognized single character symbols, or up to
+ * four characters. The token value is the ASCII codes shifted 8
+ * bits per extra character, by default, but the emitter macros
+ * can redefine this. Values below 32 are reserved token types as
+ * discussed above.
+ *
+ * What exactly represents an operator depends on what the lexer has
+ * enabled.
+ *
+     * Printable ASCII symbols that are NOT recognized are emitted as
+     * the SYMBOL token and are always of length 1. The value can be derived
+     * from the lexeme, but not from the token itself. This may be perfectly
+ * fine for the parser, or it may be used to indicate an error.
+ * There are no illegal characters per se.
+ *
+ * Non-printable ASCII characters that are not covered by newline or
+     * blank are emitted as CTRL tokens. These act the same as the
+     * symbol token and may be used to indicate an error, or to handle form
+     * feed and other whitespace not handled by default. Unlike symbol,
+     * however, CTRL tokens also appear in strings and comments since they are
+ * generally not allowed and this makes it easy to capture (there is
+ * virtually no performance overhead in providing this service
+ * unless attempting to parse a binary format).
+ */
+
+ /* Don't bleed into this range. */
+ LEX_TOK_OPERATOR_BASE = 32,
+
+
+ /*
+ * Operators use ASCII range.
+ * Compound operators use range 0x80 to 0x7fff
+ * and possibly above for triple sequences.
+ * Custom keywords are normally negative but can be mapped
+ * to any other.
+ *
+ * The layout is designed for efficient table lookup.
+ * Compound operators might benefit from remapping down to a smaller
+ * range for compact lookup tables, but it depends on the parser.
+ */
+};
+
+/*
+ * Custom keyword token range is negative, and well below -99..0 where
+ * special codes are reserved.
+ */
+#ifndef LEX_TOK_KW_BASE
+#define LEX_TOK_KW_BASE -1000
+#endif
+
+#ifndef LEX_TOK_KW_NOT_FOUND
+#define LEX_TOK_KW_NOT_FOUND LEX_TOK_ID
+#endif
+
+
+#ifdef LEX_DEBUG
+
+#include <stdio.h>
+#include <string.h>
+
+static const char *lex_describe_token(long token)
+{
+ switch(token) {
+ case LEX_TOK_BOM: return "BOM marker";
+ case LEX_TOK_EOF: return "EOF";
+ case LEX_TOK_EOS: return "buffer zero terminated";
+ case LEX_TOK_EOB: return "buffer exhausted";
+ case LEX_TOK_ABORT: return "abort";
+ case LEX_TOK_CTRL: return "control";
+ case LEX_TOK_STRING_CTRL: return "string control";
+ case LEX_TOK_COMMENT_CTRL: return "comment control";
+ case LEX_TOK_SYMBOL: return "symbol";
+ case LEX_TOK_ID: return "identifier";
+ case LEX_TOK_INT: return "integer";
+ case LEX_TOK_FLOAT: return "float";
+ case LEX_TOK_BINARY: return "binary";
+ case LEX_TOK_OCTAL: return "octal";
+ case LEX_TOK_HEX: return "hex";
+ case LEX_TOK_HEX_FLOAT: return "hex float";
+ case LEX_TOK_BLANK: return "blank";
+ case LEX_TOK_NEWLINE: return "newline";
+ case LEX_TOK_STRING_NEWLINE: return "string newline";
+ case LEX_TOK_STRING_BEGIN: return "string begin";
+ case LEX_TOK_STRING_PART: return "string part";
+ case LEX_TOK_STRING_END: return "string end";
+ case LEX_TOK_STRING_ESCAPE: return "string escape";
+ case LEX_TOK_STRING_UNTERMINATED: return "unterminated string";
+ case LEX_TOK_COMMENT_BEGIN: return "comment begin";
+ case LEX_TOK_COMMENT_PART: return "comment part";
+ case LEX_TOK_COMMENT_END: return "comment end";
+ case LEX_TOK_COMMENT_UNTERMINATED: return "unterminated comment";
+ case LEX_TOK_COMMENT_DEEPLY_NESTED: return "deeply nested comment";
+
+ default:
+ if (token < LEX_TOK_EOF) {
+ return "keyword";
+ }
+ if (token < 32) {
+ return "undefined";
+ }
+ if (token < 0x100L) {
+ return "operator";
+ }
+ if (token < 0x10000L) {
+ return "compound operator";
+ }
+ if (token < 0x1000000L) {
+ return "tricompound operator";
+ }
+ if (token < 0x7f0000000L) {
+ return "quadcompound operator";
+ }
+ return "reserved";
+ }
+}
+
+static void lex_fprint_token(FILE *fp,
+ long token,
+ const char *first, const char *last,
+ int line, int pos)
+{
+ char buf[10];
+ const char *lexeme = first;
+ int len = (int)(last - first);
+ switch (token) {
+ case LEX_TOK_EOS:
+ case LEX_TOK_CTRL:
+ sprintf(buf, "^%02x", (int)*first);
+ lexeme = buf;
+        len = (int)strlen(buf);
+ break;
+ default:
+ break;
+ }
+ fprintf(fp, "%04d:%03d %s (0x%lx): `%.*s`\n",
+ line, pos, lex_describe_token(token), token, len, lexeme);
+}
+
+#define lex_print_token(token, first, last, line, pos) \
+ lex_fprint_token(stdout, token, first, last, line, pos)
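+
+/*
+ * Example (sketch): when LEX_DEBUG is defined by the user, a quick way to
+ * inspect the token stream is to route the emitter to the printer, e.g.
+ * before including "luthor.h" (line and position tracking omitted here):
+ *
+ *     #define LEX_DEBUG
+ *     #define lex_emit(token, first, last) \
+ *         lex_print_token(token, first, last, 0, 0)
+ */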
+
+#else /* LEX_DEBUG */
+
+#define lex_describe_token(token) "debug not available"
+#define lex_fprint_token(fp, token, first, last, line, pos) ((void)0)
+#define lex_print_token(token, first, last, line, pos) ((void)0)
+
+#endif /* LEX_DEBUG */
+
+
+#endif /* LEX_TOKENS_H */
+
diff --git a/include/flatcc/flatcc.h b/include/flatcc/flatcc.h
new file mode 100644
index 0000000..04eb187
--- /dev/null
+++ b/include/flatcc/flatcc.h
@@ -0,0 +1,268 @@
+#ifndef FLATCC_H
+#define FLATCC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This is the primary `flatcc` interface when compiling `flatcc` as a
+ * library. Functions and types in this interface will be kept
+ * stable to the extent possible or reasonable, but do not rely on other
+ * interfaces except "config.h" used to set default options for this
+ * interface.
+ *
+ * This interface is unrelated to the standalone flatbuilder library
+ * which has a life of its own.
+ */
+
+#include <stddef.h>
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4820) /* x bytes padding added in struct */
+#endif
+
+typedef struct flatcc_options flatcc_options_t;
+typedef void (*flatcc_error_fun) (void *err_ctx, const char *buf, size_t len);
+
+struct flatcc_options {
+ size_t max_schema_size;
+ int max_include_depth;
+ int max_include_count;
+ int disable_includes;
+ int allow_boolean_conversion;
+ int allow_enum_key;
+ int allow_enum_struct_field;
+ int allow_multiple_key_fields;
+ int allow_primary_key;
+ int allow_scan_for_all_fields;
+ int allow_string_key;
+ int allow_struct_field_deprecate;
+ int allow_struct_field_key;
+ int allow_struct_root;
+ int ascending_enum;
+ int hide_later_enum;
+ int hide_later_struct;
+ int offset_size;
+ int voffset_size;
+ int utype_size;
+ int bool_size;
+ int require_root_type;
+ int strict_enum_init;
+ uint64_t vt_max_count;
+
+ const char *default_schema_ext;
+ const char *default_bin_schema_ext;
+ const char *default_bin_ext;
+
+ /* Code Generator specific options. */
+ int gen_stdout;
+ int gen_dep;
+
+ const char *gen_depfile;
+ const char *gen_deptarget;
+ const char *gen_outfile;
+
+ int gen_append;
+
+ int cgen_pad;
+ int cgen_sort;
+ int cgen_pragmas;
+
+ int cgen_common_reader;
+ int cgen_common_builder;
+ int cgen_reader;
+ int cgen_builder;
+ int cgen_verifier;
+ int cgen_json_parser;
+ int cgen_json_printer;
+ int cgen_recursive;
+ int cgen_spacing;
+ int cgen_no_conflicts;
+
+
+ int bgen_bfbs;
+ int bgen_qualify_names;
+ int bgen_length_prefix;
+
+ /* Namespace args - these can override defaults so are null by default. */
+ const char *ns;
+ const char *nsc;
+
+ const char **inpaths;
+ const char **srcpaths;
+ int inpath_count;
+ int srcpath_count;
+ const char *outpath;
+};
+
+/* Runtime configurable options. */
+void flatcc_init_options(flatcc_options_t *opts);
+
+typedef void *flatcc_context_t;
+
+/*
+ * Call functions below in order listed one at a time.
+ * Each parse requires a new context.
+ *
+ * A reader file is named after the source base name, e.g.
+ * `monster.fbs` becomes `monster.h`. Builders are optional and created
+ * as `monster_builder.h`. A reader requires a common header
+ * `flatbuffers_common_reader.h` and a builder requires
+ * `flatbuffers_common_builder.h` in addition to the reader files. A
+ * reader needs no other source, but builders must link with the
+ * `flatbuilder` library and include files in `include/flatbuffers`.
+ *
+ * All the files may also be concatenated into one single file and then
+ * no attempt will be made to include files externally. This can be used
+ * with stdout output. The common builder can follow the common
+ * reader immediately, or at any later point before the first builder.
+ * The common files should only be included once, but no harm is done
+ * if duplication occurs.
+ *
+ * The outpath is prefixed to every output filename. The containing
+ * directory must exist, but the prefix may have text following
+ * the directory, for example the namespace. If outpath = "stdout",
+ * files are generated to stdout.
+ *
+ * Note that const char * options must remain valid for the lifetime
+ * of the context since they are not copied. The options object itself
+ * is not used after initialization and may be reused.
+ */
+
+/*
+ * `name` is the name of the schema file or buffer. If it is path, the
+ * basename is extracted (leading path stripped), and the default schema
+ * extension is stripped if present. The resulting name is used
+ * internally when generating output files. Typically the `name`
+ * argument will be the same as a schema file path given to
+ * `flatcc_parse_file`, but it does not have to be.
+ *
+ * `name` may be null if only common files are generated.
+ *
+ * `error_out` is an optional error handler. If null, output is truncated
+ * to a reasonable size and sent to stderr. `error_ctx` is provided as
+ * first argument to `error_out` if `error_out` is non-zero, otherwise
+ * it is ignored.
+ *
+ * Returns context or null on error.
+ */
+flatcc_context_t flatcc_create_context(flatcc_options_t *options, const char *name,
+ flatcc_error_fun error_out, void *error_ctx);
+
+/* Like `flatcc_create_context`, but with length argument for name. */
+flatcc_context_t flatcc_create_context_len(flatcc_options_t *options, const char *name,
+        size_t name_len, flatcc_error_fun error_out, void *error_ctx);
+
+/*
+ * Parse is optional - not needed for common files. If the input buffer version
+ * is called, the buffer must be zero terminated, otherwise an input
+ * path can be specified. The output path can be null.
+ *
+ * Only one parse can be called per context.
+ *
+ * The buffer size is limited to the max_schema_size option unless it is
+ * 0. The default is a reasonable size like 64K depending on config flags.
+ *
+ * The buffer must remain valid for the duration of the context.
+ *
+ * The schema cannot contain include statements when parsed as a buffer.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_parse_buffer(flatcc_context_t ctx, const char *buf, size_t buflen);
+
+/*
+ * If options contain a non-zero `inpath` option, the resulting filename is
+ * prefixed with that path unless the filename is an absolute path.
+ *
+ * Errors are sent to the error handler given during initialization,
+ * or to stderr.
+ *
+ * The file size is limited to the max_schema_size option unless it is
+ * 0. The default is a reasonable size like 64K depending on config flags.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_parse_file(flatcc_context_t ctx, const char *filename);
+
+/*
+ * Generate output files. The basename derived when the context was
+ * created is used to name the output files with respective
+ * extensions. If the outpath option is not null it is prefixed to the
+ * output files. The `cgen_common_reader, cgen_common_builder,
+ * cgen_reader, and cgen_builder` options must be set or reset depending on
+ * what is to be generated. The common files do not require a parse, and the
+ * non-common files require a successful parse or the result is
+ * undefined.
+ *
+ * Unlike the parser, the code generator always produces errors to
+ * stderr. These errors are rare, such as using overly long namespace
+ * names.
+ *
+ * If the `gen_stdout` option is set, all files are generated to stdout.
+ * In this case it is unwise to mix C and binary schema output options.
+ *
+ * If `bgen_bfbs` is set, a binary schema is generated to a file with
+ * the `.bfbs` extension. See also `flatcc_generate_binary_schema` for
+ * further details. Only `flatcc_generate_files` is called via the
+ * `flatcc` cli command.
+ *
+ * The `bgen_length_prefix` option will cause a length prefix to be
+ * written to each output binary schema. This option is only
+ * understood when writing to files.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_generate_files(flatcc_context_t ctx);
+
+/*
+ * Returns a buffer with a binary schema for a previous parse.
+ * The user is responsible for calling `free` on the returned buffer
+ * unless it returns 0 on error.
+ *
+ * Can be called instead of generate files, before, or after, but a
+ * schema must be parsed first.
+ *
+ * Returns a binary schema in `reflection.fbs` format. Any included
+ * files will be contained in the schema and there are no separate
+ * schema files for included schema.
+ *
+ * All type names are scoped, meaning that they are prefixed with their
+ * namespace using `.` as the namespace separator, for example:
+ * "MyGame.Example.Monster". Note that this differs from the current
+ * `flatc` compiler which does not prefix names. Enum names are not
+ * scoped, but the scope is implied by the containing enum type.
+ * The option `bgen_qualify_names=0` changes this behavior.
+ *
+ * If the default option `ascending_enum` is disabled, `flatcc` will
+ * accept duplicate values and overlapping ranges like the C programming
+ * language. In this case enum values in the binary schema will not be
+ * searchable. At any rate enum names are not searchable in the current
+ * schema format.
+ *
+ */
+void *flatcc_generate_binary_schema(flatcc_context_t ctx, size_t *size);
+
+/*
+ * Similar to `flatcc_generate_binary_schema` but copies the binary
+ * schema into a user supplied buffer. If the buffer is too small
+ * the return value will be negative and the buffer content undefined.
+ */
+int flatcc_generate_binary_schema_to_buffer(flatcc_context_t ctx, void *buf, size_t bufsiz);
+
+/* Must be called to deallocate resources eventually - it is valid, but
+ * has no effect, to call with a null context. */
+void flatcc_destroy_context(flatcc_context_t ctx);
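+
+/*
+ * Example (a minimal sketch, not part of the original interface): a
+ * schema-to-C compilation driver using only the functions declared
+ * above. Error handling is reduced to return codes and `compile_schema`
+ * and `schema_path` are hypothetical names.
+ *
+ *     int compile_schema(const char *schema_path)
+ *     {
+ *         flatcc_options_t opts;
+ *         flatcc_context_t ctx;
+ *         int ret = -1;
+ *
+ *         flatcc_init_options(&opts);
+ *         opts.cgen_common_reader = 1;
+ *         opts.cgen_reader = 1;
+ *         ctx = flatcc_create_context(&opts, schema_path, 0, 0);
+ *         if (!ctx) return -1;
+ *         if (flatcc_parse_file(ctx, schema_path) == 0) {
+ *             ret = flatcc_generate_files(ctx);
+ *         }
+ *         flatcc_destroy_context(ctx);
+ *         return ret;
+ *     }
+ */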
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_H */
diff --git a/include/flatcc/flatcc_accessors.h b/include/flatcc/flatcc_accessors.h
new file mode 100644
index 0000000..084ecb1
--- /dev/null
+++ b/include/flatcc/flatcc_accessors.h
@@ -0,0 +1,101 @@
+#ifndef FLATCC_ACCESSORS
+#define FLATCC_ACCESSORS
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define __flatcc_basic_scalar_accessors_impl(N, T, W, E) \
+static inline size_t N ## __size(void) \
+{ return sizeof(T); } \
+static inline T *N ## __ptr_add(T *p, size_t i) \
+{ return p + i; } \
+static inline const T *N ## __const_ptr_add(const T *p, size_t i) \
+{ return p + i; } \
+static inline T N ## _read_from_pe(const void *p) \
+{ return N ## _cast_from_pe(*(T *)p); } \
+static inline T N ## _read_to_pe(const void *p) \
+{ return N ## _cast_to_pe(*(T *)p); } \
+static inline T N ## _read(const void *p) \
+{ return *(T *)p; } \
+static inline void N ## _write_from_pe(void *p, T v) \
+{ *(T *)p = N ## _cast_from_pe(v); } \
+static inline void N ## _write_to_pe(void *p, T v) \
+{ *(T *)p = N ## _cast_to_pe(v); } \
+static inline void N ## _write(void *p, T v) \
+{ *(T *)p = v; } \
+static inline T N ## _read_from_le(const void *p) \
+{ return N ## _cast_from_le(*(T *)p); } \
+typedef struct { int is_null; T value; } N ## _option_t;
+
+#define __flatcc_define_integer_accessors_impl(N, T, W, E) \
+static inline T N ## _cast_from_pe(T v) \
+{ return (T) E ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_pe(T v) \
+{ return (T) hto ## E ## W((uint ## W ## _t)v); } \
+static inline T N ## _cast_from_le(T v) \
+{ return (T) le ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_le(T v) \
+{ return (T) htole ## W((uint ## W ## _t)v); } \
+static inline T N ## _cast_from_be(T v) \
+{ return (T) be ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_be(T v) \
+{ return (T) htobe ## W((uint ## W ## _t)v); } \
+__flatcc_basic_scalar_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_real_accessors_impl(N, T, W, E) \
+union __ ## N ## _cast { T v; uint ## W ## _t u; }; \
+static inline T N ## _cast_from_pe(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = E ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_pe(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = hto ## E ## W(x.u); return x.v; } \
+static inline T N ## _cast_from_le(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = le ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_le(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = htole ## W(x.u); return x.v; } \
+static inline T N ## _cast_from_be(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = be ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_be(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = htobe ## W(x.u); return x.v; } \
+__flatcc_basic_scalar_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_integer_accessors(N, T, W, E) \
+__flatcc_define_integer_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_real_accessors(N, T, W, E) \
+__flatcc_define_real_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_basic_integer_accessors(NS, TN, T, W, E) \
+__flatcc_define_integer_accessors(NS ## TN, T, W, E)
+
+#define __flatcc_define_basic_real_accessors(NS, TN, T, W, E) \
+__flatcc_define_real_accessors(NS ## TN, T, W, E)
+
+#define __flatcc_define_basic_scalar_accessors(NS, E) \
+__flatcc_define_basic_integer_accessors(NS, char, char, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, uint8, uint8_t, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, uint16, uint16_t, 16, E) \
+__flatcc_define_basic_integer_accessors(NS, uint32, uint32_t, 32, E) \
+__flatcc_define_basic_integer_accessors(NS, uint64, uint64_t, 64, E) \
+__flatcc_define_basic_integer_accessors(NS, int8, int8_t, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, int16, int16_t, 16, E) \
+__flatcc_define_basic_integer_accessors(NS, int32, int32_t, 32, E) \
+__flatcc_define_basic_integer_accessors(NS, int64, int64_t, 64, E) \
+__flatcc_define_basic_real_accessors(NS, float, float, 32, E) \
+__flatcc_define_basic_real_accessors(NS, double, double, 64, E)
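+
+/*
+ * Example (sketch): instantiating the macro set under a hypothetical
+ * namespace `myns_` with little endian protocol order,
+ *
+ *     __flatcc_define_basic_scalar_accessors(myns_, le)
+ *
+ * generates, among others, `myns_uint16_read_from_pe(p)`,
+ * `myns_uint16_write_to_pe(p, v)` and `myns_double_cast_from_le(v)`.
+ * The `_pe` suffix resolves to the endianness named by the second
+ * argument, and the expansion assumes `le16toh`/`htole16` style
+ * conversions are in scope (normally supplied by an endian header
+ * such as `flatcc_endian.h`).
+ */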
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ACCESSORS */
diff --git a/include/flatcc/flatcc_alloc.h b/include/flatcc/flatcc_alloc.h
new file mode 100644
index 0000000..155364c
--- /dev/null
+++ b/include/flatcc/flatcc_alloc.h
@@ -0,0 +1,127 @@
+#ifndef FLATCC_ALLOC_H
+#define FLATCC_ALLOC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * These allocation abstractions are __only__ for runtime libraries.
+ *
+ * The flatcc compiler uses Posix allocation routines regardless
+ * of how this file is configured.
+ *
+ * This header makes it possible to use systems where malloc is not
+ * valid to use. In this case the portable library will not help
+ * because it implements Posix / C11 abstractions.
+ *
+ * Systems like FreeRTOS do not work with Posix memory calls and here it
+ * can be helpful to override runtime allocation primitives.
+ *
+ * In general, it is better to customize the allocator and emitter via
+ * flatcc_builder_custom_init and to avoid using the default emitter
+ * specific high level calls that copy out a buffer that must later be
+ * deallocated. This provides full control of allocation without the need
+ * for this file.
+ *
+ *
+ * IMPORTANT
+ *
+ * If you override malloc, free, etc., make sure your applications
+ * use the same allocation methods. For example, samples/monster.c
+ * and several test cases are no longer guaranteed to work out of the
+ * box.
+ *
+ * The changes must only affect target runtime compilation including
+ * the runtime library libflatccrt.
+ *
+ * The host system flatcc compiler and the compiler library libflatcc
+ * should NOT be compiled with non-Posix allocation since the compiler
+ * has a dependency on the runtime library and the wrong free operation
+ * might be called. The safest way to avoid this problem is to
+ * compile flatcc with the CMake script and the runtime files with a
+ * dedicated build system for the target system.
+ */
+
+#include <stdlib.h>
+
+#ifndef FLATCC_ALLOC
+#define FLATCC_ALLOC(n) malloc(n)
+#endif
+
+#ifndef FLATCC_FREE
+#define FLATCC_FREE(p) free(p)
+#endif
+
+#ifndef FLATCC_REALLOC
+#define FLATCC_REALLOC(p, n) realloc(p, n)
+#endif
+
+#ifndef FLATCC_CALLOC
+#define FLATCC_CALLOC(nm, n) calloc(nm, n)
+#endif
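+
+/*
+ * Example (sketch): a target build could override the defaults above by
+ * defining the macros before this header is included (or via compiler
+ * -D flags). The `my_malloc` family below is hypothetical and stands in
+ * for whatever the target RTOS provides.
+ *
+ *     #define FLATCC_ALLOC(n) my_malloc(n)
+ *     #define FLATCC_FREE(p) my_free(p)
+ *     #define FLATCC_REALLOC(p, n) my_realloc(p, n)
+ *     #define FLATCC_CALLOC(nm, n) my_calloc(nm, n)
+ */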
+
+/*
+ * Implements `aligned_alloc` and `aligned_free`.
+ * Even with C11, this implements non-standard aligned_free needed for portable
+ * aligned_alloc implementations.
+ */
+#ifndef FLATCC_USE_GENERIC_ALIGNED_ALLOC
+
+#ifndef FLATCC_NO_PALIGNED_ALLOC
+#include "flatcc/portable/paligned_alloc.h"
+#else
+#if !defined(__aligned_free_is_defined) || !__aligned_free_is_defined
+#define aligned_free free
+#endif
+#endif
+
+#else /* FLATCC_USE_GENERIC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_ALLOC
+static inline void *__flatcc_aligned_alloc(size_t alignment, size_t size)
+{
+ char *raw;
+ void *buf;
+ size_t total_size = (size + alignment - 1 + sizeof(void *));
+
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+ raw = (char *)(size_t)FLATCC_ALLOC(total_size);
+ buf = raw + alignment - 1 + sizeof(void *);
+ buf = (void *)(((size_t)buf) & ~(alignment - 1));
+ ((void **)buf)[-1] = raw;
+ return buf;
+}
+#define FLATCC_ALIGNED_ALLOC(alignment, size) __flatcc_aligned_alloc(alignment, size)
+#endif /* FLATCC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_FREE
+static inline void __flatcc_aligned_free(void *p)
+{
+ char *raw;
+
+ if (!p) return;
+ raw = ((void **)p)[-1];
+
+ FLATCC_FREE(raw);
+}
+#define FLATCC_ALIGNED_FREE(p) __flatcc_aligned_free(p)
+#endif
+
+#endif /* FLATCC_USE_GENERIC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_ALLOC
+#define FLATCC_ALIGNED_ALLOC(a, n) aligned_alloc(a, n)
+#endif
+
+#ifndef FLATCC_ALIGNED_FREE
+#define FLATCC_ALIGNED_FREE(p) aligned_free(p)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ALLOC_H */
diff --git a/include/flatcc/flatcc_assert.h b/include/flatcc/flatcc_assert.h
new file mode 100644
index 0000000..3db3e7b
--- /dev/null
+++ b/include/flatcc/flatcc_assert.h
@@ -0,0 +1,45 @@
+#ifndef FLATCC_ASSERT_H
+#define FLATCC_ASSERT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+* This assert abstraction is only used for the flatcc runtime library.
+* The flatcc compiler uses Posix assert routines regardless of how this
+* file is configured.
+*
+* This header makes it possible to use systems where assert is not
+* valid to use. Note that `<assert.h>` may remain a dependency for static
+* assertions.
+*
+* `FLATCC_ASSERT` is designed to handle errors which cannot be ignored
+* and could lead to crash. The portable library may use assertions that
+* are not affected by this macro.
+*
+* `FLATCC_ASSERT` defaults to POSIX assert but can be overridden by a
+* preprocessor definition.
+*
+* Runtime assertions can be entirely disabled by defining
+* `FLATCC_NO_ASSERT`.
+*/
+
+#ifdef FLATCC_NO_ASSERT
+/* NOTE: This will not affect inclusion of <assert.h> for static assertions. */
+#undef FLATCC_ASSERT
+#define FLATCC_ASSERT(x) ((void)0)
+/* Grisu3 is used for floating point conversion in JSON processing. */
+#define GRISU3_NO_ASSERT
+#endif
+
+#ifndef FLATCC_ASSERT
+#include <assert.h>
+#define FLATCC_ASSERT assert
+#endif
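+
+/*
+ * Example (sketch): an embedded target could map the assertion to its
+ * own handler before including this header. `my_abort_handler` is a
+ * hypothetical function.
+ *
+ *     #define FLATCC_ASSERT(x) \
+ *         ((x) ? (void)0 : my_abort_handler(__FILE__, __LINE__, #x))
+ */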
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ASSERT_H */
diff --git a/include/flatcc/flatcc_builder.h b/include/flatcc/flatcc_builder.h
new file mode 100644
index 0000000..2e84d29
--- /dev/null
+++ b/include/flatcc/flatcc_builder.h
@@ -0,0 +1,1911 @@
+#ifndef FLATCC_BUILDER_H
+#define FLATCC_BUILDER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Library for building untyped FlatBuffers. Intended as a support
+ * library for generated C code to produce typed builders, but might
+ * also be useful in runtime environments and as support for scripting
+ * languages.
+ *
+ * The builder has two API layers: a stack based `start/end` approach,
+ * and a direct `create`, and they may be mixed freely. The direct
+ * approach may be used as part of more specialized optimizations such
+ * as rewriting buffers while the stack approach is convenient for state
+ * machine driven parsers without a stack, or with a very simple stack
+ * without extra allocations.
+ *
+ * The builder emits partial buffer sequences to a user provided emitter
+ * function and does not require a full buffer representation in memory.
+ * For this reason it also does not support sorting or other operations
+ * that require representing the buffer, but post-processors can easily
+ * do this, and the generated schema specific code can provide functions
+ * to handle this.
+ *
+ * A custom allocator with a default realloc implementation can place
+ * restraints on resource consumption and provide initial allocation
+ * sizes for various buffers and stacks in use.
+ *
+ * A buffer under construction uses a virtual address space for the
+ * completed part of the buffer, starting at 0 and growing in both
+ * directions, or just down depending on whether vtables should be
+ * clustered at the end or not. Clustering may help caching and
+ * preshipping that part of the buffer.
+ *
+ * Because an offset cannot be known before its reference location is
+ * defined, every completed table, vector, etc. returns a reference into
+ * the virtual address range. If the final buffer keeps the 0 offset,
+ * these references remain stable and may be used for external references
+ * into the buffer.
+ *
+ * The maximum buffer that can be constructed is in practice limited to
+ * half the UOFFSET_MAX size, typically 2^31 bytes, not counting
+ * clustered vtables that may consume an additional 2^31 bytes
+ * (positive address range), but in practice cannot because vtable
+ * references are signed and thus limited to 2^31 bytes (or equivalent
+ * depending on the flatbuffer types chosen).
+ *
+ * CORRECTION: in various places rules are mentioned about nesting and using
+ * a reference at most once. In fact, DAG's are also valid flatbuffers.
+ * This means a reference may be reused as long as each individual use
+ * obeys the rules and, for example, circular references are not
+ * constructed (circular types are ok, but object graphs with cycles
+ * are not permitted). Be especially aware of the offset vector create
+ * call which translates the references into offsets - this can be
+ * reverted by noting the references in the vector and calculating the base
+ * used for the offset to restore the original references after the
+ * vector has been emitted.
+ */
+
+#include <stdlib.h>
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "flatcc_flatbuffers.h"
+#include "flatcc_emitter.h"
+#include "flatcc_refmap.h"
+
+/* It is possible to enable logging here. */
+#ifndef FLATCC_BUILDER_ASSERT
+#define FLATCC_BUILDER_ASSERT(cond, reason) FLATCC_ASSERT(cond)
+#endif
+
+/*
+ * Error handling is not convenient and correct use should not cause
+ * errors beyond possibly memory allocation, but assertions are a
+ * good way to trace problems.
+ *
+ * Note: some internal assertion will remain if disabled.
+ */
+#ifndef FLATCC_BUILDER_ASSERT_ON_ERROR
+#define FLATCC_BUILDER_ASSERT_ON_ERROR 1
+#endif
+
+/*
+ * If set, checks user input against state and returns error,
+ * otherwise errors are ignored (assuming they won't happen).
+ * Errors will be asserted if enabled and checks are not skipped.
+ */
+#ifndef FLATCC_BUILDER_SKIP_CHECKS
+#define FLATCC_BUILDER_SKIP_CHECKS 0
+#endif
+
+
+/*
+ * When adding the same field to a table twice this is either an error
+ * or the existing field is returned, potentially introducing garbage
+ * if the type is a vector, table, or string. When implementing parsers
+ * it may be convenient to not treat this as an error.
+ */
+#ifndef FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+#define FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD 0
+#endif
+
+/**
+ * This type must have same size as `flatbuffers_uoffset_t`
+ * and must be a signed type.
+ */
+typedef flatbuffers_soffset_t flatcc_builder_ref_t;
+typedef flatbuffers_utype_t flatcc_builder_utype_t;
+
+/**
+ * This type must be compatible with code generation that
+ * creates union specific ref types.
+ */
+typedef struct flatcc_builder_union_ref {
+ flatcc_builder_utype_t type;
+ flatcc_builder_ref_t value;
+} flatcc_builder_union_ref_t;
+
+typedef struct flatcc_builder_union_vec_ref {
+ flatcc_builder_ref_t type;
+ flatcc_builder_ref_t value;
+} flatcc_builder_union_vec_ref_t;
+
+/**
+ * Virtual tables are off by one to avoid being mistaken for error at
+ * position 0, and it makes them detectable as such because no other
+ * reference is odd. Vtables are emitted at their actual location
+ * which is one less than the reference value.
+ */
+typedef flatbuffers_soffset_t flatcc_builder_vt_ref_t;
+
+typedef flatbuffers_uoffset_t flatcc_builder_identifier_t;
+
+/**
+ * Hints to custom allocators so they can provide initial alloc sizes
+ * etc. There will be at most one buffer for each allocation type per
+ * flatcc_builder instance. Buffers containing only structs may avoid
+ * allocation altogether using a `create` call. The vs stack must hold
+ * vtable entries for all open tables up to their requested max id, but
+ * unused max ids overlap on the stack. The final vtables only store the
+ * largest id actually added. The fs stack must hold stack frames for
+ * the nesting levels expected in the buffer, each about 50-100 bytes.
+ * The ds stack holds open vectors, table data, and nested buffer state.
+ * `create` calls bypass the `ds` and `fs` stack and are thus faster.
+ * The vb buffer holds a copy of all vtables seen and emitted since last
+ * vtable flush. The patch log holds a uoffset for every table field
+ * added to currently open tables. The hash table holds a uoffset entry
+ * for each hash slot where the allocator decides how many to provide
+ * above a certain minimum. The vd buffer allocates vtable descriptors,
+ * each of which is a reference to an emitted vtable, an offset to a cached
+ * vtable, and a link to the next descriptor with the same hash. Calling `reset`
+ * after build can either keep the allocation levels for the next
+ * buffer, or reduce the buffers already allocated by requesting 1 byte
+ * allocations (meaning provide a default).
+ *
+ * The user stack is not automatically allocated, but when entered
+ * explicitly, the boundary is remembered in the current live
+ * frame.
+ */
+enum flatcc_builder_alloc_type {
+    /* The stack where vtables are built. */
+    flatcc_builder_alloc_vs,
+    /* The stack where data structures are built. */
+ flatcc_builder_alloc_ds,
+ /* The virtual table buffer cache, holds a copy of each vt seen. */
+ flatcc_builder_alloc_vb,
+ /* The patch log, remembers table fields with outstanding offset refs. */
+ flatcc_builder_alloc_pl,
+ /* The stack of frames for nested types. */
+ flatcc_builder_alloc_fs,
+ /* The hash table part of the virtual table cache. */
+ flatcc_builder_alloc_ht,
+ /* The vtable descriptor buffer, i.e. list elements for emitted vtables. */
+ flatcc_builder_alloc_vd,
+ /* User stack frame for custom data. */
+ flatcc_builder_alloc_us,
+
+ /* Number of allocation buffers. */
+ flatcc_builder_alloc_buffer_count
+};
+
+/** Must reflect the `flatcc_builder_alloc_type` enum. */
+#define FLATCC_BUILDER_ALLOC_BUFFER_COUNT flatcc_builder_alloc_buffer_count
+
+#ifndef FLATCC_BUILDER_ALLOC
+#define FLATCC_BUILDER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+
+#ifndef FLATCC_BUILDER_FREE
+#define FLATCC_BUILDER_FREE(p) FLATCC_FREE(p)
+#endif
+
+#ifndef FLATCC_BUILDER_REALLOC
+#define FLATCC_BUILDER_REALLOC(p, n) FLATCC_REALLOC(p, n)
+#endif
+
+#ifndef FLATCC_BUILDER_ALIGNED_ALLOC
+#define FLATCC_BUILDER_ALIGNED_ALLOC(a, n) FLATCC_ALIGNED_ALLOC(a, n)
+#endif
+
+#ifndef FLATCC_BUILDER_ALIGNED_FREE
+#define FLATCC_BUILDER_ALIGNED_FREE(p) FLATCC_ALIGNED_FREE(p)
+#endif
+
+/**
+ * Emits data to a conceptual deque by appending to either front or
+ * back, starting from offset 0.
+ *
+ * Each emit call appends a strictly later or earlier sequence than the
+ * last emit with same offset sign. Thus a buffer is gradually grown at
+ * both ends. `len` is the combined length of all iov entries such that
+ * `offset + len` yields the former offset for negative offsets and
+ * `offset + len` yields the next offset for non-negative offsets.
+ * The bulk of the data will be in the negative range, possibly all of
+ * it. The first emitted range will either start or end at
+ * offset 0. If offset 0 is emitted, it indicates the start of clustered
+ * vtables. The last positive (non-zero) offset may be zero padding to
+ * place the buffer in a full multiple of `block_align`, if set.
+ *
+ * No iov entry is empty, 0 < iov_count <= FLATCC_IOV_COUNT_MAX.
+ *
+ * The source data are in general ephemeral and should be consumed
+ * immediately, as opposed to caching iov.
+ *
+ * For high performance applications:
+ *
+ * The `create` calls may reference longer living data, but header
+ * fields etc. will still be short lived. If an emitter wants to
+ * reference data in another buffer rather than copying, it should
+ * inspect the memory range. The length of an iov entry may also be used
+ * since headers are never very long (anything starting at 16 bytes can
+ * safely be assumed to be user provided, or static zero padding). It is
+ * guaranteed that data pointers in `create` calls receive a unique slot
+ * separate from temporary headers, in the iov table which may be used
+ * for range checking or hashing (`create_table` is the only call that
+ * mutates the data buffer). It is also guaranteed (with the exception
+ * of `create_table` and `create_cached_vtable`) that data provided to
+ * create calls are not referenced at all by the builder, and these data
+ * may therefore de-facto be handles rather than direct pointers when
+ * the emitter and data provider can agree on such a protocol. This does
+ * NOT apply to any start/end/add/etc. calls which do copy to stack.
+ * `flatcc_builder_padding_base` may be used to test if an iov entry is
+ * zero padding which always begins at that address.
+ *
+ * Future: the emit interface could be extended with a type code
+ * and return an existing object instead of the emitted one if, for
+ * example, they are identical. Outside this api level, generated
+ * code could provide a table comparison function to help such
+ * deduplication. It would be optional because two equal objects
+ * are not necessarily identical. The emitter already receives
+ * one object at a time.
+ *
+ * Returns 0 on success and otherwise causes the flatcc_builder
+ * to fail.
+ */
+typedef int flatcc_builder_emit_fun(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count, flatbuffers_soffset_t offset, size_t len);
+
+/*
+ * Returns a pointer to static padding used in emitter calls. May
+ * sometimes also be used for empty defaults such as identifier.
+ */
+extern const uint8_t flatcc_builder_padding_base[];
+
+/**
+ * `request` is a minimum size to be returned, but allocation is
+ * expected to grow exponentially or in reasonable chunks. Notably,
+ * `alloc_type = flatcc_builder_alloc_ht` will only use highest available
+ * power of 2. The allocator may shrink if `request` is well below
+ * current size but should avoid repeated resizing on small changes in
+ * request sizes. If `zero_fill` is non-zero, allocated data beyond
+ * the current size must be zeroed. The buffer `b` may be null with 0
+ * length initially. `alloc_context` is completely implementation
+ * dependent, and not needed when just relying on realloc. The
+ * resulting buffer may be the same or different with moved data, like
+ * realloc. Returns -1 with unmodified buffer on failure or 0 on
+ * success. The `alloc_type` identifies the buffer type. This may be
+ * used to cache buffers between instances of builders, or to decide a
+ * default allocation size larger than requested. If the requested size
+ * is zero, the buffer should be deallocated if non-empty, and success
+ * (0) returned regardless.
+ */
+typedef int flatcc_builder_alloc_fun(void *alloc_context,
+ flatcc_iovec_t *b, size_t request, int zero_fill, int alloc_type);
+
+/*
+ * The number of hash slots there will be allocated space for. The
+ * allocator may provide more. The size returned should be
+ * `sizeof(flatbuffers_uoffset_t) * count`, where the size is a power of
+ * 2 (or the rest is wasted). The hash table can store many more entries
+ * than slots using linear search. The table does not resize.
+ */
+#ifndef FLATCC_BUILDER_MIN_HASH_COUNT
+#define FLATCC_BUILDER_MIN_HASH_COUNT 64
+#endif
+
+typedef struct __flatcc_builder_buffer_frame __flatcc_builder_buffer_frame_t;
+struct __flatcc_builder_buffer_frame {
+ flatcc_builder_identifier_t identifier;
+ flatcc_builder_ref_t mark;
+ flatbuffers_uoffset_t vs_end;
+ flatbuffers_uoffset_t nest_id;
+ uint16_t flags;
+ uint16_t block_align;
+};
+
+typedef struct __flatcc_builder_vector_frame __flatcc_builder_vector_frame_t;
+struct __flatcc_builder_vector_frame {
+ flatbuffers_uoffset_t elem_size;
+ flatbuffers_uoffset_t count;
+ flatbuffers_uoffset_t max_count;
+};
+
+typedef struct __flatcc_builder_table_frame __flatcc_builder_table_frame_t;
+struct __flatcc_builder_table_frame {
+ flatbuffers_uoffset_t vs_end;
+ flatbuffers_uoffset_t pl_end;
+ uint32_t vt_hash;
+ flatbuffers_voffset_t id_end;
+};
+
+/*
+ * Store state for nested structures such as buffers, tables and vectors.
+ *
+ * For less busy data and data where access to a previous state is
+ * irrelevant, the frame may store the current state directly. Otherwise
+ * the current state is maintained in the flatcc_builder_t structure in a
+ * possibly derived form (e.g. ds pointer instead of ds_end offset) and
+ * the frame is used to store the previous state when the frame is
+ * entered.
+ *
+ * Most operations have a start/update/end cycle that decides the
+ * lifetime of a frame, but these generally also have a direct form
+ * (create) that does not use a frame at all. These still do some
+ * state updates notably passing min_align to parent which may also be
+ * an operation without a frame following the child level operation
+ * (e.g. create struct, create buffer). Ending a frame results in the
+ * same kind of updates.
+ */
+typedef struct __flatcc_builder_frame __flatcc_builder_frame_t;
+struct __flatcc_builder_frame {
+ flatbuffers_uoffset_t ds_first;
+ flatbuffers_uoffset_t type_limit;
+ flatbuffers_uoffset_t ds_offset;
+ uint16_t align;
+ uint16_t type;
+ union {
+ __flatcc_builder_table_frame_t table;
+ __flatcc_builder_vector_frame_t vector;
+ __flatcc_builder_buffer_frame_t buffer;
+ } container;
+};
+
+/**
+ * The main flatcc_builder structure. Can be stack allocated and must
+ * be initialized with `flatcc_builder_init` and cleared with
+ * `flatcc_builder_clear` to reclaim memory. Between buffer builds,
+ * `flatcc_builder_reset` may be used.
+ */
+typedef struct flatcc_builder flatcc_builder_t;
+
+struct flatcc_builder {
+ /* Next entry on reserved stack in `alloc_pl` buffer. */
+ flatbuffers_voffset_t *pl;
+ /* Next entry on reserved stack in `alloc_vs` buffer. */
+ flatbuffers_voffset_t *vs;
+ /* One above the highest entry in vs, used to track vt_size. */
+ flatbuffers_voffset_t id_end;
+ /* The evolving vtable hash updated with every new field. */
+ uint32_t vt_hash;
+
+ /* Pointer to ds_first. */
+ uint8_t *ds;
+ /* Offset from `ds` on current frame. */
+ flatbuffers_uoffset_t ds_offset;
+ /* ds buffer size relative to ds_first, clamped to max size of current type. */
+ flatbuffers_uoffset_t ds_limit;
+
+ /* ds_first, ds_first + ds_offset is current ds stack range. */
+ flatbuffers_uoffset_t ds_first;
+ /* Points to currently open frame in `alloc_fs` buffer. */
+ __flatcc_builder_frame_t *frame;
+
+ /* Only significant to emitter function, if at all. */
+ void *emit_context;
+ /* Only significant to allocator function, if at all. */
+ void *alloc_context;
+ /* Customizable write function that both appends and prepends data. */
+ flatcc_builder_emit_fun *emit;
+ /* Customizable allocator that also deallocates. */
+ flatcc_builder_alloc_fun *alloc;
+ /* Buffers indexed by `alloc_type` */
+ flatcc_iovec_t buffers[FLATCC_BUILDER_ALLOC_BUFFER_COUNT];
+ /* Number of slots in ht given as 1 << ht_width. */
+ size_t ht_width;
+
+ /* The location in vb to add next cached vtable. */
+ flatbuffers_uoffset_t vb_end;
+ /* Where to allocate next vtable descriptor for hash table. */
+ flatbuffers_uoffset_t vd_end;
+ /* Ensure final buffer is aligned to at least this. Nested buffers get their own `min_align`. */
+ uint16_t min_align;
+    /* The currently active object's alignment, isolated from nested activity. */
+    uint16_t align;
+    /* The current buffer's block alignment used when emitting the buffer. */
+ uint16_t block_align;
+ /* Signed virtual address range used for `flatcc_builder_ref_t` and emitter. */
+ flatcc_builder_ref_t emit_start;
+ flatcc_builder_ref_t emit_end;
+ /* 0 for top level, and end of buffer ref for nested buffers (can also be 0). */
+ flatcc_builder_ref_t buffer_mark;
+ /* Next nest_id. */
+ flatbuffers_uoffset_t nest_count;
+ /* Unique id to prevent sharing of vtables across buffers. */
+ flatbuffers_uoffset_t nest_id;
+ /* Current nesting level. Helpful to state-machines with explicit stack and to check `max_level`. */
+ int level;
+ /* Aggregate check for allocated frame and max_level. */
+ int limit_level;
+ /* Track size prefixed buffer. */
+ uint16_t buffer_flags;
+
+ /* Settings that may happen with no frame allocated. */
+
+ flatcc_builder_identifier_t identifier;
+
+ /* Settings that survive reset (emitter, alloc, and contexts also survive): */
+
+ /* If non-zero, vtable cache gets flushed periodically. */
+ size_t vb_flush_limit;
+ /* If non-zero, fails on deep nesting to help drivers with a stack, such as recursive parsers etc. */
+ int max_level;
+ /* If non-zero, do not cluster vtables at end, only emit negative offsets (0 by default). */
+ int disable_vt_clustering;
+
+ /* Set if the default emitter is being used. */
+ int is_default_emitter;
+ /* Only used with default emitter. */
+ flatcc_emitter_t default_emit_context;
+
+ /* Offset to the last entered user frame on the user frame stack, after frame header, or 0. */
+ size_t user_frame_offset;
+
+ /* The offset to the end of the most recent user frame. */
+ size_t user_frame_end;
+
+ /* The optional user supplied refmap for cloning DAG's - not shared with nested buffers. */
+ flatcc_refmap_t *refmap;
+};
+
+/**
+ * Call this before any other API call.
+ *
+ * The emitter handles the completed chunks of the buffer that will no
+ * longer be required by the builder. It is largely a `write` function
+ * that can append to both positive and negative offsets.
+ *
+ * No memory is allocated during init. Buffers will be allocated as
+ * needed. The `emit_context` is only used by the emitter, if at all.
+ *
+ * `flatcc_builder_reset/clear` calls are automatically forwarded to the
+ * default emitter.
+ *
+ * Returns -1 on failure, 0 on success.
+ */
+int flatcc_builder_init(flatcc_builder_t *B);
+
+/**
+ * Use instead of `flatcc_builder_init` when providing a custom allocator
+ * or emitter. Leave emitter or allocator null to use default.
+ * Cleanup of emit and alloc context must be handled manually after
+ * the builder is cleared or reset, except if emitter is null the
+ * default will be automatically cleared and reset.
+ *
+ * Returns -1 on failure, 0 on success.
+ */
+int flatcc_builder_custom_init(flatcc_builder_t *B,
+ flatcc_builder_emit_fun *emit, void *emit_context,
+ flatcc_builder_alloc_fun *alloc, void *alloc_context);
+
+/*
+ * Returns (flatcc_emitter_t *) if the default context is used.
+ * Other emitters might have null contexts.
+ */
+void *flatcc_builder_get_emit_context(flatcc_builder_t *B);
+
+/**
+ * Prepares builder for a new build. The emitter is not told when a
+ * buffer is finished or when a new begins, and must be told so
+ * separately. Allocated buffers will be zeroed, but may optionally be
+ * reduced to their defaults (signalled by reallocating each non-empty
+ * buffer to a single byte). General settings are cleared optionally,
+ * such as cache flushing. Buffer specific settings such as buffer
+ * identifier are always cleared.
+ *
+ * Returns -1 if allocator complains during buffer reduction, 0 on
+ * success.
+ */
+int flatcc_builder_custom_reset(flatcc_builder_t *B,
+ int reduce_buffers, int set_defaults);
+
+/*
+ * Same as `flatcc_builder_custom_reset` with default arguments
+ * where buffers are not reduced and default settings are not reset.
+ */
+int flatcc_builder_reset(flatcc_builder_t *B);
+
+/**
+ * Deallocates all memory by calling allocate with a zero size request
+ * on each buffer, then zeroing the builder structure itself.
+ */
+void flatcc_builder_clear(flatcc_builder_t *B);
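+
+/*
+ * Typical lifecycle (a sketch; `have_more_messages` and
+ * `build_one_message` are hypothetical user functions standing in for
+ * any sequence of the start/end or create calls documented below):
+ *
+ *     flatcc_builder_t builder, *B = &builder;
+ *
+ *     if (flatcc_builder_init(B)) return -1;
+ *     while (have_more_messages()) {
+ *         build_one_message(B);
+ *         if (flatcc_builder_reset(B)) break;
+ *     }
+ *     flatcc_builder_clear(B);
+ */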
+
+/**
+ * Allocates to next higher power of 2 using system realloc and ignores
+ * `alloc_context`. Only reduces size if a small subsequent increase in
+ * size would not trigger a reallocation. `alloc_type` is used to
+ * set minimum sizes. Hash tables are allocated to the exact requested
+ * size. See also `alloc_fun`.
+ */
+int flatcc_builder_default_alloc(void *alloc_context,
+ flatcc_iovec_t *b, size_t request, int zero_fill, int alloc_type);
+
+/**
+ * If non-zero, the vtable cache will get flushed whenever it reaches
+ * the given limit at a point in time where more space is needed. The
+ * limit is not exact as it is only tested when reallocation is
+ * required.
+ */
+void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size);
+
+/**
+ * Manual flushing of the vtable cache for long running tasks. Mostly used
+ * internally to deal with nested buffers.
+ */
+void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B);
+
+/**
+ * Low-level support function to aid in constructing nested buffers without
+ * allocation. Not for regular use.
+ *
+ * Call where `start_buffer` would have been placed when using
+ * `create_buffer` in a nested context. Save the return value on a stack
+ * as argument to `pop_buffer_alignment`.
+ *
+ * The call resets the current derived buffer alignment so the nested
+ * buffer will not be aligned to more than required.
+ *
+ * Often it will not be necessary to be so careful with alignment since
+ * the alignment cannot be invalid by failing to use push and pop, but
+ * for code generation it will ensure the correct result every time.
+ */
+uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B);
+
+/**
+ * Low-level call.
+ *
+ * Call with the return value from push_buffer_alignment after a nested
+ * `create_buffer_call`. The alignments merge back up in the buffer
+ * hierarchy so the top level buffer gets the largest of all alignments.
+ */
+void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t buffer_align);
+
+/**
+ * This value may be of interest when the buffer has been ended, for
+ * example when subsequently allocating memory for the buffer to ensure
+ * that memory is properly aligned.
+ */
+uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B);
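+
+/*
+ * Sketch of the push/pop pattern around a nested `create_buffer` call
+ * described above (`nested_root` stands in for a reference produced in
+ * the nested context, e.g. by `create_struct`; error checks omitted):
+ *
+ *     uint16_t align_save = flatcc_builder_push_buffer_alignment(B);
+ *     ... build nested content producing nested_root ...
+ *     flatcc_builder_ref_t nested = flatcc_builder_create_buffer(B, 0, 0,
+ *             nested_root, flatcc_builder_get_buffer_alignment(B),
+ *             flatcc_builder_is_nested);
+ *     flatcc_builder_pop_buffer_alignment(B, align_save);
+ */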
+
+/**
+ * Level 0 means no buffer is started, otherwise it increments with
+ * start calls and decrements with end calls (approximately for
+ * optimized operations such as table vectors).
+ *
+ * If `max_level` has been set, `get_level` always returns a value <=
+ * `max_level` provided no start call has failed.
+ *
+ * Level continues to increment inside nested buffers.
+ */
+int flatcc_builder_get_level(flatcc_builder_t *B);
+
+/**
+ * Setting the max level triggers a failure on start of new nestings
+ * when the level is reached. May be used to protect recursive descent
+ * parsers etc. or later buffer readers.
+ *
+ * The builder itself is not sensitive to depth, and the allocator is a
+ * better way to protect resource abuse.
+ *
+ * `max_level` is not reset inside nested buffers.
+ */
+void flatcc_builder_set_max_level(flatcc_builder_t *B, int level);
+
+/**
+ * By default ordinary data such as tables are placed in front of
+ * earlier produced content and vtables are placed at the very end thus
+ * clustering vtables together. This can be disabled so all content is
+ * placed in front. Nested buffers ignore this setting because they can
+ * only place content in front: they cannot blend with the containing
+ * buffer's content. Clustering could be more cache friendly
+ * and also enables pre-shipping of the vtables during transmission.
+ */
+void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable);
+
+/**
+ * Sets a new user supplied refmap which maps source pointers to
+ * references and returns the old refmap, or null. It is also
+ * possible to disable an existing refmap by setting a null
+ * refmap.
+ *
+ * A clone or pick operation may use this map when present,
+ * depending on the data type. If a hit is found, the stored
+ * reference will be used instead of performing a new clone or
+ * pick operation. It is also possible to manually populate the
+ * refmap. Note that the builder does not have a concept of
+ * clone or pick - these are higher level recursive operations
+ * to add data from one buffer to another - but such code may
+ * rely on the builder to provide the current refmap during
+ * recursive operations. For this reason, the builder makes no
+ * calls to the refmap interface on its own - it just stores the
+ * current refmap such that recursive operations can find it.
+ *
+ * Refmaps MUST be reset, replaced or disabled if a source
+ * pointer may be reused for different purposes - for example if
+ * repeatedly reading FlatBuffers into the same memory buffer
+ * and performing a clone into a buffer under construction.
+ * Refmaps may also be replaced if the same object is to be
+ * cloned several times keeping the internal DAG structure
+ * intact with every new clone being an independent object.
+ *
+ * Refmaps must also be replaced or disabled prior to starting a
+ * nested buffer and after stopping it, or when cloning an object
+ * as a nested root. THIS IS VERY EASY TO GET WRONG! The
+ * builder does a lot of bookkeeping for nested buffers but not
+ * in this case. Shared references may happen and they WILL fail
+ * verification and they WILL break when copying out a nested
+ * buffer to somewhere else. The user_frame stack may be used
+ * for pushing refmaps, but often the user code's recursive stack
+ * will work just as well.
+ *
+ * It is entirely optional to use refmaps when cloning - they
+ * preserve DAG structure and may speed up operations or slow
+ * them down, depending on the source material.
+ *
+ * Refmaps may consume a lot of space when large offset vectors
+ * are cloned when these do not have significant shared
+ * references. They may also be very cheap to use without any
+ * dynamic allocation when objects are small and have at most a
+ * few references.
+ *
+ * Refmaps only support init, insert, find, reset, clear but not
+ * delete. There is a standard implementation in the runtime
+ * source tree but it can easily be replaced at compile time and it
+ * may also be left out if unused. The builder wraps reset, insert,
+ * and find so the user does not have to check if a refmap is
+ * present but other operations must be done directly on the
+ * refmap.
+ *
+ * The builder wrapped refmap operations are valid on a null
+ * refmap which will find nothing and insert nothing.
+ *
+ * The builder will reset the refmap during a builder reset and
+ * clear the refmap during a builder clear operation. If the
+ * refmap goes out of scope before that happens it is important
+ * to call set_refmap with null and manually clear the refmap.
+ */
+static inline flatcc_refmap_t *flatcc_builder_set_refmap(flatcc_builder_t *B, flatcc_refmap_t *refmap)
+{
+ flatcc_refmap_t *refmap_old;
+
+ refmap_old = B->refmap;
+ B->refmap = refmap;
+ return refmap_old;
+}
+
+/* Retrieves the current refmap, or null. */
+static inline flatcc_refmap_t *flatcc_builder_get_refmap(flatcc_builder_t *B)
+{
+ return B->refmap;
+}
+
+/* Finds a reference, or a null reference if no refmap is active. */
+static inline flatcc_builder_ref_t flatcc_builder_refmap_find(flatcc_builder_t *B, const void *src)
+{
+ return B->refmap ? flatcc_refmap_find(B->refmap, src) : flatcc_refmap_not_found;
+}
+
+/*
+ * Inserts into the current refmap and returns the inserted ref upon
+ * success, or not_found on failure (default 0), or just
+ * returns ref if the refmap is absent.
+ *
+ * Note that if an existing item exists, the ref is replaced
+ * and the new, not the old, ref is returned.
+ */
+static inline flatcc_builder_ref_t flatcc_builder_refmap_insert(flatcc_builder_t *B, const void *src, flatcc_builder_ref_t ref)
+{
+ return B->refmap ? flatcc_refmap_insert(B->refmap, src, ref) : ref;
+}
+
+static inline void flatcc_builder_refmap_reset(flatcc_builder_t *B)
+{
+ if (B->refmap) flatcc_refmap_reset(B->refmap);
+}
+
+
+typedef uint16_t flatcc_builder_buffer_flags_t;
+static const flatcc_builder_buffer_flags_t flatcc_builder_is_nested = 1;
+static const flatcc_builder_buffer_flags_t flatcc_builder_with_size = 2;
+
+/* The flag size in the API needs to match the internal size. */
+static_assert(sizeof(flatcc_builder_buffer_flags_t) ==
+ sizeof(((flatcc_builder_t *)0)->buffer_flags), "flag size mismatch");
+
+/**
+ * An alternative to start buffer, start struct/table ... end buffer.
+ *
+ * This call is mostly of interest as a means to quickly create a zero
+ * allocation top-level buffer header following a call to create_struct,
+ * or to create_vtable/create_table. For that, it is quite simple to
+ * use. For general buffer construction without allocation, more care is
+ * needed, as discussed below.
+ *
+ * If the content is created with `start/end_table` calls, or similar,
+ * it is better to use `start/end_buffer` since stack allocation is used
+ * anyway.
+ *
+ * The buffer alignment must be provided manually as it is not derived
+ * from constructed content, unlike `start/end_buffer`. Typically
+ * `align` would be same argument as provided to `create_struct`.
+ * `get_buffer_alignment` may also be used (note: `get_buffer_alignment`
+ * may return a different value after the call because it will be updated
+ * with the `block_align` argument to `create_buffer` but that is ok).
+ *
+ * The buffer may be constructed as a nested buffer with the `is_nested
+ * = 1` flag. As a nested buffer a ubyte vector header is placed before
+ * the aligned buffer header. A top-level buffer will normally have
+ * flags set to 0.
+ *
+ * A top-level buffer may also be constructed with the `with_size = 2`
+ * flag for top level buffers. It adds a size prefix similar to
+ * `is_nested` but the size is part of the aligned buffer. A size
+ * prefixed top level buffer must be accessed with a size prefix aware
+ * reader, or the buffer given to a standard reader must point to after
+ * the size field while keeping the buffer aligned to the size field
+ * (this will depend on the readers API which may be an arbitrary other
+ * language).
+ *
+ * If the `with_size` is used with the `is_nested` flag, the size is
+ * added as usual and all fields remain aligned as before, but padding
+ * is adjusted to ensure the buffer is aligned to the size field so
+ * that, for example, the nested buffer with size can safely be copied
+ * to a new memory buffer for consumption.
+ *
+ * Generally, references may only be used within the same buffer
+ * context. With `create_buffer` this becomes less precise. The rule
+ * here is that anything that would be valid with start/end_buffer
+ * nestings is also valid when removing the `start_buffer` call and
+ * replacing `end_buffer` with `create_buffer`.
+ *
+ * Note the additional burden of tracking buffer alignment manually -
+ * To help with this use `push_buffer_alignment` where `start_buffer`
+ * would have been placed, and `pop_buffer_alignment` after the
+ * `create_buffer` call, and use `get_buffer_alignment` as described
+ * above.
+ *
+ * `create_buffer` is not suitable as a container for buffers created
+ * with `start/end_buffer` as these make assumptions about context that
+ * create buffer does not provide. Also, there is no point in doing so,
+ * since the idea of `create_buffer` is to avoid allocation in the first
+ * place.
+ */
+flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE],
+ uint16_t block_align,
+ flatcc_builder_ref_t ref, uint16_t align, flatcc_builder_buffer_flags_t flags);
+
+/**
+ * Creates a struct within the current buffer without using any
+ * allocation.
+ *
+ * The struct should be used as a root in the `end_buffer` call or as a
+ * union value as there are no other ways to use struct while conforming
+ * to the FlatBuffer format - noting that tables embed structs in their
+ * own data area except in union fields.
+ *
+ * The struct should be in little endian format and follow the usual
+ * FlatBuffers alignment rules, although this API won't care about what
+ * is being stored.
+ *
+ * May also be used to simply emit a struct through the emitter
+ * interface without being in a buffer and without being a valid
+ * FlatBuffer.
+ */
+flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B,
+ const void *data, size_t size, uint16_t align);
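+
+/*
+ * Sketch of a zero allocation top-level buffer with a struct root using
+ * the two create calls above (`my_point_t` is a hypothetical struct
+ * already in little endian format with 8 byte alignment; a null
+ * identifier and flags 0 are passed to `create_buffer`):
+ *
+ *     my_point_t point;
+ *     flatcc_builder_ref_t root;
+ *
+ *     ... fill point in little endian format ...
+ *     root = flatcc_builder_create_struct(B, &point, sizeof(point), 8);
+ *     flatcc_builder_create_buffer(B, 0, 0, root, 8, 0);
+ */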
+
+/**
+ * Starts a struct and returns a pointer that should be used immediately
+ * to fill in the struct in protocol endian format, and when done,
+ * `end_struct` should be called. The returned reference should be used
+ * as argument to `end_buffer` or as a union value. See also
+ * `create_struct`.
+ */
+void *flatcc_builder_start_struct(flatcc_builder_t *B,
+ size_t size, uint16_t align);
+
+/**
+ * Return a pointer also returned at start struct, e.g. for endian
+ * conversion.
+ */
+void *flatcc_builder_struct_edit(flatcc_builder_t *B);
+
+/**
+ * Emits the struct started by `start_struct` and returns a reference to
+ * be used as root in an enclosing `end_buffer` call or as a union
+ * value. As mentioned in `create_struct`, these can also be used more
+ * freely, but not while being conformant FlatBuffers.
+ */
+flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B);
+
+/**
+ * The buffer always aligns to at least the offset size (typically 4)
+ * and the internal alignment requirements of the buffer content which
+ * is derived as content is added.
+ *
+ * In addition, block_align can be specified. This ensures the resulting
+ * buffer is at least aligned to the block size and that the total size
+ * is zero padded to fill a block multiple if necessary. Because the
+ * emitter operates on a virtual address range before the full buffer is
+ * aligned, it may have to make assumptions based on that: For example,
+ * it may be processing encryption blocks on the fly, and the resulting
+ * buffer should be aligned to the encryption block size, even if the
+ * content is just a byte aligned struct. Block align helps ensure this.
+ * If the block align is 1 there will be no attempt to zero pad at the
+ * end, but the content may still warrant padding after the header. End
+ * padding is only needed with clustered vtables (which is the default).
+ *
+ * `block_align` is allowed to be 0 meaning it will inherit from parent if
+ * present, and otherwise it defaults to 1.
+ *
+ * The identifier may be null, and it may optionally be set later with
+ * `set_identifier` before the `end_buffer` call.
+ *
+ * General note:
+ *
+ * Only references returned with this buffer as current (i.e. last
+ * unended buffer) can be stored in other objects (tables, offset
+ * vectors) also belonging to this buffer, or used as the root argument
+ * to `end_buffer`. A reference may be stored at most once, and unused
+ * references will result in buffer garbage. All calls must be balanced
+ * around the respective start / end operations, but may otherwise nest
+ * freely, including nested buffers. Nested buffers are supposed to be
+ * stored in a table offset field to comply with FlatBuffers, but the
+ * API does not place any restrictions on where references are stored,
+ * as long as they are indicated as offset fields.
+ *
+ * All alignment in all API calls must be between 1 and 256 and must be a
+ * power of 2. This is not checked. Only if explicitly documented can it
+ * also be 0 for a default value.
+ *
+ * `flags` can be `with_size` but `is_nested` is derived from context
+ * see also `create_buffer`.
+ */
+int flatcc_builder_start_buffer(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE],
+ uint16_t block_align, flatcc_builder_buffer_flags_t flags);
+
+/**
+ * The root object should be a struct or a table to conform to the
+ * FlatBuffers format, but technically it can also be a vector or a
+ * string, or even a child buffer (which is also vector as seen by the
+ * buffer). The object must be created within the current buffer
+ * context, that is, while the current buffer is the deepest nested
+ * buffer on the stack.
+ */
+flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root);
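+
+/*
+ * Sketch of the balanced start/end form of the same construction with a
+ * 16 byte, 8 byte aligned struct root (error checks omitted; the struct
+ * content is written directly into the returned pointer in little
+ * endian format):
+ *
+ *     flatcc_builder_ref_t root, buffer;
+ *     void *p;
+ *
+ *     flatcc_builder_start_buffer(B, 0, 0, 0);
+ *     p = flatcc_builder_start_struct(B, 16, 8);
+ *     ... fill in the 16 bytes at p ...
+ *     root = flatcc_builder_end_struct(B);
+ *     buffer = flatcc_builder_end_buffer(B, root);
+ */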
+
+/**
+ * The embed buffer is mostly intended to add an existing buffer as a
+ * nested buffer. The buffer will be wrapped in a ubyte vector such that
+ * the buffer is aligned at vector start, after the size field.
+ *
+ * If `align` is 0 it will default to 8 so that all FlatBuffer numeric
+ * types will be readable. NOTE: generally do not count on align 0 being
+ * valid or even checked by the API, but in this case it may be
+ * difficult to know the internal buffer alignment, and 1 would be the wrong
+ * choice.
+ *
+ * If `block_align` is set (non-zero), the buffer is placed in an isolated
+ * block multiple. This may cost up to almost 2 block sizes in padding.
+ * If the `block_align` argument is 0, it inherits from the parent
+ * buffer block_size, or defaults to 1.
+ *
+ * The `align` argument must be set to respect the buffers internal
+ * alignment requirements, but if the buffer is smaller it will not be
+ * padded to isolate the buffer. For example a buffer with
+ * `align = 64` and `size = 65` may share its last 64 byte block with
+ * other content, but not if `block_align = 64`.
+ *
+ * Because the ubyte size field is not, by default, part of the aligned
+ * buffer, significant space can be wasted if multiple blocks are added
+ * in sequence with a large block size.
+ *
+ * In most cases the distinction between the two alignments is not
+ * important, but it allows separate configuration of block internal
+ * alignment and block size, which can be important for auto-generated
+ * code that may know the alignment of the buffer, but not the users
+ * operational requirements.
+ *
+ * If the buffer is embedded without a parent buffer, it will simply
+ * emit the buffer through the emit interface, but may also add padding
+ * up to block alignment. At top-level there will be no size field
+ * header.
+ *
+ * If `with_size` flag is set, the buffer is aligned to size field and
+ * the above note about padding space no longer applies. The size field
+ * is added regardless. The `is_nested` flag has no effect since it is
+ * implied.
+ */
+flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
+ uint16_t block_align,
+ const void *data, size_t size, uint16_t align, flatcc_builder_buffer_flags_t flags);
+
+/**
+ * Applies to the innermost open buffer. The identifier may be null or
+ * contain all zero. Overrides any identifier given to the start buffer
+ * call.
+ */
+void flatcc_builder_set_identifier(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE]);
+
+enum flatcc_builder_type {
+ flatcc_builder_empty = 0,
+ flatcc_builder_buffer,
+ flatcc_builder_struct,
+ flatcc_builder_table,
+ flatcc_builder_vector,
+ flatcc_builder_offset_vector,
+ flatcc_builder_string,
+ flatcc_builder_union_vector
+};
+
+/**
+ * Returns the object type currently on the stack, for example if
+ * needing to decide how to close a buffer. Because a table is
+ * automatically added when starting a table buffer,
+ * `flatcc_builder_table_buffer` should not normally be seen and the level
+ * should be 2 just before closing a top-level table buffer, and 0
+ * after. A `flatcc_builder_struct_buffer` will be visible at level 1.
+ *
+ */
+enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B);
+
+/**
+ * Similar to `get_type` but for a specific level. `get_type_at(B, 1)`
+ * will return `flatcc_builder_table_buffer` if this is the root buffer
+ * type. get_type_at(B, 0) is always `flatcc_builder_empty` and so is any
+ * level above `get_level`.
+ */
+enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level);
+
+/**
+ * The user stack is available for custom data. It may be used as
+ * a simple stack by extending or reducing the inner-most frame.
+ *
+ * A frame has a size and a location on the user stack. Entering
+ * a frame ensures the start is aligned to sizeof(size_t) and
+ * ensures the requested space is available without reallocation.
+ * When exiting a frame, the previous frame is restored.
+ *
+ * A user frame works completely independently of the builder's
+ * frame stack for tracking tables, vectors, etc. and does not have
+ * to be fully unwound at exit, but obviously it is not valid to
+ * exit more often than entered.
+ *
+ * The frame is zeroed when entered.
+ *
+ * Returns a non-zero handle to the user frame upon success or
+ * 0 on allocation failure.
+ */
+size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size);
+
+/**
+ * Makes the parent user frame current, if any. It is not valid to call
+ * if there isn't any current frame. Returns handle to parent frame if
+ * any, or 0.
+ */
+size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B);
+
+/**
+ * Exits the frame represented by the given handle. All more
+ * recently entered frames will also be exited. Returns the parent
+ * frame handle if any, or 0.
+ */
+size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle);
+
+/**
+ * Returns a non-zero handle to the current inner-most user frame if
+ * any, or 0.
+ */
+size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B);
+
+/*
+ * Returns a pointer to the user frame at the given handle. Any active
+ * frame can be accessed in this manner but the pointer is invalidated
+ * by user frame enter and exit operations.
+ */
+void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle);
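+
+/*
+ * Sketch of using the user frame stack for scratch data (64 bytes of
+ * zeroed scratch space here; the layout of the frame is entirely up to
+ * the caller):
+ *
+ *     size_t handle = flatcc_builder_enter_user_frame(B, 64);
+ *     uint8_t *scratch;
+ *
+ *     if (!handle) return -1;
+ *     scratch = flatcc_builder_get_user_frame_ptr(B, handle);
+ *     ... use scratch, refresh the pointer after any enter/exit call ...
+ *     flatcc_builder_exit_user_frame_at(B, handle);
+ */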
+
+/**
+ * Returns the size of the buffer and the logical start and end address
+ * with respect to the emitter's address range. `end` - `start` also
+ * yields the size. During construction `size` is the emitted number of
+ * bytes and after buffer close it is the actual buffer size - by then
+ * the start is also the return value of close buffer. End marks the end
+ * of the virtual table cluster block.
+ *
+ * NOTE: there is no guarantee that all vtables end up in the cluster
+ * block if a limit is placed on the vtable size, or if nested
+ * buffers are being used. On the other hand, if these conditions are
+ * met, it is guaranteed that all vtables are present if the vtable
+ * block is available (this depends on external transmission - the
+ * vtables are always emitted before tables using them). In all cases
+ * the vtables will behave as valid vtables in a flatbuffer.
+ */
+size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B);
+
+/**
+ * Returns the reference to the start of the emitter buffer so far, or
+ * in total after buffer end, in the virtual address range used
+ * by the emitter. Start is also returned by buffer end.
+ */
+flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B);
+
+/**
+ * Returns the reference to the end of buffer emitted so far. When
+ * clustering vtables, this is the end of tables, or after buffer end,
+ * also zero padding if block aligned. If clustering is disabled, this
+ * method will return 0 as the buffer only grows down then.
+ */
+flatcc_builder_ref_t flatcc_builder_get_buffer_mark(flatcc_builder_t *B);
+
+/**
+ * Creates the vtable in the current buffer context, somewhat similar to
+ * how create_vector operates. Each call results in a new vtable even if
+ * an identical one has already been emitted.
+ *
+ * Also consider `create_cached_vtable` which will reuse existing
+ * vtables.
+ *
+ * This is a low-level function intended to support
+ * `create_cached_vtable` or equivalent, and `create_table`, both of
+ * which are normally used indirectly via `start_table`, `table_add`,
+ * `table_add_offset`..., `table_end`.
+ *
+ * Creates a vtable as a verbatim copy. This means the vtable must
+ * include the header fields containing the vtable size and the table
+ * size in little endian voffset_t encoding followed by the vtable
+ * entries in same encoding.
+ *
+ * The function may be used to copy vtables from other buffers
+ * since they are directly transferable.
+ *
+ * The returned reference is actually the emitted location + 1. This
+ * ensures the vtable is not mistaken for error because 0 is a valid
+ * vtable reference. `create_table` is aware of this and subtracts one
+ * before computing the final offset relative to the table. This also
+ * means vtable references are uniquely identifiable by having the
+ * lowest bit set.
+ *
+ * vtable references may be reused within the same buffer, not any
+ * parent or other related buffer (technically this is possible though,
+ * as long as it is within same builder context, but it will not construct
+ * valid FlatBuffers because the buffer cannot be extracted in isolation).
+ */
+flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
+ const flatbuffers_voffset_t *vt,
+ flatbuffers_voffset_t vt_size);
+
+/**
+ * Support function to `create_vtable`. See also the uncached version
+ * `create_vtable`.
+ *
+ * Looks up the constructed vtable on the vs stack to see if it matches
+ * a cached entry. If not, it emits a new vtable either at the end if
+ * top-level and clustering is enabled, or at the front (always for
+ * nested buffers).
+ *
+ * If the same vtable was already emitted in a different buffer, but not
+ * in the current buffer, the cache entry will be reused, but a new
+ * vtable will be emitted the first time it is used in the current buffer.
+ *
+ * The returned reference is + 1 relative to the emitted address range
+ * to identify it as a vtable and to avoid mistaking the valid 0
+ * reference for an error (clustered vtables tend to start at the end at
+ * the virtual address 0, and up).
+ *
+ * The hash function can be chosen arbitrarily but may result in
+ * duplicate emitted vtables if different hash functions are being used
+ * concurrently, such as mixing the default used by `start/end table`
+ * with a custom function (this is not incorrect, it only increases the
+ * buffer size and cache pressure).
+ *
+ * If a vtable has a unique ID by other means than hashing the content,
+ * such as an integer id, and offset into another buffer, or a pointer,
+ * a good hash may be multiplication by a 32-bit prime number. The hash
+ * table is not very sensitive to collisions as it uses externally
+ * chained hashing with move to front semantics.
+ */
+flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
+ const flatbuffers_voffset_t *vt,
+ flatbuffers_voffset_t vt_size, uint32_t vt_hash);
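+
+/*
+ * Sketch of caching a verbatim little endian vtable under a
+ * caller-chosen unique id, hashed by prime multiplication as suggested
+ * above (`vt`, `vt_size` and `my_vt_id` are assumed provided by the
+ * caller):
+ *
+ *     uint32_t vt_hash = (uint32_t)my_vt_id * (uint32_t)2654435761UL;
+ *     flatcc_builder_vt_ref_t vt_ref =
+ *             flatcc_builder_create_cached_vtable(B, vt, vt_size, vt_hash);
+ */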
+
+/*
+ * Based on Knuth's prime multiplier.
+ *
+ * This is an incremental hash that is called with id and size of each
+ * non-empty field, and finally with the two vtable header fields
+ * when vtables are constructed via `table_add/table_add_offset`.
+ *
+ */
+#ifndef FLATCC_SLOW_MUL
+#ifndef FLATCC_BUILDER_INIT_VT_HASH
+#define FLATCC_BUILDER_INIT_VT_HASH(hash) { (hash) = (uint32_t)0x2f693b52UL; }
+#endif
+#ifndef FLATCC_BUILDER_UPDATE_VT_HASH
+#define FLATCC_BUILDER_UPDATE_VT_HASH(hash, id, offset) \
+ { (hash) = (((((uint32_t)id ^ (hash)) * (uint32_t)2654435761UL)\
+ ^ (uint32_t)(offset)) * (uint32_t)2654435761UL); }
+#endif
+#ifndef FLATCC_BUILDER_BUCKET_VT_HASH
+#define FLATCC_BUILDER_BUCKET_VT_HASH(hash, width) (((uint32_t)(hash)) >> (32 - (width)))
+#endif
+#endif
+
+/*
+ * By default we use Bernsteins hash as fallback if multiplication is slow.
+ *
+ * This just has to be simple, fast, and work on devices without fast
+ * multiplication. We are not too sensitive to collisions. Feel free to
+ * experiment and replace.
+ */
+#ifndef FLATCC_BUILDER_INIT_VT_HASH
+#define FLATCC_BUILDER_INIT_VT_HASH(hash) { (hash) = 5381; }
+#endif
+#ifndef FLATCC_BUILDER_UPDATE_VT_HASH
+#define FLATCC_BUILDER_UPDATE_VT_HASH(hash, id, offset) \
+ { (hash) = ((((hash) << 5) ^ (id)) << 5) ^ (offset); }
+#endif
+#ifndef FLATCC_BUILDER_BUCKET_VT_HASH
+#define FLATCC_BUILDER_BUCKET_VT_HASH(hash, width) (((1 << (width)) - 1) & (hash))
+#endif
+
+
+
+/**
+ * Normally use `start_table` instead of this call.
+ *
+ * This is a low-level call only intended for high-performance
+ * applications that repeatedly churn about similar tables of known
+ * layout, or as a support layer for other builders that maintain their
+ * own allocation rather than using the stack of this builder.
+ *
+ * Creates a table from an already emitted vtable, actual data that is
+ * properly aligned relative to data start and in little endian
+ * encoding. Unlike structs, tables can have offset fields. These must
+ * be stored as flatcc_builder_ref_t types (which have uoffset_t size) as
+ * returned by the api in native encoding. The `offsets` table contain
+ * voffsets relative to `data` start (this is different from how vtables
+ * store offsets because they are relative to a table header). The
+ * `offsets` table is only used temporarily to translate the stored
+ * references and is not part of final buffer content. `offsets` may be
+ * null if `offset_count` is 0. `align` should be the highest aligned
+ * field in the table, but `size` need not be a multiple of `align`.
+ * Aside from endian encoding, the vtable must record a table size equal
+ * to `size + sizeof(flatbuffers_uoffset_t)` because it includes the
+ * table header field size. The vtable is not accessed by this call (nor
+ * is it available). Unlike other references, the vtable reference may
+ * be shared between tables in the same buffer (not with any related
+ * buffer such as a parent buffer).
+ *
+ * The operation will not use any allocation, but will update the
+ * alignment of the containing buffer if any.
+ *
+ * Note: unlike other create calls, except `create_offset_vector`,
+ * the source data is modified in order to translate references into
+ * offsets before emitting the table.
+ */
+flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B,
+ const void *data, size_t size, uint16_t align,
+ flatbuffers_voffset_t *offsets, int offset_count,
+ flatcc_builder_vt_ref_t vt_ref);
+
+/**
+ * Starts a table, typically following a start_buffer call as an
+ * alternative to starting a struct, or to create table fields to be
+ * stored in a parent table, or in an offset vector.
+ * A number of `table_add` and `table_add_offset` calls may be placed
+ * before the `end_table` call. Struct fields should NOT use `struct`
+ * related calls (because table structs are in-place), rather they should
+ * use the `table_add` call with the appropriate size and alignment.
+ *
+ * A table, like other reference returning calls, may also be started
+ * outside a buffer if the buffer header and alignment is of no
+ * interest to the application, for example as part of an externally
+ * built buffer.
+ *
+ * `count` must be larger than the largest id used for this table
+ * instance. Normally it is set to the number of fields defined in the
+ * schema, but it may be less if memory is constrained and only few
+ * fields with low valued id's are in use. The count can be extended later
+ * with `reserve_table` if necessary. `count` may also be set to a
+ * large enough value such as FLATBUFFERS_ID_MAX + 1 if memory is not a
+ * concern (reserves about twice the maximum vtable size to track the
+ * current vtable and voffsets where references must be translated to
+ * offsets at table end). `count` may be zero if for example
+ * `reserve_table` is being used.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int flatcc_builder_start_table(flatcc_builder_t *B, int count);
+
+/**
+ * Call before adding a field with an id that is not below the count set
+ * at table start. Not needed in most cases. For performance reasons
+ * the builder does not check all bounds all the time, but the user
+ * can do so if memory constraints prevent start_table from using a
+ * conservative value. See also `start_table`.
+ *
+ * Note: this call has absolutely no effect on the table layout, it just
+ * prevents internal buffer overruns.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int flatcc_builder_reserve_table(flatcc_builder_t *B, int count);
+
+/**
+ * Completes the table constructed on the internal stack including
+ * emitting a vtable, or finding a matching vtable that has already been
+ * emitted to the same buffer. (Vtables cannot be shared between
+ * buffers, but they can between tables of the same buffer).
+ *
+ * Note: there is a considerable, but necessary, amount of bookkeeping
+ * involved in constructing tables. The `create_table` call is much
+ * faster, but it also expects a lot of work to be done already.
+ *
+ * Tables can be created with no fields added. This will result in an
+ * empty vtable and a table with just a vtable reference. If a table is
+ * used as a sub-table, such a table might also not be stored at all,
+ * but we do not return a special reference for that, nor do we provide
+ * an option to not create the table in this case. This may be
+ * interpreted as the difference between a null table (not stored in
+ * parent), and an empty table with a unique offset (and thus identity)
+ * different from other empty tables.
+ */
+flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B);
+
+/**
+ * Optionally this method can be called just before `flatcc_builder_end_table`
+ * to verify that all required fields have been set.
+ * Each entry is a table field id.
+ *
+ * Union fields should use the type field when checking for presence and
+ * may also want to check the soundness of the union field overall using
+ * `check_union_field` with the id one higher than the type field id.
+ *
+ * This function is typically called by an assertion in generated builder
+ * interfaces while release builds may want to avoid this performance
+ * overhead.
+ *
+ * Returns 1 if all fields are matched, 0 otherwise.
+ */
+int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count);
+
+/**
+ * Same as `check_required` when called with a single element.
+ *
+ * Typically used when direct calls are more convenient than building an
+ * array first. Useful when dealing with untrusted input such as parsed
+ * text from an external source.
+ */
+int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id);
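+
+/*
+ * Sketch of checking required fields just before `end_table` (the field
+ * ids 0 and 2 are an arbitrary example):
+ *
+ *     static const flatbuffers_voffset_t required[] = { 0, 2 };
+ *
+ *     FLATCC_BUILDER_ASSERT(flatcc_builder_check_required(B, required, 2),
+ *             "required field missing");
+ */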
+
+/**
+ * Checks that a union field is valid.
+ *
+ * The criteria is:
+ *
+ * If the type field (at id - 1) is present and holds a non-zero value,
+ * then the table field (at id) must also be present, and vice versa.
+ *
+ * Generated builder code may be able to enforce valid unions without
+ * this check by setting both type and table together, but e.g. parsers
+ * may receive the type and the table independently and then it makes
+ * sense to validate the union fields before table completion.
+ *
+ * Note that an absent union field is perfectly valid. If a union is
+ * required, the type field (id - 1), should be checked separately
+ * while the table field should only be checked here because it can
+ * (and must) be absent when the type is NONE (= 0).
+ */
+int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id);
+
+/**
+ * A struct, enum or scalar added should be stored in little endian in
+ * the return pointer location. The pointer is short lived and will
+ * not necessarily survive other builder calls.
+ *
+ * A union type field can also be set using this call. In fact, this is
+ * the only way to deal with unions via this API. Consequently, it is
+ * the user's responsibility to ensure the appropriate type is added
+ * at the next higher id.
+ *
+ * Null and default values:
+ *
+ * FlatBuffers does not officially provide an option for null values
+ * because it does not distinguish between default values and values
+ * that are not present. At this api level, we do not deal with defaults
+ * at all. Callee should test the stored value against the default value
+ * and only add the field if it does not match the default. This only
+ * applies to scalar and enum values. Structs cannot have defaults so
+ * their absence means null, and strings, vectors and subtables do have
+ * natural null values different from the empty object, and empty objects
+ * with different identities are also possible.
+ *
+ * To handle Null for scalars, the following approach is recommended:
+ *
+ * Provide a schema-specific `add` operation that only calls this
+ * low-level add method if the default does not match, and also provide
+ * another `set` operation that always stores the value, regardless of
+ * default. For most readers this will be transparent, except for extra
+ * space used, but for Null aware readers, these can support operations
+ * to test for Null/default/other value while still supporting the
+ * normal read operation that returns default when a value is absent
+ * (i.e. Null).
+ *
+ * It is valid to call with a size of 0 - the effect being adding the
+ * vtable entry. The call may also be dropped in this case to reduce
+ * the vtable size - the difference will be in null detection.
+ */
+void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align);
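+
+/*
+ * Sketch of adding a scalar field (id 0, uint32, default 0), assuming
+ * `value` is already in little endian byte order; the field is skipped
+ * when it matches the default. Assumes <string.h> for memcpy.
+ *
+ *     if (value != 0) {
+ *         void *p = flatcc_builder_table_add(B, 0, sizeof(uint32_t), 4);
+ *         if (!p) return -1;
+ *         memcpy(p, &value, sizeof(uint32_t));
+ *     }
+ */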
+
+/**
+ * Returns a pointer to the buffer holding the last field added. The
+ * size argument must match the field size added. May, for example, be
+ * used to perform endian conversion after initially updating field
+ * as a native struct. Must be called before the table is ended.
+ */
+void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size);
+
+/**
+ * Similar to `table_add` but copies source data into the buffer before
+ * it is returned. Useful when adding a larger struct already encoded in
+ * little endian.
+ */
+void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align);
+
+/**
+ * Add a string, vector, or sub-table depending on the type of the
+ * field identifier. The offset ref obtained when the field object was
+ * closed should be stored as is in the given pointer. The pointer
+ * is only valid short term, so create the object before calling
+ * add to table, but the owner table can be started earlier. Never mix
+ * refs from nested buffers with parent buffers.
+ *
+ * Also use this method to add nested buffers. A nested buffer is
+ * simply a buffer created while another buffer is open. The buffer
+ * close operation provides the necessary reference.
+ *
+ * When the table closes, all references get converted into offsets.
+ * Before that point, it is not required that the offset is written
+ * to.
+ */
+flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id);
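+
+/*
+ * Sketch of storing a sub-table in an offset field (field id 0 of the
+ * parent; the child is ended before its reference is stored, but the
+ * parent could also have been started first; error checks partially
+ * omitted):
+ *
+ *     flatcc_builder_ref_t child, parent, *pref;
+ *
+ *     flatcc_builder_start_table(B, 1);
+ *     ... add child fields ...
+ *     child = flatcc_builder_end_table(B);
+ *
+ *     flatcc_builder_start_table(B, 1);
+ *     pref = flatcc_builder_table_add_offset(B, 0);
+ *     if (!pref) return -1;
+ *     *pref = child;
+ *     parent = flatcc_builder_end_table(B);
+ */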
+
+/*
+ * Adds a union type and reference in a single operation and returns 0
+ * on success. Stores the type field at `id - 1` and the value at
+ * `id`. The `value` is a reference to a table, to a string, or to a
+ * standalone `struct` outside the table.
+ *
+ * If the type is 0, the value field must also be 0.
+ *
+ * Unions can also be added with separate calls to the type and the offset
+ * which can lead to better packing when the type is placed
+ * together with other small fields.
+ */
+int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
+ flatcc_builder_union_ref_t uref);
+
+/*
+ * Adds a union type vector and value vector in a single operation
+ * and returns 0 on success.
+ *
+ * If both the type and value vectors are null, nothing is added.
+ * Otherwise both must be present and have the same length.
+ *
+ * Any 0 entry in the type vector must also have a 0 entry in
+ * the value vector.
+ */
+int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
+ flatcc_builder_union_vec_ref_t uvref);
+/**
+ * Creates a vector in a single operation using an externally supplied
+ * buffer. This completely bypasses the stack, but the size must be
+ * known and the content must be little endian. Do not use for strings
+ * and offset vectors. Other flatbuffer vectors could be used as a
+ * source, but the length prefix is not required.
+ *
+ * Set `max_count` to `FLATBUFFERS_COUNT_MAX(elem_size)` before a call
+ * to any string or vector operation to get the maximum safe vector
+ * size, or use (size_t)-1 if overflow is not a concern.
+ *
+ * The max count property is a global property that remains until
+ * explicitly changed.
+ *
+ * `max_count` is there to prevent malicious or accidental overflow,
+ * which is difficult to detect by multiplication alone depending on
+ * the type sizes being used; having `max_count` thus avoids a division
+ * for every vector created. `max_count` does not guarantee a vector
+ * will fit in an empty buffer, it just ensures the internal size checks
+ * do not overflow. A safe, sane limit would be max_count / 4 because that
+ * is half the maximum buffer size that can realistically be
+ * constructed, corresponding to a vector size of `UOFFSET_MAX / 4`
+ * which can always hold the vector in 1GB excluding the size field when
+ * sizeof(uoffset_t) = 4.
+ */
+flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
+ const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count);
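+
+/*
+ * Usage sketch (illustrative only): creating a ubyte vector in a single
+ * call from data that is already little endian (trivially so for bytes);
+ * error handling is omitted.
+ *
+ *     const uint8_t data[4] = { 1, 2, 3, 4 };
+ *     flatcc_builder_ref_t vec = flatcc_builder_create_vector(B, data, 4,
+ *             sizeof(uint8_t), sizeof(uint8_t),
+ *             FLATBUFFERS_COUNT_MAX(sizeof(uint8_t)));
+ */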
+
+/**
+ * Starts a vector on the stack.
+ *
+ * Do not use these calls for string or offset vectors, but do store
+ * scalars, enums and structs, always in little endian encoding.
+ *
+ * Use `extend_vector` subsequently to add zero, one or more elements
+ * at a time.
+ *
+ * See `create_vector` for `max_count` argument (strings and offset
+ * vectors have a fixed element size and do not need this argument).
+ *
+ * Returns 0 on success.
+ */
+int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size,
+ uint16_t align, size_t max_count);
+
+/**
+ * Emits the vector constructed on the stack by start_vector.
+ *
+ * The vector may be accessed in the emitted stream using the returned
+ * reference, even if the containing buffer is still under construction.
+ * This may be useful for sorting. This api does not support sorting
+ * because offset vectors cannot read their references after emission,
+ * and while plain vectors could be sorted, it has been chosen that this
+ * task is better left as a separate processing step. Generated code can
+ * provide sorting functions that work on final in-memory buffers.
+ */
+flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B);
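+
+/*
+ * Usage sketch (illustrative only): building a scalar vector on the
+ * stack one element at a time with `flatcc_builder_vector_push`
+ * (declared below). Pushed content must already be little endian;
+ * error handling is omitted.
+ *
+ *     uint8_t x;
+ *     flatcc_builder_start_vector(B, sizeof(x), sizeof(x),
+ *             FLATBUFFERS_COUNT_MAX(sizeof(x)));
+ *     x = 42;
+ *     flatcc_builder_vector_push(B, &x);
+ *     x = 43;
+ *     flatcc_builder_vector_push(B, &x);
+ *     flatcc_builder_ref_t vec = flatcc_builder_end_vector(B);
+ */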
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on the stack,
+ * accessible up to the number of elements currently on the stack.
+ */
+void *flatcc_builder_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Returns a zero initialized buffer to a new region of the vector which
+ * is extended at the end. The buffer must be consumed before other api
+ * calls that may affect the stack, including `extend_vector`.
+ *
+ * Do not use for strings, offset or union vectors. May be used for nested
+ * buffers, but these have dedicated calls to provide better alignment.
+ */
+void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized `extend_vector` that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error. Note: for structs, care must be taken to ensure
+ * the source has been zero padded. For this reason it may be better to
+ * use extend(B, 1) and assign specific fields instead.
+ */
+void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data);
+
+/**
+ * Pushes multiple elements at a time.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count);
+
+/**
+ * Removes elements already added to a vector that has not been ended.
+ * For example, a vector built while parsing a list may remove a
+ * trailing element, or the vector may simply overallocate to get some
+ * temporary working space. The total vector size must never become negative.
+ *
+ * Returns -1 if the count is larger than the current count, or 0 on success.
+ */
+int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count);
+
+/*
+ * Similar to `create_vector` but with references that get translated
+ * into offsets. The references must, as usual, belong to the current
+ * buffer. Strings, scalar and struct vectors can emit directly without
+ * stack allocation, but offset vectors must translate the offsets
+ * and therefore need the temporary space. Thus, this function is
+ * roughly equivalent to start, append, end offset vector.
+ *
+ * See also `flatcc_builder_create_offset_vector_direct`.
+ */
+flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *data, size_t count);
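+
+/*
+ * Usage sketch (illustrative only): creating a vector of two strings in
+ * one call; error handling is omitted.
+ *
+ *     flatcc_builder_ref_t refs[2];
+ *     refs[0] = flatcc_builder_create_string_str(B, "alpha");
+ *     refs[1] = flatcc_builder_create_string_str(B, "beta");
+ *     flatcc_builder_ref_t vec = flatcc_builder_create_offset_vector(B, refs, 2);
+ */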
+
+/*
+ * NOTE: this call takes non-const source array of references
+ * and destroys the content.
+ *
+ * This is a faster version of `create_offset_vector` where the
+ * source references are destroyed. In return the vector can be
+ * emitted directly without passing over the stack.
+ */
+flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *data, size_t count);
+
+
+/**
+ * Starts a vector holding offsets to tables or strings. Before
+ * completion it will hold `flatcc_builder_ref_t` references because the
+ * offset is not known until the vector start location is known, which
+ * depends on the final size, which for parsers is generally unknown.
+ */
+int flatcc_builder_start_offset_vector(flatcc_builder_t *B);
+
+/**
+ * Similar to `end_vector` but updates all stored references so they
+ * become offsets to the vector start.
+ */
+flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B);
+
+/**
+ * Same as `flatcc_builder_end_offset_vector` except null references are
+ * permitted when the corresponding `type` entry is 0 (the 'NONE' type).
+ * This makes it possible to build union vectors with less overhead when
+ * the `type` vector is already known. Use standard offset vector calls
+ * prior to this call.
+ */
+flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *type);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on the stack,
+ * accessible up to the number of elements currently on the stack.
+ */
+void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_vector` but returns a buffer indexable as
+ * `flatcc_builder_ref_t` array. All elements must be set to a valid
+ * unique non-null reference, but truncate and extend may be used to
+ * perform edits. Unused references will leave garbage in the buffer.
+ * References should not originate from any other buffer than the
+ * current, including parents and nested buffers. It is valid to reuse
+ * references in DAG form when contained in the same buffer, excluding
+ * any nested, sibling or parent buffers.
+ */
+flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count);
+
+/** Similar to truncate_vector. */
+int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized extend that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B,
+ flatcc_builder_ref_t ref);
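+
+/*
+ * Usage sketch (illustrative only): building an offset vector
+ * incrementally, for example when the element count is not known up
+ * front; error handling is omitted.
+ *
+ *     flatcc_builder_start_offset_vector(B);
+ *     flatcc_builder_offset_vector_push(B, flatcc_builder_create_string_str(B, "a"));
+ *     flatcc_builder_offset_vector_push(B, flatcc_builder_create_string_str(B, "b"));
+ *     flatcc_builder_ref_t vec = flatcc_builder_end_offset_vector(B);
+ */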
+
+/**
+ * Takes an array of refs as argument to do a multi push operation.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *refs, size_t count);
+
+/**
+ * All union vector operations are like offset vector operations,
+ * except they take a struct with a type and a reference rather than
+ * just a reference. The finished union vector is returned as a struct
+ * of two references, one for the type vector and one for the table offset
+ * vector. Each reference goes to a separate table field where the
+ * offset vector field id must be one larger than the type vector field id.
+ */
+
+/**
+ * Creates a union vector which is in reality two vectors, a type vector
+ * and an offset vector. References to both vectors are returned.
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count);
+
+/*
+ * NOTE: this call takes non-const source array of references
+ * and destroys the content. The type array remains intact.
+ *
+ * This is a faster version of `create_union_vector` where the source
+ * references are destroyed and where the types are given in a separate
+ * array. In return the vector can be emitted directly without passing
+ * over the stack.
+ *
+ * Unlike `create_offset_vector` we do allow null references but only if
+ * the union type is NONE (0).
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count);
+
+/*
+ * Creates just the type vector part of a union vector. This is
+ * similar to a normal `create_vector` call except that the size
+ * and alignment are given implicitly. Can be used during
+ * cloning or similar operations where the types are all given
+ * but the values must be handled one by one as prescribed by
+ * the type. The values can be added separately as an offset vector.
+ */
+flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, size_t count);
+
+/**
+ * Starts a vector holding types and offsets to tables or strings. Before
+ * completion it will hold `flatcc_builder_union_ref_t` references because the
+ * offset is not known until the vector start location is known, which
+ * depends on the final size, which for parsers is generally unknown,
+ * and also because the union type must be separated out into a separate
+ * vector. It would not be practical to push on two different vectors
+ * during construction.
+ */
+int flatcc_builder_start_union_vector(flatcc_builder_t *B);
+
+/**
+ * Similar to `end_vector` but updates all stored references so they
+ * become offsets to the vector start and splits the union references
+ * into a type vector and an offset vector.
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_union_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on the stack,
+ * accessible up to the number of elements currently on the stack.
+ */
+void *flatcc_builder_union_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_offset_vector` but returns a buffer indexable as a
+ * `flatcc_builder_union_ref_t` array. All elements must be set to a valid
+ * unique non-null reference with a valid union type to match, or it
+ * must be null with a zero union type.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count);
+
+/** Similar to truncate_vector. */
+int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized extend that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
+ flatcc_builder_union_ref_t uref);
+
+/**
+ * Takes an array of union_refs as argument to do a multi push operation.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count);
+
+/**
+ * Faster string operation that avoids temporary stack storage. The
+ * string is not required to be zero-terminated, but is expected
+ * (unchecked) to be utf-8. Embedded zeroes would be allowed but
+ * ubyte vectors should be used for that. The resulting string will
+ * have a zero termination added, not included in the length.
+ */
+flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B,
+ const char *s, size_t len);
+
+/** `create_string` up to zero termination of source. */
+flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B,
+ const char *s);
+
+/**
+ * `create_string` up to zero termination or at most max_len of source.
+ *
+ * Note that like `strncpy` it will include `max_len` characters if
+ * the source is longer than `max_len`, but unlike `strncpy` it will
+ * always add zero termination.
+ */
+flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len);
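+
+/*
+ * Usage sketch (illustrative only): the three create variants differ
+ * only in how the source length is determined; error handling is omitted.
+ *
+ *     flatcc_builder_ref_t s1 = flatcc_builder_create_string(B, "hello", 5);
+ *     flatcc_builder_ref_t s2 = flatcc_builder_create_string_str(B, "hello");
+ *     flatcc_builder_ref_t s3 = flatcc_builder_create_string_strn(B, "hello world", 5);
+ */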
+
+/**
+ * Starts an empty string that can be extended subsequently.
+ *
+ * While the string is being created, it is guaranteed that there is
+ * always a null character after the end of the current string length.
+ * This also holds after `extend` and `append` operations. It is not
+ * allowed to modify the null character.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_builder_start_string(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_vector` except for the buffer return type and a
+ * slight speed advantage. Strings are expected to contain utf-8 content
+ * but this isn't verified, and null characters would be accepted. The
+ * length is given in bytes.
+ *
+ * Appending too much, then truncating can be used to trim string
+ * escapes during parsing, or convert between unicode formats etc.
+ */
+char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len);
+
+/**
+ * Concatenates a string of a given length. If the string contains zeroes (which
+ * it formally shouldn't), they will be copied in.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len);
+
+/** `append_string` up to zero termination of source. */
+char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s);
+
+/** `append_string` up to zero termination or at most max_len of source. */
+char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len);
+
+/**
+ * Similar to `truncate_vector`, available for consistency and a slight
+ * speed advantage. Reduces the string by `len` bytes - it does not set
+ * the length. The resulting length must not become negative. Zero
+ * termination is not counted.
+ *
+ * Returns -1 if the length becomes negative, 0 on success.
+ */
+int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len);
+
+/**
+ * Similar to `end_vector` but adds a trailing zero not included
+ * in the length. The trailing zero is added regardless of whatever
+ * zero content may exist in the provided string (although it
+ * formally should not contain any).
+ */
+flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B);
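+
+/*
+ * Usage sketch (illustrative only): building a string from fragments,
+ * for example while decoding escapes during parsing, then trimming the
+ * last byte with truncate; error handling is omitted.
+ *
+ *     flatcc_builder_start_string(B);
+ *     flatcc_builder_append_string_str(B, "hello, ");
+ *     flatcc_builder_append_string_str(B, "world!");
+ *     flatcc_builder_truncate_string(B, 1);
+ *     flatcc_builder_ref_t s = flatcc_builder_end_string(B);
+ */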
+
+/** Returns the length of string currently on the stack. */
+size_t flatcc_builder_string_len(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the start of the string,
+ * accessible up to the length of the string currently on the stack.
+ */
+char *flatcc_builder_string_edit(flatcc_builder_t *B);
+
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Fast access to small buffers from the default emitter.
+ *
+ * Only valid for default emitters before `flatcc_builder_clear`. The
+ * return buffer is not valid after a call to `flatcc_builder_reset` or
+ * `flatcc_builder_clear`.
+ *
+ * Returns null if the buffer size is too large to have a linear
+ * memory representation or if the emitter is not the default. A valid
+ * size is between half and a full emitter page size depending on vtable
+ * content.
+ *
+ * Non-default emitters must be accessed by means specific to the
+ * particular emitter.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The returned buffer should NOT be deallocated explicitly.
+ *
+ * The buffer size is the size reported by `flatcc_builder_get_buffer_size`.
+ */
+void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out);
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Default finalizer that allocates a buffer from the default emitter.
+ *
+ * Returns null if memory could not be allocated or if the emitter is
+ * not the default. This is just a convenience method - there are many
+ * other possible ways to extract the result of the emitter depending on
+ * use case.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The allocated buffer is aligned according to malloc which may not be
+ * sufficient in advanced cases - for that purpose
+ * `flatcc_builder_finalize_aligned_buffer` may be used.
+ *
+ * It may be worth calling `flatcc_builder_get_direct_buffer` first to see
+ * if the buffer is small enough to avoid copying.
+ *
+ * The returned buffer must be deallocated using `free`.
+ */
+void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out);
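+
+/*
+ * Usage sketch (illustrative only): extracting the finished buffer from
+ * the default emitter after the buffer has been ended (see
+ * `flatcc_builder_end_buffer` declared earlier in this header). The copy
+ * is released with `flatcc_builder_free` declared below, which is
+ * equivalent to `free` unless the allocation macros are redefined.
+ * Error handling is omitted.
+ *
+ *     size_t size;
+ *     void *buf = flatcc_builder_finalize_buffer(B, &size);
+ *     ... use buf and size ...
+ *     flatcc_builder_free(buf);
+ */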
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Similar to `flatcc_builder_finalize_buffer` but ensures the returned
+ * memory is aligned to the overall alignment required for the buffer.
+ * Often it is not necessary unless special operations rely on larger
+ * alignments than the stored scalars.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The returned buffer must be deallocated using `aligned_free` which is
+ * implemented via `flatcc_flatbuffers.h`. `free` will usually work but
+ * is not portable to platforms without posix_memalign or C11
+ * aligned_alloc support.
+ *
+ * NOTE: if a library might be compiled with a version of aligned_free
+ * that differs from the application using it, use
+ * `flatcc_builder_aligned_free` to make sure the correct deallocation
+ * function is used.
+ */
+void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out);
+
+/*
+ * A stable implementation of `aligned_alloc` that is not sensitive
+ * to the application's compile time flags.
+ */
+void *flatcc_builder_aligned_alloc(size_t alignment, size_t size);
+
+/*
+ * A stable implementation of `aligned_free` that is not sensitive
+ * to the application's compile time flags.
+ */
+void flatcc_builder_aligned_free(void *p);
+
+/*
+ * Allocates the same way as `flatcc_builder_finalize_buffer` does.
+ * Usually the same as `malloc` but can be redefined via macros.
+ */
+void *flatcc_builder_alloc(size_t size);
+
+/*
+ * A stable implementation of `free` when the default allocation
+ * methods have been redefined.
+ *
+ * Deallocates memory returned from `flatcc_builder_finalize_buffer`.
+ */
+void flatcc_builder_free(void *p);
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Convenience method to copy buffer from default emitter. Forwards
+ * call to default emitter and returns input pointer, or null if
+ * the emitter is not default or if the given size is smaller than
+ * the buffer size.
+ *
+ * Note: the `size` argument is the target buffer's capacity, not the
+ * flatcc builder's buffer size.
+ *
+ * Other emitters have custom interfaces for reaching their content.
+ */
+void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size);
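+
+/*
+ * Usage sketch (illustrative only): copying a small finished buffer into
+ * a caller owned array, using `flatcc_builder_get_buffer_size` (declared
+ * earlier in this header) to check that it fits first.
+ *
+ *     uint8_t tmp[1024];
+ *     size_t size = flatcc_builder_get_buffer_size(B);
+ *     if (size <= sizeof(tmp)) {
+ *         flatcc_builder_copy_buffer(B, tmp, sizeof(tmp));
+ *     }
+ */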
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_BUILDER_H */
diff --git a/include/flatcc/flatcc_emitter.h b/include/flatcc/flatcc_emitter.h
new file mode 100644
index 0000000..b8c83b9
--- /dev/null
+++ b/include/flatcc/flatcc_emitter.h
@@ -0,0 +1,215 @@
+#ifndef FLATCC_EMITTER_H
+#define FLATCC_EMITTER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Default implementation of a flatbuilder emitter.
+ *
+ * This may be used as a starting point for more advanced emitters,
+ * for example writing completed pages to disk or network and
+ * then recycling those pages.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_types.h"
+#include "flatcc/flatcc_iov.h"
+#include "flatcc/flatcc_alloc.h"
+
+/*
+ * The buffer steadily grows during emission but the design allows for
+ * an extension where individual pages can be recycled before the buffer
+ * is complete, for example because they have been transmitted.
+ *
+ * When done, the buffer can be cleared to free all memory, or reset to
+ * maintain an adaptive page pool for next buffer construction.
+ *
+ * Unlike an exponentially growing buffer, each buffer page remains
+ * stable in memory until reset, clear or recycle is called.
+ *
+ * Design notes for possible extensions:
+ *
+ * The buffer is a ring buffer marked by a front and a back page. The
+ * front and back may be the same page and may initially be absent.
+ * Pages outside these are unallocated pages available for recycling.
+ * Any page between (but excluding) the front and back pages may be
+ * recycled by unlinking and relinking outside the front and back pages
+ * but then copy operations no longer make sense. Each page stores the
+ * logical offset within the buffer but isn't otherwise used by the
+ * implementation - it might be used for network transmission. The buffer
+ * is not explicitly designed for multithreaded access but any page
+ * strictly between front and back is not touched unless recycled, and in
+ * that case aligned allocation is useful to prevent cache line sharing.
+ */
+
+/*
+ * Memory is allocated in fixed length page units - the first page is
+ * split between front and back so each gets half the page size. If the
+ * size is a multiple of 128 then each page offset will be a multiple of
+ * 64, which may be useful for sequencing etc.
+ */
+#ifndef FLATCC_EMITTER_PAGE_SIZE
+#define FLATCC_EMITTER_MAX_PAGE_SIZE 3000
+#define FLATCC_EMITTER_PAGE_MULTIPLE 64
+#define FLATCC_EMITTER_PAGE_SIZE ((FLATCC_EMITTER_MAX_PAGE_SIZE) &\
+ ~(2 * (FLATCC_EMITTER_PAGE_MULTIPLE) - 1))
+#endif
+
+#ifndef FLATCC_EMITTER_ALLOC
+#ifdef FLATCC_EMITTER_USE_ALIGNED_ALLOC
+/*
+ * <stdlib.h> does not always provide aligned_alloc, so include whatever
+ * is required when enabling this feature.
+ */
+#define FLATCC_EMITTER_ALLOC(n) aligned_alloc(FLATCC_EMITTER_PAGE_MULTIPLE,\
+ (((n) + FLATCC_EMITTER_PAGE_MULTIPLE - 1) & ~(FLATCC_EMITTER_PAGE_MULTIPLE - 1)))
+#ifndef FLATCC_EMITTER_FREE
+#define FLATCC_EMITTER_FREE(p) aligned_free(p)
+#endif
+#endif
+#endif
+
+#ifndef FLATCC_EMITTER_ALLOC
+#define FLATCC_EMITTER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+#ifndef FLATCC_EMITTER_FREE
+#define FLATCC_EMITTER_FREE(p) FLATCC_FREE(p)
+#endif
+
+typedef struct flatcc_emitter_page flatcc_emitter_page_t;
+typedef struct flatcc_emitter flatcc_emitter_t;
+
+struct flatcc_emitter_page {
+ uint8_t page[FLATCC_EMITTER_PAGE_SIZE];
+ flatcc_emitter_page_t *next;
+ flatcc_emitter_page_t *prev;
+ /*
+ * The offset is relative to page start, but not necessarily
+ * to any present content if part of front or back page,
+ * and undefined for unused pages.
+ */
+ flatbuffers_soffset_t page_offset;
+};
+
+/*
+ * Must be allocated and zeroed externally, e.g. on the stack
+ * then provided as emit_context to the flatbuilder along
+ * with the `flatcc_emitter` function.
+ */
+struct flatcc_emitter {
+ flatcc_emitter_page_t *front, *back;
+ uint8_t *front_cursor;
+ size_t front_left;
+ uint8_t *back_cursor;
+ size_t back_left;
+ size_t used;
+ size_t capacity;
+ size_t used_average;
+};
+
+/* Optional helper to ensure emitter is zeroed initially. */
+static inline void flatcc_emitter_init(flatcc_emitter_t *E)
+{
+ memset(E, 0, sizeof(*E));
+}
+
+/* Deallocates all buffer memory making the emitter ready for next use. */
+void flatcc_emitter_clear(flatcc_emitter_t *E);
+
+/*
+ * Similar to `flatcc_emitter_clear` but heuristically keeps some allocated
+ * memory between uses while gradually reducing peak allocations.
+ * For small buffers, a single page will remain available with no
+ * additional allocations or deallocations after first use.
+ */
+void flatcc_emitter_reset(flatcc_emitter_t *E);
+
+/*
+ * Helper function that allows a page between front and back to be
+ * recycled while the buffer is still being constructed - most likely as part
+ * of partial copy or transmission. Attempting to recycle front or back
+ * pages will result in an error. Recycling pages outside the
+ * front and back will be valid but pointless. After recycling, copy
+ * operations are no longer well-defined and should be replaced with
+ * whatever logic is recycling the pages. The reset operation
+ * automatically recycles all (remaining) pages when emission is
+ * complete. After recycling, the `flatcc_emitter_get_buffer_size` function will
+ * return as if recycle was not called, but will only represent the
+ * logical size, not the size of the active buffer. Because a recycled
+ * page is fully utilized, it is fairly easy to compensate for this if
+ * required.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_emitter_recycle_page(flatcc_emitter_t *E, flatcc_emitter_page_t *p);
+
+/*
+ * The amount of data copied with `flatcc_emitter_copy_buffer` and related
+ * functions. Normally called at end of buffer construction but is
+ * always valid, as is the copy functions. The size is a direct
+ * function of the amount emitted data so the flatbuilder itself can
+ * also provide this information.
+ */
+static inline size_t flatcc_emitter_get_buffer_size(flatcc_emitter_t *E)
+{
+ return E->used;
+}
+
+/*
+ * Returns buffer start iff the buffer fits on a single internal page.
+ * Only useful for fairly small buffers - about half the page size since
+ * one half of the first page goes to vtables that likely use little space.
+ * Returns null if request could not be served.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ */
+static inline void *flatcc_emitter_get_direct_buffer(flatcc_emitter_t *E, size_t *size_out)
+{
+ if (E->front == E->back) {
+ if (size_out) {
+ *size_out = E->used;
+ }
+ return E->front_cursor;
+ }
+ if (size_out) {
+ *size_out = 0;
+ }
+ return 0;
+}
+
+/*
+ * Copies the internal flatcc_emitter representation to an externally
+ * provided linear buffer that must have size `flatcc_emitter_get_buffer_size`.
+ *
+ * If pages have been recycled, only the remaining pages will be copied
+ * and thus less data than what `flatcc_emitter_get_buffer_size` would suggest. It
+ * makes more sense to provide a customized copy operation when
+ * recycling pages.
+ *
+ * If the buffer is too small, nothing is copied, otherwise the
+ * full buffer is copied and the input buffer is returned.
+ */
+void *flatcc_emitter_copy_buffer(flatcc_emitter_t *E, void *buf, size_t size);
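+
+/*
+ * Usage sketch (illustrative only): a stack allocated emitter passed as
+ * `emit_context` to the builder together with the `flatcc_emitter`
+ * function below (see `flatcc_builder_custom_init` in flatcc_builder.h),
+ * then drained and cleared after buffer construction; error handling is
+ * omitted.
+ *
+ *     flatcc_emitter_t emitter;
+ *     flatcc_emitter_init(&emitter);
+ *     ... construct the buffer via the flatcc builder ...
+ *     size_t size;
+ *     void *buf = flatcc_emitter_get_direct_buffer(&emitter, &size);
+ *     if (!buf) {
+ *         ... allocate flatcc_emitter_get_buffer_size(&emitter) bytes and
+ *             use flatcc_emitter_copy_buffer instead ...
+ *     }
+ *     flatcc_emitter_clear(&emitter);
+ */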
+
+/*
+ * The emitter interface function to the flatbuilder API.
+ * `emit_context` should be of type `flatcc_emitter_t` for this
+ * particular implementation.
+ *
+ * This function is compatible with the `flatcc_builder_emit_fun`
+ * type defined in "flatcc_builder.h".
+ */
+int flatcc_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_EMITTER_H */
diff --git a/include/flatcc/flatcc_endian.h b/include/flatcc/flatcc_endian.h
new file mode 100644
index 0000000..0592f31
--- /dev/null
+++ b/include/flatcc/flatcc_endian.h
@@ -0,0 +1,125 @@
+#ifndef FLATCC_ENDIAN_H
+#define FLATCC_ENDIAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This file provides helper macros to define type-specific macros and
+ * inline functions that convert between stored data and native data
+ * independently of both native (host) endianness and protocol endianness
+ * (i.e. the serialized endian format).
+ *
+ * To detect endianness correctly ensure one of the following is defined.
+ *
+ * __LITTLE_ENDIAN__
+ * __BIG_ENDIAN__
+ * FLATBUFFERS_LITTLEENDIAN=1
+ * FLATBUFFERS_LITTLEENDIAN=0
+ *
+ * Note: the Clang compiler likely already does this, but other
+ * compilers may have their own way, if at all.
+ *
+ * It is also necessary to include <endian.h> or a compatible
+ * implementation in order to provide:
+ *
+ * le16toh, le32toh, le64toh, be16toh, be32toh, be64toh,
+ * htole16, htole32, htole64, htobe16, htobe32, htobe64.
+ *
+ * A simple way to ensure all of the above for most platforms is
+ * to include the portable endian support file:
+ *
+ * #include "flatcc/portable/pendian.h"
+ *
+ * It is also necessary to include
+ *
+ * #include "flatcc/flatcc_types.h"
+ *
+ * or an equivalent file. This makes it possible to change the
+ * endianness of the serialized data and the sizes of flatbuffer
+ * specific types such as `uoffset_t`.
+ *
+ * Note: the mentioned include files are likely already included
+ * by the file including this file, at least for the default
+ * configuration.
+ */
+
+#ifndef UINT8_t
+#include <stdint.h>
+#endif
+
+/* These are needed to simplify accessor macros and are not found in <endian.h>. */
+#ifndef le8toh
+#define le8toh(n) (n)
+#endif
+
+#ifndef be8toh
+#define be8toh(n) (n)
+#endif
+
+#ifndef htole8
+#define htole8(n) (n)
+#endif
+
+#ifndef htobe8
+#define htobe8(n) (n)
+#endif
+
+#include "flatcc/flatcc_accessors.h"
+
+/* This is the binary encoding endianness, usually LE for flatbuffers. */
+#if FLATBUFFERS_PROTOCOL_IS_LE
+#define flatbuffers_endian le
+#elif FLATBUFFERS_PROTOCOL_IS_BE
+#define flatbuffers_endian be
+#else
+#error "flatbuffers has no defined endianness"
+#endif
+
+ __flatcc_define_basic_scalar_accessors(flatbuffers_, flatbuffers_endian)
+
+ __flatcc_define_integer_accessors(flatbuffers_bool, flatbuffers_bool_t,
+ FLATBUFFERS_BOOL_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(flatbuffers_union_type, flatbuffers_union_type_t,
+ FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)
+
+ __flatcc_define_integer_accessors(__flatbuffers_uoffset, flatbuffers_uoffset_t,
+ FLATBUFFERS_UOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_soffset, flatbuffers_soffset_t,
+ FLATBUFFERS_SOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_voffset, flatbuffers_voffset_t,
+ FLATBUFFERS_VOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_utype, flatbuffers_utype_t,
+ FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_thash, flatbuffers_thash_t,
+ FLATBUFFERS_THASH_WIDTH, flatbuffers_endian)
+
+/* flatcc/portable/pendian.h sets LITTLE/BIG flags if possible, and always defines le16toh. */
+#ifndef flatbuffers_is_native_pe
+#if defined(__LITTLE_ENDIAN__) || FLATBUFFERS_LITTLEENDIAN
+#undef FLATBUFFERS_LITTLEENDIAN
+#define FLATBUFFERS_LITTLEENDIAN 1
+#define flatbuffers_is_native_pe() (FLATBUFFERS_PROTOCOL_IS_LE)
+#elif defined(__BIG_ENDIAN__) || (defined(FLATBUFFERS_LITTLEENDIAN) && !FLATBUFFERS_LITTLEENDIAN)
+#undef FLATBUFFERS_LITTLEENDIAN
+#define FLATBUFFERS_LITTLEENDIAN 0
+#define flatbuffers_is_native_pe() (FLATBUFFERS_PROTOCOL_IS_BE)
+#else
+#define flatbuffers_is_native_pe() (__FLATBUFFERS_CONCAT(flatbuffers_endian, 16toh)(1) == 1)
+#endif
+#endif
+
+#ifndef flatbuffers_is_native_le
+#define flatbuffers_is_native_le() flatbuffers_is_native_pe()
+#endif
+
+#ifndef flatbuffers_is_native_be
+#define flatbuffers_is_native_be() (!flatbuffers_is_native_pe())
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ENDIAN_H */
diff --git a/include/flatcc/flatcc_epilogue.h b/include/flatcc/flatcc_epilogue.h
new file mode 100644
index 0000000..496857b
--- /dev/null
+++ b/include/flatcc/flatcc_epilogue.h
@@ -0,0 +1,8 @@
+/* Include guard intentionally left out. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "flatcc/portable/pdiagnostic_pop.h"
+
diff --git a/include/flatcc/flatcc_flatbuffers.h b/include/flatcc/flatcc_flatbuffers.h
new file mode 100644
index 0000000..4bfc743
--- /dev/null
+++ b/include/flatcc/flatcc_flatbuffers.h
@@ -0,0 +1,55 @@
+/*
+ * Even C11 compilers depend on clib support for `static_assert` which
+ * isn't always present, so we deal with this here for all compilers.
+ *
+ * Outside include guard to handle scope counter.
+ */
+#include "flatcc/portable/pstatic_assert.h"
+
+#ifndef FLATCC_FLATBUFFERS_H
+#define FLATCC_FLATBUFFERS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef flatcc_flatbuffers_defined
+#define flatcc_flatbuffers_defined
+
+#ifdef FLATCC_PORTABLE
+#include "flatcc/flatcc_portable.h"
+#endif
+#include "flatcc/portable/pwarnings.h"
+/* Needed by C99 compilers without FLATCC_PORTABLE. */
+#include "flatcc/portable/pstdalign.h"
+
+/* Handle fallthrough attribute in switch statements. */
+#include "flatcc/portable/pattributes.h"
+
+#include "flatcc/flatcc_alloc.h"
+#include "flatcc/flatcc_assert.h"
+
+#define __FLATBUFFERS_PASTE2(a, b) a ## b
+#define __FLATBUFFERS_PASTE3(a, b, c) a ## b ## c
+#define __FLATBUFFERS_CONCAT(a, b) __FLATBUFFERS_PASTE2(a, b)
+
+/*
+ * "flatcc_endian.h" requires the preceeding include files,
+ * or compatible definitions.
+ */
+#include "flatcc/portable/pendian.h"
+#include "flatcc/flatcc_types.h"
+#include "flatcc/flatcc_endian.h"
+#include "flatcc/flatcc_identifier.h"
+
+#ifndef FLATBUFFERS_WRAP_NAMESPACE
+#define FLATBUFFERS_WRAP_NAMESPACE(ns, x) ns ## _ ## x
+#endif
+
+#endif /* flatcc_flatbuffers_defined */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_FLATBUFFERS_H */
diff --git a/include/flatcc/flatcc_identifier.h b/include/flatcc/flatcc_identifier.h
new file mode 100644
index 0000000..825f0fd
--- /dev/null
+++ b/include/flatcc/flatcc_identifier.h
@@ -0,0 +1,148 @@
+#ifndef FLATCC_IDENTIFIER_H
+#define FLATCC_IDENTIFIER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef FLATCC_FLATBUFFERS_H
+#error "include via flatcc/flatcc_flatbuffers.h"
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/*
+ * FlatBuffers identifiers are normally specified by "file_identifier" in
+ * the schema, but a standard hash of the fully qualified type name can
+ * also be used. This file implements such a mapping, but the generated
+ * headers also contain the necessary information for known types.
+ */
+
+
+/*
+ * Returns the type hash of a given name in native endian format.
+ * Generated code already provides these, but if a name was changed
+ * in the schema it may be relevant to recompute the hash manually.
+ *
+ * The wire-format of this value should always be little endian.
+ *
+ * Note: this must be the fully qualified name, e.g. in the namespace
+ * "MyGame.Example":
+ *
+ * flatbuffers_type_hash_from_name("MyGame.Example.Monster");
+ *
+ * or, in the global namespace just:
+ *
+ * flatbuffers_type_hash_from_name("MyTable");
+ *
+ * This assumes 32 bit hash type. For other sizes, other FNV-1a
+ * constants would be required.
+ *
+ * Note that we reserve hash value 0 for missing or ignored value.
+ */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_name(const char *name)
+{
+ uint32_t hash = UINT32_C(2166136261);
+ while (*name) {
+ hash ^= (unsigned char)*name;
+ hash = hash * UINT32_C(16777619);
+ ++name;
+ }
+ if (hash == 0) {
+ hash = UINT32_C(2166136261);
+ }
+ return hash;
+}
+
+/*
+ * Type hash encoded as little endian file identifier string.
+ * Note: if type hash is 0, the identifier should be null which
+ * we cannot return in this interface.
+ */
+static inline void flatbuffers_identifier_from_type_hash(flatbuffers_thash_t type_hash, flatbuffers_fid_t out_identifier)
+{
+ out_identifier[0] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[1] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[2] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[3] = (char)(type_hash & 0xff);
+}
+
+/* Native integer encoding of file identifier. */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_identifier(const flatbuffers_fid_t identifier)
+{
+ uint8_t *p = (uint8_t *)identifier;
+
+ return identifier ?
+ (uint32_t)p[0] + (((uint32_t)p[1]) << 8) + (((uint32_t)p[2]) << 16) + (((uint32_t)p[3]) << 24) : 0;
+}
+
+/*
+ * Convert a null terminated string identifier like "MONS" or "X" into a
+ * native type hash identifier, usually for comparison. This will not
+ * work with type hash strings because they can contain null bytes.
+ */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_string(const char *identifier)
+{
+ flatbuffers_thash_t h = 0;
+ const uint8_t *p = (const uint8_t *)identifier;
+
+ if (!p[0]) return h;
+ h += ((flatbuffers_thash_t)p[0]);
+ if (!p[1]) return h;
+ h += ((flatbuffers_thash_t)p[1]) << 8;
+ if (!p[2]) return h;
+ h += ((flatbuffers_thash_t)p[2]) << 16;
+ /* No need to test for termination here. */
+ h += ((flatbuffers_thash_t)p[3]) << 24;
+ return h;
+}
+
+/*
+ * Computes the little endian wire format of the type hash. It can be
+ * used as a file identifier argument to various flatcc buffer calls.
+ *
+ * `flatbuffers_fid_t` is just `char [4]` for the default flatbuffers
+ * type system defined in `flatcc/flatcc_types.h`.
+ */
+static inline void flatbuffers_identifier_from_name(const char *name, flatbuffers_fid_t out_identifier)
+{
+ flatbuffers_identifier_from_type_hash(flatbuffers_type_hash_from_name(name), out_identifier);
+}
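+
+/*
+ * Usage sketch (illustrative only): computing the type hash and the
+ * corresponding 4-byte file identifier for a fully qualified type name.
+ *
+ *     flatbuffers_thash_t h = flatbuffers_type_hash_from_name("MyGame.Example.Monster");
+ *     flatbuffers_fid_t fid;
+ *     flatbuffers_identifier_from_type_hash(h, fid);
+ *
+ * which is equivalent to:
+ *
+ *     flatbuffers_identifier_from_name("MyGame.Example.Monster", fid);
+ */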
+
+/*
+ * This is a collision free hash (a permutation) of the type hash to
+ * provide better distribution for use in hash tables. It is likely not
+ * necessary in practice, and for uniqueness of identifiers it provides no
+ * advantage over just using the FNV-1a type hash, except when truncating
+ * the identifier to less than 32-bits.
+ *
+ * Note: the output should not be used in transmission. It provides no
+ * additional information and just complicates matters. Furthermore, the
+ * unmodified type hash has the benefit that it can seed a child namespace.
+ */
+static inline uint32_t flatbuffers_disperse_type_hash(flatbuffers_thash_t type_hash)
+{
+ /* http://stackoverflow.com/a/12996028 */
+ uint32_t x = type_hash;
+
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x);
+ return x;
+}
+
+
+/* We have hardcoded assumptions about identifier size. */
+static_assert(sizeof(flatbuffers_fid_t) == 4, "unexpected file identifier size");
+static_assert(sizeof(flatbuffers_thash_t) == 4, "unexpected type hash size");
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_IDENTIFIER_H */
diff --git a/include/flatcc/flatcc_iov.h b/include/flatcc/flatcc_iov.h
new file mode 100644
index 0000000..a6d27f8
--- /dev/null
+++ b/include/flatcc/flatcc_iov.h
@@ -0,0 +1,31 @@
+#ifndef FLATCC_IOV_H
+#define FLATCC_IOV_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+/*
+ * The emitter receives one or a few buffers at a time via
+ * this type. It is a <sys/uio.h> compatible iovec structure used for
+ * the allocation and emitter interfaces.
+ */
+typedef struct flatcc_iovec flatcc_iovec_t;
+struct flatcc_iovec {
+ void *iov_base;
+ size_t iov_len;
+};
+
+/*
+ * The largest iovec vector the builder will issue. It will
+ * always be a relatively small number.
+ */
+#define FLATCC_IOV_COUNT_MAX 8
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_IOV_H */
diff --git a/include/flatcc/flatcc_json_parser.h b/include/flatcc/flatcc_json_parser.h
new file mode 100644
index 0000000..f828129
--- /dev/null
+++ b/include/flatcc/flatcc_json_parser.h
@@ -0,0 +1,908 @@
+#ifndef FLATCC_JSON_PARSE_H
+#define FLATCC_JSON_PARSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * JSON RFC:
+ * http://www.ietf.org/rfc/rfc4627.txt?number=4627
+ *
+ * With several flatbuffers specific extensions.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_builder.h"
+#include "flatcc/flatcc_unaligned.h"
+
+#define PDIAGNOSTIC_IGNORE_UNUSED
+#include "flatcc/portable/pdiagnostic_push.h"
+
+typedef uint32_t flatcc_json_parser_flags_t;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_skip_unknown = 1;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_force_add = 2;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_with_size = 4;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_skip_array_overflow = 8;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_reject_array_underflow = 16;
+
+#define FLATCC_JSON_PARSE_ERROR_MAP(XX) \
+ XX(ok, "ok") \
+ XX(eof, "eof") \
+ XX(deep_nesting, "deep nesting") \
+ XX(trailing_comma, "trailing comma") \
+ XX(expected_colon, "expected colon") \
+ XX(unexpected_character, "unexpected character") \
+ XX(invalid_numeric, "invalid numeric") \
+ XX(overflow, "overflow") \
+ XX(underflow, "underflow") \
+ XX(unbalanced_array, "unbalanced array") \
+ XX(unbalanced_object, "unbalanced object") \
+ XX(precision_loss, "precision loss") \
+ XX(float_unexpected, "float unexpected") \
+ XX(unknown_symbol, "unknown symbol") \
+ XX(unquoted_symbolic_list, "unquoted list of symbols") \
+ XX(unknown_union, "unknown union type") \
+ XX(expected_string, "expected string") \
+ XX(invalid_character, "invalid character") \
+ XX(invalid_escape, "invalid escape") \
+ XX(invalid_type, "invalid type") \
+ XX(unterminated_string, "unterminated string") \
+ XX(expected_object, "expected object") \
+ XX(expected_array, "expected array") \
+ XX(expected_scalar, "expected literal or symbolic scalar") \
+ XX(expected_union_type, "expected union type") \
+ XX(union_none_present, "union present with type NONE") \
+ XX(union_none_not_null, "union of type NONE is not null") \
+ XX(union_incomplete, "table has incomplete union") \
+ XX(duplicate, "table has duplicate field") \
+ XX(required, "required field missing") \
+ XX(union_vector_length, "union vector length mismatch") \
+ XX(base64, "invalid base64 content") \
+ XX(base64url, "invalid base64url content") \
+ XX(array_underflow, "fixed length array underflow") \
+ XX(array_overflow, "fixed length array overflow") \
+ XX(runtime, "runtime error") \
+ XX(not_supported, "not supported")
+
+enum flatcc_json_parser_error_no {
+#define XX(no, str) flatcc_json_parser_error_##no,
+ FLATCC_JSON_PARSE_ERROR_MAP(XX)
+#undef XX
+};
+
+const char *flatcc_json_parser_error_string(int err);
+
+#define flatcc_json_parser_ok flatcc_json_parser_error_ok
+#define flatcc_json_parser_eof flatcc_json_parser_error_eof
+
+/*
+ * The struct may be zero initialized, in which case the line count will
+ * start at line zero, or the line may be set to 1 initially. The context
+ * is only used for error reporting and for tracking non-standard
+ * unquoted content.
+ *
+ * `ctx` may for example hold a flatcc_builder_t pointer.
+ */
+typedef struct flatcc_json_parser_ctx flatcc_json_parser_t;
+struct flatcc_json_parser_ctx {
+ flatcc_builder_t *ctx;
+ const char *line_start;
+ flatcc_json_parser_flags_t flags;
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ int unquoted;
+#endif
+
+ int line, pos;
+ int error;
+ const char *start;
+ const char *end;
+ const char *error_loc;
+ /* Set at end of successful parse. */
+ const char *end_loc;
+};
+
+static inline int flatcc_json_parser_get_error(flatcc_json_parser_t *ctx)
+{
+ return ctx->error;
+}
+
+static inline void flatcc_json_parser_init(flatcc_json_parser_t *ctx, flatcc_builder_t *B, const char *buf, const char *end, flatcc_json_parser_flags_t flags)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->ctx = B;
+ ctx->line_start = buf;
+ ctx->line = 1;
+ ctx->flags = flags;
+ /* These are not needed for parsing, but may be helpful in reporting etc. */
+ ctx->start = buf;
+ ctx->end = end;
+ ctx->error_loc = buf;
+}
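+
+/*
+ * Usage sketch (illustrative only): initializing the parser over a JSON
+ * text and reporting a parse error afterwards. Here `json`, `json_len`
+ * and an initialized builder `B` are assumed to exist, and the actual
+ * parsing is performed by generated or hand written parsers built on the
+ * helpers below (elided here).
+ *
+ *     flatcc_json_parser_t parser;
+ *     flatcc_json_parser_init(&parser, B, json, json + json_len,
+ *             flatcc_json_parser_f_skip_unknown);
+ *     ... run a parser over [json, json + json_len) ...
+ *     if (flatcc_json_parser_get_error(&parser)) {
+ *         fprintf(stderr, "%d:%d: %s\n", parser.line, parser.pos,
+ *                 flatcc_json_parser_error_string(parser.error));
+ *     }
+ */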
+
+const char *flatcc_json_parser_set_error(flatcc_json_parser_t *ctx, const char *loc, const char *end, int reason);
+
+/*
+ * Wide space skipping is not necessarily beneficial in the typical case,
+ * but it also isn't expensive, so it may be added when there are
+ * applications that can benefit.
+ */
+const char *flatcc_json_parser_space_ext(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+static inline const char *flatcc_json_parser_space(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (end - buf > 1) {
+ if (buf[0] > 0x20) {
+ return buf;
+ }
+ if (buf[0] == 0x20 && buf[1] > 0x20) {
+ return buf + 1;
+ }
+ }
+ return flatcc_json_parser_space_ext(ctx, buf, end);
+}
+
+
+static inline const char *flatcc_json_parser_string_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end || *buf != '\"') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_string);
+ }
+ return ++buf;
+}
+
+static inline const char *flatcc_json_parser_string_end(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end || *buf != '\"') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ return ++buf;
+}
+
+/*
+ * Parse a string into a fixed length char array `s` with length `n`
+ * and raise errors according to overflow/underflow runtime flags. Zero
+ * and truncate as needed. A trailing zero is not inserted if the input
+ * is at least the same length as the char array.
+ *
+ * Runtime flags: `skip_array_overflow`, `pad_array_underflow`.
+ */
+const char *flatcc_json_parser_char_array(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, char *s, size_t n);
+
+/*
+ * Creates a string. Returns *ref == 0 on unrecoverable error or
+ * sets *ref to a valid new string reference.
+ */
+const char *flatcc_json_parser_build_string(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref);
+
+typedef char flatcc_json_parser_escape_buffer_t[5];
+/*
+ * If the buffer does not hold a valid escape sequence, an error is
+ * returned with code[0] = 0.
+ *
+ * Otherwise code[0] holds the length (1-4) of the remaining
+ * characters in the code, transcoded from the escape sequence,
+ * where a length of 4 only happens with escaped surrogate pairs.
+ *
+ * The JSON extension `\xXX` is supported and may produce invalid UTF-8
+ * characters such as 0xff. The standard JSON escape `\uXXXX` is not
+ * checked for invalid code points and may produce invalid UTF-8.
+ *
+ * Regular characters are expected to be valid UTF-8 but they are not checked
+ * and may therefore produce invalid UTF-8.
+ *
+ * Control characters within a string are rejected except in the
+ * standard JSON escaped form for `\n \r \t \b \f`.
+ *
+ * Additional escape codes as per standard JSON: `\\ \/ \"`.
+ */
+const char *flatcc_json_parser_string_escape(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_json_parser_escape_buffer_t code);
+
+/*
+ * Parses the longest unescaped run of string content followed by either
+ * an escape encoding, string termination, or error.
+ */
+const char *flatcc_json_parser_string_part(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+static inline const char *flatcc_json_parser_symbol_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end) {
+ return buf;
+ }
+ if (*buf == '\"') {
+ ++buf;
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ ctx->unquoted = 0;
+#endif
+ } else {
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (*buf == '.') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ ctx->unquoted = 1;
+#else
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+#endif
+ }
+ return buf;
+}
+
+static inline uint64_t flatcc_json_parser_symbol_part_ext(const char *buf, const char *end)
+{
+ uint64_t w = 0;
+ size_t n = (size_t)(end - buf);
+
+ if (n > 8) {
+ n = 8;
+ }
+ /* This can bloat inlining for a rarely executed case. */
+#if 1
+ switch (n) {
+ case 8:
+ w |= ((uint64_t)buf[7]) << (0 * 8);
+ goto lbl_n_7;
+ case 7:
+lbl_n_7:
+ w |= ((uint64_t)buf[6]) << (1 * 8);
+ goto lbl_n_6;
+ case 6:
+lbl_n_6:
+ w |= ((uint64_t)buf[5]) << (2 * 8);
+ goto lbl_n_5;
+ case 5:
+lbl_n_5:
+ w |= ((uint64_t)buf[4]) << (3 * 8);
+ goto lbl_n_4;
+ case 4:
+lbl_n_4:
+ w |= ((uint64_t)buf[3]) << (4 * 8);
+ goto lbl_n_3;
+ case 3:
+lbl_n_3:
+ w |= ((uint64_t)buf[2]) << (5 * 8);
+ goto lbl_n_2;
+ case 2:
+lbl_n_2:
+ w |= ((uint64_t)buf[1]) << (6 * 8);
+ goto lbl_n_1;
+ case 1:
+lbl_n_1:
+ w |= ((uint64_t)buf[0]) << (7 * 8);
+ break;
+ case 0:
+ break;
+ }
+#else
+ /* But this is hardly much of an improvement. */
+ {
+ size_t i;
+ for (i = 0; i < n; ++i) {
+ w <<= 8;
+ if (i < n) {
+ w = buf[i];
+ }
+ }
+ }
+#endif
+ return w;
+}
+
+/*
+ * Read out string as a big endian word. This allows for trie lookup,
+ * also when trailing characters are beyond the keyword. This assumes the
+ * external words tested against are valid and therefore there need be
+ * no checks here. If a match is not made, the symbol_end function will
+ * consume and check any unmatched content - from _before_ this function
+ * was called - i.e. the returned buffer is tentative for use only if we
+ * accept the part returned here.
+ *
+ * Used for both symbols and symbolic constants.
+ */
+static inline uint64_t flatcc_json_parser_symbol_part(const char *buf, const char *end)
+{
+ size_t n = (size_t)(end - buf);
+
+#if FLATCC_ALLOW_UNALIGNED_ACCESS
+ if (n >= 8) {
+ return be64toh(*(uint64_t *)buf);
+ }
+#endif
+ return flatcc_json_parser_symbol_part_ext(buf, end);
+}
+
+/* Don't allow space in dot notation, either inside or outside strings. */
+static inline const char *flatcc_json_parser_match_scope(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos)
+{
+ const char *mark = buf;
+
+ (void)ctx;
+
+ if (end - buf <= pos) {
+ return mark;
+ }
+ if (buf[pos] != '.') {
+ return mark;
+ }
+ return buf + pos + 1;
+}
+
+const char *flatcc_json_parser_match_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos, int *more);
+
+/* We allow '.' in unquoted symbols, but not at the start or end. */
+static inline const char *flatcc_json_parser_symbol_end(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char c, clast = 0;
+
+
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ while (buf != end && *buf > 0x20) {
+ clast = c = *buf;
+ if (c == '_' || c == '.' || (c & 0x80) || (c >= '0' && c <= '9')) {
+ ++buf;
+ continue;
+ }
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ ++buf;
+ continue;
+ }
+ break;
+ }
+ if (clast == '.') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ } else {
+#else
+ {
+#endif
+ while (buf != end && *buf != '\"') {
+ if (*buf == '\\') {
+ if (end - buf < 2) {
+ break;
+ }
+ ++buf;
+ }
+ ++buf;
+ }
+ if (buf == end || *buf != '\"') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ ++buf;
+ }
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_constant_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (!ctx->unquoted) {
+#else
+ {
+#endif
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ }
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_object_start(flatcc_json_parser_t *ctx, const char *buf, const char *end, int *more)
+{
+ if (buf == end || *buf != '{') {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_object);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == '}') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ *more = 1;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_object_end(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int *more)
+{
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ if (*buf != ',') {
+ *more = 0;
+ if (*buf != '}') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ } else {
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf == end) {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ }
+#if FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+ if (*buf == '}') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+#endif
+ *more = 1;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_array_start(flatcc_json_parser_t *ctx, const char *buf, const char *end, int *more)
+{
+ if (buf == end || *buf != '[') {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_array);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == ']') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ *more = 1;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_array_end(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int *more)
+{
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ if (*buf != ',') {
+ *more = 0;
+ if (*buf != ']') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_array);
+ } else {
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf == end) {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_array);
+ }
+#if FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+ if (*buf == ']') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+#endif
+ *more = 1;
+ return buf;
+}
+
+/*
+ * Detects if a symbol terminates at a given `pos` relative to the
+ * buffer pointer, or returns fast.
+ *
+ * Failure to match is not an error but a recommendation to try
+ * alternative longer suffixes - only if such do not exist will
+ * there be an error. If a match was not eventually found,
+ * the `flatcc_json_parser_unmatched_symbol` should be called to consume
+ * the symbol and generate error messages.
+ *
+ * If a match was detected, ':' and surrounding space is consumed,
+ * or an error is generated.
+ */
+static inline const char *flatcc_json_parser_match_symbol(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int pos)
+{
+ const char *mark = buf;
+
+ if (end - buf <= pos) {
+ return mark;
+ }
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ if (buf[pos] > 0x20 && buf[pos] != ':') {
+ return mark;
+ }
+ buf += pos;
+ ctx->unquoted = 0;
+ } else {
+#else
+ {
+#endif
+ if (buf[pos] != '\"') {
+ return mark;
+ }
+ buf += pos + 1;
+ }
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf != end && *buf == ':') {
+ ++buf;
+ return flatcc_json_parser_space(ctx, buf, end);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+}
+
+static inline const char *flatcc_json_parser_match_type_suffix(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos)
+{
+ if (end - buf <= pos + 5) {
+ return buf;
+ }
+ if (memcmp(buf + pos, "_type", 5)) {
+ return buf;
+ }
+ return flatcc_json_parser_match_symbol(ctx, buf, end, pos + 5);
+}
+
+const char *flatcc_json_parser_unmatched_symbol(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+static inline const char *flatcc_json_parser_coerce_uint64(
+ flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, uint64_t *v)
+{
+ if (value_sign) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_underflow);
+ }
+ *v = value;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_coerce_bool(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, uint8_t *v)
+{
+ if (value_sign) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_underflow);
+ }
+ *v = (uint8_t)!!value;
+ return buf;
+}
+
+#define __flatcc_json_parser_define_coerce_unsigned(type, basetype, uctype) \
+static inline const char *flatcc_json_parser_coerce_ ## type( \
+ flatcc_json_parser_t *ctx, const char *buf, \
+ const char *end, int value_sign, uint64_t value, basetype *v) \
+{ \
+ if (value_sign) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_underflow); \
+ } \
+ if (value > uctype ## _MAX) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_overflow); \
+ } \
+ *v = (basetype)value; \
+ return buf; \
+}
+
+__flatcc_json_parser_define_coerce_unsigned(uint32, uint32_t, UINT32)
+__flatcc_json_parser_define_coerce_unsigned(uint16, uint16_t, UINT16)
+__flatcc_json_parser_define_coerce_unsigned(uint8, uint8_t, UINT8)
+
+#define __flatcc_json_parser_define_coerce_signed(type, basetype, uctype) \
+static inline const char *flatcc_json_parser_coerce_ ## type( \
+ flatcc_json_parser_t *ctx, const char *buf, \
+ const char *end, int value_sign, uint64_t value, basetype *v) \
+{ \
+ if (value_sign) { \
+ if (value > (uint64_t)(uctype ## _MAX) + 1) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_underflow); \
+ } \
+ *v = (basetype)-(int64_t)value; \
+ } else { \
+ if (value > uctype ## _MAX) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_overflow); \
+ } \
+ *v = (basetype)value; \
+ } \
+ return buf; \
+}
+
+__flatcc_json_parser_define_coerce_signed(int64, int64_t, INT64)
+__flatcc_json_parser_define_coerce_signed(int32, int32_t, INT32)
+__flatcc_json_parser_define_coerce_signed(int16, int16_t, INT16)
+__flatcc_json_parser_define_coerce_signed(int8, int8_t, INT8)
+
+static inline const char *flatcc_json_parser_coerce_float(
+ flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, float *v)
+{
+ (void)ctx;
+ (void)end;
+
+ *v = value_sign ? -(float)value : (float)value;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_coerce_double(
+ flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, double *v)
+{
+ (void)ctx;
+ (void)end;
+
+ *v = value_sign ? -(double)value : (double)value;
+ return buf;
+}
+
+const char *flatcc_json_parser_double(flatcc_json_parser_t *ctx, const char *buf, const char *end, double *v);
+
+const char *flatcc_json_parser_float(flatcc_json_parser_t *ctx, const char *buf, const char *end, float *v);
+
+/*
+ * If the buffer does not contain a valid start character for a numeric
+ * value, the function will return the input buffer without failure.
+ * This makes it possible to try a symbolic parse.
+ */
+const char *flatcc_json_parser_integer(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_sign, uint64_t *value);
+
+/* Returns unchanged buffer without error if `null` is not matched. */
+static inline const char *flatcc_json_parser_null(const char *buf, const char *end)
+{
+ if (end - buf >= 4 && memcmp(buf, "null", 4) == 0) {
+ return buf + 4;
+ }
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_none(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end)
+{
+ if (end - buf >= 4 && memcmp(buf, "null", 4) == 0) {
+ return buf + 4;
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end,
+ flatcc_json_parser_error_union_none_not_null);
+}
+
+/*
+ * `parsers` is a null terminated array of parsers with at least one
+ * valid parser. A numeric literal parser may also be included.
+ */
+#define __flatcc_json_parser_define_integral_parser(type, basetype) \
+static inline const char *flatcc_json_parser_ ## type( \
+ flatcc_json_parser_t *ctx, \
+ const char *buf, const char *end, basetype *v) \
+{ \
+ uint64_t value = 0; \
+ int value_sign = 0; \
+ const char *mark = buf; \
+ \
+ *v = 0; \
+ if (buf == end) { \
+ return buf; \
+ } \
+ buf = flatcc_json_parser_integer(ctx, buf, end, &value_sign, &value); \
+ if (buf != mark) { \
+ return flatcc_json_parser_coerce_ ## type(ctx, \
+ buf, end, value_sign, value, v); \
+ } \
+ return buf; \
+}
+
+__flatcc_json_parser_define_integral_parser(uint64, uint64_t)
+__flatcc_json_parser_define_integral_parser(uint32, uint32_t)
+__flatcc_json_parser_define_integral_parser(uint16, uint16_t)
+__flatcc_json_parser_define_integral_parser(uint8, uint8_t)
+__flatcc_json_parser_define_integral_parser(int64, int64_t)
+__flatcc_json_parser_define_integral_parser(int32, int32_t)
+__flatcc_json_parser_define_integral_parser(int16, int16_t)
+__flatcc_json_parser_define_integral_parser(int8, int8_t)
+
+static inline const char *flatcc_json_parser_bool(flatcc_json_parser_t *ctx, const char *buf, const char *end, uint8_t *v)
+{
+ const char *k;
+ uint8_t tmp;
+
+ k = buf;
+ if (end - buf >= 4 && memcmp(buf, "true", 4) == 0) {
+ *v = 1;
+ return k + 4;
+ } else if (end - buf >= 5 && memcmp(buf, "false", 5) == 0) {
+ *v = 0;
+ return k + 5;
+ }
+ buf = flatcc_json_parser_uint8(ctx, buf, end, &tmp);
+ *v = !!tmp;
+ return buf;
+}
+
+/*
+ * The `parsers` argument is a zero terminated array of parser
+ * functions with increasingly general scopes.
+ *
+ * Symbols can be or'ed together by listing multiple space separated
+ * flags in source being parsed, like `{ x : "Red Blue" }`.
+ * Intended for flags, but generally available.
+ *
+ * `aggregate` means there are more symbols to follow.
+ *
+ * This function does not return the input `buf` value if the match was
+ * unsuccessful; it will either match or error.
+ */
+typedef const char *flatcc_json_parser_integral_symbol_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, int *value_sign, uint64_t *value, int *aggregate);
+
+/*
+ * Raise an error if a syntax like `color: Red Green` is seen unless
+ * explicitly permitted. `color: "Red Green"`, `"color": "Red Green"`,
+ * or `color: Red` is permitted if unquoted symbols are permitted but
+ * unquoted lists are not. Google's flatc JSON parser does not allow multiple
+ * symbolic values unless quoted, so this is the default.
+ */
+#if !FLATCC_JSON_PARSE_ALLOW_UNQUOTED || FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST
+#define __flatcc_json_parser_init_check_unquoted_list()
+#define __flatcc_json_parser_check_unquoted_list()
+#else
+#define __flatcc_json_parser_init_check_unquoted_list() int list_count = 0;
+#define __flatcc_json_parser_check_unquoted_list() \
+ if (list_count++ && ctx->unquoted) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_unquoted_symbolic_list); \
+ }
+#endif
+
+#define __flatcc_json_parser_define_symbolic_integral_parser(type, basetype)\
+static const char *flatcc_json_parser_symbolic_ ## type( \
+ flatcc_json_parser_t *ctx, \
+ const char *buf, const char *end, \
+ flatcc_json_parser_integral_symbol_f *parsers[], \
+ basetype *v) \
+{ \
+ flatcc_json_parser_integral_symbol_f **p; \
+ const char *mark; \
+ basetype tmp = 0; \
+ uint64_t value; \
+ int value_sign, aggregate; \
+ __flatcc_json_parser_init_check_unquoted_list() \
+ \
+ *v = 0; \
+ buf = flatcc_json_parser_constant_start(ctx, buf, end); \
+ if (buf == end) { \
+ return buf; \
+ } \
+ do { \
+ p = parsers; \
+ do { \
+ /* call parser function */ \
+ buf = (*p)(ctx, (mark = buf), end, \
+ &value_sign, &value, &aggregate); \
+ if (buf == end) { \
+ return buf; \
+ } \
+ } while (buf == mark && *++p); \
+ if (mark == buf) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_expected_scalar); \
+ } \
+ __flatcc_json_parser_check_unquoted_list() \
+ if (end == flatcc_json_parser_coerce_ ## type(ctx, \
+ buf, end, value_sign, value, &tmp)) { \
+ return end; \
+ } \
+ /* \
+ * `+=`, not `|=` because we also coerce to float and double, \
+ * and because we need to handle signed values. This may give \
+ * unexpected results with duplicate flags. \
+ */ \
+ *v += tmp; \
+ } while (aggregate); \
+ return buf; \
+}
+
+__flatcc_json_parser_define_symbolic_integral_parser(uint64, uint64_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint32, uint32_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint16, uint16_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint8, uint8_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int64, int64_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int32, int32_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int16, int16_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int8, int8_t)
+
+__flatcc_json_parser_define_symbolic_integral_parser(bool, uint8_t)
+
+/* We still parse integral values, but coerce to float or double. */
+__flatcc_json_parser_define_symbolic_integral_parser(float, float)
+__flatcc_json_parser_define_symbolic_integral_parser(double, double)
+
+/* Parse vector as a base64 or base64url encoded string with no spaces permitted. */
+const char *flatcc_json_parser_build_uint8_vector_base64(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref, int urlsafe);
+
+/*
+ * This doesn't do anything other than validate and advance past
+ * a JSON value which may use unquoted symbols.
+ *
+ * Upon call it is assumed that leading space has been stripped and that
+ * a JSON value is expected (i.e. root, or just after ':' in a
+ * container object, or less likely as an array member). Any trailing
+ * comma is assumed to belong to the parent context. Returns a parse
+ * location stripped of space, so after the call the container should
+ * expect ',', '}', ']', or EOF if the JSON is valid.
+ */
+const char *flatcc_json_parser_generic_json(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+/* Parse a JSON table. */
+typedef const char *flatcc_json_parser_table_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *pref);
+
+/* Parses a JSON struct. */
+typedef const char *flatcc_json_parser_struct_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *pref);
+
+/* Constructs a table, struct, or string object unless the type is 0 or unknown. */
+typedef const char *flatcc_json_parser_union_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, uint8_t type, flatcc_builder_ref_t *pref);
+
+typedef int flatcc_json_parser_is_known_type_f(uint8_t type);
+
+/* Called at start by table parsers with at least 1 union. */
+const char *flatcc_json_parser_prepare_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_total, size_t *handle);
+
+const char *flatcc_json_parser_finalize_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t handle);
+
+const char *flatcc_json_parser_union(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_type(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_type_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser,
+ flatcc_json_parser_is_known_type_f accept_type);
+
+/*
+ * Parses a table as root.
+ *
+ * Use the flag `flatcc_json_parser_f_with_size` to create a buffer with
+ * size prefix.
+ *
+ * `ctx` may be null, or an uninitialized json parser to receive parse results.
+ * `builder` must be a newly initialized or reset builder object.
+ * `buf`, `bufsiz` may be larger than the parsed json if trailing
+ * space or zeroes are expected, but they must represent a valid memory buffer.
+ * `fid` must be null, or a valid file identifier.
+ * `flags` default to 0. See also `flatcc_json_parser_f_` constants.
+ */
+int flatcc_json_parser_table_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser);
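+
+/*
+ * Usage sketch (not part of the API), assuming a zero return value means
+ * success and `myschema_parse_json_table` stands in for a parser
+ * generated by `flatcc --json` for a concrete schema:
+ *
+ *     flatcc_builder_t builder;
+ *     flatcc_json_parser_t parse_ctx;
+ *     void *buffer = 0;
+ *     size_t size = 0;
+ *
+ *     flatcc_builder_init(&builder);
+ *     if (flatcc_json_parser_table_as_root(&builder, &parse_ctx, json,
+ *             strlen(json), 0, 0, myschema_parse_json_table) == 0) {
+ *         buffer = flatcc_builder_finalize_buffer(&builder, &size);
+ *         // use buffer; free it when done
+ *     }
+ *     flatcc_builder_clear(&builder);
+ */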
+
+/*
+ * Similar to `flatcc_json_parser_table_as_root` but parses a struct as
+ * root.
+ */
+int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_struct_f *parser);
+
+#include "flatcc/portable/pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_JSON_PARSE_H */
diff --git a/include/flatcc/flatcc_json_printer.h b/include/flatcc/flatcc_json_printer.h
new file mode 100644
index 0000000..cab49a1
--- /dev/null
+++ b/include/flatcc/flatcc_json_printer.h
@@ -0,0 +1,788 @@
+#ifndef FLATCC_JSON_PRINTER_H
+#define FLATCC_JSON_PRINTER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Definitions for default implementation, do not assume these are
+ * always valid.
+ */
+#define FLATCC_JSON_PRINT_FLUSH_SIZE (1024 * 16)
+#define FLATCC_JSON_PRINT_RESERVE 64
+#define FLATCC_JSON_PRINT_BUFFER_SIZE (FLATCC_JSON_PRINT_FLUSH_SIZE + FLATCC_JSON_PRINT_RESERVE)
+
+#ifndef FLATCC_JSON_PRINTER_ALLOC
+#define FLATCC_JSON_PRINTER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+
+#ifndef FLATCC_JSON_PRINTER_FREE
+#define FLATCC_JSON_PRINTER_FREE(p) FLATCC_FREE(p)
+#endif
+
+#ifndef FLATCC_JSON_PRINTER_REALLOC
+#define FLATCC_JSON_PRINTER_REALLOC(p, n) FLATCC_REALLOC(p, n)
+#endif
+
+/* Initial size that grows exponentially. */
+#define FLATCC_JSON_PRINT_DYN_BUFFER_SIZE 4096
+
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_flatbuffers.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+#define FLATCC_JSON_PRINT_ERROR_MAP(XX) \
+ XX(ok, "ok") \
+ /* \
+ * When the flatbuffer is null, has too small a header, or has \
+ * mismatching identifier when a match was requested. \
+ */ \
+ XX(bad_input, "bad input") \
+ XX(deep_recursion, "deep recursion") \
+ /* \
+ * When the output was larger than the available fixed length buffer, \
+ * or dynamic allocation could not grow the buffer sufficiently. \
+ */ \
+ XX(overflow, "overflow")
+
+enum flatcc_json_printer_error_no {
+#define XX(no, str) flatcc_json_printer_error_##no,
+ FLATCC_JSON_PRINT_ERROR_MAP(XX)
+#undef XX
+};
+
+#define flatcc_json_printer_ok flatcc_json_printer_error_ok
+
+typedef struct flatcc_json_printer_ctx flatcc_json_printer_t;
+
+typedef void flatcc_json_printer_flush_f(flatcc_json_printer_t *ctx, int all);
+
+struct flatcc_json_printer_ctx {
+ char *buf;
+ size_t size;
+ size_t flush_size;
+ size_t total;
+ const char *pflush;
+ char *p;
+ uint8_t own_buffer;
+ uint8_t indent;
+ uint8_t unquote;
+ uint8_t noenum;
+ uint8_t skip_default;
+ uint8_t force_default;
+ int level;
+ int error;
+
+ void *fp;
+ flatcc_json_printer_flush_f *flush;
+};
+
+static inline void flatcc_json_printer_set_error(flatcc_json_printer_t *ctx, int err)
+{
+ if (!ctx->error) {
+ ctx->error = err;
+ }
+}
+
+const char *flatcc_json_printer_error_string(int err);
+
+static inline int flatcc_json_printer_get_error(flatcc_json_printer_t *ctx)
+{
+ return ctx->error;
+}
+
+/*
+ * Call to reuse the context between prints without
+ * returning the buffer. If a file pointer is being used,
+ * it will remain open.
+ *
+ * Reset does not affect the formatting settings (indentation and
+ * operational flags), but it does zero the indentation level.
+ */
+static inline void flatcc_json_printer_reset(flatcc_json_printer_t *ctx)
+{
+ ctx->p = ctx->buf;
+ ctx->level = 0;
+ ctx->total = 0;
+ ctx->error = 0;
+}
+
+/*
+ * A custom init function with a custom flush function can be
+ * implemented. A few have been provided:
+ * init with an external fixed length buffer, and init with a dynamically
+ * growing buffer.
+ *
+ * Because there are a lot of small print functions, it is essentially
+ * always faster to print to a local buffer than to write to io directly,
+ * for example via fprintf or fwrite. The flush callback is used to
+ * move data when enough has been collected.
+ *
+ * `fp` should be of type `FILE *` but we do not enforce it here
+ * because it allows the header to be independent of <stdio.h>
+ * when not required. If `fp` is null, it defaults to stdout.
+ *
+ * Returns -1 on alloc error (no cleanup needed), or 0 on success.
+ * Eventually the clear method must be called to return memory.
+ *
+ * The file pointer may be stdout or a custom file. The file pointer
+ * is not affected by reset or clear and should be closed manually.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init(flatcc_json_printer_t *ctx, void *fp);
+
+/*
+ * Prints to an external buffer and sets an overflow error if the buffer
+ * is too small. Earlier content is then overwritten. A custom version of
+ * this function could flush the content elsewhere before allowing the
+ * buffer content to be overwritten. The `buffer_size` must be at least
+ * `FLATCC_JSON_PRINT_RESERVE`, which is small but large enough to hold
+ * entire numbers and the like.
+ *
+ * It is not strictly necessary to call clear because the buffer is
+ * external, but it is still good form in case the context type is
+ * changed later.
+ *
+ * Returns -1 on buffer size error (no cleanup needed), or 0 on success.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init_buffer(flatcc_json_printer_t *ctx, char *buffer, size_t buffer_size);
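+
+/*
+ * Fixed buffer usage sketch (the actual printing in the middle would be
+ * done by the helpers or generated printers declared further below):
+ *
+ *     flatcc_json_printer_t printer;
+ *     char out[4096];
+ *     size_t size;
+ *     void *json;
+ *
+ *     if (flatcc_json_printer_init_buffer(&printer, out, sizeof(out)) == 0) {
+ *         // ... print something ...
+ *         flatcc_json_printer_flush(&printer);
+ *         if (!flatcc_json_printer_get_error(&printer)) {
+ *             json = flatcc_json_printer_get_buffer(&printer, &size);
+ *         }
+ *         flatcc_json_printer_clear(&printer);
+ *     }
+ */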
+
+/*
+ * Returns the current buffer pointer and also the content size in
+ * `buffer_size` if it is not null. The operation is not very useful for
+ * file oriented printers (created with `init`) and will then only
+ * return the unflushed buffer content. For fixed length buffers
+ * (`init_buffer`), only the last content is available if the buffer
+ * overflowed. Works well with `init_dynamic_buffer` when the dynamic
+ * buffer is reused, otherwise `finalize_dynamic_buffer` could be more
+ * appropriate.
+ *
+ * The returned buffer is zero terminated.
+ *
+ * The returned pointer is only valid until next operation and should
+ * not deallocated manually.
+ */
+void *flatcc_json_printer_get_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size);
+
+/*
+ * Set to non-zero if names and enum symbols can be unquoted thus
+ * diverging from standard JSON while remaining compatible with `flatc`
+ * JSON flavor.
+ */
+static inline void flatcc_json_printer_set_unquoted(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->unquote = !!x;
+}
+
+/*
+ * Set to non-zero if enums should always be printed as numbers.
+ * Otherwise enums are printed as a symbol for member values, and as
+ * numbers for other values.
+ *
+ * NOTE: this setting will not affect code generated with enum mapping
+ * disabled - statically disabling enum mapping is significantly faster
+ * for enums, less so for union types.
+ */
+static inline void flatcc_json_printer_set_noenum(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->noenum = !!x;
+}
+
+/*
+ * Skip printing an existing scalar field if it equals the default value.
+ * Note that this setting is not mutually exclusive to `set_force_default`.
+ */
+static inline void flatcc_json_printer_set_skip_default(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->skip_default = !!x;
+}
+
+/*
+ * Override skipping absent scalar fields and print the default value.
+ * Note that this setting is not mutually exclusive to `set_skip_default`.
+ */
+static inline void flatcc_json_printer_set_force_default(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->force_default = !!x;
+}
+
+
+/*
+ * Set pretty-print indentation as a number of spaces. 0 (the default)
+ * is compact with no spaces or linebreaks; anything above triggers
+ * pretty printing.
+ */
+static inline void flatcc_json_printer_set_indent(flatcc_json_printer_t *ctx, uint8_t x)
+{
+ ctx->indent = x;
+}
+
+/*
+ * Override the default compact valid JSON format with a
+ * pretty printed non-strict version. Enums are translated
+ * to names, which is also the default.
+ */
+static inline void flatcc_json_printer_set_nonstrict(flatcc_json_printer_t *ctx)
+{
+ flatcc_json_printer_set_indent(ctx, 2);
+ flatcc_json_printer_set_unquoted(ctx, 1);
+ flatcc_json_printer_set_noenum(ctx, 0);
+}
+
+typedef uint32_t flatcc_json_printer_flags_t;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_unquote = 1;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_noenum = 2;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_skip_default = 4;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_force_default = 8;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_pretty = 16;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_nonstrict = 32;
+
+/*
+ * May be called instead of setting operational modes individually.
+ * Formatting is strict quoted JSON without pretty printing by default.
+ *
+ * flags are:
+ *
+ * `unquote`,
+ * `noenum`,
+ * `skip_default`,
+ * `force_default`,
+ * `pretty`,
+ * `nonstrict`
+ *
+ * `pretty` flag sets indentation to 2.
+ * `nonstrict` implies: `noenum`, `unquote`, `pretty`.
+ */
+static inline void flatcc_json_printer_set_flags(flatcc_json_printer_t *ctx, flatcc_json_printer_flags_t flags)
+{
+ ctx->unquote = !!(flags & flatcc_json_printer_f_unquote);
+ ctx->noenum = !!(flags & flatcc_json_printer_f_noenum);
+ ctx->skip_default = !!(flags & flatcc_json_printer_f_skip_default);
+ ctx->force_default = !!(flags & flatcc_json_printer_f_force_default);
+ if (flags & flatcc_json_printer_f_pretty) {
+ flatcc_json_printer_set_indent(ctx, 2);
+ }
+ if (flags & flatcc_json_printer_f_nonstrict) {
+ flatcc_json_printer_set_nonstrict(ctx);
+ }
+}
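+
+/*
+ * For example, pretty printed output that omits scalar fields holding
+ * their default value could be configured as:
+ *
+ *     flatcc_json_printer_set_flags(ctx,
+ *             flatcc_json_printer_f_pretty | flatcc_json_printer_f_skip_default);
+ */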
+
+
+/*
+ * Detects if the context type uses dynamically allocated memory
+ * using malloc and realloc and frees any such memory.
+ *
+ * Not all context types need to be cleared.
+ */
+void flatcc_json_printer_clear(flatcc_json_printer_t *ctx);
+
+/*
+ * Ensures that there is always buffer capacity for printing the next
+ * primitive with delimiters.
+ *
+ * Only flushes complete flush units and is inexpensive to call.
+ * The content buffer has an extra reserve which ensures basic
+ * data types and delimiters can always be printed after a partial
+ * flush. At the end, a `flush` is required to flush the
+ * remaining incomplete buffer data.
+ *
+ * Numbers do not call partial flush but will always fit into the reserve
+ * capacity after a partial flush, also surrounded by delimiters.
+ *
+ * Variable length operations generally submit a partial flush so it is
+ * safe to print a number after a name without flushing, but vectors of
+ * numbers must (and do) issue a partial flush between elements. This is
+ * handled automatically but must be considered if using the primitives
+ * for special purposes. Because repeated partial flushes are very cheap
+ * this is only a concern for high performance applications.
+ *
+ * When indentation is enabled, a partial flush is also issued
+ * automatically.
+ */
+static inline void flatcc_json_printer_flush_partial(flatcc_json_printer_t *ctx)
+{
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+}
+
+/* Returns the total printed size, both flushed and still in the buffer. */
+static inline size_t flatcc_json_printer_total(flatcc_json_printer_t *ctx)
+{
+ return ctx->total + (size_t)(ctx->p - ctx->buf);
+}
+
+/*
+ * Flush the remaining data not flushed by partial flush. It is valid to
+ * call at any point if it is acceptable to have unaligned flush units,
+ * but this is not desirable if, for example, compression or encryption
+ * is added to the flush pipeline.
+ *
+ * Not called automatically at the end of printing a flatbuffer object
+ * in case more data needs to be appended without submitting incomplete
+ * flush units prematurely - for example adding a newline at the end.
+ *
+ * The flush behavior depends on the underlying `ctx` object, for
+ * example dynamic buffers have no distinction between partial and full
+ * flushes - here it is merely ensured that the buffer always has a
+ * reserve capacity left.
+ *
+ * Returns the total printed size.
+ */
+static inline size_t flatcc_json_printer_flush(flatcc_json_printer_t *ctx)
+{
+ ctx->flush(ctx, 1);
+ return flatcc_json_printer_total(ctx);
+}
+
+/*
+ * Helper functions to print anything into the json buffer.
+ * Strings are escaped.
+ *
+ * When pretty printing (indent > 0), level 0 has special significance -
+ * so if wrapping printed json in a manually printed container json
+ * object, these functions can help manage this.
+ */
+
+/* Escaped and quoted string. */
+void flatcc_json_printer_string(flatcc_json_printer_t *ctx, const char *s, size_t n);
+/* Unescaped and unquoted string. */
+void flatcc_json_printer_write(flatcc_json_printer_t *ctx, const char *s, size_t n);
+/* Print a newline and issues a partial flush. */
+void flatcc_json_printer_nl(flatcc_json_printer_t *ctx);
+/* Like numbers, a partial flush is not issued. */
+void flatcc_json_printer_char(flatcc_json_printer_t *ctx, char c);
+/* Indents and issues a partial flush. */
+void flatcc_json_printer_indent(flatcc_json_printer_t *ctx);
+/* Adjust indentation level, usually +/-1. */
+void flatcc_json_printer_add_level(flatcc_json_printer_t *ctx, int n);
+/* Returns current indentation level (0 is top level). */
+int flatcc_json_printer_get_level(flatcc_json_printer_t *ctx);
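+
+/*
+ * Sketch of wrapping printed output in a manually printed container
+ * object with the helpers above (compact output; the "payload" key is
+ * arbitrary and the nested print is left out):
+ *
+ *     flatcc_json_printer_char(ctx, '{');
+ *     flatcc_json_printer_add_level(ctx, 1);
+ *     flatcc_json_printer_string(ctx, "payload", 7);
+ *     flatcc_json_printer_char(ctx, ':');
+ *     // ... print a table or struct here ...
+ *     flatcc_json_printer_add_level(ctx, -1);
+ *     flatcc_json_printer_char(ctx, '}');
+ *     flatcc_json_printer_flush(ctx);
+ */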
+
+/*
+ * If called explicitly, be aware that repeated calls to numeric
+ * printers may cause buffer overflow without a flush in between.
+ */
+void flatcc_json_printer_uint8(flatcc_json_printer_t *ctx, uint8_t v);
+void flatcc_json_printer_uint16(flatcc_json_printer_t *ctx, uint16_t v);
+void flatcc_json_printer_uint32(flatcc_json_printer_t *ctx, uint32_t v);
+void flatcc_json_printer_uint64(flatcc_json_printer_t *ctx, uint64_t v);
+void flatcc_json_printer_int8(flatcc_json_printer_t *ctx, int8_t v);
+void flatcc_json_printer_int16(flatcc_json_printer_t *ctx, int16_t v);
+void flatcc_json_printer_int32(flatcc_json_printer_t *ctx, int32_t v);
+void flatcc_json_printer_int64(flatcc_json_printer_t *ctx, int64_t v);
+void flatcc_json_printer_bool(flatcc_json_printer_t *ctx, int v);
+void flatcc_json_printer_float(flatcc_json_printer_t *ctx, float v);
+void flatcc_json_printer_double(flatcc_json_printer_t *ctx, double v);
+
+void flatcc_json_printer_enum(flatcc_json_printer_t *ctx,
+ const char *symbol, size_t len);
+
+/*
+ * Convenience function to add a trailing newline, flush the buffer,
+ * test for error and reset the context for reuse.
+ *
+ * Returns total size printed or < 0 on error.
+ *
+ * This function makes most sense for file oriented output.
+ * See also `finalize_dynamic_buffer`.
+ */
+static inline int flatcc_json_printer_finalize(flatcc_json_printer_t *ctx)
+{
+ int ret;
+ flatcc_json_printer_nl(ctx);
+ ret = (int)flatcc_json_printer_flush(ctx);
+ if (ctx->error) {
+ ret = -1;
+ }
+ flatcc_json_printer_reset(ctx);
+ return ret;
+}
+
+/*
+ * Allocates a small buffer and grows it dynamically.
+ * The buffer survives past reset. To reduce size between uses, call clear
+ * followed by an init call. To reuse the buffer, just call reset between uses.
+ * If `buffer_size` is 0 a sensible default is used. The size is
+ * automatically rounded up to the reserved size if too small.
+ *
+ * Returns -1 on alloc error (no cleanup needed), or 0 on success.
+ * Eventually the clear method must be called to return memory.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init_dynamic_buffer(flatcc_json_printer_t *ctx, size_t buffer_size);
+
+/*
+ * Similar to calling `finalize` but returns the buffer and does NOT
+ * reset, but rather clears printer object and the returned buffer must
+ * be deallocated with `free`.
+ *
+ * The returned buffer is zero terminated.
+ *
+ * NOTE: it is entirely optional to use this method. For repeated use
+ * of dynamic buffers, printing a newline (or not) followed by `get_buffer`
+ * and `reset` is an alternative.
+ *
+ * Stores the printed buffer size in `buffer_size` if it is not null.
+ *
+ * See also `get_dynamic_buffer`.
+ */
+void *flatcc_json_printer_finalize_dynamic_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size);
+
+
+/*************************************************************
+ * The following is normally only used by generated code.
+ *************************************************************/
+
+typedef struct flatcc_json_printer_table_descriptor flatcc_json_printer_table_descriptor_t;
+
+struct flatcc_json_printer_table_descriptor {
+ const void *table;
+ const void *vtable;
+ int vsize;
+ int ttl;
+ int count;
+};
+
+typedef struct flatcc_json_printer_union_descriptor flatcc_json_printer_union_descriptor_t;
+
+struct flatcc_json_printer_union_descriptor {
+ const void *member;
+ int ttl;
+ uint8_t type;
+};
+
+typedef void flatcc_json_printer_table_f(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td);
+
+typedef void flatcc_json_printer_struct_f(flatcc_json_printer_t *ctx,
+ const void *p);
+
+typedef void flatcc_json_printer_union_f(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud);
+
+/* Generated value to name map callbacks. */
+typedef void flatcc_json_printer_union_type_f(flatcc_json_printer_t *ctx, flatbuffers_utype_t type);
+typedef void flatcc_json_printer_uint8_enum_f(flatcc_json_printer_t *ctx, uint8_t v);
+typedef void flatcc_json_printer_uint16_enum_f(flatcc_json_printer_t *ctx, uint16_t v);
+typedef void flatcc_json_printer_uint32_enum_f(flatcc_json_printer_t *ctx, uint32_t v);
+typedef void flatcc_json_printer_uint64_enum_f(flatcc_json_printer_t *ctx, uint64_t v);
+typedef void flatcc_json_printer_int8_enum_f(flatcc_json_printer_t *ctx, int8_t v);
+typedef void flatcc_json_printer_int16_enum_f(flatcc_json_printer_t *ctx, int16_t v);
+typedef void flatcc_json_printer_int32_enum_f(flatcc_json_printer_t *ctx, int32_t v);
+typedef void flatcc_json_printer_int64_enum_f(flatcc_json_printer_t *ctx, int64_t v);
+typedef void flatcc_json_printer_bool_enum_f(flatcc_json_printer_t *ctx, flatbuffers_bool_t v);
+
+#define __define_print_scalar_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v);
+
+#define __define_print_scalar_optional_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len);
+
+#define __define_print_scalar_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _struct_field(flatcc_json_printer_t *ctx,\
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len);
+
+#define __define_print_scalar_array_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count);
+
+#define __define_print_enum_array_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_optional_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_scalar_vector_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _vector_field(flatcc_json_printer_t *ctx,\
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len);
+
+#define __define_print_enum_vector_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+__define_print_scalar_field_proto(uint8, uint8_t)
+__define_print_scalar_field_proto(uint16, uint16_t)
+__define_print_scalar_field_proto(uint32, uint32_t)
+__define_print_scalar_field_proto(uint64, uint64_t)
+__define_print_scalar_field_proto(int8, int8_t)
+__define_print_scalar_field_proto(int16, int16_t)
+__define_print_scalar_field_proto(int32, int32_t)
+__define_print_scalar_field_proto(int64, int64_t)
+__define_print_scalar_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_field_proto(float, float)
+__define_print_scalar_field_proto(double, double)
+
+__define_print_enum_field_proto(uint8, uint8_t)
+__define_print_enum_field_proto(uint16, uint16_t)
+__define_print_enum_field_proto(uint32, uint32_t)
+__define_print_enum_field_proto(uint64, uint64_t)
+__define_print_enum_field_proto(int8, int8_t)
+__define_print_enum_field_proto(int16, int16_t)
+__define_print_enum_field_proto(int32, int32_t)
+__define_print_enum_field_proto(int64, int64_t)
+__define_print_enum_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_optional_field_proto(uint8, uint8_t)
+__define_print_scalar_optional_field_proto(uint16, uint16_t)
+__define_print_scalar_optional_field_proto(uint32, uint32_t)
+__define_print_scalar_optional_field_proto(uint64, uint64_t)
+__define_print_scalar_optional_field_proto(int8, int8_t)
+__define_print_scalar_optional_field_proto(int16, int16_t)
+__define_print_scalar_optional_field_proto(int32, int32_t)
+__define_print_scalar_optional_field_proto(int64, int64_t)
+__define_print_scalar_optional_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_optional_field_proto(float, float)
+__define_print_scalar_optional_field_proto(double, double)
+
+__define_print_enum_optional_field_proto(uint8, uint8_t)
+__define_print_enum_optional_field_proto(uint16, uint16_t)
+__define_print_enum_optional_field_proto(uint32, uint32_t)
+__define_print_enum_optional_field_proto(uint64, uint64_t)
+__define_print_enum_optional_field_proto(int8, int8_t)
+__define_print_enum_optional_field_proto(int16, int16_t)
+__define_print_enum_optional_field_proto(int32, int32_t)
+__define_print_enum_optional_field_proto(int64, int64_t)
+__define_print_enum_optional_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_struct_field_proto(uint8, uint8_t)
+__define_print_scalar_struct_field_proto(uint16, uint16_t)
+__define_print_scalar_struct_field_proto(uint32, uint32_t)
+__define_print_scalar_struct_field_proto(uint64, uint64_t)
+__define_print_scalar_struct_field_proto(int8, int8_t)
+__define_print_scalar_struct_field_proto(int16, int16_t)
+__define_print_scalar_struct_field_proto(int32, int32_t)
+__define_print_scalar_struct_field_proto(int64, int64_t)
+__define_print_scalar_struct_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_struct_field_proto(float, float)
+__define_print_scalar_struct_field_proto(double, double)
+
+/*
+ * char arrays are special as there are no char fields
+ * without arrays and because they are printed as strings.
+ */
+__define_print_scalar_array_struct_field_proto(char, char)
+
+__define_print_scalar_array_struct_field_proto(uint8, uint8_t)
+__define_print_scalar_array_struct_field_proto(uint16, uint16_t)
+__define_print_scalar_array_struct_field_proto(uint32, uint32_t)
+__define_print_scalar_array_struct_field_proto(uint64, uint64_t)
+__define_print_scalar_array_struct_field_proto(int8, int8_t)
+__define_print_scalar_array_struct_field_proto(int16, int16_t)
+__define_print_scalar_array_struct_field_proto(int32, int32_t)
+__define_print_scalar_array_struct_field_proto(int64, int64_t)
+__define_print_scalar_array_struct_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_array_struct_field_proto(float, float)
+__define_print_scalar_array_struct_field_proto(double, double)
+
+__define_print_enum_array_struct_field_proto(uint8, uint8_t)
+__define_print_enum_array_struct_field_proto(uint16, uint16_t)
+__define_print_enum_array_struct_field_proto(uint32, uint32_t)
+__define_print_enum_array_struct_field_proto(uint64, uint64_t)
+__define_print_enum_array_struct_field_proto(int8, int8_t)
+__define_print_enum_array_struct_field_proto(int16, int16_t)
+__define_print_enum_array_struct_field_proto(int32, int32_t)
+__define_print_enum_array_struct_field_proto(int64, int64_t)
+__define_print_enum_array_struct_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_enum_struct_field_proto(uint8, uint8_t)
+__define_print_enum_struct_field_proto(uint16, uint16_t)
+__define_print_enum_struct_field_proto(uint32, uint32_t)
+__define_print_enum_struct_field_proto(uint64, uint64_t)
+__define_print_enum_struct_field_proto(int8, int8_t)
+__define_print_enum_struct_field_proto(int16, int16_t)
+__define_print_enum_struct_field_proto(int32, int32_t)
+__define_print_enum_struct_field_proto(int64, int64_t)
+__define_print_enum_struct_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_vector_field_proto(uint8, uint8_t)
+__define_print_scalar_vector_field_proto(uint16, uint16_t)
+__define_print_scalar_vector_field_proto(uint32, uint32_t)
+__define_print_scalar_vector_field_proto(uint64, uint64_t)
+__define_print_scalar_vector_field_proto(int8, int8_t)
+__define_print_scalar_vector_field_proto(int16, int16_t)
+__define_print_scalar_vector_field_proto(int32, int32_t)
+__define_print_scalar_vector_field_proto(int64, int64_t)
+__define_print_scalar_vector_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_vector_field_proto(float, float)
+__define_print_scalar_vector_field_proto(double, double)
+
+__define_print_enum_vector_field_proto(uint8, uint8_t)
+__define_print_enum_vector_field_proto(uint16, uint16_t)
+__define_print_enum_vector_field_proto(uint32, uint32_t)
+__define_print_enum_vector_field_proto(uint64, uint64_t)
+__define_print_enum_vector_field_proto(int8, int8_t)
+__define_print_enum_vector_field_proto(int16, int16_t)
+__define_print_enum_vector_field_proto(int32, int32_t)
+__define_print_enum_vector_field_proto(int64, int64_t)
+__define_print_enum_vector_field_proto(bool, flatbuffers_bool_t)
+
+void flatcc_json_printer_uint8_vector_base64_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len, int urlsafe);
+
+/*
+ * If `fid` is null, the identifier is not checked and is allowed to be
+ * entirely absent.
+ *
+ * The buffer must at least be aligned to uoffset_t on systems that
+ * require aligned memory addresses (as always for flatbuffers).
+ */
+int flatcc_json_printer_table_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_table_f *pf);
+
+int flatcc_json_printer_struct_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_struct_f *pf);
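+
+/*
+ * Usage sketch printing a flatbuffer to a dynamically grown string,
+ * where `flatbuffer`, `bufsiz` and the generated printer callback
+ * `myschema_print_json_table` are placeholders:
+ *
+ *     flatcc_json_printer_t printer;
+ *     size_t size;
+ *     char *json;
+ *
+ *     if (flatcc_json_printer_init_dynamic_buffer(&printer, 0) == 0) {
+ *         flatcc_json_printer_table_as_root(&printer, flatbuffer, bufsiz,
+ *                 0, myschema_print_json_table);
+ *         json = flatcc_json_printer_finalize_dynamic_buffer(&printer, &size);
+ *         // use json, then free(json)
+ *     }
+ */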
+
+/*
+ * Call before and after enum flags to ensure proper quotation. Enum
+ * quotes may be configured at runtime, but regardless of this, multiple
+ * flags may be forced to be quoted depending on a compile time flag since
+ * not all parsers may be able to handle unquoted space separated values
+ * even if they handle non-strict unquoted json otherwise.
+ *
+ * These should only be called when the value is not empty (0) and when
+ * there are no unknown flags in the value. Otherwise print the numeric value. The
+ * auto generated code deals with this.
+ *
+ * This bit twiddling hack may be useful:
+ *
+ * `multiple = 0 != (v & (v - 1));`
+ */
+void flatcc_json_printer_delimit_enum_flags(flatcc_json_printer_t *ctx, int multiple);
+
+/* The index increments from 0 to handle space. It is not the flag bit position. */
+void flatcc_json_printer_enum_flag(flatcc_json_printer_t *ctx, int index, const char *symbol, size_t len);
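+
+/*
+ * Sketch of how the two calls bracket a flag list (the symbols and bit
+ * assignments are hypothetical; generated code normally drives this):
+ *
+ *     int index = 0;
+ *     int multiple = 0 != (v & (v - 1));
+ *
+ *     flatcc_json_printer_delimit_enum_flags(ctx, multiple);
+ *     if (v & 1) flatcc_json_printer_enum_flag(ctx, index++, "Red", 3);
+ *     if (v & 2) flatcc_json_printer_enum_flag(ctx, index++, "Green", 5);
+ *     flatcc_json_printer_delimit_enum_flags(ctx, multiple);
+ */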
+
+/* A struct inside another struct, as opposed to inside a table or a root. */
+void flatcc_json_printer_embedded_struct_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_embedded_struct_array_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ size_t size, size_t count,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_struct_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_struct_f *pf);
+
+void flatcc_json_printer_string_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len);
+
+void flatcc_json_printer_string_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len);
+
+void flatcc_json_printer_table_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_struct_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ size_t size,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_table_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf);
+
+void flatcc_json_printer_struct_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_struct_f *pf);
+
+void flatcc_json_printer_table_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf);
+
+void flatcc_json_printer_union_table(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_struct(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_union_string(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_JSON_PRINTER_H */
diff --git a/include/flatcc/flatcc_portable.h b/include/flatcc/flatcc_portable.h
new file mode 100644
index 0000000..9b0eb0c
--- /dev/null
+++ b/include/flatcc/flatcc_portable.h
@@ -0,0 +1,14 @@
+#ifndef FLATCC_PORTABLE_H
+#define FLATCC_PORTABLE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc/portable/portable_basic.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_PORTABLE_H */
diff --git a/include/flatcc/flatcc_prologue.h b/include/flatcc/flatcc_prologue.h
new file mode 100644
index 0000000..3a74ed6
--- /dev/null
+++ b/include/flatcc/flatcc_prologue.h
@@ -0,0 +1,8 @@
+/* Include guard intentionally left out. */
+
+#define PDIAGNOSTIC_IGNORE_UNUSED
+#include "flatcc/portable/pdiagnostic_push.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
diff --git a/include/flatcc/flatcc_refmap.h b/include/flatcc/flatcc_refmap.h
new file mode 100644
index 0000000..062d94f
--- /dev/null
+++ b/include/flatcc/flatcc_refmap.h
@@ -0,0 +1,144 @@
+/*
+ * The flatcc builder supports storing a pointer to a refmap
+ * and wraps some operations to make them work as a dummy
+ * even if no refmap has been set. This enables optional
+ * DAG preservation possible during clone operations.
+ *
+ * A refmap maps a source address to a builder reference.
+ *
+ * This is just a map, but the semantics are important:
+ *
+ * The map preserves the identity of the source. It is not a
+ * cache because cache eviction would fail to properly track
+ * identity.
+ *
+ * The map is used for memoization during object cloning and
+ * may also be used by user logic doing similar operations.
+ * This ensures that identity is preserved so a source object is
+ * not duplicated which could lead to either loss of semantic
+ * information, or an explosion in size, or both. In some, or
+ * even most, cases this concern may not be important, but when
+ * it is important, it is important.
+ *
+ * The source address must not be reused for different content
+ * for the lifetime of the map, although the content does not
+ * have to be valid or even exist at that location since the source
+ * address is just used as a key.
+ *
+ * The lifetime may be a single clone operation which then
+ * tracks child object references as well, or it may be the
+ * lifetime of the buffer builder.
+ *
+ * The map may be flushed explicitly when the source addresses
+ * are no longer unique, such as when reusing a memory buffer,
+ * and when identity preservation is no longer important.
+ * Flushing a map is essentially the same as ending a lifetime.
+ *
+ * Multiple maps may exist concurrently for example if cloning
+ * an object twice into two new objects that should have
+ * separate identities. This is especially true and necessary
+ * when creating a new nested buffer because the nested buffer
+ * cannot share references with the parent. Cloning an object
+ * that contains a nested buffer does not require multiple maps
+ * because the nested buffer is then opaque.
+ */
+
+#ifndef FLATCC_REFMAP_H
+#define FLATCC_REFMAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc/flatcc_types.h"
+
+#ifndef FLATCC_REFMAP_MIN_BUCKETS
+/* 8 buckets gives us 5 useful initial entries with a load factor of 0.7 */
+#define FLATCC_REFMAP_MIN_BUCKETS 8
+#endif
+
+#define FLATCC_REFMAP_LOAD_FACTOR 0.7f
+
+typedef struct flatcc_refmap flatcc_refmap_t;
+typedef flatbuffers_soffset_t flatcc_refmap_ref_t;
+
+static const flatcc_refmap_ref_t flatcc_refmap_not_found = 0;
+
+struct flatcc_refmap_item {
+ const void *src;
+ flatcc_refmap_ref_t ref;
+};
+
+struct flatcc_refmap {
+ size_t count;
+ size_t buckets;
+ struct flatcc_refmap_item *table;
+ /* Use stack allocation for small maps. */
+ struct flatcc_refmap_item min_table[FLATCC_REFMAP_MIN_BUCKETS];
+};
+
+/*
+ * Fast zero initialization - does not allocate any memory.
+ * May be replaced by memset 0, but `init` avoids clearing the
+ * stack allocated initial hash table until it is needed.
+ */
+static inline int flatcc_refmap_init(flatcc_refmap_t *refmap)
+{
+ refmap->count = 0;
+ refmap->buckets = 0;
+ refmap->table = 0;
+ return 0;
+}
+
+/*
+ * Removes all items and deallocates memory.
+ * Not required unless `insert` or `resize` took place. The map can be
+ * reused subsequently without calling `init`.
+ */
+void flatcc_refmap_clear(flatcc_refmap_t *refmap);
+
+/*
+ * Keeps allocated memory as is, but removes all items. The map
+ * must be initialized first.
+ */
+void flatcc_refmap_reset(flatcc_refmap_t *refmap);
+
+/*
+ * Returns the inserted reference if the `src` pointer was found,
+ * without inspecting the content of the `src` pointer.
+ *
+ * Returns flatcc_refmap_not_found (default 0) if the `src` pointer was
+ * not found.
+ */
+flatcc_refmap_ref_t flatcc_refmap_find(flatcc_refmap_t *refmap, const void *src);
+
+/*
+ * Inserts a `src` source pointer and its associated `ref` reference
+ * into the refmap without inspecting the `src` pointer content. The
+ * `ref` value will be replaced if the `src` pointer already exists.
+ *
+ * Inserting null will just return the ref without updating the map.
+ *
+ * There is no delete operation which simplifies an open
+ * addressing hash table, and it isn't needed for this use case.
+ *
+ * Returns the input ref or not_found on allocation error.
+ */
+flatcc_refmap_ref_t flatcc_refmap_insert(flatcc_refmap_t *refmap, const void *src, flatcc_refmap_ref_t ref);
+
+/*
+ * Set the hash table to accommodate at least `count` items while staying
+ * within the predefined load factor.
+ *
+ * Resize is primarily an internal operation, but the user may resize
+ * ahead of a large anticipated load, or after a large load to shrink
+ * the table using 0 as the `count` argument. The table never shrinks
+ * on its own account.
+ */
+int flatcc_refmap_resize(flatcc_refmap_t *refmap, size_t count);
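+
+/*
+ * Usage sketch for manual memoization (the builder normally drives this
+ * during clone operations; `src` and `new_ref` are placeholders for a
+ * source pointer and the reference produced when building it):
+ *
+ *     flatcc_refmap_t refmap;
+ *     flatcc_refmap_ref_t ref;
+ *
+ *     flatcc_refmap_init(&refmap);
+ *     ref = flatcc_refmap_find(&refmap, src);
+ *     if (ref == flatcc_refmap_not_found) {
+ *         ref = flatcc_refmap_insert(&refmap, src, new_ref);
+ *     }
+ *     // reuse `ref` wherever `src` is referenced again
+ *     flatcc_refmap_clear(&refmap);
+ */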
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_REFMAP_H */
diff --git a/include/flatcc/flatcc_rtconfig.h b/include/flatcc/flatcc_rtconfig.h
new file mode 100644
index 0000000..59727b6
--- /dev/null
+++ b/include/flatcc/flatcc_rtconfig.h
@@ -0,0 +1,162 @@
+#ifndef FLATCC_RTCONFIG_H
+#define FLATCC_RTCONFIG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Include portability layer here since all other files depend on it. */
+#ifdef FLATCC_PORTABLE
+#include "flatcc/portable/portable.h"
+#endif
+
+/*
+ * Fast printing and parsing of double.
+ *
+ * This requires the grisu3/grisu3_* files to be in the include path,
+ * otherwise strtod and sprintf will be used (these are needed anyway
+ * as a fallback for cases not supported by grisu3).
+ */
+#ifndef FLATCC_USE_GRISU3
+#define FLATCC_USE_GRISU3 1
+#endif
+
+/*
+ * This requires a compiler with march=native or similar enabled so the
+ * __SSE4_2__ flag is defined. Otherwise it will have no effect.
+ *
+ * While SSE may be used for different purposes, it has (as of this
+ * writing) only been used to test the effect on JSON whitespace handling
+ * which improved, but not by a lot, assuming 64-bit unaligned access is
+ * otherwise available:
+ *
+ * With 8 space indentation, the JSON benchmark handles 308K parse ops/sec
+ * while SSE ups that to 333K parse ops/sec, or 336K if \r\n is also
+ * consumed by SSE. Disabling indentation leaves SSE space handling
+ * ineffective, and performance reaches 450K parse ops/sec and can
+ * improve further to 500+K parse ops/sec if inexact GRISU3 numbers are
+ * allowed (they are pretty accurate anyway, just not exact). This
+ * feature requires hacking a flag directly in the grisu3 double parsing
+ * lib and is only mentioned for comparison.
+ *
+ * In conclusion SSE doesn't add a lot to JSON space handling at least.
+ *
+ * Disabled by default, but can be overridden by the build system.
+ */
+#ifndef FLATCC_USE_SSE4_2
+#define FLATCC_USE_SSE4_2 0
+#endif
+
+/*
+ * The verifier only reports yes and no. The following setting
+ * enables assertions in debug builds. It must be compiled into
+ * the runtime library and is not normally the desired behavior.
+ *
+ * NOTE: enabling this can break test cases so use with build, not test.
+ */
+#if !defined(FLATCC_DEBUG_VERIFY) && !defined(NDEBUG)
+#define FLATCC_DEBUG_VERIFY 0
+#endif
+
+#if !defined(FLATCC_TRACE_VERIFY)
+#define FLATCC_TRACE_VERIFY 0
+#endif
+
+
+/*
+ * Limit recursion level for tables. Actual level may be deeper
+ * when structs are deeply nested - but these are limited by the
+ * schema compiler.
+ */
+#ifndef FLATCC_JSON_PRINT_MAX_LEVELS
+#define FLATCC_JSON_PRINT_MAX_LEVELS 100
+#endif
+
+/* Maximum length of names printed excluding the _type suffix. */
+#ifndef FLATCC_JSON_PRINT_NAME_LEN_MAX
+#define FLATCC_JSON_PRINT_NAME_LEN_MAX 100
+#endif
+
+/*
+ * Print float and double values with C99 hexadecimal floating point
+ * notation. This option is not valid JSON but it avoids precision
+ * loss, correctly handles NaN, +/-Infinity and is significantly faster
+ * to parse and print. Some JSON parsers rely on strtod which does
+ * support hexadecimal floating points when C99 compliant.
+ */
+#ifndef FLATCC_JSON_PRINT_HEX_FLOAT
+#define FLATCC_JSON_PRINT_HEX_FLOAT 0
+#endif
+
+/*
+ * Always print multiple enum flags like `color: "Red Green"`
+ * even when unquote is selected as an option for a single
+ * value like `color: Green`. Otherwise multiple values
+ * are printed as `color: Red Green`, but this could break
+ * some flatbuffer JSON parsers.
+ */
+#ifndef FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS
+#define FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS 1
+#endif
+
+/*
+ * The general nesting limit may be lower, but for skipping
+ * JSON we do not need to - we can set this high as it only
+ * costs a single char per level in a stack array.
+ */
+#ifndef FLATCC_JSON_PARSE_GENERIC_MAX_NEST
+#define FLATCC_JSON_PARSE_GENERIC_MAX_NEST 512
+#endif
+
+/* Store value even if it is default. */
+#ifndef FLATCC_JSON_PARSE_FORCE_DEFAULTS
+#define FLATCC_JSON_PARSE_FORCE_DEFAULTS 0
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+#define FLATCC_JSON_PARSE_ALLOW_UNQUOTED 1
+#endif
+
+/*
+ * Multiple enum values are by default not permitted unless
+ * quoted like `color: "Red Green"` as per Google's flatc JSON
+ * parser, while a single value like `color: Red` can be
+ * unquoted. Enabling this setting will allow `color: Red
+ * Green`, but only if FLATCC_JSON_PARSE_ALLOW_UNQUOTED is
+ * also enabled.
+ */
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST
+#define FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST 0
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNKNOWN_FIELD
+#define FLATCC_JSON_PARSE_ALLOW_UNKNOWN_FIELD 1
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+#define FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA 1
+#endif
+
+/*
+ * Just parse to the closing bracket '}' if set.
+ * Otherwise parse to end by consuming space and
+ * fail if anything but space follows.
+ */
+#ifndef FLATCC_PARSE_IGNORE_TRAILING_DATA
+#define FLATCC_PARSE_IGNORE_TRAILING_DATA 0
+#endif
+
+/*
+ * Optimize to parse a lot of white space, but
+ * in most cases it probably slows parsing down.
+ */
+#ifndef FLATCC_JSON_PARSE_WIDE_SPACE
+#define FLATCC_JSON_PARSE_WIDE_SPACE 0
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_RTCONFIG_H */
diff --git a/include/flatcc/flatcc_types.h b/include/flatcc/flatcc_types.h
new file mode 100644
index 0000000..69605d2
--- /dev/null
+++ b/include/flatcc/flatcc_types.h
@@ -0,0 +1,97 @@
+#ifndef FLATCC_TYPES_H
+#define FLATCC_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/*
+ * This should match generated type declarations in
+ * `flatbuffers_common_reader.h` (might have different name prefix).
+ * Read only generated code does not depend on library code,
+ * hence the duplication.
+ */
+#ifndef flatbuffers_types_defined
+#define flatbuffers_types_defined
+
+/*
+ * uoffset_t and soffset_t must be same integer type, except for sign.
+ * They can be (u)int16_t, (u)int32_t, or (u)int64_t.
+ * The default is (u)int32_t.
+ *
+ * voffset_t is expected to be uint16_t, but can experimentally be
+ * compiled from uint8_t up to uint32_t.
+ *
+ * ID_MAX is the largest value that can index a vtable. The table size
+ * is given as a voffset value. Each id represents a voffset value index
+ * from 0 to max inclusive. Space is required for two header voffset
+ * fields and the unaddressable highest index (due to the table size
+ * representation). For 16-bit voffsets this yields a max of 2^15 - 4,
+ * or (2^16 - 1) / 2 - 3.
+ */
+
+#define flatbuffers_uoffset_t_defined
+#define flatbuffers_soffset_t_defined
+#define flatbuffers_voffset_t_defined
+#define flatbuffers_utype_t_defined
+#define flatbuffers_bool_t_defined
+#define flatbuffers_thash_t_defined
+#define flatbuffers_fid_t_defined
+
+/* uoffset_t is also used for vector and string headers. */
+#define FLATBUFFERS_UOFFSET_MAX UINT32_MAX
+#define FLATBUFFERS_SOFFSET_MAX INT32_MAX
+#define FLATBUFFERS_SOFFSET_MIN INT32_MIN
+#define FLATBUFFERS_VOFFSET_MAX UINT16_MAX
+#define FLATBUFFERS_UTYPE_MAX UINT8_MAX
+/* Well - the max of the underlying type. */
+#define FLATBUFFERS_BOOL_MAX UINT8_MAX
+#define FLATBUFFERS_THASH_MAX UINT32_MAX
+
+#define FLATBUFFERS_ID_MAX (FLATBUFFERS_VOFFSET_MAX / sizeof(flatbuffers_voffset_t) - 3)
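+/* With the default 16-bit voffset: (65535 / 2) - 3 = 32764, i.e. 2^15 - 4. */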
+/* Vectors of empty structs can yield div by zero, so we must guard against this. */
+#define FLATBUFFERS_COUNT_MAX(elem_size) (FLATBUFFERS_UOFFSET_MAX/((elem_size) == 0 ? 1 : (elem_size)))
+
+#define FLATBUFFERS_UOFFSET_WIDTH 32
+#define FLATBUFFERS_COUNT_WIDTH 32
+#define FLATBUFFERS_SOFFSET_WIDTH 32
+#define FLATBUFFERS_VOFFSET_WIDTH 16
+#define FLATBUFFERS_UTYPE_WIDTH 8
+#define FLATBUFFERS_BOOL_WIDTH 8
+#define FLATBUFFERS_THASH_WIDTH 32
+
+#define FLATBUFFERS_TRUE 1
+#define FLATBUFFERS_FALSE 0
+
+#define FLATBUFFERS_PROTOCOL_IS_LE 1
+#define FLATBUFFERS_PROTOCOL_IS_BE 0
+
+typedef uint32_t flatbuffers_uoffset_t;
+typedef int32_t flatbuffers_soffset_t;
+typedef uint16_t flatbuffers_voffset_t;
+typedef uint8_t flatbuffers_utype_t;
+typedef uint8_t flatbuffers_bool_t;
+typedef uint32_t flatbuffers_thash_t;
+/* Public facing type operations. */
+typedef flatbuffers_utype_t flatbuffers_union_type_t;
+
+static const flatbuffers_bool_t flatbuffers_true = FLATBUFFERS_TRUE;
+static const flatbuffers_bool_t flatbuffers_false = FLATBUFFERS_FALSE;
+
+#define FLATBUFFERS_IDENTIFIER_SIZE (FLATBUFFERS_THASH_WIDTH / 8)
+
+typedef char flatbuffers_fid_t[FLATBUFFERS_IDENTIFIER_SIZE];
+
+#endif /* flatbuffers_types_defined */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_TYPES_H */
diff --git a/include/flatcc/flatcc_unaligned.h b/include/flatcc/flatcc_unaligned.h
new file mode 100644
index 0000000..a7dc546
--- /dev/null
+++ b/include/flatcc/flatcc_unaligned.h
@@ -0,0 +1,16 @@
+#ifndef FLATCC_UNALIGNED_H
+#define FLATCC_UNALIGNED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc/portable/punaligned.h"
+
+#define FLATCC_ALLOW_UNALIGNED_ACCESS PORTABLE_UNALIGNED_ACCESS
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_UNALIGNED_H */
diff --git a/include/flatcc/flatcc_verifier.h b/include/flatcc/flatcc_verifier.h
new file mode 100644
index 0000000..7e0d296
--- /dev/null
+++ b/include/flatcc/flatcc_verifier.h
@@ -0,0 +1,239 @@
+#ifndef FLATCC_VERIFIER_H
+#define FLATCC_VERIFIER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Runtime support for verifying flatbuffers.
+ *
+ * Link with the verifier implementation file.
+ *
+ * Note:
+ *
+ * 1) nested buffers will NOT have their identifier verified.
+ * The user may do so subsequently. The reason is in part because
+ * the information is not readily available without generated reader code,
+ * in part because the buffer might use a different, but valid,
+ * identifier and the user has no chance of specifying this in the
+ * verifier code. The root verifier also doesn't assume a specific id
+ * but accepts a user supplied input which may be null.
+ *
+ * 2) All offsets in a buffer are verified for alignment relative to the
+ * buffer start, but the buffer itself is only assumed to be aligned to
+ * uoffset_t. A reader should therefore ensure buffer alignment separately
+ * before reading the buffer. Nested buffers are in fact checked for
+ * alignment, but still only relative to the root buffer.
+ *
+ * 3) The max nesting level includes nested buffer nestings, so the
+ * verifier might fail even if the individual buffers are otherwise ok.
+ * This is to prevent abuse with lots of nested buffers.
+ *
+ *
+ * IMPORTANT:
+ *
+ * Even if the verifier passes, the buffer may be invalid to access due to
+ * lack of alignment in memory, but the verifier is safe to call.
+ *
+ * NOTE: The buffer is not safe to modify after verification because an
+ * attacker may craft overlapping data structures such that modification
+ * of one field updates another in a way that violates the buffer
+ * constraints. This may also be caused by a clever compression scheme.
+ *
+ * It is likely faster to rewrite the table although this is also
+ * dangerous because an attacker (or even a normal user) can craft a DAG
+ * that explodes when expanded carelessly. A safer approach is to
+ * hash all object references written and reuse those that match. This
+ * will expand references into other objects while bounding expansion
+ * and it will be safe to update assuming shared objects are ok to
+ * update.
+ *
+ */
+
+#include "flatcc/flatcc_types.h"
+
+#define FLATCC_VERIFY_ERROR_MAP(XX)\
+ XX(ok, "ok")\
+ XX(buffer_header_too_small, "buffer header too small")\
+ XX(identifier_mismatch, "identifier mismatch")\
+ XX(max_nesting_level_reached, "max nesting level reached")\
+ XX(required_field_missing, "required field missing")\
+ XX(runtime_buffer_header_not_aligned, "runtime: buffer header not aligned")\
+ XX(runtime_buffer_size_too_large, "runtime: buffer size too large")\
+ XX(string_not_zero_terminated, "string not zero terminated")\
+ XX(string_out_of_range, "string out of range")\
+ XX(struct_out_of_range, "struct out of range")\
+ XX(struct_size_overflow, "struct size overflow")\
+ XX(struct_unaligned, "struct unaligned")\
+ XX(table_field_not_aligned, "table field not aligned")\
+ XX(table_field_out_of_range, "table field out of range")\
+ XX(table_field_size_overflow, "table field size overflow")\
+ XX(table_header_out_of_range_or_unaligned, "table header out of range or unaligned")\
+ XX(vector_header_out_of_range_or_unaligned, "vector header out of range or unaligned")\
+ XX(string_header_out_of_range_or_unaligned, "string header out of range or unaligned")\
+ XX(offset_out_of_range, "offset out of range")\
+ XX(table_offset_out_of_range_or_unaligned, "table offset out of range or unaligned")\
+ XX(table_size_out_of_range, "table size out of range")\
+ XX(type_field_absent_from_required_union_field, "type field absent from required union field")\
+ XX(type_field_absent_from_required_union_vector_field, "type field absent from required union vector field")\
+ XX(union_cannot_have_a_table_without_a_type, "union cannot have a table without a type")\
+ XX(union_type_NONE_cannot_have_a_value, "union value field present with type NONE")\
+ XX(vector_count_exceeds_representable_vector_size, "vector count exceeds representable vector size")\
+ XX(vector_out_of_range, "vector out of range")\
+ XX(vtable_header_out_of_range, "vtable header out of range")\
+ XX(vtable_header_too_small, "vtable header too small")\
+ XX(vtable_offset_out_of_range_or_unaligned, "vtable offset out of range or unaligned")\
+ XX(vtable_size_out_of_range_or_unaligned, "vtable size out of range or unaligned")\
+ XX(vtable_size_overflow, "vtable size overflow")\
+ XX(union_element_absent_without_type_NONE, "union element absent without type NONE")\
+ XX(union_element_present_with_type_NONE, "union element present with type NONE")\
+ XX(union_vector_length_mismatch, "union type and table vectors have different lengths")\
+ XX(union_vector_verification_not_supported, "union vector verification not supported")\
+ XX(not_supported, "not supported")
+
+
+enum flatcc_verify_error_no {
+#define XX(no, str) flatcc_verify_error_##no,
+ FLATCC_VERIFY_ERROR_MAP(XX)
+#undef XX
+};
+
+#define flatcc_verify_ok flatcc_verify_error_ok
+
+const char *flatcc_verify_error_string(int err);
+
+/*
+ * Type specific table verifier function that checks each known field
+ * for existence in the vtable and then calls the appropriate verifier
+ * function in this library.
+ *
+ * The table descriptor values have been verified for bounds, overflow,
+ * and alignment, but vtable entries after the header must be verified
+ * for all fields the table verifier function understands.
+ *
+ * Calls other type-specific verifier functions recursively whenever a
+ * table field, union or table vector is encountered.
+ */
+typedef struct flatcc_table_verifier_descriptor flatcc_table_verifier_descriptor_t;
+struct flatcc_table_verifier_descriptor {
+ /* Pointer to buffer. Not assumed to be aligned beyond uoffset_t. */
+ const void *buf;
+ /* Buffer size. */
+ flatbuffers_uoffset_t end;
+ /* Time to live: number of nesting levels left before failure. */
+ int ttl;
+ /* Vtable of current table. */
+ const void *vtable;
+ /* Table offset relative to buffer start */
+ flatbuffers_uoffset_t table;
+ /* Table end relative to buffer start as per vtable[1] field. */
+ flatbuffers_voffset_t tsize;
+ /* Size of vtable in bytes. */
+ flatbuffers_voffset_t vsize;
+};
+
+typedef int flatcc_table_verifier_f(flatcc_table_verifier_descriptor_t *td);
+
+typedef struct flatcc_union_verifier_descriptor flatcc_union_verifier_descriptor_t;
+
+struct flatcc_union_verifier_descriptor {
+ /* Pointer to buffer. Not assumed to be aligned beyond uoffset_t. */
+ const void *buf;
+ /* Buffer size. */
+ flatbuffers_uoffset_t end;
+ /* Time to live: number of nesting levels left before failure. */
+ int ttl;
+ /* Type of union value to be verified */
+ flatbuffers_utype_t type;
+ /* Offset relative to buffer start to where union value offset is stored. */
+ flatbuffers_uoffset_t base;
+ /* Offset of union value relative to base. */
+ flatbuffers_uoffset_t offset;
+};
+
+typedef int flatcc_union_verifier_f(flatcc_union_verifier_descriptor_t *ud);
+
+/*
+ * The `as_root` functions are normally the only functions called
+ * explicitly in this interface.
+ *
+ * If `fid` is null, the identifier is not checked and is allowed to be entirely absent.
+ *
+ * The buffer must at least be aligned to uoffset_t on systems that
+ * require aligned memory addresses. The buffer pointer's alignment is
+ * not significant to internal verification of the buffer.
+ */
+int flatcc_verify_struct_as_root(const void *buf, size_t bufsiz, const char *fid,
+ size_t size, uint16_t align);
+
+int flatcc_verify_struct_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash,
+ size_t size, uint16_t align);
+
+int flatcc_verify_table_as_root(const void *buf, size_t bufsiz, const char *fid,
+ flatcc_table_verifier_f *root_tvf);
+
+int flatcc_verify_table_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash,
+ flatcc_table_verifier_f *root_tvf);
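+
+/*
+ * Usage sketch (the generated verifier name and the "MONS" identifier
+ * are hypothetical examples):
+ *
+ *     int err = flatcc_verify_table_as_root(buf, bufsiz, "MONS",
+ *             MyGame_Monster_verify_table);
+ *     if (err != flatcc_verify_ok) {
+ *         printf("buffer invalid: %s\n", flatcc_verify_error_string(err));
+ *     }
+ */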
+/*
+ * The buffer header is verified by any of the `_as_root` verifiers, but
+ * this function may be used as a quick sanity check.
+ */
+int flatcc_verify_buffer_header(const void *buf, size_t bufsiz, const char *fid);
+
+int flatcc_verify_typed_buffer_header(const void *buf, size_t bufsiz, flatbuffers_thash_t type_hash);
+
+/*
+ * The following functions are typically called by a generated table
+ * verifier function.
+ */
+
+/* Scalar, enum or struct field. */
+int flatcc_verify_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, size_t size, uint16_t align);
+/* Vector of scalars, enums or structs. */
+int flatcc_verify_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, size_t elem_size, uint16_t align, size_t max_count);
+int flatcc_verify_string_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required);
+int flatcc_verify_string_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required);
+int flatcc_verify_table_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_table_verifier_f tvf);
+int flatcc_verify_table_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_table_verifier_f tvf);
+/* Table verifiers pass 0 as fid. */
+int flatcc_verify_struct_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, const char *fid,
+ size_t size, uint16_t align);
+int flatcc_verify_table_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, const char *fid,
+ uint16_t align, flatcc_table_verifier_f tvf);
+
+/*
+ * A NONE type will not accept a table being present, a required
+ * union will not accept a type field being absent, and an absent type
+ * field will not accept a table field being present.
+ *
+ * If the above checks pass and the type is not NONE, the uvf callback
+ * is executed. It must test each known table type and silently accept
+ * any unknown table type for forward compatibility. A union table
+ * value is verified without the required flag because an absent table
+ * encodes a typed NULL value while an absent type field encodes a
+ * missing union which fails if required.
+ */
+int flatcc_verify_union_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf);
+
+int flatcc_verify_union_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf);
+
+int flatcc_verify_union_table(flatcc_union_verifier_descriptor_t *ud, flatcc_table_verifier_f *tvf);
+int flatcc_verify_union_struct(flatcc_union_verifier_descriptor_t *ud, size_t size, uint16_t align);
+int flatcc_verify_union_string(flatcc_union_verifier_descriptor_t *ud);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_VERIFIER_H */
diff --git a/include/flatcc/flatcc_version.h b/include/flatcc/flatcc_version.h
new file mode 100644
index 0000000..78bc9c8
--- /dev/null
+++ b/include/flatcc/flatcc_version.h
@@ -0,0 +1,14 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FLATCC_VERSION_TEXT "0.6.2"
+#define FLATCC_VERSION_MAJOR 0
+#define FLATCC_VERSION_MINOR 6
+#define FLATCC_VERSION_PATCH 2
+/* 1 or 0 */
+#define FLATCC_VERSION_RELEASED 0
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/include/flatcc/portable/LICENSE b/include/flatcc/portable/LICENSE
new file mode 100644
index 0000000..bb7ca57
--- /dev/null
+++ b/include/flatcc/portable/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+Some files also Copyright author of MathGeoLib (https://github.com/juj)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
diff --git a/include/flatcc/portable/README.md b/include/flatcc/portable/README.md
new file mode 100644
index 0000000..512b1a8
--- /dev/null
+++ b/include/flatcc/portable/README.md
@@ -0,0 +1,57 @@
+A small library for adding C11 compatibility to older C compilers,
+covering only a small but highly useful subset such as static
+assertions, inline functions, and alignment.
+
+C++ is not a primary target, but the library has been updated to be more
+C++ friendly based on user feedback.
+
+Many compilers already have the required functionality but with slightly
+different names and arguments.
+
+In addition, compatibility with the Linux `<endian.h>` system file is
+provided, and `punaligned.h` is provided for unaligned memory reads,
+which in part depends on endian support.
+
+The library also provides fast integer printing as well as floating
+point printing and parsing, optionally using the grisu3 algorithm with
+a fallback to strtod and related functions. The `pgrisu3` folder is
+header only and excludes the test cases found in the main grisu3
+project the files were extracted from. Base64 conversion is also provided.
+
+Integer conversion is not just an optimization. It is more difficult
+than it would appear to portably parse an integer of known size such as
+`uint64_t` up to at most n bytes, which is needed for safe parsing. At
+the same time, the sometimes significant performance gains warrant
+custom implementations that might as well be done once and for all.
+
+Files can be included individually, or portable.h may be included to get
+all functionality. If the compiler is C11 compliant, portable.h will not
+include anything, with one exception: it provides a patch for static
+assertions, which clang does not fully support in all versions even with C11 flagged.
+
+The grisu3 header files are the runtime files for the Grisu3 floating
+point conversion to/from text C port. Test coverage is provided separately.
+This library can be used indirectly via pparsefp.h and pprintfp.h.
+
+The `pstatic_assert.h` file is often needed on C11 systems because the
+compiler and standard library may support `_Static_assert` without
+`static_assert`. For compilers without `_Static_assert`, a unique
+identifier is needed for each assertion. This is done with the
+non-standard `__COUNTER__` macro, with a fallback to
+`pstatic_assert_scope.h` for systems without the `__COUNTER__` macro.
+Because of this fallback, `pstatic_assert.h` needs to be included in
+every file using `static_assert` in order to increment a scope counter,
+otherwise there is a risk of assert identifier conflicts when
+`static_assert` happens on the same line in different files.
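+
+For example, a translation unit using the portable static assertion could
+look like this (a minimal sketch, assuming the header is on the include
+path):
+
+```c
+#include "pstatic_assert.h"
+
+/* Fails at compile time on platforms with unexpectedly small pointers. */
+static_assert(sizeof(void *) >= 4, "unexpected pointer size");
+```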
+
+The `paligned_alloc.h` file implements the non-standard `aligned_free`
+to match the C11 standard `aligned_alloc` call. `aligned_free` is
+normally equivalent to `free`, but not on systems where aligned
+allocations cannot be released with a system provided `free` call. Use
+of `aligned_free` is thus optional on some systems, but using it
+increases general portability at the cost of pure C11 compatibility.
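+
+A minimal usage sketch, assuming the header is on the include path:
+
+```c
+#include "paligned_alloc.h"
+
+void example(void)
+{
+    void *p = aligned_alloc(64, 1024); /* size is a multiple of the alignment */
+    /* ... use p ... */
+    aligned_free(p); /* plain free is not portable for this allocation */
+}
+```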
+
+IMPORTANT NOTE: this library has been used on various platforms and
+updated with user feedback, but it is impossible to systematically test
+all platforms, so please test for specific use cases and report
+any issues upstream.
diff --git a/include/flatcc/portable/grisu3_math.h b/include/flatcc/portable/grisu3_math.h
new file mode 100644
index 0000000..cff6e8c
--- /dev/null
+++ b/include/flatcc/portable/grisu3_math.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/* 2016-02-02: Updated by mikkelfj
+ *
+ * Extracted from MathGeoLib grisu3.c, Apache 2.0 license, and extended.
+ *
+ * This file is usually included via grisu3_print.h or grisu3_parse.h.
+ *
+ * The original MathGeoLib dtoa_grisu3 implementation is largely
+ * unchanged except for the uint64 to double cast. The remaining changes
+ * are file structure, name changes, and new additions for parsing:
+ *
+ * - Split into header files only:
+ * grisu3_math.h, grisu3_print.h, (added grisu3_parse.h)
+ *
+ * - names prefixed with grisu3_, grisu3_diy_fp_, GRISU3_.
+ * - added static to all functions.
+ * - disabled clang unused function warnings.
+ * - guarded <stdint.h> to allow for alternative impl.
+ * - added extra numeric constants needed for parsing.
+ * - added dec_pow, cast_double_from_diy_fp.
+ * - changed some function names for consistency.
+ * - moved printing specific grisu3 functions to grisu3_print.h.
+ * - changed double to uint64 cast to avoid aliasing.
+ * - added new grisu3_parse.h for parsing doubles.
+ * - grisu3_print_double (dtoa_grisu3) format .1 as 0.1 needed for valid JSON output
+ * and grisu3_parse_double wouldn't consume it.
+ * - grisu3_print_double changed formatting to prefer 0.012 over 1.2e-2.
+ *
+ * These changes make it possible to include the files as headers only
+ * in other software libraries without risking name conflicts, and to
+ * extend the implementation with a port of Google's Double Conversion
+ * strtod functionality for parsing doubles.
+ *
+ * Extracted from: rev. 915501a / Dec 22, 2015
+ * <https://github.com/juj/MathGeoLib/blob/master/src/Math/grisu3.c>
+ * MathGeoLib License: http://www.apache.org/licenses/LICENSE-2.0.html
+ */
+
+#ifndef GRISU3_MATH_H
+#define GRISU3_MATH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Guarded to allow inclusion of pstdint.h first, if stdint.h is not supported. */
+#ifndef UINT8_MAX
+#include <stdint.h> /* uint64_t etc. */
+#endif
+
+#ifdef GRISU3_NO_ASSERT
+#undef GRISU3_ASSERT
+#define GRISU3_ASSERT(x) ((void)0)
+#endif
+
+#ifndef GRISU3_ASSERT
+#include <assert.h> /* assert */
+#define GRISU3_ASSERT(x) assert(x)
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer */
+#endif
+
+#define GRISU3_D64_SIGN 0x8000000000000000ULL
+#define GRISU3_D64_EXP_MASK 0x7FF0000000000000ULL
+#define GRISU3_D64_FRACT_MASK 0x000FFFFFFFFFFFFFULL
+#define GRISU3_D64_IMPLICIT_ONE 0x0010000000000000ULL
+#define GRISU3_D64_EXP_POS 52
+#define GRISU3_D64_EXP_BIAS 1075
+#define GRISU3_D64_DENORM_EXP (-GRISU3_D64_EXP_BIAS + 1)
+#define GRISU3_DIY_FP_FRACT_SIZE 64
+#define GRISU3_D_1_LOG2_10 0.30102999566398114 /* 1 / lg(10) */
+#define GRISU3_MIN_TARGET_EXP -60
+#define GRISU3_MASK32 0xFFFFFFFFULL
+#define GRISU3_MIN_CACHED_EXP -348
+#define GRISU3_MAX_CACHED_EXP 340
+#define GRISU3_CACHED_EXP_STEP 8
+#define GRISU3_D64_MAX_DEC_EXP 309
+#define GRISU3_D64_MIN_DEC_EXP -324
+#define GRISU3_D64_INF GRISU3_D64_EXP_MASK
+
+#define GRISU3_MIN(x,y) ((x) <= (y) ? (x) : (y))
+#define GRISU3_MAX(x,y) ((x) >= (y) ? (x) : (y))
+
+
+typedef struct grisu3_diy_fp
+{
+ uint64_t f;
+ int e;
+} grisu3_diy_fp_t;
+
+typedef struct grisu3_diy_fp_power
+{
+ uint64_t fract;
+ int16_t b_exp, d_exp;
+} grisu3_diy_fp_power_t;
+
+typedef union {
+ uint64_t u64;
+ double d64;
+} grisu3_cast_double_t;
+
+static uint64_t grisu3_cast_uint64_from_double(double d)
+{
+ grisu3_cast_double_t cd;
+ cd.d64 = d;
+ return cd.u64;
+}
+
+static double grisu3_cast_double_from_uint64(uint64_t u)
+{
+ grisu3_cast_double_t cd;
+ cd.u64 = u;
+ return cd.d64;
+}
+
+#define grisu3_double_infinity grisu3_cast_double_from_uint64(GRISU3_D64_INF)
+#define grisu3_double_nan grisu3_cast_double_from_uint64(GRISU3_D64_INF + 1)
+
+static const grisu3_diy_fp_power_t grisu3_diy_fp_pow_cache[] =
+{
+ { 0xfa8fd5a0081c0288ULL, -1220, -348 },
+ { 0xbaaee17fa23ebf76ULL, -1193, -340 },
+ { 0x8b16fb203055ac76ULL, -1166, -332 },
+ { 0xcf42894a5dce35eaULL, -1140, -324 },
+ { 0x9a6bb0aa55653b2dULL, -1113, -316 },
+ { 0xe61acf033d1a45dfULL, -1087, -308 },
+ { 0xab70fe17c79ac6caULL, -1060, -300 },
+ { 0xff77b1fcbebcdc4fULL, -1034, -292 },
+ { 0xbe5691ef416bd60cULL, -1007, -284 },
+ { 0x8dd01fad907ffc3cULL, -980, -276 },
+ { 0xd3515c2831559a83ULL, -954, -268 },
+ { 0x9d71ac8fada6c9b5ULL, -927, -260 },
+ { 0xea9c227723ee8bcbULL, -901, -252 },
+ { 0xaecc49914078536dULL, -874, -244 },
+ { 0x823c12795db6ce57ULL, -847, -236 },
+ { 0xc21094364dfb5637ULL, -821, -228 },
+ { 0x9096ea6f3848984fULL, -794, -220 },
+ { 0xd77485cb25823ac7ULL, -768, -212 },
+ { 0xa086cfcd97bf97f4ULL, -741, -204 },
+ { 0xef340a98172aace5ULL, -715, -196 },
+ { 0xb23867fb2a35b28eULL, -688, -188 },
+ { 0x84c8d4dfd2c63f3bULL, -661, -180 },
+ { 0xc5dd44271ad3cdbaULL, -635, -172 },
+ { 0x936b9fcebb25c996ULL, -608, -164 },
+ { 0xdbac6c247d62a584ULL, -582, -156 },
+ { 0xa3ab66580d5fdaf6ULL, -555, -148 },
+ { 0xf3e2f893dec3f126ULL, -529, -140 },
+ { 0xb5b5ada8aaff80b8ULL, -502, -132 },
+ { 0x87625f056c7c4a8bULL, -475, -124 },
+ { 0xc9bcff6034c13053ULL, -449, -116 },
+ { 0x964e858c91ba2655ULL, -422, -108 },
+ { 0xdff9772470297ebdULL, -396, -100 },
+ { 0xa6dfbd9fb8e5b88fULL, -369, -92 },
+ { 0xf8a95fcf88747d94ULL, -343, -84 },
+ { 0xb94470938fa89bcfULL, -316, -76 },
+ { 0x8a08f0f8bf0f156bULL, -289, -68 },
+ { 0xcdb02555653131b6ULL, -263, -60 },
+ { 0x993fe2c6d07b7facULL, -236, -52 },
+ { 0xe45c10c42a2b3b06ULL, -210, -44 },
+ { 0xaa242499697392d3ULL, -183, -36 },
+ { 0xfd87b5f28300ca0eULL, -157, -28 },
+ { 0xbce5086492111aebULL, -130, -20 },
+ { 0x8cbccc096f5088ccULL, -103, -12 },
+ { 0xd1b71758e219652cULL, -77, -4 },
+ { 0x9c40000000000000ULL, -50, 4 },
+ { 0xe8d4a51000000000ULL, -24, 12 },
+ { 0xad78ebc5ac620000ULL, 3, 20 },
+ { 0x813f3978f8940984ULL, 30, 28 },
+ { 0xc097ce7bc90715b3ULL, 56, 36 },
+ { 0x8f7e32ce7bea5c70ULL, 83, 44 },
+ { 0xd5d238a4abe98068ULL, 109, 52 },
+ { 0x9f4f2726179a2245ULL, 136, 60 },
+ { 0xed63a231d4c4fb27ULL, 162, 68 },
+ { 0xb0de65388cc8ada8ULL, 189, 76 },
+ { 0x83c7088e1aab65dbULL, 216, 84 },
+ { 0xc45d1df942711d9aULL, 242, 92 },
+ { 0x924d692ca61be758ULL, 269, 100 },
+ { 0xda01ee641a708deaULL, 295, 108 },
+ { 0xa26da3999aef774aULL, 322, 116 },
+ { 0xf209787bb47d6b85ULL, 348, 124 },
+ { 0xb454e4a179dd1877ULL, 375, 132 },
+ { 0x865b86925b9bc5c2ULL, 402, 140 },
+ { 0xc83553c5c8965d3dULL, 428, 148 },
+ { 0x952ab45cfa97a0b3ULL, 455, 156 },
+ { 0xde469fbd99a05fe3ULL, 481, 164 },
+ { 0xa59bc234db398c25ULL, 508, 172 },
+ { 0xf6c69a72a3989f5cULL, 534, 180 },
+ { 0xb7dcbf5354e9beceULL, 561, 188 },
+ { 0x88fcf317f22241e2ULL, 588, 196 },
+ { 0xcc20ce9bd35c78a5ULL, 614, 204 },
+ { 0x98165af37b2153dfULL, 641, 212 },
+ { 0xe2a0b5dc971f303aULL, 667, 220 },
+ { 0xa8d9d1535ce3b396ULL, 694, 228 },
+ { 0xfb9b7cd9a4a7443cULL, 720, 236 },
+ { 0xbb764c4ca7a44410ULL, 747, 244 },
+ { 0x8bab8eefb6409c1aULL, 774, 252 },
+ { 0xd01fef10a657842cULL, 800, 260 },
+ { 0x9b10a4e5e9913129ULL, 827, 268 },
+ { 0xe7109bfba19c0c9dULL, 853, 276 },
+ { 0xac2820d9623bf429ULL, 880, 284 },
+ { 0x80444b5e7aa7cf85ULL, 907, 292 },
+ { 0xbf21e44003acdd2dULL, 933, 300 },
+ { 0x8e679c2f5e44ff8fULL, 960, 308 },
+ { 0xd433179d9c8cb841ULL, 986, 316 },
+ { 0x9e19db92b4e31ba9ULL, 1013, 324 },
+ { 0xeb96bf6ebadf77d9ULL, 1039, 332 },
+ { 0xaf87023b9bf0ee6bULL, 1066, 340 }
+};
+
+/* Avoid dependence on lib math to get (int)ceil(v) */
+static int grisu3_iceil(double v)
+{
+ int k = (int)v;
+ if (v < 0) return k;
+ return v - k == 0 ? k : k + 1;
+}
+
+static int grisu3_diy_fp_cached_pow(int exp, grisu3_diy_fp_t *p)
+{
+ int k = grisu3_iceil((exp+GRISU3_DIY_FP_FRACT_SIZE-1) * GRISU3_D_1_LOG2_10);
+ int i = (k-GRISU3_MIN_CACHED_EXP-1) / GRISU3_CACHED_EXP_STEP + 1;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+ return grisu3_diy_fp_pow_cache[i].d_exp;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_minus(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
+ grisu3_diy_fp_t d; d.f = x.f - y.f; d.e = x.e;
+ GRISU3_ASSERT(x.e == y.e && x.f >= y.f);
+ return d;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_multiply(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
+ uint64_t a, b, c, d, ac, bc, ad, bd, tmp;
+ grisu3_diy_fp_t r;
+ a = x.f >> 32; b = x.f & GRISU3_MASK32;
+ c = y.f >> 32; d = y.f & GRISU3_MASK32;
+ ac = a*c; bc = b*c;
+ ad = a*d; bd = b*d;
+ tmp = (bd >> 32) + (ad & GRISU3_MASK32) + (bc & GRISU3_MASK32);
+ tmp += 1U << 31; /* round */
+ r.f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ r.e = x.e + y.e + 64;
+ return r;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_normalize(grisu3_diy_fp_t n)
+{
+ GRISU3_ASSERT(n.f != 0);
+ while(!(n.f & 0xFFC0000000000000ULL)) { n.f <<= 10; n.e -= 10; }
+ while(!(n.f & GRISU3_D64_SIGN)) { n.f <<= 1; --n.e; }
+ return n;
+}
+
+static grisu3_diy_fp_t grisu3_cast_diy_fp_from_double(double d)
+{
+ grisu3_diy_fp_t fp;
+ uint64_t u64 = grisu3_cast_uint64_from_double(d);
+ if (!(u64 & GRISU3_D64_EXP_MASK)) { fp.f = u64 & GRISU3_D64_FRACT_MASK; fp.e = 1 - GRISU3_D64_EXP_BIAS; }
+ else { fp.f = (u64 & GRISU3_D64_FRACT_MASK) + GRISU3_D64_IMPLICIT_ONE; fp.e = (int)((u64 & GRISU3_D64_EXP_MASK) >> GRISU3_D64_EXP_POS) - GRISU3_D64_EXP_BIAS; }
+ return fp;
+}
+
+static double grisu3_cast_double_from_diy_fp(grisu3_diy_fp_t n)
+{
+ const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
+ const uint64_t frac_mask = GRISU3_D64_FRACT_MASK;
+ const int denorm_exp = GRISU3_D64_DENORM_EXP;
+ const int exp_bias = GRISU3_D64_EXP_BIAS;
+ const int exp_pos = GRISU3_D64_EXP_POS;
+
+ grisu3_diy_fp_t v = n;
+ uint64_t e_biased;
+
+ while (v.f > hidden_bit + frac_mask) {
+ v.f >>= 1;
+ ++v.e;
+ }
+ if (v.e < denorm_exp) {
+ return 0.0;
+ }
+ while (v.e > denorm_exp && (v.f & hidden_bit) == 0) {
+ v.f <<= 1;
+ --v.e;
+ }
+ if (v.e == denorm_exp && (v.f & hidden_bit) == 0) {
+ e_biased = 0;
+ } else {
+ e_biased = (uint64_t)(v.e + exp_bias);
+ }
+ return grisu3_cast_double_from_uint64((v.f & frac_mask) | (e_biased << exp_pos));
+}
+
+/* pow10_cache[i] = 10^(i-1) */
+static const unsigned int grisu3_pow10_cache[] = { 0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
+
+static int grisu3_largest_pow10(uint32_t n, int n_bits, uint32_t *power)
+{
+ int guess = ((n_bits + 1) * 1233 >> 12) + 1/*skip first entry*/;
+ if (n < grisu3_pow10_cache[guess]) --guess; /* We don't have any guarantees that 2^n_bits <= n. */
+ *power = grisu3_pow10_cache[guess];
+ return guess;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_MATH_H */
diff --git a/include/flatcc/portable/grisu3_parse.h b/include/flatcc/portable/grisu3_parse.h
new file mode 100644
index 0000000..3d67c9a
--- /dev/null
+++ b/include/flatcc/portable/grisu3_parse.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Port of parts of Google Double Conversion strtod functionality
+ * but with fallback to strtod instead of a bignum implementation.
+ *
+ * Based on grisu3 math from MathGeoLib.
+ *
+ * See also grisu3_math.h comments.
+ */
+
+#ifndef GRISU3_PARSE_H
+#define GRISU3_PARSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include <stdlib.h>
+#include <limits.h>
+
+#include "grisu3_math.h"
+
+
+/*
+ * The maximum number of characters a valid number may contain. The parse
+ * fails if the input length is longer but the character after max len
+ * was part of the number.
+ *
+ * The length should not be set too high because it protects against
+ * overflow in the exponent part derived from the input length.
+ */
+#define GRISU3_NUM_MAX_LEN 1000
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_parse_double_is_defined 1
+
+/*
+ * Disable to compare performance and to test diy_fp algorithm in
+ * broader range.
+ */
+#define GRISU3_PARSE_FAST_CASE
+
+/* May result in an off-by-one error; otherwise, when uncertain, fall back to strtod. */
+//#define GRISU3_PARSE_ALLOW_ERROR
+
+
+/*
+ * The dec output exponent jumps in 8, so the result is offset at most
+ * by 7 when the input is within range.
+ */
+static int grisu3_diy_fp_cached_dec_pow(int d_exp, grisu3_diy_fp_t *p)
+{
+ const int cached_offset = -GRISU3_MIN_CACHED_EXP;
+ const int d_exp_dist = GRISU3_CACHED_EXP_STEP;
+ int i, a_exp;
+
+ GRISU3_ASSERT(GRISU3_MIN_CACHED_EXP <= d_exp);
+ GRISU3_ASSERT(d_exp < GRISU3_MAX_CACHED_EXP + d_exp_dist);
+
+ i = (d_exp + cached_offset) / d_exp_dist;
+ a_exp = grisu3_diy_fp_pow_cache[i].d_exp;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+
+ GRISU3_ASSERT(a_exp <= d_exp);
+ GRISU3_ASSERT(d_exp < a_exp + d_exp_dist);
+
+ return a_exp;
+}
+
+/*
+ * Ported from google double conversion strtod using
+ * MathGeoLibs diy_fp functions for grisu3 in C.
+ *
+ * ulp_half_error is set if non-zero trailing
+ * characters had to be truncated.
+ *
+ * The actual value we need to encode is:
+ *
+ * (sign ? -1 : 1) * fraction * 2 ^ (exponent - fraction_exp)
+ * where exponent is the base 10 exponent assuming the decimal point is
+ * after the first digit. fraction_exp is the base 10 magnitude of the
+ * fraction or number of significant digits - 1.
+ *
+ * If the exponent is between 0 and 22 and the fraction is encoded in
+ * the lower 53 bits (the largest bit is implicit in a double, but not
+ * in this fraction), then the value can be trivially converted to
+ * double without loss of precision. If the fraction was in fact
+ * multiplied by trailing zeroes that we didn't convert to exponent,
+ * there are values larger than 53 bits that can also be encoded
+ * trivially - but then it is better to handle this during parsing
+ * if it is worthwhile. We do not optimize for this here, because it
+ * can be done in a simple check before calling, and because it might
+ * not be worthwhile to do at all since it very likely will fail for
+ * numbers printed to be convertible back to double without loss.
+ *
+ * Returns 0 if conversion was not exact. In that case the value is
+ * either one smaller than the correct one, or the correct one.
+ *
+ * Exponents must be range protected before calling otherwise cached
+ * powers will blow up.
+ *
+ * Google Double Conversion seems to prefer the following notion:
+ *
+ * x >= 10^309 => +Inf
+ * x <= 10^-324 => 0,
+ *
+ * max double: HUGE_VAL = 1.7976931348623157 * 10^308
+ * min double: 4.9406564584124654 * 10^-324
+ *
+ * Values just below or above min/max representable number
+ * may round towards large/small non-Inf/non-neg values.
+ *
+ * but `strtod` seems to return +/-HUGE_VAL on overflow?
+ */
+static int grisu3_diy_fp_encode_double(uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
+{
+ /*
+ * Error is measured in fractions of integers, so we scale up to get
+ * some resolution to represent error expressions.
+ */
+ const int log2_error_one = 3;
+ const int error_one = 1 << log2_error_one;
+ const int denorm_exp = GRISU3_D64_DENORM_EXP;
+ const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
+ const int diy_size = GRISU3_DIY_FP_FRACT_SIZE;
+ const int max_digits = 19;
+
+ int error = ulp_half_error ? error_one / 2 : 0;
+ int d_exp = (exponent - fraction_exp);
+ int a_exp;
+ int o_exp;
+ grisu3_diy_fp_t v = { fraction, 0 };
+ grisu3_diy_fp_t cp;
+ grisu3_diy_fp_t rounded;
+ int mag;
+ int prec;
+ int prec_bits;
+ int half_way;
+
+ /* When fractions in a double aren't stored with implicit msb fraction bit. */
+
+ /* Shift fraction to msb. */
+ v = grisu3_diy_fp_normalize(v);
+ /* The half point error moves up while the exponent moves down. */
+ error <<= -v.e;
+
+ a_exp = grisu3_diy_fp_cached_dec_pow(d_exp, &cp);
+
+ /* Interpolate between cached powers at distance 8. */
+ if (a_exp != d_exp) {
+ int adj_exp = d_exp - a_exp - 1;
+ static grisu3_diy_fp_t cp_10_lut[] = {
+ { 0xa000000000000000ULL, -60 },
+ { 0xc800000000000000ULL, -57 },
+ { 0xfa00000000000000ULL, -54 },
+ { 0x9c40000000000000ULL, -50 },
+ { 0xc350000000000000ULL, -47 },
+ { 0xf424000000000000ULL, -44 },
+ { 0x9896800000000000ULL, -40 },
+ };
+ GRISU3_ASSERT(adj_exp >= 0 && adj_exp < 7);
+ v = grisu3_diy_fp_multiply(v, cp_10_lut[adj_exp]);
+
+ /* 20 decimal digits won't always fit in 64 bit.
+ * (`fraction_exp` is one less than significant decimal
+ * digits in fraction, e.g. 1 * 10e0).
+ * If we cannot fit, introduce 1/2 ulp error
+ * (says double conversion reference impl.) */
+ if (1 + fraction_exp + adj_exp > max_digits) {
+ error += error_one / 2;
+ }
+ }
+
+ v = grisu3_diy_fp_multiply(v, cp);
+ /*
+ * Google double conversion claims that:
+ *
+ * The error introduced by a multiplication of a*b equals
+ * error_a + error_b + error_a*error_b/2^64 + 0.5
+ * Substituting a with 'input' and b with 'cached_power' we have
+ * error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
+ * error_ab = 0 or 1 / error_one > error_a*error_b/ 2^64
+ *
+ * which in our encoding becomes:
+ * error_a = error_one/2
+ * error_ab = 1 / error_one (rounds up to 1 if error != 0, or 0 otherwise)
+ * fixed_error = error_one/2
+ *
+ * error += error_a + fixed_error + (error ? 1 : 0)
+ *
+ * (this isn't entirely clear, but that is as close as we get).
+ */
+ error += error_one + (error ? 1 : 0);
+
+ o_exp = v.e;
+ v = grisu3_diy_fp_normalize(v);
+ /* Again, if we shift the significant bits, the error moves along. */
+ error <<= o_exp - v.e;
+
+ /*
+ * The value `v` is bounded by 2^mag which is 64 + v.e. because we
+ * just normalized it by shifting towards msb.
+ */
+ mag = diy_size + v.e;
+
+ /* The effective magnitude of the IEEE double representation. */
+ mag = mag >= diy_size + denorm_exp ? diy_size : mag <= denorm_exp ? 0 : mag - denorm_exp;
+ prec = diy_size - mag;
+ if (prec + log2_error_one >= diy_size) {
+ int e_scale = prec + log2_error_one - diy_size - 1;
+ v.f >>= e_scale;
+ v.e += e_scale;
+ error = (error >> e_scale) + 1 + error_one;
+ prec -= e_scale;
+ }
+ rounded.f = v.f >> prec;
+ rounded.e = v.e + prec;
+ prec_bits = (int)(v.f & ((uint64_t)1 << (prec - 1))) * error_one;
+ half_way = (int)((uint64_t)1 << (prec - 1)) * error_one;
+ if (prec_bits >= half_way + error) {
+ rounded.f++;
+ /* Prevent overflow. */
+ if (rounded.f & (hidden_bit << 1)) {
+ rounded.f >>= 1;
+ rounded.e += 1;
+ }
+ }
+ *result = grisu3_cast_double_from_diy_fp(rounded);
+ return half_way - error >= prec_bits || prec_bits >= half_way + error;
+}
+
+/*
+ * `end` is unchanged if number is handled natively, or it is the result
+ * of strtod parsing in case of fallback.
+ */
+static const char *grisu3_encode_double(const char *buf, const char *end, int sign, uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
+{
+ const int max_d_exp = GRISU3_D64_MAX_DEC_EXP;
+ const int min_d_exp = GRISU3_D64_MIN_DEC_EXP;
+
+ char *v_end;
+
+ /* Both for user experience, and to protect internal power table lookups. */
+ if (fraction == 0 || exponent < min_d_exp) {
+ *result = 0.0;
+ goto done;
+ }
+ if (exponent - 1 > max_d_exp) {
+ *result = grisu3_double_infinity;
+ goto done;
+ }
+
+ /*
+ * `exponent` is the normalized value, fraction_exp is the size of
+ * the representation in the `fraction value`, or one less than
+ * number of significant digits.
+ *
+ * If the final value can be kept in 53 bits and we can avoid
+ * division, then we can convert to double quite fast.
+ *
+ * ulp_half_error only happens when fraction is maxed out, so
+ * fraction_exp > 22 by definition.
+ *
+ * fraction_exp >= 0 always.
+ *
+ * http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
+ */
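+/*
+ * For example, parsing "1.25" yields fraction = 125, fraction_exp = 2
+ * and exponent = 0, so the fast case below computes 125 / 1e2 = 1.25
+ * exactly without the diy_fp machinery.
+ */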
+
+
+#ifdef GRISU3_PARSE_FAST_CASE
+ if (fraction < (1ULL << 53) && exponent >= 0 && exponent <= 22) {
+ double v = (double)fraction;
+ /* Multiplying by 1e-k instead of dividing by 1ek results in rounding error. */
+ switch (exponent - fraction_exp) {
+ case -22: v /= 1e22; break;
+ case -21: v /= 1e21; break;
+ case -20: v /= 1e20; break;
+ case -19: v /= 1e19; break;
+ case -18: v /= 1e18; break;
+ case -17: v /= 1e17; break;
+ case -16: v /= 1e16; break;
+ case -15: v /= 1e15; break;
+ case -14: v /= 1e14; break;
+ case -13: v /= 1e13; break;
+ case -12: v /= 1e12; break;
+ case -11: v /= 1e11; break;
+ case -10: v /= 1e10; break;
+ case -9: v /= 1e9; break;
+ case -8: v /= 1e8; break;
+ case -7: v /= 1e7; break;
+ case -6: v /= 1e6; break;
+ case -5: v /= 1e5; break;
+ case -4: v /= 1e4; break;
+ case -3: v /= 1e3; break;
+ case -2: v /= 1e2; break;
+ case -1: v /= 1e1; break;
+ case 0: break;
+ case 1: v *= 1e1; break;
+ case 2: v *= 1e2; break;
+ case 3: v *= 1e3; break;
+ case 4: v *= 1e4; break;
+ case 5: v *= 1e5; break;
+ case 6: v *= 1e6; break;
+ case 7: v *= 1e7; break;
+ case 8: v *= 1e8; break;
+ case 9: v *= 1e9; break;
+ case 10: v *= 1e10; break;
+ case 11: v *= 1e11; break;
+ case 12: v *= 1e12; break;
+ case 13: v *= 1e13; break;
+ case 14: v *= 1e14; break;
+ case 15: v *= 1e15; break;
+ case 16: v *= 1e16; break;
+ case 17: v *= 1e17; break;
+ case 18: v *= 1e18; break;
+ case 19: v *= 1e19; break;
+ case 20: v *= 1e20; break;
+ case 21: v *= 1e21; break;
+ case 22: v *= 1e22; break;
+ }
+ *result = v;
+ goto done;
+ }
+#endif
+
+ if (grisu3_diy_fp_encode_double(fraction, exponent, fraction_exp, ulp_half_error, result)) {
+ goto done;
+ }
+#ifdef GRISU3_PARSE_ALLOW_ERROR
+ goto done;
+#endif
+ *result = strtod(buf, &v_end);
+ if (v_end < end) {
+ return v_end;
+ }
+ return end;
+done:
+ if (sign) {
+ *result = -*result;
+ }
+ return end;
+}
+
+/*
+ * Returns buf if number wasn't matched, or null if number starts ok
+ * but contains invalid content.
+ */
+static const char *grisu3_parse_hex_fp(const char *buf, const char *end, int sign, double *result)
+{
+ (void)buf;
+ (void)end;
+ (void)sign;
+ *result = 0.0;
+ /* Not currently supported. */
+ return buf;
+}
+
+/*
+ * Returns end pointer on success, or null, or buf if start is not a number.
+ * Sets result to 0.0 on error.
+ * Reads up to len + 1 bytes from the buffer where byte len + 1 must not
+ * be a valid part of a number, but all of buf .. buf + len need not be
+ * a number. Leading whitespace is NOT valid.
+ * Very small numbers are truncated to +/-0.0 and numerically very large
+ * numbers are returned as +/-infinity.
+ *
+ * A value must not end or begin with '.' (like JSON), but can have
+ * leading zeroes (unlike JSON). A single leading zero followed by
+ * an encoding symbol may or may not be interpreted as a non-decimal
+ * encoding prefix, e.g. 0x, but a leading zero followed by a digit is
+ * NOT interpreted as octal.
+ * A single leading negative sign may appear before digits, but positive
+ * sign is not allowed and space after the sign is not allowed.
+ * At most the first 1000 characters of the input are considered.
+ */
+static const char *grisu3_parse_double(const char *buf, size_t len, double *result)
+{
+ const char *mark, *k, *end;
+ int sign = 0, esign = 0;
+ uint64_t fraction = 0;
+ int exponent = 0;
+ int ee = 0;
+ int fraction_exp = 0;
+ int ulp_half_error = 0;
+
+ *result = 0.0;
+
+ end = buf + len + 1;
+
+ /* Failsafe for exponent overflow. */
+ if (len > GRISU3_NUM_MAX_LEN) {
+ end = buf + GRISU3_NUM_MAX_LEN + 1;
+ }
+
+ if (buf == end) {
+ return buf;
+ }
+ mark = buf;
+ if (*buf == '-') {
+ ++buf;
+ sign = 1;
+ if (buf == end) {
+ return 0;
+ }
+ }
+ if (*buf == '0') {
+ ++buf;
+ /* | 0x20 is lower case ASCII. */
+ if (buf != end && (*buf | 0x20) == 'x') {
+ k = grisu3_parse_hex_fp(buf, end, sign, result);
+ if (k == buf) {
+ return mark;
+ }
+ return k;
+ }
+ /* Not worthwhile, except for getting the scale of the integer part. */
+ while (buf != end && *buf == '0') {
+ ++buf;
+ }
+ } else {
+ if (*buf < '1' || *buf > '9') {
+ /*
+ * If we didn't see a sign, just don't recognize it as
+ * number, otherwise make it an error.
+ */
+ return sign ? 0 : mark;
+ }
+ fraction = (uint64_t)(*buf++ - '0');
+ }
+ k = buf;
+ /*
+ * We do not catch trailing zeroes when there is no decimal point.
+ * This misses an opportunity for moving the exponent down into the
+ * fast case. But it is unlikely to be worthwhile as it complicates
+ * parsing.
+ */
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ if (fraction >= UINT64_MAX / 10) {
+ fraction += *buf >= '5';
+ ulp_half_error = 1;
+ break;
+ }
+ fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
+ }
+ fraction_exp = (int)(buf - k);
+ /* Skip surplus digits. Trailing zero does not introduce error. */
+ while (buf != end && *buf == '0') {
+ ++exponent;
+ ++buf;
+ }
+ if (buf != end && *buf >= '1' && *buf <= '9') {
+ ulp_half_error = 1;
+ ++exponent;
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++exponent;
+ ++buf;
+ }
+ }
+ if (buf != end && *buf == '.') {
+ ++buf;
+ k = buf;
+ if (*buf < '0' || *buf > '9') {
+ /* We don't accept numbers without leading or trailing digit. */
+ return 0;
+ }
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ if (fraction >= UINT64_MAX / 10) {
+ if (!ulp_half_error) {
+ fraction += *buf >= '5';
+ ulp_half_error = 1;
+ }
+ break;
+ }
+ fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
+ --exponent;
+ }
+ fraction_exp += (int)(buf - k);
+ while (buf != end && *buf == '0') {
+ ++exponent;
+ ++buf;
+ }
+ if (buf != end && *buf >= '1' && *buf <= '9') {
+ ulp_half_error = 1;
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ }
+ /*
+ * Normalized exponent e.g: 1.23434e3 with fraction = 123434,
+ * fraction_exp = 5, exponent = 3.
+ * So value = fraction * 10^(exponent - fraction_exp)
+ */
+ exponent += fraction_exp;
+ if (buf != end && (*buf | 0x20) == 'e') {
+ if (end - buf < 2) {
+ return 0;
+ }
+ ++buf;
+ if (*buf == '+') {
+ ++buf;
+ if (buf == end) {
+ return 0;
+ }
+ } else if (*buf == '-') {
+ esign = 1;
+ ++buf;
+ if (buf == end) {
+ return 0;
+ }
+ }
+ if (*buf < '0' || *buf > '9') {
+ return 0;
+ }
+ ee = *buf++ - '0';
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ /*
+ * This test impacts performance and we do not need an
+ * exact value just one large enough to dominate the fraction_exp.
+ * Subsequent handling maps large absolute ee to 0 or infinity.
+ */
+ if (ee <= 0x7fff) {
+ ee = ee * 10 + *buf - '0';
+ }
+ ++buf;
+ }
+ }
+ exponent = exponent + (esign ? -ee : ee);
+
+ /*
+ * Exponent is now a base 10 normalized exponent so the absolute value
+ * is less than 10^(exponent + 1) for positive exponents. For
+ * denormalized doubles (using 11 bit exponent 0 with a fraction
+ * shifted down), extra small numbers can be achieved.
+ *
+ * https://en.wikipedia.org/wiki/Double-precision_floating-point_format
+ *
+ * 10^-324 holds the smallest normalized exponent (but not value) and
+ * 10^308 holds the largest exponent. Internally our lookup table is
+ * only safe to use within a range slightly larger than this.
+ * Externally, a slightly larger/smaller value represents NaNs which
+ * are technically also possible to store as a number.
+ *
+ */
+
+ /* This also protects strtod fallback parsing. */
+ if (buf == end) {
+ return 0;
+ }
+ return grisu3_encode_double(mark, buf, sign, fraction, exponent, fraction_exp, ulp_half_error, result);
+}
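+
+/*
+ * Usage sketch:
+ *
+ *     double v;
+ *     const char *s = "3.1415e2";
+ *     const char *k = grisu3_parse_double(s, strlen(s), &v);
+ *     // on success k points past the parsed number (here the
+ *     // terminating zero) and v is approximately 314.15;
+ *     // k == 0 means invalid number, k == s means no number found.
+ */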
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PARSE_H */
diff --git a/include/flatcc/portable/grisu3_print.h b/include/flatcc/portable/grisu3_print.h
new file mode 100644
index 0000000..d748408
--- /dev/null
+++ b/include/flatcc/portable/grisu3_print.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Extracted from MathGeoLib.
+ *
+ * mikkelfj:
+ * - Fixed final output when printing single digit negative exponent to
+ * have leading zero (important for JSON).
+ * - Changed formatting to prefer 0.012 over 1.2e-2.
+ *
+ * Large portions of the original grisu3.c file has been moved to
+ * grisu3_math.h, the rest is placed here.
+ *
+ * See also comments in grisu3_math.h.
+ *
+ * MathGeoLib grisu3.c comment:
+ *
+ * This file is part of an implementation of the "grisu3" double to string
+ * conversion algorithm described in the research paper
+ *
+ * "Printing Floating-Point Numbers Quickly And Accurately with Integers"
+ * by Florian Loitsch, available at
+ * http://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf
+ */
+
+#ifndef GRISU3_PRINT_H
+#define GRISU3_PRINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h> /* sprintf, only needed for fallback printing */
+#include <assert.h> /* assert */
+#include <string.h> /* memmove */
+
+#include "grisu3_math.h"
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_print_double_is_defined 1
+
+/*
+ * Not sure we have an exact definition, but we get up to 23
+ * empirically. There is some math ensuring it does not go AWOL though,
+ * like 18 digits + exponent or so.
+ * This max should be a safe buffer size for printing, including the zero terminator.
+ */
+#define GRISU3_PRINT_MAX 30
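+
+/*
+ * Usage sketch:
+ *
+ *     char buf[GRISU3_PRINT_MAX];
+ *     int n = grisu3_print_double(0.1, buf); // writes "0.1", returns 3
+ */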
+
+static int grisu3_round_weed(char *buffer, int len, uint64_t wp_W, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t ulp)
+{
+ uint64_t wp_Wup = wp_W - ulp;
+ uint64_t wp_Wdown = wp_W + ulp;
+ while(rest < wp_Wup && delta - rest >= ten_kappa
+ && (rest + ten_kappa < wp_Wup || wp_Wup - rest >= rest + ten_kappa - wp_Wup))
+ {
+ --buffer[len-1];
+ rest += ten_kappa;
+ }
+ if (rest < wp_Wdown && delta - rest >= ten_kappa
+ && (rest + ten_kappa < wp_Wdown || wp_Wdown - rest > rest + ten_kappa - wp_Wdown))
+ return 0;
+
+ return 2*ulp <= rest && rest <= delta - 4*ulp;
+}
+
+static int grisu3_digit_gen(grisu3_diy_fp_t low, grisu3_diy_fp_t w, grisu3_diy_fp_t high, char *buffer, int *length, int *kappa)
+{
+ uint64_t unit = 1;
+ grisu3_diy_fp_t too_low = { low.f - unit, low.e };
+ grisu3_diy_fp_t too_high = { high.f + unit, high.e };
+ grisu3_diy_fp_t unsafe_interval = grisu3_diy_fp_minus(too_high, too_low);
+ grisu3_diy_fp_t one = { 1ULL << -w.e, w.e };
+ uint32_t p1 = (uint32_t)(too_high.f >> -one.e);
+ uint64_t p2 = too_high.f & (one.f - 1);
+ uint32_t div;
+ *kappa = grisu3_largest_pow10(p1, GRISU3_DIY_FP_FRACT_SIZE + one.e, &div);
+ *length = 0;
+
+ while(*kappa > 0)
+ {
+ uint64_t rest;
+ char digit = (char)(p1 / div);
+ buffer[*length] = '0' + digit;
+ ++*length;
+ p1 %= div;
+ --*kappa;
+ rest = ((uint64_t)p1 << -one.e) + p2;
+ if (rest < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f, unsafe_interval.f, rest, (uint64_t)div << -one.e, unit);
+ div /= 10;
+ }
+
+ for(;;)
+ {
+ char digit;
+ p2 *= 10;
+ unit *= 10;
+ unsafe_interval.f *= 10;
+ /* Integer division by one. */
+ digit = (char)(p2 >> -one.e);
+ buffer[*length] = '0' + digit;
+ ++*length;
+ p2 &= one.f - 1; /* Modulo by one. */
+ --*kappa;
+ if (p2 < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f * unit, unsafe_interval.f, p2, one.f, unit);
+ }
+}
+
+static int grisu3(double v, char *buffer, int *length, int *d_exp)
+{
+ int mk, kappa, success;
+ grisu3_diy_fp_t dfp = grisu3_cast_diy_fp_from_double(v);
+ grisu3_diy_fp_t w = grisu3_diy_fp_normalize(dfp);
+
+ /* normalize boundaries */
+ grisu3_diy_fp_t t = { (dfp.f << 1) + 1, dfp.e - 1 };
+ grisu3_diy_fp_t b_plus = grisu3_diy_fp_normalize(t);
+ grisu3_diy_fp_t b_minus;
+ grisu3_diy_fp_t c_mk; /* Cached power of ten: 10^-k */
+ uint64_t u64 = grisu3_cast_uint64_from_double(v);
+ assert(v > 0 && v <= 1.7976931348623157e308); /* Grisu only handles strictly positive finite numbers. */
+ if (!(u64 & GRISU3_D64_FRACT_MASK) && (u64 & GRISU3_D64_EXP_MASK) != 0) { b_minus.f = (dfp.f << 2) - 1; b_minus.e = dfp.e - 2;} /* lower boundary is closer? */
+ else { b_minus.f = (dfp.f << 1) - 1; b_minus.e = dfp.e - 1; }
+ b_minus.f = b_minus.f << (b_minus.e - b_plus.e);
+ b_minus.e = b_plus.e;
+
+ mk = grisu3_diy_fp_cached_pow(GRISU3_MIN_TARGET_EXP - GRISU3_DIY_FP_FRACT_SIZE - w.e, &c_mk);
+
+ w = grisu3_diy_fp_multiply(w, c_mk);
+ b_minus = grisu3_diy_fp_multiply(b_minus, c_mk);
+ b_plus = grisu3_diy_fp_multiply(b_plus, c_mk);
+
+ success = grisu3_digit_gen(b_minus, w, b_plus, buffer, length, &kappa);
+ *d_exp = kappa - mk;
+ return success;
+}
+
+static int grisu3_i_to_str(int val, char *str)
+{
+ int len, i;
+ char *s;
+ char *begin = str;
+ if (val < 0) { *str++ = '-'; val = -val; }
+ s = str;
+
+ for(;;)
+ {
+ int ni = val / 10;
+ int digit = val - ni*10;
+ *s++ = (char)('0' + digit);
+ if (ni == 0)
+ break;
+ val = ni;
+ }
+ *s = '\0';
+ len = (int)(s - str);
+ for(i = 0; i < len/2; ++i)
+ {
+ char ch = str[i];
+ str[i] = str[len-1-i];
+ str[len-1-i] = ch;
+ }
+
+ return (int)(s - begin);
+}
+
+static int grisu3_print_nan(uint64_t v, char *dst)
+{
+ static char hexdigits[16] = "0123456789ABCDEF";
+ int i = 0;
+
+ dst[0] = 'N';
+ dst[1] = 'a';
+ dst[2] = 'N';
+ dst[3] = '(';
+ dst[20] = ')';
+ dst[21] = '\0';
+ dst += 4;
+ for (i = 15; i >= 0; --i) {
+ dst[i] = hexdigits[v & 0x0F];
+ v >>= 4;
+ }
+ return 21;
+}
+
+static int grisu3_print_double(double v, char *dst)
+{
+ int d_exp, len, success, decimals, i;
+ uint64_t u64 = grisu3_cast_uint64_from_double(v);
+ char *s2 = dst;
+ assert(dst);
+
+ /* Prehandle NaNs */
+ if ((u64 << 1) > 0xFFE0000000000000ULL) return grisu3_print_nan(u64, dst);
+ /* Prehandle negative values. */
+ if ((u64 & GRISU3_D64_SIGN) != 0) { *s2++ = '-'; v = -v; u64 ^= GRISU3_D64_SIGN; }
+ /* Prehandle zero. */
+ if (!u64) { *s2++ = '0'; *s2 = '\0'; return (int)(s2 - dst); }
+ /* Prehandle infinity. */
+ if (u64 == GRISU3_D64_EXP_MASK) { *s2++ = 'i'; *s2++ = 'n'; *s2++ = 'f'; *s2 = '\0'; return (int)(s2 - dst); }
+
+ success = grisu3(v, s2, &len, &d_exp);
+ /* If grisu3 was not able to convert the number to a string, then use old sprintf (suboptimal). */
+ if (!success) return sprintf(s2, "%.17g", v) + (int)(s2 - dst);
+
+ /* We now have an integer string of form "151324135" and a base-10 exponent for that number. */
+ /* Next, decide the best presentation for that string by whether to use a decimal point, or the scientific exponent notation 'e'. */
+ /* We don't pick the absolute shortest representation, but pick a balance between readability and shortness, e.g. */
+ /* 1.545056189557677e-308 could be represented in a shorter form */
+ /* 1545056189557677e-323 but that would be somewhat unreadable. */
+ decimals = GRISU3_MIN(-d_exp, GRISU3_MAX(1, len-1));
+
+ /* mikkelfj:
+ * fix zero prefix .1 => 0.1, important for JSON export.
+ * prefer unscientific notation at same length:
+ * -1.2345e-4 over -1.00012345,
+ * -1.0012345 over -1.2345e-3
+ */
+ if (d_exp < 0 && (len + d_exp) > -3 && len <= -d_exp)
+ {
+ /* mikkelfj: fix zero prefix .1 => 0.1, and short exponents 1.3e-2 => 0.013. */
+ memmove(s2 + 2 - d_exp - len, s2, (size_t)len);
+ s2[0] = '0';
+ s2[1] = '.';
+ for (i = 2; i < 2-d_exp-len; ++i) s2[i] = '0';
+ len += i;
+ }
+ else if (d_exp < 0 && len > 1) /* Add decimal point? */
+ {
+ for(i = 0; i < decimals; ++i) s2[len-i] = s2[len-i-1];
+ s2[len++ - decimals] = '.';
+ d_exp += decimals;
+ /* Need scientific notation as well? */
+ if (d_exp != 0) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
+ }
+ /* Add scientific notation? */
+ else if (d_exp < 0 || d_exp > 2) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
+ /* Add zeroes instead of scientific notation? */
+ else if (d_exp > 0) { while(d_exp-- > 0) s2[len++] = '0'; }
+ s2[len] = '\0'; /* grisu3 doesn't null terminate, so ensure termination. */
+ return (int)(s2+len-dst);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PRINT_H */
diff --git a/include/flatcc/portable/include/README b/include/flatcc/portable/include/README
new file mode 100644
index 0000000..9f991fc
--- /dev/null
+++ b/include/flatcc/portable/include/README
@@ -0,0 +1,4 @@
+This directory holds subdirectories so it can be added to the include
+path such that standard and OS specific header includes like <stdint.h>,
+<stdbool.h> and <endian.h> can succeed without explicitly including
+special headers.
diff --git a/include/flatcc/portable/include/linux/endian.h b/include/flatcc/portable/include/linux/endian.h
new file mode 100644
index 0000000..38fd1fb
--- /dev/null
+++ b/include/flatcc/portable/include/linux/endian.h
@@ -0,0 +1 @@
+#include "portable/pendian.h"
diff --git a/include/flatcc/portable/include/std/inttypes.h b/include/flatcc/portable/include/std/inttypes.h
new file mode 100644
index 0000000..99b699d
--- /dev/null
+++ b/include/flatcc/portable/include/std/inttypes.h
@@ -0,0 +1 @@
+#include "portable/inttypes.h"
diff --git a/include/flatcc/portable/include/std/stdalign.h b/include/flatcc/portable/include/std/stdalign.h
new file mode 100644
index 0000000..6d51281
--- /dev/null
+++ b/include/flatcc/portable/include/std/stdalign.h
@@ -0,0 +1 @@
+#include "portable/pstdalign.h"
diff --git a/include/flatcc/portable/include/std/stdbool.h b/include/flatcc/portable/include/std/stdbool.h
new file mode 100644
index 0000000..12eb4c7
--- /dev/null
+++ b/include/flatcc/portable/include/std/stdbool.h
@@ -0,0 +1 @@
+#include "portable/pstdbool.h"
diff --git a/include/flatcc/portable/include/std/stdint.h b/include/flatcc/portable/include/std/stdint.h
new file mode 100644
index 0000000..0364471
--- /dev/null
+++ b/include/flatcc/portable/include/std/stdint.h
@@ -0,0 +1 @@
+#include "portable/pstdint.h"
diff --git a/include/flatcc/portable/paligned_alloc.h b/include/flatcc/portable/paligned_alloc.h
new file mode 100644
index 0000000..70b00b9
--- /dev/null
+++ b/include/flatcc/portable/paligned_alloc.h
@@ -0,0 +1,212 @@
+#ifndef PALIGNED_ALLOC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * NOTE: MSVC in general has no aligned alloc function that is
+ * compatible with free and it is not trivial to implement a version
+ * which is. Therefore, to remain portable, end user code needs to
+ * use `aligned_free` which is not part of C11 but defined in this header.
+ *
+ * glibc only provides aligned_alloc when _ISOC11_SOURCE is defined, but
+ * MinGW does not support aligned_alloc despite this; it uses
+ * _aligned_malloc like MSVC does.
+ *
+ * The same issue is present on some Unix systems not providing
+ * posix_memalign.
+ *
+ * Note that clang and gcc with -std=c11 or -std=c99 will not define
+ * _POSIX_C_SOURCE and thus posix_memalign cannot be detected but
+ * aligned_alloc is not necessarily available either. We assume
+ * that clang always has posix_memalign although it is not strictly
+ * correct. For gcc, use -std=gnu99 or -std=gnu11 or don't use -std in
+ * order to enable posix_memalign, or live with the fallback until using
+ * a system where glibc has a version that supports aligned_alloc.
+ *
+ * For C11 compliant compilers and compilers with posix_memalign,
+ * it is valid to use free instead of aligned_free with the above
+ * caveats.
+ */
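+
+/*
+ * A minimal usage sketch: allocate with aligned_alloc and release with
+ * aligned_free so the code stays portable to MSVC/MinGW where plain
+ * free() is not a valid counterpart to _aligned_malloc.
+ *
+ *     void *p = aligned_alloc(64, 4096);   // 64-byte aligned block
+ *     if (p) {
+ *         // ... use the buffer ...
+ *         aligned_free(p);                 // not free(), for portability
+ *     }
+ */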
+
+#include <stdlib.h>
+
+/*
+ * Define this to see which version is used so the fallback is not
+ * engaged unnecessarily:
+ *
+ * #define PORTABLE_DEBUG_ALIGNED_ALLOC
+ */
+
+#if 0
+#define PORTABLE_DEBUG_ALIGNED_ALLOC
+#endif
+
+#if !defined(PORTABLE_C11_ALIGNED_ALLOC)
+
+/*
+ * PORTABLE_C11_ALIGNED_ALLOC = 1
+ * indicates that the system has builtin aligned_alloc
+ * If it doesn't, the section after detection provides an implementation.
+ */
+#if defined (__MINGW32__)
+/* MingW does not provide aligned_alloc despite defining _ISOC11_SOURCE */
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (_ISOC11_SOURCE)
+/* glibc aligned_alloc detection, but MingW is not truthful */
+#define PORTABLE_C11_ALIGNED_ALLOC 1
+#elif defined (__GLIBC__)
+/* aligned_alloc is not available in glibc just because __STDC_VERSION__ >= 201112L. */
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (__clang__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (__APPLE__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined(__IBMC__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L)
+#define PORTABLE_C11_ALIGNED_ALLOC 1
+#else
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#endif
+
+#endif /* PORTABLE_C11_ALIGNED_ALLOC */
+
+/* https://linux.die.net/man/3/posix_memalign */
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_GNU_SOURCE)
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+
+/* https://forum.kde.org/viewtopic.php?p=66274 */
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_XOPEN_SOURCE)
+#if _XOPEN_SOURCE >= 600
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_POSIX_C_SOURCE)
+#if _POSIX_C_SOURCE >= 200112L
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(__clang__)
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN)
+#define PORTABLE_POSIX_MEMALIGN 0
+#endif
+
+/* https://forum.kde.org/viewtopic.php?p=66274 */
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L)
+/* C11 or newer */
+#include <stdalign.h>
+#endif
+
+/* C11 or newer */
+#if !defined(aligned_alloc) && !defined(__aligned_alloc_is_defined)
+
+#if PORTABLE_C11_ALIGNED_ALLOC
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: C11_ALIGNED_ALLOC configured"
+#endif
+#elif defined(_MSC_VER) || defined(__MINGW32__)
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: Windows _aligned_malloc configured"
+#endif
+
+/* Note: _aligned_malloc is not compatible with free. */
+#define aligned_alloc(alignment, size) _aligned_malloc(size, alignment)
+#define aligned_free(p) _aligned_free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#elif PORTABLE_POSIX_MEMALIGN
+
+#if defined(__GNUC__)
+#if !defined(__GNUCC__)
+extern int posix_memalign (void **, size_t, size_t);
+#elif __GNUCC__ < 5
+extern int posix_memalign (void **, size_t, size_t);
+#endif
+#endif
+
+static inline void *__portable_aligned_alloc(size_t alignment, size_t size)
+{
+ int err;
+ void *p = 0;
+
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+ err = posix_memalign(&p, alignment, size);
+ if (err && p) {
+ free(p);
+ p = 0;
+ }
+ return p;
+}
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: POSIX_MEMALIGN configured"
+#endif
+
+#define aligned_alloc(alignment, size) __portable_aligned_alloc(alignment, size)
+#define aligned_free(p) free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#else
+
+static inline void *__portable_aligned_alloc(size_t alignment, size_t size)
+{
+ char *raw;
+ void *buf;
+ size_t total_size = (size + alignment - 1 + sizeof(void *));
+
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+ raw = (char *)(size_t)malloc(total_size);
+ buf = raw + alignment - 1 + sizeof(void *);
+ buf = (void *)(((size_t)buf) & ~(alignment - 1));
+ ((void **)buf)[-1] = raw;
+ return buf;
+}
+
+static inline void __portable_aligned_free(void *p)
+{
+ char *raw;
+
+ if (p) {
+ raw = (char*)((void **)p)[-1];
+ free(raw);
+ }
+}
+
+#define aligned_alloc(alignment, size) __portable_aligned_alloc(alignment, size)
+#define aligned_free(p) __portable_aligned_free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: aligned_alloc malloc fallback configured"
+#endif
+
+#endif
+
+#endif /* aligned_alloc */
+
+#if !defined(aligned_free) && !defined(__aligned_free_is_defined)
+#define aligned_free(p) free(p)
+#define __aligned_free_is_defined 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PALIGNED_ALLOC_H */
diff --git a/include/flatcc/portable/pattributes.h b/include/flatcc/portable/pattributes.h
new file mode 100644
index 0000000..9240fa3
--- /dev/null
+++ b/include/flatcc/portable/pattributes.h
@@ -0,0 +1,84 @@
+
+/*
+ * C23 introduces an attribute syntax `[[<attribute>]]`. Prior to that
+ * other non-standard syntaxes such as `__attribute__((<attribute>))`
+ * and `__declspec(<attribute>)` have been supported by some compiler
+ * versions.
+ *
+ * See also:
+ * https://en.cppreference.com/w/c/language/attributes
+ *
+ * There is no portable way to use C23 attributes in older C standards
+ * so in order to use these portably, some macro name needs to be
+ * defined for each attribute that either maps to the older supported
+ * syntax, or ignores the attribute as appropriate.
+ *
+ * The Linux kernel defines certain attributes as macros, such as
+ * `fallthrough`. When adding attributes it seems reasonable to follow
+ * the Linux conventions in lack of any official standard. However, it
+ * is not the intention that this file should mirror the Linux
+ * attributes 1 to 1.
+ *
+ * See also:
+ * https://github.com/torvalds/linux/blob/master/include/linux/compiler_attributes.h
+ *
+ * There is a risk that exposed attribute names may lead to name
+ * conflicts. A conflicting name can be undefined and if necessary used
+ * using `pattribute(<attribute>)`. All attributes can be hidden by
+ * defining `PORTABLE_EXPOSE_ATTRIBUTES=0` in which case
+ * `pattribute(<attribute>)` can still be used and then if a specific
+ * attribute name still needs to be exposed, it can be defined manually
+ * like `#define fallthrough pattribute(fallthrough)`.
+ */
+
+
+#ifndef PATTRIBUTES_H
+#define PATTRIBUTES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef PORTABLE_EXPOSE_ATTRIBUTES
+#define PORTABLE_EXPOSE_ATTRIBUTES 0
+#endif
+
+#ifdef __has_c_attribute
+# define PORTABLE_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define PORTABLE_HAS_C_ATTRIBUTE(x) 0
+#endif
+
+#ifdef __has_attribute
+# define PORTABLE_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define PORTABLE_HAS_ATTRIBUTE(x) 0
+#endif
+
+
+/* https://en.cppreference.com/w/c/language/attributes/fallthrough */
+#if PORTABLE_HAS_C_ATTRIBUTE(__fallthrough__)
+# define pattribute_fallthrough [[__fallthrough__]]
+#elif PORTABLE_HAS_ATTRIBUTE(__fallthrough__)
+# define pattribute_fallthrough __attribute__((__fallthrough__))
+#else
+# define pattribute_fallthrough ((void)0)
+#endif
+
+
+#define pattribute(x) pattribute_##x
+
+#if PORTABLE_EXPOSE_ATTRIBUTES
+
+#ifndef fallthrough
+# define fallthrough pattribute(fallthrough)
+#endif
+
+#endif
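+
+/*
+ * A minimal usage sketch with PORTABLE_EXPOSE_ATTRIBUTES left at 0 so
+ * only the pattribute() form is used (handle_a/handle_b are placeholder
+ * names, not part of this header):
+ *
+ *     switch (c) {
+ *     case 'a':
+ *         handle_a();
+ *         pattribute(fallthrough);
+ *     case 'b':
+ *         handle_b();
+ *         break;
+ *     }
+ */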
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PATTRIBUTES_H */
diff --git a/include/flatcc/portable/pbase64.h b/include/flatcc/portable/pbase64.h
new file mode 100644
index 0000000..a6812c4
--- /dev/null
+++ b/include/flatcc/portable/pbase64.h
@@ -0,0 +1,448 @@
+#ifndef PBASE64_H
+#define PBASE64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+/* Guarded to allow inclusion of pstdint.h first, if stdint.h is not supported. */
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define BASE64_EOK 0
+/* 0 or more full blocks decoded, remaining content may be parsed with a fresh buffer. */
+#define BASE64_EMORE 1
+/* The `src_len` argument is required when encoding. */
+#define BASE64_EARGS 2
+/* Unsupported mode, or modifier not supported by mode when encoding. */
+#define BASE64_EMODE 3
+/* Decoding ends at invalid tail length - either by source length or by non-alphabet symbol. */
+#define BASE64_ETAIL 4
+/* Decoding ends at valid tail length but last byte has non-zero bits where it shouldn't have. */
+#define BASE64_EDIRTY 5
+
+static inline const char *base64_strerror(int err);
+
+/* All codecs are URL safe. Only Crockford allows for non-canonical decoding. */
+enum {
+ /* Most common base64 codec, but not url friendly. */
+ base64_mode_rfc4648 = 0,
+
+ /* URL safe version, '+' -> '-', '/' -> '_'. */
+ base64_mode_url = 1,
+
+ /*
+ * Skip ' ', '\r', and '\n' - we do not allow tab because common
+ * uses of base64 such as PEM do not allow tab.
+ */
+ base64_dec_modifier_skipspace = 32,
+
+ /* Padding is excluded by default. Not allowed for zbase64. */
+ base64_enc_modifier_padding = 128,
+
+ /* For internal use or to decide codec of mode. */
+ base64_modifier_mask = 32 + 64 + 128,
+};
+
+/* Encoded size with or without padding. */
+static inline size_t base64_encoded_size(size_t len, int mode);
+
+/*
+ * Decoded size assuming no padding.
+ * If `len` does include padding, the actual size may be less
+ * when decoding, but never more.
+ */
+static inline size_t base64_decoded_size(size_t len);
+
+/*
+ * `dst` must hold ceil(len * 4 / 3) bytes.
+ * `src_len` points to the length of the source and is updated with the
+ * length consumed on both success and failure. If `dst_len` is not null
+ * it is used to store the resulting output length on both success and
+ * failure.
+ * `mode` selects the encoding alphabet, defaulting to RFC 4648 base64.
+ * Returns 0 on success.
+ *
+ * A terminal space can be added with `dst[dst_len++] = ' '` after the
+ * encode call. All non-alphabet can be used as terminators except the
+ * padding character '='. The following characters will work as
+ * terminator for all modes: { '\0', '\n', ' ', '\t' }. A terminator is
+ * optional when the source length is given to the decoder. Note that
+ * crockford also reserves a few extra characters for checksum but the
+ * checksum must be separate from the main buffer and is not supported
+ * by this library.
+ */
+static inline int base64_encode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode);
+
+/*
+ * Decodes according to mode while ignoring encoding modifiers.
+ * `src_len` and `dst_len` are optional pointers. If `src_len` is set it
+ * must contain the length of the input, otherwise the input must be
+ * terminated with a non-alphabet character or valid padding (a single
+ * padding character is accepted) - if the src_len output is needed but
+ * not the input due to guaranteed termination, then set it to
+ * (size_t)-1. `dst_len` must contain length of output buffer if present
+ * and parse will fail with BASE64_EMORE after decoding a block multiple
+ * if dst_len is exhausted - the parse can thus be resumed after
+ * draining destination. `src_len` and `dst_len` are updated with parsed
+ * and decoded length, when present, on both success and failure.
+ * Returns 0 on success. Invalid characters are not considered errors -
+ * they simply terminate the parse. However, if the termination is not
+ * at a block multiple or a valid partial block length, BASE64_ETAIL is
+ * returned with the output holding the last full block, if any.
+ * BASE64_EDIRTY is returned if a valid tail length holds non-zero unused bits.
+ */
+static inline int base64_decode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode);
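+
+/*
+ * A minimal round-trip sketch (buffer names and sizes are illustrative):
+ *
+ *     const uint8_t msg[] = "hello";
+ *     uint8_t enc[16], dec[16];
+ *     size_t enc_len, dec_len = sizeof(dec);
+ *     size_t src_len = 5;
+ *
+ *     base64_encode(enc, msg, &enc_len, &src_len, base64_mode_rfc4648);
+ *     src_len = enc_len;
+ *     base64_decode(dec, enc, &dec_len, &src_len, base64_mode_rfc4648);
+ *     // dec now holds "hello" in dec_len == 5 bytes (not null terminated).
+ */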
+
+static inline const char *base64_strerror(int err)
+{
+ switch (err) {
+ case BASE64_EOK: return "ok";
+ case BASE64_EARGS: return "invalid argument";
+ case BASE64_EMODE: return "invalid mode";
+ case BASE64_EMORE: return "destination full";
+ case BASE64_ETAIL: return "invalid tail length";
+ case BASE64_EDIRTY: return "invalid tail content";
+ default: return "unknown error";
+ }
+}
+
+static inline size_t base64_encoded_size(size_t len, int mode)
+{
+ size_t k = len % 3;
+ size_t n = (len * 4 / 3 + 3) & ~(size_t)3;
+ int pad = mode & base64_enc_modifier_padding;
+
+ if (!pad) {
+ switch (k) {
+ case 2:
+ n -= 1;
+ break;
+ case 1:
+ n -= 2;
+ break;
+ default:
+ break;
+ }
+ }
+ return n;
+}
+
+static inline size_t base64_decoded_size(size_t len)
+{
+ size_t k = len % 4;
+ size_t n = len / 4 * 3;
+
+ switch (k) {
+ case 3:
+ return n + 2;
+ case 2:
+ return n + 1;
+ case 1: /* Not valid without padding. */
+ case 0:
+ default:
+ return n;
+ }
+}
+
+static inline int base64_encode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode)
+{
+ const uint8_t *rfc4648_alphabet = (const uint8_t *)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ const uint8_t *url_alphabet = (const uint8_t *)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+ const uint8_t *T;
+ uint8_t *dst_base = dst;
+ int pad = mode & base64_enc_modifier_padding;
+ size_t len = 0;
+ int ret = BASE64_EMODE;
+
+ if (!src_len) {
+ ret = BASE64_EARGS;
+ goto done;
+ }
+ len = *src_len;
+ mode = mode & ~base64_modifier_mask;
+ switch (mode) {
+ case base64_mode_rfc4648:
+ T = rfc4648_alphabet;
+ break;
+ case base64_mode_url:
+ T = url_alphabet;
+ break;
+ default:
+ /* Invalid mode. */
+ goto done;
+ }
+
+ ret = BASE64_EOK;
+
+ /* Encodes 4 destination bytes from 3 source bytes. */
+ while (len >= 3) {
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30) | (src[1] >> 4)];
+ dst[2] = T[((src[1] << 2) & 0x3c) | (src[2] >> 6)];
+ dst[3] = T[((src[2] & 0x3f))];
+ len -= 3;
+ dst += 4;
+ src += 3;
+ }
+    /* Encodes 2 to 4 destination bytes from 1 to 2 remaining source bytes, if any. */
+ switch(len) {
+ case 2:
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30) | (src[1] >> 4)];
+ dst[2] = T[((src[1] << 2) & 0x3c)];
+ dst += 3;
+ if (pad) {
+ *dst++ = '=';
+ }
+ break;
+ case 1:
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30)];
+ dst += 2;
+ if (pad) {
+ *dst++ = '=';
+ *dst++ = '=';
+ }
+ break;
+ default:
+ pad = 0;
+ break;
+ }
+ len = 0;
+done:
+ if (dst_len) {
+ *dst_len = (size_t)(dst - dst_base);
+ }
+ if (src_len) {
+ *src_len -= len;
+ }
+ return ret;
+}
+
+static inline int base64_decode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode)
+{
+ static const uint8_t cinvalid = 64;
+ static const uint8_t cignore = 65;
+ static const uint8_t cpadding = 66;
+
+ /*
+ * 0..63: 6-bit encoded value.
+ * 64: flags non-alphabet symbols.
+ * 65: codes for ignored symbols.
+ * 66: codes for pad symbol '='.
+     * All codecs consider padding an optional terminator and, if present,
+     * consume as many pad bytes as possible up to block termination,
+     * but do not fail if a block is not full.
+ *
+ * We do not currently have any ignored characters but we might
+ * add spaces as per MIME spec, but assuming spaces only happen
+     * at block boundaries this is probably better handled by repeated
+ * parsing.
+ */
+ static const uint8_t base64rfc4648_decode[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ static const uint8_t base64url_decode[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ static const uint8_t base64rfc4648_decode_skipspace[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 64, 64, 65, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 65, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ static const uint8_t base64url_decode_skipspace[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 64, 64, 65, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 65, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ int ret = BASE64_EOK;
+ size_t i, k;
+ uint8_t hold[4];
+ uint8_t *dst_base = dst;
+ size_t limit = (size_t)-1;
+ size_t len = (size_t)-1, mark;
+ const uint8_t *T = base64rfc4648_decode;
+ int skipspace = mode & base64_dec_modifier_skipspace;
+
+ if (src_len) {
+ len = *src_len;
+ }
+ mark = len;
+ mode = mode & ~base64_modifier_mask;
+ switch (mode) {
+ case base64_mode_rfc4648:
+ T = skipspace ? base64rfc4648_decode_skipspace : base64rfc4648_decode;
+ break;
+ case base64_mode_url:
+ T = skipspace ? base64url_decode_skipspace : base64url_decode;
+ break;
+ default:
+ ret = BASE64_EMODE;
+ goto done;
+ }
+
+ if (dst_len && *dst_len > 0) {
+ limit = *dst_len;
+ }
+ while(limit > 0) {
+ for (i = 0; i < 4; ++i) {
+ if (len == i) {
+ k = i;
+ len -= i;
+ goto tail;
+ }
+ if ((hold[i] = T[src[i]]) >= cinvalid) {
+ if (hold[i] == cignore) {
+ ++src;
+ --len;
+ --i;
+ continue;
+ }
+ k = i;
+            /* Strip padding and skip ignored characters within the padding, if present. */
+ if (hold[i] == cpadding) {
+ ++i;
+ while (i < len && i < 8) {
+ if (T[src[i]] != cpadding && T[src[i]] != cignore) {
+ break;
+ }
+ ++i;
+ }
+ }
+ len -= i;
+ goto tail;
+ }
+ }
+ if (limit < 3) {
+ goto more;
+ }
+ dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
+ dst[1] = (uint8_t)((hold[1] << 4) | (hold[2] >> 2));
+ dst[2] = (uint8_t)((hold[2] << 6) | (hold[3]));
+ dst += 3;
+ src += 4;
+ limit -= 3;
+ len -= 4;
+ mark = len;
+ }
+done:
+ if (dst_len) {
+ *dst_len = (size_t)(dst - dst_base);
+ }
+ if (src_len) {
+ *src_len -= mark;
+ }
+ return ret;
+
+tail:
+ switch (k) {
+ case 0:
+ break;
+ case 2:
+ if ((hold[1] << 4) & 0xff) {
+ goto dirty;
+ }
+ if (limit < 1) {
+ goto more;
+ }
+ dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
+ dst += 1;
+ break;
+ case 3:
+ if ((hold[2] << 6) & 0xff) {
+ goto dirty;
+ }
+ if (limit < 2) {
+ goto more;
+ }
+ dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
+ dst[1] = (uint8_t)((hold[1] << 4) | (hold[2] >> 2));
+ dst += 2;
+ break;
+ default:
+ ret = BASE64_ETAIL;
+ goto done;
+ }
+ mark = len;
+ goto done;
+dirty:
+ ret = BASE64_EDIRTY;
+ goto done;
+more:
+ ret = BASE64_EMORE;
+ goto done;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PBASE64_H */
diff --git a/include/flatcc/portable/pcrt.h b/include/flatcc/portable/pcrt.h
new file mode 100644
index 0000000..0226be6
--- /dev/null
+++ b/include/flatcc/portable/pcrt.h
@@ -0,0 +1,48 @@
+#ifndef PCRT_H
+#define PCRT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Assertions and pointer violations in debug mode may trigger a dialog
+ * on Windows. When running headless this is not helpful, but
+ * unfortunately it cannot be disabled with a compiler option so code
+ * must be injected into the runtime early in the main function.
+ * A call to the provided `init_headless_crt()` macro does this in
+ * a portable manner.
+ *
+ * See also:
+ * https://stackoverflow.com/questions/13943665/how-can-i-disable-the-debug-assertion-dialog-on-windows
+ */
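+
+/*
+ * A minimal usage sketch: call the macro first thing in main so CRT
+ * assertion dialogs become stderr output followed by exit(1) when
+ * running headless; it is a no-op on non-Windows platforms.
+ *
+ *     int main(int argc, char *argv[])
+ *     {
+ *         init_headless_crt();
+ *         // ... run tests ...
+ *         return 0;
+ *     }
+ */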
+
+#if defined(_WIN32)
+
+#include <crtdbg.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static int _portable_msvc_headless_report_hook(int reportType, char *message, int *returnValue)
+{
+ fprintf(stderr, "CRT[%d]: %s\n", reportType, message);
+ *returnValue = 1;
+ exit(1);
+ return 1;
+}
+
+#define init_headless_crt() _CrtSetReportHook(_portable_msvc_headless_report_hook)
+
+#else
+
+#define init_headless_crt() ((void)0)
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PCRT_H */
diff --git a/include/flatcc/portable/pdiagnostic.h b/include/flatcc/portable/pdiagnostic.h
new file mode 100644
index 0000000..b5294f3
--- /dev/null
+++ b/include/flatcc/portable/pdiagnostic.h
@@ -0,0 +1,85 @@
+ /* There is intentionally no include guard in this file. */
+
+
+/*
+ * Usage: optionally disable any of these before including.
+ *
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+ * #define PDIAGNOSTIC_IGNORE_UNUSED // all of the above
+ *
+ * #include "pdiagnostic.h"
+ *
+ * Alternatively use #include "pdiagnostic_push/pop.h"
+ */
+
+#ifdef _MSC_VER
+#pragma warning(disable: 4668) /* preprocessor name not defined */
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_MSVC) && defined(_MSC_VER)
+#define PDIAGNOSTIC_AWARE_MSVC 1
+#elif !defined(PDIAGNOSTIC_AWARE_MSVC)
+#define PDIAGNOSTIC_AWARE_MSVC 0
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_CLANG) && defined(__clang__)
+#define PDIAGNOSTIC_AWARE_CLANG 1
+#elif !defined(PDIAGNOSTIC_AWARE_CLANG)
+#define PDIAGNOSTIC_AWARE_CLANG 0
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_GCC) && defined(__GNUC__) && !defined(__clang__)
+/* Can disable some warnings even if push is not available (gcc-4.2 vs gcc-4.7) */
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
+#define PDIAGNOSTIC_AWARE_GCC 1
+#endif
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_GCC)
+#define PDIAGNOSTIC_AWARE_GCC 0
+#endif
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-function"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_MSVC
+#pragma warning(disable: 4101) /* unused local variable */
+#elif PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-variable"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-parameter"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+
+#undef PDIAGNOSTIC_IGNORE_UNUSED
+
+#if defined (__cplusplus) && __cplusplus < 201103L
+#if PDIAGNOSTIC_AWARE_CLANG
+/* Needed for < C++11 clang C++ static_assert */
+#pragma clang diagnostic ignored "-Wc11-extensions"
+/* Needed for empty macro arguments. */
+#pragma clang diagnostic ignored "-Wc99-extensions"
+/* Needed for trailing commas. */
+#pragma clang diagnostic ignored "-Wc++11-extensions"
+#endif
+#endif
+
diff --git a/include/flatcc/portable/pdiagnostic_pop.h b/include/flatcc/portable/pdiagnostic_pop.h
new file mode 100644
index 0000000..f5e16b3
--- /dev/null
+++ b/include/flatcc/portable/pdiagnostic_pop.h
@@ -0,0 +1,20 @@
+#if defined(PDIAGNOSTIC_PUSHED_MSVC)
+#if PDIAGNOSTIC_PUSHED_MSVC
+#pragma warning( pop )
+#endif // PDIAGNOSTIC_PUSHED_MSVC
+#undef PDIAGNOSTIC_PUSHED_MSVC
+#endif // defined(PDIAGNOSTIC_PUSHED_MSVC)
+
+#if defined(PDIAGNOSTIC_PUSHED_CLANG)
+#if PDIAGNOSTIC_PUSHED_CLANG
+#pragma clang diagnostic pop
+#endif // PDIAGNOSTIC_PUSHED_CLANG
+#undef PDIAGNOSTIC_PUSHED_CLANG
+#endif // defined(PDIAGNOSTIC_PUSHED_CLANG)
+
+#if defined(PDIAGNOSTIC_PUSHED_GCC)
+#if PDIAGNOSTIC_PUSHED_GCC
+#pragma GCC diagnostic pop
+#endif // PDIAGNOSTIC_PUSHED_GCC
+#undef PDIAGNOSTIC_PUSHED_GCC
+#endif // defined(PDIAGNOSTIC_PUSHED_GCC)
diff --git a/include/flatcc/portable/pdiagnostic_push.h b/include/flatcc/portable/pdiagnostic_push.h
new file mode 100644
index 0000000..66586d7
--- /dev/null
+++ b/include/flatcc/portable/pdiagnostic_push.h
@@ -0,0 +1,51 @@
+/*
+ * See also comment in "pdiagnostic.h"
+ *
+ * e.g.
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #include "pdiagnostic_push.h"
+ * ...
+ * #include "pdiagnostic_pop.h"
+ * <eof>
+ *
+ * or if push pop isn't desired:
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #include "pdiagnostic.h"
+ * ...
+ * <eof>
+ *
+ *
+ * Some of these warnings cannot be ignored
+ * at the #pragma level, but might in the future.
+ * Use compiler switches like -Wno-unused-function
+ * to work around this.
+ */
+
+#if defined(_MSC_VER)
+#pragma warning( push )
+#define PDIAGNOSTIC_PUSHED_MSVC 1
+#else
+#define PDIAGNOSTIC_PUSHED_MSVC 0
+#endif
+
+#if defined(__clang__)
+#pragma clang diagnostic push
+#define PDIAGNOSTIC_PUSHED_CLANG 1
+#else
+#define PDIAGNOSTIC_PUSHED_CLANG 0
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#define PDIAGNOSTIC_PUSHED_GCC 1
+#else
+#define PDIAGNOSTIC_PUSHED_GCC 0
+#endif // GNUC >= 4.6
+#else
+#define PDIAGNOSTIC_PUSHED_GCC 0
+#endif // defined(__GNUC__) && !defined(__clang__)
+
+#include "pdiagnostic.h"
diff --git a/include/flatcc/portable/pendian.h b/include/flatcc/portable/pendian.h
new file mode 100644
index 0000000..122ba8e
--- /dev/null
+++ b/include/flatcc/portable/pendian.h
@@ -0,0 +1,206 @@
+#ifndef PENDIAN_H
+#define PENDIAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Defines platform optimized versions (as per Linux <endian.h>) of:
+ *
+ * le16toh, le32toh, le64toh, be16toh, be32toh, be64toh
+ * htole16, htole32, htole64, htobe16, htobe32, htobe64
+ *
+ * Falls back to auto-detect endian conversion which is also fast
+ * if fast byteswap operation was detected.
+ *
+ * Also defines platform optimized:
+ *
+ * bswap16, bswap32, bswap64,
+ *
+ * with fall-back to shift-or implementation.
+ *
+ * For convenience also defines:
+ *
+ * le8toh, be8toh, htole8, htobe8
+ * bswap8
+ *
+ * The convenience functions make it simpler to define conversion macros
+ * based on type size.
+ *
+ * NOTE: this implementation expects arguments with no side-effects and
+ * with appropriately sized unsigned arguments. These are expected to be
+ * used with typesafe wrappers.
+ */
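+
+/*
+ * A small sketch of the intended use: read a 32-bit little-endian field
+ * from a wire buffer into native byte order (buf is assumed to point to
+ * at least 4 readable bytes).
+ *
+ *     uint32_t raw, value;
+ *     memcpy(&raw, buf, sizeof(raw));   // avoid unaligned access
+ *     value = le32toh(raw);             // no-op on little-endian hosts
+ */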
+
+#ifndef UINT8_MAX
+#include "pstdint.h"
+#endif
+
+#if defined(__linux__)
+#include <endian.h>
+#elif defined(__OpenBSD__) || defined(__FreeBSD__)
+#include <sys/endian.h>
+#endif
+
+#include "pendian_detect.h"
+
+#if defined(_MSC_VER)
+#if _MSC_VER >= 1300
+#include <stdlib.h>
+#define bswap16 _byteswap_ushort
+#define bswap32 _byteswap_ulong
+#define bswap64 _byteswap_uint64
+#endif
+#elif defined(__clang__)
+#if __has_builtin(__builtin_bswap16)
+#ifndef bswap16
+#define bswap16 __builtin_bswap16
+#endif
+#endif
+#if __has_builtin(__builtin_bswap32)
+#ifndef bswap32
+#define bswap32 __builtin_bswap32
+#endif
+#endif
+#if __has_builtin(__builtin_bswap64)
+#ifndef bswap64
+#define bswap64 __builtin_bswap64
+#endif
+#endif
+#elif defined(__OpenBSD__) || defined(__FreeBSD__)
+#ifndef bswap16
+#define bswap16 swap16
+#endif
+#ifndef bswap32
+#define bswap32 swap32
+#endif
+#ifndef bswap64
+#define bswap64 swap64
+#endif
+#elif defined(__GNUC__) /* Supported since at least GCC 4.4 */
+#ifndef bswap32
+#define bswap32 __builtin_bswap32
+#endif
+#ifndef bswap64
+#define bswap64 __builtin_bswap64
+#endif
+#endif
+
+#ifndef bswap16
+#define bswap16(v) \
+ (((uint16_t)(v) << 8) | ((uint16_t)(v) >> 8))
+#endif
+
+#ifndef bswap32
+#define bswap32(v) \
+ ((((uint32_t)(v) << 24)) \
+ | (((uint32_t)(v) << 8) & UINT32_C(0x00FF0000)) \
+ | (((uint32_t)(v) >> 8) & UINT32_C(0x0000FF00)) \
+ | (((uint32_t)(v) >> 24)))
+#endif
+
+#ifndef bswap64
+#define bswap64(v) \
+ ((((uint64_t)(v) << 56)) \
+ | (((uint64_t)(v) << 40) & UINT64_C(0x00FF000000000000)) \
+ | (((uint64_t)(v) << 24) & UINT64_C(0x0000FF0000000000)) \
+ | (((uint64_t)(v) << 8) & UINT64_C(0x000000FF00000000)) \
+ | (((uint64_t)(v) >> 8) & UINT64_C(0x00000000FF000000)) \
+ | (((uint64_t)(v) >> 24) & UINT64_C(0x0000000000FF0000)) \
+ | (((uint64_t)(v) >> 40) & UINT64_C(0x000000000000FF00)) \
+ | (((uint64_t)(v) >> 56)))
+#endif
+
+#ifndef bswap8
+#define bswap8(v) ((uint8_t)(v))
+#endif
+
+#if !defined(le16toh) && defined(letoh16)
+#define le16toh letoh16
+#define le32toh letoh32
+#define le64toh letoh64
+#endif
+
+#if !defined(be16toh) && defined(betoh16)
+#define be16toh betoh16
+#define be32toh betoh32
+#define be64toh betoh64
+#endif
+
+/* Assume it goes for all. */
+#if !defined(le16toh)
+
+#if defined(__LITTLE_ENDIAN__)
+
+#define le16toh(v) (v)
+#define le32toh(v) (v)
+#define le64toh(v) (v)
+
+#define htole16(v) (v)
+#define htole32(v) (v)
+#define htole64(v) (v)
+
+#define be16toh(v) bswap16(v)
+#define be32toh(v) bswap32(v)
+#define be64toh(v) bswap64(v)
+
+#define htobe16(v) bswap16(v)
+#define htobe32(v) bswap32(v)
+#define htobe64(v) bswap64(v)
+
+#elif defined(__BIG_ENDIAN__)
+
+#define le16toh(v) bswap16(v)
+#define le32toh(v) bswap32(v)
+#define le64toh(v) bswap64(v)
+
+#define htole16(v) bswap16(v)
+#define htole32(v) bswap32(v)
+#define htole64(v) bswap64(v)
+
+#define be16toh(v) (v)
+#define be32toh(v) (v)
+#define be64toh(v) (v)
+
+#define htobe16(v) (v)
+#define htobe32(v) (v)
+#define htobe64(v) (v)
+
+#else
+
+static const int __pendian_test = 1;
+
+#define le16toh(v) (*(char *)&__pendian_test ? (v) : bswap16(v))
+#define le32toh(v) (*(char *)&__pendian_test ? (v) : bswap32(v))
+#define le64toh(v) (*(char *)&__pendian_test ? (v) : bswap64(v))
+
+#define htole16(v) (*(char *)&__pendian_test ? (v) : bswap16(v))
+#define htole32(v) (*(char *)&__pendian_test ? (v) : bswap32(v))
+#define htole64(v) (*(char *)&__pendian_test ? (v) : bswap64(v))
+
+#define be16toh(v) (*(char *)&__pendian_test ? bswap16(v) : (v))
+#define be32toh(v) (*(char *)&__pendian_test ? bswap32(v) : (v))
+#define be64toh(v) (*(char *)&__pendian_test ? bswap64(v) : (v))
+
+#define htobe16(v) (*(char *)&__pendian_test ? bswap16(v) : (v))
+#define htobe32(v) (*(char *)&__pendian_test ? bswap32(v) : (v))
+#define htobe64(v) (*(char *)&__pendian_test ? bswap64(v) : (v))
+
+#endif
+
+#endif /* le16toh */
+
+/* Helpers not part of Linux <endian.h> */
+#if !defined(le8toh)
+#define le8toh(n) (n)
+#define htole8(n) (n)
+#define be8toh(n) (n)
+#define htobe8(n) (n)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PENDIAN_H */
diff --git a/include/flatcc/portable/pendian_detect.h b/include/flatcc/portable/pendian_detect.h
new file mode 100644
index 0000000..1dd62c0
--- /dev/null
+++ b/include/flatcc/portable/pendian_detect.h
@@ -0,0 +1,118 @@
+/*
+ * Uses various known flags to decide endianness and defines:
+ *
+ * __LITTLE_ENDIAN__ or __BIG_ENDIAN__ if not already defined
+ *
+ * and also defines
+ *
+ * __BYTE_ORDER__ to either __ORDER_LITTLE_ENDIAN__ or
+ * __ORDER_BIG_ENDIAN__ if not already defined
+ *
+ * If none of these could be set, __UNKNOWN_ENDIAN__ is defined,
+ * which is not a known flag. If __BYTE_ORDER__ is defined but
+ * not big or little endian, __UNKNOWN_ENDIAN__ is also defined.
+ *
+ * Note: Some systems define __BYTE_ORDER without __ at the end
+ * - this will be mapped to __BYTE_ORDER__.
+ */
+
+#ifndef PENDIAN_DETECT
+#define PENDIAN_DETECT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __ORDER_LITTLE_ENDIAN__
+#define __ORDER_LITTLE_ENDIAN__ 1234
+#endif
+
+#ifndef __ORDER_BIG_ENDIAN__
+#define __ORDER_BIG_ENDIAN__ 4321
+#endif
+
+#ifdef __BYTE_ORDER__
+
+#if defined(__LITTLE_ENDIAN__) && __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+#error __LITTLE_ENDIAN__ inconsistent with __BYTE_ORDER__
+#endif
+
+#if defined(__BIG_ENDIAN__) && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+#error __BIG_ENDIAN__ inconsistent with __BYTE_ORDER__
+#endif
+
+#else /* __BYTE_ORDER__ */
+
+
+#if \
+ defined(__LITTLE_ENDIAN__) || \
+ (defined(__BYTE_ORDER) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN) || \
+ defined(__ARMEL__) || defined(__THUMBEL__) || \
+ defined(__AARCH64EL__) || \
+ (defined(_MSC_VER) && defined(_M_ARM)) || \
+ defined(_MIPSEL) || defined(__MIPSEL) || defined(__MIPSEL__) || \
+ defined(_M_X64) || defined(_M_IX86) || defined(_M_I86) || \
+ defined(__i386__) || defined(__alpha__) || \
+ defined(__ia64) || defined(__ia64__) || \
+ defined(_M_IA64) || defined(_M_ALPHA) || \
+ defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
+ defined(__bfin__)
+
+#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
+
+#endif
+
+#if \
+ defined (__BIG_ENDIAN__) || \
+ (defined(__BYTE_ORDER) && __BYTE_ORDER == __ORDER_BIG_ENDIAN) || \
+ defined(__ARMEB__) || defined(THUMBEB__) || defined (__AARCH64EB__) || \
+ defined(_MIPSEB) || defined(__MIPSEB) || defined(__MIPSEB__) || \
+ defined(__sparc) || defined(__sparc__) || \
+ defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || \
+ defined(__hpux) || defined(__hppa) || defined(__s390__)
+
+#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__
+
+#endif
+
+#endif /* __BYTE_ORDER__ */
+
+#ifdef __BYTE_ORDER__
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+#ifndef __LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN__ 1
+#endif
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+#ifndef __BIG_ENDIAN__
+#define __BIG_ENDIAN__ 1
+#endif
+
+#else
+
+/*
+ * Custom extension - we only define __BYTE_ORDER__ if known big or little.
+ * User code that understands __BYTE_ORDER__ may also assume unknown if
+ * it is not defined by now - this will allow other endian formats than
+ * big or little when supported by compiler.
+ */
+#ifndef __UNKNOWN_ENDIAN__
+#define __UNKNOWN_ENDIAN__ 1
+#endif
+
+#endif
+#endif /* __BYTE_ORDER__ */
+
+#if defined(__LITTLE_ENDIAN__) && defined(__BIG_ENDIAN__)
+#error conflicting definitions of __LITTLE_ENDIAN__ and __BIG_ENDIAN__
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PENDIAN_DETECT */
diff --git a/include/flatcc/portable/pinline.h b/include/flatcc/portable/pinline.h
new file mode 100644
index 0000000..f4f8f27
--- /dev/null
+++ b/include/flatcc/portable/pinline.h
@@ -0,0 +1,19 @@
+#ifndef PINLINE_H
+#define PINLINE_H
+
+#ifndef __cplusplus
+
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+#elif _MSC_VER >= 1500 /* MSVC 9 or newer */
+#undef inline
+#define inline __inline
+#elif __GNUC__ >= 3 /* GCC 3 or newer */
+#define inline __inline
+#else /* Unknown or ancient */
+#define inline
+#endif
+
+#endif /* __cplusplus */
+
+#endif /* PINLINE_H */
diff --git a/include/flatcc/portable/pinttypes.h b/include/flatcc/portable/pinttypes.h
new file mode 100644
index 0000000..a1be9df
--- /dev/null
+++ b/include/flatcc/portable/pinttypes.h
@@ -0,0 +1,52 @@
+#ifndef PINTTYPES_H
+#define PINTTYPES_H
+
+#ifndef PRId16
+
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+#include <inttypes.h>
+#else
+
+/*
+ * This is not a complete implementation of <inttypes.h>, just the most
+ * useful printf modifiers.
+ */
+
+#include "pstdint.h"
+
+#ifndef PRINTF_INT64_MODIFIER
+#error "please define PRINTF_INT64_MODIFIER"
+#endif
+
+#ifndef PRId64
+#define PRId64 PRINTF_INT64_MODIFIER "d"
+#define PRIu64 PRINTF_INT64_MODIFIER "u"
+#define PRIx64 PRINTF_INT64_MODIFIER "x"
+#endif
+
+#ifndef PRINTF_INT32_MODIFIER
+#define PRINTF_INT32_MODIFIER "l"
+#endif
+
+#ifndef PRId32
+#define PRId32 PRINTF_INT32_MODIFIER "d"
+#define PRIu32 PRINTF_INT32_MODIFIER "u"
+#define PRIx32 PRINTF_INT32_MODIFIER "x"
+#endif
+
+#ifndef PRINTF_INT16_MODIFIER
+#define PRINTF_INT16_MODIFIER "h"
+#endif
+
+#ifndef PRId16
+#define PRId16 PRINTF_INT16_MODIFIER "d"
+#define PRIu16 PRINTF_INT16_MODIFIER "u"
+#define PRIx16 PRINTF_INT16_MODIFIER "x"
+#endif
+
+# endif /* __STDC__ */
+
+#endif /* PRId16 */
+
+#endif /* PINTTYPES */
diff --git a/include/flatcc/portable/portable.h b/include/flatcc/portable/portable.h
new file mode 100644
index 0000000..7a6a484
--- /dev/null
+++ b/include/flatcc/portable/portable.h
@@ -0,0 +1,2 @@
+/* portable.h is widely used, so we redirect to a less conflicting name. */
+#include "portable_basic.h"
diff --git a/include/flatcc/portable/portable_basic.h b/include/flatcc/portable/portable_basic.h
new file mode 100644
index 0000000..0396f3d
--- /dev/null
+++ b/include/flatcc/portable/portable_basic.h
@@ -0,0 +1,25 @@
+#ifndef PORTABLE_BASIC_H
+#define PORTABLE_BASIC_H
+
+/*
+ * Basic features needed to make compilers support the most common modern C
+ * features, with endian / unaligned read support as well.
+ *
+ * It is not assumed that this file is always included.
+ * Other include files are independent or include what they need.
+ */
+
+#include "pversion.h"
+#include "pwarnings.h"
+
+/* Features that ought to be supported by C11, but some aren't. */
+#include "pinttypes.h"
+#include "pstdalign.h"
+#include "pinline.h"
+#include "pstatic_assert.h"
+
+/* These are not supported by C11 and are general platform abstractions. */
+#include "pendian.h"
+#include "punaligned.h"
+
+#endif /* PORTABLE_BASIC_H */
diff --git a/include/flatcc/portable/pparsefp.h b/include/flatcc/portable/pparsefp.h
new file mode 100644
index 0000000..7fa1c24
--- /dev/null
+++ b/include/flatcc/portable/pparsefp.h
@@ -0,0 +1,226 @@
+#ifndef PPARSEFP_H
+#define PPARSEFP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h> /* memcpy */
+
+/*
+ * Parses a float or double number and returns a pointer to the end of
+ * the parse if successful. The length argument is of limited value due to dependency
+ * on `strtod` - buf[len] must be accessible and must not be part of
+ * a valid number, including hex float numbers.
+ *
+ * Unlike strtod, whitespace is not parsed.
+ *
+ * May return:
+ * - null on error,
+ * - buffer start if first character does not start a number,
+ * - or end of parse on success.
+ *
+ */
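+
+/*
+ * A minimal usage sketch (buf/len are assumed inputs with buf[len]
+ * accessible as described above):
+ *
+ *     double v;
+ *     const char *end = parse_double(buf, len, &v);
+ *     if (end == 0) {
+ *         // parse error, e.g. malformed number
+ *     } else if (end == buf) {
+ *         // first character does not start a number
+ *     } else {
+ *         // v holds the parsed value, end points past it
+ *     }
+ */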
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+/*
+ * isinf is needed in order to stay compatible with strtod's
+ * over/underflow handling but isinf has some portability issues.
+ *
+ * Use the parse_double/float_is_range_error instead of isinf directly.
+ * This ensures optimizations can be added when not using strtod.
+ *
+ * On gcc, clang and msvc we can use isinf or equivalent directly.
+ * Other compilers such as xlc may require linking with -lm which may not
+ * be convenient so a default isinf is provided. If isinf is available
+ * and there is a noticeable performance issue, define
+ * `PORTABLE_USE_ISINF`. This flag also affects isnan.
+ */
+#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || defined(PORTABLE_USE_ISINF)
+#include <math.h>
+#if defined(_MSC_VER) && !defined(isinf)
+#include <float.h>
+#define isnan _isnan
+#define isinf(x) (!_finite(x))
+#endif
+/*
+ * clang-3 through clang-8 but not clang-9 issues incorrect precision
+ * loss warning with -Wconversion flag when cast is absent.
+ */
+#if defined(__clang__)
+#if __clang_major__ >= 3 && __clang_major__ <= 8
+#define parse_double_isinf(x) isinf((float)x)
+#define parse_double_isnan(x) isnan((float)x)
+#endif
+#endif
+#if !defined(parse_double_isinf)
+#define parse_double_isinf isinf
+#endif
+#define parse_float_isinf isinf
+
+#else
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/* Avoid linking with libmath but depends on float/double being IEEE754 */
+static inline int parse_double_isinf(const double x)
+{
+ uint64_t u64x;
+
+ memcpy(&u64x, &x, sizeof(u64x));
+ return (u64x & 0x7fffffff00000000ULL) == 0x7ff0000000000000ULL;
+}
+
+static inline int parse_float_isinf(float x)
+{
+ uint32_t u32x;
+
+ memcpy(&u32x, &x, sizeof(u32x));
+ return (u32x & 0x7fffffff) == 0x7f800000;
+}
+
+#endif
+
+#if !defined(parse_double_isnan)
+#define parse_double_isnan isnan
+#endif
+#if !defined(parse_float_isnan)
+#define parse_float_isnan isnan
+#endif
+
+/* Returns 0 when in range, 1 on overflow, and -1 on underflow. */
+static inline int parse_double_is_range_error(double x)
+{
+ return parse_double_isinf(x) ? (x < 0.0 ? -1 : 1) : 0;
+}
+
+static inline int parse_float_is_range_error(float x)
+{
+ return parse_float_isinf(x) ? (x < 0.0f ? -1 : 1) : 0;
+}
+
+#ifndef PORTABLE_USE_GRISU3
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+#if PORTABLE_USE_GRISU3
+#include "grisu3_parse.h"
+#endif
+
+#ifdef grisu3_parse_double_is_defined
+static inline const char *parse_double(const char *buf, size_t len, double *result)
+{
+ return grisu3_parse_double(buf, len, result);
+}
+#else
+#include <stdio.h>
+static inline const char *parse_double(const char *buf, size_t len, double *result)
+{
+ char *end;
+
+ (void)len;
+ *result = strtod(buf, &end);
+ return end;
+}
+#endif
+
+static inline const char *parse_float(const char *buf, size_t len, float *result)
+{
+ const char *end;
+ double v;
+ union { uint32_t u32; float f32; } inf;
+ inf.u32 = 0x7f800000;
+
+ end = parse_double(buf, len, &v);
+ *result = (float)v;
+ if (parse_float_isinf(*result)) {
+ *result = v < 0 ? -inf.f32 : inf.f32;
+ return buf;
+ }
+ return end;
+}
+
+/* Inspired by https://bitbashing.io/comparing-floats.html */
+
+/* Return signed ULP distance or INT64_MAX if any value is nan. */
+static inline int64_t parse_double_compare(const double x, const double y)
+{
+ int64_t i64x, i64y;
+
+ if (x == y) return 0;
+ if (parse_double_isnan(x)) return INT64_MAX;
+ if (parse_double_isnan(y)) return INT64_MAX;
+ memcpy(&i64x, &x, sizeof(i64x));
+ memcpy(&i64y, &y, sizeof(i64y));
+ if ((i64x < 0) != (i64y < 0)) return INT64_MAX;
+ return i64x - i64y;
+}
+
+/* Same as double, but INT32_MAX if nan. */
+static inline int32_t parse_float_compare(const float x, const float y)
+{
+ int32_t i32x, i32y;
+
+ if (x == y) return 0;
+ if (parse_float_isnan(x)) return INT32_MAX;
+ if (parse_float_isnan(y)) return INT32_MAX;
+ memcpy(&i32x, &x, sizeof(i32x));
+ memcpy(&i32y, &y, sizeof(i32y));
+ if ((i32x < 0) != (i32y < 0)) return INT32_MAX;
+ return i32x - i32y;
+}
+
+/*
+ * Returns the absolute distance in floating point ULP (representational bit difference).
+ * Uses a signed return value so that INT64_MAX and INT32_MAX indicate NaN, similar to
+ * the compare function.
+ */
+static inline int64_t parse_double_dist(const double x, const double y)
+{
+ uint64_t m64;
+ int64_t i64;
+
+ i64 = parse_double_compare(x, y);
+ /* Absolute integer value of compare. */
+ m64 = (uint64_t)-(i64 < 0);
+ return (int64_t)(((uint64_t)i64 + m64) ^ m64);
+}
+
+/* Same as double, but INT32_MAX if NaN. */
+static inline int32_t parse_float_dist(const float x, const float y)
+{
+ uint32_t m32;
+ int32_t i32;
+
+ i32 = parse_float_compare(x, y);
+ /* Absolute integer value of compare. */
+ m32 = (uint32_t)-(i32 < 0);
+ return (int32_t)(((uint32_t)i32 + m32) ^ m32);
+}
+
+/*
+ * Returns 1 if no value is NaN, and the difference is at most one ULP (1 bit), and the
+ * sign is the same, and 0 otherwise.
+ */
+static inline int parse_double_is_equal(const double x, const double y)
+{
+ return parse_double_dist(x, y) >> 1 == 0;
+}
+
+/* Same as double, but at lower precision. */
+static inline int parse_float_is_equal(const float x, const float y)
+{
+ return parse_float_dist(x, y) >> 1 == 0;
+}
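+
+/*
+ * A small sketch of the intended use of the ULP helpers: compare a
+ * parsed value against an expected constant while tolerating last-bit
+ * rounding differences a strtod-based fallback may introduce.
+ *
+ *     double expected = 0.1, parsed;
+ *     parse_double("0.1", 3, &parsed);
+ *     if (parse_double_is_equal(parsed, expected)) {
+ *         // values are identical or within one ULP of each other
+ *     }
+ */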
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPARSEFP_H */
diff --git a/include/flatcc/portable/pparseint.h b/include/flatcc/portable/pparseint.h
new file mode 100644
index 0000000..96cc99f
--- /dev/null
+++ b/include/flatcc/portable/pparseint.h
@@ -0,0 +1,374 @@
+#ifndef PPARSEINT_H
+#define PPARSEINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Type specific integer parsers:
+ *
+ * const char *
+ * parse_<type-name>(const char *buf, size_t len, <type> *value, int *status);
+ *
+ * parse_uint64, parse_int64
+ * parse_uint32, parse_int32
+ * parse_uint16, parse_int16
+ * parse_uint8, parse_int8
+ * parse_ushort, parse_short
+ * parse_uint, parse_int
+ * parse_ulong, parse_long
+ *
+ * Leading space must be stripped in advance. Status argument can be
+ * null.
+ *
+ * Returns pointer to end of match and a non-negative status code
+ * on success (0 for unsigned, 1 for signed):
+ *
+ * PARSE_INTEGER_UNSIGNED
+ * PARSE_INTEGER_SIGNED
+ *
+ * Returns null with a negative status code and unmodified value on
+ * invalid integer formats:
+ *
+ * PARSE_INTEGER_OVERFLOW
+ * PARSE_INTEGER_UNDERFLOW
+ * PARSE_INTEGER_INVALID
+ *
+ * Returns input buffer with negative status code and unmodified value
+ * if first character does not start an integer (not a sign or a digit).
+ *
+ * PARSE_INTEGER_UNMATCHED
+ * PARSE_INTEGER_END
+ *
+ * The signed parsers only work on two's complement architectures.
+ *
+ * Note: the corresponding parse_float and parse_double parsers do not
+ * have a status argument because +/-Inf and NaN are conventionally used
+ * for this.
+ */
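+
+/*
+ * A minimal usage sketch (buf/len are assumed inputs with leading space
+ * already stripped by the caller):
+ *
+ *     int32_t v;
+ *     int status;
+ *     const char *end = parse_int32(buf, len, &v, &status);
+ *     if (status >= 0) {
+ *         // v is valid, end points past the digits
+ *     } else if (status == PARSE_INTEGER_UNMATCHED) {
+ *         // buf does not start with an integer
+ *     } else {
+ *         // overflow, underflow or invalid format
+ *     }
+ */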
+
+#include "limits.h"
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define PARSE_INTEGER_UNSIGNED 0
+#define PARSE_INTEGER_SIGNED 1
+#define PARSE_INTEGER_OVERFLOW -1
+#define PARSE_INTEGER_UNDERFLOW -2
+#define PARSE_INTEGER_INVALID -3
+#define PARSE_INTEGER_UNMATCHED -4
+#define PARSE_INTEGER_END -5
+
+/*
+ * Generic integer parser that holds 64-bit unsigned values and stores
+ * sign separately. Leading space is not valid.
+ *
+ * Note: this function differs from the type specific parsers like
+ * parse_int64 by not negating the value when there is a sign. It
+ * differs from parse_uint64 by being able to return a negative
+ * UINT64_MAX successfully.
+ *
+ * This parser is used by all type specific integer parsers.
+ *
+ * Status argument can be null.
+ */
+static const char *parse_integer(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ uint64_t x0, x = 0;
+ const char *k, *end = buf + len;
+ int sign, status_;
+
+ if (!status) {
+ status = &status_;
+ }
+ if (buf == end) {
+ *status = PARSE_INTEGER_END;
+ return buf;
+ }
+ k = buf;
+ sign = *buf == '-';
+ buf += sign;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ x0 = x;
+ x = x * 10 + (uint64_t)(*buf - '0');
+ if (x0 > x) {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ /* No number was matched, but it isn't an invalid number either. */
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf;
+ }
+ if (buf == k + sign) {
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+ if (buf != end)
+ switch (*buf) {
+ case 'e': case 'E': case '.': case 'p': case 'P':
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+ *value = x;
+ *status = sign;
+ return buf;
+}
+
+/*
+ * Parse hex values like 0xff, -0xff, 0XdeAdBeaf42, cannot be trailed by '.', 'p', or 'P'.
+ * Overflows if string is more than 16 valid hex digits. Otherwise similar to parse_integer.
+ */
+static const char *parse_hex_integer(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ uint64_t x = 0;
+ const char *k, *k2, *end = buf + len;
+ int sign, status_;
+ unsigned char c;
+
+ if (!status) {
+ status = &status_;
+ }
+ if (buf == end) {
+ *status = PARSE_INTEGER_END;
+ return buf;
+ }
+ sign = *buf == '-';
+ buf += sign;
+ if (end - buf < 2 || buf[0] != '0' || (buf[1] | 0x20) != 'x') {
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf - sign;
+ }
+ buf += 2;
+ k = buf;
+ k2 = end;
+ if (end - buf > 16) {
+ k2 = buf + 16;
+ }
+ while (buf != k2) {
+ c = (unsigned char)*buf;
+ if (c >= '0' && c <= '9') {
+ x = x * 16 + c - '0';
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = x * 16 + c - 'a' + 10;
+ } else {
+ break;
+ }
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ if (sign) {
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ } else {
+ /* No number was matched, but it isn't an invalid number either. */
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf;
+ }
+ }
+ if (buf == end) {
+ goto done;
+ }
+ c = (unsigned char)*buf;
+ if (buf == k2) {
+ if (c >= '0' && c <= '9') {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ }
+ switch (c) {
+ case '.': case 'p': case 'P':
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+done:
+ *value = x;
+ *status = sign;
+ return buf;
+}
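+
+/*
+ * A small hex parsing sketch: the 0x prefix is required and the status
+ * convention matches parse_integer.
+ *
+ *     uint64_t v;
+ *     int status;
+ *     const char *end = parse_hex_integer("0x1f", 4, &v, &status);
+ *     // on success: v == 0x1f, status == 0, end points past the digits
+ */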
+
+
+#define __portable_define_parse_unsigned(NAME, TYPE, LIMIT) \
+static inline const char *parse_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+#define __portable_define_parse_hex_unsigned(NAME, TYPE, LIMIT) \
+static inline const char *parse_hex_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_hex_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+/* This assumes two's complement. */
+#define __portable_define_parse_signed(NAME, TYPE, LIMIT) \
+static inline const char *parse_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ if (x <= (uint64_t)(LIMIT) + 1) { \
+ *value = (TYPE)-(int64_t)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+/* This assumes two's complement. */
+#define __portable_define_parse_hex_signed(NAME, TYPE, LIMIT) \
+static inline const char *parse_hex_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_hex_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ if (x <= (uint64_t)(LIMIT) + 1) { \
+ *value = (TYPE)-(int64_t)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+static inline const char *parse_uint64(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ buf = parse_integer(buf, len, value, status);
+ if (*status == PARSE_INTEGER_SIGNED) {
+ *status = PARSE_INTEGER_UNDERFLOW;
+ return 0;
+ }
+ return buf;
+}
+
+static inline const char *parse_hex_uint64(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ buf = parse_hex_integer(buf, len, value, status);
+ if (*status == PARSE_INTEGER_SIGNED) {
+ *status = PARSE_INTEGER_UNDERFLOW;
+ return 0;
+ }
+ return buf;
+}
+
+__portable_define_parse_signed(int64, int64_t, INT64_MAX)
+__portable_define_parse_signed(int32, int32_t, INT32_MAX)
+__portable_define_parse_unsigned(uint16, uint16_t, UINT16_MAX)
+__portable_define_parse_signed(int16, int16_t, INT16_MAX)
+__portable_define_parse_unsigned(uint8, uint8_t, UINT8_MAX)
+__portable_define_parse_signed(int8, int8_t, INT8_MAX)
+
+__portable_define_parse_hex_signed(int64, int64_t, INT64_MAX)
+__portable_define_parse_hex_signed(int32, int32_t, INT32_MAX)
+__portable_define_parse_hex_unsigned(uint16, uint16_t, UINT16_MAX)
+__portable_define_parse_hex_signed(int16, int16_t, INT16_MAX)
+__portable_define_parse_hex_unsigned(uint8, uint8_t, UINT8_MAX)
+__portable_define_parse_hex_signed(int8, int8_t, INT8_MAX)
+
+__portable_define_parse_unsigned(ushort, unsigned short, USHRT_MAX)
+__portable_define_parse_signed(short, short, SHRT_MAX)
+__portable_define_parse_unsigned(uint, unsigned int, UINT_MAX)
+__portable_define_parse_signed(int, int, INT_MAX)
+__portable_define_parse_unsigned(ulong, unsigned long, ULONG_MAX)
+__portable_define_parse_signed(long, long, LONG_MAX)
+
+__portable_define_parse_hex_unsigned(ushort, unsigned short, USHRT_MAX)
+__portable_define_parse_hex_signed(short, short, SHRT_MAX)
+__portable_define_parse_hex_unsigned(uint, unsigned int, UINT_MAX)
+__portable_define_parse_hex_signed(int, int, INT_MAX)
+__portable_define_parse_hex_unsigned(ulong, unsigned long, ULONG_MAX)
+__portable_define_parse_hex_signed(long, long, LONG_MAX)
+
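+/* A minimal usage sketch of the wrappers defined above (illustrative only;
+ * `text` and `len` stand for hypothetical caller-provided input):
+ *
+ *     int32_t v = 0;
+ *     int status = 0;
+ *     const char *end = parse_int32(text, len, &v, &status);
+ *     // On success `end` is the pointer returned by parse_integer and `v`
+ *     // holds the value; a return of 0 with PARSE_INTEGER_OVERFLOW or
+ *     // PARSE_INTEGER_UNDERFLOW in `status` signals a range error.
+ */
+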
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPARSEINT_H */
diff --git a/include/flatcc/portable/pprintfp.h b/include/flatcc/portable/pprintfp.h
new file mode 100644
index 0000000..c2e5c07
--- /dev/null
+++ b/include/flatcc/portable/pprintfp.h
@@ -0,0 +1,39 @@
+#ifndef PPRINTFP_H
+#define PPRINTFP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+#ifndef PORTABLE_USE_GRISU3
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+
+#if PORTABLE_USE_GRISU3
+#include "grisu3_print.h"
+#endif
+
+#ifdef grisu3_print_double_is_defined
+/* Currently there is no special support for floats. */
+#define print_float(n, p) grisu3_print_double((float)(n), (p))
+#define print_double(n, p) grisu3_print_double((double)(n), (p))
+#else
+#include <stdio.h>
+#define print_float(n, p) sprintf(p, "%.9g", (float)(n))
+#define print_double(n, p) sprintf(p, "%.17g", (double)(n))
+#endif
+
+#define print_hex_float(n, p) sprintf(p, "%a", (float)(n))
+#define print_hex_double(n, p) sprintf(p, "%a", (double)(n))
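+
+/* Illustrative use (a sketch; the 32-byte buffer is an assumption that
+ * holds the longest "%.17g" output plus terminator with room to spare):
+ *
+ *     char buf[32];
+ *     print_double(3.141592653589793, buf);     // round-trippable decimal
+ *     print_hex_double(3.141592653589793, buf); // C99 hex float via "%a"
+ */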
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPRINTFP_H */
diff --git a/include/flatcc/portable/pprintint.h b/include/flatcc/portable/pprintint.h
new file mode 100644
index 0000000..d05f376
--- /dev/null
+++ b/include/flatcc/portable/pprintint.h
@@ -0,0 +1,628 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *
+ * Fast printing of (u)int8/16/32/64_t, (u)int, (u)long.
+ *
+ * Functions take the form
+ *
+ *     int print_<type>(type value, char *buf);
+ *
+ * and return the number of characters printed, excluding the trailing '\0',
+ * which is also printed. Prints at most 21 characters including zero-
+ * termination.
+ *
+ * The function `print_bool` is a bit different - it simply prints "true\0" for
+ * non-zero integers, and "false\0" otherwise.
+ *
+ * The general algorithm formats in place: a binary search determines the
+ * number of digits (log10), followed by Duff's device style unrolled
+ * div / 100 stages.
+ *
+ * The simpler post-copy algorithm, also provided for print_(u)int and
+ * print_(u)long, uses a temp buffer, loops over div/100, and then copies
+ * the result to the target buffer.
+ *
+ *
+ * Benchmarks on core-i7, 2.2GHz, 64-bit clang/OS-X -O2:
+ *
+ * print_int64: avg 15ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int64: avg 11ns for values between 10^9 + (0..10,000,000).
+ * print_int32: avg 7ns for values cast from INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int32: avg 7ns for values between 10^9 + (0..10,000,000).
+ * print_int64: avg 13ns for values between 10^16 + (0..10,000,000).
+ * print_int64: avg 5ns for values between 0 and 10,000,000.
+ * print_int32: avg 5ns for values between 0 and 10,000,000.
+ * print_int16: avg 10ns for values cast from 0 and 10,000,000.
+ * print_int8: avg 4ns for values cast from 0 and 10,000,000.
+ *
+ * Post copy algorithm:
+ * print_int: avg 12ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int: avg 14ns for values between 10^9 + (0..10,000,000).
+ * print_long: avg 29ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ *
+ * The post-copy algorithm is nearly half as fast as the in-place
+ * algorithm, but can also be faster occasionally - possibly because the
+ * optimizer is able to skip the copy step.
+ */
+
+#ifndef PPRINTINT_H
+#define PPRINTINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+#include <string.h> /* memcpy used by print_bool */
+
+#include "pattributes.h" /* fallthrough */
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+static int print_bool(int n, char *p);
+
+static int print_uint8(uint8_t n, char *p);
+static int print_uint16(uint16_t n, char *p);
+static int print_uint32(uint32_t n, char *p);
+static int print_uint64(uint64_t n, char *p);
+static int print_int8(int8_t n, char *p);
+static int print_int16(int16_t n, char *p);
+static int print_int32(int32_t n, char *p);
+static int print_int64(int64_t n, char *p);
+
+/*
+ * Uses a slightly slower, but more compact algorithm
+ * that is not hardcoded to the implementation size.
+ * Other types may be defined using macros below.
+ */
+static int print_ulong(unsigned long n, char *p);
+static int print_uint(unsigned int n, char *p);
+static int print_int(int n, char *p);
+static int print_long(long n, char *p);
+
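+/* A small usage sketch of the interface declared above (illustrative only):
+ *
+ *     char buf[21];  // enough for any value including the terminating '\0'
+ *     int len = print_int64(-1234567890123456789LL, buf);
+ *     // buf now holds "-1234567890123456789" and len is 20.
+ */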
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+#define __print_unaligned_copy_16(p, q) (*(uint16_t*)(p) = *(uint16_t*)(q))
+#else
+#define __print_unaligned_copy_16(p, q) \
+ ((((uint8_t*)(p))[0] = ((uint8_t*)(q))[0]), \
+ (((uint8_t*)(p))[1] = ((uint8_t*)(q))[1]))
+#endif
+
+static const char __print_digit_pairs[] =
+ "0001020304050607080910111213141516171819"
+ "2021222324252627282930313233343536373839"
+ "4041424344454647484950515253545556575859"
+ "6061626364656667686970717273747576777879"
+ "8081828384858687888990919293949596979899";
+
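+/* Each __print_stage() peels off the two least significant decimal digits of
+ * n and copies the matching pair from the "00".."99" table above, writing
+ * the number backwards from the end of the buffer. */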
+#define __print_stage() \
+ p -= 2; \
+ dp = __print_digit_pairs + (n % 100) * 2; \
+ n /= 100; \
+ __print_unaligned_copy_16(p, dp);
+
+#define __print_long_stage() \
+ __print_stage() \
+ __print_stage()
+
+#define __print_short_stage() \
+ *--p = (n % 10) + '0'; \
+ n /= 10;
+
+static int print_bool(int n, char *buf)
+{
+ if (n) {
+ memcpy(buf, "true\0", 5);
+ return 4;
+ } else {
+ memcpy(buf, "false\0", 6);
+ return 5;
+ }
+}
+
+static int print_uint8(uint8_t n, char *p)
+{
+ const char *dp;
+
+ if (n >= 100) {
+ p += 3;
+ *p = '\0';
+ __print_stage();
+ p[-1] = (char)n + '0';
+ return 3;
+ }
+ if (n >= 10) {
+ p += 2;
+ *p = '\0';
+ __print_stage();
+ return 2;
+ }
+ p[1] = '\0';
+ p[0] = (char)n + '0';
+ return 1;
+}
+
+static int print_uint16(uint16_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+
+ if (n >= 1000) {
+ if(n >= 10000) {
+ k = 5;
+ } else {
+ k = 4;
+ }
+ } else {
+ if(n >= 100) {
+ k = 3;
+ } else if(n >= 10) {
+ k = 2;
+ } else {
+ k = 1;
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 5:
+ __print_stage();
+ pattribute(fallthrough);
+ case 3:
+ __print_stage();
+ pattribute(fallthrough);
+ case 1:
+ p[-1] = (char)n + '0';
+ }
+ } else {
+ switch (k) {
+ case 4:
+ __print_stage();
+ pattribute(fallthrough);
+ case 2:
+ __print_stage();
+ }
+ }
+ return k;
+}
+
+static int print_uint32(uint32_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+
+ if(n >= 10000UL) {
+ if(n >= 10000000UL) {
+ if(n >= 1000000000UL) {
+ k = 10;
+ } else if(n >= 100000000UL) {
+ k = 9;
+ } else {
+ k = 8;
+ }
+ } else {
+ if(n >= 1000000UL) {
+ k = 7;
+ } else if(n >= 100000UL) {
+ k = 6;
+ } else {
+ k = 5;
+ }
+ }
+ } else {
+ if(n >= 100UL) {
+ if(n >= 1000UL) {
+ k = 4;
+ } else {
+ k = 3;
+ }
+ } else {
+ if(n >= 10UL) {
+ k = 2;
+ } else {
+ k = 1UL;
+ }
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 9:
+ __print_stage();
+ pattribute(fallthrough);
+ case 7:
+ __print_stage();
+ pattribute(fallthrough);
+ case 5:
+ __print_stage();
+ pattribute(fallthrough);
+ case 3:
+ __print_stage();
+ pattribute(fallthrough);
+ case 1:
+ p[-1] = (char)n + '0';
+ }
+ } else {
+ switch (k) {
+ case 10:
+ __print_stage();
+ pattribute(fallthrough);
+ case 8:
+ __print_stage();
+ pattribute(fallthrough);
+ case 6:
+ __print_stage();
+ pattribute(fallthrough);
+ case 4:
+ __print_stage();
+ pattribute(fallthrough);
+ case 2:
+ __print_stage();
+ }
+ }
+ return k;
+}
+
+static int print_uint64(uint64_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+ const uint64_t x = 1000000000ULL;
+
+ if (n < x) {
+ return print_uint32((uint32_t)n, p);
+ }
+ if(n >= 10000ULL * x) {
+ if(n >= 10000000ULL * x) {
+ if(n >= 1000000000ULL * x) {
+ if (n >= 10000000000ULL * x) {
+ k = 11 + 9;
+ } else {
+ k = 10 + 9;
+ }
+ } else if(n >= 100000000ULL * x) {
+ k = 9 + 9;
+ } else {
+ k = 8 + 9;
+ }
+ } else {
+ if(n >= 1000000ULL * x) {
+ k = 7 + 9;
+ } else if(n >= 100000ULL * x) {
+ k = 6 + 9;
+ } else {
+ k = 5 + 9;
+ }
+ }
+ } else {
+ if(n >= 100ULL * x) {
+ if(n >= 1000ULL * x) {
+ k = 4 + 9;
+ } else {
+ k = 3 + 9;
+ }
+ } else {
+ if(n >= 10ULL * x) {
+ k = 2 + 9;
+ } else {
+ k = 1 + 9;
+ }
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 19:
+ __print_stage();
+ pattribute(fallthrough);
+ case 17:
+ __print_stage();
+ pattribute(fallthrough);
+ case 15:
+ __print_stage();
+ pattribute(fallthrough);
+ case 13:
+ __print_stage();
+ pattribute(fallthrough);
+ case 11:
+ __print_stage()
+ __print_short_stage();
+ }
+ } else {
+ switch (k) {
+ case 20:
+ __print_stage();
+ pattribute(fallthrough);
+ case 18:
+ __print_stage();
+ pattribute(fallthrough);
+ case 16:
+ __print_stage();
+ pattribute(fallthrough);
+ case 14:
+ __print_stage();
+ pattribute(fallthrough);
+ case 12:
+ __print_stage();
+ pattribute(fallthrough);
+ case 10:
+ __print_stage();
+ }
+ }
+ __print_long_stage()
+ __print_long_stage()
+ return k;
+}
+
+static int print_int8(int8_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint8((uint8_t)n, p) + sign;
+}
+
+static int print_int16(int16_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint16((uint16_t)n, p) + sign;
+}
+
+static int print_int32(int32_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint32((uint32_t)n, p) + sign;
+}
+
+static int print_int64(int64_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint64((uint64_t)n, p) + sign;
+}
+
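+/* The macro below generates the simpler post-copy variant mentioned in the
+ * file header: digits are produced into a small temporary buffer and then
+ * copied forward into the caller's buffer. */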
+#define __define_print_int_simple(NAME, UNAME, T, UT) \
+static int UNAME(UT n, char *buf) \
+{ \
+ char tmp[20]; \
+ char* p = tmp + 20; \
+ char* q = p; \
+ unsigned int k, m; \
+ \
+ while (n >= 100) { \
+ p -= 2; \
+ m = (unsigned int)(n % 100) * 2; \
+ n /= 100; \
+ __print_unaligned_copy_16(p, __print_digit_pairs + m); \
+ } \
+ p -= 2; \
+ m = (unsigned int)n * 2; \
+ __print_unaligned_copy_16(p, __print_digit_pairs + m); \
+ if (n < 10) { \
+ ++p; \
+ } \
+ k = (unsigned int)(q - p); \
+ while (p != q) { \
+ *buf++ = *p++; \
+ } \
+ *buf = '\0'; \
+ return (int)k; \
+} \
+ \
+static int NAME(T n, char *buf) \
+{ \
+ int sign = n < 0; \
+ \
+ if (sign) { \
+ *buf++ = '-'; \
+ n = -n; \
+ } \
+ return UNAME((UT)n, buf) + sign; \
+}
+
+__define_print_int_simple(print_int, print_uint, int, unsigned int)
+__define_print_int_simple(print_long, print_ulong, long, unsigned long)
+
+#ifdef PPRINTINT_BENCH
+int main() {
+ int64_t count = 10000000; /* 10^7 */
+#if 0
+ int64_t base = 0;
+ int64_t base = 10000000000000000; /* 10^16 */
+ int64_t base = 1000000000; /* 10^9 */
+#endif
+ int64_t base = INT64_MIN - count/2;
+ char buf[100];
+ int i, k = 0, n = 0;
+ for (i = 0; i < count; i++) {
+ k = print_int64(i + base, buf);
+ n += buf[0] + buf[k - 1];
+ }
+ return n;
+}
+/* Run the executable under `time`; multiply the time in seconds by 100 to get the time per number in ns. */
+#endif /* PPRINTINT_BENCH */
+
+#ifdef PPRINTINT_TEST
+
+#include <stdio.h>
+#include <string.h>
+
+int main()
+{
+ char buf[21];
+ int failed = 0;
+ int k;
+
+ k = print_uint64(UINT64_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("18446744073709551615", buf)) {
+ printf("UINT64_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int64(INT64_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("9223372036854775807", buf)) {
+ printf("INT64_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int64(INT64_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-9223372036854775808", buf)) {
+ printf("INT64_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint32(UINT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("4294967295", buf)) {
+ printf("UINT32_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int32(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int32(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint16(UINT16_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("65535", buf)) {
+ printf("UINT16_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int16(INT16_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("32767", buf)) {
+ printf("INT16_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int16(INT16_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-32768", buf)) {
+ printf("INT16_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint8(UINT8_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("255", buf)) {
+ printf("INT8_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int8(INT8_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("127", buf)) {
+ printf("INT8_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int8(INT8_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-128", buf)) {
+ printf("INT8_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly with k = print_int, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly k = print_int, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_long(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly with fmt_long, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_long(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly fmt_long, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(1, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("true", buf) {
+ printf("1 didn't print 'true' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(-1, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("true", buf) {
+ printf("-1 didn't print 'true' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+    k = print_bool(0, buf);
+    if (strlen(buf) != k) printf("length error\n");
+    if (strcmp("false", buf)) {
+ printf("0 didn't print 'false' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ if (failed) {
+ printf("FAILED\n");
+ return -1;
+ }
+ printf("SUCCESS\n");
+ return 0;
+}
+#endif /* PPRINTINT_TEST */
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPRINTINT_H */
diff --git a/include/flatcc/portable/pstatic_assert.h b/include/flatcc/portable/pstatic_assert.h
new file mode 100644
index 0000000..24d5634
--- /dev/null
+++ b/include/flatcc/portable/pstatic_assert.h
@@ -0,0 +1,67 @@
+#ifndef PSTATIC_ASSERT_H
+#define PSTATIC_ASSERT_H
+
+#include <assert.h>
+
+/* Handle clang */
+#ifndef __has_feature
+ #define __has_feature(x) 0
+#endif
+
+#if defined(static_assert)
+#ifndef __static_assert_is_defined
+#define __static_assert_is_defined 1
+#endif
+#endif
+
+/* Handle static_assert as a keyword in C++ and compiler specifics. */
+#if !defined(__static_assert_is_defined)
+
+#if defined(__cplusplus)
+
+#if __cplusplus >= 201103L
+#define __static_assert_is_defined 1
+#elif __has_feature(cxx_static_assert)
+#define __static_assert_is_defined 1
+#elif defined(_MSC_VER) && (_MSC_VER >= 1600)
+#define __static_assert_is_defined 1
+#endif
+
+#else
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+#define __static_assert_is_defined 1
+#elif __has_feature(c_static_assert)
+#define static_assert(pred, msg) _Static_assert(pred, msg)
+#define __static_assert_is_defined 1
+#elif defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+/* In case the clib headers are not compliant. */
+#define static_assert(pred, msg) _Static_assert(pred, msg)
+#define __static_assert_is_defined 1
+#endif
+
+#endif /* __cplusplus */
+#endif /* __static_assert_is_defined */
+
+
+#if !defined(__static_assert_is_defined)
+
+#define __PSTATIC_ASSERT_CONCAT_(a, b) static_assert_scope_##a##_line_##b
+#define __PSTATIC_ASSERT_CONCAT(a, b) __PSTATIC_ASSERT_CONCAT_(a, b)
+#ifdef __COUNTER__
+#define static_assert(e, msg) enum { __PSTATIC_ASSERT_CONCAT(__COUNTER__, __LINE__) = 1/(!!(e)) }
+#else
+#include "pstatic_assert_scope.h"
+#define static_assert(e, msg) enum { __PSTATIC_ASSERT_CONCAT(__PSTATIC_ASSERT_COUNTER, __LINE__) = 1/(int)(!!(e)) }
+#endif
+
+#define __static_assert_is_defined 1
+
+#endif /* __static_assert_is_defined */
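+
+/* Whichever branch above provided it, usage follows the C11 form, e.g.
+ *
+ *     static_assert(sizeof(uint32_t) == 4, "uint32_t must be 4 bytes");
+ *
+ * (illustrative only; with the enum fallback a failing predicate produces a
+ * division-by-zero compile error rather than printing the message). */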
+
+#endif /* PSTATIC_ASSERT_H */
+
+/* Update scope counter outside of include guard. */
+#ifdef __PSTATIC_ASSERT_COUNTER
+#include "pstatic_assert_scope.h"
+#endif
diff --git a/include/flatcc/portable/pstatic_assert_scope.h b/include/flatcc/portable/pstatic_assert_scope.h
new file mode 100644
index 0000000..71a0c29
--- /dev/null
+++ b/include/flatcc/portable/pstatic_assert_scope.h
@@ -0,0 +1,280 @@
+/*
+ * January 2017: ported to the portable library by mikkelfj.
+ * Based on the dbgtools static assert counter, but with renamed macros.
+ */
+
+/*
+ dbgtools - platform independent wrapping of "nice to have" debug functions.
+
+ version 0.1, october, 2013
+
+ https://github.com/wc-duck/dbgtools
+
+ Copyright (C) 2013- Fredrik Kihlander
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Fredrik Kihlander
+*/
+
+/**
+ * Auto-generated header implementing a counter that increases with each inclusion of the file.
+ *
+ * This header will define the macro __PSTATIC_ASSERT_COUNTER to be increased for each inclusion of the file.
+ *
+ * It has been generated with 4 digits, so the counter wraps around after
+ * 10000 inclusions.
+ *
+ * Usage:
+ *
+ * #include "this_header.h"
+ * int a = __PSTATIC_ASSERT_COUNTER; // 0
+ * #include "this_header.h"
+ * int b = __PSTATIC_ASSERT_COUNTER; // 1
+ * #include "this_header.h"
+ * int c = __PSTATIC_ASSERT_COUNTER; // 2
+ * #include "this_header.h"
+ * int d = __PSTATIC_ASSERT_COUNTER; // 3
+ */
+
+#ifndef __PSTATIC_ASSERT_COUNTER
+# define __PSTATIC_ASSERT_COUNTER_0 0
+# define __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_D1_0
+# define __PSTATIC_ASSERT_COUNTER_D2_0
+# define __PSTATIC_ASSERT_COUNTER_D3_0
+#endif /* __PSTATIC_ASSERT_COUNTER */
+
+#if !defined( __PSTATIC_ASSERT_COUNTER_D0_0 )
+# define __PSTATIC_ASSERT_COUNTER_D0_0
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 0
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_1 )
+# define __PSTATIC_ASSERT_COUNTER_D0_1
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 1
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_2 )
+# define __PSTATIC_ASSERT_COUNTER_D0_2
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 2
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_3 )
+# define __PSTATIC_ASSERT_COUNTER_D0_3
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 3
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_4 )
+# define __PSTATIC_ASSERT_COUNTER_D0_4
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 4
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_5 )
+# define __PSTATIC_ASSERT_COUNTER_D0_5
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 5
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_6 )
+# define __PSTATIC_ASSERT_COUNTER_D0_6
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 6
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_7 )
+# define __PSTATIC_ASSERT_COUNTER_D0_7
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 7
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_8 )
+# define __PSTATIC_ASSERT_COUNTER_D0_8
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 8
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_9 )
+# define __PSTATIC_ASSERT_COUNTER_D0_9
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 9
+#else
+# undef __PSTATIC_ASSERT_COUNTER_D0_1
+# undef __PSTATIC_ASSERT_COUNTER_D0_2
+# undef __PSTATIC_ASSERT_COUNTER_D0_3
+# undef __PSTATIC_ASSERT_COUNTER_D0_4
+# undef __PSTATIC_ASSERT_COUNTER_D0_5
+# undef __PSTATIC_ASSERT_COUNTER_D0_6
+# undef __PSTATIC_ASSERT_COUNTER_D0_7
+# undef __PSTATIC_ASSERT_COUNTER_D0_8
+# undef __PSTATIC_ASSERT_COUNTER_D0_9
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D1_0 )
+# define __PSTATIC_ASSERT_COUNTER_D1_0
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_1 )
+# define __PSTATIC_ASSERT_COUNTER_D1_1
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_2 )
+# define __PSTATIC_ASSERT_COUNTER_D1_2
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_3 )
+# define __PSTATIC_ASSERT_COUNTER_D1_3
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_4 )
+# define __PSTATIC_ASSERT_COUNTER_D1_4
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_5 )
+# define __PSTATIC_ASSERT_COUNTER_D1_5
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_6 )
+# define __PSTATIC_ASSERT_COUNTER_D1_6
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_7 )
+# define __PSTATIC_ASSERT_COUNTER_D1_7
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_8 )
+# define __PSTATIC_ASSERT_COUNTER_D1_8
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_9 )
+# define __PSTATIC_ASSERT_COUNTER_D1_9
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D1_1
+# undef __PSTATIC_ASSERT_COUNTER_D1_2
+# undef __PSTATIC_ASSERT_COUNTER_D1_3
+# undef __PSTATIC_ASSERT_COUNTER_D1_4
+# undef __PSTATIC_ASSERT_COUNTER_D1_5
+# undef __PSTATIC_ASSERT_COUNTER_D1_6
+# undef __PSTATIC_ASSERT_COUNTER_D1_7
+# undef __PSTATIC_ASSERT_COUNTER_D1_8
+# undef __PSTATIC_ASSERT_COUNTER_D1_9
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D2_0 )
+# define __PSTATIC_ASSERT_COUNTER_D2_0
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_1 )
+# define __PSTATIC_ASSERT_COUNTER_D2_1
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_2 )
+# define __PSTATIC_ASSERT_COUNTER_D2_2
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_3 )
+# define __PSTATIC_ASSERT_COUNTER_D2_3
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_4 )
+# define __PSTATIC_ASSERT_COUNTER_D2_4
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_5 )
+# define __PSTATIC_ASSERT_COUNTER_D2_5
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_6 )
+# define __PSTATIC_ASSERT_COUNTER_D2_6
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_7 )
+# define __PSTATIC_ASSERT_COUNTER_D2_7
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_8 )
+# define __PSTATIC_ASSERT_COUNTER_D2_8
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_9 )
+# define __PSTATIC_ASSERT_COUNTER_D2_9
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D2_1
+# undef __PSTATIC_ASSERT_COUNTER_D2_2
+# undef __PSTATIC_ASSERT_COUNTER_D2_3
+# undef __PSTATIC_ASSERT_COUNTER_D2_4
+# undef __PSTATIC_ASSERT_COUNTER_D2_5
+# undef __PSTATIC_ASSERT_COUNTER_D2_6
+# undef __PSTATIC_ASSERT_COUNTER_D2_7
+# undef __PSTATIC_ASSERT_COUNTER_D2_8
+# undef __PSTATIC_ASSERT_COUNTER_D2_9
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D3_0 )
+# define __PSTATIC_ASSERT_COUNTER_D3_0
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_1 )
+# define __PSTATIC_ASSERT_COUNTER_D3_1
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_2 )
+# define __PSTATIC_ASSERT_COUNTER_D3_2
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_3 )
+# define __PSTATIC_ASSERT_COUNTER_D3_3
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_4 )
+# define __PSTATIC_ASSERT_COUNTER_D3_4
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_5 )
+# define __PSTATIC_ASSERT_COUNTER_D3_5
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_6 )
+# define __PSTATIC_ASSERT_COUNTER_D3_6
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_7 )
+# define __PSTATIC_ASSERT_COUNTER_D3_7
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_8 )
+# define __PSTATIC_ASSERT_COUNTER_D3_8
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_9 )
+# define __PSTATIC_ASSERT_COUNTER_D3_9
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D3_1
+# undef __PSTATIC_ASSERT_COUNTER_D3_2
+# undef __PSTATIC_ASSERT_COUNTER_D3_3
+# undef __PSTATIC_ASSERT_COUNTER_D3_4
+# undef __PSTATIC_ASSERT_COUNTER_D3_5
+# undef __PSTATIC_ASSERT_COUNTER_D3_6
+# undef __PSTATIC_ASSERT_COUNTER_D3_7
+# undef __PSTATIC_ASSERT_COUNTER_D3_8
+# undef __PSTATIC_ASSERT_COUNTER_D3_9
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 0
+# endif
+# endif
+# endif
+#endif
+
+#define __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO_(digit0,digit1,digit2,digit3) digit0##digit1##digit2##digit3
+#define __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO(digit0,digit1,digit2,digit3) __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO_(digit0,digit1,digit2,digit3)
+#undef __PSTATIC_ASSERT_COUNTER
+#define __PSTATIC_ASSERT_COUNTER __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO(__PSTATIC_ASSERT_COUNTER_3,__PSTATIC_ASSERT_COUNTER_2,__PSTATIC_ASSERT_COUNTER_1,__PSTATIC_ASSERT_COUNTER_0)
diff --git a/include/flatcc/portable/pstdalign.h b/include/flatcc/portable/pstdalign.h
new file mode 100644
index 0000000..169fe27
--- /dev/null
+++ b/include/flatcc/portable/pstdalign.h
@@ -0,0 +1,162 @@
+#ifndef PSTDALIGN_H
+#define PSTDALIGN_H
+
+/*
+ * NOTE: aligned_alloc is defined via paligned_alloc.h
+ * and requires aligned_free to be fully portable although
+ * free also works on C11 and platforms with posix_memalign.
+ *
+ * NOTE: C++11 defines alignas as a keyword but then also defines
+ * __alignas_is_defined.
+ *
+ * C++14 does not always define __alignas_is_defined.
+ *
+ * GCC 8.3 reverses this and makes C++11 behave the same as C++14,
+ * preventing a simple __cplusplus version check from working.
+ *
+ * Clang C++ without std=c++11 or std=c++14 does define alignas
+ * but does so incorrectly wrt. C11 and C++11 semantics because
+ * `alignas(4) float x;` is not recognized.
+ * To fix such issues, either move to a std version, or
+ * include a working stdalign.h for the given compiler before
+ * this file.
+ *
+ * newlib defines _Alignas and _Alignof in sys/cdefs but relies on
+ * the gcc version for <stdalign.h>, which can lead to conflicts if
+ * stdalign.h is not included.
+ *
+ * newlib's need for <stdalign.h> conflicts with broken C++ stdalign,
+ * but this can be fixed by using std=c++11 or newer.
+ *
+ * MSVC does not support <stdalign.h> at least up to MSVC 2015,
+ * but does appear to support alignas and alignof keywords in
+ * recent standard C++.
+ *
+ * TCC only supports alignas with a numeric argument like
+ * `alignas(4)`, but not `alignas(float)`.
+ *
+ * If stdalign.h is supported but the heuristics in this file are
+ * insufficient to detect this, try including <stdalign.h> manually
+ * or define HAVE_STDALIGN_H.
+ */
+
+/* https://github.com/dvidelabs/flatcc/issues/130 */
+#ifndef __alignas_is_defined
+#if defined(__cplusplus)
+#if __cplusplus == 201103 && !defined(__clang__) && ((__GNUC__ > 8) || (__GNUC__ == 8 && __GNUC_MINOR__ >= 3))
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+#include <stdalign.h>
+#endif
+#endif
+#endif
+
+/* Allow for alternative solution to be included first. */
+#ifndef __alignas_is_defined
+
+#ifdef __cplusplus
+#if defined(PORTABLE_PATCH_CPLUSPLUS_STDALIGN)
+#include <stdalign.h>
+#undef alignas
+#define alignas(t) __attribute__((__aligned__(t)))
+#endif
+#endif
+
+#if !defined(PORTABLE_HAS_INCLUDE_STDALIGN)
+#if defined(__has_include)
+#if __has_include(<stdalign.h>)
+#define PORTABLE_HAS_INCLUDE_STDALIGN 1
+#else
+#define PORTABLE_HAS_INCLUDE_STDALIGN 0
+#endif
+#endif
+#endif
+
+ /* https://lists.gnu.org/archive/html/bug-gnulib/2015-08/msg00003.html */
+#if defined(__cplusplus)
+#if !defined(_MSC_VER)
+#include <stdalign.h>
+#endif
+#if __cplusplus > 201103
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+#endif
+#elif PORTABLE_HAS_INCLUDE_STDALIGN
+#include <stdalign.h>
+#elif !defined(__clang__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
+#include <stdalign.h>
+#elif defined(HAVE_STDALIGN_H)
+#include <stdalign.h>
+#endif
+
+#endif /* __alignas_is_defined */
+
+#ifndef __alignas_is_defined
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (!defined(__clang__) && defined(__GNUC__) && \
+ ((__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)))
+#undef PORTABLE_C11_STDALIGN_MISSING
+#define PORTABLE_C11_STDALIGN_MISSING
+#endif
+
+#if defined(__IBMC__)
+#undef PORTABLE_C11_STDALIGN_MISSING
+#define PORTABLE_C11_STDALIGN_MISSING
+#endif
+
+#if ((defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && \
+ !defined(PORTABLE_C11_STDALIGN_MISSING))
+/* C11 or newer */
+#include <stdalign.h>
+#else
+#if defined(__GNUC__) || defined(__IBM_ALIGNOF__) || defined(__clang__)
+
+#ifndef _Alignas
+#define _Alignas(t) __attribute__((__aligned__(t)))
+#endif
+
+#ifndef _Alignof
+#define _Alignof(t) __alignof__(t)
+#endif
+
+#elif defined(_MSC_VER)
+
+#define _Alignas(t) __declspec (align(t))
+#define _Alignof(t) __alignof(t)
+
+#elif defined(__TINYC__)
+
+/* Supports `_Alignas(integer-expression)`, but not `_Alignas(type)`. */
+#define _Alignas(t) __attribute__((aligned(t)))
+#define _Alignof(t) __alignof__(t)
+
+#else
+#error please update pstdalign.h with support for current compiler and library
+#endif
+
+#endif /* __STDC__ */
+
+#ifndef alignas
+#define alignas _Alignas
+#endif
+
+#ifndef alignof
+#define alignof _Alignof
+#endif
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
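+
+/* With the mappings above, C11-style usage such as
+ * `alignas(16) static char buf[64];` or `alignof(double)` is expected to
+ * work, subject to the limits noted in the file header (e.g. TCC only
+ * accepts numeric alignas arguments). */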
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __alignas_is_defined */
+
+#include "paligned_alloc.h"
+
+#endif /* PSTDALIGN_H */
diff --git a/include/flatcc/portable/pstdbool.h b/include/flatcc/portable/pstdbool.h
new file mode 100644
index 0000000..28fc89c
--- /dev/null
+++ b/include/flatcc/portable/pstdbool.h
@@ -0,0 +1,37 @@
+#ifndef PSTDBOOL_H
+#define PSTDBOOL_H
+
+#if !defined(__cplusplus) && !__bool_true_false_are_defined && !defined(bool) && !defined(__STDBOOL_H)
+
+#ifdef HAVE_STDBOOL_H
+
+#include <stdbool.h>
+
+#elif (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+
+#define bool _Bool
+#define true 1
+#define false 0
+#define __bool_true_false_are_defined 1
+
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+
+#define bool bool
+#define true true
+#define false false
+#define __bool_true_false_are_defined 1
+
+#else
+
+typedef unsigned char _Portable_bool;
+#define bool _Portable_bool
+#define true 1
+#define false 0
+#define __bool_true_false_are_defined 1
+
+#endif
+
+#endif
+
+#endif /* PSTDBOOL_H */
diff --git a/include/flatcc/portable/pstdint.h b/include/flatcc/portable/pstdint.h
new file mode 100644
index 0000000..d522fed
--- /dev/null
+++ b/include/flatcc/portable/pstdint.h
@@ -0,0 +1,898 @@
+/* A portable stdint.h
+ ****************************************************************************
+ * BSD License:
+ ****************************************************************************
+ *
+ * Copyright (c) 2005-2016 Paul Hsieh
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************
+ *
+ * Version 0.1.15.2
+ *
+ * The ANSI C standard committee, for the C99 standard, specified the
+ * inclusion of a new standard include file called stdint.h. This is
+ * a very useful and long desired include file which contains several
+ * very precise definitions for integer scalar types that are
+ * critically important for making several classes of applications
+ * portable, including cryptography, hashing, variable length
+ * integer libraries and so on. But for most developers it's likely
+ * useful just for programming sanity.
+ *
+ * The problem is that some compiler vendors chose to ignore the C99
+ * standard and some older compilers have no opportunity to be updated.
+ * Because of this situation, simply including stdint.h in your code
+ * makes it unportable.
+ *
+ * So that's what this file is all about. It's an attempt to build a
+ * single universal include file that works on as many platforms as
+ * possible to deliver what stdint.h is supposed to. Even compilers
+ * that already come with stdint.h can use this file instead without
+ * any loss of functionality. A few things that should be noted about
+ * this file:
+ *
+ * 1) It is not guaranteed to be portable and/or present an identical
+ * interface on all platforms. The extreme variability of the
+ * ANSI C standard makes this an impossibility right from the
+ *    very get-go. It's really only meant to be useful for the vast
+ *    majority of platforms that possess the capability of
+ *    implementing usefully and precisely defined, standard sized
+ *    integer scalars. Systems which are not intrinsically two's
+ *    complement may produce invalid constants.
+ *
+ * 2) There is an unavoidable use of non-reserved symbols.
+ *
+ * 3) Other standard include files are invoked.
+ *
+ * 4) This file may come in conflict with future platforms that do
+ * include stdint.h. The hope is that one or the other can be
+ * used with no real difference.
+ *
+ * 5) In the current version, if your platform can't represent
+ * int32_t, int16_t and int8_t, it just dumps out with a compiler
+ * error.
+ *
+ * 6) 64 bit integers may or may not be defined. Test for their
+ * presence with the test: #ifdef INT64_MAX or #ifdef UINT64_MAX.
+ * Note that this is different from the C99 specification which
+ * requires the existence of 64 bit support in the compiler. If
+ * this is not defined for your platform, yet it is capable of
+ * dealing with 64 bits then it is because this file has not yet
+ * been extended to cover all of your system's capabilities.
+ *
+ * 7) (u)intptr_t may or may not be defined. Test for its presence
+ * with the test: #ifdef PTRDIFF_MAX. If this is not defined
+ * for your platform, then it is because this file has not yet
+ * been extended to cover all of your system's capabilities, not
+ *    because it's optional.
+ *
+ * 8) The following might not be defined even if your platform is
+ * capable of defining it:
+ *
+ * WCHAR_MIN
+ * WCHAR_MAX
+ * (u)int64_t
+ * PTRDIFF_MIN
+ * PTRDIFF_MAX
+ * (u)intptr_t
+ *
+ * 9) The following have not been defined:
+ *
+ * WINT_MIN
+ * WINT_MAX
+ *
+ * 10) The criteria for defining (u)int_least(*)_t aren't clear,
+ *     except for systems which don't have types that precisely
+ *     define 8, 16, or 32 bit types (which this include file does
+ *     not support anyway). Default definitions have been given.
+ *
+ * 11) The criteria for defining (u)int_fast(*)_t aren't something I
+ * would trust to any particular compiler vendor or the ANSI C
+ * committee. It is well known that "compatible systems" are
+ * commonly created that have very different performance
+ * characteristics from the systems they are compatible with,
+ * especially those whose vendors make both the compiler and the
+ *     system. Default definitions have been given, but it's strongly
+ * recommended that users never use these definitions for any
+ * reason (they do *NOT* deliver any serious guarantee of
+ * improved performance -- not in this file, nor any vendor's
+ * stdint.h).
+ *
+ * 12) The following macros:
+ *
+ * PRINTF_INTMAX_MODIFIER
+ * PRINTF_INT64_MODIFIER
+ * PRINTF_INT32_MODIFIER
+ * PRINTF_INT16_MODIFIER
+ * PRINTF_LEAST64_MODIFIER
+ * PRINTF_LEAST32_MODIFIER
+ * PRINTF_LEAST16_MODIFIER
+ * PRINTF_INTPTR_MODIFIER
+ *
+ * are strings which have been defined as the modifiers required
+ * for the "d", "u" and "x" printf formats to correctly output
+ * (u)intmax_t, (u)int64_t, (u)int32_t, (u)int16_t, (u)least64_t,
+ * (u)least32_t, (u)least16_t and (u)intptr_t types respectively.
+ * PRINTF_INTPTR_MODIFIER is not defined for some systems which
+ * provide their own stdint.h. PRINTF_INT64_MODIFIER is not
+ * defined if INT64_MAX is not defined. These are an extension
+ * beyond what C99 specifies must be in stdint.h.
+ *
+ * In addition, the following macros are defined:
+ *
+ * PRINTF_INTMAX_HEX_WIDTH
+ * PRINTF_INT64_HEX_WIDTH
+ * PRINTF_INT32_HEX_WIDTH
+ * PRINTF_INT16_HEX_WIDTH
+ * PRINTF_INT8_HEX_WIDTH
+ * PRINTF_INTMAX_DEC_WIDTH
+ * PRINTF_INT64_DEC_WIDTH
+ * PRINTF_INT32_DEC_WIDTH
+ * PRINTF_INT16_DEC_WIDTH
+ *     PRINTF_INT8_DEC_WIDTH
+ * PRINTF_UINTMAX_DEC_WIDTH
+ * PRINTF_UINT64_DEC_WIDTH
+ * PRINTF_UINT32_DEC_WIDTH
+ * PRINTF_UINT16_DEC_WIDTH
+ * PRINTF_UINT8_DEC_WIDTH
+ *
+ *     These specify the maximum number of characters required to
+ *     print a number of that type in either hexadecimal or decimal.
+ * These are an extension beyond what C99 specifies must be in
+ * stdint.h.
+ *
+ * Compilers tested (all with 0 warnings at their highest respective
+ * settings): Borland Turbo C 2.0, WATCOM C/C++ 11.0 (16 bits and 32
+ * bits), Microsoft Visual C++ 6.0 (32 bit), Microsoft Visual Studio
+ * .net (VC7), Intel C++ 4.0, GNU gcc v3.3.3
+ *
+ * This file should be considered a work in progress. Suggestions for
+ * improvements, especially those which increase coverage are strongly
+ * encouraged.
+ *
+ * Acknowledgements
+ *
+ * The following people have made significant contributions to the
+ * development and testing of this file:
+ *
+ * Chris Howie
+ * John Steele Scott
+ * Dave Thorup
+ * John Dill
+ * Florian Wobbe
+ * Christopher Sean Morrison
+ * Mikkel Fahnoe Jorgensen
+ *
+ */
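+
+/* For example, with the modifier macros described above, a 64-bit value can
+ * be printed portably as (a sketch; `x` stands for any hypothetical value):
+ *
+ *     printf("%" PRINTF_INT64_MODIFIER "d\n", (int64_t)x);
+ */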
+
+#include <stddef.h>
+#include <limits.h>
+#include <signal.h>
+
+/*
+ * For gcc with _STDINT_H, fill in the PRINTF_INT*_MODIFIER macros, and
+ * do nothing else. On the Mac OS X version of gcc this is _STDINT_H_.
+ */
+
+#if ((defined(_MSC_VER) && _MSC_VER >= 1600) || (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (defined (__WATCOMC__) && (defined (_STDINT_H_INCLUDED) || __WATCOMC__ >= 1250)) || (defined(__GNUC__) && (__GNUC__ > 3 || defined(_STDINT_H) || defined(_STDINT_H_) || defined (__UINT_FAST64_TYPE__)) )) && !defined (_PSTDINT_H_INCLUDED)
+#include <stdint.h>
+#define _PSTDINT_H_INCLUDED
+# if defined(__GNUC__) && (defined(__x86_64__) || defined(__ppc64__)) && !(defined(__APPLE__) && defined(__MACH__))
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "l"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# else
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# if (UINT_MAX == UINT32_MAX)
+# define PRINTF_INT32_MODIFIER ""
+# else
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+# endif
+# endif
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_UINT64_HEX_WIDTH
+# define PRINTF_UINT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_UINT32_HEX_WIDTH
+# define PRINTF_UINT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_UINT16_HEX_WIDTH
+# define PRINTF_UINT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_UINT8_HEX_WIDTH
+# define PRINTF_UINT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+# endif
+# ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+# endif
+# ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_HEX_WIDTH
+# define PRINTF_UINTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_DEC_WIDTH
+# define PRINTF_UINTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+
+/*
+ * Something really weird is going on with Open Watcom. Just pull some of
+ * these duplicated definitions from Open Watcom's stdint.h file for now.
+ */
+
+# if defined (__WATCOMC__) && __WATCOMC__ >= 1250
+# if !defined (INT64_C)
+# define INT64_C(x) (x + (INT64_MAX - INT64_MAX))
+# endif
+# if !defined (UINT64_C)
+# define UINT64_C(x) (x + (UINT64_MAX - UINT64_MAX))
+# endif
+# if !defined (INT32_C)
+# define INT32_C(x) (x + (INT32_MAX - INT32_MAX))
+# endif
+# if !defined (UINT32_C)
+# define UINT32_C(x) (x + (UINT32_MAX - UINT32_MAX))
+# endif
+# if !defined (INT16_C)
+# define INT16_C(x) (x)
+# endif
+# if !defined (UINT16_C)
+# define UINT16_C(x) (x)
+# endif
+# if !defined (INT8_C)
+# define INT8_C(x) (x)
+# endif
+# if !defined (UINT8_C)
+# define UINT8_C(x) (x)
+# endif
+# if !defined (UINT64_MAX)
+# define UINT64_MAX 18446744073709551615ULL
+# endif
+# if !defined (INT64_MAX)
+# define INT64_MAX 9223372036854775807LL
+# endif
+# if !defined (UINT32_MAX)
+# define UINT32_MAX 4294967295UL
+# endif
+# if !defined (INT32_MAX)
+# define INT32_MAX 2147483647L
+# endif
+# if !defined (INTMAX_MAX)
+# define INTMAX_MAX INT64_MAX
+# endif
+# if !defined (INTMAX_MIN)
+# define INTMAX_MIN INT64_MIN
+# endif
+# endif
+#endif
+
+#ifndef _PSTDINT_H_INCLUDED
+#define _PSTDINT_H_INCLUDED
+
+#ifndef SIZE_MAX
+# define SIZE_MAX (~(size_t)0)
+#endif
+
+/*
+ * Deduce the type assignments from limits.h under the assumption that
+ * integer sizes in bits are powers of 2, and follow the ANSI
+ * definitions.
+ */
+
+#ifndef UINT8_MAX
+# define UINT8_MAX 0xff
+#endif
+#if !defined(uint8_t) && !defined(_UINT8_T)
+# if (UCHAR_MAX == UINT8_MAX) || defined (S_SPLINT_S)
+ typedef unsigned char uint8_t;
+# define UINT8_C(v) ((uint8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef INT8_MAX
+# define INT8_MAX 0x7f
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN INT8_C(0x80)
+#endif
+#if !defined(int8_t) && !defined(_INT8_T)
+# if (SCHAR_MAX == INT8_MAX) || defined (S_SPLINT_S)
+ typedef signed char int8_t;
+# define INT8_C(v) ((int8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef UINT16_MAX
+# define UINT16_MAX 0xffff
+#endif
+#if !defined(uint16_t) && !defined(_UINT16_T)
+#if (UINT_MAX == UINT16_MAX) || defined (S_SPLINT_S)
+ typedef unsigned int uint16_t;
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+# define UINT16_C(v) ((uint16_t) (v))
+#elif (USHRT_MAX == UINT16_MAX)
+ typedef unsigned short uint16_t;
+# define UINT16_C(v) ((uint16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT16_MAX
+# define INT16_MAX 0x7fff
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN INT16_C(0x8000)
+#endif
+#if !defined(int16_t) && !defined(_INT16_T)
+#if (INT_MAX == INT16_MAX) || defined (S_SPLINT_S)
+ typedef signed int int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT16_MAX)
+ typedef signed short int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef UINT32_MAX
+# define UINT32_MAX (0xffffffffUL)
+#endif
+#if !defined(uint32_t) && !defined(_UINT32_T)
+#if (ULONG_MAX == UINT32_MAX) || defined (S_SPLINT_S)
+ typedef unsigned long uint32_t;
+# define UINT32_C(v) v ## UL
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (UINT_MAX == UINT32_MAX)
+ typedef unsigned int uint32_t;
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# define UINT32_C(v) v ## U
+#elif (USHRT_MAX == UINT32_MAX)
+ typedef unsigned short uint32_t;
+# define UINT32_C(v) ((unsigned short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT32_MAX
+# define INT32_MAX (0x7fffffffL)
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN INT32_C(0x80000000)
+#endif
+#if !defined(int32_t) && !defined(_INT32_T)
+#if (LONG_MAX == INT32_MAX) || defined (S_SPLINT_S)
+ typedef signed long int32_t;
+# define INT32_C(v) v ## L
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (INT_MAX == INT32_MAX)
+ typedef signed int int32_t;
+# define INT32_C(v) v
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT32_MAX)
+ typedef signed short int32_t;
+# define INT32_C(v) ((short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+/*
+ * The macro stdint_int64_defined is temporarily used to record
+ * whether or not 64 bit integer support is available. It must be
+ * defined for any 64 bit integer extensions for new platforms that
+ * are added.
+ */
+
+#undef stdint_int64_defined
+#if (defined(__STDC__) && defined(__STDC_VERSION__)) || defined (S_SPLINT_S)
+# if (__STDC__ && __STDC_VERSION__ >= 199901L) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# endif
+#endif
+
+#if !defined (stdint_int64_defined)
+# if defined(__GNUC__)
+# define stdint_int64_defined
+ __extension__ typedef long long int64_t;
+ __extension__ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif defined(__MWERKS__) || defined (__SUNPRO_C) || defined (__SUNPRO_CC) || defined (__APPLE_CC__) || defined (_LONG_LONG) || defined (_CRAYC) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif (defined(__WATCOMC__) && defined(__WATCOM_INT64__)) || (defined(_MSC_VER) && _INTEGRAL_MAX_BITS >= 64) || (defined (__BORLANDC__) && __BORLANDC__ > 0x460) || defined (__alpha) || defined (__DECC)
+# define stdint_int64_defined
+ typedef __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+# define UINT64_C(v) v ## UI64
+# define INT64_C(v) v ## I64
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "I64"
+# endif
+# endif
+#endif
+
+#if !defined (LONG_LONG_MAX) && defined (INT64_C)
+# define LONG_LONG_MAX INT64_C (9223372036854775807)
+#endif
+#ifndef ULONG_LONG_MAX
+# define ULONG_LONG_MAX UINT64_C (18446744073709551615)
+#endif
+
+#if !defined (INT64_MAX) && defined (INT64_C)
+# define INT64_MAX INT64_C (9223372036854775807)
+#endif
+#if !defined (INT64_MIN) && defined (INT64_C)
+# define INT64_MIN INT64_C (-9223372036854775808)
+#endif
+#if !defined (UINT64_MAX) && defined (INT64_C)
+# define UINT64_MAX UINT64_C (18446744073709551615)
+#endif
+
+/*
+ * Default widths of the hexadecimal and decimal number fields.
+ */
+
+#ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+#endif
+#ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+#endif
+#ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+#endif
+#ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+#endif
+#ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+#endif
+#ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+#endif
+#ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+#endif
+#ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+#endif
+
+/*
+ * Ok, let's not worry about 128 bit integers for now. Moore's law says
+ * we don't need to worry about that until about 2040, at which point
+ * we'll have bigger things to worry about.
+ */
+
+#ifdef stdint_int64_defined
+ typedef int64_t intmax_t;
+ typedef uint64_t uintmax_t;
+# define INTMAX_MAX INT64_MAX
+# define INTMAX_MIN INT64_MIN
+# define UINTMAX_MAX UINT64_MAX
+# define UINTMAX_C(v) UINT64_C(v)
+# define INTMAX_C(v) INT64_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT64_DEC_WIDTH
+# endif
+#else
+ typedef int32_t intmax_t;
+ typedef uint32_t uintmax_t;
+# define INTMAX_MAX INT32_MAX
+# define UINTMAX_MAX UINT32_MAX
+# define UINTMAX_C(v) UINT32_C(v)
+# define INTMAX_C(v) INT32_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT32_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT32_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT32_DEC_WIDTH
+# endif
+#endif
+
+/*
+ * Because this file currently only supports platforms which have
+ * precise powers of 2 as bit sizes for the default integers, the
+ * least definitions are all trivial. It's possible that a future
+ * version of this file could have different definitions.
+ */
+
+#ifndef stdint_least_defined
+ typedef int8_t int_least8_t;
+ typedef uint8_t uint_least8_t;
+ typedef int16_t int_least16_t;
+ typedef uint16_t uint_least16_t;
+ typedef int32_t int_least32_t;
+ typedef uint32_t uint_least32_t;
+# define PRINTF_LEAST32_MODIFIER PRINTF_INT32_MODIFIER
+# define PRINTF_LEAST16_MODIFIER PRINTF_INT16_MODIFIER
+# define UINT_LEAST8_MAX UINT8_MAX
+# define INT_LEAST8_MAX INT8_MAX
+# define UINT_LEAST16_MAX UINT16_MAX
+# define INT_LEAST16_MAX INT16_MAX
+# define UINT_LEAST32_MAX UINT32_MAX
+# define INT_LEAST32_MAX INT32_MAX
+# define INT_LEAST8_MIN INT8_MIN
+# define INT_LEAST16_MIN INT16_MIN
+# define INT_LEAST32_MIN INT32_MIN
+# ifdef stdint_int64_defined
+ typedef int64_t int_least64_t;
+ typedef uint64_t uint_least64_t;
+# define PRINTF_LEAST64_MODIFIER PRINTF_INT64_MODIFIER
+# define UINT_LEAST64_MAX UINT64_MAX
+# define INT_LEAST64_MAX INT64_MAX
+# define INT_LEAST64_MIN INT64_MIN
+# endif
+#endif
+#undef stdint_least_defined
+
+/*
+ * The ANSI C committee pretending to know or specify anything about
+ * performance is the epitome of misguided arrogance. The mandate of
+ * this file is to *ONLY* ever support that absolute minimum
+ * definition of the fast integer types, for compatibility purposes.
+ * No extensions, and no attempt to suggest what may or may not be a
+ * faster integer type will ever be made in this file. Developers are
+ * warned to stay away from these types when using this or any other
+ * stdint.h.
+ */
+
+typedef int_least8_t int_fast8_t;
+typedef uint_least8_t uint_fast8_t;
+typedef int_least16_t int_fast16_t;
+typedef uint_least16_t uint_fast16_t;
+typedef int_least32_t int_fast32_t;
+typedef uint_least32_t uint_fast32_t;
+#define UINT_FAST8_MAX UINT_LEAST8_MAX
+#define INT_FAST8_MAX INT_LEAST8_MAX
+#define UINT_FAST16_MAX UINT_LEAST16_MAX
+#define INT_FAST16_MAX INT_LEAST16_MAX
+#define UINT_FAST32_MAX UINT_LEAST32_MAX
+#define INT_FAST32_MAX INT_LEAST32_MAX
+#define INT_FAST8_MIN INT_LEAST8_MIN
+#define INT_FAST16_MIN INT_LEAST16_MIN
+#define INT_FAST32_MIN INT_LEAST32_MIN
+#ifdef stdint_int64_defined
+ typedef int_least64_t int_fast64_t;
+ typedef uint_least64_t uint_fast64_t;
+# define UINT_FAST64_MAX UINT_LEAST64_MAX
+# define INT_FAST64_MAX INT_LEAST64_MAX
+# define INT_FAST64_MIN INT_LEAST64_MIN
+#endif
+
+#undef stdint_int64_defined
+
+/*
+ * Whatever piecemeal, per compiler thing we can do about the wchar_t
+ * type limits.
+ */
+
+#if defined(__WATCOMC__) || defined(_MSC_VER) || defined (__GNUC__)
+# include <wchar.h>
+# ifndef WCHAR_MIN
+# define WCHAR_MIN 0
+# endif
+# ifndef WCHAR_MAX
+# define WCHAR_MAX ((wchar_t)-1)
+# endif
+#endif
+
+/*
+ * Whatever piecemeal, per compiler/platform thing we can do about the
+ * (u)intptr_t types and limits.
+ */
+
+#if (defined (_MSC_VER) && defined (_UINTPTR_T_DEFINED)) || defined (_UINTPTR_T)
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+#ifndef STDINT_H_UINTPTR_T_DEFINED
+# if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) || defined (_WIN64) || defined (__ppc64__)
+# define stdint_intptr_bits 64
+# elif defined (__WATCOMC__) || defined (__TURBOC__)
+# if defined(__TINY__) || defined(__SMALL__) || defined(__MEDIUM__)
+# define stdint_intptr_bits 16
+# else
+# define stdint_intptr_bits 32
+# endif
+# elif defined (__i386__) || defined (_WIN32) || defined (WIN32) || defined (__ppc64__)
+# define stdint_intptr_bits 32
+# elif defined (__INTEL_COMPILER)
+/* TODO -- what did Intel do about x86-64? */
+# else
+/* #error "This platform might not be supported yet" */
+# endif
+
+# ifdef stdint_intptr_bits
+# define stdint_intptr_glue3_i(a,b,c) a##b##c
+# define stdint_intptr_glue3(a,b,c) stdint_intptr_glue3_i(a,b,c)
+# ifndef PRINTF_INTPTR_MODIFIER
+# define PRINTF_INTPTR_MODIFIER stdint_intptr_glue3(PRINTF_INT,stdint_intptr_bits,_MODIFIER)
+# endif
+# ifndef PTRDIFF_MAX
+# define PTRDIFF_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef PTRDIFF_MIN
+# define PTRDIFF_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef UINTPTR_MAX
+# define UINTPTR_MAX stdint_intptr_glue3(UINT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MAX
+# define INTPTR_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MIN
+# define INTPTR_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef INTPTR_C
+# define INTPTR_C(x) stdint_intptr_glue3(INT,stdint_intptr_bits,_C)(x)
+# endif
+# ifndef UINTPTR_C
+# define UINTPTR_C(x) stdint_intptr_glue3(UINT,stdint_intptr_bits,_C)(x)
+# endif
+ typedef stdint_intptr_glue3(uint,stdint_intptr_bits,_t) uintptr_t;
+ typedef stdint_intptr_glue3( int,stdint_intptr_bits,_t) intptr_t;
+# else
+/* TODO -- The following is likely wrong for some platforms, and does
+ nothing for the definition of uintptr_t. */
+ typedef ptrdiff_t intptr_t;
+# endif
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+/*
+ * Assumes sig_atomic_t is signed and we have a 2s complement machine.
+ */
+
+#ifndef SIG_ATOMIC_MAX
+# define SIG_ATOMIC_MAX ((((sig_atomic_t) 1) << (sizeof (sig_atomic_t)*CHAR_BIT-1)) - 1)
+#endif
+
+#endif
+
+#if defined (__TEST_PSTDINT_FOR_CORRECTNESS)
+
+/*
+ * Please compile with the maximum warning settings to make sure macros are
+ * not defined more than once.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#define glue3_aux(x,y,z) x ## y ## z
+#define glue3(x,y,z) glue3_aux(x,y,z)
+
+#define DECLU(bits) glue3(uint,bits,_t) glue3(u,bits,) = glue3(UINT,bits,_C) (0);
+#define DECLI(bits) glue3(int,bits,_t) glue3(i,bits,) = glue3(INT,bits,_C) (0);
+
+#define DECL(us,bits) glue3(DECL,us,) (bits)
+
+#define TESTUMAX(bits) glue3(u,bits,) = ~glue3(u,bits,); if (glue3(UINT,bits,_MAX) != glue3(u,bits,)) printf ("Something wrong with UINT%d_MAX\n", bits)
+
+#define REPORTERROR(msg) { err_n++; if (err_first <= 0) err_first = __LINE__; printf msg; }
+
+int main () {
+ int err_n = 0;
+ int err_first = 0;
+ DECL(I,8)
+ DECL(U,8)
+ DECL(I,16)
+ DECL(U,16)
+ DECL(I,32)
+ DECL(U,32)
+#ifdef INT64_MAX
+ DECL(I,64)
+ DECL(U,64)
+#endif
+ intmax_t imax = INTMAX_C(0);
+ uintmax_t umax = UINTMAX_C(0);
+ char str0[256], str1[256];
+
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "d", INT32_C(2147483647));
+ if (0 != strcmp (str0, "2147483647")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_INT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_INT32_DEC_WIDTH : %s\n", PRINTF_INT32_DEC_WIDTH));
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "u", UINT32_C(4294967295));
+ if (0 != strcmp (str0, "4294967295")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_UINT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_UINT32_DEC_WIDTH : %s\n", PRINTF_UINT32_DEC_WIDTH));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d", INT64_C(9223372036854775807));
+ if (0 != strcmp (str1, "9223372036854775807")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_INT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_INT64_DEC_WIDTH : %s, %d\n", PRINTF_INT64_DEC_WIDTH, (int) strlen(str1)));
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "u", UINT64_C(18446744073709550591));
+ if (0 != strcmp (str1, "18446744073709550591")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_UINT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_UINT64_DEC_WIDTH : %s, %d\n", PRINTF_UINT64_DEC_WIDTH, (int) strlen(str1)));
+#endif
+
+ sprintf (str0, "%d %x\n", 0, ~0);
+
+ sprintf (str1, "%d %x\n", i8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i8 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u8 : %s\n", str1));
+ sprintf (str1, "%d %x\n", i16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i16 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u16 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "d %x\n", i32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i32 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "u %x\n", u32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u32 : %s\n", str1));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d %x\n", i64, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i64 : %s\n", str1));
+#endif
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "d %x\n", imax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with imax : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "u %x\n", umax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with umax : %s\n", str1));
+
+ TESTUMAX(8);
+ TESTUMAX(16);
+ TESTUMAX(32);
+#ifdef INT64_MAX
+ TESTUMAX(64);
+#endif
+
+#define STR(v) #v
+#define Q(v) printf ("sizeof " STR(v) " = %u\n", (unsigned) sizeof (v));
+ if (err_n) {
+ printf ("pstdint.h is not correct. Please use sizes below to correct it:\n");
+ }
+
+ Q(int)
+ Q(unsigned)
+ Q(long int)
+ Q(short int)
+ Q(int8_t)
+ Q(int16_t)
+ Q(int32_t)
+#ifdef INT64_MAX
+ Q(int64_t)
+#endif
+
+ return EXIT_SUCCESS;
+}
+
+#endif
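
A minimal usage sketch of the printf helper macros defined above, assuming
this header is visible as "pstdint.h" on the include path (the names below
otherwise come straight from the header):

    #include <stdio.h>
    #include "pstdint.h"

    int main(void)
    {
        uint32_t u = UINT32_C(4294967295);

        /* The *_MODIFIER strings splice the right length modifier into the
           format string; the *_HEX_WIDTH strings give a zero-padded width. */
        printf("%" PRINTF_INT32_MODIFIER "u\n", u);
        printf("%0" PRINTF_INT32_HEX_WIDTH PRINTF_INT32_MODIFIER "x\n", u);
    #ifdef INT64_MAX
        printf("%" PRINTF_INT64_MODIFIER "d\n", INT64_C(9223372036854775807));
    #endif
        return 0;
    }
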
diff --git a/include/flatcc/portable/punaligned.h b/include/flatcc/portable/punaligned.h
new file mode 100644
index 0000000..a380edd
--- /dev/null
+++ b/include/flatcc/portable/punaligned.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 Mikkel Fahnøe Jørgensen, dvide.com
+ *
+ * (MIT License)
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * - The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * - The Software is provided "as is", without warranty of any kind, express or
+ * implied, including but not limited to the warranties of merchantability,
+ * fitness for a particular purpose and noninfringement. In no event shall the
+ * authors or copyright holders be liable for any claim, damages or other
+ * liability, whether in an action of contract, tort or otherwise, arising from,
+ * out of or in connection with the Software or the use or other dealings in the
+ * Software.
+ */
+
+#ifndef PUNALIGNED_H
+#define PUNALIGNED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef PORTABLE_UNALIGNED_ACCESS
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+#define PORTABLE_UNALIGNED_ACCESS 1
+#else
+#define PORTABLE_UNALIGNED_ACCESS 0
+#endif
+
+#endif
+
+/* `unaligned_read_16` and friends may be left undefined when endianness cannot be determined, so guard on the explicit little-endian variant instead. */
+#if !defined(unaligned_read_le16toh)
+
+#include "pendian.h"
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#if PORTABLE_UNALIGNED_ACCESS
+
+#define unaligned_read_16(p) (*(uint16_t*)(p))
+#define unaligned_read_32(p) (*(uint32_t*)(p))
+#define unaligned_read_64(p) (*(uint64_t*)(p))
+
+#define unaligned_read_le16toh(p) le16toh(*(uint16_t*)(p))
+#define unaligned_read_le32toh(p) le32toh(*(uint32_t*)(p))
+#define unaligned_read_le64toh(p) le64toh(*(uint64_t*)(p))
+
+#define unaligned_read_be16toh(p) be16toh(*(uint16_t*)(p))
+#define unaligned_read_be32toh(p) be32toh(*(uint32_t*)(p))
+#define unaligned_read_be64toh(p) be64toh(*(uint64_t*)(p))
+
+#define unaligned_write_16(p, v) (*(uint16_t*)(p) = (uint16_t)(v))
+#define unaligned_write_32(p, v) (*(uint32_t*)(p) = (uint32_t)(v))
+#define unaligned_write_64(p, v) (*(uint64_t*)(p) = (uint64_t)(v))
+
+#define unaligned_write_htole16(p, v) (*(uint16_t*)(p) = htole16(v))
+#define unaligned_write_htole32(p, v) (*(uint32_t*)(p) = htole32(v))
+#define unaligned_write_htole64(p, v) (*(uint64_t*)(p) = htole64(v))
+
+#define unaligned_write_htobe16(p, v) (*(uint16_t*)(p) = htobe16(v))
+#define unaligned_write_htobe32(p, v) (*(uint32_t*)(p) = htobe32(v))
+#define unaligned_write_htobe64(p, v) (*(uint64_t*)(p) = htobe64(v))
+
+#else
+
+#define unaligned_read_le16toh(p) ( \
+ (((uint16_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint16_t)(((uint8_t *)(p))[1])) << 8))
+
+#define unaligned_read_le32toh(p) ( \
+ (((uint32_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint32_t)(((uint8_t *)(p))[1])) << 8) | \
+ (((uint32_t)(((uint8_t *)(p))[2])) << 16) | \
+ (((uint32_t)(((uint8_t *)(p))[3])) << 24))
+
+#define unaligned_read_le64toh(p) ( \
+ (((uint64_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint64_t)(((uint8_t *)(p))[1])) << 8) | \
+ (((uint64_t)(((uint8_t *)(p))[2])) << 16) | \
+ (((uint64_t)(((uint8_t *)(p))[3])) << 24) | \
+ (((uint64_t)(((uint8_t *)(p))[4])) << 32) | \
+ (((uint64_t)(((uint8_t *)(p))[5])) << 40) | \
+ (((uint64_t)(((uint8_t *)(p))[6])) << 48) | \
+ (((uint64_t)(((uint8_t *)(p))[7])) << 56))
+
+#define unaligned_read_be16toh(p) ( \
+ (((uint16_t)(((uint8_t *)(p))[0])) << 8) | \
+ (((uint16_t)(((uint8_t *)(p))[1])) << 0))
+
+#define unaligned_read_be32toh(p) ( \
+ (((uint32_t)(((uint8_t *)(p))[0])) << 24) | \
+ (((uint32_t)(((uint8_t *)(p))[1])) << 16) | \
+ (((uint32_t)(((uint8_t *)(p))[2])) << 8) | \
+ (((uint32_t)(((uint8_t *)(p))[3])) << 0))
+
+#define unaligned_read_be64toh(p) ( \
+ (((uint64_t)(((uint8_t *)(p))[0])) << 56) | \
+ (((uint64_t)(((uint8_t *)(p))[1])) << 48) | \
+ (((uint64_t)(((uint8_t *)(p))[2])) << 40) | \
+ (((uint64_t)(((uint8_t *)(p))[3])) << 32) | \
+ (((uint64_t)(((uint8_t *)(p))[4])) << 24) | \
+ (((uint64_t)(((uint8_t *)(p))[5])) << 16) | \
+ (((uint64_t)(((uint8_t *)(p))[6])) << 8) | \
+ (((uint64_t)(((uint8_t *)(p))[7])) << 0))
+
+#define unaligned_write_htole16(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint16_t)(v)) >> 0); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint16_t)(v)) >> 8); \
+ } while (0)
+
+#define unaligned_write_htole32(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint32_t)(v)) >> 0); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint32_t)(v)) >> 8); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint32_t)(v)) >> 16); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint32_t)(v)) >> 24); \
+ } while (0)
+
+#define unaligned_write_htole64(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint64_t)(v)) >> 0); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint64_t)(v)) >> 8); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint64_t)(v)) >> 16); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint64_t)(v)) >> 24); \
+ ((uint8_t *)(p))[4] = (uint8_t)(((uint64_t)(v)) >> 32); \
+ ((uint8_t *)(p))[5] = (uint8_t)(((uint64_t)(v)) >> 40); \
+ ((uint8_t *)(p))[6] = (uint8_t)(((uint64_t)(v)) >> 48); \
+ ((uint8_t *)(p))[7] = (uint8_t)(((uint64_t)(v)) >> 56); \
+ } while (0)
+
+#define unaligned_write_htobe16(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint16_t)(v)) >> 8); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint16_t)(v)) >> 0); \
+ } while (0)
+
+#define unaligned_write_htobe32(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint32_t)(v)) >> 24); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint32_t)(v)) >> 16); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint32_t)(v)) >> 8); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint32_t)(v)) >> 0); \
+ } while (0)
+
+#define unaligned_write_htobe64(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint64_t)(v)) >> 56); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint64_t)(v)) >> 48); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint64_t)(v)) >> 40); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint64_t)(v)) >> 32); \
+ ((uint8_t *)(p))[4] = (uint8_t)(((uint64_t)(v)) >> 24); \
+ ((uint8_t *)(p))[5] = (uint8_t)(((uint64_t)(v)) >> 16); \
+ ((uint8_t *)(p))[6] = (uint8_t)(((uint64_t)(v)) >> 8); \
+ ((uint8_t *)(p))[7] = (uint8_t)(((uint64_t)(v)) >> 0); \
+ } while (0)
+
+#if __LITTLE_ENDIAN__
+#define unaligned_read_16(p) unaligned_read_le16toh(p)
+#define unaligned_read_32(p) unaligned_read_le32toh(p)
+#define unaligned_read_64(p) unaligned_read_le64toh(p)
+
+#define unaligned_write_16(p, v) unaligned_write_htole16(p, v)
+#define unaligned_write_32(p, v) unaligned_write_htole32(p, v)
+#define unaligned_write_64(p, v) unaligned_write_htole64(p, v)
+#endif
+
+#if __BIG_ENDIAN__
+#define unaligned_read_16(p) unaligned_read_be16toh(p)
+#define unaligned_read_32(p) unaligned_read_be32toh(p)
+#define unaligned_read_64(p) unaligned_read_be64toh(p)
+
+#define unaligned_write_16(p) unaligned_write_htobe16(p)
+#define unaligned_write_32(p) unaligned_write_htobe32(p)
+#define unaligned_write_64(p) unaligned_write_htobe64(p)
+#endif
+
+#endif
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PUNALIGNED_H */
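
A minimal sketch of reading and writing a little-endian 32-bit value at a
deliberately misaligned offset, assuming "punaligned.h" and its "pendian.h"
dependency are both on the include path:

    #include <stdio.h>
    #include <string.h>
    #include "punaligned.h"

    int main(void)
    {
        unsigned char buf[8];

        memset(buf, 0, sizeof(buf));
        /* buf + 1 is misaligned for a uint32_t; the macros handle it either
           via a direct access or the byte-wise fallback. */
        unaligned_write_htole32(buf + 1, 0x11223344u);
        printf("0x%08lx\n", (unsigned long)unaligned_read_le32toh(buf + 1));
        return 0;
    }
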
diff --git a/include/flatcc/portable/pversion.h b/include/flatcc/portable/pversion.h
new file mode 100644
index 0000000..d434104
--- /dev/null
+++ b/include/flatcc/portable/pversion.h
@@ -0,0 +1,6 @@
+#define PORTABLE_VERSION_TEXT "0.2.6-pre"
+#define PORTABLE_VERSION_MAJOR 0
+#define PORTABLE_VERSION_MINOR 2
+#define PORTABLE_VERSION_PATCH 6
+/* 1 or 0 */
+#define PORTABLE_VERSION_RELEASED 0
diff --git a/include/flatcc/portable/pwarnings.h b/include/flatcc/portable/pwarnings.h
new file mode 100644
index 0000000..f420861
--- /dev/null
+++ b/include/flatcc/portable/pwarnings.h
@@ -0,0 +1,52 @@
+#ifndef PWARNINGS_H
+#define PWARNINGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * See also pdiagnostics.h headers for per file control of common
+ * warnings.
+ *
+ * This file is intended for global disabling of warnings that shouldn't
+ * be present in C11 or perhaps C99, or are generally just noise, where
+ * recent clang / gcc compile cleanly at high warning levels.
+ */
+
+#if defined(_MSC_VER)
+/* Needed when flagging code in or out and more. */
+#pragma warning(disable: 4127) /* conditional expression is constant */
+/* happens also in MS's own headers. */
+#pragma warning(disable: 4668) /* preprocessor name not defined */
+/* MSVC does not respect double parentheses for intent */
+#pragma warning(disable: 4706) /* assignment within conditional expression */
+/* `inline` is only advisory anyway. */
+#pragma warning(disable: 4710) /* function not inlined */
+/* Well, we don't intend to add the padding manually. */
+#pragma warning(disable: 4820) /* x bytes padding added in struct */
+
+/*
+ * Don't warn that fopen etc. are unsafe
+ *
+ * Define a compiler flag like `-D_CRT_SECURE_NO_WARNINGS` in the build.
+ * For some reason it doesn't work when defined here.
+ *
+ * #define _CRT_SECURE_NO_WARNINGS
+ */
+
+/*
+ * Anonymous union in struct is valid in C11 and has been supported in
+ * GCC and Clang for a while, but it is not C99. MSVC also handles it,
+ * but warns. Truly portable code should perhaps not use this feature,
+ * but this is not the place to complain about it.
+ */
+#pragma warning(disable: 4201) /* nonstandard extension used: nameless struct/union */
+
+#endif /* _MSC_VER */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PWARNINGS_H */
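
A sketch of one plausible way to use this header, assuming `include/` is on
the include path. Including it before other headers is an assumption about
typical usage (so the MSVC pragmas also cover warnings triggered in system
headers), not something the header itself requires:

    /* Assumed usage: include first so the pragmas take effect early. */
    #include "flatcc/portable/pwarnings.h"
    #include <stdio.h>

    int main(void)
    {
        printf("compiles cleanly at high warning levels\n");
        return 0;
    }
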
diff --git a/include/flatcc/reflection/README b/include/flatcc/reflection/README
new file mode 100644
index 0000000..3c7207a
--- /dev/null
+++ b/include/flatcc/reflection/README
@@ -0,0 +1,19 @@
+Generated by flatcc
+
+Keep checked in - needed by flatcc to generate binary schema.
+
+NOTE TO CONTRIBUTORS: DO NOT EDIT THESE FILES BY HAND
+
+If you need to change anything here, do it in the code generator,
+possibly followed by running `reflection/generate_code.sh` from the
+project root. But please only do this for testing; do not include the
+generated files in a pull request unless agreed otherwise, and if so,
+do it in a separate commit.
+
+Normally new reflection code is generated during a release, which also
+updates the version number in comments, so there is no reason to update
+reflection on every commit unless something fundamental breaks.
+
+There is a build option `FLATCC_REFLECTION` to disable reflection, which
+is helpful while making changes that affect the content of these files
+in a way that would prevent the flatcc compiler from building.
diff --git a/include/flatcc/reflection/flatbuffers_common_builder.h b/include/flatcc/reflection/flatbuffers_common_builder.h
new file mode 100644
index 0000000..a4be1ce
--- /dev/null
+++ b/include/flatcc/reflection/flatbuffers_common_builder.h
@@ -0,0 +1,685 @@
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#define FLATBUFFERS_COMMON_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers build functionality for C. */
+
+#include "flatcc/flatcc_prologue.h"
+#ifndef FLATBUILDER_H
+#include "flatcc/flatcc_builder.h"
+#endif
+typedef flatcc_builder_t flatbuffers_builder_t;
+typedef flatcc_builder_ref_t flatbuffers_ref_t;
+typedef flatcc_builder_ref_t flatbuffers_vec_ref_t;
+typedef flatcc_builder_union_ref_t flatbuffers_union_ref_t;
+typedef flatcc_builder_union_vec_ref_t flatbuffers_union_vec_ref_t;
+/* integer return code (ref and ptr always fail on 0) */
+#define flatbuffers_failed(x) ((x) < 0)
+typedef flatbuffers_ref_t flatbuffers_root_t;
+#define flatbuffers_root(ref) ((flatbuffers_root_t)(ref))
+
+#define __flatbuffers_memoize_begin(B, src)\
+do { flatcc_builder_ref_t _ref; if ((_ref = flatcc_builder_refmap_find((B), (src)))) return _ref; } while (0)
+#define __flatbuffers_memoize_end(B, src, op) do { return flatcc_builder_refmap_insert((B), (src), (op)); } while (0)
+#define __flatbuffers_memoize(B, src, op) do { __flatbuffers_memoize_begin(B, src); __flatbuffers_memoize_end(B, src, op); } while (0)
+
+#define __flatbuffers_build_buffer(NS)\
+typedef NS ## ref_t NS ## buffer_ref_t;\
+static inline int NS ## buffer_start(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, 0); }\
+static inline int NS ## buffer_start_with_size(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, flatcc_builder_with_size); }\
+static inline int NS ## buffer_start_aligned(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, 0); }\
+static inline int NS ## buffer_start_aligned_with_size(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t NS ## buffer_end(NS ## builder_t *B, NS ## ref_t root)\
+{ return flatcc_builder_end_buffer(B, root); }
+
+#define __flatbuffers_build_table_root(NS, N, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? -1 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, TFID)) return 0;return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }
+
+#define __flatbuffers_build_table_prolog(NS, N, FID, TFID)\
+__flatbuffers_build_table_vector_ops(NS, N ## _vec, N)\
+__flatbuffers_build_table_root(NS, N, FID, TFID)
+
+#define __flatbuffers_build_struct_root(NS, N, A, FID, TFID)\
+static inline N ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? 0 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }
+
+#define __flatbuffers_build_nested_table_root(NS, N, TN, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : TN ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_root(B, t)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, t)); }
+
+#define __flatbuffers_build_nested_struct_root(NS, N, TN, A, FID, TFID)\
+static inline TN ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\
+static inline TN ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end_pe(B))); }\
+static inline int N ## _create_as_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, FID, 0,\
+ TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _create_as_typed_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, TFID, 0,\
+ TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_root(B, p)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, p)); }
+
+#define __flatbuffers_build_vector_ops(NS, V, N, TN, T)\
+static inline T *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return (T *)flatcc_builder_extend_vector(B, len); }\
+static inline T *V ## _append(NS ## builder_t *B, const T *data, size_t len)\
+{ return (T *)flatcc_builder_append_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_vector(B, len); }\
+static inline T *V ## _edit(NS ## builder_t *B)\
+{ return (T *)flatcc_builder_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_vector_count(B); }\
+static inline T *V ## _push(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? (memcpy(_p, p, TN ## __size()), _p) : 0; }\
+static inline T *V ## _push_copy(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_clone(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _assign(_p __ ## TN ## _call_args) : 0; }
+
+#define __flatbuffers_build_vector(NS, N, T, S, A)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { size_t i, n; T *p = (T *)flatcc_builder_vector_edit(B);\
+ for (i = 0, n = flatcc_builder_vector_count(B); i < n; ++i)\
+ { N ## _to_pe(N ## __ptr_add(p, i)); }} return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const T *data, size_t len)\
+{ if (!NS ## is_native_pe()) { size_t i; T *p; int ret = flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); if (ret) { return ret; }\
+ p = (T *)flatcc_builder_extend_vector(B, len); if (!p) return 0;\
+ for (i = 0; i < len; ++i) { N ## _copy_to_pe(N ## __ptr_add(p, i), N ## __const_ptr_add(data, i)); }\
+ return flatcc_builder_end_vector(B); } else return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ __flatbuffers_memoize(B, vec, flatcc_builder_create_vector(B, vec, N ## _vec_len(vec), S, A, FLATBUFFERS_COUNT_MAX(S))); }\
+static inline N ## _vec_ref_t N ## _vec_slice(NS ## builder_t *B, N ##_vec_t vec, size_t index, size_t len)\
+{ size_t n = N ## _vec_len(vec); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_vector(B, N ## __const_ptr_add(vec, index), len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+__flatbuffers_build_vector_ops(NS, N ## _vec, N, N, T)
+
+#define __flatbuffers_build_union_vector_ops(NS, V, N, TN)\
+static inline TN ## _union_ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _append(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_append_union_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _union_ref_t *) flatcc_builder_union_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_union_vector_count(B); }\
+static inline TN ## _union_ref_t *V ## _push(NS ## builder_t *B, const TN ## _union_ref_t ref)\
+{ return flatcc_builder_union_vector_push(B, ref); }\
+static inline TN ## _union_ref_t *V ## _push_clone(NS ## builder_t *B, TN ## _union_t u)\
+{ return TN ## _vec_push(B, TN ## _clone(B, u)); }
+
+#define __flatbuffers_build_union_vector(NS, N)\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_create_union_vector(B, data, len); }\
+__flatbuffers_build_union_vector_ops(NS, N ## _vec, N, N)\
+/* Preserves DAG structure separately for type and value vector, so a type vector could be shared for many value vectors. */\
+static inline N ## _union_vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_union_vec_t vec)\
+{ N ## _union_vec_ref_t _uvref, _ret = { 0, 0 }; NS ## union_ref_t _uref; size_t _i, _len;\
+ if (vec.type == 0) return _ret;\
+ _uvref.type = flatcc_builder_refmap_find(B, vec.type); _uvref.value = flatcc_builder_refmap_find(B, vec.value);\
+ _len = N ## _union_vec_len(vec); if (_uvref.type == 0) {\
+ _uvref.type = flatcc_builder_refmap_insert(B, vec.type, (flatcc_builder_create_type_vector(B, vec.type, _len))); }\
+ if (_uvref.type == 0) return _ret; if (_uvref.value == 0) {\
+ if (flatcc_builder_start_offset_vector(B)) return _ret;\
+ for (_i = 0; _i < _len; ++_i) { _uref = N ## _clone(B, N ## _union_vec_at(vec, _i));\
+ if (!_uref.value || !(flatcc_builder_offset_vector_push(B, _uref.value))) return _ret; }\
+ _uvref.value = flatcc_builder_refmap_insert(B, vec.value, flatcc_builder_end_offset_vector(B));\
+ if (_uvref.value == 0) return _ret; } return _uvref; }
+
+#define __flatbuffers_build_string_vector_ops(NS, N)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NS ## string_ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return NS ## string_vec_push(B, NS ## string_end(B)); }\
+static inline NS ## string_ref_t *N ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_create(B, s, len)); }\
+static inline NS ## string_ref_t *N ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NS ## string_vec_push(B, NS ## string_create_str(B, s)); }\
+static inline NS ## string_ref_t *N ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NS ## string_vec_push(B, NS ## string_create_strn(B, s, max_len)); }\
+static inline NS ## string_ref_t *N ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NS ## string_vec_push(B, NS ## string_clone(B, string)); }\
+static inline NS ## string_ref_t *N ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_slice(B, string, index, len)); }
+
+#define __flatbuffers_build_table_vector_ops(NS, N, TN)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline TN ## _ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return N ## _push(B, TN ## _end(B)); }\
+static inline TN ## _ref_t *N ## _push_create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _push(B, TN ## _create(B __ ## TN ## _call_args)); }
+
+#define __flatbuffers_build_offset_vector_ops(NS, V, N, TN)\
+static inline TN ## _ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _append(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return flatcc_builder_append_offset_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _ref_t *)flatcc_builder_offset_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_offset_vector_count(B); }\
+static inline TN ## _ref_t *V ## _push(NS ## builder_t *B, const TN ## _ref_t ref)\
+{ return ref ? flatcc_builder_offset_vector_push(B, ref) : 0; }
+
+#define __flatbuffers_build_offset_vector(NS, N)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _ref_t *data, size_t len)\
+{ return flatcc_builder_create_offset_vector(B, data, len); }\
+__flatbuffers_build_offset_vector_ops(NS, N ## _vec, N, N)\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ int _ret; N ## _ref_t _e; size_t _i, _len; __flatbuffers_memoize_begin(B, vec);\
+ _len = N ## _vec_len(vec); if (flatcc_builder_start_offset_vector(B)) return 0;\
+ for (_i = 0; _i < _len; ++_i) { if (!(_e = N ## _clone(B, N ## _vec_at(vec, _i)))) return 0;\
+ if (!flatcc_builder_offset_vector_push(B, _e)) return 0; }\
+ __flatbuffers_memoize_end(B, vec, flatcc_builder_end_offset_vector(B)); }\
+
+#define __flatbuffers_build_string_ops(NS, N)\
+static inline char *N ## _append(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string(B, s, len); }\
+static inline char *N ## _append_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_append_string_str(B, s); }\
+static inline char *N ## _append_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string_strn(B, s, len); }\
+static inline size_t N ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_string_len(B); }\
+static inline char *N ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_string(B, len); }\
+static inline char *N ## _edit(NS ## builder_t *B)\
+{ return flatcc_builder_string_edit(B); }\
+static inline int N ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_string(B, len); }
+
+#define __flatbuffers_build_string(NS)\
+typedef NS ## ref_t NS ## string_ref_t;\
+static inline int NS ## string_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline NS ## string_ref_t NS ## string_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_string(B); }\
+static inline NS ## ref_t NS ## string_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string(B, s, len); }\
+static inline NS ## ref_t NS ## string_create_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_create_string_str(B, s); }\
+static inline NS ## ref_t NS ## string_create_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string_strn(B, s, len); }\
+static inline NS ## string_ref_t NS ## string_clone(NS ## builder_t *B, NS ## string_t string)\
+{ __flatbuffers_memoize(B, string, flatcc_builder_create_string(B, string, NS ## string_len(string))); }\
+static inline NS ## string_ref_t NS ## string_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ size_t n = NS ## string_len(string); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_string(B, string + index, len); }\
+__flatbuffers_build_string_ops(NS, NS ## string)\
+__flatbuffers_build_offset_vector(NS, NS ## string)
+
+#define __flatbuffers_copy_from_pe(P, P2, N) (*(P) = N ## _read_from_pe(P2), (P))
+#define __flatbuffers_from_pe(P, N) (*(P) = N ## _read_from_pe(P), (P))
+#define __flatbuffers_copy_to_pe(P, P2, N) (N ## _write_to_pe((P), *(P2)), (P))
+#define __flatbuffers_to_pe(P, N) (N ## _write_to_pe((P), *(P)), (P))
+#define __flatbuffers_define_fixed_array_primitives(NS, N, T)\
+static inline T *N ## _array_copy(T *p, const T *p2, size_t n)\
+{ memcpy(p, p2, n * sizeof(T)); return p; }\
+static inline T *N ## _array_copy_from_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_from_pe(&p[i], &p2[i]); return p; }\
+static inline T *N ## _array_copy_to_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_to_pe(&p[i], &p2[i]); return p; }
+#define __flatbuffers_define_scalar_primitives(NS, N, T)\
+static inline T *N ## _from_pe(T *p) { return __ ## NS ## from_pe(p, N); }\
+static inline T *N ## _to_pe(T *p) { return __ ## NS ## to_pe(p, N); }\
+static inline T *N ## _copy(T *p, const T *p2) { *p = *p2; return p; }\
+static inline T *N ## _copy_from_pe(T *p, const T *p2)\
+{ return __ ## NS ## copy_from_pe(p, p2, N); }\
+static inline T *N ## _copy_to_pe(T *p, const T *p2) \
+{ return __ ## NS ## copy_to_pe(p, p2, N); }\
+static inline T *N ## _assign(T *p, const T v0) { *p = v0; return p; }\
+static inline T *N ## _assign_from_pe(T *p, T v0)\
+{ *p = N ## _read_from_pe(&v0); return p; }\
+static inline T *N ## _assign_to_pe(T *p, T v0)\
+{ N ## _write_to_pe(p, v0); return p; }
+#define __flatbuffers_build_scalar(NS, N, T)\
+__ ## NS ## define_scalar_primitives(NS, N, T)\
+__ ## NS ## define_fixed_array_primitives(NS, N, T)\
+__ ## NS ## build_vector(NS, N, T, sizeof(T), sizeof(T))
+/* Depends on generated copy_to/from_pe functions, and the type. */
+#define __flatbuffers_define_struct_primitives(NS, N)\
+static inline N ## _t *N ##_to_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_to_pe(p, p); }; return p; }\
+static inline N ## _t *N ##_from_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_from_pe(p, p); }; return p; }\
+static inline N ## _t *N ## _clear(N ## _t *p) { return (N ## _t *)memset(p, 0, N ## __size()); }
+
+/* Depends on generated copy/assign_to/from_pe functions, and the type. */
+#define __flatbuffers_build_struct(NS, N, S, A, FID, TFID)\
+__ ## NS ## define_struct_primitives(NS, N)\
+typedef NS ## ref_t N ## _ref_t;\
+static inline N ## _t *N ## _start(NS ## builder_t *B)\
+{ return (N ## _t *)flatcc_builder_start_struct(B, S, A); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { N ## _to_pe((N ## _t *)flatcc_builder_struct_edit(B)); }\
+ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _create(NS ## builder_t *B __ ## N ## _formal_args)\
+{ N ## _t *_p = N ## _start(B); if (!_p) return 0; N ##_assign_to_pe(_p __ ## N ## _call_args);\
+ return N ## _end_pe(B); }\
+static inline N ## _ref_t N ## _clone(NS ## builder_t *B, N ## _struct_t p)\
+{ N ## _t *_p; __flatbuffers_memoize_begin(B, p); _p = N ## _start(B); if (!_p) return 0;\
+ N ## _copy(_p, p); __flatbuffers_memoize_end(B, p, N ##_end_pe(B)); }\
+__flatbuffers_build_vector(NS, N, N ## _t, S, A)\
+__flatbuffers_build_struct_root(NS, N, A, FID, TFID)\
+
+#define __flatbuffers_struct_clear_field(p) memset((p), 0, sizeof(*(p)))
+#define __flatbuffers_build_table(NS, N, K)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_table(B, K); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ FLATCC_ASSERT(flatcc_builder_check_required(B, __ ## N ## _required,\
+ sizeof(__ ## N ## _required) / sizeof(__ ## N ## _required[0]) - 1));\
+ return flatcc_builder_end_table(B); }\
+__flatbuffers_build_offset_vector(NS, N)
+
+#define __flatbuffers_build_table_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _ref_t ref)\
+{ TN ## _ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ?\
+ ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _end(B)); }\
+static inline TN ## _ref_t N ## _create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _add(B, TN ## _create(B __ ## TN ## _call_args)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _table_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _table_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *_p; TN ## _union_type_t *_pt; if (uref.type == TN ## _NONE) return 0; if (uref.value == 0) return -1;\
+ if (!(_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1, sizeof(*_pt), sizeof(*_pt)))) return -1;\
+ *_pt = uref.type; if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uref.value; return 0; }\
+static inline int N ## _add_type(NS ## builder_t *B, TN ## _union_type_t type)\
+{ TN ## _union_type_t *_pt; if (type == TN ## _NONE) return 0; return (_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1,\
+ sizeof(*_pt), sizeof(*_pt))) ? ((*_pt = type), 0) : -1; }\
+static inline int N ## _add_value(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *p; if (uref.type == TN ## _NONE) return 0; return (p = flatcc_builder_table_add_offset(B, ID)) ?\
+ ((*p = uref.value), 0) : -1; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_table_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _table_t t)\
+{ T ## _ref_t ref = T ## _clone(B, t);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_struct_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline T ## _t *N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _end_pe(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end_pe(B);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _struct_t p)\
+{ T ## _ref_t ref = T ## _clone(B, p);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+#define __flatbuffers_build_union_string_value_field(NS, N, NU, M)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+__flatbuffers_build_string_field_ops(NS, N ## _ ## M)
+
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type
+ * S: sizeof of scalar type, A: alignment of type T, default value V of type T. */
+#define __flatbuffers_build_scalar_field(ID, NS, N, TN, T, S, A, V, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (v == V) return 0; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+static inline int N ## _force_add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects pe endian content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a nop success with 0 as result. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type
+ * S: sizeof of scalar type, A: alignment of type T. */
+#define __flatbuffers_build_scalar_optional_field(ID, NS, N, TN, T, S, A, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects pe endian content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a nop success with 0 as result. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_struct_field(ID, NS, N, TN, S, A, TT)\
+static inline TN ## _t *N ## _start(NS ## builder_t *B)\
+{ return (TN ## _t *)flatcc_builder_table_add(B, ID, S, A); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { TN ## _to_pe((TN ## _t *)flatcc_builder_table_edit(B, S)); } return 0; }\
+static inline int N ## _end_pe(NS ## builder_t *B) { return 0; }\
+static inline int N ## _create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_assign_to_pe(_p __ ## TN ## _call_args);\
+ return 0; }\
+static inline int N ## _add(NS ## builder_t *B, const TN ## _t *p)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_copy_to_pe(_p, p); return 0; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _struct_t p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _struct_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_vector_field(ID, NS, N, TN, T, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _vec_start(B); }\
+static inline int N ## _end_pe(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end_pe(B)); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end(B)); }\
+static inline int N ## _create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create_pe(B, data, len)); }\
+static inline int N ## _create(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create(B, data, len)); }\
+static inline int N ## _slice(NS ## builder_t *B, TN ## _vec_t vec, size_t index, size_t len)\
+{ return N ## _add(B, TN ## _vec_slice(B, vec, index, len)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\
+__flatbuffers_build_vector_ops(NS, N, N, TN, T)\
+
+#define __flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_offset_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_offset_vector(B, data, len)); }\
+__flatbuffers_build_offset_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+/* depends on N ## _add which differs for union member fields and ordinary fields */\
+#define __flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_string(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const char *s, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_string(B, s, len)); }\
+static inline int N ## _create_str(NS ## builder_t *B, const char *s)\
+{ return N ## _add(B, flatcc_builder_create_string_str(B, s)); }\
+static inline int N ## _create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return N ## _add(B, flatcc_builder_create_string_strn(B, s, max_len)); }\
+static inline int N ## _clone(NS ## builder_t *B, NS ## string_t string)\
+{ return N ## _add(B, NS ## string_clone(B, string)); }\
+static inline int N ## _slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return N ## _add(B, NS ## string_slice(B, string, index, len)); }\
+__flatbuffers_build_string_ops(NS, N)
+
+#define __flatbuffers_build_string_field(ID, NS, N, TT)\
+static inline int N ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ NS ## string_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+__flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ NS ## string_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
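/* Illustrative sketch (not part of the generated header): for a hypothetical
 * schema `table Person { name: string; }`, __flatbuffers_build_string_field
 * yields Person_name_add/_start/_end/_create_str/_clone/_pick. Combined with
 * the generated table builder entry points, typical usage looks roughly like: */
#if 0 /* usage sketch only; Person is a hypothetical type */
flatcc_builder_t builder, *B = &builder;
flatcc_builder_init(B);
Person_start_as_root(B);
Person_name_create_str(B, "Alice");   /* returns 0 on success, -1 on allocation failure */
Person_end_as_root(B);
flatcc_builder_clear(B);
#endif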
+#define __flatbuffers_build_table_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_table_vector_ops(NS, N, TN)
+
+#define __flatbuffers_build_union_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_vec_ref_t uvref)\
+{ NS ## vec_ref_t *_p; if (!uvref.type || !uvref.value) return uvref.type == uvref.value ? 0 : -1;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID - 1))) return -1; *_p = uvref.type;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uvref.value; return 0; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_union_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_union_vector(B, data, len)); }\
+__flatbuffers_build_union_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_vec_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_table_vector_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _table_t t)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, t))); }
+
+#define __flatbuffers_build_union_struct_vector_value_field(NS, N, NU, M, T)\
+static inline T ## _t *N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _struct_t p)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, p))); }
+
+#define __flatbuffers_build_union_string_vector_value_field(NS, N, NU, M)\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create(B, s, len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_str(B, s))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_strn(B, s, max_len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_clone(B, string))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_slice(B, string, index, len))); }
+
+#define __flatbuffers_build_string_vector_field(ID, NS, N, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, NS ## string, TT)\
+__flatbuffers_build_string_vector_ops(NS, N)
+
+#define __flatbuffers_char_formal_args , char v0
+#define __flatbuffers_char_call_args , v0
+#define __flatbuffers_uint8_formal_args , uint8_t v0
+#define __flatbuffers_uint8_call_args , v0
+#define __flatbuffers_int8_formal_args , int8_t v0
+#define __flatbuffers_int8_call_args , v0
+#define __flatbuffers_bool_formal_args , flatbuffers_bool_t v0
+#define __flatbuffers_bool_call_args , v0
+#define __flatbuffers_uint16_formal_args , uint16_t v0
+#define __flatbuffers_uint16_call_args , v0
+#define __flatbuffers_uint32_formal_args , uint32_t v0
+#define __flatbuffers_uint32_call_args , v0
+#define __flatbuffers_uint64_formal_args , uint64_t v0
+#define __flatbuffers_uint64_call_args , v0
+#define __flatbuffers_int16_formal_args , int16_t v0
+#define __flatbuffers_int16_call_args , v0
+#define __flatbuffers_int32_formal_args , int32_t v0
+#define __flatbuffers_int32_call_args , v0
+#define __flatbuffers_int64_formal_args , int64_t v0
+#define __flatbuffers_int64_call_args , v0
+#define __flatbuffers_float_formal_args , float v0
+#define __flatbuffers_float_call_args , v0
+#define __flatbuffers_double_formal_args , double v0
+#define __flatbuffers_double_call_args , v0
+
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_char, char)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint8, uint8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int8, int8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint16, uint16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint32, uint32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint64, uint64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int16, int16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int32, int32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int64, int64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_float, float)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_double, double)
+
+__flatbuffers_build_string(flatbuffers_)
+
+__flatbuffers_build_buffer(flatbuffers_)
+#include "flatcc/flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_BUILDER_H */
diff --git a/include/flatcc/reflection/flatbuffers_common_reader.h b/include/flatcc/reflection/flatbuffers_common_reader.h
new file mode 100644
index 0000000..c575308
--- /dev/null
+++ b/include/flatcc/reflection/flatbuffers_common_reader.h
@@ -0,0 +1,578 @@
+#ifndef FLATBUFFERS_COMMON_READER_H
+#define FLATBUFFERS_COMMON_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers read functionality for C. */
+
+#include "flatcc/flatcc_prologue.h"
+#include "flatcc/flatcc_flatbuffers.h"
+
+
+#define __flatbuffers_read_scalar_at_byteoffset(N, p, o) N ## _read_from_pe((uint8_t *)(p) + (o))
+#define __flatbuffers_read_scalar(N, p) N ## _read_from_pe(p)
+#define __flatbuffers_read_vt(ID, offset, t)\
+flatbuffers_voffset_t offset = 0;\
+{ flatbuffers_voffset_t id__tmp, *vt__tmp;\
+ FLATCC_ASSERT(t != 0 && "null pointer table access");\
+ id__tmp = ID;\
+ vt__tmp = (flatbuffers_voffset_t *)((uint8_t *)(t) -\
+ __flatbuffers_soffset_read_from_pe(t));\
+ if (__flatbuffers_voffset_read_from_pe(vt__tmp) >= sizeof(vt__tmp[0]) * (id__tmp + 3u)) {\
+ offset = __flatbuffers_voffset_read_from_pe(vt__tmp + id__tmp + 2);\
+ }\
+}
+#define __flatbuffers_field_present(ID, t) { __flatbuffers_read_vt(ID, offset__tmp, t) return offset__tmp != 0; }
+#define __flatbuffers_scalar_field(T, ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (const T *)((uint8_t *)(t) + offset__tmp);\
+ }\
+ return 0;\
+}
+#define __flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _table_t t__tmp)\
+__flatbuffers_scalar_field(T, ID, t__tmp)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+#define __flatbuffers_define_scalar_optional_field(ID, N, NK, TK, T, V)\
+__flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline TK ## _option_t N ## _ ## NK ## _option(N ## _table_t t__tmp)\
+{ TK ## _option_t ret; __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ ret.is_null = offset__tmp == 0; ret.value = offset__tmp ?\
+ __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+ return ret; }
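/* Illustrative sketch (not part of the generated header): for a hypothetical
 * field `age: int32` with id 2 on a table `Person`, the scalar field macro
 * above expands roughly to the accessor below. The vtable starts at
 * t - soffset(t); slot 0 holds the vtable byte size, slot 1 the table byte
 * size, and slot ID + 2 the field's byte offset into the table (0 means the
 * field is absent, in which case the schema default is returned). */
#if 0 /* expansion sketch only */
static inline int32_t Person_age_get(Person_table_t t)
{
    __flatbuffers_read_vt(2, offset, t)   /* offset = vt[4] if the vtable covers field id 2 */
    return offset ? __flatbuffers_read_scalar_at_byteoffset(flatbuffers_int32, t, offset)
                  : INT32_C(0);           /* schema default when the field is absent */
}
#endif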
+#define __flatbuffers_struct_field(T, ID, t, r)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (T)((uint8_t *)(t) + offset__tmp);\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_offset_field(T, ID, t, r, adjust)\
+{\
+ flatbuffers_uoffset_t *elem__tmp;\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ elem__tmp = (flatbuffers_uoffset_t *)((uint8_t *)(t) + offset__tmp);\
+ /* Add sizeof so C api can have raw access past header field. */\
+ return (T)((uint8_t *)(elem__tmp) + adjust +\
+ __flatbuffers_uoffset_read_from_pe(elem__tmp));\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_vector_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, sizeof(flatbuffers_uoffset_t))
+#define __flatbuffers_table_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, 0)
+#define __flatbuffers_define_struct_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_vector_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_table_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_string_field(ID, N, NK, r)\
+static inline flatbuffers_string_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline flatbuffers_string_t N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_string_field(N, NK)
+#define __flatbuffers_vec_len(vec)\
+{ return (vec) ? (size_t)__flatbuffers_uoffset_read_from_pe((flatbuffers_uoffset_t *)vec - 1) : 0; }
+#define __flatbuffers_string_len(s) __flatbuffers_vec_len(s)
+static inline size_t flatbuffers_vec_len(const void *vec)
+__flatbuffers_vec_len(vec)
+#define __flatbuffers_scalar_vec_at(N, vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return __flatbuffers_read_scalar(N, &(vec)[i]); }
+#define __flatbuffers_struct_vec_at(vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range"); return (vec) + (i); }
+/* `adjust` skips past the header for string vectors. */
+#define __flatbuffers_offset_vec_at(T, vec, i, adjust)\
+{ const flatbuffers_uoffset_t *elem__tmp = (vec) + (i);\
+ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return (T)((uint8_t *)(elem__tmp) + (size_t)__flatbuffers_uoffset_read_from_pe(elem__tmp) + (adjust)); }
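/* Illustrative sketch (not part of the generated header): a vector handle
 * points just past its 32-bit length header, so flatbuffers_vec_len() reads
 * the length one uoffset before the handle. Offset vector elements store a
 * uoffset relative to the element itself; string elements additionally skip
 * the string's own length header via `adjust`, so _vec_at returns a pointer
 * to the characters: */
#if 0 /* usage sketch only, assuming `names` is a flatbuffers_string_vec_t */
size_t n = flatbuffers_string_vec_len(names);
for (size_t i = 0; i < n; ++i) {
    flatbuffers_string_t s = flatbuffers_string_vec_at(names, i); /* points at the characters */
    size_t len = flatbuffers_string_len(s);                       /* length header sits just before s */
    (void)len;
}
#endif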
+#define __flatbuffers_define_scalar_vec_len(N)\
+static inline size_t N ## _vec_len(N ##_vec_t vec__tmp)\
+{ return flatbuffers_vec_len(vec__tmp); }
+#define __flatbuffers_define_scalar_vec_at(N, T) \
+static inline T N ## _vec_at(N ## _vec_t vec__tmp, size_t i__tmp)\
+__flatbuffers_scalar_vec_at(N, vec__tmp, i__tmp)
+typedef const char *flatbuffers_string_t;
+static inline size_t flatbuffers_string_len(flatbuffers_string_t s)
+__flatbuffers_string_len(s)
+typedef const flatbuffers_uoffset_t *flatbuffers_string_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_string_mutable_vec_t;
+static inline size_t flatbuffers_string_vec_len(flatbuffers_string_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_string_t flatbuffers_string_vec_at(flatbuffers_string_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_string_t, vec, i, sizeof(vec[0]))
+typedef const void *flatbuffers_generic_t;
+typedef void *flatbuffers_mutable_generic_t;
+static inline flatbuffers_string_t flatbuffers_string_cast_from_generic(const flatbuffers_generic_t p)
+{ return p ? ((const char *)p) + __flatbuffers_uoffset__size() : 0; }
+typedef const flatbuffers_uoffset_t *flatbuffers_generic_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_generic_table_mutable_vec_t;
+static inline size_t flatbuffers_generic_vec_len(flatbuffers_generic_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, 0)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at_as_string(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, sizeof(vec[0]))
+typedef struct flatbuffers_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_generic_t value;
+} flatbuffers_union_t;
+typedef struct flatbuffers_union_vec {
+ const flatbuffers_union_type_t *type;
+ const flatbuffers_uoffset_t *value;
+} flatbuffers_union_vec_t;
+typedef struct flatbuffers_mutable_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_mutable_generic_t value;
+} flatbuffers_mutable_union_t;
+typedef struct flatbuffers_mutable_union_vec {
+ flatbuffers_union_type_t *type;
+ flatbuffers_uoffset_t *value;
+} flatbuffers_mutable_union_vec_t;
+static inline flatbuffers_mutable_union_t flatbuffers_mutable_union_cast(flatbuffers_union_t u__tmp)\
+{ flatbuffers_mutable_union_t mu = { u__tmp.type, (flatbuffers_mutable_generic_t)u__tmp.value };\
+ return mu; }
+static inline flatbuffers_mutable_union_vec_t flatbuffers_mutable_union_vec_cast(flatbuffers_union_vec_t uv__tmp)\
+{ flatbuffers_mutable_union_vec_t muv =\
+ { (flatbuffers_union_type_t *)uv__tmp.type, (flatbuffers_uoffset_t *)uv__tmp.value }; return muv; }
+#define __flatbuffers_union_type_field(ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(__flatbuffers_utype, t, offset__tmp) : 0;\
+}
+static inline flatbuffers_string_t flatbuffers_string_cast_from_union(const flatbuffers_union_t u__tmp)\
+{ return flatbuffers_string_cast_from_generic(u__tmp.value); }
+#define __flatbuffers_define_union_field(NS, ID, N, NK, T, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type_get(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__## NS ## field_present(ID, t__tmp)\
+static inline T ## _union_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; u__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ if (u__tmp.type == 0) return u__tmp; u__tmp.value = N ## _ ## NK ## _get(t__tmp); return u__tmp; }\
+static inline NS ## string_t N ## _ ## NK ## _as_string(N ## _table_t t__tmp)\
+{ return NS ## string_cast_from_generic(N ## _ ## NK ## _get(t__tmp)); }\
+
+#define __flatbuffers_define_union_vector_ops(NS, T)\
+static inline size_t T ## _union_vec_len(T ## _union_vec_t uv__tmp)\
+{ return NS ## vec_len(uv__tmp.type); }\
+static inline T ## _union_t T ## _union_vec_at(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; size_t n__tmp = NS ## vec_len(uv__tmp.type);\
+ FLATCC_ASSERT(n__tmp > (i__tmp) && "index out of range"); u__tmp.type = uv__tmp.type[i__tmp];\
+ /* Unknown type is treated as NONE for schema evolution. */\
+ if (u__tmp.type == 0) return u__tmp;\
+ u__tmp.value = NS ## generic_vec_at(uv__tmp.value, i__tmp); return u__tmp; }\
+static inline NS ## string_t T ## _union_vec_at_as_string(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ return (NS ## string_t) NS ## generic_vec_at_as_string(uv__tmp.value, i__tmp); }\
+
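/* Illustrative sketch (not part of the generated header): iterating a union
 * vector for a hypothetical union `Any { Weapon, Pickup }` stored in a
 * hypothetical `Monster.equipment` field. Unknown or NONE entries carry
 * type 0 and a null value, so a reader built against an older schema can
 * simply skip them. */
#if 0 /* usage sketch only */
Any_union_vec_t uv = Monster_equipment_union(monster);
size_t n = Any_union_vec_len(uv);
for (size_t i = 0; i < n; ++i) {
    Any_union_t u = Any_union_vec_at(uv, i);
    if (u.type == Any_Weapon) {
        Weapon_table_t w = (Weapon_table_t)u.value;   /* value is a generic table pointer */
        (void)w;
    } /* u.type == 0 covers NONE and member types unknown to this reader */
}
#endif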
+#define __flatbuffers_define_union_vector(NS, T)\
+typedef NS ## union_vec_t T ## _union_vec_t;\
+typedef NS ## mutable_union_vec_t T ## _mutable_union_vec_t;\
+static inline T ## _mutable_union_vec_t T ## _mutable_union_vec_cast(T ## _union_vec_t u__tmp)\
+{ return NS ## mutable_union_vec_cast(u__tmp); }\
+__## NS ## define_union_vector_ops(NS, T)
+#define __flatbuffers_define_union(NS, T)\
+typedef NS ## union_t T ## _union_t;\
+typedef NS ## mutable_union_t T ## _mutable_union_t;\
+static inline T ## _mutable_union_t T ## _mutable_union_cast(T ## _union_t u__tmp)\
+{ return NS ## mutable_union_cast(u__tmp); }\
+__## NS ## define_union_vector(NS, T)
+#define __flatbuffers_define_union_vector_field(NS, ID, N, NK, T, r)\
+__## NS ## define_vector_field(ID - 1, N, NK ## _type, T ## _vec_t, r)\
+__## NS ## define_vector_field(ID, N, NK, flatbuffers_generic_vec_t, r)\
+static inline T ## _union_vec_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_vec_t uv__tmp; uv__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ uv__tmp.value = N ## _ ## NK(t__tmp);\
+ FLATCC_ASSERT(NS ## vec_len(uv__tmp.type) == NS ## vec_len(uv__tmp.value)\
+ && "union vector type length mismatch"); return uv__tmp; }
+#include <string.h>
+static const size_t flatbuffers_not_found = (size_t)-1;
+static const size_t flatbuffers_end = (size_t)-1;
+#define __flatbuffers_identity(n) (n)
+#define __flatbuffers_min(a, b) ((a) < (b) ? (a) : (b))
+/* Subtraction doesn't work for unsigned types. */
+#define __flatbuffers_scalar_cmp(x, y, n) ((x) < (y) ? -1 : (x) > (y))
+static inline int __flatbuffers_string_n_cmp(flatbuffers_string_t v, const char *s, size_t n)
+{ size_t nv = flatbuffers_string_len(v); int x = strncmp(v, s, nv < n ? nv : n);
+ return x != 0 ? x : nv < n ? -1 : nv > n; }
+/* `n` arg unused, but needed by string find macro expansion. */
+static inline int __flatbuffers_string_cmp(flatbuffers_string_t v, const char *s, size_t n) { (void)n; return strcmp(v, s); }
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Returns lowest matching index or not_found. */
+#define __flatbuffers_find_by_field(A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t a__tmp = 0, b__tmp, m__tmp; if (!(b__tmp = L(V))) { return flatbuffers_not_found; }\
+ --b__tmp;\
+ while (a__tmp < b__tmp) {\
+ m__tmp = a__tmp + ((b__tmp - a__tmp) >> 1);\
+ v__tmp = A(E(V, m__tmp));\
+ if ((D(v__tmp, (K), (Kn))) < 0) {\
+ a__tmp = m__tmp + 1;\
+ } else {\
+ b__tmp = m__tmp;\
+ }\
+ }\
+ if (a__tmp == b__tmp) {\
+ v__tmp = A(E(V, a__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return a__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_find_by_scalar_field(A, V, E, L, K, T)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_find_by_string_field(A, V, E, L, K)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_find_by_string_n_field(A, V, E, L, K, Kn)\
+__flatbuffers_find_by_field(A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, TK key__tmp)\
+__flatbuffers_find_by_scalar_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, TK)
+#define __flatbuffers_define_scalar_find(N, T)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_find_by_scalar_field(__flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_find_by_string_field(N, NK) \
+/* Note: find only works on vectors sorted by this field. */\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_find_by_string_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_find_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_find_by_string_n_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, key__tmp); }
+#define __flatbuffers_define_default_find_by_string_field(N, NK) \
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_find_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_find_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }
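/* Illustrative sketch (not part of the generated header): `find` is a binary
 * search and therefore only valid on vectors sorted by the key field (see the
 * sort helpers further below); `scan` works on unsorted vectors. For a
 * hypothetical table `Monster` with a string key field `name`: */
#if 0 /* usage sketch only */
size_t i = Monster_vec_find_by_name(monsters, "Orc");  /* lowest matching index */
if (i != flatbuffers_not_found) {
    Monster_table_t m = Monster_vec_at(monsters, i);
    (void)m;
}
#endif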
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Returns lowest matching index or not_found. */
+#define __flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp;\
+ for (i__tmp = b; i__tmp < e; ++i__tmp) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp = e;\
+ while (i__tmp-- > b) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_scan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_scan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_scan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_rscan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_rscan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_rscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_scan_by_scalar_field(N, NK, T)\
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scalar_scan(N, T)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(begin__tmp, __flatbuffers_min( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(begin__tmp, __flatbuffers_min( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_scan_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }
+#define __flatbuffers_define_default_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }
+#define __flatbuffers_heap_sort(N, X, A, E, L, TK, TE, D, S)\
+static inline void __ ## N ## X ## __heap_sift_down(\
+ N ## _mutable_vec_t vec__tmp, size_t start__tmp, size_t end__tmp)\
+{ size_t child__tmp, root__tmp; TK v1__tmp, v2__tmp, vroot__tmp;\
+ root__tmp = start__tmp;\
+ while ((root__tmp << 1) <= end__tmp) {\
+ child__tmp = root__tmp << 1;\
+ if (child__tmp < end__tmp) {\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ v2__tmp = A(E(vec__tmp, child__tmp + 1));\
+ if (D(v1__tmp, v2__tmp) < 0) {\
+ child__tmp++;\
+ }\
+ }\
+ vroot__tmp = A(E(vec__tmp, root__tmp));\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ if (D(vroot__tmp, v1__tmp) < 0) {\
+ S(vec__tmp, root__tmp, child__tmp, TE);\
+ root__tmp = child__tmp;\
+ } else {\
+ return;\
+ }\
+ }\
+}\
+static inline void __ ## N ## X ## __heap_sort(N ## _mutable_vec_t vec__tmp)\
+{ size_t start__tmp, end__tmp, size__tmp;\
+ size__tmp = L(vec__tmp); if (size__tmp == 0) return; end__tmp = size__tmp - 1; start__tmp = size__tmp >> 1;\
+ do { __ ## N ## X ## __heap_sift_down(vec__tmp, start__tmp, end__tmp); } while (start__tmp--);\
+ while (end__tmp > 0) { \
+ S(vec__tmp, 0, end__tmp, TE);\
+ __ ## N ## X ## __heap_sift_down(vec__tmp, 0, --end__tmp); } }
+#define __flatbuffers_define_sort_by_field(N, NK, TK, TE, D, S)\
+ __flatbuffers_heap_sort(N, _sort_by_ ## NK, N ## _ ## NK ## _get, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort_by_ ## NK(N ## _mutable_vec_t vec__tmp)\
+{ __ ## N ## _sort_by_ ## NK ## __heap_sort(vec__tmp); }
+#define __flatbuffers_define_sort(N, TK, TE, D, S)\
+__flatbuffers_heap_sort(N, , __flatbuffers_identity, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort(N ## _mutable_vec_t vec__tmp) { __ ## N ## __heap_sort(vec__tmp); }
+#define __flatbuffers_scalar_diff(x, y) ((x) < (y) ? -1 : (x) > (y))
+#define __flatbuffers_string_diff(x, y) __flatbuffers_string_n_cmp((x), (const char *)(y), flatbuffers_string_len(y))
+#define __flatbuffers_value_swap(vec, a, b, TE) { TE x__tmp = vec[b]; vec[b] = vec[a]; vec[a] = x__tmp; }
+#define __flatbuffers_uoffset_swap(vec, a, b, TE)\
+{ TE ta__tmp, tb__tmp, d__tmp;\
+ d__tmp = (TE)((a - b) * sizeof(vec[0]));\
+ ta__tmp = __flatbuffers_uoffset_read_from_pe(vec + b) - d__tmp;\
+ tb__tmp = __flatbuffers_uoffset_read_from_pe(vec + a) + d__tmp;\
+ __flatbuffers_uoffset_write_to_pe(vec + a, ta__tmp);\
+ __flatbuffers_uoffset_write_to_pe(vec + b, tb__tmp); }
+#define __flatbuffers_scalar_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_string_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
+#define __flatbuffers_struct_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_table_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
+#define __flatbuffers_define_struct_sort_by_scalar_field(N, NK, TK, TE)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, TE, __flatbuffers_scalar_diff, __flatbuffers_struct_swap)
+#define __flatbuffers_define_table_sort_by_scalar_field(N, NK, TK)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, flatbuffers_uoffset_t, __flatbuffers_scalar_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_table_sort_by_string_field(N, NK)\
+ __flatbuffers_define_sort_by_field(N, NK, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_scalar_sort(N, T) __flatbuffers_define_sort(N, T, T, __flatbuffers_scalar_diff, __flatbuffers_scalar_swap)
+#define __flatbuffers_define_string_sort() __flatbuffers_define_sort(flatbuffers_string, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_string_swap)
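/* Illustrative sketch (not part of the generated header): the heap sort above
 * sorts a vector in place, so it takes a mutable cast of the otherwise
 * read-only vector handle. Offset elements (tables, strings) are swapped by
 * rewriting their relative offsets (__flatbuffers_uoffset_swap) so each slot
 * keeps pointing at the same target after the move. For a hypothetical
 * Monster table with string key field `name`: */
#if 0 /* usage sketch only */
Monster_vec_sort_by_name((Monster_mutable_vec_t)monsters);  /* in-place heap sort */
size_t i = Monster_vec_find_by_name(monsters, "Orc");       /* binary search is now valid */
(void)i;
#endif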
+#define __flatbuffers_sort_vector_field(N, NK, T, t)\
+{ T ## _mutable_vec_t v__tmp = (T ## _mutable_vec_t) N ## _ ## NK ## _get(t);\
+ if (v__tmp) T ## _vec_sort(v__tmp); }
+#define __flatbuffers_sort_table_field(N, NK, T, t)\
+{ T ## _sort((T ## _mutable_table_t)N ## _ ## NK ## _get(t)); }
+#define __flatbuffers_sort_union_field(N, NK, T, t)\
+{ T ## _sort(T ## _mutable_union_cast(N ## _ ## NK ## _union(t))); }
+#define __flatbuffers_sort_table_vector_field_elements(N, NK, T, t)\
+{ T ## _vec_t v__tmp = N ## _ ## NK ## _get(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort((T ## _mutable_table_t)T ## _vec_at(v__tmp, i__tmp)); }}
+#define __flatbuffers_sort_union_vector_field_elements(N, NK, T, t)\
+{ T ## _union_vec_t v__tmp = N ## _ ## NK ## _union(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _union_vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort(T ## _mutable_union_cast(T ## _union_vec_at(v__tmp, i__tmp))); }}
+#define __flatbuffers_define_scalar_vector(N, T)\
+typedef const T *N ## _vec_t;\
+typedef T *N ## _mutable_vec_t;\
+__flatbuffers_define_scalar_vec_len(N)\
+__flatbuffers_define_scalar_vec_at(N, T)\
+__flatbuffers_define_scalar_find(N, T)\
+__flatbuffers_define_scalar_scan(N, T)\
+__flatbuffers_define_scalar_sort(N, T)
+
+#define __flatbuffers_define_integer_type(N, T, W)\
+__flatcc_define_integer_accessors(N, T, W, flatbuffers_endian)\
+__flatbuffers_define_scalar_vector(N, T)
+__flatbuffers_define_scalar_vector(flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_define_scalar_vector(flatbuffers_char, char)
+__flatbuffers_define_scalar_vector(flatbuffers_uint8, uint8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int8, int8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint16, uint16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int16, int16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint32, uint32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int32, int32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint64, uint64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int64, int64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_float, float)
+__flatbuffers_define_scalar_vector(flatbuffers_double, double)
+__flatbuffers_define_scalar_vector(flatbuffers_union_type, flatbuffers_union_type_t)
+static inline size_t flatbuffers_string_vec_find(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_find_by_string_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_find_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_find_by_string_n_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_scan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_scan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_rscan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_rscan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+__flatbuffers_define_string_sort()
+#define __flatbuffers_define_struct_scalar_fixed_array_field(N, NK, TK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0;\
+ return __flatbuffers_read_scalar(TK, &(t__tmp->NK[i__tmp])); }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp, size_t i__tmp)\
+{ return N ## _ ## NK ## _get(t__tmp, i__tmp); }
+#define __flatbuffers_define_struct_struct_fixed_array_field(N, NK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }\
+static inline T N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }
+#define __flatbuffers_define_struct_scalar_field(N, NK, TK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+#define __flatbuffers_define_struct_struct_field(N, NK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }
+/* If fid is null, the function returns true without testing as buffer is not expected to have any id. */
+static inline int flatbuffers_has_identifier(const void *buffer, const char *fid)
+{ flatbuffers_thash_t id, id2 = 0; if (fid == 0) { return 1; };
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe(((flatbuffers_uoffset_t *)buffer) + 1);
+ return id2 == 0 || id == id2; }
+static inline int flatbuffers_has_type_hash(const void *buffer, flatbuffers_thash_t thash)
+{ return thash == 0 || (__flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1) == thash); }
+
+static inline flatbuffers_thash_t flatbuffers_get_type_hash(const void *buffer)
+{ return __flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1); }
+
+#define flatbuffers_verify_endian() flatbuffers_has_identifier("\x00\x00\x00\x00" "1234", "1234")
+static inline void *flatbuffers_read_size_prefix(void *b, size_t *size_out)
+{ if (size_out) { *size_out = (size_t)__flatbuffers_uoffset_read_from_pe(b); }
+ return (uint8_t *)b + sizeof(flatbuffers_uoffset_t); }
+/* Null file identifier accepts anything, otherwise fid should be 4 characters. */
+#define __flatbuffers_read_root(T, K, buffer, fid)\
+ ((!buffer || !flatbuffers_has_identifier(buffer, fid)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+#define __flatbuffers_read_typed_root(T, K, buffer, thash)\
+ ((!buffer || !flatbuffers_has_type_hash(buffer, thash)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+#define __flatbuffers_nested_buffer_as_root(C, N, T, K)\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root_with_identifier(C ## _ ## table_t t__tmp, const char *fid__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_typed_root(C ## _ ## table_t t__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, C ## _ ## type_identifier); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root(C ## _ ## table_t t__tmp)\
+{ const char *fid__tmp = T ## _file_identifier;\
+ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }
+#define __flatbuffers_buffer_as_root(N, K)\
+static inline N ## _ ## K ## t N ## _as_root_with_identifier(const void *buffer__tmp, const char *fid__tmp)\
+{ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root_with_type_hash(const void *buffer__tmp, flatbuffers_thash_t thash__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, thash__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root(const void *buffer__tmp)\
+{ const char *fid__tmp = N ## _file_identifier;\
+ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_typed_root(const void *buffer__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, N ## _type_hash); }
+#define __flatbuffers_struct_as_root(N) __flatbuffers_buffer_as_root(N, struct_)
+#define __flatbuffers_table_as_root(N) __flatbuffers_buffer_as_root(N, table_)
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_READER_H */
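/* Illustrative sketch (not part of the generated header): a generated root
 * accessor checks the optional 4-character file identifier and then follows
 * the root uoffset at the start of the buffer; a null identifier accepts any
 * buffer. For a hypothetical root table `Monster`: */
#if 0 /* usage sketch only, assuming `buffer` points at a finished FlatBuffer */
Monster_table_t root = Monster_as_root(buffer);   /* 0 on missing buffer or identifier mismatch */
if (root == 0) { /* wrong or missing file identifier */ }
#endif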
diff --git a/include/flatcc/reflection/reflection_builder.h b/include/flatcc/reflection/reflection_builder.h
new file mode 100644
index 0000000..65aef73
--- /dev/null
+++ b/include/flatcc/reflection/reflection_builder.h
@@ -0,0 +1,457 @@
+#ifndef REFLECTION_BUILDER_H
+#define REFLECTION_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef REFLECTION_READER_H
+#include "reflection_reader.h"
+#endif
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#include "flatbuffers_common_builder.h"
+#endif
+#include "flatcc/flatcc_prologue.h"
+#undef flatbuffers_identifier
+#define flatbuffers_identifier "BFBS"
+#undef flatbuffers_extension
+#define flatbuffers_extension "bfbs"
+
+#define __reflection_BaseType_formal_args , reflection_BaseType_enum_t v0
+#define __reflection_BaseType_call_args , v0
+__flatbuffers_build_scalar(flatbuffers_, reflection_BaseType, reflection_BaseType_enum_t)
+
+static const flatbuffers_voffset_t __reflection_Type_required[] = { 0 };
+typedef flatbuffers_ref_t reflection_Type_ref_t;
+static reflection_Type_ref_t reflection_Type_clone(flatbuffers_builder_t *B, reflection_Type_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Type, 4)
+
+static const flatbuffers_voffset_t __reflection_KeyValue_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_KeyValue_ref_t;
+static reflection_KeyValue_ref_t reflection_KeyValue_clone(flatbuffers_builder_t *B, reflection_KeyValue_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_KeyValue, 2)
+
+static const flatbuffers_voffset_t __reflection_EnumVal_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_EnumVal_ref_t;
+static reflection_EnumVal_ref_t reflection_EnumVal_clone(flatbuffers_builder_t *B, reflection_EnumVal_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_EnumVal, 5)
+
+static const flatbuffers_voffset_t __reflection_Enum_required[] = { 0, 1, 3, 0 };
+typedef flatbuffers_ref_t reflection_Enum_ref_t;
+static reflection_Enum_ref_t reflection_Enum_clone(flatbuffers_builder_t *B, reflection_Enum_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Enum, 6)
+
+static const flatbuffers_voffset_t __reflection_Field_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Field_ref_t;
+static reflection_Field_ref_t reflection_Field_clone(flatbuffers_builder_t *B, reflection_Field_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Field, 12)
+
+static const flatbuffers_voffset_t __reflection_Object_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Object_ref_t;
+static reflection_Object_ref_t reflection_Object_clone(flatbuffers_builder_t *B, reflection_Object_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Object, 7)
+
+static const flatbuffers_voffset_t __reflection_RPCCall_required[] = { 0, 1, 2, 0 };
+typedef flatbuffers_ref_t reflection_RPCCall_ref_t;
+static reflection_RPCCall_ref_t reflection_RPCCall_clone(flatbuffers_builder_t *B, reflection_RPCCall_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_RPCCall, 5)
+
+static const flatbuffers_voffset_t __reflection_Service_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_Service_ref_t;
+static reflection_Service_ref_t reflection_Service_clone(flatbuffers_builder_t *B, reflection_Service_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Service, 4)
+
+static const flatbuffers_voffset_t __reflection_Schema_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Schema_ref_t;
+static reflection_Schema_ref_t reflection_Schema_clone(flatbuffers_builder_t *B, reflection_Schema_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Schema, 6)
+
+#define __reflection_Type_formal_args , reflection_BaseType_enum_t v0, reflection_BaseType_enum_t v1, int32_t v2, uint16_t v3
+#define __reflection_Type_call_args , v0, v1, v2, v3
+static inline reflection_Type_ref_t reflection_Type_create(flatbuffers_builder_t *B __reflection_Type_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Type, reflection_Type_file_identifier, reflection_Type_type_identifier)
+
+#define __reflection_KeyValue_formal_args , flatbuffers_string_ref_t v0, flatbuffers_string_ref_t v1
+#define __reflection_KeyValue_call_args , v0, v1
+static inline reflection_KeyValue_ref_t reflection_KeyValue_create(flatbuffers_builder_t *B __reflection_KeyValue_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_KeyValue, reflection_KeyValue_file_identifier, reflection_KeyValue_type_identifier)
+
+#define __reflection_EnumVal_formal_args ,\
+ flatbuffers_string_ref_t v0, int64_t v1, reflection_Object_ref_t v2, reflection_Type_ref_t v3, flatbuffers_string_vec_ref_t v4
+#define __reflection_EnumVal_call_args ,\
+ v0, v1, v2, v3, v4
+static inline reflection_EnumVal_ref_t reflection_EnumVal_create(flatbuffers_builder_t *B __reflection_EnumVal_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_EnumVal, reflection_EnumVal_file_identifier, reflection_EnumVal_type_identifier)
+
+#define __reflection_Enum_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_EnumVal_vec_ref_t v1, flatbuffers_bool_t v2, reflection_Type_ref_t v3, reflection_KeyValue_vec_ref_t v4, flatbuffers_string_vec_ref_t v5
+#define __reflection_Enum_call_args ,\
+ v0, v1, v2, v3, v4, v5
+static inline reflection_Enum_ref_t reflection_Enum_create(flatbuffers_builder_t *B __reflection_Enum_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Enum, reflection_Enum_file_identifier, reflection_Enum_type_identifier)
+
+#define __reflection_Field_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Type_ref_t v1, uint16_t v2, uint16_t v3,\
+ int64_t v4, double v5, flatbuffers_bool_t v6, flatbuffers_bool_t v7,\
+ flatbuffers_bool_t v8, reflection_KeyValue_vec_ref_t v9, flatbuffers_string_vec_ref_t v10, flatbuffers_bool_t v11
+#define __reflection_Field_call_args ,\
+ v0, v1, v2, v3,\
+ v4, v5, v6, v7,\
+ v8, v9, v10, v11
+static inline reflection_Field_ref_t reflection_Field_create(flatbuffers_builder_t *B __reflection_Field_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Field, reflection_Field_file_identifier, reflection_Field_type_identifier)
+
+#define __reflection_Object_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Field_vec_ref_t v1, flatbuffers_bool_t v2, int32_t v3,\
+ int32_t v4, reflection_KeyValue_vec_ref_t v5, flatbuffers_string_vec_ref_t v6
+#define __reflection_Object_call_args ,\
+ v0, v1, v2, v3,\
+ v4, v5, v6
+static inline reflection_Object_ref_t reflection_Object_create(flatbuffers_builder_t *B __reflection_Object_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Object, reflection_Object_file_identifier, reflection_Object_type_identifier)
+
+#define __reflection_RPCCall_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Object_ref_t v1, reflection_Object_ref_t v2, reflection_KeyValue_vec_ref_t v3, flatbuffers_string_vec_ref_t v4
+#define __reflection_RPCCall_call_args ,\
+ v0, v1, v2, v3, v4
+static inline reflection_RPCCall_ref_t reflection_RPCCall_create(flatbuffers_builder_t *B __reflection_RPCCall_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_RPCCall, reflection_RPCCall_file_identifier, reflection_RPCCall_type_identifier)
+
+#define __reflection_Service_formal_args , flatbuffers_string_ref_t v0, reflection_RPCCall_vec_ref_t v1, reflection_KeyValue_vec_ref_t v2, flatbuffers_string_vec_ref_t v3
+#define __reflection_Service_call_args , v0, v1, v2, v3
+static inline reflection_Service_ref_t reflection_Service_create(flatbuffers_builder_t *B __reflection_Service_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Service, reflection_Service_file_identifier, reflection_Service_type_identifier)
+
+#define __reflection_Schema_formal_args ,\
+ reflection_Object_vec_ref_t v0, reflection_Enum_vec_ref_t v1, flatbuffers_string_ref_t v2, flatbuffers_string_ref_t v3, reflection_Object_ref_t v4, reflection_Service_vec_ref_t v5
+#define __reflection_Schema_call_args ,\
+ v0, v1, v2, v3, v4, v5
+static inline reflection_Schema_ref_t reflection_Schema_create(flatbuffers_builder_t *B __reflection_Schema_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Schema, reflection_Schema_file_identifier, reflection_Schema_type_identifier)
+
+__flatbuffers_build_scalar_field(0, flatbuffers_, reflection_Type_base_type, reflection_BaseType, reflection_BaseType_enum_t, 1, 1, INT8_C(0), reflection_Type)
+__flatbuffers_build_scalar_field(1, flatbuffers_, reflection_Type_element, reflection_BaseType, reflection_BaseType_enum_t, 1, 1, INT8_C(0), reflection_Type)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Type_index, flatbuffers_int32, int32_t, 4, 4, INT32_C(-1), reflection_Type)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Type_fixed_length, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Type)
+
+static inline reflection_Type_ref_t reflection_Type_create(flatbuffers_builder_t *B __reflection_Type_formal_args)
+{
+ if (reflection_Type_start(B)
+ || reflection_Type_index_add(B, v2)
+ || reflection_Type_fixed_length_add(B, v3)
+ || reflection_Type_base_type_add(B, v0)
+ || reflection_Type_element_add(B, v1)) {
+ return 0;
+ }
+ return reflection_Type_end(B);
+}
+
+static reflection_Type_ref_t reflection_Type_clone(flatbuffers_builder_t *B, reflection_Type_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Type_start(B)
+ || reflection_Type_index_pick(B, t)
+ || reflection_Type_fixed_length_pick(B, t)
+ || reflection_Type_base_type_pick(B, t)
+ || reflection_Type_element_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Type_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_KeyValue_key, reflection_KeyValue)
+__flatbuffers_build_string_field(1, flatbuffers_, reflection_KeyValue_value, reflection_KeyValue)
+
+static inline reflection_KeyValue_ref_t reflection_KeyValue_create(flatbuffers_builder_t *B __reflection_KeyValue_formal_args)
+{
+ if (reflection_KeyValue_start(B)
+ || reflection_KeyValue_key_add(B, v0)
+ || reflection_KeyValue_value_add(B, v1)) {
+ return 0;
+ }
+ return reflection_KeyValue_end(B);
+}
+
+static reflection_KeyValue_ref_t reflection_KeyValue_clone(flatbuffers_builder_t *B, reflection_KeyValue_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_KeyValue_start(B)
+ || reflection_KeyValue_key_pick(B, t)
+ || reflection_KeyValue_value_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_KeyValue_end(B));
+}
+
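/* Illustrative sketch (not part of the generated header): the create/clone
 * pairs above are used through a flatcc builder while a buffer is under
 * construction; string arguments are passed as references created first. */
#if 0 /* usage sketch only, assuming B is a flatcc_builder_t * with a started buffer */
reflection_KeyValue_ref_t kv = reflection_KeyValue_create(B,
    flatbuffers_string_create_str(B, "deprecated"),   /* v0: key */
    flatbuffers_string_create_str(B, "true"));        /* v1: value */
(void)kv;   /* the ref would typically be pushed onto an attributes vector */
#endif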
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_EnumVal_name, reflection_EnumVal)
+__flatbuffers_build_scalar_field(1, flatbuffers_, reflection_EnumVal_value, flatbuffers_int64, int64_t, 8, 8, INT64_C(0), reflection_EnumVal)
+__flatbuffers_build_table_field(2, flatbuffers_, reflection_EnumVal_object, reflection_Object, reflection_EnumVal)
+__flatbuffers_build_table_field(3, flatbuffers_, reflection_EnumVal_union_type, reflection_Type, reflection_EnumVal)
+__flatbuffers_build_string_vector_field(4, flatbuffers_, reflection_EnumVal_documentation, reflection_EnumVal)
+
+static inline reflection_EnumVal_ref_t reflection_EnumVal_create(flatbuffers_builder_t *B __reflection_EnumVal_formal_args)
+{
+ if (reflection_EnumVal_start(B)
+ || reflection_EnumVal_value_add(B, v1)
+ || reflection_EnumVal_name_add(B, v0)
+ || reflection_EnumVal_object_add(B, v2)
+ || reflection_EnumVal_union_type_add(B, v3)
+ || reflection_EnumVal_documentation_add(B, v4)) {
+ return 0;
+ }
+ return reflection_EnumVal_end(B);
+}
+
+static reflection_EnumVal_ref_t reflection_EnumVal_clone(flatbuffers_builder_t *B, reflection_EnumVal_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_EnumVal_start(B)
+ || reflection_EnumVal_value_pick(B, t)
+ || reflection_EnumVal_name_pick(B, t)
+ || reflection_EnumVal_object_pick(B, t)
+ || reflection_EnumVal_union_type_pick(B, t)
+ || reflection_EnumVal_documentation_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_EnumVal_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Enum_name, reflection_Enum)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Enum_values, reflection_EnumVal, reflection_Enum)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Enum_is_union, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Enum)
+__flatbuffers_build_table_field(3, flatbuffers_, reflection_Enum_underlying_type, reflection_Type, reflection_Enum)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(4, flatbuffers_, reflection_Enum_attributes, reflection_KeyValue, reflection_Enum)
+__flatbuffers_build_string_vector_field(5, flatbuffers_, reflection_Enum_documentation, reflection_Enum)
+
+static inline reflection_Enum_ref_t reflection_Enum_create(flatbuffers_builder_t *B __reflection_Enum_formal_args)
+{
+ if (reflection_Enum_start(B)
+ || reflection_Enum_name_add(B, v0)
+ || reflection_Enum_values_add(B, v1)
+ || reflection_Enum_underlying_type_add(B, v3)
+ || reflection_Enum_attributes_add(B, v4)
+ || reflection_Enum_documentation_add(B, v5)
+ || reflection_Enum_is_union_add(B, v2)) {
+ return 0;
+ }
+ return reflection_Enum_end(B);
+}
+
+static reflection_Enum_ref_t reflection_Enum_clone(flatbuffers_builder_t *B, reflection_Enum_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Enum_start(B)
+ || reflection_Enum_name_pick(B, t)
+ || reflection_Enum_values_pick(B, t)
+ || reflection_Enum_underlying_type_pick(B, t)
+ || reflection_Enum_attributes_pick(B, t)
+ || reflection_Enum_documentation_pick(B, t)
+ || reflection_Enum_is_union_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Enum_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Field_name, reflection_Field)
+__flatbuffers_build_table_field(1, flatbuffers_, reflection_Field_type, reflection_Type, reflection_Field)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Field_id, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Field_offset, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(4, flatbuffers_, reflection_Field_default_integer, flatbuffers_int64, int64_t, 8, 8, INT64_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(5, flatbuffers_, reflection_Field_default_real, flatbuffers_double, double, 8, 8, 0.0000000000000000, reflection_Field)
+__flatbuffers_build_scalar_field(6, flatbuffers_, reflection_Field_deprecated, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(7, flatbuffers_, reflection_Field_required, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(8, flatbuffers_, reflection_Field_key, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(9, flatbuffers_, reflection_Field_attributes, reflection_KeyValue, reflection_Field)
+__flatbuffers_build_string_vector_field(10, flatbuffers_, reflection_Field_documentation, reflection_Field)
+__flatbuffers_build_scalar_field(11, flatbuffers_, reflection_Field_optional, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+
+static inline reflection_Field_ref_t reflection_Field_create(flatbuffers_builder_t *B __reflection_Field_formal_args)
+{
+ if (reflection_Field_start(B)
+ || reflection_Field_default_integer_add(B, v4)
+ || reflection_Field_default_real_add(B, v5)
+ || reflection_Field_name_add(B, v0)
+ || reflection_Field_type_add(B, v1)
+ || reflection_Field_attributes_add(B, v9)
+ || reflection_Field_documentation_add(B, v10)
+ || reflection_Field_id_add(B, v2)
+ || reflection_Field_offset_add(B, v3)
+ || reflection_Field_deprecated_add(B, v6)
+ || reflection_Field_required_add(B, v7)
+ || reflection_Field_key_add(B, v8)
+ || reflection_Field_optional_add(B, v11)) {
+ return 0;
+ }
+ return reflection_Field_end(B);
+}
+
+static reflection_Field_ref_t reflection_Field_clone(flatbuffers_builder_t *B, reflection_Field_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Field_start(B)
+ || reflection_Field_default_integer_pick(B, t)
+ || reflection_Field_default_real_pick(B, t)
+ || reflection_Field_name_pick(B, t)
+ || reflection_Field_type_pick(B, t)
+ || reflection_Field_attributes_pick(B, t)
+ || reflection_Field_documentation_pick(B, t)
+ || reflection_Field_id_pick(B, t)
+ || reflection_Field_offset_pick(B, t)
+ || reflection_Field_deprecated_pick(B, t)
+ || reflection_Field_required_pick(B, t)
+ || reflection_Field_key_pick(B, t)
+ || reflection_Field_optional_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Field_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Object_name, reflection_Object)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Object_fields, reflection_Field, reflection_Object)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Object_is_struct, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Object)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Object_minalign, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), reflection_Object)
+__flatbuffers_build_scalar_field(4, flatbuffers_, reflection_Object_bytesize, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), reflection_Object)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(5, flatbuffers_, reflection_Object_attributes, reflection_KeyValue, reflection_Object)
+__flatbuffers_build_string_vector_field(6, flatbuffers_, reflection_Object_documentation, reflection_Object)
+
+static inline reflection_Object_ref_t reflection_Object_create(flatbuffers_builder_t *B __reflection_Object_formal_args)
+{
+ if (reflection_Object_start(B)
+ || reflection_Object_name_add(B, v0)
+ || reflection_Object_fields_add(B, v1)
+ || reflection_Object_minalign_add(B, v3)
+ || reflection_Object_bytesize_add(B, v4)
+ || reflection_Object_attributes_add(B, v5)
+ || reflection_Object_documentation_add(B, v6)
+ || reflection_Object_is_struct_add(B, v2)) {
+ return 0;
+ }
+ return reflection_Object_end(B);
+}
+
+static reflection_Object_ref_t reflection_Object_clone(flatbuffers_builder_t *B, reflection_Object_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Object_start(B)
+ || reflection_Object_name_pick(B, t)
+ || reflection_Object_fields_pick(B, t)
+ || reflection_Object_minalign_pick(B, t)
+ || reflection_Object_bytesize_pick(B, t)
+ || reflection_Object_attributes_pick(B, t)
+ || reflection_Object_documentation_pick(B, t)
+ || reflection_Object_is_struct_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Object_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_RPCCall_name, reflection_RPCCall)
+__flatbuffers_build_table_field(1, flatbuffers_, reflection_RPCCall_request, reflection_Object, reflection_RPCCall)
+__flatbuffers_build_table_field(2, flatbuffers_, reflection_RPCCall_response, reflection_Object, reflection_RPCCall)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(3, flatbuffers_, reflection_RPCCall_attributes, reflection_KeyValue, reflection_RPCCall)
+__flatbuffers_build_string_vector_field(4, flatbuffers_, reflection_RPCCall_documentation, reflection_RPCCall)
+
+static inline reflection_RPCCall_ref_t reflection_RPCCall_create(flatbuffers_builder_t *B __reflection_RPCCall_formal_args)
+{
+ if (reflection_RPCCall_start(B)
+ || reflection_RPCCall_name_add(B, v0)
+ || reflection_RPCCall_request_add(B, v1)
+ || reflection_RPCCall_response_add(B, v2)
+ || reflection_RPCCall_attributes_add(B, v3)
+ || reflection_RPCCall_documentation_add(B, v4)) {
+ return 0;
+ }
+ return reflection_RPCCall_end(B);
+}
+
+static reflection_RPCCall_ref_t reflection_RPCCall_clone(flatbuffers_builder_t *B, reflection_RPCCall_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_RPCCall_start(B)
+ || reflection_RPCCall_name_pick(B, t)
+ || reflection_RPCCall_request_pick(B, t)
+ || reflection_RPCCall_response_pick(B, t)
+ || reflection_RPCCall_attributes_pick(B, t)
+ || reflection_RPCCall_documentation_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_RPCCall_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Service_name, reflection_Service)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Service_calls, reflection_RPCCall, reflection_Service)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(2, flatbuffers_, reflection_Service_attributes, reflection_KeyValue, reflection_Service)
+__flatbuffers_build_string_vector_field(3, flatbuffers_, reflection_Service_documentation, reflection_Service)
+
+static inline reflection_Service_ref_t reflection_Service_create(flatbuffers_builder_t *B __reflection_Service_formal_args)
+{
+ if (reflection_Service_start(B)
+ || reflection_Service_name_add(B, v0)
+ || reflection_Service_calls_add(B, v1)
+ || reflection_Service_attributes_add(B, v2)
+ || reflection_Service_documentation_add(B, v3)) {
+ return 0;
+ }
+ return reflection_Service_end(B);
+}
+
+static reflection_Service_ref_t reflection_Service_clone(flatbuffers_builder_t *B, reflection_Service_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Service_start(B)
+ || reflection_Service_name_pick(B, t)
+ || reflection_Service_calls_pick(B, t)
+ || reflection_Service_attributes_pick(B, t)
+ || reflection_Service_documentation_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Service_end(B));
+}
+
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(0, flatbuffers_, reflection_Schema_objects, reflection_Object, reflection_Schema)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Schema_enums, reflection_Enum, reflection_Schema)
+__flatbuffers_build_string_field(2, flatbuffers_, reflection_Schema_file_ident, reflection_Schema)
+__flatbuffers_build_string_field(3, flatbuffers_, reflection_Schema_file_ext, reflection_Schema)
+__flatbuffers_build_table_field(4, flatbuffers_, reflection_Schema_root_table, reflection_Object, reflection_Schema)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(5, flatbuffers_, reflection_Schema_services, reflection_Service, reflection_Schema)
+
+static inline reflection_Schema_ref_t reflection_Schema_create(flatbuffers_builder_t *B __reflection_Schema_formal_args)
+{
+ if (reflection_Schema_start(B)
+ || reflection_Schema_objects_add(B, v0)
+ || reflection_Schema_enums_add(B, v1)
+ || reflection_Schema_file_ident_add(B, v2)
+ || reflection_Schema_file_ext_add(B, v3)
+ || reflection_Schema_root_table_add(B, v4)
+ || reflection_Schema_services_add(B, v5)) {
+ return 0;
+ }
+ return reflection_Schema_end(B);
+}
+
+static reflection_Schema_ref_t reflection_Schema_clone(flatbuffers_builder_t *B, reflection_Schema_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Schema_start(B)
+ || reflection_Schema_objects_pick(B, t)
+ || reflection_Schema_enums_pick(B, t)
+ || reflection_Schema_file_ident_pick(B, t)
+ || reflection_Schema_file_ext_pick(B, t)
+ || reflection_Schema_root_table_pick(B, t)
+ || reflection_Schema_services_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Schema_end(B));
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_BUILDER_H */
diff --git a/include/flatcc/reflection/reflection_reader.h b/include/flatcc/reflection/reflection_reader.h
new file mode 100644
index 0000000..bf6a0e9
--- /dev/null
+++ b/include/flatcc/reflection/reflection_reader.h
@@ -0,0 +1,411 @@
+#ifndef REFLECTION_READER_H
+#define REFLECTION_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef FLATBUFFERS_COMMON_READER_H
+#include "flatbuffers_common_reader.h"
+#endif
+#include "flatcc/flatcc_flatbuffers.h"
+#ifndef __alignas_is_defined
+#include <stdalign.h>
+#endif
+#include "flatcc/flatcc_prologue.h"
+#undef flatbuffers_identifier
+#define flatbuffers_identifier "BFBS"
+#undef flatbuffers_extension
+#define flatbuffers_extension "bfbs"
+
+
+typedef const struct reflection_Type_table *reflection_Type_table_t;
+typedef struct reflection_Type_table *reflection_Type_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Type_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Type_mutable_vec_t;
+typedef const struct reflection_KeyValue_table *reflection_KeyValue_table_t;
+typedef struct reflection_KeyValue_table *reflection_KeyValue_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_KeyValue_vec_t;
+typedef flatbuffers_uoffset_t *reflection_KeyValue_mutable_vec_t;
+typedef const struct reflection_EnumVal_table *reflection_EnumVal_table_t;
+typedef struct reflection_EnumVal_table *reflection_EnumVal_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_EnumVal_vec_t;
+typedef flatbuffers_uoffset_t *reflection_EnumVal_mutable_vec_t;
+typedef const struct reflection_Enum_table *reflection_Enum_table_t;
+typedef struct reflection_Enum_table *reflection_Enum_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Enum_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Enum_mutable_vec_t;
+typedef const struct reflection_Field_table *reflection_Field_table_t;
+typedef struct reflection_Field_table *reflection_Field_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Field_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Field_mutable_vec_t;
+typedef const struct reflection_Object_table *reflection_Object_table_t;
+typedef struct reflection_Object_table *reflection_Object_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Object_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Object_mutable_vec_t;
+typedef const struct reflection_RPCCall_table *reflection_RPCCall_table_t;
+typedef struct reflection_RPCCall_table *reflection_RPCCall_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_RPCCall_vec_t;
+typedef flatbuffers_uoffset_t *reflection_RPCCall_mutable_vec_t;
+typedef const struct reflection_Service_table *reflection_Service_table_t;
+typedef struct reflection_Service_table *reflection_Service_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Service_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Service_mutable_vec_t;
+typedef const struct reflection_Schema_table *reflection_Schema_table_t;
+typedef struct reflection_Schema_table *reflection_Schema_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Schema_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Schema_mutable_vec_t;
+#ifndef reflection_Type_file_identifier
+#define reflection_Type_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Type_file_identifier */
+#ifndef reflection_Type_identifier
+#define reflection_Type_identifier "BFBS"
+#endif
+#define reflection_Type_type_hash ((flatbuffers_thash_t)0x44c8fe5e)
+#define reflection_Type_type_identifier "\x5e\xfe\xc8\x44"
+#ifndef reflection_Type_file_extension
+#define reflection_Type_file_extension "bfbs"
+#endif
+#ifndef reflection_KeyValue_file_identifier
+#define reflection_KeyValue_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_KeyValue_file_identifier */
+#ifndef reflection_KeyValue_identifier
+#define reflection_KeyValue_identifier "BFBS"
+#endif
+#define reflection_KeyValue_type_hash ((flatbuffers_thash_t)0x8c761eaa)
+#define reflection_KeyValue_type_identifier "\xaa\x1e\x76\x8c"
+#ifndef reflection_KeyValue_file_extension
+#define reflection_KeyValue_file_extension "bfbs"
+#endif
+#ifndef reflection_EnumVal_file_identifier
+#define reflection_EnumVal_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_EnumVal_file_identifier */
+#ifndef reflection_EnumVal_identifier
+#define reflection_EnumVal_identifier "BFBS"
+#endif
+#define reflection_EnumVal_type_hash ((flatbuffers_thash_t)0x9531c946)
+#define reflection_EnumVal_type_identifier "\x46\xc9\x31\x95"
+#ifndef reflection_EnumVal_file_extension
+#define reflection_EnumVal_file_extension "bfbs"
+#endif
+#ifndef reflection_Enum_file_identifier
+#define reflection_Enum_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Enum_file_identifier */
+#ifndef reflection_Enum_identifier
+#define reflection_Enum_identifier "BFBS"
+#endif
+#define reflection_Enum_type_hash ((flatbuffers_thash_t)0xacffa90f)
+#define reflection_Enum_type_identifier "\x0f\xa9\xff\xac"
+#ifndef reflection_Enum_file_extension
+#define reflection_Enum_file_extension "bfbs"
+#endif
+#ifndef reflection_Field_file_identifier
+#define reflection_Field_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Field_file_identifier */
+#ifndef reflection_Field_identifier
+#define reflection_Field_identifier "BFBS"
+#endif
+#define reflection_Field_type_hash ((flatbuffers_thash_t)0x9f7e408a)
+#define reflection_Field_type_identifier "\x8a\x40\x7e\x9f"
+#ifndef reflection_Field_file_extension
+#define reflection_Field_file_extension "bfbs"
+#endif
+#ifndef reflection_Object_file_identifier
+#define reflection_Object_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Object_file_identifier */
+#ifndef reflection_Object_identifier
+#define reflection_Object_identifier "BFBS"
+#endif
+#define reflection_Object_type_hash ((flatbuffers_thash_t)0xb09729bd)
+#define reflection_Object_type_identifier "\xbd\x29\x97\xb0"
+#ifndef reflection_Object_file_extension
+#define reflection_Object_file_extension "bfbs"
+#endif
+#ifndef reflection_RPCCall_file_identifier
+#define reflection_RPCCall_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_RPCCall_file_identifier */
+#ifndef reflection_RPCCall_identifier
+#define reflection_RPCCall_identifier "BFBS"
+#endif
+#define reflection_RPCCall_type_hash ((flatbuffers_thash_t)0xe2d586f1)
+#define reflection_RPCCall_type_identifier "\xf1\x86\xd5\xe2"
+#ifndef reflection_RPCCall_file_extension
+#define reflection_RPCCall_file_extension "bfbs"
+#endif
+#ifndef reflection_Service_file_identifier
+#define reflection_Service_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Service_file_identifier */
+#ifndef reflection_Service_identifier
+#define reflection_Service_identifier "BFBS"
+#endif
+#define reflection_Service_type_hash ((flatbuffers_thash_t)0xf31a13b5)
+#define reflection_Service_type_identifier "\xb5\x13\x1a\xf3"
+#ifndef reflection_Service_file_extension
+#define reflection_Service_file_extension "bfbs"
+#endif
+#ifndef reflection_Schema_file_identifier
+#define reflection_Schema_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Schema_file_identifier */
+#ifndef reflection_Schema_identifier
+#define reflection_Schema_identifier "BFBS"
+#endif
+#define reflection_Schema_type_hash ((flatbuffers_thash_t)0xfaf93779)
+#define reflection_Schema_type_identifier "\x79\x37\xf9\xfa"
+#ifndef reflection_Schema_file_extension
+#define reflection_Schema_file_extension "bfbs"
+#endif
+
+typedef int8_t reflection_BaseType_enum_t;
+__flatbuffers_define_integer_type(reflection_BaseType, reflection_BaseType_enum_t, 8)
+#define reflection_BaseType_None ((reflection_BaseType_enum_t)INT8_C(0))
+#define reflection_BaseType_UType ((reflection_BaseType_enum_t)INT8_C(1))
+#define reflection_BaseType_Bool ((reflection_BaseType_enum_t)INT8_C(2))
+#define reflection_BaseType_Byte ((reflection_BaseType_enum_t)INT8_C(3))
+#define reflection_BaseType_UByte ((reflection_BaseType_enum_t)INT8_C(4))
+#define reflection_BaseType_Short ((reflection_BaseType_enum_t)INT8_C(5))
+#define reflection_BaseType_UShort ((reflection_BaseType_enum_t)INT8_C(6))
+#define reflection_BaseType_Int ((reflection_BaseType_enum_t)INT8_C(7))
+#define reflection_BaseType_UInt ((reflection_BaseType_enum_t)INT8_C(8))
+#define reflection_BaseType_Long ((reflection_BaseType_enum_t)INT8_C(9))
+#define reflection_BaseType_ULong ((reflection_BaseType_enum_t)INT8_C(10))
+#define reflection_BaseType_Float ((reflection_BaseType_enum_t)INT8_C(11))
+#define reflection_BaseType_Double ((reflection_BaseType_enum_t)INT8_C(12))
+#define reflection_BaseType_String ((reflection_BaseType_enum_t)INT8_C(13))
+#define reflection_BaseType_Vector ((reflection_BaseType_enum_t)INT8_C(14))
+#define reflection_BaseType_Obj ((reflection_BaseType_enum_t)INT8_C(15))
+#define reflection_BaseType_Union ((reflection_BaseType_enum_t)INT8_C(16))
+#define reflection_BaseType_Array ((reflection_BaseType_enum_t)INT8_C(17))
+#define reflection_BaseType_MaxBaseType ((reflection_BaseType_enum_t)INT8_C(18))
+
+static inline const char *reflection_BaseType_name(reflection_BaseType_enum_t value)
+{
+ switch (value) {
+ case reflection_BaseType_None: return "None";
+ case reflection_BaseType_UType: return "UType";
+ case reflection_BaseType_Bool: return "Bool";
+ case reflection_BaseType_Byte: return "Byte";
+ case reflection_BaseType_UByte: return "UByte";
+ case reflection_BaseType_Short: return "Short";
+ case reflection_BaseType_UShort: return "UShort";
+ case reflection_BaseType_Int: return "Int";
+ case reflection_BaseType_UInt: return "UInt";
+ case reflection_BaseType_Long: return "Long";
+ case reflection_BaseType_ULong: return "ULong";
+ case reflection_BaseType_Float: return "Float";
+ case reflection_BaseType_Double: return "Double";
+ case reflection_BaseType_String: return "String";
+ case reflection_BaseType_Vector: return "Vector";
+ case reflection_BaseType_Obj: return "Obj";
+ case reflection_BaseType_Union: return "Union";
+ case reflection_BaseType_Array: return "Array";
+ case reflection_BaseType_MaxBaseType: return "MaxBaseType";
+ default: return "";
+ }
+}
+
+static inline int reflection_BaseType_is_known_value(reflection_BaseType_enum_t value)
+{
+ switch (value) {
+ case reflection_BaseType_None: return 1;
+ case reflection_BaseType_UType: return 1;
+ case reflection_BaseType_Bool: return 1;
+ case reflection_BaseType_Byte: return 1;
+ case reflection_BaseType_UByte: return 1;
+ case reflection_BaseType_Short: return 1;
+ case reflection_BaseType_UShort: return 1;
+ case reflection_BaseType_Int: return 1;
+ case reflection_BaseType_UInt: return 1;
+ case reflection_BaseType_Long: return 1;
+ case reflection_BaseType_ULong: return 1;
+ case reflection_BaseType_Float: return 1;
+ case reflection_BaseType_Double: return 1;
+ case reflection_BaseType_String: return 1;
+ case reflection_BaseType_Vector: return 1;
+ case reflection_BaseType_Obj: return 1;
+ case reflection_BaseType_Union: return 1;
+ case reflection_BaseType_Array: return 1;
+ case reflection_BaseType_MaxBaseType: return 1;
+ default: return 0;
+ }
+}
+
+
+
+struct reflection_Type_table { uint8_t unused__; };
+
+static inline size_t reflection_Type_vec_len(reflection_Type_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Type_table_t reflection_Type_vec_at(reflection_Type_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Type_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Type)
+
+__flatbuffers_define_scalar_field(0, reflection_Type, base_type, reflection_BaseType, reflection_BaseType_enum_t, INT8_C(0))
+__flatbuffers_define_scalar_field(1, reflection_Type, element, reflection_BaseType, reflection_BaseType_enum_t, INT8_C(0))
+__flatbuffers_define_scalar_field(2, reflection_Type, index, flatbuffers_int32, int32_t, INT32_C(-1))
+__flatbuffers_define_scalar_field(3, reflection_Type, fixed_length, flatbuffers_uint16, uint16_t, UINT16_C(0))
+
+struct reflection_KeyValue_table { uint8_t unused__; };
+
+static inline size_t reflection_KeyValue_vec_len(reflection_KeyValue_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_KeyValue_table_t reflection_KeyValue_vec_at(reflection_KeyValue_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_KeyValue_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_KeyValue)
+
+__flatbuffers_define_string_field(0, reflection_KeyValue, key, 1)
+__flatbuffers_define_find_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_table_sort_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_default_find_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_default_scan_by_string_field(reflection_KeyValue, key)
+#define reflection_KeyValue_vec_sort reflection_KeyValue_vec_sort_by_key
+__flatbuffers_define_string_field(1, reflection_KeyValue, value, 0)
+
+struct reflection_EnumVal_table { uint8_t unused__; };
+
+static inline size_t reflection_EnumVal_vec_len(reflection_EnumVal_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_EnumVal_table_t reflection_EnumVal_vec_at(reflection_EnumVal_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_EnumVal_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_EnumVal)
+
+__flatbuffers_define_string_field(0, reflection_EnumVal, name, 1)
+__flatbuffers_define_scalar_field(1, reflection_EnumVal, value, flatbuffers_int64, int64_t, INT64_C(0))
+/* Note: find only works on vectors sorted by this field. */
+__flatbuffers_define_find_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_table_sort_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_default_find_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_default_scan_by_scalar_field(reflection_EnumVal, value, int64_t)
+#define reflection_EnumVal_vec_sort reflection_EnumVal_vec_sort_by_value
+__flatbuffers_define_table_field(2, reflection_EnumVal, object, reflection_Object_table_t, 0)
+__flatbuffers_define_table_field(3, reflection_EnumVal, union_type, reflection_Type_table_t, 0)
+__flatbuffers_define_vector_field(4, reflection_EnumVal, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Enum_table { uint8_t unused__; };
+
+static inline size_t reflection_Enum_vec_len(reflection_Enum_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Enum_table_t reflection_Enum_vec_at(reflection_Enum_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Enum_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Enum)
+
+__flatbuffers_define_string_field(0, reflection_Enum, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Enum, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Enum, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Enum, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Enum, name)
+#define reflection_Enum_vec_sort reflection_Enum_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Enum, values, reflection_EnumVal_vec_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Enum, is_union, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_table_field(3, reflection_Enum, underlying_type, reflection_Type_table_t, 1)
+__flatbuffers_define_vector_field(4, reflection_Enum, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(5, reflection_Enum, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Field_table { uint8_t unused__; };
+
+static inline size_t reflection_Field_vec_len(reflection_Field_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Field_table_t reflection_Field_vec_at(reflection_Field_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Field_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Field)
+
+__flatbuffers_define_string_field(0, reflection_Field, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Field, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Field, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Field, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Field, name)
+#define reflection_Field_vec_sort reflection_Field_vec_sort_by_name
+__flatbuffers_define_table_field(1, reflection_Field, type, reflection_Type_table_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Field, id, flatbuffers_uint16, uint16_t, UINT16_C(0))
+__flatbuffers_define_scalar_field(3, reflection_Field, offset, flatbuffers_uint16, uint16_t, UINT16_C(0))
+__flatbuffers_define_scalar_field(4, reflection_Field, default_integer, flatbuffers_int64, int64_t, INT64_C(0))
+__flatbuffers_define_scalar_field(5, reflection_Field, default_real, flatbuffers_double, double, 0.0000000000000000)
+__flatbuffers_define_scalar_field(6, reflection_Field, deprecated, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(7, reflection_Field, required, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(8, reflection_Field, key, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_vector_field(9, reflection_Field, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(10, reflection_Field, documentation, flatbuffers_string_vec_t, 0)
+__flatbuffers_define_scalar_field(11, reflection_Field, optional, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+
+struct reflection_Object_table { uint8_t unused__; };
+
+static inline size_t reflection_Object_vec_len(reflection_Object_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Object_table_t reflection_Object_vec_at(reflection_Object_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Object_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Object)
+
+__flatbuffers_define_string_field(0, reflection_Object, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Object, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Object, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Object, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Object, name)
+#define reflection_Object_vec_sort reflection_Object_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Object, fields, reflection_Field_vec_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Object, is_struct, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(3, reflection_Object, minalign, flatbuffers_int32, int32_t, INT32_C(0))
+__flatbuffers_define_scalar_field(4, reflection_Object, bytesize, flatbuffers_int32, int32_t, INT32_C(0))
+__flatbuffers_define_vector_field(5, reflection_Object, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(6, reflection_Object, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_RPCCall_table { uint8_t unused__; };
+
+static inline size_t reflection_RPCCall_vec_len(reflection_RPCCall_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_RPCCall_table_t reflection_RPCCall_vec_at(reflection_RPCCall_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_RPCCall_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_RPCCall)
+
+__flatbuffers_define_string_field(0, reflection_RPCCall, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_default_find_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_RPCCall, name)
+#define reflection_RPCCall_vec_sort reflection_RPCCall_vec_sort_by_name
+__flatbuffers_define_table_field(1, reflection_RPCCall, request, reflection_Object_table_t, 1)
+__flatbuffers_define_table_field(2, reflection_RPCCall, response, reflection_Object_table_t, 1)
+__flatbuffers_define_vector_field(3, reflection_RPCCall, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(4, reflection_RPCCall, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Service_table { uint8_t unused__; };
+
+static inline size_t reflection_Service_vec_len(reflection_Service_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Service_table_t reflection_Service_vec_at(reflection_Service_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Service_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Service)
+
+__flatbuffers_define_string_field(0, reflection_Service, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Service, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Service, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Service, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Service, name)
+#define reflection_Service_vec_sort reflection_Service_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Service, calls, reflection_RPCCall_vec_t, 0)
+__flatbuffers_define_vector_field(2, reflection_Service, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(3, reflection_Service, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Schema_table { uint8_t unused__; };
+
+static inline size_t reflection_Schema_vec_len(reflection_Schema_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Schema_table_t reflection_Schema_vec_at(reflection_Schema_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Schema_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Schema)
+
+__flatbuffers_define_vector_field(0, reflection_Schema, objects, reflection_Object_vec_t, 1)
+__flatbuffers_define_vector_field(1, reflection_Schema, enums, reflection_Enum_vec_t, 1)
+__flatbuffers_define_string_field(2, reflection_Schema, file_ident, 0)
+__flatbuffers_define_string_field(3, reflection_Schema, file_ext, 0)
+__flatbuffers_define_table_field(4, reflection_Schema, root_table, reflection_Object_table_t, 0)
+__flatbuffers_define_vector_field(5, reflection_Schema, services, reflection_Service_vec_t, 0)
+
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_READER_H */
diff --git a/include/flatcc/reflection/reflection_verifier.h b/include/flatcc/reflection/reflection_verifier.h
new file mode 100644
index 0000000..5b5bd37
--- /dev/null
+++ b/include/flatcc/reflection/reflection_verifier.h
@@ -0,0 +1,308 @@
+#ifndef REFLECTION_VERIFIER_H
+#define REFLECTION_VERIFIER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef REFLECTION_READER_H
+#include "reflection_reader.h"
+#endif
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_prologue.h"
+
+static int reflection_Type_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_KeyValue_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_EnumVal_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Enum_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Field_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Object_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_RPCCall_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Service_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Schema_verify_table(flatcc_table_verifier_descriptor_t *td);
+
+static int reflection_Type_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_field(td, 0, 1, 1) /* base_type */)) return ret;
+ if ((ret = flatcc_verify_field(td, 1, 1, 1) /* element */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 4, 4) /* index */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 2, 2) /* fixed_length */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Type_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Type_identifier, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Type_type_identifier, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Type_verify_table);
+}
+
+static int reflection_KeyValue_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* key */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 1, 0) /* value */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_KeyValue_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_KeyValue_identifier, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_KeyValue_type_identifier, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_KeyValue_verify_table);
+}
+
+static int reflection_EnumVal_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_field(td, 1, 8, 8) /* value */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 2, 0, &reflection_Object_verify_table) /* object */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 3, 0, &reflection_Type_verify_table) /* union_type */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 4, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_EnumVal_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_EnumVal_identifier, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_EnumVal_type_identifier, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_EnumVal_verify_table);
+}
+
+static int reflection_Enum_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_EnumVal_verify_table) /* values */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 1, 1) /* is_union */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 3, 1, &reflection_Type_verify_table) /* underlying_type */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 4, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 5, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Enum_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Enum_identifier, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Enum_type_identifier, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Enum_verify_table);
+}
+
+static int reflection_Field_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 1, 1, &reflection_Type_verify_table) /* type */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 2, 2) /* id */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 2, 2) /* offset */)) return ret;
+ if ((ret = flatcc_verify_field(td, 4, 8, 8) /* default_integer */)) return ret;
+ if ((ret = flatcc_verify_field(td, 5, 8, 8) /* default_real */)) return ret;
+ if ((ret = flatcc_verify_field(td, 6, 1, 1) /* deprecated */)) return ret;
+ if ((ret = flatcc_verify_field(td, 7, 1, 1) /* required */)) return ret;
+ if ((ret = flatcc_verify_field(td, 8, 1, 1) /* key */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 9, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 10, 0) /* documentation */)) return ret;
+ if ((ret = flatcc_verify_field(td, 11, 1, 1) /* optional */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Field_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Field_identifier, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Field_type_identifier, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Field_verify_table);
+}
+
+static int reflection_Object_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_Field_verify_table) /* fields */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 1, 1) /* is_struct */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 4, 4) /* minalign */)) return ret;
+ if ((ret = flatcc_verify_field(td, 4, 4, 4) /* bytesize */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 5, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 6, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Object_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Object_identifier, &reflection_Object_verify_table);
+}
+
+static inline int reflection_Object_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Object_type_identifier, &reflection_Object_verify_table);
+}
+
+static inline int reflection_Object_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Object_verify_table);
+}
+
+static inline int reflection_Object_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Object_verify_table);
+}
+
+static int reflection_RPCCall_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 1, 1, &reflection_Object_verify_table) /* request */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 2, 1, &reflection_Object_verify_table) /* response */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 3, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 4, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_RPCCall_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_RPCCall_identifier, &reflection_RPCCall_verify_table);
+}
+
+static inline int reflection_RPCCall_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_RPCCall_type_identifier, &reflection_RPCCall_verify_table);
+}
+
+static inline int reflection_RPCCall_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_RPCCall_verify_table);
+}
+
+static inline int reflection_RPCCall_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_RPCCall_verify_table);
+}
+
+static int reflection_Service_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 0, &reflection_RPCCall_verify_table) /* calls */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 2, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 3, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Service_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Service_identifier, &reflection_Service_verify_table);
+}
+
+static inline int reflection_Service_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Service_type_identifier, &reflection_Service_verify_table);
+}
+
+static inline int reflection_Service_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Service_verify_table);
+}
+
+static inline int reflection_Service_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Service_verify_table);
+}
+
+static int reflection_Schema_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 0, 1, &reflection_Object_verify_table) /* objects */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_Enum_verify_table) /* enums */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 2, 0) /* file_ident */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 3, 0) /* file_ext */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 4, 0, &reflection_Object_verify_table) /* root_table */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 5, 0, &reflection_Service_verify_table) /* services */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Schema_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Schema_identifier, &reflection_Schema_verify_table);
+}
+
+static inline int reflection_Schema_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Schema_type_identifier, &reflection_Schema_verify_table);
+}
+
+static inline int reflection_Schema_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Schema_verify_table);
+}
+
+static inline int reflection_Schema_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Schema_verify_table);
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_VERIFIER_H */
diff --git a/include/flatcc/support/README b/include/flatcc/support/README
new file mode 100644
index 0000000..d9f6ec0
--- /dev/null
+++ b/include/flatcc/support/README
@@ -0,0 +1 @@
+support files mainly used for testing
diff --git a/include/flatcc/support/cdump.h b/include/flatcc/support/cdump.h
new file mode 100644
index 0000000..b589362
--- /dev/null
+++ b/include/flatcc/support/cdump.h
@@ -0,0 +1,38 @@
+#ifndef CDUMP_H
+#define CDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+/* Generates a constant C byte array. */
+static void cdump(const char *name, void *addr, size_t len, FILE *fp) {
+ unsigned int i;
+ unsigned char *pc = (unsigned char*)addr;
+
+ // Output description if given.
+ name = name ? name : "dump";
+ fprintf(fp, "const unsigned char %s[] = {", name);
+
+ // Process every byte in the data.
+ for (i = 0; i < (unsigned int)len; i++) {
+ // Multiple of 16 means new line (with line offset).
+
+ if ((i % 16) == 0) {
+ fprintf(fp, "\n ");
+ } else if ((i % 8) == 0) {
+ fprintf(fp, " ");
+ }
+
+ fprintf(fp, " 0x%02x,", pc[i]);
+ }
+ fprintf(fp, "\n};\n");
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CDUMP_H */
diff --git a/include/flatcc/support/elapsed.h b/include/flatcc/support/elapsed.h
new file mode 100644
index 0000000..ba3bd73
--- /dev/null
+++ b/include/flatcc/support/elapsed.h
@@ -0,0 +1,73 @@
+#ifndef ELAPSED_H
+#define ELAPSED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+/* Based on http://stackoverflow.com/a/8583395 */
+#if !defined(_WIN32)
+#include <sys/time.h>
+static double elapsed_realtime(void) { // returns 0 seconds first time called
+ static struct timeval t0;
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ if (!t0.tv_sec)
+ t0 = tv;
+ return (double)(tv.tv_sec - t0.tv_sec) + (double)(tv.tv_usec - t0.tv_usec) / 1e6;
+}
+#else
+#include <windows.h>
+#ifndef FatalError
+#define FatalError(s) do { perror(s); exit(-1); } while(0)
+#endif
+static double elapsed_realtime(void) { // granularity about 50 microsecs on my machine
+ static LARGE_INTEGER freq, start;
+ LARGE_INTEGER count;
+ if (!QueryPerformanceCounter(&count))
+ FatalError("QueryPerformanceCounter");
+ if (!freq.QuadPart) { // one time initialization
+ if (!QueryPerformanceFrequency(&freq))
+ FatalError("QueryPerformanceFrequency");
+ start = count;
+ }
+ return (double)(count.QuadPart - start.QuadPart) / freq.QuadPart;
+}
+#endif
+
+/* end Based on stackoverflow */
+
+static int show_benchmark(const char *descr, double t1, double t2, size_t size, int rep, const char *reptext)
+{
+ double tdiff = t2 - t1;
+ double nstime;
+
+ printf("operation: %s\n", descr);
+ printf("elapsed time: %.3f (s)\n", tdiff);
+ printf("iterations: %d\n", rep);
+ printf("size: %lu (bytes)\n", (unsigned long)size);
+ printf("bandwidth: %.3f (MB/s)\n", (double)rep * (double)size / 1e6 / tdiff);
+ printf("throughput in ops per sec: %.3f\n", rep / tdiff);
+ if (reptext && rep != 1) {
+ printf("throughput in %s ops per sec: %.3f\n", reptext, 1 / tdiff);
+ }
+ nstime = tdiff * 1e9 / rep;
+ if (nstime < 1000) {
+ printf("time per op: %.3f (ns)\n", nstime);
+ } else if (nstime < 1e6) {
+ printf("time per op: %.3f (us)\n", nstime / 1000);
+ } else if (nstime < 1e9) {
+ printf("time per op: %.3f (ms)\n", nstime / 1e6);
+ } else {
+ printf("time per op: %.3f (s)\n", nstime / 1e9);
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ELAPSED_H */
diff --git a/include/flatcc/support/hexdump.h b/include/flatcc/support/hexdump.h
new file mode 100644
index 0000000..7b6f9b8
--- /dev/null
+++ b/include/flatcc/support/hexdump.h
@@ -0,0 +1,47 @@
+#ifndef HEXDUMP_H
+#define HEXDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+/* Based on: http://stackoverflow.com/a/7776146 */
+static void hexdump(const char *desc, const void *addr, size_t len, FILE *fp) {
+ unsigned int i;
+ unsigned char buf[17] = {0}; /* zeroed so the trailing |...| print stays valid even if len == 0 */
+ const unsigned char *pc = (const unsigned char*)addr;
+
+ /* Output description if given. */
+ if (desc != NULL) fprintf(fp, "%s:\n", desc);
+
+ for (i = 0; i < (unsigned int)len; i++) {
+
+ if ((i % 16) == 0) {
+ if (i != 0) fprintf(fp, " |%s|\n", buf);
+ fprintf(fp, "%08x ", i);
+ } else if ((i % 8) == 0) {
+ fprintf(fp, " ");
+ }
+ fprintf(fp, " %02x", pc[i]);
+ if ((pc[i] < 0x20) || (pc[i] > 0x7e)) {
+ buf[i % 16] = '.';
+ } else {
+ buf[i % 16] = pc[i];
+ }
+ buf[(i % 16) + 1] = '\0';
+ }
+ if (i % 16 <= 8 && i % 16 != 0) fprintf(fp, " ");
+ while ((i % 16) != 0) {
+ fprintf(fp, " ");
+ i++;
+ }
+ fprintf(fp, " |%s|\n", buf);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HEXDUMP_H */
diff --git a/include/flatcc/support/readfile.h b/include/flatcc/support/readfile.h
new file mode 100644
index 0000000..209875f
--- /dev/null
+++ b/include/flatcc/support/readfile.h
@@ -0,0 +1,66 @@
+#ifndef READFILE_H
+#define READFILE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+static char *readfile(const char *filename, size_t max_size, size_t *size_out)
+{
+ FILE *fp;
+ long k;
+ size_t size, pos, n, _out;
+ char *buf;
+
+ size_out = size_out ? size_out : &_out;
+
+ fp = fopen(filename, "rb");
+ size = 0;
+ buf = 0;
+
+ if (!fp) {
+ goto fail;
+ }
+ fseek(fp, 0L, SEEK_END);
+ k = ftell(fp);
+ if (k < 0) goto fail;
+ size = (size_t)k;
+ *size_out = size;
+ if (max_size > 0 && size > max_size) {
+ goto fail;
+ }
+ rewind(fp);
+ buf = (char *)malloc(size ? size : 1);
+ if (!buf) {
+ goto fail;
+ }
+ pos = 0;
+ while ((n = fread(buf + pos, 1, size - pos, fp))) {
+ pos += n;
+ }
+ if (pos != size) {
+ goto fail;
+ }
+ fclose(fp);
+ *size_out = size;
+ return buf;
+
+fail:
+ if (fp) {
+ fclose(fp);
+ }
+ if (buf) {
+ free(buf);
+ }
+ *size_out = size;
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* READFILE_H */
diff --git a/reflection/README.in b/reflection/README.in
new file mode 100644
index 0000000..3c7207a
--- /dev/null
+++ b/reflection/README.in
@@ -0,0 +1,19 @@
+Generated by flatcc
+
+Keep checked in - needed by flatcc to generate binary schema.
+
+NOTE TO CONTRIBUTORS: DO NOT EDIT THESE FILES BY HAND
+
+If you need to change anything here, it is done in the code generator,
+possibly followed by running `reflection/generate_code.sh` from the
+project root. But please only do this for testing; do not include the
+generated files in a pull request unless agreed otherwise, and if so,
+do it in a separate commit.
+
+Normally new reflection code is generated during a release, which also
+updates the version number in comments, and there is no reason to update
+reflection on every commit unless it breaks something fundamentally.
+
+There is a build option `FLATCC_REFLECTION` to disable reflection which
+is helpful while making changes that affect the content of these files
+in a way that would prevent the flatcc compiler from building.
diff --git a/reflection/generate_code.sh b/reflection/generate_code.sh
new file mode 100755
index 0000000..ce88aaf
--- /dev/null
+++ b/reflection/generate_code.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+# Regenerate reflection API
+#
+# The output should be checked in with the project since it is
+# a bootstrapping process.
+
+cd `dirname $0`
+../scripts/build.sh
+RPATH=../include/flatcc/reflection
+mkdir -p ${RPATH}
+../bin/flatcc -a -o ../include/flatcc/reflection reflection.fbs
+cp README.in ${RPATH}/README
diff --git a/reflection/reflection.fbs b/reflection/reflection.fbs
new file mode 100644
index 0000000..d9e2dc4
--- /dev/null
+++ b/reflection/reflection.fbs
@@ -0,0 +1,117 @@
+// This schema defines objects that represent a parsed schema, like
+// the binary version of a .fbs file.
+// This could be used to operate on unknown FlatBuffers at runtime.
+// It can even ... represent itself (!)
+
+namespace reflection;
+
+// These must correspond to the enum in idl.h.
+enum BaseType : byte {
+ None,
+ UType,
+ Bool,
+ Byte,
+ UByte,
+ Short,
+ UShort,
+ Int,
+ UInt,
+ Long,
+ ULong,
+ Float,
+ Double,
+ String,
+ Vector,
+ Obj, // Used for tables & structs.
+ Union,
+ Array,
+
+ // Add any new type above this value.
+ MaxBaseType
+}
+
+table Type {
+ base_type:BaseType;
+ element:BaseType = None; // Only if base_type == Vector
+ // or base_type == Array.
+ index:int = -1; // If base_type == Object, index into "objects" below.
+ // If base_type == Union, UnionType, or integral derived
+ // from an enum, index into "enums" below.
+ fixed_length:uint16 = 0; // Only if base_type == Array.
+}
+
+table KeyValue {
+ key:string (required, key);
+ value:string;
+}
+
+table EnumVal {
+ name:string (required);
+ value:long (key);
+ object:Object; // Will be deprecated in favor of union_type in the future.
+ union_type:Type;
+ documentation:[string];
+}
+
+table Enum {
+ name:string (required, key);
+ values:[EnumVal] (required); // In order of their values.
+ is_union:bool = false;
+ underlying_type:Type (required);
+ attributes:[KeyValue];
+ documentation:[string];
+}
+
+table Field {
+ name:string (required, key);
+ type:Type (required);
+ id:ushort;
+ offset:ushort; // Offset into the vtable for tables, or into the struct.
+ default_integer:long = 0;
+ default_real:double = 0.0;
+ deprecated:bool = false;
+ required:bool = false;
+ key:bool = false;
+ attributes:[KeyValue];
+ documentation:[string];
+ optional:bool = false;
+}
+
+table Object { // Used for both tables and structs.
+ name:string (required, key);
+ fields:[Field] (required); // Sorted.
+ is_struct:bool = false;
+ minalign:int;
+ bytesize:int; // For structs.
+ attributes:[KeyValue];
+ documentation:[string];
+}
+
+table RPCCall {
+ name:string (required, key);
+ request:Object (required); // must be a table (not a struct)
+ response:Object (required); // must be a table (not a struct)
+ attributes:[KeyValue];
+ documentation:[string];
+}
+
+table Service {
+ name:string (required, key);
+ calls:[RPCCall];
+ attributes:[KeyValue];
+ documentation:[string];
+}
+
+table Schema {
+ objects:[Object] (required); // Sorted.
+ enums:[Enum] (required); // Sorted.
+ file_ident:string;
+ file_ext:string;
+ root_table:Object;
+ services:[Service]; // Sorted.
+}
+
+root_type Schema;
+
+file_identifier "BFBS";
+file_extension "bfbs";
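The schema above is compiled into include/flatcc/reflection by generate_code.sh, and the generated reader can then walk any .bfbs file. Below is a minimal sketch (illustrative only, not part of the committed files) using only accessors that also appear in samples/reflection/bfbs2json.c later in this patch; the file name argument is hypothetical.

    /* Hypothetical sketch: list object (table/struct) names from a binary schema. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "flatcc/support/readfile.h"
    #include "flatcc/reflection/reflection_reader.h"

    int list_objects(const char *filename)
    {
        size_t size, i;
        void *buffer = readfile(filename, 100000, &size);
        reflection_Schema_table_t S;
        reflection_Object_vec_t Objs;

        if (!buffer) return -1;
        S = reflection_Schema_as_root(buffer);
        if (!S) { free(buffer); return -1; }
        Objs = reflection_Schema_objects(S);
        for (i = 0; i < reflection_Object_vec_len(Objs); ++i) {
            printf("%s\n", reflection_Object_name(reflection_Object_vec_at(Objs, i)));
        }
        free(buffer);
        return 0;
    }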
diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
new file mode 100644
index 0000000..c05a450
--- /dev/null
+++ b/samples/CMakeLists.txt
@@ -0,0 +1,8 @@
+if (FLATCC_NEED_C89_VAR_DECLS)
+ MESSAGE( STATUS "Disabling monster sample: required C99-style variable declarations are not supported by the target compiler")
+else()
+ add_subdirectory(monster)
+endif()
+if (FLATCC_REFLECTION)
+ add_subdirectory(reflection)
+endif()
diff --git a/samples/bugreport/.gitignore b/samples/bugreport/.gitignore
new file mode 100644
index 0000000..378eac2
--- /dev/null
+++ b/samples/bugreport/.gitignore
@@ -0,0 +1 @@
+build
diff --git a/samples/bugreport/build.sh b/samples/bugreport/build.sh
new file mode 100755
index 0000000..2de7528
--- /dev/null
+++ b/samples/bugreport/build.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+cd $(dirname $0)
+
+FLATBUFFERS_DIR=../..
+NAME=myissue
+SCHEMA=eclectic.fbs
+OUT=build
+
+FLATCC_EXE=$FLATBUFFERS_DIR/bin/flatcc
+FLATCC_INCLUDE=$FLATBUFFERS_DIR/include
+FLATCC_LIB=$FLATBUFFERS_DIR/lib
+
+mkdir -p $OUT
+$FLATCC_EXE --outfile $OUT/${NAME}_generated.h -a $SCHEMA || exit 1
+cc -I$FLATCC_INCLUDE -g -o $OUT/$NAME $NAME.c -L$FLATCC_LIB -lflatccrt_d || exit 1
+echo "running $OUT/$NAME"
+if $OUT/$NAME; then
+ echo "success"
+else
+ echo "failed"
+ exit 1
+fi
diff --git a/samples/bugreport/eclectic.fbs b/samples/bugreport/eclectic.fbs
new file mode 100644
index 0000000..ad507e7
--- /dev/null
+++ b/samples/bugreport/eclectic.fbs
@@ -0,0 +1,11 @@
+namespace Eclectic;
+
+enum Fruit : byte { Banana = -1, Orange = 42 }
+table FooBar {
+ meal : Fruit = Banana;
+ density : long (deprecated);
+ say : string;
+ height : short;
+}
+file_identifier "NOOB";
+root_type FooBar;
diff --git a/samples/bugreport/myissue.c b/samples/bugreport/myissue.c
new file mode 100644
index 0000000..0098235
--- /dev/null
+++ b/samples/bugreport/myissue.c
@@ -0,0 +1,35 @@
+/* Minimal test with all headers generated into a single file. */
+#include "build/myissue_generated.h"
+#include "flatcc/support/hexdump.h"
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ void *buf;
+ size_t size;
+ flatcc_builder_t builder, *B;
+
+ (void)argc;
+ (void)argv;
+
+ B = &builder;
+ flatcc_builder_init(B);
+
+ Eclectic_FooBar_start_as_root(B);
+ Eclectic_FooBar_say_create_str(B, "hello");
+ Eclectic_FooBar_meal_add(B, Eclectic_Fruit_Orange);
+ Eclectic_FooBar_height_add(B, -8000);
+ Eclectic_FooBar_end_as_root(B);
+ buf = flatcc_builder_get_direct_buffer(B, &size);
+#if defined(PROVOKE_ERROR) || 0
+ /* Provoke error for testing. */
+ ((char*)buf)[0] = 42;
+#endif
+ ret = Eclectic_FooBar_verify_as_root(buf, size);
+ if (ret) {
+ hexdump("Eclectic.FooBar buffer for myissue", buf, size, stdout);
+ printf("could not verify Electic.FooBar table, got %s\n", flatcc_verify_error_string(ret));
+ }
+ flatcc_builder_clear(B);
+ return ret;
+}
diff --git a/samples/monster/CMakeLists.txt b/samples/monster/CMakeLists.txt
new file mode 100644
index 0000000..1c03455
--- /dev/null
+++ b/samples/monster/CMakeLists.txt
@@ -0,0 +1,22 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_fbs ALL)
+add_custom_command (
+ TARGET gen_monster_fbs
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -a -o "${GEN_DIR}" "${FBS_DIR}/monster.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster.fbs"
+)
+add_executable(monster monster.c)
+add_dependencies(monster gen_monster_fbs)
+target_link_libraries(monster flatccrt)
+
+if (FLATCC_TEST)
+ add_test(monster monster${CMAKE_EXECUTABLE_SUFFIX})
+endif()
diff --git a/samples/monster/build.sh b/samples/monster/build.sh
new file mode 100755
index 0000000..e090aed
--- /dev/null
+++ b/samples/monster/build.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+NAME=monster
+TMP=${ROOT}/build/tmp/samples/${NAME}
+EX=${ROOT}/samples/${NAME}
+
+CC=${CC:-cc}
+CFLAGS_DEBUG="-g -I ${ROOT}/include"
+CFLAGS_RELEASE="-O3 -DNDEBUG -I ${ROOT}/include"
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+bin/flatcc -a -o ${TMP} ${EX}/${NAME}.fbs
+
+cp ${EX}/*.c ${TMP}
+cd ${TMP}
+
+echo "building $NAME example (debug)"
+$CC $CFLAGS_DEBUG ${NAME}.c ${ROOT}/lib/libflatccrt_d.a -o ${NAME}_d
+echo "building $NAME example (release)"
+$CC $CFLAGS_RELEASE ${NAME}.c ${ROOT}/lib/libflatccrt.a -o ${NAME}
+
+echo "running $NAME example (debug)"
+./${NAME}_d
diff --git a/samples/monster/monster.c b/samples/monster/monster.c
new file mode 100644
index 0000000..32a39b0
--- /dev/null
+++ b/samples/monster/monster.c
@@ -0,0 +1,353 @@
+// Example of how to build a Monster FlatBuffer.
+
+
+// Note: while some older C89 compilers are supported when
+// -DFLATCC_PORTABLE is defined, this particular sample is known not to
+// work with MSVC 2010 (MSVC 2013 is OK) due to inline variable
+// declarations. These are easily moved to the start of code blocks, but
+// since we follow the step-wise tutorial, it isn't really practical
+// in this case. The comment style is technically also in violation of C89.
+
+
+#include "monster_builder.h" // Generated by `flatcc`.
+// <string.h> and <assert.h> already included.
+
+// Convenient namespace macro to manage long namespace prefix.
+// The ns macro makes it possible to write `ns(Monster_create(...))`
+// instead of `MyGame_Sample_Monster_create(...)`
+#undef ns
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Sample, x) // Specified in the schema.
+
+// A helper to simplify creating vectors from C-arrays.
+#define c_vec_len(V) (sizeof(V)/sizeof((V)[0]))
+
+// This allows us to verify result in optimized builds.
+#define test_assert(x) do { if (!(x)) { assert(0); return -1; }} while(0)
+
+// Bottom-up approach where we create child objects and store these
+// in temporary references before a parent object is created with
+// these references.
+int create_monster_bottom_up(flatcc_builder_t *B, int flexible)
+{
+ flatbuffers_string_ref_t weapon_one_name = flatbuffers_string_create_str(B, "Sword");
+ int16_t weapon_one_damage = 3;
+
+ flatbuffers_string_ref_t weapon_two_name = flatbuffers_string_create_str(B, "Axe");
+ int16_t weapon_two_damage = 5;
+
+ // Use the `MyGame_Sample_Weapon_create` shortcut to create Weapons
+ // with all the fields set.
+ //
+ // In the C-API, verbs (here create) always follow the type name
+ // (here Weapon), prefixed by the namespace (here MyGame_Sample_):
+ // MyGame_Sample_Weapon_create(...), or ns(Weapon_create(...)).
+ ns(Weapon_ref_t) sword = ns(Weapon_create(B, weapon_one_name, weapon_one_damage));
+ ns(Weapon_ref_t) axe = ns(Weapon_create(B, weapon_two_name, weapon_two_damage));
+
+ // Serialize a name for our monster, called "Orc".
+ // The _str suffix indicates the source is an ascii-z string.
+ flatbuffers_string_ref_t name = flatbuffers_string_create_str(B, "Orc");
+
+ // Create a `vector` representing the inventory of the Orc. Each number
+ // could correspond to an item that can be claimed after he is slain.
+ uint8_t treasure[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ flatbuffers_uint8_vec_ref_t inventory;
+ // `c_vec_len` is the convenience macro we defined earlier.
+ inventory = flatbuffers_uint8_vec_create(B, treasure, c_vec_len(treasure));
+
+ // Here we use a top-down approach locally to build a Weapons vector
+ // in-place instead of creating a temporary external vector to use
+ // as an argument like we did with the `inventory` earlier on, but the
+ // overall approach is still bottom-up.
+ ns(Weapon_vec_start(B));
+ ns(Weapon_vec_push(B, sword));
+ ns(Weapon_vec_push(B, axe));
+ ns(Weapon_vec_ref_t) weapons = ns(Weapon_vec_end(B));
+
+
+ // Create a `Vec3`, representing the Orc's position in 3-D space.
+ ns(Vec3_t) pos = { 1.0f, 2.0f, 3.0f };
+
+
+ // Set his hit points to 300 and his mana to 150.
+ int16_t hp = 300;
+ // The default value is 150, so we will never store this field.
+ int16_t mana = 150;
+
+ // Create the equipment union. In the C++ language API this is given
+ // as two arguments to the create call, or as two separate add
+ // operations for the type and the table reference. Here we create
+ // a single union value that carries both the type and reference.
+ ns(Equipment_union_ref_t) equipped = ns(Equipment_as_Weapon(axe));
+
+ if (!flexible) {
+ // Finally, create the monster using the `Monster_create` helper function
+ // to set all fields.
+ //
+ // Note that the Equipment union only takes up one argument in C, where
+ // C++ takes a type and an object argument.
+ ns(Monster_create_as_root(B, &pos, mana, hp, name, inventory, ns(Color_Red),
+ weapons, equipped));
+
+ // Unlike C++ we do not use a Finish call. Instead we use the
+ // `create_as_root` action which has better type safety and
+ // simplicity.
+ //
+ // However, we can also express this as:
+ //
+ // ns(Monster_ref_t) orc = ns(Monster_create(B, ...));
+ // flatcc_builder_buffer_create(orc);
+ //
+ // In this approach the function should return the orc and
+ // let a calling function handle the flatcc_builder_buffer_create call
+ // for a more composable setup that is also able to create child
+ // monsters. In general, `flatcc_builder` calls are best isolated
+ // in a containing driver function.
+
+ } else {
+
+ // A more flexible approach where we mix bottom-up and top-down
+ // style. We still create child objects first, but then create
+ // a top-down style monster object that we can manipulate in more
+ // detail.
+
+ // It is important to pair `start_as_root` with `end_as_root`.
+ ns(Monster_start_as_root(B));
+ ns(Monster_pos_create(B, 1.0f, 2.0f, 3.0f));
+ // or alternatively
+ //ns(Monster_pos_add(B, &pos));
+
+ ns(Monster_hp_add(B, hp));
+ // Notice that `Monster_name_add` adds a string reference unlike the
+ // add_str and add_strn variants.
+ ns(Monster_name_add(B, name));
+ ns(Monster_inventory_add(B, inventory));
+ ns(Monster_color_add(B, ns(Color_Red)));
+ ns(Monster_weapons_add(B, weapons));
+ ns(Monster_equipped_add(B, equipped));
+ // Complete the monster object and make it the buffer root object.
+ ns(Monster_end_as_root(B));
+
+ // We could also drop the `as_root` suffix from Monster_start/end(B)
+ // and add the table as buffer root later:
+ //
+ // ns(Monster_start(B));
+ // ...
+ // ns(Monster_ref_t) orc = ns(Monster_end(B));
+ // flatcc_builder_buffer_create(orc);
+ //
+ // It is best to keep the `flatcc_builder` calls in a containing
+ // driver function for modularity.
+ }
+ return 0;
+}
+
+// Alternative top-down approach where parent objects are created before
+// their children. We only need to save one reference because the `axe`
+// object is used in two places, effectively making the buffer object
+// graph a DAG.
+int create_monster_top_down(flatcc_builder_t *B)
+{
+ uint8_t treasure[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ size_t treasure_count = c_vec_len(treasure);
+ ns(Weapon_ref_t) axe;
+
+ // NOTE: if we use end_as_root, we MUST also start as root.
+ ns(Monster_start_as_root(B));
+ ns(Monster_pos_create(B, 1.0f, 2.0f, 3.0f));
+ ns(Monster_hp_add(B, 300));
+ //ns(Monster_mana_add(B, 150));
+ // We use create_str instead of add because we have no existing string reference.
+ ns(Monster_name_create_str(B, "Orc"));
+ // Again we use create because we have no existing vector object, only a C-array.
+ ns(Monster_inventory_create(B, treasure, treasure_count));
+ ns(Monster_color_add(B, ns(Color_Red)));
+ if (1) {
+ ns(Monster_weapons_start(B));
+ ns(Monster_weapons_push_create(B, flatbuffers_string_create_str(B, "Sword"), 3));
+ // We reuse the axe object later. Note that we dereference a pointer
+ // because push always returns a short-term pointer to the stored element.
+ // We could also have created the axe object first and simply pushed it.
+ axe = *ns(Monster_weapons_push_create(B, flatbuffers_string_create_str(B, "Axe"), 5));
+ ns(Monster_weapons_end(B));
+ } else {
+ // We can have more control with the table elements added to a vector:
+ //
+ ns(Monster_weapons_start(B));
+ ns(Monster_weapons_push_start(B));
+ ns(Weapon_name_create_str(B, "Sword"));
+ ns(Weapon_damage_add(B, 3));
+ ns(Monster_weapons_push_end(B));
+ ns(Monster_weapons_push_start(B));
+ ns(Weapon_name_create_str(B, "Axe"));
+ ns(Weapon_damage_add(B, 5));
+ axe = *ns(Monster_weapons_push_end(B));
+ ns(Monster_weapons_end(B));
+ }
+ // Unions can get their type by using a type-specific add/create/start method.
+ ns(Monster_equipped_Weapon_add(B, axe));
+
+ ns(Monster_end_as_root(B));
+ return 0;
+}
+
+// This isn't strictly needed because the builder already included the reader,
+// but we would need it if our reader were in a separate file.
+#include "monster_reader.h"
+
+#undef ns
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Sample, x) // Specified in the schema.
+
+int access_monster_buffer(const void *buffer)
+{
+ // Note that we use the `table_t` suffix when reading a table object
+ // as opposed to the `ref_t` suffix used during the construction of
+ // the buffer.
+ ns(Monster_table_t) monster = ns(Monster_as_root(buffer));
+
+ // Note: root object pointers are NOT the same as the `buffer` pointer.
+
+ // Make sure the buffer is accessible.
+ test_assert(monster != 0);
+
+ int16_t hp = ns(Monster_hp(monster));
+ int16_t mana = ns(Monster_mana(monster));
+ // This is just a const char *, but it also supports a fast length operation.
+ flatbuffers_string_t name = ns(Monster_name(monster));
+ size_t name_len = flatbuffers_string_len(name);
+
+ test_assert(hp == 300);
+ // Since 150 is the default, we are reading a value that wasn't stored.
+ test_assert(mana == 150);
+ test_assert(0 == strcmp(name, "Orc"));
+ test_assert(name_len == strlen("Orc"));
+
+ int hp_present = ns(Monster_hp_is_present(monster)); // 1
+ int mana_present = ns(Monster_mana_is_present(monster)); // 0
+ test_assert(hp_present);
+ test_assert(!mana_present);
+
+ ns(Vec3_struct_t) pos = ns(Monster_pos(monster));
+ // Make sure pos has been set.
+ test_assert(pos != 0);
+ float x = ns(Vec3_x(pos));
+ float y = ns(Vec3_y(pos));
+ float z = ns(Vec3_z(pos));
+
+ // The literal `f` suffix is important because double literals do
+ // not always map cleanly to a 32-bit representation even with only a few digits:
+ // `1.0 == 1.0f`, but `3.2 != 3.2f`.
+ test_assert(x == 1.0f);
+ test_assert(y == 2.0f);
+ test_assert(z == 3.0f);
+
+ // We can also read the position into a C-struct. We have to copy
+ // because we generally do not know if the native endian format
+ // matches the one stored in the buffer (pe: protocol endian).
+ ns(Vec3_t) pos_vec;
+ // `pe` indicates endian conversion from protocol to native.
+ ns(Vec3_copy_from_pe(&pos_vec, pos));
+ test_assert(pos_vec.x == 1.0f);
+ test_assert(pos_vec.y == 2.0f);
+ test_assert(pos_vec.z == 3.0f);
+
+ // This is a const uint8_t *, but it shouldn't be accessed directly
+ // to ensure proper endian conversion. However, uint8 (ubyte) are
+ // not sensitive to endianness, so we *could* have accessed it directly.
+ // The compiler likely optimizes this so that it doesn't matter.
+ flatbuffers_uint8_vec_t inv = ns(Monster_inventory(monster));
+ size_t inv_len = flatbuffers_uint8_vec_len(inv);
+ // Make sure the inventory has been set.
+ test_assert(inv != 0);
+ // If `inv` were absent, the length would be 0, so the above test is redundant.
+ test_assert(inv_len == 10);
+ // Index 0 is the first, index 2 is the third.
+ // NOTE: C++ uses the `Get` terminology for vector elements, C uses `at`.
+ uint8_t third_item = flatbuffers_uint8_vec_at(inv, 2);
+ test_assert(third_item == 2);
+
+ ns(Weapon_vec_t) weapons = ns(Monster_weapons(monster));
+ size_t weapons_len = ns(Weapon_vec_len(weapons));
+ test_assert(weapons_len == 2);
+ // We can use `const char *` instead of `flatbuffers_string_t`.
+ const char *second_weapon_name = ns(Weapon_name(ns(Weapon_vec_at(weapons, 1))));
+ int16_t second_weapon_damage = ns(Weapon_damage(ns(Weapon_vec_at(weapons, 1))));
+ test_assert(second_weapon_name != 0 && strcmp(second_weapon_name, "Axe") == 0);
+ test_assert(second_weapon_damage == 5);
+
+ // Access union type field.
+ if (ns(Monster_equipped_type(monster)) == ns(Equipment_Weapon)) {
+ // Cast to appropriate type:
+ // C does not require the cast to Weapon_table_t, but C++ does.
+ ns(Weapon_table_t) weapon = (ns(Weapon_table_t)) ns(Monster_equipped(monster));
+ const char *weapon_name = ns(Weapon_name(weapon));
+ int16_t weapon_damage = ns(Weapon_damage(weapon));
+
+ test_assert(0 == strcmp(weapon_name, "Axe"));
+ test_assert(weapon_damage == 5);
+ }
+ return 0;
+}
+
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+ // Create a `FlatBufferBuilder`, which will be used to create our
+ // monsters' FlatBuffers.
+ flatcc_builder_t builder;
+ void *buf;
+ size_t size;
+
+ // Silence warnings.
+ (void)argc;
+ (void)argv;
+
+ // Initialize the builder object.
+ flatcc_builder_init(&builder);
+ test_assert(0 == create_monster_bottom_up(&builder, 0));
+
+ // Allocate and extract a readable buffer from internal builder heap.
+ // NOTE: Finalizing the buffer does NOT change the builder, it
+ // just creates a snapshot of the builder content.
+ // NOTE2: finalize_buffer uses malloc while finalize_aligned_buffer
+ // uses a portable aligned allocation method. Often the malloc
+ // version is sufficient, but won't work for all schema on all
+ // systems. If the buffer is written to disk or network, but not
+ // accessed in memory, `finalize_buffer` is also sufficient.
+ // The flatcc_builder version of free or aligned_free should be used
+ // instead of `free` although free will often work on POSIX systems.
+ // This ensures portability and prevents issues when linking to
+ // allocation libraries other than malloc.
+ buf = flatcc_builder_finalize_aligned_buffer(&builder, &size);
+ //buf = flatcc_builder_finalize_buffer(&builder, &size);
+
+ // We now have a FlatBuffer we can store on disk or send over a network.
+ // ** file/network code goes here :) **
+ // Instead, we're going to access it right away (as if we just received it).
+ //access_monster_buffer(buf);
+
+ // prior to v0.5.0, use `aligned_free`
+ flatcc_builder_aligned_free(buf);
+ //free(buf);
+ //
+ // The builder object can optionally be reused after a reset which
+ // is faster than creating a new builder. Subsequent use might
+ // entirely avoid temporary allocations until finalizing the buffer.
+ flatcc_builder_reset(&builder);
+ test_assert(0 == create_monster_bottom_up(&builder, 1));
+ buf = flatcc_builder_finalize_aligned_buffer(&builder, &size);
+ access_monster_buffer(buf);
+ flatcc_builder_aligned_free(buf);
+ flatcc_builder_reset(&builder);
+ create_monster_top_down(&builder);
+ buf = flatcc_builder_finalize_buffer(&builder, &size);
+ test_assert(0 == access_monster_buffer(buf));
+ flatcc_builder_free(buf);
+ // Eventually the builder must be cleaned up:
+ flatcc_builder_clear(&builder);
+
+ printf("The FlatBuffer was successfully created and accessed!\n");
+
+ return 0;
+}
diff --git a/samples/monster/monster.fbs b/samples/monster/monster.fbs
new file mode 100644
index 0000000..12859d2
--- /dev/null
+++ b/samples/monster/monster.fbs
@@ -0,0 +1,32 @@
+// Example IDL file for our monster's schema.
+
+namespace MyGame.Sample;
+
+enum Color:byte { Red = 0, Green, Blue = 2 }
+
+union Equipment { Weapon } // Optionally add more tables.
+
+struct Vec3 {
+ x:float;
+ y:float;
+ z:float;
+}
+
+table Monster {
+ pos:Vec3; // Struct.
+ mana:short = 150;
+ hp:short = 100;
+ name:string;
+ friendly:bool = false (deprecated);
+ inventory:[ubyte]; // Vector of scalars.
+ color:Color = Blue; // Enum.
+ weapons:[Weapon]; // Vector of tables.
+ equipped:Equipment; // Union.
+}
+
+table Weapon {
+ name:string;
+ damage:short;
+}
+
+root_type Monster;
diff --git a/samples/reflection/CMakeLists.txt b/samples/reflection/CMakeLists.txt
new file mode 100644
index 0000000..cfbef1c
--- /dev/null
+++ b/samples/reflection/CMakeLists.txt
@@ -0,0 +1,31 @@
+include(CTest)
+
+#
+# This project depends on headers generated from reflection.fbs, but these
+# are pre-generated in `include/flatcc/reflection` so we don't need to
+# build them here.
+#
+# What we do build is a binary schema `monster.bfbs` for the monster
+# sample, and the actual C source of this project.
+#
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/samples/monster")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_bfbs ALL)
+add_custom_command (
+ TARGET gen_monster_bfbs
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli --schema -o "${GEN_DIR}" "${FBS_DIR}/monster.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster.fbs"
+)
+add_executable(bfbs2json bfbs2json.c)
+add_dependencies(bfbs2json gen_monster_bfbs)
+target_link_libraries(bfbs2json flatccrt)
+
+if (FLATCC_TEST)
+ add_test(bfbs2json bfbs2json${CMAKE_EXECUTABLE_SUFFIX} ${GEN_DIR}/monster.bfbs)
+endif()
diff --git a/samples/reflection/bfbs2json.c b/samples/reflection/bfbs2json.c
new file mode 100644
index 0000000..03c31e7
--- /dev/null
+++ b/samples/reflection/bfbs2json.c
@@ -0,0 +1,314 @@
+#include "flatcc/support/readfile.h"
+#include "flatcc/reflection/reflection_reader.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+
+/*
+ * Reads a binary schema generated by `flatcc` or Google's `flatc` tool,
+ * then prints the content out in a custom JSON format.
+ *
+ * Note: This is completely unrelated to `flatcc's` JSON support - we
+ * just needed to do something tangible with the data we read from the
+ * binary schema and opted to print it as JSON.
+ *
+ * The JSON can be pretty printed with an external tool, for example:
+ *
+ * cat monster_test_schema.json | jq '.'
+ */
+
+void print_type(reflection_Type_table_t T)
+{
+ int first = 1;
+ printf("{");
+ if (reflection_Type_base_type_is_present(T)) {
+ if (!first) {
+ printf(",");
+ }
+ printf("\"base_type\":\"%s\"", reflection_BaseType_name(reflection_Type_base_type(T)));
+ first = 0;
+ }
+ if (reflection_Type_element_is_present(T)) {
+ if (!first) {
+ printf(",");
+ }
+ printf("\"element\":\"%s\"", reflection_BaseType_name(reflection_Type_element(T)));
+ first = 0;
+ }
+ if (reflection_Type_index_is_present(T)) {
+ if (!first) {
+ printf(",");
+ }
+ printf("\"index\":%d", reflection_Type_index(T));
+ first = 0;
+ }
+ if (reflection_Type_fixed_length_is_present(T)) {
+ if (!first) {
+ printf(",");
+ }
+ printf("\"fixed_length\":%d", reflection_Type_fixed_length(T));
+ first = 0;
+ }
+ printf("}");
+}
+
+void print_attributes(reflection_KeyValue_vec_t KV)
+{
+ size_t i;
+ reflection_KeyValue_table_t attribute;
+ const char *key, *value;
+
+ printf("[");
+ for (i = 0; i < reflection_KeyValue_vec_len(KV); ++i) {
+ attribute = reflection_KeyValue_vec_at(KV, i);
+ key = reflection_KeyValue_key(attribute);
+ value = reflection_KeyValue_value(attribute);
+ if (i > 0) {
+ printf(",");
+ }
+ printf("{\"key\":\"%s\"", key);
+ if (value) {
+ /* TODO: we ought to escape '\"' and other non-string-able characters. */
+ printf(",\"value\":\"%s\"", value);
+ }
+ printf("}");
+ }
+ printf("]");
+}
+
+void print_object(reflection_Object_table_t O)
+{
+ reflection_Field_vec_t Flds;
+ reflection_Field_table_t F;
+ size_t i;
+
+ Flds = reflection_Object_fields(O);
+ printf("{\"name\":\"%s\"", reflection_Object_name(O));
+ printf(",\"fields\":[");
+ for (i = 0; i < reflection_Field_vec_len(Flds); ++i) {
+ if (i > 0) {
+ printf(",");
+ }
+ F = reflection_Field_vec_at(Flds, i);
+ printf("{\"name\":\"%s\",\"type\":", reflection_Field_name(F));
+ print_type(reflection_Field_type(F));
+ if (reflection_Field_id_is_present(F)) {
+ printf(",\"id\":%hu", reflection_Field_id(F));
+ }
+ if (reflection_Field_default_integer_is_present(F)) {
+ printf(",\"default_integer\":%"PRId64"", (int64_t)reflection_Field_default_integer(F));
+ }
+ if (reflection_Field_default_real_is_present(F)) {
+ printf(",\"default_integer\":%lf", reflection_Field_default_real(F));
+ }
+ if (reflection_Field_required_is_present(F)) {
+ printf(",\"required\":%s", reflection_Field_required(F) ? "true" : "false");
+ }
+ if (reflection_Field_key_is_present(F)) {
+ printf(",\"key\":%s", reflection_Field_key(F) ? "true" : "false");
+ }
+ if (reflection_Field_attributes_is_present(F)) {
+ printf(",\"attributes\":");
+ print_attributes(reflection_Field_attributes(F));
+ }
+ printf("}");
+ }
+ printf("]");
+ if (reflection_Object_is_struct_is_present(O)) {
+ printf(",\"is_struct\":%s", reflection_Object_is_struct(O) ? "true" : "false");
+ }
+ if (reflection_Object_minalign_is_present(O)) {
+ printf(",\"minalign\":%d", reflection_Object_minalign(O));
+ }
+ if (reflection_Object_bytesize_is_present(O)) {
+ printf(",\"bytesize\":%d", reflection_Object_bytesize(O));
+ }
+ if (reflection_Object_attributes_is_present(O)) {
+ printf(",\"attributes\":");
+ print_attributes(reflection_Object_attributes(O));
+ }
+ printf("}");
+}
+
+void print_enum(reflection_Enum_table_t E)
+{
+ reflection_EnumVal_vec_t EnumVals;
+ reflection_EnumVal_table_t EV;
+ size_t i;
+
+ printf("{\"name\":\"%s\"", reflection_Enum_name(E));
+ EnumVals = reflection_Enum_values(E);
+ printf(",\"values\":[");
+ for (i = 0; i < reflection_Enum_vec_len(EnumVals); ++i) {
+ EV = reflection_EnumVal_vec_at(EnumVals, i);
+ if (i > 0) {
+ printf(",");
+ }
+ printf("{\"name\":\"%s\"", reflection_EnumVal_name(EV));
+ if (reflection_EnumVal_value_is_present(EV)) {
+ printf(",\"value\":%"PRId64"", (int64_t)reflection_EnumVal_value(EV));
+ }
+ if (reflection_EnumVal_object_is_present(EV)) {
+ printf(",\"object\":");
+ print_object(reflection_EnumVal_object(EV));
+ }
+ if (reflection_EnumVal_union_type_is_present(EV)) {
+ printf(",\"union_type\":");
+ print_type(reflection_EnumVal_union_type(EV));
+ }
+ printf("}");
+ }
+ printf("]");
+ if (reflection_Enum_is_union_is_present(E)) {
+ printf(",\"is_union\":%s", reflection_Enum_is_union(E) ? "true" : "false");
+ }
+ printf(",\"underlying_type\":");
+ print_type(reflection_Enum_underlying_type(E));
+ if (reflection_Enum_attributes_is_present(E)) {
+ printf(",\"attributes\":");
+ print_attributes(reflection_Enum_attributes(E));
+ }
+ printf("}");
+}
+
+void print_call(reflection_RPCCall_table_t C)
+{
+ printf("{\"name\":\"%s\"", reflection_RPCCall_name(C));
+ printf(",\"request\":");
+ print_object(reflection_RPCCall_request(C));
+ printf(",\"response\":");
+ print_object(reflection_RPCCall_response(C));
+
+ if (reflection_RPCCall_attributes_is_present(C)) {
+ printf(",\"attributes\":");
+ print_attributes(reflection_RPCCall_attributes(C));
+ }
+ printf("}");
+}
+
+void print_service(reflection_Service_table_t S)
+{
+ reflection_RPCCall_vec_t calls;
+ size_t i;
+
+ printf("{\"name\":\"%s\"", reflection_Service_name(S));
+
+ printf(",\"calls\":[");
+ calls = reflection_Service_calls(S);
+ for (i = 0; i < reflection_RPCCall_vec_len(calls); ++i) {
+ if (i > 0) {
+ printf(",");
+ }
+ print_call(reflection_RPCCall_vec_at(calls, i));
+ }
+ printf("]");
+
+ if (reflection_Service_attributes_is_present(S)) {
+ printf(",\"attributes\":");
+ print_attributes(reflection_Service_attributes(S));
+ }
+ printf("}");
+}
+
+void print_schema(reflection_Schema_table_t S)
+{
+ reflection_Object_vec_t Objs;
+ reflection_Enum_vec_t Enums;
+ reflection_Service_vec_t Services;
+ size_t i;
+
+ Objs = reflection_Schema_objects(S);
+ printf("{");
+ printf("\"objects\":[");
+ for (i = 0; i < reflection_Object_vec_len(Objs); ++i) {
+ if (i > 0) {
+ printf(",");
+ }
+ print_object(reflection_Object_vec_at(Objs, i));
+ }
+ printf("]");
+ Enums = reflection_Schema_enums(S);
+ printf(",\"enums\":[");
+ for (i = 0; i < reflection_Enum_vec_len(Enums); ++i) {
+ if (i > 0) {
+ printf(",");
+ }
+ print_enum(reflection_Enum_vec_at(Enums, i));
+ }
+ printf("]");
+ if (reflection_Schema_file_ident_is_present(S)) {
+ printf(",\"file_ident\":\"%s\"", reflection_Schema_file_ident(S));
+ }
+ if (reflection_Schema_file_ext_is_present(S)) {
+ printf(",\"file_ext\":\"%s\"", reflection_Schema_file_ext(S));
+ }
+ if (reflection_Schema_root_table_is_present(S)) {
+ printf(",\"root_table\":");
+ print_object(reflection_Schema_root_table(S));
+ }
+ if (reflection_Schema_services_is_present(S)) {
+ printf(",\"services\":[");
+ Services = reflection_Schema_services(S);
+ for (i = 0; i < reflection_Service_vec_len(Services); ++i) {
+ if (i > 0) {
+ printf(",");
+ }
+ print_service(reflection_Service_vec_at(Services, i));
+ }
+ printf("]");
+ }
+ printf("}\n");
+}
+
+int load_and_dump_schema(const char *filename)
+{
+ void *buffer;
+ size_t size;
+ int ret = -1;
+ reflection_Schema_table_t S;
+
+ buffer = readfile(filename, 100000, &size);
+ if (!buffer) {
+ fprintf(stderr, "failed to load binary schema file: '%s'\n", filename);
+ goto done;
+ }
+ if (size < 12) {
+ fprintf(stderr, "file too small to access: '%s'\n", filename);
+ goto done;
+ }
+ S = reflection_Schema_as_root(buffer);
+ if (!S) {
+ S = reflection_Schema_as_root((char*)buffer + 4);
+ if (S) {
+ fprintf(stderr, "(skipping length field of input buffer)\n");
+ }
+ }
+ if (!S) {
+ fprintf(stderr, "input is not a valid schema");
+ goto done;
+ }
+ print_schema(S);
+ ret = 0;
+
+done:
+ if (buffer) {
+ free(buffer);
+ }
+ return ret;
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc != 2) {
+ fprintf(stderr, "usage: bfbs2json <filename>\n");
+ fprintf(stderr, "reads a binary flatbuffer schema and prints it to compact json on stdout\n\n");
+ fprintf(stderr, "pretty print with exernal tool, for example:\n"
+ " bfbs2json myschema.bfbs | python -m json.tool > myschema.json\n"
+ "note: also understands binary schema files with a 4 byte length prefix\n");
+ exit(-1);
+ }
+ return load_and_dump_schema(argv[1]);
+}
diff --git a/samples/reflection/build.sh b/samples/reflection/build.sh
new file mode 100755
index 0000000..84ccf8a
--- /dev/null
+++ b/samples/reflection/build.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+TMP=${ROOT}/build/tmp/samples/reflection
+
+CC=${CC:-cc}
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+#bin/flatcc --schema --schema-length=yes -o ${TMP} test/monster_test/monster_test.fbs
+bin/flatcc --schema -o ${TMP} test/monster_test/monster_test.fbs
+
+cp samples/reflection/*.c ${TMP}
+cd ${TMP}
+# We don't need the debug version, but it is always useful to have if we get
+# assertions in the interface code.
+$CC -g -I ${ROOT}/include bfbs2json.c -o bfbs2jsond
+$CC -O3 -DNDEBUG -I ${ROOT}/include bfbs2json.c -o bfbs2json
+cp bfbs2json ${ROOT}/bin/bfbs2json
+echo "generating example json output from monster_test.fbs schema ..."
+${ROOT}/bin/bfbs2json ${TMP}/monster_test.bfbs > monster_test_schema.json
+cat monster_test_schema.json | python -m json.tool > pretty_monster_test_schema.json
+echo "test json file located in ${TMP}/monster_test_schema.json"
+echo "pretty printed file located in ${TMP}/pretty_monster_test_schema.json"
+echo "bfbs2json tool placed in ${ROOT}/bin/bfbs2json"
diff --git a/scripts/_user_build.in b/scripts/_user_build.in
new file mode 100644
index 0000000..dfcc40a
--- /dev/null
+++ b/scripts/_user_build.in
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -e
+
+CC=${CC:-cc}
+
+# assume we are in a subdirectory of the project to build
+HERE=$(dirname $0)/..
+cd $HERE
+ROOT=$(pwd)
+NAME=$(basename $ROOT)
+
+mkdir -p build
+mkdir -p generated
+
+cd build
+
+if [[ "$FLATCC_PORTABLE" = "yes" ]]; then
+ CFLAGS="$CFLAGS -DFLATCC_PORTABLE"
+fi
+
+CFLAGS="$CFLAGS -I ${ROOT}/include -I ${ROOT}/generated"
+CFLAGS_DEBUG=${CFLAGS_DEBUG:--g}
+CFLAGS_RELEASE=${CFLAGS_RELEASE:--O2 -DNDEBUG}
+
+${ROOT}/bin/flatcc -a -o ${ROOT}/generated ${ROOT}/src/*.fbs
+
+echo "building '$NAME' for debug"
+$CC $CFLAGS $CFLAGS_DEBUG ${ROOT}/src/*.c ${ROOT}/lib/libflatccrt_d.a -o ${NAME}_d
+
+echo "building '$NAME' for release"
+$CC $CFLAGS $CFLAGS_RELEASE ${ROOT}/src/*.c ${ROOT}/lib/libflatccrt.a -o ${NAME}
diff --git a/scripts/benchflatcc.sh b/scripts/benchflatcc.sh
new file mode 100755
index 0000000..1dcb830
--- /dev/null
+++ b/scripts/benchflatcc.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd `dirname $0`/..
+test/benchmark/benchflatcc/run.sh
diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh
new file mode 100755
index 0000000..370dd56
--- /dev/null
+++ b/scripts/benchmark.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd `dirname $0`/..
+test/benchmark/benchall.sh
diff --git a/scripts/bfbs-sample.sh b/scripts/bfbs-sample.sh
new file mode 100755
index 0000000..306d93f
--- /dev/null
+++ b/scripts/bfbs-sample.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+cd $(dirname $0)/..
+builddir=build/Debug
+bfbsdir=$builddir/test/reflection_test/generated/
+
+if [ ! -e $bfbsdir/monster_test.bfbs ]; then
+ scripts/test.sh
+fi
+
+$builddir/samples/reflection/bfbs2json_d \
+ $bfbsdir/monster_test.bfbs > $bfbsdir/monster_test_bfbs.json
+
+cat $bfbsdir/monster_test_bfbs.json \
+ | python -m json.tool
diff --git a/scripts/build.cfg.make b/scripts/build.cfg.make
new file mode 100644
index 0000000..ae9c332
--- /dev/null
+++ b/scripts/build.cfg.make
@@ -0,0 +1,3 @@
+FLATCC_BUILD_GEN="Unix Makefiles"
+FLATCC_BUILD_CMD=make
+FLATCC_BUILD_FLAGS=""
diff --git a/scripts/build.cfg.make-32bit b/scripts/build.cfg.make-32bit
new file mode 100644
index 0000000..2299d67
--- /dev/null
+++ b/scripts/build.cfg.make-32bit
@@ -0,0 +1,3 @@
+FLATCC_BUILD_GEN="Unix Makefiles"
+FLATCC_BUILD_CMD=make
+FLATCC_BUILD_FLAGS="-DCMAKE_C_FLAGS=-m32 -DCMAKE_CXX_FLAGS=-m32"
diff --git a/scripts/build.cfg.make-concurrent b/scripts/build.cfg.make-concurrent
new file mode 100644
index 0000000..7684642
--- /dev/null
+++ b/scripts/build.cfg.make-concurrent
@@ -0,0 +1,3 @@
+FLATCC_BUILD_GEN="Unix Makefiles"
+FLATCC_BUILD_CMD="make -j"
+FLATCC_BUILD_FLAGS=""
diff --git a/scripts/build.cfg.ninja b/scripts/build.cfg.ninja
new file mode 100644
index 0000000..07ead70
--- /dev/null
+++ b/scripts/build.cfg.ninja
@@ -0,0 +1,3 @@
+FLATCC_BUILD_GEN=Ninja
+FLATCC_BUILD_CMD=ninja
+FLATCC_BUILD_FLAGS=""
diff --git a/scripts/build.sh b/scripts/build.sh
new file mode 100755
index 0000000..98cd41c
--- /dev/null
+++ b/scripts/build.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+set -e
+
+HERE=`dirname $0`
+cd $HERE/..
+ROOT=`pwd`
+
+CFGFILE=${ROOT}/scripts/build.cfg
+
+if [ -e $CFGFILE ]; then
+ . $CFGFILE
+fi
+
+FLATCC_BUILD_CMD=${FLATCC_BUILD_CMD:-ninja}
+
+mkdir -p ${ROOT}/bin
+mkdir -p ${ROOT}/lib
+
+rm -f ${ROOT}/bin/flatcc
+rm -f ${ROOT}/bin/flatcc_d
+rm -f ${ROOT}/lib/libflatcc.a
+rm -f ${ROOT}/lib/libflatcc_d.a
+rm -f ${ROOT}/lib/libflatccrt.a
+rm -f ${ROOT}/lib/libflatccrt_d.a
+
+if [ ! -d ${ROOT}/build/Debug ] || [ ! -d ${ROOT}/build/Release ]; then
+ ${ROOT}/scripts/initbuild.sh
+fi
+
+echo "building Debug" 1>&2
+cd ${ROOT}/build/Debug && $FLATCC_BUILD_CMD
+
+if [ "$1" != "--debug" ]; then
+ echo "building Release" 1>&2
+ cd ${ROOT}/build/Release && $FLATCC_BUILD_CMD
+fi
diff --git a/scripts/cleanall.sh b/scripts/cleanall.sh
new file mode 100755
index 0000000..a91d58c
--- /dev/null
+++ b/scripts/cleanall.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+set -e
+
+echo "removing build products"
+
+cd `dirname $0`/..
+
+rm -rf build
+rm -rf release
+rm -f bin/flatcc*
+rm -f bin/bfbs2json*
+rm -f lib/libflatcc*
+if [ -d bin ]; then
+ rmdir bin
+fi
+if [ -d lib ]; then
+ rmdir lib
+fi
+
diff --git a/scripts/dev.sh b/scripts/dev.sh
new file mode 100755
index 0000000..12dabf4
--- /dev/null
+++ b/scripts/dev.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+
+set -e
+
+HERE=`dirname $0`
+cd $HERE/..
+ROOT=`pwd`
+
+${ROOT}/scripts/test.sh --debug --no-clean
diff --git a/scripts/flatcc-doc.sh b/scripts/flatcc-doc.sh
new file mode 100755
index 0000000..902ae5c
--- /dev/null
+++ b/scripts/flatcc-doc.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+HOME=$(dirname $0)/..
+SCHEMA=${SCHEMA:-$1}
+PREFIX=${PREFIX:-$2}
+OUTDIR=${OUTDIR:-$3}
+OUTDIR=${OUTDIR:-'.'}
+INCLUDE=${INCLUDE:-$HOME/include}
+FLATCC=${FLATCC:-$HOME/bin/flatcc}
+
+if [ "x$SCHEMA" = "x" ]; then
+ echo "Missing schema arg"
+ echo "usage: $(basename $0) schema-file name-prefix [outdir]"
+ exit 1
+fi
+
+if [ "x$PREFIX" = "x" ]; then
+ echo "Missing prefix arg"
+ echo "usage: $(basename $0) schema-file name-prefix [outdir]"
+ exit 1
+fi
+
+echo "flatcc doc for schema: '$SCHEMA' with name prefix: '$PREFIX'"
+
+echo "generating $OUTDIR/$PREFIX.doc"
+
+$FLATCC $SCHEMA -a --json --stdout | \
+ clang - -E -DNDEBUG -I $INCLUDE | \
+ clang-format -style="WebKit" | \
+ grep "^static.* $PREFIX\w*(" | \
+ cut -f 1 -d '{' | \
+ grep -v deprecated | \
+ grep -v ");$" | \
+ sed 's/__tmp//g' | \
+ sed 's/)/);/g' \
+ > $OUTDIR/$PREFIX.doc
diff --git a/scripts/initbuild.sh b/scripts/initbuild.sh
new file mode 100755
index 0000000..2b18cd2
--- /dev/null
+++ b/scripts/initbuild.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+# link a specific build.cfg.xxx to build.cfg to use that build
+# configuration, e.g. ln -sf build.cfg.make build.cfg
+#
+# call scripts/cleanall.sh before changing
+
+set -e
+
+HERE=`dirname $0`
+cd $HERE/..
+ROOT=`pwd`
+
+CFGFILE=${ROOT}/scripts/build.cfg
+
+if [ x"$1" != x ]; then
+ if [ -e ${CFGFILE}.$1 ]; then
+ ln -sf ${CFGFILE}.$1 $CFGFILE
+ else
+ echo "missing config file for build generator option: $1"
+ exit -1
+ fi
+ ${ROOT}/scripts/cleanall.sh
+fi
+
+if [ -e $CFGFILE ]; then
+ . $CFGFILE
+fi
+
+FLATCC_BUILD_GEN=${FLATCC_BUILD_GEN:-Ninja}
+
+echo "initializing build for CMake $FLATCC_BUILD_GEN"
+
+mkdir -p ${ROOT}/build/Debug
+mkdir -p ${ROOT}/build/Release
+rm -rf ${ROOT}/build/Debug/*
+rm -rf ${ROOT}/build/Release/*
+
+cd ${ROOT}/build/Debug && cmake -G "$FLATCC_BUILD_GEN" $FLATCC_BUILD_FLAGS ../.. -DCMAKE_BUILD_TYPE=Debug
+cd ${ROOT}/build/Release && cmake -G "$FLATCC_BUILD_GEN" $FLATCC_BUILD_FLAGS ../.. -DCMAKE_BUILD_TYPE=Release
diff --git a/scripts/monster-doc.example.sh b/scripts/monster-doc.example.sh
new file mode 100755
index 0000000..a4c6b70
--- /dev/null
+++ b/scripts/monster-doc.example.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+PREFIX=MyGame_Sample_Monster_
+FLATCC=bin/flatcc
+SCHEMA=samples/monster/monster.fbs
+
+. $(dirname $0)/flatcc-doc.sh
diff --git a/scripts/reflection-doc-example.sh b/scripts/reflection-doc-example.sh
new file mode 100755
index 0000000..ea2bcf4
--- /dev/null
+++ b/scripts/reflection-doc-example.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+PREFIX=reflection_Field_vec_
+FLATCC=bin/flatcc
+SCHEMA=reflection/reflection.fbs
+
+. $(dirname $0)/flatcc-doc.sh
diff --git a/scripts/release.sh b/scripts/release.sh
new file mode 100755
index 0000000..d3ad800
--- /dev/null
+++ b/scripts/release.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+VER=`git describe --tags`
+
+echo "archiving tagged version ${VER} to release folder"
+
+cd `dirname $0`/..
+mkdir -p release
+git archive --format=tar.gz --prefix=flatcc-$VER/ v$VER >release/flatcc-$VER.tar.gz
diff --git a/scripts/setup.sh b/scripts/setup.sh
new file mode 100755
index 0000000..c7ea533
--- /dev/null
+++ b/scripts/setup.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+
+# This is intended for quickly developing with flatcc tools
+# in a standalone directory
+
+set -e
+
+DIR=`pwd`
+HERE=`dirname $0`
+cd $HERE/..
+ROOT=`pwd`
+
+function usage() {
+ echo "Usage: <flatcc-dir>/scripts/`basename $0` [options] <path>"
+ echo ""
+ echo "Options:"
+ echo " -g | --gitignore : create/update .gitignore file"
+ echo " -b | --build : build flatcc (otherwise is must have been)"
+ echo " -x | --example : copy example source and schema"
+ echo " -s | --script : copy generic build script"
+ echo " -a | --all : all of the above"
+ echo " -h | --help"
+ echo ""
+ echo "Sets up a client project for use with flatcc."
+ echo ""
+ echo "Links flatcc into bin, lib, and include directories and optionally"
+ echo "starts a build first. Optionally creates or updates a .gitignore file"
+ echo "and a generic build script, and a sample project."
+ echo "Also adds an empty generated directory for 'flatcc -o generated',"
+ echo "'cc -I generated', and for git to ignore. 'build' directory"
+ echo "will be ignored if '-b' is selected."
+ echo ""
+ echo "When using the build script (-s), place source and schema files in 'src'."
+ echo "It is only meant for sharing small examples."
+ echo ""
+ echo "The flatcc project must be the parent of the path to this script."
+ exit 1
+}
+
+while [ $# -gt 0 ]; do
+ case "$1" in
+
+ # Standard help option.
+ -h|-\?|-help|--help|--doc*) usage ;;
+ -g|--gitignore) G=1 ;;
+ -b|--build) B=1 ;;
+ -s|--script) S=1 ;;
+ -x|--example) X=1 ;;
+ -a|--all) G=1; B=1; S=1; X=1 ;;
+
+ -*) echo "Unknown option \"$1\""; usage ;;
+ *) break ;; # unforced end of user options
+ esac
+ shift # next option
+done
+
+if [[ -z "$1" ]]; then
+ echo "Please specify a path"
+ usage
+fi
+
+echo "Building flatcc libraries and tool"
+
+if [[ ! -d "$ROOT/include/flatcc" ]]; then
+ echo "script not located in flatcc project"
+fi
+
+if [[ -n "$B" ]]; then
+ $ROOT/scripts/build.sh
+fi
+
+echo "Linking flatcc tool and library into $1"
+
+mkdir -p $DIR/$1
+cd $DIR/$1
+
+if [[ -n "$S" ]]; then
+ echo "Copying build script"
+ mkdir -p scripts
+ mkdir -p src
+ cp $ROOT/scripts/_user_build.in scripts/build.sh
+ chmod +x scripts/build.sh
+fi
+
+if [[ -n "$X" ]]; then
+ echo "Copying monster sample project"
+ mkdir -p src
+ cp $ROOT/samples/monster/monster.{c,fbs} src
+fi
+
+mkdir -p lib
+mkdir -p bin
+mkdir -p include
+
+ln -sf $ROOT/bin/flatcc bin/
+ln -sf $ROOT/lib/libflatcc.a lib/
+ln -sf $ROOT/lib/libflatccrt.a lib/
+ln -sf $ROOT/lib/libflatcc_d.a lib/
+ln -sf $ROOT/lib/libflatccrt_d.a lib/
+ln -sf $ROOT/include/flatcc include/
+
+if [[ -n "$G" ]]; then
+ echo "Updating .gitignore"
+ touch .gitignore
+ grep -q '^bin/flatcc*' .gitignore || echo 'bin/flatcc*' >> .gitignore
+ grep -q '^lib/libflatcc*.a' .gitignore || echo 'lib/libflatcc*.a' >> .gitignore
+ grep -q '^include/flatcc' .gitignore || echo 'include/flatcc' >> .gitignore
+ grep -q '^generated/' .gitignore || echo 'generated/' >> .gitignore
+ if [[ -n "$S" ]]; then
+ grep -q '^build/' .gitignore || echo 'build/' >> .gitignore
+ fi
+fi
diff --git a/scripts/test.sh b/scripts/test.sh
new file mode 100755
index 0000000..d87924b
--- /dev/null
+++ b/scripts/test.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env sh
+
+set -e
+
+HERE=`dirname $0`
+cd $HERE/..
+ROOT=`pwd`
+
+DBGDIR=$ROOT/build/Debug
+RELDIR=$ROOT/build/Release
+
+if [ "$1" = "--debug" ]; then
+ DEBUG=$1
+ echo "running debug build"
+ shift
+fi
+
+if [ "$1" != "--no-clean" ]; then
+ echo "cleaning build before tests ..."
+ $ROOT/scripts/cleanall.sh
+else
+ shift
+fi
+
+echo "building before tests ..."
+$ROOT/scripts/build.sh $DEBUG
+
+echo "running test in debug build ..."
+cd $DBGDIR && ctest $ROOT
+
+if [ "$DEBUG" != "--debug" ]; then
+echo "running test in release build ..."
+cd $RELDIR && ctest $ROOT
+echo "TEST PASSED"
+else
+ echo "DEBUG TEST PASSED"
+fi
+
diff --git a/src/cli/CMakeLists.txt b/src/cli/CMakeLists.txt
new file mode 100644
index 0000000..40facac
--- /dev/null
+++ b/src/cli/CMakeLists.txt
@@ -0,0 +1,20 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/include"
+ "${PROJECT_SOURCE_DIR}/config"
+)
+
+add_executable(flatcc_cli
+ flatcc_cli.c
+)
+
+target_link_libraries(flatcc_cli
+ flatcc
+)
+
+# Rename because the libflatcc library and the flatcc executable would
+# conflict if they had the same target name `flatcc`.
+set_target_properties(flatcc_cli PROPERTIES OUTPUT_NAME flatcc)
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatcc_cli DESTINATION bin)
+endif()
diff --git a/src/cli/flatcc_cli.c b/src/cli/flatcc_cli.c
new file mode 100644
index 0000000..9a03dec
--- /dev/null
+++ b/src/cli/flatcc_cli.c
@@ -0,0 +1,505 @@
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "flatcc/flatcc.h"
+#include "config.h"
+
+#define VERSION FLATCC_VERSION_TEXT
+#define TITLE FLATCC_TITLE_TEXT
+
+void usage(FILE *fp)
+{
+ fprintf(fp, "%s\n", TITLE);
+ fprintf(fp, "version: %s\n", VERSION);
+ fprintf(fp, "usage: flatcc [options] file [...]\n");
+ fprintf(fp, "options:\n"
+ " --reader (default) Generate reader\n"
+ " -c, --common Generate common include header(s)\n"
+ " --common_reader Generate common reader include header(s)\n"
+ " --common_builder Generate common builder include header(s)\n"
+ " -w, --builder Generate builders (writable buffers)\n"
+ " -v, --verifier Generate verifier\n"
+ " -r, --recursive Recursively generate included schema files\n"
+ " -a Generate all (like -cwvr)\n"
+ " -g Use _get suffix only to avoid conflicts\n"
+ " -d Dependency file like gcc -MMD\n"
+ " -I<inpath> Search path for include files (multiple allowed)\n"
+ " -o<outpath> Write files relative to this path (dir must exist)\n"
+ " --stdout Concatenate all output to stdout\n"
+ " --outfile=<file> Like --stdout, but to a file.\n"
+ " --depfile=<file> Dependency file like gcc -MF.\n"
+ " --deptarget=<file> Override --depfile target like gcc -MT.\n"
+ " --prefix=<prefix> Add prefix to all generated names (no _ added)\n"
+ " --common-prefix=<prefix> Replace 'flatbuffers' prefix in common files\n"
+#if FLATCC_REFLECTION
+ " --schema Generate binary schema (.bfbs)\n"
+ " --schema-length=no Add length prefix to binary schema\n"
+#endif
+ " --verifier Generate verifier for schema\n"
+ " --json-parser Generate json parser for schema\n"
+ " --json-printer Generate json printer for schema\n"
+ " --json Generate both json parser and printer for schema\n"
+ " --version Show version\n"
+ " -h | --help Help message\n"
+ );
+}
+
+void help(FILE *fp)
+{
+ usage(fp);
+ fprintf(fp,
+ "\n"
+ "This is a flatbuffer compatible compiler implemented in C generating C\n"
+ "source. It is largely compatible with the flatc compiler provided by\n"
+ "Google Fun Propulsion Lab but does not support JSON objects or binary\n"
+ "schema.\n"
+ "\n"
+ "By example 'flatcc monster.fbs' generates a 'monster.h' file which\n"
+ "provides functions to read a flatbuffer. A common include header is also\n"
+ "required. The common file is generated with the -c option. The reader\n"
+ "has no external dependencies.\n"
+ "\n"
+ "The -w (--builder) option enables code generation to build buffers:\n"
+ "`flatbuffers -w monster.fbs` will generate `monster.h` and\n"
+ "`monster_builder.h`, and also a builder specific common file with the\n"
+ "-cw option. The builder must link with the extern `flatbuilder` library.\n"
+ "\n"
+ "-v (--verifier) generates a verifier file per schema. It depends on the\n"
+ "runtime library but not on other generated files, except other included\n"
+ "verifiers.\n"
+ "\n"
+ "-r (--recursive) generates all schema included recursively.\n"
+ "\n"
+ "--reader is the default option to generate reader output but can be used\n"
+ "explicitly together with other options that would otherwise disable it.\n"
+ "\n"
+ "All C output can be concated to a single file using --stdout or\n"
+ "--outfile with content produced in dependency order. The outfile is\n"
+ "relative to cwd.\n"
+ "\n"
+ "-g Only add '_get' suffix to read accessors such that, for example,\n"
+ "only 'Monster_name_get(monster)` will be generated and not also\n"
+ "'Monster_name(monster)'. This avoids potential conflicts with\n"
+ "other generated symbols when a schema change is impractical.\n"
+ "\n"
+ "-d generates a dependency file, e.g. 'monster.fbs.d' in the output dir.\n"
+ "\n"
+ "--depfile implies -d but accepts an explicit filename with a path\n"
+ "relative to cwd. The dependency files content is a gnu make rule with a\n"
+ "target followed by the included schema files The target must match how\n"
+ "it is seen by the rest of the build system and defaults to e.g.\n"
+ "'monster_reader.h' or 'monster.bfbs' paths relative to the working\n"
+ "directory.\n"
+ "\n"
+ "--deptarget overrides the default target for --depfile, simiar to gcc -MT.\n"
+ "\n"
+
+#if FLATCC_REFLECTION
+ "--schema will generate a binary .bfbs file for each top-level schema file.\n"
+ "Can be used with --stdout if no C output is specified. When used with multiple\n"
+ "files --schema-length=yes is recommend.\n"
+ "\n"
+ "--schema-length adds a length prefix of type uoffset_t to binary schema so\n"
+ "they can be concatenated - the aligned buffer starts after the prefix.\n"
+ "\n"
+#else
+ "Flatbuffers binary schema support (--schema) has been disabled."
+ "\n"
+#endif
+ "--json-parser generates a file that implements a fast typed json parser for\n"
+ "the schema. It depends on some flatcc headers and the runtime library but\n"
+ "not on other generated files except other parsers from included schema.\n"
+ "\n"
+ "--json-printer generates a file that implements json printers for the schema\n"
+ "and has dependencies similar to --json-parser.\n"
+ "\n"
+ "--json is generates both printer and parser.\n"
+ "\n"
+#if FLATCC_REFLECTION
+#if 0 /* Disable deprecated features. */
+ "DEPRECATED:\n"
+ " --schema-namespace controls if typenames in schema are prefixed a namespace.\n"
+ " namespaces should always be present.\n"
+ "\n"
+#endif
+#endif
+ "The generated source can redefine offset sizes by including a modified\n"
+ "`flatcc_types.h` file. The flatbuilder library must then be compiled with the\n"
+ "same `flatcc_types.h` file. In this case --prefix and --common-prefix options\n"
+ "may be helpful to avoid conflict with standard offset sizes.\n"
+ "\n"
+ "The output size may seem bulky, but most content is rarely used inline\n"
+ "functions and macros. The compiled binary need not be large.\n"
+ "\n"
+ "The generated source assumes C11 functionality for alignment, compile\n"
+ "time assertions and inline functions but an optional set of portability\n"
+ "headers can be included to work with most any compiler. The portability\n"
+ "layer is not throughly tested so a platform specific test is required\n"
+ "before production use. Upstream patches are welcome.\n");
+}
+
+enum { noarg, suffixarg, nextarg };
+
+int parse_bool_arg(const char *a)
+{
+ if (strcmp(a, "0") == 0 || strcmp(a, "no") == 0) {
+ return 0;
+ }
+ if (strcmp(a, "1") == 0 || strcmp(a, "yes") == 0) {
+ return 1;
+ }
+ fprintf(stderr, "invalid boolean argument: '%s', must be '0', '1', 'yes' or 'no'\n", a);
+ return -1;
+}
+
+int match_long_arg(const char *option, const char *s, size_t n)
+{
+ return strncmp(option, s, n) == 0 && strlen(option) == n;
+}
+
+int set_opt(flatcc_options_t *opts, const char *s, const char *a)
+{
+ int ret = noarg;
+ size_t n = strlen(s);
+ const char *v = strchr(s, '=');
+ if (v) {
+ a = v + 1;
+ n = (size_t)(v - s);
+ }
+ if (*s == 'h' || 0 == strcmp("-help", s)) {
+ /* stdout so less and more works. */
+ help(stdout);
+ exit(0);
+ }
+ if (0 == strcmp("-version", s)) {
+ fprintf(stdout, "%s\n", TITLE);
+ fprintf(stdout, "version: %s\n", VERSION);
+ exit(0);
+ }
+ if (0 == strcmp("-stdout", s)) {
+ opts->gen_stdout = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-common", s)) {
+ opts->cgen_common_reader = 1;
+ opts->cgen_common_builder = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-common_reader", s)) {
+ opts->cgen_common_reader = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-common_builder", s)) {
+ opts->cgen_common_builder = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-reader", s)) {
+ opts->cgen_reader = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-builder", s)) {
+ opts->cgen_builder = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-verifier", s)) {
+ opts->cgen_verifier = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-recursive", s)) {
+ opts->cgen_recursive = 1;
+ return noarg;
+ }
+#if FLATCC_REFLECTION
+ if (0 == strcmp("-schema", s)) {
+ opts->bgen_bfbs = 1;
+ return noarg;
+ }
+#endif
+ if (0 == strcmp("-json-parser", s)) {
+ opts->cgen_json_parser = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-json-printer", s)) {
+ opts->cgen_json_printer = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-json", s)) {
+ opts->cgen_json_parser = 1;
+ opts->cgen_json_printer = 1;
+ return noarg;
+ }
+#if FLATCC_REFLECTION
+#if 0 /* Disable deprecated features. */
+ if (match_long_arg("-schema-namespace", s, n)) {
+ fprintf(stderr, "warning: --schema-namespace is deprecated\n"
+ " a namespace is added by default and should always be present\n");
+ if (!a) {
+ fprintf(stderr, "--schema-namespace option needs an argument\n");
+ exit(-1);
+ }
+ if(0 > (opts->bgen_qualify_names = parse_bool_arg(a))) {
+ exit(-1);
+ }
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-schema-length", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--schema-length option needs an argument\n");
+ exit(-1);
+ }
+ if(0 > (opts->bgen_length_prefix = parse_bool_arg(a))) {
+ exit(-1);
+ }
+ return v ? noarg : nextarg;
+ }
+#endif
+#endif
+ if (match_long_arg("-depfile", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--depfile option needs an argument\n");
+ exit(-1);
+ }
+ opts->gen_depfile = a;
+ opts->gen_dep = 1;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-deptarget", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--deptarget option needs an argument\n");
+ exit(-1);
+ }
+ opts->gen_deptarget = a;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-outfile", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--outfile option needs an argument\n");
+ exit(-1);
+ }
+ opts->gen_outfile= a;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-common-prefix", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--common-prefix option needs an argument\n");
+ exit(-1);
+ }
+ opts->nsc = a;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-prefix", s, n)) {
+ if (!a) {
+ fprintf(stderr, "-n option needs an argument\n");
+ exit(-1);
+ }
+ opts->ns = a;
+ return v ? noarg : nextarg;
+ }
+ switch (*s) {
+ case '-':
+ fprintf(stderr, "invalid option: -%s\n", s);
+ exit(-1);
+ case 'I':
+ if (s[1]) {
+ ret = suffixarg;
+ a = s + 1;
+ } else if (!a) {
+ fprintf(stderr, "-I option needs an argument\n");
+ exit(-1);
+ } else {
+ ret = nextarg;
+ }
+ opts->inpaths[opts->inpath_count++] = a;
+ return ret;
+ case 'o':
+ if (opts->outpath) {
+ fprintf(stderr, "-o option can only be specified once\n");
+ exit(-1);
+ }
+ if (s[1]) {
+ ret = suffixarg;
+ a = s + 1;
+ } else if (!a) {
+ fprintf(stderr, "-o option needs an argument\n");
+ exit(-1);
+ } else {
+ ret = nextarg;
+ }
+ opts->outpath = a;
+ return ret;
+ case 'w':
+ opts->cgen_builder = 1;
+ return noarg;
+ case 'v':
+ opts->cgen_verifier = 1;
+ return noarg;
+ case 'c':
+ opts->cgen_common_reader = 1;
+ opts->cgen_common_builder = 1;
+ return noarg;
+ case 'r':
+ opts->cgen_recursive = 1;
+ return noarg;
+ case 'g':
+ opts->cgen_no_conflicts = 1;
+ return noarg;
+ case 'd':
+ opts->gen_dep = 1;
+ return noarg;
+ case 'a':
+ opts->cgen_reader = 1;
+ opts->cgen_builder = 1;
+ opts->cgen_verifier = 1;
+ opts->cgen_common_reader = 1;
+ opts->cgen_common_builder = 1;
+ opts->cgen_recursive = 1;
+ return noarg;
+ default:
+ fprintf(stderr, "invalid option: -%c\n", *s);
+ exit(-1);
+ }
+ return noarg;
+}
+
+int get_opt(flatcc_options_t *opts, const char *s, const char *a)
+{
+ if (s[1] == '-') {
+ return nextarg == set_opt(opts, s + 1, a);
+ }
+ ++s;
+ if (*s == 0) {
+ fprintf(stderr, "- is not a valid option\n");
+ exit(-1);
+ }
+ while (*s) {
+ switch (set_opt(opts, s, a)) {
+ case noarg:
+ ++s;
+ continue;
+ case suffixarg:
+ return 0;
+ case nextarg:
+ return 1;
+ }
+ }
+ return noarg;
+}
+
+void parse_opts(int argc, const char *argv[], flatcc_options_t *opts)
+{
+ int i;
+ const char *s, *a;
+
+ for (i = 1; i < argc; ++i) {
+ if (argv[i][0] == '-') {
+ s = argv[i];
+ a = i + 1 < argc ? argv[i + 1] : 0;
+ i += get_opt(opts, s, a);
+ } else {
+ opts->srcpaths[opts->srcpath_count++] = argv[i];
+ }
+ }
+}
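+/*
+ * Illustrative walk-through (not part of the upstream source): for a
+ * hypothetical invocation
+ *
+ *     flatcc -a -o build monster.fbs
+ *
+ * parse_opts() routes "-a" and "-o" through get_opt()/set_opt(): 'a'
+ * enables reader, builder, verifier, common reader/builder and recursive
+ * output, 'o' consumes the following "build" argument as opts->outpath
+ * (the nextarg case), and "monster.fbs" is appended to opts->srcpaths.
+ */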
+
+int main(int argc, const char *argv[])
+{
+ flatcc_options_t opts;
+ flatcc_context_t ctx = 0;
+ int i, ret, cgen;
+ const char **src;
+
+ ctx = 0;
+ ret = 0;
+ if (argc < 2) {
+ usage(stderr);
+ exit(-1);
+ }
+ flatcc_init_options(&opts);
+ if (!(opts.inpaths = malloc((size_t)argc * sizeof(char *)))) {
+ fprintf(stderr, "memory allocation failure\n");
+ exit(-1);
+ }
+ if (!(opts.srcpaths = malloc((size_t)argc * sizeof(char *)))) {
+ fprintf(stderr, "memory allocation failure\n");
+ free((void *)opts.inpaths);
+ exit(-1);
+ }
+
+ parse_opts(argc, argv, &opts);
+ if (opts.cgen_builder && opts.cgen_common_reader) {
+ opts.cgen_common_builder = 1;
+ }
+ if (opts.srcpath_count == 0) {
+ /* No input files, so only generate header(s). */
+ if (!(opts.cgen_common_reader || opts.cgen_common_builder) || opts.bgen_bfbs) {
+ fprintf(stderr, "filename missing\n");
+ goto fail;
+ }
+ if (!(ctx = flatcc_create_context(&opts, 0, 0, 0))) {
+ fprintf(stderr, "internal error: failed to create parsing context\n");
+ goto fail;
+ }
+ if (flatcc_generate_files(ctx)) {
+ goto fail;
+ }
+ flatcc_destroy_context(ctx);
+ ctx = 0;
+ goto done;
+ }
+ cgen = opts.cgen_reader || opts.cgen_builder || opts.cgen_verifier
+ || opts.cgen_common_reader || opts.cgen_common_builder
+ || opts.cgen_json_parser || opts.cgen_json_printer;
+ if (!opts.bgen_bfbs && (!cgen || opts.cgen_builder || opts.cgen_verifier)) {
+ /* Default to the reader output when no other output is specified, or when builder/verifier output depends on it. */
+ opts.cgen_reader = 1;
+ }
+ if (opts.bgen_bfbs && cgen) {
+ if (opts.gen_stdout) {
+ fprintf(stderr, "--stdout cannot be used with mixed text and binary output");
+ goto fail;
+ }
+ if (opts.gen_outfile) {
+ fprintf(stderr, "--outfile cannot be used with mixed text and binary output");
+ goto fail;
+ }
+ }
+ if (opts.gen_deptarget && !opts.gen_depfile) {
+ fprintf(stderr, "--deptarget cannot be used without --depfile");
+ goto fail;
+ }
+ if (opts.gen_stdout && opts.gen_outfile) {
+ fprintf(stderr, "--outfile cannot be used with --stdout");
+ goto fail;
+ }
+ for (i = 0, src = opts.srcpaths; i < opts.srcpath_count; ++i, ++src) {
+ if (!(ctx = flatcc_create_context(&opts, *src, 0, 0))) {
+ fprintf(stderr, "internal error: failed to create parsing context\n");
+ goto fail;
+ }
+ if (flatcc_parse_file(ctx, *src)) {
+ goto fail;
+ }
+ if (flatcc_generate_files(ctx)) {
+ goto fail;
+ }
+ flatcc_destroy_context(ctx);
+ ctx = 0;
+ /* for --stdout and --outfile options: append to file and skip generating common headers. */
+ opts.gen_append = 1;
+ }
+ goto done;
+fail:
+ ret = -1;
+done:
+ if (ctx) {
+ flatcc_destroy_context(ctx);
+ ctx = 0;
+ }
+ if (ret) {
+ fprintf(stderr, "output failed\n");
+ }
+ free((void *)opts.inpaths);
+ free((void *)opts.srcpaths);
+ return ret;
+}
diff --git a/src/compiler/CMakeLists.txt b/src/compiler/CMakeLists.txt
new file mode 100644
index 0000000..ce31819
--- /dev/null
+++ b/src/compiler/CMakeLists.txt
@@ -0,0 +1,43 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/external"
+ "${PROJECT_SOURCE_DIR}/include"
+ "${PROJECT_SOURCE_DIR}/config"
+)
+
+set (SOURCES
+ ${PROJECT_SOURCE_DIR}/external/hash/cmetrohash64.c
+ ${PROJECT_SOURCE_DIR}/external/hash/str_set.c
+ ${PROJECT_SOURCE_DIR}/external/hash/ptr_set.c
+ hash_tables/symbol_table.c
+ hash_tables/scope_table.c
+ hash_tables/name_table.c
+ hash_tables/schema_table.c
+ hash_tables/value_set.c
+ fileio.c
+ parser.c
+ semantics.c
+ coerce.c
+ flatcc.c
+ codegen_c.c
+ codegen_c_reader.c
+ codegen_c_sort.c
+ codegen_c_builder.c
+ codegen_c_verifier.c
+ codegen_c_sorter.c
+ codegen_c_json_parser.c
+ codegen_c_json_printer.c
+ # needed for building binary schema
+ ../runtime/builder.c
+ ../runtime/emitter.c
+ ../runtime/refmap.c
+)
+
+if (FLATCC_REFLECTION)
+ set (SOURCES ${SOURCES} codegen_schema.c)
+endif(FLATCC_REFLECTION)
+
+add_library(flatcc ${SOURCES})
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatcc DESTINATION ${lib_dir})
+endif()
diff --git a/src/compiler/catalog.h b/src/compiler/catalog.h
new file mode 100644
index 0000000..de2947f
--- /dev/null
+++ b/src/compiler/catalog.h
@@ -0,0 +1,217 @@
+#ifndef CATALOG_H
+#define CATALOG_H
+
+#include <stdlib.h>
+#include "symbols.h"
+
+/* Helper to build more intuitive schema data with fully qualified names. */
+
+
+typedef struct entry entry_t;
+typedef entry_t object_entry_t;
+typedef entry_t enum_entry_t;
+typedef entry_t service_entry_t;
+typedef struct scope_entry scope_entry_t;
+
+struct entry {
+ fb_compound_type_t *ct;
+ char *name;
+};
+
+struct scope_entry {
+ fb_scope_t *scope;
+ char *name;
+};
+
+typedef struct catalog catalog_t;
+
+struct catalog {
+ int qualify_names;
+ int nobjects;
+ int nenums;
+ int nservices;
+ size_t name_table_size;
+ object_entry_t *objects;
+ enum_entry_t *enums;
+ service_entry_t *services;
+ char *name_table;
+ object_entry_t *next_object;
+ enum_entry_t *next_enum;
+ service_entry_t *next_service;
+ char *next_name;
+ fb_schema_t *schema;
+};
+
+#include <stdio.h>
+
+static void count_symbol(void *context, fb_symbol_t *sym)
+{
+ catalog_t *catalog = context;
+ fb_ref_t *scope_name;
+ size_t n = 0;
+ fb_compound_type_t *ct;
+
+ if (!(ct = get_compound_if_visible(catalog->schema, sym))) {
+ return;
+ }
+
+ /*
+ * Find out how much space the name requires. We must store each
+ * name in full for sorting because comparing a variable number of
+ * parent scope names is otherwise tricky.
+ */
+ if (catalog->qualify_names) {
+ scope_name = ct->scope->name;
+ while (scope_name) {
+ /* + 1 for '.'. */
+ n += (size_t)scope_name->ident->len + 1;
+ scope_name = scope_name->link;
+ }
+ }
+ /* + 1 for '\0'. */
+ n += (size_t)sym->ident->len + 1;
+ catalog->name_table_size += n;
+
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ ++catalog->nobjects;
+ break;
+ case fb_is_union:
+ case fb_is_enum:
+ ++catalog->nenums;
+ break;
+ case fb_is_rpc_service:
+ ++catalog->nservices;
+ break;
+ default: return;
+ }
+}
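+/*
+ * Sketch of the space accounting above (illustrative only): with
+ * qualify_names set, a symbol Monster in scope MyGame.Sample contributes
+ * "MyGame.Sample.Monster\0" to the name table - each scope part counts
+ * ident->len + 1 bytes for the '.' and the symbol counts ident->len + 1
+ * bytes for the terminating '\0' - which matches exactly what
+ * install_symbol() writes later.
+ */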
+
+static void install_symbol(void *context, fb_symbol_t *sym)
+{
+ catalog_t *catalog = context;
+ fb_ref_t *scope_name;
+ int n = 0;
+ char *s, *name;
+ fb_compound_type_t *ct;
+
+ if (!(ct = get_compound_if_visible(catalog->schema, sym))) {
+ return;
+ }
+
+ s = catalog->next_name;
+ name = s;
+ if (catalog->qualify_names) {
+ scope_name = ct->scope->name;
+ while (scope_name) {
+ n = (int)scope_name->ident->len;
+ memcpy(s, scope_name->ident->text, (size_t)n);
+ s += n;
+ *s++ = '.';
+ scope_name = scope_name->link;
+ }
+ }
+ n = (int)sym->ident->len;
+ memcpy(s, sym->ident->text, (size_t)n);
+ s += n;
+ *s++ = '\0';
+ catalog->next_name = s;
+
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ catalog->next_object->ct = (fb_compound_type_t *)sym;
+ catalog->next_object->name = name;
+ catalog->next_object++;
+ break;
+ case fb_is_union:
+ case fb_is_enum:
+ catalog->next_enum->ct = (fb_compound_type_t *)sym;
+ catalog->next_enum->name = name;
+ catalog->next_enum++;
+ break;
+ case fb_is_rpc_service:
+ catalog->next_service->ct = (fb_compound_type_t *)sym;
+ catalog->next_service->name = name;
+ catalog->next_service++;
+ break;
+ default: break;
+ }
+}
+
+static void count_symbols(void *context, fb_scope_t *scope)
+{
+ fb_symbol_table_visit(&scope->symbol_index, count_symbol, context);
+}
+
+static void install_symbols(void *context, fb_scope_t *scope)
+{
+ fb_symbol_table_visit(&scope->symbol_index, install_symbol, context);
+}
+
+static int compare_entries(const void *x, const void *y)
+{
+ return strcmp(((const entry_t *)x)->name, ((const entry_t *)y)->name);
+}
+
+static void sort_entries(entry_t *entries, int count)
+{
+ int i;
+
+ qsort(entries, (size_t)count, sizeof(entries[0]), compare_entries);
+
+ for (i = 0; i < count; ++i) {
+ entries[i].ct->export_index = (size_t)i;
+ }
+}
+
+static void clear_catalog(catalog_t *catalog)
+{
+ if (catalog->objects) {
+ free(catalog->objects);
+ }
+ if (catalog->enums) {
+ free(catalog->enums);
+ }
+ if (catalog->services) {
+ free(catalog->services);
+ }
+ if (catalog->name_table) {
+ free(catalog->name_table);
+ }
+ memset(catalog, 0, sizeof(*catalog));
+}
+
+static int build_catalog(catalog_t *catalog, fb_schema_t *schema, int qualify_names, fb_scope_table_t *index)
+{
+ memset(catalog, 0, sizeof(*catalog));
+ catalog->qualify_names = qualify_names;
+ catalog->schema = schema;
+
+ /* Build support data structures before export. */
+ fb_scope_table_visit(index, count_symbols, catalog);
+ catalog->objects = calloc((size_t)catalog->nobjects, sizeof(catalog->objects[0]));
+ catalog->enums = calloc((size_t)catalog->nenums, sizeof(catalog->enums[0]));
+ catalog->services = calloc((size_t)catalog->nservices, sizeof(catalog->services[0]));
+ catalog->name_table = malloc(catalog->name_table_size);
+ catalog->next_object = catalog->objects;
+ catalog->next_enum = catalog->enums;
+ catalog->next_service = catalog->services;
+ catalog->next_name = catalog->name_table;
+ if ((!catalog->objects && catalog->nobjects > 0) ||
+ (!catalog->enums && catalog->nenums > 0) ||
+ (!catalog->services && catalog->nservices > 0) ||
+ (!catalog->name_table && catalog->name_table_size > 0)) {
+ clear_catalog(catalog);
+ return -1;
+ }
+ fb_scope_table_visit(index, install_symbols, catalog);
+ /* Presort objects and enums because the sorted index is required in Type tables. */
+ sort_entries(catalog->objects, catalog->nobjects);
+ sort_entries(catalog->enums, catalog->nenums);
+ sort_entries(catalog->services, catalog->nservices);
+ return 0;
+}
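+/*
+ * Hypothetical usage sketch (caller names assumed, not defined here):
+ *
+ *     catalog_t catalog;
+ *     if (build_catalog(&catalog, S, 1, &S->root_schema->scope_index))
+ *         return -1;
+ *     // catalog.objects[0..nobjects) and catalog.enums[0..nenums) are now
+ *     // sorted by fully qualified name and export_index has been assigned.
+ *     clear_catalog(&catalog);
+ */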
+
+#endif /* CATALOG_H */
diff --git a/src/compiler/codegen.h b/src/compiler/codegen.h
new file mode 100644
index 0000000..2798767
--- /dev/null
+++ b/src/compiler/codegen.h
@@ -0,0 +1,46 @@
+#ifndef CODEGEN_H
+#define CODEGEN_H
+
+#include "symbols.h"
+#include "parser.h"
+
+typedef struct fb_output fb_output_t;
+
+struct fb_output {
+ /*
+ * Common namespace shared across files. It may differ from the schema
+ * namespace so that common type names stay consistent.
+ */
+ char nsc[FLATCC_NAMESPACE_MAX + 2];
+ char nscup[FLATCC_NAMESPACE_MAX + 2];
+
+ FILE *fp;
+ fb_schema_t *S;
+ fb_options_t *opts;
+ fb_scope_t *current_scope;
+ int indent;
+ int spacing;
+ int tmp_indent;
+};
+
+int __flatcc_fb_init_output_c(fb_output_t *out, fb_options_t *opts);
+#define fb_init_output_c __flatcc_fb_init_output_c
+void __flatcc_fb_end_output_c(fb_output_t *out);
+#define fb_end_output_c __flatcc_fb_end_output_c
+
+int __flatcc_fb_codegen_common_c(fb_output_t *out);
+#define fb_codegen_common_c __flatcc_fb_codegen_common_c
+
+int __flatcc_fb_codegen_c(fb_output_t *out, fb_schema_t *S);
+#define fb_codegen_c __flatcc_fb_codegen_c
+
+void *__flatcc_fb_codegen_bfbs_to_buffer(fb_options_t *opts, fb_schema_t *S, void *buffer, size_t *size);
+#define fb_codegen_bfbs_to_buffer __flatcc_fb_codegen_bfbs_to_buffer
+
+void *__flatcc_fb_codegen_bfbs_alloc_buffer(fb_options_t *opts, fb_schema_t *S, size_t *size);
+#define fb_codegen_bfbs_alloc_buffer __flatcc_fb_codegen_bfbs_alloc_buffer
+
+int __flatcc_fb_codegen_bfbs_to_file(fb_options_t *opts, fb_schema_t *S);
+#define fb_codegen_bfbs_to_file __flatcc_fb_codegen_bfbs_to_file
+
+#endif /* CODEGEN_H */
diff --git a/src/compiler/codegen_c.c b/src/compiler/codegen_c.c
new file mode 100644
index 0000000..5e5fe0e
--- /dev/null
+++ b/src/compiler/codegen_c.c
@@ -0,0 +1,285 @@
+#include "codegen_c.h"
+#include "fileio.h"
+#include "pstrutil.h"
+#include "../../external/hash/str_set.h"
+
+int fb_open_output_file(fb_output_t *out, const char *name, size_t len, const char *ext)
+{
+ char *path;
+ int ret;
+ const char *prefix = out->opts->outpath ? out->opts->outpath : "";
+ size_t prefix_len = strlen(prefix);
+
+ if (out->fp) {
+ return 0;
+ }
+ checkmem((path = fb_create_join_path_n(prefix, prefix_len, name, len, ext, 1)));
+ out->fp = fopen(path, "wb");
+ ret = 0;
+ if (!out->fp) {
+ fprintf(stderr, "error opening file for write: %s\n", path);
+ ret = -1;
+ }
+ free(path);
+ return ret;
+}
+
+void fb_close_output_file(fb_output_t *out)
+{
+ /* Concatenate covers either stdout or a file. */
+ if (!out->opts->gen_outfile && !out->opts->gen_stdout && out->fp) {
+ fclose(out->fp);
+ out->fp = 0;
+ }
+ /* Keep out->fp open for next file. */
+}
+
+void fb_end_output_c(fb_output_t *out)
+{
+ if (out->fp != stdout && out->fp) {
+ fclose(out->fp);
+ }
+ out->fp = 0;
+}
+
+/*
+ * If used with --stdout or concat=<file>, we assume there
+ * are no other language outputs at the same time.
+ */
+int fb_init_output_c(fb_output_t *out, fb_options_t *opts)
+{
+ const char *nsc;
+ char *path = 0;
+ size_t n;
+ const char *mode = opts->gen_append ? "ab" : "wb";
+ const char *prefix = opts->outpath ? opts->outpath : "";
+ int ret = -1;
+
+ memset(out, 0, sizeof(*out));
+ out->opts = opts;
+ nsc = opts->nsc;
+ if (nsc) {
+ n = strlen(opts->nsc);
+ if (n > FLATCC_NAMESPACE_MAX) {
+ fprintf(stderr, "common namespace argument is limited to %i characters\n", (int)FLATCC_NAMESPACE_MAX);
+ return -1;
+ }
+ } else {
+ nsc = FLATCC_DEFAULT_NAMESPACE_COMMON;
+ n = strlen(nsc);
+ }
+ strncpy(out->nsc, nsc, FLATCC_NAMESPACE_MAX);
+ out->nsc[FLATCC_NAMESPACE_MAX] = '\0';
+ if (n) {
+ out->nsc[n] = '_';
+ out->nsc[n + 1] = '\0';
+ }
+ pstrcpyupper(out->nscup, out->nsc);
+ out->nscup[n] = '\0'; /* No trailing _ */
+ out->spacing = opts->cgen_spacing;
+ if (opts->gen_stdout) {
+ out->fp = stdout;
+ return 0;
+ }
+ if (!out->opts->gen_outfile) {
+ /* Normal operation: write to multiple header files. */
+ return 0;
+ }
+ checkmem((path = fb_create_join_path(prefix, out->opts->gen_outfile, "", 1)));
+ out->fp = fopen(path, mode);
+ if (!out->fp) {
+ fprintf(stderr, "error opening file for write: %s\n", path);
+ ret = -1;
+ goto done;
+ }
+ ret = 0;
+done:
+ if (path) {
+ free(path);
+ }
+ return ret;
+}
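+/*
+ * Illustrative effect of the namespace handling above: with
+ * --common-prefix MyGame, out->nsc becomes "MyGame_" (trailing underscore
+ * added) and out->nscup becomes "MYGAME" (uppercased, underscore
+ * stripped); without the option the FLATCC_DEFAULT_NAMESPACE_COMMON
+ * default is used instead.
+ */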
+
+static void _str_set_destructor(void *context, char *item)
+{
+ (void)context;
+
+ free(item);
+}
+
+/*
+ * Removal of duplicate inclusions is only for a cleaner output - it is
+ * not strictly necessary because the preprocessor handles include
+ * guards. The guards are still required to deal with concatenated
+ * files unless we generate special code for concatenation.
+ */
+void fb_gen_c_includes(fb_output_t *out, const char *ext, const char *extup)
+{
+ fb_include_t *inc = out->S->includes;
+ char *basename, *basenameup, *s;
+ str_set_t set;
+
+ fb_clear(set);
+
+ /* Don't include our own file. */
+ str_set_insert_item(&set, fb_copy_path(out->S->basenameup), ht_keep);
+ while (inc) {
+ checkmem((basename = fb_create_basename(
+ inc->name.s.s, (size_t)inc->name.s.len, out->opts->default_schema_ext)));
+ inc = inc->link;
+ checkmem((basenameup = fb_copy_path(basename)));
+ s = basenameup;
+ while (*s) {
+ *s = (char)toupper(*s);
+ ++s;
+ }
+ if (str_set_insert_item(&set, basenameup, ht_keep)) {
+ free(basenameup);
+ free(basename);
+ continue;
+ }
+ /* The include guard is needed when concatenating output. */
+ fprintf(out->fp,
+ "#ifndef %s%s\n"
+ "#include \"%s%s\"\n"
+ "#endif\n",
+ basenameup, extup, basename, ext);
+ free(basename);
+ /* `basenameup` stored in str_set. */
+ }
+ str_set_destroy(&set, _str_set_destructor, 0);
+}
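+/*
+ * Example of the emitted include block, assuming the reader generator
+ * passes ext="_reader.h" and extup="_READER_H": an included schema
+ * "weapon.fbs" would produce
+ *
+ *     #ifndef WEAPON_READER_H
+ *     #include "weapon_reader.h"
+ *     #endif
+ *
+ * so concatenated output remains harmless to the preprocessor.
+ */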
+
+int fb_copy_scope(fb_scope_t *scope, char *buf)
+{
+ size_t n, len;
+ fb_ref_t *name;
+
+ len = (size_t)scope->prefix.len;
+ for (name = scope->name; name; name = name->link) {
+ n = (size_t)name->ident->len;
+ len += n + 1;
+ }
+ if (len > FLATCC_NAMESPACE_MAX + 1) {
+ buf[0] = '\0';
+ return -1;
+ }
+ len = (size_t)scope->prefix.len;
+ memcpy(buf, scope->prefix.s, len);
+ for (name = scope->name; name; name = name->link) {
+ n = (size_t)name->ident->len;
+ memcpy(buf + len, name->ident->text, n);
+ len += n + 1;
+ buf[len - 1] = '_';
+ }
+ buf[len] = '\0';
+ return (int)len;
+}
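+/*
+ * Illustrative example: for a schema namespace MyGame.Sample and an empty
+ * prefix, fb_copy_scope() writes "MyGame_Sample_" into buf and returns 14;
+ * a scope exceeding FLATCC_NAMESPACE_MAX yields an empty buffer and -1.
+ */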
+
+void fb_scoped_symbol_name(fb_scope_t *scope, fb_symbol_t *sym, fb_scoped_name_t *sn)
+{
+ fb_token_t *t = sym->ident;
+
+ if (sn->scope != scope) {
+ if (0 > (sn->scope_len = fb_copy_scope(scope, sn->text))) {
+ sn->scope_len = 0;
+ fprintf(stderr, "skipping too long namespace\n");
+ }
+ }
+ sn->len = (int)t->len;
+ sn->total_len = sn->scope_len + sn->len;
+ if (sn->total_len > FLATCC_NAME_BUFSIZ - 1) {
+ fprintf(stderr, "warning: truncating identifier: %.*s\n", sn->len, t->text);
+ sn->len = FLATCC_NAME_BUFSIZ - sn->scope_len - 1;
+ sn->total_len = sn->scope_len + sn->len;
+ }
+ memcpy(sn->text + sn->scope_len, t->text, (size_t)sn->len);
+ sn->text[sn->total_len] = '\0';
+}
+
+int fb_codegen_common_c(fb_output_t *out)
+{
+ size_t nsc_len;
+ int ret;
+
+ nsc_len = strlen(out->nsc) - 1;
+ ret = 0;
+ if (out->opts->cgen_common_reader) {
+ if (fb_open_output_file(out, out->nsc, nsc_len, "_common_reader.h")) {
+ return -1;
+ }
+ ret = fb_gen_common_c_header(out);
+ fb_close_output_file(out);
+ }
+ if (!ret && out->opts->cgen_common_builder) {
+ if (fb_open_output_file(out, out->nsc, nsc_len, "_common_builder.h")) {
+ return -1;
+ }
+ fb_gen_common_c_builder_header(out);
+ fb_close_output_file(out);
+ }
+ return ret;
+}
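+/*
+ * With the default common namespace this emits flatbuffers_common_reader.h
+ * and flatbuffers_common_builder.h - nsc_len strips the trailing '_' from
+ * out->nsc before the suffix is appended.
+ */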
+
+int fb_codegen_c(fb_output_t *out, fb_schema_t *S)
+{
+ size_t basename_len;
+ /* OK if no files were processed. */
+ int ret = 0;
+
+ out->S = S;
+ out->current_scope = fb_scope_table_find(&S->root_schema->scope_index, 0, 0);
+ basename_len = strlen(out->S->basename);
+ if (out->opts->cgen_reader) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_reader.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_reader(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_builder) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_builder.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_builder(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_verifier) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_verifier.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_verifier(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_json_parser) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_json_parser.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_json_parser(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_json_printer) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_json_printer.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_json_printer(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+done:
+ return ret;
+}
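+/*
+ * Example (assuming a schema file monster.fbs): depending on the enabled
+ * options this emits monster_reader.h, monster_builder.h,
+ * monster_verifier.h, monster_json_parser.h and/or monster_json_printer.h
+ * under the configured output path.
+ */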
diff --git a/src/compiler/codegen_c.h b/src/compiler/codegen_c.h
new file mode 100644
index 0000000..6eba54a
--- /dev/null
+++ b/src/compiler/codegen_c.h
@@ -0,0 +1,397 @@
+#ifndef CODEGEN_C_H
+#define CODEGEN_C_H
+
+#include <assert.h>
+#include <stdarg.h>
+
+#include "symbols.h"
+#include "parser.h"
+#include "codegen.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+#define __FLATCC_ERROR_TYPE "INTERNAL_ERROR_UNEXPECTED_TYPE"
+
+#ifndef gen_panic
+#define gen_panic(context, msg) fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg), assert(0), exit(-1)
+#endif
+
+
+static inline void token_name(fb_token_t *t, int *n, const char **s) {
+ *n = (int)t->len;
+ *s = t->text;
+}
+
+typedef char fb_symbol_text_t[FLATCC_NAME_BUFSIZ];
+typedef struct fb_scoped_name fb_scoped_name_t;
+
+/* Should be zeroed because scope is cached across updates. */
+struct fb_scoped_name {
+ fb_symbol_text_t text;
+ fb_scope_t *scope;
+ int scope_len, len, total_len;
+};
+
+#define fb_clear(x) (memset(&(x), 0, sizeof(x)))
+
+/* Returns length or -1 if length exceeds namespace max. */
+int __flatcc_fb_copy_scope(fb_scope_t *scope, char *buf);
+#define fb_copy_scope __flatcc_fb_copy_scope
+
+void __flatcc_fb_scoped_symbol_name(fb_scope_t *scope, fb_symbol_t *sym, fb_scoped_name_t *sn);
+#define fb_scoped_symbol_name __flatcc_fb_scoped_symbol_name
+
+static inline void fb_compound_name(fb_compound_type_t *ct, fb_scoped_name_t *sn)
+{
+ fb_scoped_symbol_name(ct->scope, &ct->symbol, sn);
+}
+
+static inline void symbol_name(fb_symbol_t *sym, int *n, const char **s) {
+ token_name(sym->ident, n, s);
+}
+
+static inline const char *scalar_type_ns(fb_scalar_type_t scalar_type, const char *ns)
+{
+ return scalar_type == fb_bool ? ns : "";
+}
+
+static inline const char *scalar_type_prefix(fb_scalar_type_t scalar_type)
+{
+ const char *tname;
+ switch (scalar_type) {
+ case fb_ulong:
+ tname = "uint64";
+ break;
+ case fb_uint:
+ tname = "uint32";
+ break;
+ case fb_ushort:
+ tname = "uint16";
+ break;
+ case fb_char:
+ tname = "char";
+ break;
+ case fb_ubyte:
+ tname = "uint8";
+ break;
+ case fb_bool:
+ tname = "bool";
+ break;
+ case fb_long:
+ tname = "int64";
+ break;
+ case fb_int:
+ tname = "int32";
+ break;
+ case fb_short:
+ tname = "int16";
+ break;
+ case fb_byte:
+ tname = "int8";
+ break;
+ case fb_float:
+ tname = "float";
+ break;
+ case fb_double:
+ tname = "double";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ tname = __FLATCC_ERROR_TYPE;
+ break;
+ }
+ return tname;
+}
+
+static inline const char *scalar_type_name(fb_scalar_type_t scalar_type)
+{
+ const char *tname;
+ switch (scalar_type) {
+ case fb_ulong:
+ tname = "uint64_t";
+ break;
+ case fb_uint:
+ tname = "uint32_t";
+ break;
+ case fb_ushort:
+ tname = "uint16_t";
+ break;
+ case fb_char:
+ tname = "char";
+ break;
+ case fb_ubyte:
+ tname = "uint8_t";
+ break;
+ case fb_bool:
+ tname = "bool_t";
+ break;
+ case fb_long:
+ tname = "int64_t";
+ break;
+ case fb_int:
+ tname = "int32_t";
+ break;
+ case fb_short:
+ tname = "int16_t";
+ break;
+ case fb_byte:
+ tname = "int8_t";
+ break;
+ case fb_float:
+ tname = "float";
+ break;
+ case fb_double:
+ tname = "double";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ tname = __FLATCC_ERROR_TYPE;
+ break;
+ }
+ return tname;
+}
+
+static inline const char *scalar_vector_type_name(fb_scalar_type_t scalar_type)
+{
+ const char *tname;
+ switch (scalar_type) {
+ case fb_ulong:
+ tname = "uint64_vec_t";
+ break;
+ case fb_uint:
+ tname = "uint32_vec_t";
+ break;
+ case fb_ushort:
+ tname = "uint16_vec_t";
+ break;
+ case fb_char:
+ tname = "char_vec_t";
+ break;
+ case fb_ubyte:
+ tname = "uint8_vec_t";
+ break;
+ case fb_bool:
+ tname = "uint8_vec_t";
+ break;
+ case fb_long:
+ tname = "int64_vec_t";
+ break;
+ case fb_int:
+ tname = "int32_vec_t";
+ break;
+ case fb_short:
+ tname = "int16_vec_t";
+ break;
+ case fb_byte:
+ tname = "int8_vec_t";
+ break;
+ case fb_float:
+ tname = "float_vec_t";
+ break;
+ case fb_double:
+ tname = "double_vec_t";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ tname = __FLATCC_ERROR_TYPE;
+ break;
+ }
+ return tname;
+}
+
+/* Only for integers. */
+static inline const char *scalar_cast(fb_scalar_type_t scalar_type)
+{
+ const char *cast;
+ switch (scalar_type) {
+ case fb_ulong:
+ cast = "UINT64_C";
+ break;
+ case fb_uint:
+ cast = "UINT32_C";
+ break;
+ case fb_ushort:
+ cast = "UINT16_C";
+ break;
+ case fb_char:
+ cast = "char";
+ break;
+ case fb_ubyte:
+ cast = "UINT8_C";
+ break;
+ case fb_bool:
+ cast = "UINT8_C";
+ break;
+ case fb_long:
+ cast = "INT64_C";
+ break;
+ case fb_int:
+ cast = "INT32_C";
+ break;
+ case fb_short:
+ cast = "INT16_C";
+ break;
+ case fb_byte:
+ cast = "INT8_C";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ cast = "";
+ break;
+ }
+ return cast;
+}
+
+typedef char fb_literal_t[100];
+
+static inline size_t print_literal(fb_scalar_type_t scalar_type, const fb_value_t *value, fb_literal_t literal)
+{
+ const char *cast;
+
+ switch (value->type) {
+ case vt_uint:
+ cast = scalar_cast(scalar_type);
+ return (size_t)sprintf(literal, "%s(%"PRIu64")", cast, (uint64_t)value->u);
+ break;
+ case vt_int:
+ cast = scalar_cast(scalar_type);
+ return (size_t)sprintf(literal, "%s(%"PRId64")", cast, (int64_t)value->i);
+ break;
+ case vt_bool:
+ cast = scalar_cast(scalar_type);
+ return (size_t)sprintf(literal, "%s(%u)", cast, (unsigned)value->b);
+ break;
+ case vt_float:
+ /*
+ * .9g ensures sufficient precision in 32-bit floats and
+ * .17g ensures sufficient precision for 64-bit floats (double).
+ * The '#' forces a decimal point that would not be printed
+ * for integers which would result in the wrong type in C
+ * source.
+ */
+ if (scalar_type == fb_float) {
+ return (size_t)sprintf(literal, "%#.9gf", (float)value->f);
+ } else {
+ return (size_t)sprintf(literal, "%#.17g", (double)value->f);
+ }
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ *literal = 0;
+ return 0;
+ }
+}
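+/*
+ * Illustrative outputs (assumed defaults, not from any schema): a uint32
+ * default of 42 prints as "UINT32_C(42)", a float default of 1 prints as
+ * "1.00000000f" and a double default of 1 prints as "1.0000000000000000",
+ * so the generated initializers keep their integer width and floating
+ * point type.
+ */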
+
+static inline const char *scalar_suffix(fb_scalar_type_t scalar_type)
+{
+ const char *suffix;
+ switch (scalar_type) {
+ case fb_ulong:
+ suffix = "ULL";
+ break;
+ case fb_uint:
+ suffix = "UL";
+ break;
+ case fb_ushort:
+ suffix = "U";
+ break;
+ case fb_char:
+ suffix = "";
+ break;
+ case fb_ubyte:
+ suffix = "U";
+ break;
+ case fb_bool:
+ suffix = "U";
+ break;
+ case fb_long:
+ suffix = "LL";
+ break;
+ case fb_int:
+ suffix = "L";
+ break;
+ case fb_short:
+ suffix = "";
+ break;
+ case fb_byte:
+ suffix = "";
+ break;
+ case fb_double:
+ suffix = "";
+ break;
+ case fb_float:
+ suffix = "F";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ suffix = "";
+ break;
+ }
+ return suffix;
+}
+
+/* See also: https://github.com/philsquared/Catch/issues/376 */
+static inline int gen_prologue(fb_output_t *out)
+{
+ if (out->opts->cgen_pragmas) {
+ fprintf(out->fp, "#include \"flatcc/flatcc_prologue.h\"\n");
+ }
+ return 0;
+}
+
+static inline int gen_epilogue(fb_output_t *out)
+{
+ if (out->opts->cgen_pragmas) {
+ fprintf(out->fp, "#include \"flatcc/flatcc_epilogue.h\"\n");
+ }
+ return 0;
+}
+
+/* This assumes the output context is named out, which it is by convention. */
+#define indent() (out->indent++)
+#define unindent() { assert(out->indent); out->indent--; }
+#define margin() { out->tmp_indent = out->indent; out->indent = 0; }
+#define unmargin() { out->indent = out->tmp_indent; }
+
+/* Redefine names to avoid polluting library namespace. */
+
+int __flatcc_fb_init_output_c(fb_output_t *out, fb_options_t *opts);
+#define fb_init_output_c __flatcc_fb_init_output_c
+
+int __flatcc_fb_open_output_file(fb_output_t *out, const char *name, size_t len, const char *ext);
+#define fb_open_output_file __flatcc_fb_open_output_file
+
+void __flatcc_fb_close_output_file(fb_output_t *out);
+#define fb_close_output_file __flatcc_fb_close_output_file
+
+void __flatcc_fb_gen_c_includes(fb_output_t *out, const char *ext, const char *extup);
+#define fb_gen_c_includes __flatcc_fb_gen_c_includes
+
+int __flatcc_fb_gen_common_c_header(fb_output_t *out);
+#define fb_gen_common_c_header __flatcc_fb_gen_common_c_header
+
+int __flatcc_fb_gen_common_c_builder_header(fb_output_t *out);
+#define fb_gen_common_c_builder_header __flatcc_fb_gen_common_c_builder_header
+
+int __flatcc_fb_gen_c_reader(fb_output_t *out);
+#define fb_gen_c_reader __flatcc_fb_gen_c_reader
+
+int __flatcc_fb_gen_c_builder(fb_output_t *out);
+#define fb_gen_c_builder __flatcc_fb_gen_c_builder
+
+int __flatcc_fb_gen_c_verifier(fb_output_t *out);
+#define fb_gen_c_verifier __flatcc_fb_gen_c_verifier
+
+int __flatcc_fb_gen_c_sorter(fb_output_t *out);
+#define fb_gen_c_sorter __flatcc_fb_gen_c_sorter
+
+int __flatcc_fb_gen_c_json_parser(fb_output_t *out);
+#define fb_gen_c_json_parser __flatcc_fb_gen_c_json_parser
+
+int __flatcc_fb_gen_c_json_printer(fb_output_t *out);
+#define fb_gen_c_json_printer __flatcc_fb_gen_c_json_printer
+
+#endif /* CODEGEN_C_H */
diff --git a/src/compiler/codegen_c_builder.c b/src/compiler/codegen_c_builder.c
new file mode 100644
index 0000000..ffa105d
--- /dev/null
+++ b/src/compiler/codegen_c_builder.c
@@ -0,0 +1,2159 @@
+#include <string.h>
+
+#include "codegen_c.h"
+
+int fb_gen_common_c_builder_header(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ const char *nscup = out->nscup;
+
+ fprintf(out->fp, "#ifndef %s_COMMON_BUILDER_H\n", nscup);
+ fprintf(out->fp, "#define %s_COMMON_BUILDER_H\n", nscup);
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "/* Common FlatBuffers build functionality for C. */\n\n");
+ gen_prologue(out);
+
+ fprintf(out->fp, "#ifndef FLATBUILDER_H\n");
+ fprintf(out->fp, "#include \"flatcc/flatcc_builder.h\"\n");
+ fprintf(out->fp, "#endif\n");
+ if (strcmp(nsc, "flatcc_builder_")) {
+ fprintf(out->fp, "typedef flatcc_builder_t %sbuilder_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_ref_t %sref_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_ref_t %svec_ref_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_union_ref_t %sunion_ref_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_union_vec_ref_t %sunion_vec_ref_t;\n", nsc);
+ fprintf(out->fp, "/* integer return code (ref and ptr always fail on 0) */\n"
+ "#define %sfailed(x) ((x) < 0)\n", nsc);
+ }
+ fprintf(out->fp, "typedef %sref_t %sroot_t;\n", nsc, nsc);
+ fprintf(out->fp, "#define %sroot(ref) ((%sroot_t)(ref))\n", nsc, nsc);
+ if (strcmp(nsc, "flatbuffers_")) {
+ fprintf(out->fp, "#define %sis_native_pe flatbuffers_is_native_pe\n", nsc);
+ fprintf(out->fp, "typedef flatbuffers_fid_t %sfid_t;\n", nsc);
+ }
+ fprintf(out->fp, "\n");
+
+ fprintf(out->fp,
+ "#define __%smemoize_begin(B, src)\\\n"
+ "do { flatcc_builder_ref_t _ref; if ((_ref = flatcc_builder_refmap_find((B), (src)))) return _ref; } while (0)\n"
+ "#define __%smemoize_end(B, src, op) do { return flatcc_builder_refmap_insert((B), (src), (op)); } while (0)\n"
+ "#define __%smemoize(B, src, op) do { __%smemoize_begin(B, src); __%smemoize_end(B, src, op); } while (0)\n"
+ "\n",
+ nsc, nsc, nsc, nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_buffer(NS)\\\n"
+ "typedef NS ## ref_t NS ## buffer_ref_t;\\\n"
+ "static inline int NS ## buffer_start(NS ## builder_t *B, const NS ##fid_t fid)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, 0, 0); }\\\n"
+ "static inline int NS ## buffer_start_with_size(NS ## builder_t *B, const NS ##fid_t fid)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, 0, flatcc_builder_with_size); }\\\n"
+ "static inline int NS ## buffer_start_aligned(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, block_align, 0); }\\\n"
+ "static inline int NS ## buffer_start_aligned_with_size(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, block_align, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t NS ## buffer_end(NS ## builder_t *B, NS ## ref_t root)\\\n"
+ "{ return flatcc_builder_end_buffer(B, root); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_root(NS, N, FID, TFID)\\\n"
+ "static inline int N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? -1 : N ## _start(B); }\\\n"
+ "static inline int N ## _start_as_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, FID) ? -1 : N ## _start(B); }\\\n"
+ "static inline int N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, TFID) ? -1 : N ## _start(B); }\\\n"
+ "static inline int N ## _start_as_typed_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, TFID) ? -1 : N ## _start(B); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ /*
+ * Unlike structs, we do not use flatcc_builder_create_buffer
+ * because we would have to manage alignment, and we save very
+ * little because tables require stack allocations in any case.
+ */
+ "static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start(B, TFID)) return 0;return NS ## buffer_end(B, N ## _clone(B, t)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_prolog(NS, N, FID, TFID)\\\n"
+ "__%sbuild_table_vector_ops(NS, N ## _vec, N)\\\n"
+ "__%sbuild_table_root(NS, N, FID, TFID)\n"
+ "\n",
+ nsc, nsc, nsc);
+
+
+ fprintf(out->fp,
+ "#define __%sbuild_struct_root(NS, N, A, FID, TFID)\\\n"
+ "static inline N ## _t *N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? 0 : N ## _start(B); }\\\n"
+ "static inline N ## _t *N ## _start_as_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, FID) ? 0 : N ## _start(B); }\\\n"
+ "static inline N ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, TFID) ? 0 : N ## _start(B); }\\\n"
+ "static inline N ## _t *N ## _start_as_typed_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, TFID) ? 0 : N ## _start(B); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_pe_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end_pe(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_pe_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end_pe(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_nested_table_root(NS, N, TN, FID, TFID)\\\n"
+ "static inline int N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? -1 : TN ## _start(B); }\\\n"
+ "static inline int N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, TFID) ? -1 : TN ## _start(B); }\\\n"
+ "static inline int N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _table_t t)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_root(B, t)); }\\\n"
+ "static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _table_t t)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_typed_root(B, t)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_nested_struct_root(NS, N, TN, A, FID, TFID)\\\n"
+ "static inline TN ## _t *N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\\\n"
+ "static inline TN ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\\\n"
+ "static inline int N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _end_pe_as_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end_pe(B))); }\\\n"
+ "static inline int N ## _create_as_root(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_buffer(B, FID, 0,\\\n"
+ " TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\\\n"
+ "static inline int N ## _create_as_typed_root(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_buffer(B, TFID, 0,\\\n"
+ " TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\\\n"
+ "static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _struct_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_root(B, p)); }\\\n"
+ "static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _struct_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_typed_root(B, p)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_vector_ops(NS, V, N, TN, T)\\\n"
+ "static inline T *V ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return (T *)flatcc_builder_extend_vector(B, len); }\\\n"
+ "static inline T *V ## _append(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return (T *)flatcc_builder_append_vector(B, data, len); }\\\n"
+ "static inline int V ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_vector(B, len); }\\\n"
+ "static inline T *V ## _edit(NS ## builder_t *B)\\\n"
+ "{ return (T *)flatcc_builder_vector_edit(B); }\\\n"
+ "static inline size_t V ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_vector_count(B); }\\\n"
+ "static inline T *V ## _push(NS ## builder_t *B, const T *p)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? (memcpy(_p, p, TN ## __size()), _p) : 0; }\\\n"
+ "static inline T *V ## _push_copy(NS ## builder_t *B, const T *p)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\\\n"
+ /* push_clone is the same as push_copy for scalar and struct vectors,
+ * but clone has different semantics as a standalone operation, so we can't use
+ * clone to implement push_clone - it would create a reference to a struct. */
+ "static inline T *V ## _push_clone(NS ## builder_t *B, const T *p)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\\\n"
+ "static inline T *V ## _push_create(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _assign(_p __ ## TN ## _call_args) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ /* NS: common namespace, N: typename, T: element type, S: elem size, A: alignment */
+ "#define __%sbuild_vector(NS, N, T, S, A)\\\n"
+ "typedef NS ## ref_t N ## _vec_ref_t;\\\n"
+ "static inline int N ## _vec_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_end_pe(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\\\n"
+ "{ if (!NS ## is_native_pe()) { size_t i, n; T *p = (T *)flatcc_builder_vector_edit(B);\\\n"
+ " for (i = 0, n = flatcc_builder_vector_count(B); i < n; ++i)\\\n"
+ " { N ## _to_pe(N ## __ptr_add(p, i)); }} return flatcc_builder_end_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_create_pe(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ if (!NS ## is_native_pe()) { size_t i; T *p; int ret = flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); if (ret) { return ret; }\\\n"
+ " p = (T *)flatcc_builder_extend_vector(B, len); if (!p) return 0;\\\n"
+ " for (i = 0; i < len; ++i) { N ## _copy_to_pe(N ## __ptr_add(p, i), N ## __const_ptr_add(data, i)); }\\\n"
+ " return flatcc_builder_end_vector(B); } else return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\\\n"
+ "{ __%smemoize(B, vec, flatcc_builder_create_vector(B, vec, N ## _vec_len(vec), S, A, FLATBUFFERS_COUNT_MAX(S))); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_slice(NS ## builder_t *B, N ##_vec_t vec, size_t index, size_t len)\\\n"
+ "{ size_t n = N ## _vec_len(vec); if (index >= n) index = n; n -= index; if (len > n) len = n;\\\n"
+ " return flatcc_builder_create_vector(B, N ## __const_ptr_add(vec, index), len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "__%sbuild_vector_ops(NS, N ## _vec, N, N, T)\n"
+ "\n",
+ nsc, nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_vector_ops(NS, V, N, TN)\\\n"
+ "static inline TN ## _union_ref_t *V ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_extend_union_vector(B, len); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _append(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_append_union_vector(B, data, len); }\\\n"
+ "static inline int V ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_union_vector(B, len); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _edit(NS ## builder_t *B)\\\n"
+ "{ return (TN ## _union_ref_t *) flatcc_builder_union_vector_edit(B); }\\\n"
+ "static inline size_t V ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_union_vector_count(B); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _push(NS ## builder_t *B, const TN ## _union_ref_t ref)\\\n"
+ "{ return flatcc_builder_union_vector_push(B, ref); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _push_clone(NS ## builder_t *B, TN ## _union_t u)\\\n"
+ "{ return TN ## _vec_push(B, TN ## _clone(B, u)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_vector(NS, N)\\\n"
+ "static inline int N ## _vec_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_union_vector(B); }\\\n"
+ "static inline N ## _union_vec_ref_t N ## _vec_end(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_union_vector(B); }\\\n"
+ "static inline N ## _union_vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _union_ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_create_union_vector(B, data, len); }\\\n"
+ "__%sbuild_union_vector_ops(NS, N ## _vec, N, N)\\\n"
+ "/* Preserves DAG structure separately for type and value vector, so a type vector could be shared for many value vectors. */\\\n"
+ "static inline N ## _union_vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_union_vec_t vec)\\\n"
+ "{ N ## _union_vec_ref_t _uvref, _ret = { 0, 0 }; NS ## union_ref_t _uref; size_t _i, _len;\\\n"
+ " if (vec.type == 0) return _ret;\\\n"
+ " _uvref.type = flatcc_builder_refmap_find(B, vec.type); _uvref.value = flatcc_builder_refmap_find(B, vec.value);\\\n"
+ " _len = N ## _union_vec_len(vec); if (_uvref.type == 0) {\\\n"
+ " _uvref.type = flatcc_builder_refmap_insert(B, vec.type, (flatcc_builder_create_type_vector(B, vec.type, _len))); }\\\n"
+ " if (_uvref.type == 0) return _ret; if (_uvref.value == 0) {\\\n"
+ " if (flatcc_builder_start_offset_vector(B)) return _ret;\\\n"
+ " for (_i = 0; _i < _len; ++_i) { _uref = N ## _clone(B, N ## _union_vec_at(vec, _i));\\\n"
+ " if (!_uref.value || !(flatcc_builder_offset_vector_push(B, _uref.value))) return _ret; }\\\n"
+ " _uvref.value = flatcc_builder_refmap_insert(B, vec.value, flatcc_builder_end_offset_vector(B));\\\n"
+ " if (_uvref.value == 0) return _ret; } return _uvref; }\n"
+ "\n",
+ nsc, nsc);
+
+ /* In addition to offset_vector_ops... */
+ fprintf(out->fp,
+ "#define __%sbuild_string_vector_ops(NS, N)\\\n"
+ "static inline int N ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return NS ## string_start(B); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_end(B)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_create(B, s, len)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_create_str(B, s)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_create_strn(B, s, max_len)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_clone(B, string)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_slice(B, string, index, len)); }\n"
+ "\n",
+ nsc);
+
+ /* In addition to offset_vector_ops... */
+ fprintf(out->fp,
+ "#define __%sbuild_table_vector_ops(NS, N, TN)\\\n"
+ "static inline int N ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return TN ## _start(B); }\\\n"
+ "static inline TN ## _ref_t *N ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return N ## _push(B, TN ## _end(B)); }\\\n"
+ "static inline TN ## _ref_t *N ## _push_create(NS ## builder_t *B __ ## TN ##_formal_args)\\\n"
+ "{ return N ## _push(B, TN ## _create(B __ ## TN ## _call_args)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_offset_vector_ops(NS, V, N, TN)\\\n"
+ "static inline TN ## _ref_t *V ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_extend_offset_vector(B, len); }\\\n"
+ "static inline TN ## _ref_t *V ## _append(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_append_offset_vector(B, data, len); }\\\n"
+ "static inline int V ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_offset_vector(B, len); }\\\n"
+ "static inline TN ## _ref_t *V ## _edit(NS ## builder_t *B)\\\n"
+ "{ return (TN ## _ref_t *)flatcc_builder_offset_vector_edit(B); }\\\n"
+ "static inline size_t V ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_offset_vector_count(B); }\\\n"
+ "static inline TN ## _ref_t *V ## _push(NS ## builder_t *B, const TN ## _ref_t ref)\\\n"
+ "{ return ref ? flatcc_builder_offset_vector_push(B, ref) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_offset_vector(NS, N)\\\n"
+ "typedef NS ## ref_t N ## _vec_ref_t;\\\n"
+ "static inline int N ## _vec_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_offset_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_offset_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_create_offset_vector(B, data, len); }\\\n"
+ "__%sbuild_offset_vector_ops(NS, N ## _vec, N, N)\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\\\n"
+ "{ int _ret; N ## _ref_t _e; size_t _i, _len; __%smemoize_begin(B, vec);\\\n"
+ " _len = N ## _vec_len(vec); if (flatcc_builder_start_offset_vector(B)) return 0;\\\n"
+ " for (_i = 0; _i < _len; ++_i) { if (!(_e = N ## _clone(B, N ## _vec_at(vec, _i)))) return 0;\\\n"
+ " if (!flatcc_builder_offset_vector_push(B, _e)) return 0; }\\\n"
+ " __%smemoize_end(B, vec, flatcc_builder_end_offset_vector(B)); }\\\n"
+ "\n",
+ nsc, nsc, nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string_ops(NS, N)\\\n"
+ "static inline char *N ## _append(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_append_string(B, s, len); }\\\n"
+ "static inline char *N ## _append_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return flatcc_builder_append_string_str(B, s); }\\\n"
+ "static inline char *N ## _append_strn(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_append_string_strn(B, s, len); }\\\n"
+ "static inline size_t N ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_string_len(B); }\\\n"
+ "static inline char *N ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_extend_string(B, len); }\\\n"
+ "static inline char *N ## _edit(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_string_edit(B); }\\\n"
+ "static inline int N ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_string(B, len); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string(NS)\\\n"
+ "typedef NS ## ref_t NS ## string_ref_t;\\\n"
+ "static inline int NS ## string_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_string(B); }\\\n"
+ "static inline NS ## string_ref_t NS ## string_end(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_string(B); }\\\n"
+ "static inline NS ## ref_t NS ## string_create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_create_string(B, s, len); }\\\n"
+ "static inline NS ## ref_t NS ## string_create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return flatcc_builder_create_string_str(B, s); }\\\n"
+ "static inline NS ## ref_t NS ## string_create_strn(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_create_string_strn(B, s, len); }\\\n"
+ "static inline NS ## string_ref_t NS ## string_clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ __%smemoize(B, string, flatcc_builder_create_string(B, string, NS ## string_len(string))); }\\\n"
+ "static inline NS ## string_ref_t NS ## string_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ size_t n = NS ## string_len(string); if (index >= n) index = n; n -= index; if (len > n) len = n;\\\n"
+ " return flatcc_builder_create_string(B, string + index, len); }\\\n"
+ "__%sbuild_string_ops(NS, NS ## string)\\\n"
+ "__%sbuild_offset_vector(NS, NS ## string)\n"
+ "\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%scopy_from_pe(P, P2, N) (*(P) = N ## _read_from_pe(P2), (P))\n"
+ "#define __%sfrom_pe(P, N) (*(P) = N ## _read_from_pe(P), (P))\n"
+ "#define __%scopy_to_pe(P, P2, N) (N ## _write_to_pe((P), *(P2)), (P))\n"
+ "#define __%sto_pe(P, N) (N ## _write_to_pe((P), *(P)), (P))\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_fixed_array_primitives(NS, N, T)\\\n"
+ "static inline T *N ## _array_copy(T *p, const T *p2, size_t n)\\\n"
+ "{ memcpy(p, p2, n * sizeof(T)); return p; }\\\n"
+ "static inline T *N ## _array_copy_from_pe(T *p, const T *p2, size_t n)\\\n"
+ "{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\\\n"
+ " for (i = 0; i < n; ++i) N ## _copy_from_pe(&p[i], &p2[i]); return p; }\\\n"
+ "static inline T *N ## _array_copy_to_pe(T *p, const T *p2, size_t n)\\\n"
+ "{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\\\n"
+ " for (i = 0; i < n; ++i) N ## _copy_to_pe(&p[i], &p2[i]); return p; }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_primitives(NS, N, T)\\\n"
+ "static inline T *N ## _from_pe(T *p) { return __ ## NS ## from_pe(p, N); }\\\n"
+ "static inline T *N ## _to_pe(T *p) { return __ ## NS ## to_pe(p, N); }\\\n"
+ "static inline T *N ## _copy(T *p, const T *p2) { *p = *p2; return p; }\\\n"
+ "static inline T *N ## _copy_from_pe(T *p, const T *p2)\\\n"
+ "{ return __ ## NS ## copy_from_pe(p, p2, N); }\\\n"
+ "static inline T *N ## _copy_to_pe(T *p, const T *p2) \\\n"
+ "{ return __ ## NS ## copy_to_pe(p, p2, N); }\\\n"
+ "static inline T *N ## _assign(T *p, const T v0) { *p = v0; return p; }\\\n"
+ "static inline T *N ## _assign_from_pe(T *p, T v0)\\\n"
+ "{ *p = N ## _read_from_pe(&v0); return p; }\\\n"
+ "static inline T *N ## _assign_to_pe(T *p, T v0)\\\n"
+ "{ N ## _write_to_pe(p, v0); return p; }\n"
+ "#define __%sbuild_scalar(NS, N, T)\\\n"
+ "__ ## NS ## define_scalar_primitives(NS, N, T)\\\n"
+ "__ ## NS ## define_fixed_array_primitives(NS, N, T)\\\n"
+ "__ ## NS ## build_vector(NS, N, T, sizeof(T), sizeof(T))\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "/* Depends on generated copy_to/from_pe functions, and the type. */\n"
+ "#define __%sdefine_struct_primitives(NS, N)\\\n"
+ "static inline N ## _t *N ##_to_pe(N ## _t *p)\\\n"
+ "{ if (!NS ## is_native_pe()) { N ## _copy_to_pe(p, p); }; return p; }\\\n"
+ "static inline N ## _t *N ##_from_pe(N ## _t *p)\\\n"
+ "{ if (!NS ## is_native_pe()) { N ## _copy_from_pe(p, p); }; return p; }\\\n"
+ "static inline N ## _t *N ## _clear(N ## _t *p) { return (N ## _t *)memset(p, 0, N ## __size()); }\n"
+ "\n"
+
+            /*
+             * NOTE: structs can be both inline and independent blocks. They
+             * are independent as buffer roots, and also as union members.
+             * _clone applied to a struct type name creates a reference to
+             * an independent block, but this is ambiguous. Structs also
+             * support _copy which is the inline equivalent of _clone.
+             * There is also the distinction between _clone applied to a
+             * field name, _clone applied to a type name, and _clone
+             * applied to a _vec_push operation. For field names and push
+             * operations, _clone is unambiguously inline and similar to
+             * _copy. So the ambiguity is when applying _clone to a type
+             * name where _copy and _clone are different. Unions can safely
+             * implement clone on struct members via _clone because union
+             * members are independent blocks whereas push_clone must be
+             * implemented with _copy because structs are inline in
+             * (non-union) vectors. Structs in union vectors are independent
+             * but these simply use the union's clone operation (which is a
+             * generated function).
+             */
+ "/* Depends on generated copy/assign_to/from_pe functions, and the type. */\n"
+ "#define __%sbuild_struct(NS, N, S, A, FID, TFID)\\\n"
+ "__ ## NS ## define_struct_primitives(NS, N)\\\n"
+ "typedef NS ## ref_t N ## _ref_t;\\\n"
+ "static inline N ## _t *N ## _start(NS ## builder_t *B)\\\n"
+ "{ return (N ## _t *)flatcc_builder_start_struct(B, S, A); }\\\n"
+ "static inline N ## _ref_t N ## _end(NS ## builder_t *B)\\\n"
+ "{ if (!NS ## is_native_pe()) { N ## _to_pe((N ## _t *)flatcc_builder_struct_edit(B)); }\\\n"
+ " return flatcc_builder_end_struct(B); }\\\n"
+ "static inline N ## _ref_t N ## _end_pe(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_struct(B); }\\\n"
+ "static inline N ## _ref_t N ## _create(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ N ## _t *_p = N ## _start(B); if (!_p) return 0; N ##_assign_to_pe(_p __ ## N ## _call_args);\\\n"
+ " return N ## _end_pe(B); }\\\n"
+ "static inline N ## _ref_t N ## _clone(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ N ## _t *_p; __%smemoize_begin(B, p); _p = N ## _start(B); if (!_p) return 0;\\\n"
+ " N ## _copy(_p, p); __%smemoize_end(B, p, N ##_end_pe(B)); }\\\n"
+ "__%sbuild_vector(NS, N, N ## _t, S, A)\\\n"
+ "__%sbuild_struct_root(NS, N, A, FID, TFID)\\\n"
+ "\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
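+
+    /*
+     * For illustration only: for a hypothetical struct type `Vec3` with
+     * three float fields, the macro above provides Vec3_start/_end,
+     * Vec3_create(B, x, y, z) and Vec3_clone(B, p) returning a
+     * Vec3_ref_t to an independent block, while the generated
+     * Vec3_copy(p, p2) assigns inline as discussed in the NOTE above.
+     */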
+ fprintf(out->fp,
+ "#define __%sstruct_clear_field(p) memset((p), 0, sizeof(*(p)))\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table(NS, N, K)\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_table(B, K); }\\\n"
+ "static inline N ## _ref_t N ## _end(NS ## builder_t *B)\\\n"
+ "{ FLATCC_ASSERT(flatcc_builder_check_required(B, __ ## N ## _required,\\\n"
+ " sizeof(__ ## N ## _required) / sizeof(__ ## N ## _required[0]) - 1));\\\n"
+ " return flatcc_builder_end_table(B); }\\\n"
+ "__%sbuild_offset_vector(NS, N)\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _ref_t ref)\\\n"
+ "{ TN ## _ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ?\\\n"
+ " ((*_p = ref), 0) : -1; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return TN ## _start(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, TN ## _end(B)); }\\\n"
+ "static inline TN ## _ref_t N ## _create(NS ## builder_t *B __ ## TN ##_formal_args)\\\n"
+ "{ return N ## _add(B, TN ## _create(B __ ## TN ## _call_args)); }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _table_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone(B, p)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _table_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _union_ref_t uref)\\\n"
+ "{ NS ## ref_t *_p; TN ## _union_type_t *_pt; if (uref.type == TN ## _NONE) return 0; if (uref.value == 0) return -1;\\\n"
+ " if (!(_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1, sizeof(*_pt), sizeof(*_pt)))) return -1;\\\n"
+ " *_pt = uref.type; if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uref.value; return 0; }\\\n"
+ "static inline int N ## _add_type(NS ## builder_t *B, TN ## _union_type_t type)\\\n"
+ "{ TN ## _union_type_t *_pt; if (type == TN ## _NONE) return 0; return (_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1,\\\n"
+ " sizeof(*_pt), sizeof(*_pt))) ? ((*_pt = type), 0) : -1; }\\\n"
+ "static inline int N ## _add_value(NS ## builder_t *B, TN ## _union_ref_t uref)\\\n"
+ "{ NS ## ref_t *p; if (uref.type == TN ## _NONE) return 0; return (p = flatcc_builder_table_add_offset(B, ID)) ?\\\n"
+ " ((*p = uref.value), 0) : -1; }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _union_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone(B, p)); }\\\n"
+ /* `_pick` is not supported on specific union members because the source dictates the type. */
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _union_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "/* M is the union value name and T is its type, i.e. the qualified name. */\n"
+ "#define __%sbuild_union_table_value_field(NS, N, NU, M, T)\\\n"
+ "static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return N ## _add(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline int N ## _ ## M ## _start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline int N ## _ ## M ## _end(NS ## builder_t *B)\\\n"
+ "{ T ## _ref_t ref = T ## _end(B);\\\n"
+ " return ref ? N ## _ ## M ## _add(B, ref) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _table_t t)\\\n"
+ "{ T ## _ref_t ref = T ## _clone(B, t);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "/* M is the union value name and T is its type, i.e. the qualified name. */\n"
+ "#define __%sbuild_union_struct_value_field(NS, N, NU, M, T)\\\n"
+ "static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return N ## _add(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline T ## _t *N ## _ ## M ## _start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline int N ## _ ## M ## _end(NS ## builder_t *B)\\\n"
+ "{ T ## _ref_t ref = T ## _end(B);\\\n"
+ " return ref ? N ## _ ## M ## _add(B, ref) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _end_pe(NS ## builder_t *B)\\\n"
+ "{ T ## _ref_t ref = T ## _end_pe(B);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _struct_t p)\\\n"
+ "{ T ## _ref_t ref = T ## _clone(B, p);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_string_value_field(NS, N, NU, M)\\\n"
+ "static inline int N ## _ ## M ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\\\n"
+ "{ return N ## _add(B, NU ## _as_ ## M (ref)); }\\\n"
+ "__%sbuild_string_field_ops(NS, N ## _ ## M)\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+            "/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type,\n"
+            " * S: size of scalar type, A: alignment of type T, V: default value of type T. */\n"
+ "#define __%sbuild_scalar_field(ID, NS, N, TN, T, S, A, V, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, const T v)\\\n"
+ "{ T *_p; if (v == V) return 0; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\\\n"
+ " TN ## _assign_to_pe(_p, v); return 0; }\\\n"
+ "static inline int N ## _force_add(NS ## builder_t *B, const T v)\\\n"
+ "{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\\\n"
+ " TN ## _assign_to_pe(_p, v); return 0; }\\\n"
+ "/* Clone does not skip default values and expects pe endian content. */\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, const T *p)\\\n"
+ "{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\\\n"
+ "/* Transferring a missing field is a nop success with 0 as result. */\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+            "/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type,\n"
+            " * S: size of scalar type, A: alignment of type T. */\n"
+ "#define __%sbuild_scalar_optional_field(ID, NS, N, TN, T, S, A, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, const T v)\\\n"
+ "{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\\\n"
+ " TN ## _assign_to_pe(_p, v); return 0; }\\\n"
+ "/* Clone does not skip default values and expects pe endian content. */\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, const T *p)\\\n"
+ "{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\\\n"
+ "/* Transferring a missing field is a nop success with 0 as result. */\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_struct_field(ID, NS, N, TN, S, A, TT)\\\n"
+ "static inline TN ## _t *N ## _start(NS ## builder_t *B)\\\n"
+ "{ return (TN ## _t *)flatcc_builder_table_add(B, ID, S, A); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ if (!NS ## is_native_pe()) { TN ## _to_pe((TN ## _t *)flatcc_builder_table_edit(B, S)); } return 0; }\\\n"
+ "static inline int N ## _end_pe(NS ## builder_t *B) { return 0; }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_assign_to_pe(_p __ ## TN ## _call_args);\\\n"
+ " return 0; }\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, const TN ## _t *p)\\\n"
+ "{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_copy_to_pe(_p, p); return 0; }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _struct_t p)\\\n"
+ "{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _struct_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
+
+ /* This goes for scalar, struct, and enum vectors. */
+ fprintf(out->fp,
+ "#define __%sbuild_vector_field(ID, NS, N, TN, T, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\\\n"
+ "{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return TN ## _vec_start(B); }\\\n"
+ "static inline int N ## _end_pe(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, TN ## _vec_end_pe(B)); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, TN ## _vec_end(B)); }\\\n"
+ "static inline int N ## _create_pe(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return N ## _add(B, TN ## _vec_create_pe(B, data, len)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return N ## _add(B, TN ## _vec_create(B, data, len)); }\\\n"
+ "static inline int N ## _slice(NS ## builder_t *B, TN ## _vec_t vec, size_t index, size_t len)\\\n"
+ "{ return N ## _add(B, TN ## _vec_slice(B, vec, index, len)); }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\\\n"
+ "{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\\\n"
+ "__%sbuild_vector_ops(NS, N, N, TN, T)\\\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_offset_vector_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\\\n"
+ "{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_offset_vector(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, flatcc_builder_end_offset_vector(B)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_offset_vector(B, data, len)); }\\\n"
+ "__%sbuild_offset_vector_ops(NS, N, N, TN)\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\\\n"
+ "{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "/* depends on N ## _add which differs for union member fields and ordinary fields */\\\n"
+ "#define __%sbuild_string_field_ops(NS, N)\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_string(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, flatcc_builder_end_string(B)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_string(B, s, len)); }\\\n"
+ "static inline int N ## _create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_string_str(B, s)); }\\\n"
+ "static inline int N ## _create_strn(NS ## builder_t *B, const char *s, size_t max_len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_string_strn(B, s, max_len)); }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ return N ## _add(B, NS ## string_clone(B, string)); }\\\n"
+ "static inline int N ## _slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ return N ## _add(B, NS ## string_slice(B, string, index, len)); }\\\n"
+ "__%sbuild_string_ops(NS, N)\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string_field(ID, NS, N, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\\\n"
+ "{ NS ## string_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\\\n"
+ "__%sbuild_string_field_ops(NS, N)\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ NS ## string_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_vector_field(ID, NS, N, TN, TT)\\\n"
+ "__%sbuild_offset_vector_field(ID, NS, N, TN, TT)\\\n"
+ "__%sbuild_table_vector_ops(NS, N, TN)\n"
+ "\n",
+ nsc, nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_vector_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _union_vec_ref_t uvref)\\\n"
+ "{ NS ## vec_ref_t *_p; if (!uvref.type || !uvref.value) return uvref.type == uvref.value ? 0 : -1;\\\n"
+ " if (!(_p = flatcc_builder_table_add_offset(B, ID - 1))) return -1; *_p = uvref.type;\\\n"
+ " if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uvref.value; return 0; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_union_vector(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, flatcc_builder_end_union_vector(B)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_union_vector(B, data, len)); }\\\n"
+ "__%sbuild_union_vector_ops(NS, N, N, TN)\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _union_vec_t vec)\\\n"
+ "{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _union_vec_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_table_vector_value_field(NS, N, NU, M, T)\\\n"
+ "static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _table_t t)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, t))); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_struct_vector_value_field(NS, N, NU, M, T)\\\n"
+ "static inline T ## _t *N ## _ ## M ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _struct_t p)\\\n"
+ /* Here we create an independent struct block, so T ## _clone is appropriate as opposed to T ## _copy. */
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, p))); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_string_vector_value_field(NS, N, NU, M)\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, NS ## string_ref_t ref)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return NS ## string_start(B); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_end(B))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create(B, s, len))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_str(B, s))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_strn(B, s, max_len))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_clone(B, string))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_slice(B, string, index, len))); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string_vector_field(ID, NS, N, TT)\\\n"
+ "__%sbuild_offset_vector_field(ID, NS, N, NS ## string, TT)\\\n"
+ "__%sbuild_string_vector_ops(NS, N)\n"
+ "\n",
+ nsc, nsc, nsc);
+
+ fprintf(out->fp, "#define __%schar_formal_args , char v0\n", nsc);
+ fprintf(out->fp, "#define __%schar_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint8_formal_args , uint8_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint8_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint8_formal_args , int8_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint8_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sbool_formal_args , %sbool_t v0\n", nsc, nsc);
+ fprintf(out->fp, "#define __%sbool_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint16_formal_args , uint16_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint16_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint32_formal_args , uint32_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint32_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint64_formal_args , uint64_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint64_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint16_formal_args , int16_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint16_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint32_formal_args , int32_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint32_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint64_formal_args , int64_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint64_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sfloat_formal_args , float v0\n", nsc);
+ fprintf(out->fp, "#define __%sfloat_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sdouble_formal_args , double v0\n", nsc);
+ fprintf(out->fp, "#define __%sdouble_call_args , v0\n", nsc);
+ fprintf(out->fp, "\n");
+ fprintf(out->fp, "__%sbuild_scalar(%s, %schar, char)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint8, uint8_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint8, int8_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sbool, %sbool_t)\n", nsc, nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint16, uint16_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint32, uint32_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint64, uint64_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint16, int16_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint32, int32_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint64, int64_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sfloat, float)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sdouble, double)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "\n");
+ fprintf(out->fp, "__%sbuild_string(%s)\n", nsc, nsc);
+ fprintf(out->fp, "\n");
+
+ fprintf(out->fp, "__%sbuild_buffer(%s)\n", nsc, nsc);
+ gen_epilogue(out);
+ fprintf(out->fp, "#endif /* %s_COMMON_BUILDER_H */\n", nscup);
+ return 0;
+}
+
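+/*
+ * Emits the per-schema builder header prologue: include guards, the
+ * includes of the matching _reader.h and the common builder header,
+ * and the buffer identifier/extension macros derived from the schema's
+ * file_identifier and file_extension declarations when present.
+ */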
+static int gen_builder_pretext(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ const char *nscup = out->nscup;
+
+ fprintf(out->fp,
+ "#ifndef %s_BUILDER_H\n"
+ "#define %s_BUILDER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "#ifndef %s_READER_H\n", out->S->basenameup);
+ fprintf(out->fp, "#include \"%s_reader.h\"\n", out->S->basename);
+ fprintf(out->fp, "#endif\n");
+ fprintf(out->fp, "#ifndef %s_COMMON_BUILDER_H\n", nscup);
+ fprintf(out->fp, "#include \"%scommon_builder.h\"\n", nsc);
+ fprintf(out->fp, "#endif\n");
+
+ fb_gen_c_includes(out, "_builder.h", "_BUILDER_H");
+
+ gen_prologue(out);
+
+ /*
+ * Even if defined in the reader header, we must redefine it here
+     * because another file might sneak in and update it.
+ */
+ if (out->S->file_identifier.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sidentifier\n"
+ "#define %sidentifier \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sidentifier\n"
+ "#define %sidentifier 0\n"
+ "#endif\n",
+ nsc, nsc);
+ }
+ if (out->S->file_extension.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sextension\n"
+ "#define %sextension \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_extension.s.len, out->S->file_extension.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sextension\n"
+ "#define %sextension \"%s\"\n"
+ "#endif\n",
+ nsc, nsc, out->opts->default_bin_ext);
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
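+/*
+ * Returns the number of create/assign arguments a struct expands to:
+ * deprecated fields are skipped, nested struct fields are expanded
+ * recursively, and fixed-length array fields count as one argument.
+ */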
+static int get_total_struct_field_count(fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int count = 0;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ switch (member->type.type) {
+ /* struct arrays count as 1 but struct fields are expanded */
+ case vt_compound_type_ref:
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ count += get_total_struct_field_count(member->type.ct);
+ continue;
+ }
+ ++count;
+ break;
+ default:
+ ++count;
+ break;
+ }
+ }
+ return count;
+}
+
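+/*
+ * Emits the separator between generated argument list entries, breaking
+ * the line roughly every fourth argument; `is_macro` selects a
+ * backslash continuation so the output remains valid inside a #define.
+ */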
+static inline void gen_comma(fb_output_t *out, int index, int count, int is_macro)
+{
+ char *cont = is_macro ? "\\\n" : "\n";
+
+ if (count == 0) {
+ return;
+ }
+ if (index == 0) {
+ if (count > 4) {
+ fprintf(out->fp, ",%s ", cont);
+ } else {
+ fprintf(out->fp, ", ");
+ }
+ } else {
+ if (index % 4 || count - index <= 2) {
+ fprintf(out->fp, ", ");
+ } else {
+ fprintf(out->fp, ",%s ", cont);
+ }
+ }
+}
+
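+/*
+ * Emits the v<index> parameter declarations for a struct's assign and
+ * create signatures, recursing into nested structs and emitting array
+ * parameters for fixed-length array fields.
+ */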
+static int gen_builder_struct_args(fb_output_t *out, fb_compound_type_t *ct, int index, int len, int is_macro)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *tname, *tname_ns;
+ fb_scoped_name_t snref;
+
+ fb_clear(snref);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ switch (member->type.type) {
+ case vt_fixed_array_compound_type_ref:
+ gen_comma(out, index, len, is_macro);
+ fb_compound_name(member->type.ct, &snref);
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ fprintf(out->fp, "const %s_t v%i[%i]", snref.text, index++, (int)member->type.len);
+ } else {
+ fprintf(out->fp, "%s_enum_t v%i[%i]", snref.text, index++, (int)member->type.len);
+ }
+ break;
+ case vt_compound_type_ref:
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ index = gen_builder_struct_args(out, member->type.ct, index, len, is_macro);
+ continue;
+ }
+ gen_comma(out, index, len, is_macro);
+ fb_compound_name(member->type.ct, &snref);
+ fprintf(out->fp, "%s_enum_t v%i", snref.text, index++);
+ break;
+ case vt_fixed_array_type:
+ gen_comma(out, index, len, is_macro);
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ fprintf(out->fp, "const %s%s v%i[%i]", tname_ns, tname, index++, (int)member->type.len);
+ break;
+ case vt_scalar_type:
+ gen_comma(out, index, len, is_macro);
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ fprintf(out->fp, "%s%s v%i", tname_ns, tname, index++);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected struct member type");
+ continue;
+ }
+ }
+ return index;
+}
+
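+/* Emits the matching v<index> argument list for the parameters declared above. */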
+static int gen_builder_struct_call_list(fb_output_t *out, fb_compound_type_t *ct, int index, int arg_count, int is_macro)
+{
+ int i;
+ int len = get_total_struct_field_count(ct);
+
+ for (i = 0; i < len; ++i) {
+ gen_comma(out, i, arg_count, is_macro);
+ fprintf(out->fp, "v%i", index++);
+ }
+ return index;
+}
+
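+/* Endianness conversion mode for the generated assign/copy helpers; pe is protocol endian. */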
+enum { no_conversion, convert_from_pe, convert_to_pe };
+
+/* Note: returned index is not correct when using from_ptr since it doesn't track arguments, but it shouldn't matter. */
+static int gen_builder_struct_field_assign(fb_output_t *out, fb_compound_type_t *ct, int index, int arg_count,
+ int conversion, int from_ptr)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int n, len;
+ const char *s;
+ int deprecated_index = 0;
+ const char *kind, *tprefix;
+ fb_scoped_name_t snref;
+
+ fb_clear(snref);
+ switch (conversion) {
+ case convert_to_pe: kind = "_to_pe"; break;
+ case convert_from_pe: kind = "_from_pe"; break;
+ default: kind = ""; break;
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+
+ if (index > 0) {
+ if (index % 4 == 0) {
+ fprintf(out->fp, ";\n ");
+ } else {
+ fprintf(out->fp, "; ");
+ }
+ }
+ switch (member->type.type) {
+ case vt_fixed_array_compound_type_ref:
+ len = (int)member->type.len;
+ fb_compound_name(member->type.ct, &snref);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ if (from_ptr) {
+ fprintf(out->fp, "%s_array_copy%s(p->%.*s, p2->%.*s, %d)",
+ snref.text, kind, n, s, n, s, len);
+ } else {
+ fprintf(out->fp, "%s_array_copy%s(p->%.*s, v%i, %d)",
+ snref.text, kind, n, s, index, len);
+ }
+ ++index;
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ deprecated_index++;
+ index += get_total_struct_field_count(member->type.ct);
+ continue;
+ }
+ if (from_ptr) {
+ fprintf(out->fp, "%s_copy%s(&p->%.*s, &p2->%.*s)", snref.text, kind, n, s, n, s);
+ /* `index` does not count children, but it doesn't matter here. */
+ ++index;
+ } else {
+ fprintf(out->fp, "%s_assign%s(&p->%.*s", snref.text, kind, n, s);
+ index = gen_builder_struct_call_list(out, member->type.ct, index, arg_count, 0);
+ fprintf(out->fp, ")");
+ }
+ continue;
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ switch (member->size == 1 ? no_conversion : conversion) {
+ case convert_from_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s_copy_from_pe(&p->%.*s, &p2->%.*s)",
+ snref.text, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s_assign_from_pe(&p->%.*s, v%i)",
+ snref.text, n, s, index);
+ }
+ break;
+ case convert_to_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s_copy_to_pe(&p->%.*s, &p2->%.*s)",
+ snref.text, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s_assign_to_pe(&p->%.*s, v%i)",
+ snref.text, n, s, index);
+ }
+ break;
+ default:
+ if (from_ptr) {
+ fprintf(out->fp, "p->%.*s = p2->%.*s", n, s, n, s);
+ } else {
+ fprintf(out->fp, "p->%.*s = v%i", n, s, index);
+ }
+ break;
+ }
+ ++index;
+ continue;
+ case vt_fixed_array_type:
+ tprefix = scalar_type_prefix(member->type.st);
+ len = (int)member->type.len;
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ if (from_ptr) {
+ fprintf(out->fp, "%s%s_array_copy%s(p->%.*s, p2->%.*s, %d)",
+ nsc, tprefix, kind, n, s, n, s, len);
+ } else {
+ fprintf(out->fp, "%s%s_array_copy%s(p->%.*s, v%i, %d)",
+ nsc, tprefix, kind, n, s, index, len);
+ }
+ ++index;
+ break;
+ case vt_scalar_type:
+ tprefix = scalar_type_prefix(member->type.st);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ switch (member->size == 1 ? no_conversion : conversion) {
+ case convert_from_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s%s_copy_from_pe(&p->%.*s, &p2->%.*s)",
+ nsc, tprefix, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s%s_assign_from_pe(&p->%.*s, v%i)",
+ nsc, tprefix, n, s, index);
+ }
+ break;
+ case convert_to_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s%s_copy_to_pe(&p->%.*s, &p2->%.*s)",
+ nsc, tprefix, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s%s_assign_to_pe(&p->%.*s, v%i)",
+ nsc, tprefix, n, s, index);
+ }
+ break;
+ default:
+ if (from_ptr) {
+ fprintf(out->fp, "p->%.*s = p2->%.*s", n, s, n, s);
+ } else {
+ fprintf(out->fp, "p->%.*s = v%i", n, s, index);
+ }
+ break;
+ }
+ ++index;
+ break;
+ default:
+ gen_panic(out, "internal error: type error");
+ continue;
+ }
+ }
+ if (arg_count > 0) {
+ fprintf(out->fp, ";\n ");
+ }
+ return index;
+}
+
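+/*
+ * Emits the struct's _formal_args/_call_args macros, the inline
+ * _assign/_copy variants with and without endian conversion, and the
+ * __build_struct macro instantiation (plus fixed array primitives for
+ * non-empty structs).
+ */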
+static void gen_builder_struct(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ int arg_count;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ assert(ct->symbol.kind == fb_is_struct);
+
+ fb_compound_name(ct, &snt);
+
+ arg_count = get_total_struct_field_count(ct);
+ fprintf(out->fp, "#define __%s_formal_args ", snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 1);
+ fprintf(out->fp, "\n#define __%s_call_args ", snt.text);
+ gen_builder_struct_call_list(out, ct, 0, arg_count, 1);
+ fprintf(out->fp, "\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_assign(%s_t *p",
+ snt.text, snt.text, snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 0);
+ fprintf(out->fp, ")\n{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, no_conversion, 0);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_copy(%s_t *p, const %s_t *p2)\n",
+ snt.text, snt.text, snt.text, snt.text);
+ fprintf(out->fp, "{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, no_conversion, 1);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_assign_to_pe(%s_t *p",
+ snt.text, snt.text, snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 0);
+ fprintf(out->fp, ")\n{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_to_pe, 0);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_copy_to_pe(%s_t *p, const %s_t *p2)\n",
+ snt.text, snt.text, snt.text, snt.text);
+ fprintf(out->fp, "{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_to_pe, 1);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_assign_from_pe(%s_t *p",
+ snt.text, snt.text, snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 0);
+ fprintf(out->fp, ")\n{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_from_pe, 0);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_copy_from_pe(%s_t *p, const %s_t *p2)\n",
+ snt.text, snt.text, snt.text, snt.text);
+ fprintf(out->fp, "{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_from_pe, 1);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp, "__%sbuild_struct(%s, %s, %"PRIu64", %u, %s_file_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, (uint64_t)ct->size, ct->align, snt.text, snt.text);
+
+ if (ct->size > 0) {
+ fprintf(out->fp, "__%sdefine_fixed_array_primitives(%s, %s, %s_t)\n",
+ nsc, nsc, snt.text, snt.text);
+ }
+}
+
+static int get_create_table_arg_count(fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int count = 0;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ ++count;
+ }
+ return count;
+}
+
+static int gen_builder_table_call_list(fb_output_t *out, fb_compound_type_t *ct, int arg_count, int is_macro)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int index = 0;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ gen_comma(out, index, arg_count, is_macro);
+ fprintf(out->fp, "v%"PRIu64"", (uint64_t)member->id);
+ ++index;
+ }
+ return index;
+}
+
+
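+/*
+ * Emits the __<table>_required[] voffset array checked by the generated
+ * <table>_end(); a trailing 0 entry keeps the array non-empty when the
+ * table has no required fields.
+ */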
+static int gen_required_table_fields(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int index;
+ int arg_count;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ arg_count = get_create_table_arg_count(ct);
+ index = 0;
+ fb_compound_name(ct, &snt);
+ fprintf(out->fp, "static const %svoffset_t __%s_required[] = {", nsc, snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ if (member->metadata_flags & fb_f_required) {
+ if (index > 0) {
+ gen_comma(out, index, arg_count, 0);
+ } else {
+ fprintf(out->fp, " ");
+ }
+ fprintf(out->fp, "%u", (unsigned)member->id);
+ index++;
+ }
+ }
+ /* Add extra element to avoid null arrays. */
+ if (index > 0) {
+ fprintf(out->fp, ", 0 };\n");
+ } else {
+ fprintf(out->fp, " 0 };\n");
+ }
+ return index;
+}
+
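+/*
+ * Emits the formal argument list for <table>_create(); arguments are
+ * named v<id> after the field id so the id-ordered assignment code can
+ * refer to them without tracking positions.
+ */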
+static int gen_builder_table_args(fb_output_t *out, fb_compound_type_t *ct, int arg_count, int is_macro)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ const char *tname, *tname_ns;
+ int index;
+ fb_scoped_name_t snref;
+
+ fb_clear(snref);
+ /* Just to help the comma. */
+ index = 0;
+ /* We use the id to name arguments so sorted assignment can find the arguments trivially. */
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ gen_comma(out, index++, arg_count, is_macro);
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ fprintf(out->fp, "%s_t *v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_enum:
+ fprintf(out->fp, "%s_enum_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_table:
+ fprintf(out->fp, "%s_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_union:
+                /* A union occupies two fields (type and value), so its id skips one. */
+ fprintf(out->fp, "%s_union_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table field type");
+ continue;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_enum:
+ case fb_is_table:
+ fprintf(out->fp, "%s_vec_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_union:
+ fprintf(out->fp, "%s_union_vec_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ default:
+                gen_panic(out, "internal error: unexpected table vector member type");
+ continue;
+ }
+ break;
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ fprintf(out->fp, "%s%s v%"PRIu64"", tname_ns, tname, (uint64_t)member->id);
+ break;
+ case vt_vector_type:
+ tname = scalar_type_prefix(member->type.st);
+ fprintf(out->fp, "%s%s_vec_ref_t v%"PRIu64"", nsc, tname, (uint64_t)member->id);
+ break;
+ case vt_string_type:
+ fprintf(out->fp, "%sstring_ref_t v%"PRIu64"", nsc, (uint64_t)member->id);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp, "%sstring_vec_ref_t v%"PRIu64"", nsc, (uint64_t)member->id);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type");
+ continue;
+ }
+ }
+ return index;
+}
+
+static int gen_builder_create_table_decl(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ int arg_count;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ arg_count = get_create_table_arg_count(ct);
+ fprintf(out->fp, "#define __%s_formal_args ", snt.text);
+ gen_builder_table_args(out, ct, arg_count, 1);
+ fprintf(out->fp, "\n#define __%s_call_args ", snt.text);
+ gen_builder_table_call_list(out, ct, arg_count, 1);
+ fprintf(out->fp, "\n");
+
+    /* The `_clone` forward declaration must be placed before the build_table macro and `_create` must be placed after. */
+ fprintf(out->fp,
+ "static inline %s_ref_t %s_create(%sbuilder_t *B __%s_formal_args);\n",
+ snt.text, snt.text, nsc, snt.text);
+ return 0;
+}
+
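+/*
+ * Emits <table>_create(): starts the table, adds each non-deprecated
+ * field in ordered_members order, and - unless the schema requests the
+ * original field order - adds union values first and patches the
+ * corresponding union type fields in a second pass.
+ */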
+static int gen_builder_create_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ int n;
+ const char *s;
+ int patch_union = !(ct->metadata_flags & fb_f_original_order);
+ int has_union = 0;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static inline %s_ref_t %s_create(%sbuilder_t *B __%s_formal_args)\n",
+ snt.text, snt.text, nsc, snt.text);
+
+ fprintf(out->fp, "{\n if (%s_start(B)", snt.text);
+ for (member = ct->ordered_members; member; member = member->order) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(&member->symbol, &n, &s);
+ if (member->type.type == vt_compound_type_ref && member->type.ct->symbol.kind == fb_is_union) {
+ has_union = 1;
+ if (patch_union) {
+ fprintf(out->fp, "\n || %s_%.*s_add_value(B, v%"PRIu64")", snt.text, n, s, (uint64_t)member->id);
+ continue;
+ }
+ }
+ fprintf(out->fp, "\n || %s_%.*s_add(B, v%"PRIu64")", snt.text, n, s, (uint64_t)member->id);
+ }
+ if (patch_union && has_union) {
+ for (member = ct->ordered_members; member; member = member->order) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ if (member->type.type == vt_compound_type_ref && member->type.ct->symbol.kind == fb_is_union) {
+ symbol_name(&member->symbol, &n, &s);
+ fprintf(out->fp, "\n || %s_%.*s_add_type(B, v%"PRIu64".type)", snt.text, n, s, (uint64_t)member->id);
+ }
+ }
+ }
+ fprintf(out->fp, ") {\n return 0;\n }\n return %s_end(B);\n}\n\n", snt.text);
+ return 0;
+}
+
+static int gen_builder_structs(fb_output_t *out)
+{
+ fb_compound_type_t *ct;
+
+ /* Generate structs in topologically sorted order. */
+ for (ct = out->S->ordered_structs; ct; ct = ct->order) {
+ gen_builder_struct(out, ct);
+ fprintf(out->fp, "\n");
+ }
+ return 0;
+}
+
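+/*
+ * Emits the table's _ref_t typedef, a forward declaration of _clone
+ * (required before the __build_table macro expands), and the
+ * __build_table instantiation with the table's field count.
+ */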
+static int gen_builder_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "typedef %sref_t %s_ref_t;\n",
+ nsc, snt.text);
+ fprintf(out->fp,
+ "static %s_ref_t %s_clone(%sbuilder_t *B, %s_table_t t);\n",
+ snt.text, snt.text, nsc, snt.text);
+ fprintf(out->fp, "__%sbuild_table(%s, %s, %"PRIu64")\n",
+ nsc, nsc, snt.text, (uint64_t)ct->count);
+ return 0;
+}
+
+static int gen_builder_table_prolog(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp, "__%sbuild_table_prolog(%s, %s, %s_file_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, snt.text, snt.text);
+ return 0;
+}
+
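+/*
+ * Emits the per-member helpers for a union (vector) field, selecting
+ * the table, struct, or string value variant of the
+ * __build_union_*_value_field macros for each union member.
+ */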
+static int gen_union_fields(fb_output_t *out, const char *st, int n, const char *s,
+ fb_compound_type_t *ct, int is_vector)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ const char *su;
+ int nu;
+ fb_scoped_name_t snref;
+ fb_scoped_name_t snu;
+ const char *kind = is_vector ? "vector_value" : "value";
+
+ fb_clear(snref);
+ fb_clear(snu);
+ fb_compound_name(ct, &snref);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &nu, &su);
+ switch (member->type.type) {
+ case vt_missing:
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snu);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ "__%sbuild_union_table_%s_field(%s, %s_%.*s, %s, %.*s, %s)\n",
+ nsc, kind, nsc, st, n, s, snref.text, nu, su, snu.text);
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sbuild_union_struct_%s_field(%s, %s_%.*s, %s, %.*s, %s)\n",
+ nsc, kind, nsc, st, n, s, snref.text, nu, su, snu.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union member compound type");
+ return -1;
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "__%sbuild_union_string_%s_field(%s, %s_%.*s, %s, %.*s)\n",
+ nsc, kind, nsc, st, n, s, snref.text, nu, su);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union member type");
+ return -1;
+ }
+ }
+ return 0;
+}
+
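+/*
+ * Emits one __build_*_field macro instantiation per non-deprecated
+ * table field, choosing the variant (scalar, optional scalar, vector,
+ * string, struct, table, enum, union, or union vector) from the member
+ * type; [ubyte] fields with a nested_flatbuffer attribute also get a
+ * nested root macro.
+ */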
+static int gen_builder_table_fields(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s, *tprefix, *tname, *tname_ns;
+ int n;
+ int is_optional;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+ fb_literal_t literal;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(&member->symbol, &n, &s);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "/* Skipping build of deprecated field: '%s_%.*s' */\n\n", snt.text, n, s);
+ continue;
+ }
+ is_optional = member->flags & fb_fm_optional;
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tprefix = scalar_type_prefix(member->type.st);
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sbuild_scalar_optional_field(%"PRIu64", %s, %s_%.*s, %s%s, %s%s, %"PRIu64", %u, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, nsc, tprefix, tname_ns, tname,
+ (uint64_t)member->size, member->align, snt.text);
+ } else {
+ print_literal(member->type.st, &member->value, literal);
+ fprintf(out->fp,
+ "__%sbuild_scalar_field(%"PRIu64", %s, %s_%.*s, %s%s, %s%s, %"PRIu64", %u, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, nsc, tprefix, tname_ns, tname,
+ (uint64_t)member->size, member->align, literal, snt.text);
+ }
+ break;
+ case vt_vector_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tprefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "__%sbuild_vector_field(%"PRIu64", %s, %s_%.*s, %s%s, %s%s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, nsc, tprefix, tname_ns, tname, snt.text);
+ /* [ubyte] vectors can nest buffers. */
+ if (member->nest) {
+ switch (member->nest->symbol.kind) {
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)(&member->nest->symbol), &snref);
+ fprintf(out->fp, "__%sbuild_nested_table_root(%s, %s_%.*s, %s, %s_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, n, s, snref.text, snref.text, snref.text);
+ break;
+ case fb_is_struct:
+ fb_compound_name((fb_compound_type_t *)(&member->nest->symbol), &snref);
+ fprintf(out->fp, "__%sbuild_nested_struct_root(%s, %s_%.*s, %s, %u, %s_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, n, s, snref.text,
+ (unsigned)((fb_compound_type_t *)(member->nest))->align, snref.text, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected nested type");
+ continue;
+ }
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "__%sbuild_string_field(%"PRIu64", %s, %s_%.*s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snt.text);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "__%sbuild_string_vector_field(%"PRIu64", %s, %s_%.*s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snt.text);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sbuild_struct_field(%"PRIu64", %s, %s_%.*s, %s, %"PRIu64", %u, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, (uint64_t)member->size, member->align, snt.text);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "__%sbuild_table_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ break;
+ case fb_is_enum:
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sbuild_scalar_optional_field(%"PRIu64", %s, %s_%.*s, %s, %s_enum_t, %"PRIu64", %u, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text,
+ (uint64_t)member->size, member->align, snt.text);
+ } else {
+ print_literal(member->type.ct->type.st, &member->value, literal);
+ fprintf(out->fp,
+ "__%sbuild_scalar_field(%"PRIu64", %s, %s_%.*s, %s, %s_enum_t, %"PRIu64", %u, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text,
+ (uint64_t)member->size, member->align, literal, snt.text);
+ }
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "__%sbuild_union_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ gen_union_fields(out, snt.text, n, s, member->type.ct, 0);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type in table during code generation");
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ if (member->type.ct->symbol.flags & fb_indexed) {
+ fprintf(out->fp, "/* vector has keyed elements */\n");
+ }
+ fprintf(out->fp,
+ "__%sbuild_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s_t, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text, snt.text);
+ break;
+ case fb_is_table:
+ if (member->type.ct->symbol.flags & fb_indexed) {
+ fprintf(out->fp, "/* vector has keyed elements */\n");
+ }
+ fprintf(out->fp,
+ "__%sbuild_table_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ break;
+ case fb_is_enum:
+ fprintf(out->fp,
+ "__%sbuild_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s_enum_t, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text, snt.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "__%sbuild_union_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ gen_union_fields(out, snt.text, n, s, member->type.ct, 1);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type in table during code generation");
+ break;
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type during code generation");
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+/*
+ * NOTE:
+ *
+ * Cloning a table might lead to a combinatorial explosion if the source
+ * has many shared references in a DAG. In many cases this might not be
+ * an issue, but if it is, deduplication will be necessary. Deduplication
+ * is not specific to cloning but is especially relevant here. Because
+ * deduplication carries an overhead in runtime and complexity, it is not
+ * part of the core cloning operation. Cloning of unions and vectors with
+ * references has similar concerns.
+ *
+ * A deduplication operation would internally look like this:
+ *
+ * dedup_clone_table(builder, dedup_map, src_ptr)
+ * {
+ * ref = get_cloned_ref(dedup_map, src_ptr)
+ * if (!ref) {
+ * ref = clone_table(builder, src_ptr);
+ * set_cloned_ref(dedup_map, src_ptr, ref);
+ * }
+ * return ref;
+ * }
+ *
+ * where dedup_map is a map from a pointer to a builder reference and
+ * where the dedup_map is dedicated to a single builder and may cover
+ * multiple source buffers as long as they have separate memory
+ * locations - otherwise a separate dedup map must be used for each
+ * source buffer.
+ *
+ * Note that the clone operation is not safe without a safe source
+ * buffer so clone cannot be used to make a buffer with overlapping data
+ * safe (e.g. a string and a table referencing the same memory). Even if
+ * the source passes basic verification the result might not. To make
+ * clone safe it would be necessary to remember the type as well, for
+ * example by adding a type specifier to the dedup_map.
+ *
+ * In the following we do not implement deduplication.
+ */
+static int gen_builder_clone_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ const char *s;
+ int n;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ /*
+ * We could optimize this by cloning the entire table memory block
+     * and then update only the references. The builder has
+ * direct vtable operations to support this - this would not work
+ * properly if there are deprecated fields to be stripped or if the
+ * default value has changed - and, more complicated: it is
+     * necessary to know what the table alignment needs to be, which requires
+     * inspection of all fields, or a worst-case assumption. So at least
+ * for now, we clone by picking one field at a time.
+ */
+
+ fprintf(out->fp,
+ "static %s_ref_t %s_clone(%sbuilder_t *B, %s_table_t t)\n",
+ snt.text, snt.text, nsc, snt.text);
+
+ fprintf(out->fp,
+ "{\n"
+ " __%smemoize_begin(B, t);\n"
+ " if (%s_start(B)", nsc, snt.text);
+ for (member = ct->ordered_members; member; member = member->order) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(&member->symbol, &n, &s);
+ switch (member->type.type) {
+ case vt_scalar_type:
+ case vt_vector_type: /* This includes nested buffers - they are just transferred as bytes. */
+ case vt_string_type:
+ case vt_vector_string_type:
+ fprintf(out->fp, "\n || %s_%.*s_pick(B, t)", snt.text, n, s);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_enum:
+ case fb_is_union:
+ fprintf(out->fp, "\n || %s_%.*s_pick(B, t)", snt.text, n, s);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type in table during code generation");
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_enum:
+ case fb_is_union:
+ fprintf(out->fp, "\n || %s_%.*s_pick(B, t)", snt.text, n, s);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type in table during code generation");
+ break;
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type during code generation");
+ break;
+ }
+ }
+ fprintf(out->fp, ") {\n"
+ " return 0;\n"
+ " }\n"
+ " __%smemoize_end(B, t, %s_end(B));\n}\n", nsc, snt.text);
+ return 0;
+}
+
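+/*
+ * Enums build like their underlying scalar type: emit _formal_args and
+ * _call_args for each enum and instantiate __build_scalar with the
+ * generated <enum>_enum_t type.
+ */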
+static int gen_builder_enums(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ int was_here = 0;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "#define __%s_formal_args , %s_enum_t v0\n"
+ "#define __%s_call_args , v0\n",
+ snt.text, snt.text,
+ snt.text);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %s, %s_enum_t)\n",
+ nsc, nsc, snt.text, snt.text);
+ was_here = 1;
+ break;
+ default:
+ continue;
+ }
+ }
+ if (was_here) {
+ fprintf(out->fp, "\n");
+ }
+ return 0;
+}
+
+/*
+ * Scope resolution is a bit fuzzy in unions -
+ *
+ * Google's flatc compiler allows dot notation in unions but not enums.
+ * C++ generates unqualified enum members (i.e. MyGame.Example.Monster
+ * becomes Monster) in the generated enum but still refers to the
+ * specific table type in the given namespace. This makes it possible
+ * to have name conflicts, and flatc raises these like other enum
+ * conflicts.
+ *
+ * We use the same approach and this is why we both look up compound
+ * name and symbol name for the same member but the code generator
+ * is not concerned with how the scope is parsed or how errors are
+ * flagged - it just expects members to be unique.
+ */
+static int gen_union(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s;
+ int n;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name((fb_compound_type_t *)member->type.ct, &snref);
+ symbol_name(sym, &n, &s);
+ fprintf(out->fp,
+ "static inline %s_union_ref_t %s_as_%.*s(%s_ref_t ref)\n"
+ "{ %s_union_ref_t uref; uref.type = %s_%.*s; uref.value = ref; return uref; }\n",
+ snt.text, snt.text, n, s, snref.text,
+ snt.text, snt.text, n, s);
+ break;
+ case vt_string_type:
+ symbol_name(sym, &n, &s);
+ fprintf(out->fp,
+ "static inline %s_union_ref_t %s_as_%.*s(%sstring_ref_t ref)\n"
+ "{ %s_union_ref_t uref; uref.type = %s_%.*s; uref.value = ref; return uref; }\n",
+ snt.text, snt.text, n, s, nsc,
+ snt.text, snt.text, n, s);
+ break;
+ case vt_missing:
+ fprintf(out->fp,
+ "static inline %s_union_ref_t %s_as_NONE(void)\n"
+ "{ %s_union_ref_t uref; uref.type = %s_NONE; uref.value = 0; return uref; }\n",
+ snt.text, snt.text, snt.text, snt.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union value type");
+ break;
+ }
+ }
+ fprintf(out->fp,
+ "__%sbuild_union_vector(%s, %s)\n\n",
+ nsc, nsc, snt.text);
+ return 0;
+}
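+/*
+ * For a hypothetical union `Any` with a table member `Weapon` in an
+ * unnamespaced schema (names invented), the helpers generated above
+ * look roughly like this:
+ *
+ *     static inline Any_union_ref_t Any_as_Weapon(Weapon_ref_t ref)
+ *     { Any_union_ref_t uref; uref.type = Any_Weapon; uref.value = ref; return uref; }
+ *
+ *     static inline Any_union_ref_t Any_as_NONE(void)
+ *     { Any_union_ref_t uref; uref.type = Any_NONE; uref.value = 0; return uref; }
+ */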
+
+static int gen_union_clone(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s;
+ int n;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static %s_union_ref_t %s_clone(%sbuilder_t *B, %s_union_t u)\n{\n switch (u.type) {\n",
+ snt.text, snt.text, nsc, snt.text);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name((fb_compound_type_t *)member->type.ct, &snref);
+ symbol_name(sym, &n, &s);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ " case %u: return %s_as_%.*s(%s_clone(B, (%s_table_t)u.value));\n",
+ (unsigned)member->value.u, snt.text, n, s, snref.text, snref.text);
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ " case %u: return %s_as_%.*s(%s_clone(B, (%s_struct_t)u.value));\n",
+ (unsigned)member->value.u, snt.text, n, s, snref.text, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union value type");
+ break;
+ }
+ break;
+ case vt_string_type:
+ symbol_name(sym, &n, &s);
+ fprintf(out->fp,
+ " case %u: return %s_as_%.*s(%sstring_clone(B, u.value));\n",
+ (unsigned)member->value.u, snt.text, n, s, nsc);
+ break;
+ case vt_missing:
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union value type");
+ break;
+ }
+ }
+
+ /* Unknown unions are dropped. */
+ fprintf(out->fp,
+ " default: return %s_as_NONE();\n"
+ " }\n}\n",
+ snt.text);
+ return 0;
+}
+
+
+static int gen_builder_union_decls(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ int was_here = 0;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "typedef %sunion_ref_t %s_union_ref_t;\n"
+ "typedef %sunion_vec_ref_t %s_union_vec_ref_t;\n",
+ nsc, snt.text, nsc, snt.text);
+ fprintf(out->fp,
+ "static %s_union_ref_t %s_clone(%sbuilder_t *B, %s_union_t t);\n",
+ snt.text, snt.text, nsc, snt.text);
+ was_here = 1;
+ break;
+ default:
+ continue;
+ }
+ }
+ if (was_here) {
+ fprintf(out->fp, "\n");
+ }
+ return 0;
+}
+
+static int gen_builder_unions(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_union(out, (fb_compound_type_t *)sym);
+ gen_union_clone(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
+static int gen_builder_table_decls(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ /*
+ * Because tables are recursive, we need the type and `start/end/add`
+ * operations before the fields. We also need create for push_create
+ * but it needs all dependent types, so create is fw declared
+ * in a subsequent step. The actual create impl. then follows
+ * after the table fields.
+ */
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_required_table_fields(out, (fb_compound_type_t *)sym);
+ gen_builder_table(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_builder_create_table_decl(out, (fb_compound_type_t *)sym);
+ gen_builder_table_prolog(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
+static int gen_builder_tables(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_builder_table_fields(out, (fb_compound_type_t *)sym);
+ gen_builder_create_table(out, (fb_compound_type_t *)sym);
+ gen_builder_clone_table(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
+static int gen_builder_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_BUILDER_H */\n",
+ out->S->basenameup);
+ return 0;
+}
+
+int fb_gen_c_builder(fb_output_t *out)
+{
+ gen_builder_pretext(out);
+ gen_builder_enums(out);
+ gen_builder_structs(out);
+ gen_builder_union_decls(out);
+ gen_builder_table_decls(out);
+ gen_builder_unions(out);
+ gen_builder_tables(out);
+ gen_builder_footer(out);
+ return 0;
+}
diff --git a/src/compiler/codegen_c_json_parser.c b/src/compiler/codegen_c_json_parser.c
new file mode 100644
index 0000000..307ce76
--- /dev/null
+++ b/src/compiler/codegen_c_json_parser.c
@@ -0,0 +1,1850 @@
+#include <stdlib.h>
+#include "codegen_c.h"
+#include "flatcc/flatcc_types.h"
+#include "catalog.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+#define PRINTLN_SPMAX 64
+static char println_spaces[PRINTLN_SPMAX];
+
+static void println(fb_output_t *out, const char * format, ...)
+{
+ int i = out->indent * out->opts->cgen_spacing;
+ va_list ap;
+
+ if (println_spaces[0] == 0) {
+ memset(println_spaces, 0x20, PRINTLN_SPMAX);
+ }
+ /* Don't indent on blank lines. */
+ if (*format) {
+ while (i > PRINTLN_SPMAX) {
+ fprintf(out->fp, "%.*s", (int)PRINTLN_SPMAX, println_spaces);
+ i -= PRINTLN_SPMAX;
+ }
+ /* Print any remaining indentation (now at most PRINTLN_SPMAX spaces). */
+ fprintf(out->fp, "%.*s", i, println_spaces);
+ va_start (ap, format);
+ vfprintf (out->fp, format, ap);
+ va_end (ap);
+ }
+ fprintf(out->fp, "\n");
+}
+
+/*
+ * Unknown fields and unknown union members can be failed
+ * rather than ignored with a config flag.
+ *
+ * Default values can be forced with a config flag.
+ *
+ * Forward schema isn't perfect: Unknown symbolic constants
+ * cannot be used with known fields but will be ignored
+ * in ignored fields.
+ */
+
+static int gen_json_parser_pretext(fb_output_t *out)
+{
+ println(out, "#ifndef %s_JSON_PARSER_H", out->S->basenameup);
+ println(out, "#define %s_JSON_PARSER_H", out->S->basenameup);
+ println(out, "");
+ println(out, "/* " FLATCC_GENERATED_BY " */");
+ println(out, "");
+ println(out, "#include \"flatcc/flatcc_json_parser.h\"");
+ fb_gen_c_includes(out, "_json_parser.h", "_JSON_PARSER_H");
+ gen_prologue(out);
+ println(out, "");
+ return 0;
+}
+
+static int gen_json_parser_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ println(out, "#endif /* %s_JSON_PARSER_H */", out->S->basenameup);
+ return 0;
+}
+
+typedef struct dict_entry dict_entry_t;
+struct dict_entry {
+ const char *text;
+ int len;
+ void *data;
+ int hint;
+};
+
+/* Returns the length of the name that remains after the tag at the current position. */
+static int get_dict_suffix_len(dict_entry_t *de, int pos)
+{
+ int n;
+
+ n = de->len;
+ if (pos + 8 > n) {
+ return 0;
+ }
+ return n - pos - 8;
+}
+
+/*
+ * Returns the length of the name that remains if it terminates within the tag
+ * and 0 if it has a suffix.
+ */
+static int get_dict_tag_len(dict_entry_t *de, int pos)
+{
+ int n;
+
+ n = de->len;
+ if (pos + 8 >= n) {
+ return n - pos;
+ }
+ return 0;
+}
+
+/*
+ * 8 byte word part of the name starting at character `pos` in big
+ * endian encoding with first char always at msb, zero padded at lsb.
+ * Returns length of tag [0;8].
+ */
+static int get_dict_tag(dict_entry_t *de, int pos, uint64_t *tag, uint64_t *mask,
+ const char **tag_name, int *tag_len)
+{
+ int i, n = 0;
+ const char *a = 0;
+ uint64_t w = 0;
+
+ if (pos > de->len) {
+ goto done;
+ }
+ a = de->text + pos;
+ n = de->len - pos;
+ if (n > 8) {
+ n = 8;
+ }
+ i = n;
+ while (i--) {
+ w |= ((uint64_t)a[i]) << (56 - (i * 8));
+ }
+ *tag = w;
+ *mask = ~(((uint64_t)(1) << (8 - n) * 8) - 1);
+done:
+ if (tag_name) {
+ *tag_name = a;
+ }
+ if (tag_len) {
+ *tag_len = n;
+ }
+ return n;
+}
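+/*
+ * Worked example (illustrative): for a dictionary entry with text
+ * "color" and pos 0, n = 5, so the tag packs 'c','o','l','o','r' into
+ * the five most significant bytes and the mask covers those bytes:
+ *
+ *     tag  = 0x636f6c6f72000000
+ *     mask = 0xffffffffff000000
+ *
+ * A search word `w` read from the input matches when (w & mask) == tag.
+ */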
+
+
+/*
+ * Find the median, but move earlier if the previous entry
+ * is a strict prefix within the range.
+ *
+ * `b` is inclusive.
+ *
+ * The `pos` is a window into the key at an 8 byte multiple.
+ *
+ * Only consider the range `[pos;pos+8)` and move the median
+ * up if an earlier key is a prefix or match within this
+ * window. This is needed to handle trailing data in
+ * a compared external key, and also to handle sub-tree
+ * branching when two keys have the same tag at pos.
+ *
+ * Worst case we get a linear search of length 8 if all
+ * keys are perfect prefixes of their successor key:
+ * `a, ab, abc, ..., abcdefgh`
+ * While the midpoint still seeks towards 'a' for longer
+ * such sequences, the branch logic will pool those
+ * sequences that share prefix groups of length 8.
+ */
+static int split_dict_left(dict_entry_t *dict, int a, int b, int pos)
+{
+ int m = a + (b - a) / 2;
+ uint64_t wf = 0, wg = 0, wmf = 0, wmg = 0;
+
+ while (m > a) {
+ get_dict_tag(&dict[m - 1], pos, &wf, &wmf, 0, 0);
+ get_dict_tag(&dict[m], pos, &wg, &wmg, 0, 0);
+ if (((wf ^ wg) & wmf) != 0) {
+ return m;
+ }
+ --m;
+ }
+ return m;
+}
+
+/*
+ * When multiple tags are identical after split_dict_left has moved
+ * intersection up so a == m, we need to split in the opposite direction
+ * to ensure progress until all tags in the range are identical
+ * at which point the trie must descend.
+ *
+ * If all tags are the same from intersection to end, b + 1 is returned
+ * which is not a valid element.
+ */
+static int split_dict_right(dict_entry_t *dict, int a, int b, int pos)
+{
+ int m = a + (b - a) / 2;
+ uint64_t wf = 0, wg = 0, wmf = 0, wmg = 0;
+
+ while (m < b) {
+ get_dict_tag(&dict[m], pos, &wf, &wmf, 0, 0);
+ get_dict_tag(&dict[m + 1], pos, &wg, &wmg, 0, 0);
+ if (((wf ^ wg) & wmf) != 0) {
+ return m + 1;
+ }
+ ++m;
+ }
+ return m + 1;
+}
+
+/*
+ * Returns the first index where the tag does not terminate at
+ * [pos..pos+7], or b + 1 if none exists.
+ */
+static int split_dict_descend(dict_entry_t *dict, int a, int b, int pos)
+{
+ while (a <= b) {
+ if (0 < get_dict_suffix_len(&dict[a], pos)) {
+ break;
+ }
+ ++a;
+ }
+ return a;
+}
+
+
+static int dict_cmp(const void *x, const void *y)
+{
+ const dict_entry_t *a = x, *b = y;
+ int k, n = a->len > b->len ? b->len : a->len;
+
+ k = memcmp(a->text, b->text, (size_t)n);
+ return k ? k : a->len - b->len;
+}
+
+/* Includes union vectors. */
+static inline int is_union_member(fb_member_t *member)
+{
+ return (member->type.type == vt_compound_type_ref || member->type.type == vt_vector_compound_type_ref)
+ && member->type.ct->symbol.kind == fb_is_union;
+}
+
+static dict_entry_t *build_compound_dict(fb_compound_type_t *ct, int *count_out)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ size_t n;
+ dict_entry_t *dict, *de;
+ char *strbuf = 0;
+ size_t strbufsiz = 0;
+ int is_union;
+ size_t union_index = 0;
+
+ n = 0;
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ is_union = is_union_member(member);
+ if (is_union) {
+ ++n;
+ strbufsiz += (size_t)member->symbol.ident->len + 6;
+ }
+ ++n;
+ }
+ *count_out = (int)n;
+ if (n == 0) {
+ return 0;
+ }
+ dict = malloc(n * sizeof(dict_entry_t) + strbufsiz);
+ if (!dict) {
+ return 0;
+ }
+ strbuf = (char *)dict + n * sizeof(dict_entry_t);
+ de = dict;
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ de->text = member->symbol.ident->text;
+ de->len = (int)member->symbol.ident->len;
+ de->data = member;
+ de->hint = 0;
+ ++de;
+ is_union = is_union_member(member);
+ if (is_union) {
+ member->export_index = union_index++;
+ de->len = (int)member->symbol.ident->len + 5;
+ de->text = strbuf;
+ memcpy(strbuf, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ strbuf += member->symbol.ident->len;
+ strcpy(strbuf, "_type");
+ strbuf += 6;
+ de->data = member;
+ de->hint = 1;
+ ++de;
+ }
+ }
+ qsort(dict, n, sizeof(dict[0]), dict_cmp);
+ return dict;
+}
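+/*
+ * Note on the dictionary layout (the field name `weapon` is just an
+ * example): a union member yields two entries pointing at the same
+ * fb_member_t - `weapon` with hint 0 for the union value and the
+ * synthesized `weapon_type` with hint 1 for the union type field.
+ * The entries are then sorted so the trie generator can split them
+ * by binary comparison.
+ */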
+
+typedef struct {
+ int count;
+ fb_schema_t *schema;
+ dict_entry_t *de;
+} install_enum_context_t;
+
+static void count_visible_enum_symbol(void *context, fb_symbol_t *sym)
+{
+ install_enum_context_t *p = context;
+
+ if (get_enum_if_visible(p->schema, sym)) {
+ p->count++;
+ }
+}
+
+static void install_visible_enum_symbol(void *context, fb_symbol_t *sym)
+{
+ install_enum_context_t *p = context;
+
+ if (get_enum_if_visible(p->schema, sym)) {
+ p->de->text = sym->ident->text;
+ p->de->len = (int)sym->ident->len;
+ p->de->data = sym;
+ p->de++;
+ }
+}
+
+/*
+ * A scope dictionary contains all the enum types defined under the given
+ * namespace of the scope. The actual namespace is not contained in
+ * the name - it is an implicit prefix. It is used when looking up a
+ * symbolic constant assigned to a field such that the constant is first
+ * searched for in the same scope (namespace) as the one that defined
+ * the table owning the field assigned to. If that fails, a global
+ * namespace prefixed lookup is needed, but this is separate from this
+ * dictionary. In case of conflicts the local scope takes precedence
+ * and must be searched first. Because each table parsed can have a
+ * unique local scope, we cannot install the unprefixed lookup in
+ * the same dictionary as the global lookup.
+ *
+ * NOTE: the scope may have been contaminated by being expanded by a
+ * parent schema so we check that each symbol is visible to the current
+ * schema. If we didn't do this, we would risk referring to enum parsers
+ * that are not included in the generated source. The default empty
+ * namespace (i.e. scope) is an example where this easily could happen.
+ */
+static dict_entry_t *build_local_scope_dict(fb_schema_t *schema, fb_scope_t *scope, int *count_out)
+{
+ dict_entry_t *dict;
+ install_enum_context_t iec;
+
+ fb_clear(iec);
+
+ iec.schema = schema;
+
+ fb_symbol_table_visit(&scope->symbol_index, count_visible_enum_symbol, &iec);
+ *count_out = iec.count;
+
+ if (iec.count == 0) {
+ return 0;
+ }
+ dict = malloc((size_t)iec.count * sizeof(dict[0]));
+ if (!dict) {
+ return 0;
+ }
+ iec.de = dict;
+ fb_symbol_table_visit(&scope->symbol_index, install_visible_enum_symbol, &iec);
+ qsort(dict, (size_t)iec.count, sizeof(dict[0]), dict_cmp);
+ return dict;
+}
+
+static dict_entry_t *build_global_scope_dict(catalog_t *catalog, int *count_out)
+{
+ size_t i, n = (size_t)catalog->nenums;
+ dict_entry_t *dict;
+
+ *count_out = (int)n;
+ if (n == 0) {
+ return 0;
+ }
+ dict = malloc(n * sizeof(dict[0]));
+ if (!dict) {
+ return 0;
+ }
+ for (i = 0; i < (size_t)catalog->nenums; ++i) {
+ dict[i].text = catalog->enums[i].name;
+ dict[i].len = (int)strlen(catalog->enums[i].name);
+ dict[i].data = catalog->enums[i].ct;
+ dict[i].hint = 0;
+ }
+ qsort(dict, (size_t)catalog->nenums, sizeof(dict[0]), dict_cmp);
+ *count_out = catalog->nenums;
+ return dict;
+}
+
+static void clear_dict(dict_entry_t *dict)
+{
+ if (dict) {
+ free(dict);
+ }
+}
+
+static int gen_field_match_handler(fb_output_t *out, fb_compound_type_t *ct, void *data, int is_union_type)
+{
+ fb_member_t *member = data;
+ fb_scoped_name_t snref;
+ fb_symbol_text_t scope_name;
+
+ int is_struct_container;
+ int is_string = 0;
+ int is_enum = 0;
+ int is_vector = 0;
+ int is_offset = 0;
+ int is_scalar = 0;
+ int is_optional = 0;
+ int is_table = 0;
+ int is_struct = 0;
+ int is_union = 0;
+ int is_union_vector = 0;
+ int is_union_type_vector = 0;
+ int is_base64 = 0;
+ int is_base64url = 0;
+ int is_nested = 0;
+ int is_array = 0;
+ int is_char_array = 0;
+ size_t array_len = 0;
+ fb_scalar_type_t st = 0;
+ const char *tname_prefix = "n/a", *tname = "n/a"; /* suppress compiler warnings */
+ fb_literal_t literal;
+
+ fb_clear(snref);
+
+ fb_copy_scope(ct->scope, scope_name);
+ is_struct_container = ct->symbol.kind == fb_is_struct;
+ is_optional = !!(member->flags & fb_fm_optional);
+
+ switch (member->type.type) {
+ case vt_vector_type:
+ case vt_vector_compound_type_ref:
+ case vt_vector_string_type:
+ is_vector = 1;
+ break;
+ }
+
+ switch (member->type.type) {
+ case vt_fixed_array_compound_type_ref:
+ case vt_vector_compound_type_ref:
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ is_enum = member->type.ct->symbol.kind == fb_is_enum;
+ is_struct = member->type.ct->symbol.kind == fb_is_struct;
+ is_table = member->type.ct->symbol.kind == fb_is_table;
+ is_union = member->type.ct->symbol.kind == fb_is_union && !is_union_type;
+ if (is_enum) {
+ st = member->type.ct->type.st;
+ is_scalar = 1;
+ }
+ break;
+ case vt_vector_string_type:
+ case vt_string_type:
+ is_string = 1;
+ break;
+ case vt_vector_type:
+ /* Nested types are processed twice, once as an array, once as an object. */
+ is_nested = member->nest != 0;
+ is_base64 = member->metadata_flags & fb_f_base64;
+ is_base64url = member->metadata_flags & fb_f_base64url;
+ is_scalar = 1;
+ st = member->type.st;
+ break;
+ case vt_fixed_array_type:
+ is_scalar = 1;
+ is_array = 1;
+ array_len = member->type.len;
+ st = member->type.st;
+ break;
+ case vt_scalar_type:
+ is_scalar = 1;
+ st = member->type.st;
+ break;
+ }
+ if (member->type.type == vt_fixed_array_compound_type_ref) {
+ assert(is_struct_container);
+ is_array = 1;
+ array_len = member->type.len;
+ }
+ if (is_base64 || is_base64url) {
+ /* Even if it is nested, parse it as a regular base64 or base64url encoded vector. */
+ if (st != fb_ubyte || !is_vector) {
+ gen_panic(out, "internal error: unexpected base64 or base64url field type\n");
+ return -1;
+ }
+ is_nested = 0;
+ is_vector = 0;
+ is_scalar = 0;
+ }
+ if (is_union_type) {
+ is_scalar = 0;
+ }
+ if (is_vector && is_union_type) {
+ is_union_type_vector = 1;
+ is_vector = 0;
+ }
+ if (is_vector && is_union) {
+ is_union_vector = 1;
+ is_vector = 0;
+ }
+ if (is_array && is_scalar && st == fb_char) {
+ is_array = 0;
+ is_scalar = 0;
+ is_char_array = 1;
+ }
+ if (is_nested == 1) {
+ println(out, "if (buf != end && *buf == '[') { /* begin nested */"); indent();
+ }
+repeat_nested:
+ if (is_nested == 2) {
+ unindent(); println(out, "} else { /* nested */"); indent();
+ fb_compound_name((fb_compound_type_t *)&member->nest->symbol, &snref);
+ if (member->nest->symbol.kind == fb_is_table) {
+ is_table = 1;
+ } else {
+ is_struct = 1;
+ }
+ is_vector = 0;
+ is_scalar = 0;
+ println(out, "if (flatcc_builder_start_buffer(ctx->ctx, 0, 0, 0)) goto failed;");
+ }
+ is_offset = !is_scalar && !is_struct && !is_union_type;
+
+ if (is_scalar) {
+ tname_prefix = scalar_type_prefix(st);
+ tname = st == fb_bool ? "uint8_t" : scalar_type_name(st);
+ }
+
+ /* Other types can also be vector, so we wrap. */
+ if (is_vector) {
+ if (is_offset) {
+ println(out, "if (flatcc_builder_start_offset_vector(ctx->ctx)) goto failed;");
+ } else {
+ println(out,
+ "if (flatcc_builder_start_vector(ctx->ctx, %"PRIu64", %hu, UINT64_C(%"PRIu64"))) goto failed;",
+ (uint64_t)member->size, (short)member->align,
+ (uint64_t)FLATBUFFERS_COUNT_MAX(member->size));
+ }
+ }
+ if (is_array) {
+ if (is_scalar) {
+ println(out, "size_t count = %d;", array_len);
+ println(out, "%s *base = (%s *)((size_t)struct_base + %"PRIu64");",
+ tname, tname, (uint64_t)member->offset);
+ }
+ else {
+ println(out, "size_t count = %d;", array_len);
+ println(out, "void *base = (void *)((size_t)struct_base + %"PRIu64");",
+ (uint64_t)member->offset);
+ }
+ }
+ if (is_char_array) {
+ println(out, "char *base = (char *)((size_t)struct_base + %"PRIu64");",
+ (uint64_t)member->offset);
+ println(out, "buf = flatcc_json_parser_char_array(ctx, buf, end, base, %d);", array_len);
+ }
+ if (is_array || is_vector) {
+ println(out, "buf = flatcc_json_parser_array_start(ctx, buf, end, &more);");
+ /* Note that we reuse `more` which is safe because it is updated at the end of the main loop. */
+ println(out, "while (more) {"); indent();
+ }
+ if (is_scalar) {
+ println(out, "%s val = 0;", tname);
+ println(out, "static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {");
+ indent(); indent();
+ /*
+ * The scope name may be empty when no namespace is used. In that
+ * case the global scope is the same, but performance-wise the
+ * duplicate lookup doesn't matter.
+ */
+ if (is_enum) {
+ println(out, "%s_parse_json_enum,", snref.text);
+ println(out, "%s_local_%sjson_parser_enum,", out->S->basename, scope_name);
+ println(out, "%s_global_json_parser_enum, 0 };", out->S->basename);
+ } else {
+ println(out, "%s_local_%sjson_parser_enum,", out->S->basename, scope_name);
+ println(out, "%s_global_json_parser_enum, 0 };", out->S->basename);
+ }
+ unindent(); unindent();
+ }
+ /* It is not safe to acquire the pointer before building element table or string. */
+ if (is_vector && !is_offset) {
+ println(out, "if (!(pval = flatcc_builder_extend_vector(ctx->ctx, 1))) goto failed;");
+ }
+ if (is_struct_container) {
+ if (!is_array && !is_char_array) {
+ /* `struct_base` is given as argument to struct parsers. */
+ println(out, "pval = (void *)((size_t)struct_base + %"PRIu64");", (uint64_t)member->offset);
+ }
+ } else if (is_struct && !is_vector) {
+ /* Same logic as scalars in tables, but scalars must be tested for default. */
+ println(out,
+ "if (!(pval = flatcc_builder_table_add(ctx->ctx, %"PRIu64", %"PRIu64", %"PRIu16"))) goto failed;",
+ (uint64_t)member->id, (uint64_t)member->size, (uint16_t)member->align);
+ }
+ if (is_scalar) {
+ println(out, "buf = flatcc_json_parser_%s(ctx, (mark = buf), end, &val);", tname_prefix);
+ println(out, "if (mark == buf) {"); indent();
+ println(out, "buf = flatcc_json_parser_symbolic_%s(ctx, (mark = buf), end, symbolic_parsers, &val);", tname_prefix);
+ println(out, "if (buf == mark || buf == end) goto failed;");
+ unindent(); println(out, "}");
+ if (!is_struct_container && !is_vector && !is_base64 && !is_base64url) {
+#if !FLATCC_JSON_PARSE_FORCE_DEFAULTS
+ /* We need to create a check for the default value and create a table field if not the default. */
+ if (!is_optional) {
+ if (!print_literal(st, &member->value, literal)) return -1;
+ println(out, "if (val != %s || (ctx->flags & flatcc_json_parser_f_force_add)) {", literal); indent();
+ }
+#endif
+ println(out, "if (!(pval = flatcc_builder_table_add(ctx->ctx, %"PRIu64", %"PRIu64", %hu))) goto failed;",
+ (uint64_t)member->id, (uint64_t)member->size, (short)member->align);
+#if !FLATCC_JSON_PARSE_FORCE_DEFAULTS
+#endif
+ }
+ /* For scalars in table field, and in struct container. */
+ if (is_array) {
+ println(out, "if (count) {"); indent();
+ println(out, "%s%s_write_to_pe(base, val);", out->nsc, tname_prefix);
+ println(out, "--count;");
+ println(out, "++base;");
+ unindent(); println(out, "} else if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "%s%s_write_to_pe(pval, val);", out->nsc, tname_prefix);
+ }
+ if (!is_struct_container && !is_vector && !(is_scalar && is_optional)) {
+ unindent(); println(out, "}");
+ }
+ } else if (is_struct) {
+ if (is_array) {
+ println(out, "if (count) {"); indent();
+ println(out, "buf = %s_parse_json_struct_inline(ctx, buf, end, base);", snref.text);
+ println(out, "--count;");
+ println(out, "base = (void *)((size_t)base + %"PRIu64");", member->type.ct->size);
+ unindent(); println(out, "} else if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "buf = %s_parse_json_struct_inline(ctx, buf, end, pval);", snref.text);
+ }
+ } else if (is_string) {
+ println(out, "buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);");
+ } else if (is_base64 || is_base64url) {
+ println(out, "buf = flatcc_json_parser_build_uint8_vector_base64(ctx, buf, end, &ref, %u);",
+ !is_base64);
+ } else if (is_table) {
+ println(out, "buf = %s_parse_json_table(ctx, buf, end, &ref);", snref.text);
+ } else if (is_union) {
+ if (is_union_vector) {
+ println(out, "buf = flatcc_json_parser_union_vector(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, %s_parse_json_union);",
+ (uint64_t)member->export_index, member->id, snref.text);
+ } else {
+ println(out, "buf = flatcc_json_parser_union(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, %s_parse_json_union);",
+ (uint64_t)member->export_index, member->id, snref.text);
+ }
+ } else if (is_union_type) {
+ println(out, "static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {");
+ indent(); indent();
+ println(out, "%s_parse_json_enum,", snref.text);
+ println(out, "%s_local_%sjson_parser_enum,", out->S->basename, scope_name);
+ println(out, "%s_global_json_parser_enum, 0 };", out->S->basename);
+ unindent(); unindent();
+ if (is_union_type_vector) {
+ println(out, "buf = flatcc_json_parser_union_type_vector(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, symbolic_parsers, %s_parse_json_union, %s_json_union_accept_type);",
+ (uint64_t)member->export_index, member->id, snref.text, snref.text);
+ } else {
+ println(out, "buf = flatcc_json_parser_union_type(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, symbolic_parsers, %s_parse_json_union);",
+ (uint64_t)member->export_index, member->id, snref.text);
+ }
+ } else if (!is_vector && !is_char_array) {
+ gen_panic(out, "internal error: unexpected type for trie member\n");
+ return -1;
+ }
+ if (is_vector) {
+ if (is_offset) {
+ /* Deal with table and string vector elements - unions cannot be elements. */
+ println(out, "if (!ref || !(pref = flatcc_builder_extend_offset_vector(ctx->ctx, 1))) goto failed;");
+ /* We don't need to worry about endian conversion - offset vectors fix this automatically. */
+ println(out, "*pref = ref;");
+ }
+ println(out, "buf = flatcc_json_parser_array_end(ctx, buf, end, &more);");
+ unindent(); println(out, "}");
+ if (is_offset) {
+ println(out, "ref = flatcc_builder_end_offset_vector(ctx->ctx);");
+ } else {
+ println(out, "ref = flatcc_builder_end_vector(ctx->ctx);");
+ }
+ }
+ if (is_array) {
+ println(out, "buf = flatcc_json_parser_array_end(ctx, buf, end, &more);");
+ unindent(); println(out, "}");
+ println(out, "if (count) {"); indent();
+ println(out, "if (ctx->flags & flatcc_json_parser_f_reject_array_underflow) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_underflow);");
+ unindent(); println(out, "}");
+ if (is_scalar) {
+ println(out, "memset(base, 0, count * sizeof(*base));");
+ } else {
+ println(out, "memset(base, 0, count * %"PRIu64");", (uint64_t)member->type.ct->size);
+ }
+ unindent(); println(out, "}");
+ }
+ if (is_nested == 1) {
+ is_nested = 2;
+ goto repeat_nested;
+ }
+ if (is_nested == 2) {
+ println(out, "if (!ref) goto failed;");
+ println(out, "ref = flatcc_builder_end_buffer(ctx->ctx, ref);");
+ unindent(); println(out, "} /* end nested */");
+ }
+ if (is_nested || is_vector || is_table || is_string || is_base64 || is_base64url) {
+ println(out, "if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, %"PRIu64"))) goto failed;", member->id);
+ println(out, "*pref = ref;");
+ }
+ return 0;
+}
+
+static void gen_field_match(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n)
+{
+ println(out, "buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, %d);", n);
+ println(out, "if (mark != buf) {"); indent();
+ gen_field_match_handler(out, ct, data, hint);
+ unindent(); println(out, "} else {"); indent();
+}
+
+/* This also handles union type enumerations. */
+static void gen_enum_match_handler(fb_output_t *out, fb_compound_type_t *ct, void *data, int unused_hint)
+{
+ fb_member_t *member = data;
+
+ (void)unused_hint;
+
+ /*
+ * This is rather unrelated to the rest, we just use the same
+ * trie generation logic. Here we simply need to assign a known
+ * value to the enum parser's output arguments.
+ */
+ switch (ct->type.st) {
+ case fb_bool:
+ case fb_ubyte:
+ case fb_ushort:
+ case fb_uint:
+ case fb_ulong:
+ println(out, "*value = UINT64_C(%"PRIu64"), *value_sign = 0;",
+ member->value.u);
+ break;
+ case fb_byte:
+ case fb_short:
+ case fb_int:
+ case fb_long:
+ if (member->value.i < 0) {
+ println(out, "*value = UINT64_C(%"PRIu64"), *value_sign = 1;", (uint64_t)(-member->value.i));
+ } else {
+ println(out, "*value = UINT64_C(%"PRIu64"), *value_sign = 0;", (uint64_t)member->value.i);
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: invalid enum type\n");
+ }
+}
+
+static void gen_enum_match(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n)
+{
+ println(out, "buf = flatcc_json_parser_match_constant(ctx, (mark = buf), end, %d, aggregate);", n);
+ println(out, "if (buf != mark) {"); indent();
+ gen_enum_match_handler(out, ct, data, hint);
+ unindent(); println(out, "} else {"); indent();
+}
+
+static void gen_scope_match_handler(fb_output_t *out, fb_compound_type_t *unused_ct, void *data, int unused_hint)
+{
+ fb_compound_type_t *ct = data;
+ fb_scoped_name_t snt;
+
+ (void)unused_ct;
+ (void)unused_hint;
+ assert(ct->symbol.kind == fb_is_enum || ct->symbol.kind == fb_is_union);
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ /* May be included from another file. Unions also have _enum parsers. */
+ println(out, "buf = %s_parse_json_enum(ctx, buf, end, value_type, value, aggregate);", snt.text);
+}
+
+static void gen_scope_match(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n)
+{
+ println(out, "buf = flatcc_json_parser_match_scope(ctx, (mark = buf), end, %d);", n);
+ println(out, "if (buf != mark) {"); indent();
+ gen_scope_match_handler(out, ct, data, hint);
+ unindent(); println(out, "} else {"); indent();
+}
+
+static void gen_field_unmatched(fb_output_t *out)
+{
+ println(out, "buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);");
+}
+
+static void gen_enum_unmatched(fb_output_t *out)
+{
+ println(out, "return unmatched;");
+}
+
+static void gen_scope_unmatched(fb_output_t *out)
+{
+ println(out, "return unmatched;");
+}
+
+/*
+ * Generate a trie for all members of a compound type.
+ * This may be a struct or a table.
+ *
+ * We have a ternary trie where a search word w compares:
+ * w < wx_tag is one branch [a;x), iff a < x.
+ * w > wx_tag is another branch (y;b], iff b > y
+ * and w == wx_tag is a third branch [x;y].
+ *
+ * The sets [a;x) and (y;b] may be empty in which case a non-match
+ * action is triggered.
+ *
+ * [x..y] is a set of one or more fields that share the same tag at the
+ * current position. The first (and only the first) field name in this
+ * set may terminate within the current tag (when suffix length k ==
+ * 0). There is therefore potentially both a direct field action and a
+ * sub-tree action. Once there is only one field in the set and the
+ * field name terminates within the current tag, the search word is
+ * masked and tested against the field tag and the search word is also
+ * tested for termination in the buffer at the first position after the
+ * field match. If the termination was not found a non-match action is
+ * triggered.
+ *
+ * A non-match action may be to silently consume the rest of the
+ * search identifier and then the json value, or to report an
+ * error.
+ *
+ * A match action triggers a json value parse of a known type
+ * which updates into a flatcc builder object. If the type is
+ * basic (string or scalar) the update is simple, otherwise if
+ * the type is within the same schema, we push context
+ * and switch to parse the nested type, otherwise we call
+ * a parser in another schema. When a trie is done, we
+ * switch back context if in the same schema. The context
+ * lives on a stack. This avoids deep recursion because
+ * schema parsers are not mutually recursive.
+ *
+ * The trie is also used to parse enums and scopes (namespace prefixes)
+ * with a slight modification.
+ */
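+/*
+ * A rough sketch of the code shape this trie emits, for a hypothetical
+ * enum with the two members "Green" and "Red" (tag and mask constants
+ * abbreviated; the real generator emits full 64-bit hex literals and
+ * the concrete match/assign actions):
+ *
+ *     w = flatcc_json_parser_symbol_part(buf, end);
+ *     if ((w & <mask "Red">) == <tag "Red">) {
+ *         buf = flatcc_json_parser_match_constant(ctx, (mark = buf), end, 3, aggregate);
+ *         if (buf != mark) { <assign value for Red> } else { <unmatched> }
+ *     } else {
+ *         if ((w & <mask "Green">) == <tag "Green">) {
+ *             buf = flatcc_json_parser_match_constant(ctx, (mark = buf), end, 5, aggregate);
+ *             if (buf != mark) { <assign value for Green> } else { <unmatched> }
+ *         } else {
+ *             <unmatched>
+ *         }
+ *     }
+ */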
+
+enum trie_type { table_trie, struct_trie, enum_trie, local_scope_trie, global_scope_trie };
+typedef struct trie trie_t;
+
+typedef void gen_match_f(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n);
+typedef void gen_unmatched_f(fb_output_t *out);
+
+struct trie {
+ dict_entry_t *dict;
+ gen_match_f *gen_match;
+ gen_unmatched_f *gen_unmatched;
+ /* Not used with scopes. */
+ fb_compound_type_t *ct;
+ int type;
+ int union_total;
+ int label;
+};
+
+/*
+ * This function is a final handler of the `gen_trie` function. Often
+ * just to handle a single match, but also to handle a prefix range
+ * special case like keys in `{ a, alpha, alpha2 }`.
+ *
+ * (See also special case of two non-prefix keys below).
+ *
+ * We know that all keys [a..b] have length in the range [pos..pos+8)
+ * and also that key x is a proper prefix of key x + 1, x in [a..b).
+ *
+ * It is possible that `a == b`.
+ *
+ * We conduct a binary search by testing the middle for masked match and
+ * gradually refine until we do not have a match or have a single
+ * element match.
+ *
+ * (An alternative algorithm xors 8 byte tag with longest prefix and
+ * finds ceiling of log 2 using a few bit logic operations or intrinsic
+ * zero count and creates a jump table of at most 8 elements, but is
+ * hardly worthwhile vs 3 comparisons and 3 AND operations and often
+ * less than that.)
+ *
+ * Once we have a single element match we need to confirm the successor
+ * symbol is not any valid key - this differs among trie types and is
+ * therefore the polymorph match logic handles the final confirmed match
+ * or mismatch.
+ *
+ * Each trie type has special operation for implementing a matched and
+ * a failed match. Our job is to call these for each key in the range.
+ *
+ * While not the original intention, the `gen_prefix_trie` also handles the
+ * special case where the set has two keys where one is not a prefix of
+ * the other, but both terminate in the same tag. In this case we can
+ * immediately do an exact match test and skip the less than
+ * comparison. We need no special code for this, assuming the function
+ * is called correctly. This significantly reduces the branching in a
+ * case like "Red, Green, Blue".
+ *
+ * If `label` is positive, it is used to jump to additional match logic
+ * when a prefix was not matched. If 0 there is no additional logic and
+ * the symbol is considered unmatched immediately.
+ */
+static void gen_prefix_trie(fb_output_t *out, trie_t *trie, int a, int b, int pos, int label)
+{
+ int m, n;
+ uint64_t tag = 0, mask = 0;
+ const char *name;
+ int len;
+
+ /*
+ * Weigh the intersection towards the longer prefix. Notably if we
+ * have two keys it makes no sense to check the shorter key first.
+ */
+ m = a + (b - a + 1) / 2;
+
+ n = get_dict_tag(&trie->dict[m], pos, &tag, &mask, &name, &len);
+ if (n == 8) {
+ println(out, "if (w == 0x%"PRIx64") { /* \"%.*s\" */", tag, len, name); indent();
+ } else {
+ println(out, "if ((w & 0x%"PRIx64") == 0x%"PRIx64") { /* \"%.*s\" */",
+ mask, tag, len, name); indent();
+ }
+ if (m == a) {
+ /* There can be only one. */
+ trie->gen_match(out, trie->ct, trie->dict[m].data, trie->dict[m].hint, n);
+ if (label > 0) {
+ println(out, "goto pfguard%d;", label);
+ } else {
+ trie->gen_unmatched(out);
+ }
+ unindent(); println(out, "}");
+ unindent(); println(out, "} else { /* \"%.*s\" */", len, name); indent();
+ if (label > 0) {
+ println(out, "goto pfguard%d;", label);
+ } else {
+ trie->gen_unmatched(out);
+ }
+ } else {
+ if (m == b) {
+ trie->gen_match(out, trie->ct, trie->dict[m].data, trie->dict[m].hint, n);
+ if (label > 0) {
+ println(out, "goto pfguard%d;", label);
+ } else {
+ trie->gen_unmatched(out);
+ }
+ unindent(); println(out, "}");
+ } else {
+ gen_prefix_trie(out, trie, m, b, pos, label);
+ }
+ unindent(); println(out, "} else { /* \"%.*s\" */", len, name); indent();
+ gen_prefix_trie(out, trie, a, m - 1, pos, label);
+ }
+ unindent(); println(out, "} /* \"%.*s\" */", len, name);
+}
+
+static void gen_trie(fb_output_t *out, trie_t *trie, int a, int b, int pos)
+{
+ int x, k;
+ uint64_t tag = 0, mask = 0;
+ const char *name = "";
+ int len = 0, has_prefix_key = 0, prefix_guard = 0, has_descend;
+ int label = 0;
+
+ /*
+ * Process a trie at the level given by pos. A single level covers
+ * one tag.
+ *
+ * A tag is a range of 8 characters [pos..pos+7] that is read as a
+ * single big endian word and tested against a ternary trie
+ * generated in code. In generated code the tag is stored in "w".
+ *
+ * Normally trailing data in a tag is not a problem
+ * because the difference between two keys happens in the middle and
+ * trailing data is not valid key material. When the difference is
+ * at the end, we get a lot of special cases to handle.
+ *
+ * Regardless, when we believe we have a match, a final check is
+ * made to ensure that the next character after the match is not a
+ * valid key character - for quoted keys a valid terminator is a
+ * quote, for unquoted keys it can be one of several characters -
+ * therefore quoted keys are faster to parse, even if they consume
+ * more space. The trie does not care about these details, the
+ * gen_match function handles this transparently for different
+ * symbol types.
+ */
+
+
+ /*
+ * If we have one or two keys that terminate in this tag, there is no
+ * need to do a branch test before matching exactly.
+ *
+ * We observe that `gen_prefix_trie` actually handles this
+ * case well, even though it was not designed for it.
+ */
+ if ((get_dict_suffix_len(&trie->dict[a], pos) == 0) &&
+ (b == a || (b == a + 1 && get_dict_suffix_len(&trie->dict[b], pos) == 0))) {
+ gen_prefix_trie(out, trie, a, b, pos, 0);
+ return;
+ }
+
+ /*
+ * Due to the trie's nature, we have a left, middle, and right range
+ * where the keys in the middle range all compare the same at the
+ * current trie level when masked against the shortest (and first)
+ * key in the middle range.
+ */
+ x = split_dict_left(trie->dict, a, b, pos);
+
+ if (x > a) {
+ /*
+ * This is a normal early branch with a key `a < x < b` such that
+ * any shared prefix ranges do not span x.
+ */
+ get_dict_tag(&trie->dict[x], pos, &tag, &mask, &name, &len);
+ println(out, "if (w < 0x%"PRIx64") { /* branch \"%.*s\" */", tag, len, name); indent();
+ gen_trie(out, trie, a, x - 1, pos);
+ unindent(); println(out, "} else { /* branch \"%.*s\" */", len, name); indent();
+ gen_trie(out, trie, x, b, pos);
+ unindent(); println(out, "} /* branch \"%.*s\" */", len, name);
+ return;
+ }
+ x = split_dict_right(trie->dict, a, b, pos);
+
+ /*
+ * [a .. x-1] is a non-empty sequence of prefixes,
+ * for example { a123, a1234, a12345 }.
+ * The keys might not terminate in the current tag. To find those
+ * that do, we will evaluate k such that:
+ * [a .. k-1] are prefixes that terminate in the current tag if any
+ * such exists.
+ * [x..b] are keys that are prefixes up to at least pos + 7 but
+ * do not terminate in the current tag.
+ * [k..x-1] are prefixes that do not terminate in the current tag.
+ * Note that they might not be prefixes when considering more than the
+ * current tag.
+ * The range [a .. x-1] can be generated with `gen_prefix_trie`.
+ *
+ * We generally have the form
+ *
+ * [a..b] =
+ * (a)<prefixes>, (k-1)<descend-prefix>, (k)<descend>, (x)<remainder>
+ *
+ * Where <prefixes> are keys that terminate at the current tag.
+ * <descend> are keys that have the prefixes as prefix but do not
+ * terminate at the current tag.
+ * <descend-prefix> is a single key that terminates exactly
+ * where the tag ends. If there are no descend keys it is part of
+ * prefixes, otherwise it is tested as a special case.
+ * <remainder> are any keys larger than the prefixes.
+ *
+ * The remainder keys cannot be tested before we are sure that no
+ * prefix is matching - at least no prefix that is not a
+ * descend-prefix. This is because less than comparisons are
+ * affected by trailing data within the tag caused by prefixes
+ * terminating early. Trailing data is not a problem if two keys are
+ * longer than the point where they differ even if they terminate
+ * within the current tag.
+ *
+ * Thus, if we have non-empty <descend> and non-empty <remainder>,
+ * the remainder must guard against any matches in <prefixes> but not
+ * against any matches in <descend>. If <descend> is empty and
+ * <prefixes> == <descend-prefix> a guard is also not needed.
+ */
+
+ /* Find first prefix that does not terminate at the current level, or x if absent */
+ k = split_dict_descend(trie->dict, a, x - 1, pos);
+ has_descend = k < x;
+
+ /* If we have a descend, process that in isolation. */
+ if (has_descend) {
+ has_prefix_key = k > a && get_dict_tag_len(&trie->dict[k - 1], pos) == 8;
+ get_dict_tag(&trie->dict[k], pos, &tag, &mask, &name, &len);
+ println(out, "if (w == 0x%"PRIx64") { /* descend \"%.*s\" */", tag, len, name); indent();
+ if (has_prefix_key) {
+ /* We have a key that terminates at the descend prefix. */
+ println(out, "/* descend prefix key \"%.*s\" */", len, name);
+ trie->gen_match(out, trie->ct, trie->dict[k - 1].data, trie->dict[k - 1].hint, 8);
+ println(out, "/* descend suffix \"%.*s\" */", len, name);
+ }
+ println(out, "buf += 8;");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, trie, k, x - 1, pos + 8);
+ if (has_prefix_key) {
+ unindent(); println(out, "} /* desend suffix \"%.*s\" */", len, name);
+ /* Here we move the <descend-prefix> key out of the <descend> range. */
+ --k;
+ }
+ unindent(); println(out, "} else { /* descend \"%.*s\" */", len, name); indent();
+ }
+ prefix_guard = a < k && x <= b;
+ if (prefix_guard) {
+ label = ++trie->label;
+ }
+ if (a < k) {
+ gen_prefix_trie(out, trie, a, k - 1, pos, label);
+ }
+ if (prefix_guard) {
+ /* All prefixes tested, but none matched. */
+ println(out, "goto endpfguard%d;", label);
+ margin();
+ println(out, "pfguard%d:", label);
+ unmargin();
+ }
+ if (x <= b) {
+ gen_trie(out, trie, x, b, pos);
+ } else if (a >= k) {
+ trie->gen_unmatched(out);
+ }
+ if (prefix_guard) {
+ margin();
+ println(out, "endpfguard%d:", label);
+ unmargin();
+ println(out, "(void)0;");
+ }
+ if (has_descend) {
+ unindent(); println(out, "} /* descend \"%.*s\" */", len, name);
+ }
+}
+
+
+/*
+ * Parsing symbolic constants:
+ *
+ * An enum parser parses the local symbols and translates them into
+ * numeric values.
+ *
+ * If a symbol wasn't matched, e.g. "Red", it might be matched with
+ * "Color.Red" but the enum parser does not handle this.
+ *
+ * Instead a scope parser maps each type in the scope to a call
+ * to an enum parser, e.g. "Color." maps to a color enum parser
+ * that understands "Red". If this also fails, a call is made
+ * to a global scope parser that maps a namespace to a local
+ * scope parser, for example "Graphics.Color.Red" first
+ * recognizes the namespace "Graphics." which may or may not
+ * be the same as the local scope tried earlier, then "Color."
+ * is matched and finally "Red".
+ *
+ * The scope and namespace parsers may cover extended namespaces from
+ * include files so each file calls into dependencies as necessary.
+ * This means the same scope can have multiple parsers and must
+ * therefore be name prefixed by the basename of the include file.
+ *
+ * The enums can only exist in a single file.
+ *
+ * The local scope is defined as the scope in which the consuming
+ * field's container is defined, so if Pen is a table in Graphics
+ * with a field named "ink" and the pen is parsed as
+ * { "ink": "Color.Red" }, then Color would be parsed in the
+ * Graphics scope. If ink was an enum of type Color, the enum
+ * parser would be tried first. If ink was, say, an integer
+ * type, it would not try an enum parse first but try the local
+ * scope, then the namespace scope.
+ *
+ * It is permitted to have multiple symbols in a string when
+ * the enum type has a flag attribute so values can be or'ed together.
+ * The parser does not attempt to validate this and will simply
+ * 'or' together multiple values after coercing each to the
+ * receiving field type: "Has.ink Has.shape Has.brush".
+ */
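+/*
+ * Putting the above together with the comment's own example (Pen, ink,
+ * Color, Graphics and Has are illustrative schema names, not anything
+ * this generator defines): all of the following JSON inputs resolve to
+ * the same field value, just through different parsers:
+ *
+ *     { "ink": "Red" }                    enum parser for Color
+ *     { "ink": "Color.Red" }              local scope parser for Graphics
+ *     { "ink": "Graphics.Color.Red" }     global scope parser
+ *
+ * and a flag enum may combine symbols: { "flags": "Has.ink Has.shape" }.
+ */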
+
+
+/*
+ * Used by scalar/enum/union_type table fields to look up symbolic
+ * constants in the same scope as the table was defined, thus avoiding
+ * a namespace prefix.
+ *
+ * The matched name then calls into the type specific parser which
+ * may be in a dependent file.
+ *
+ * Because each scope may be extended in dependent schema files
+ * we recreate the scope in full in each file.
+ */
+static void gen_local_scope_parser(void *context, fb_scope_t *scope)
+{
+ fb_output_t *out = context;
+ int n = 0;
+ trie_t trie;
+ fb_symbol_text_t scope_name;
+
+ fb_clear(trie);
+ fb_copy_scope(scope, scope_name);
+ if (((trie.dict = build_local_scope_dict(out->S, scope, &n)) == 0) && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return;
+ }
+ /* Not used for scopes. */
+ trie.ct = 0;
+ trie.type = local_scope_trie;
+ trie.gen_match = gen_scope_match;
+ trie.gen_unmatched = gen_scope_unmatched;
+ println(out, "static const char *%s_local_%sjson_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,",
+ out->S->basename, scope_name);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ if (n == 0) {
+ println(out, "/* Scope has no enum / union types to look up. */");
+ println(out, "return buf; /* unmatched; */");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "const char *unmatched = buf;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ println(out, "");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ }
+ println(out, "");
+ clear_dict(trie.dict);
+}
+
+/*
+ * This parses namespace prefixed types. Because scopes can be extended
+ * in dependent schema files, each file has its own global scope parser.
+ * The matched types call into type specific parsers that may be in
+ * a dependent file.
+ *
+ * When a local scope is also parsed, it should be tried before the
+ * global scope.
+ */
+static int gen_global_scope_parser(fb_output_t *out)
+{
+ int n = 0;
+ trie_t trie;
+ catalog_t catalog;
+
+ fb_clear(trie);
+ if (build_catalog(&catalog, out->S, 1, &out->S->root_schema->scope_index)) {
+ return -1;
+ }
+
+ if ((trie.dict = build_global_scope_dict(&catalog, &n)) == 0 && n > 0) {
+ clear_catalog(&catalog);
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ /* Not used for scopes. */
+ trie.ct = 0;
+ trie.type = global_scope_trie;
+ trie.gen_match = gen_scope_match;
+ trie.gen_unmatched = gen_scope_unmatched;
+ println(out, "static const char *%s_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", out->S->basename);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ if (n == 0) {
+ println(out, "/* Global scope has no enum / union types to look up. */");
+ println(out, "return buf; /* unmatched; */");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "const char *unmatched = buf;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ println(out, "");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ }
+ println(out, "");
+ clear_dict(trie.dict);
+ clear_catalog(&catalog);
+ return 0;
+}
+
+/*
+ * Constants have the form `"Red"` or `Red` but may also be part
+ * of a list of flags: `"Normal High Average"` or `Normal High
+ * Average`. `more` indicates more symbols follow.
+ *
+ * Returns input argument if there was no valid match,
+ * `end` on syntax error, and `more=1` if matched and
+ * there are more constants to parse.
+ * Applies the matched and coerced constant to `pval`
+ * with a binary `or` operation so `pval` must be initialized
+ * to 0 before the first constant in a list.
+ */
+static int gen_enum_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ int n = 0;
+ trie_t trie;
+
+ fb_clear(trie);
+ assert(ct->symbol.kind == fb_is_enum || ct->symbol.kind == fb_is_union);
+
+ if ((trie.dict = build_compound_dict(ct, &n)) == 0 && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ trie.ct = ct;
+ trie.type = enum_trie;
+ trie.gen_match = gen_enum_match;
+ trie.gen_unmatched = gen_enum_unmatched;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ println(out, "static const char *%s_parse_json_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", snt.text);
+ indent(); indent();
+ println(out, "int *value_sign, uint64_t *value, int *aggregate)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ if (n == 0) {
+ println(out, "/* Enum has no fields. */");
+ println(out, "*aggregate = 0;");
+ println(out, "return buf; /* unmatched; */");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "const char *unmatched = buf;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ println(out, "");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ }
+ println(out, "");
+ clear_dict(trie.dict);
+ return 0;
+}
+
+/*
+ * We do not check for duplicate settings or missing struct fields.
+ * Missing fields are zeroed.
+ *
+ * TODO: we should track nesting level because nested structs do not
+ * interact with the builder so the builder's level limit will not kick
+ * in. As long as we get input from our own parser we should, however,
+ * be reasonably safe as nesting is bounded.
+ */
+static int gen_struct_parser_inline(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ int n;
+ trie_t trie;
+
+ fb_clear(trie);
+ assert(ct->symbol.kind == fb_is_struct);
+ if ((trie.dict = build_compound_dict(ct, &n)) == 0 && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ trie.ct = ct;
+ trie.type = struct_trie;
+ trie.gen_match = gen_field_match;
+ trie.gen_unmatched = gen_field_unmatched;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_struct_inline(flatcc_json_parser_t *ctx, const char *buf, const char *end, void *struct_base)", snt.text);
+ println(out, "{"); indent();
+ println(out, "int more;");
+ if (n > 0) {
+ println(out, "flatcc_builder_ref_t ref;");
+ println(out, "void *pval;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ }
+ println(out, "");
+ println(out, "buf = flatcc_json_parser_object_start(ctx, buf, end, &more);");
+ println(out, "while (more) {"); indent();
+ if (n == 0) {
+ println(out, "/* Empty struct. */");
+ println(out, "buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);");
+ } else {
+ println(out, "buf = flatcc_json_parser_symbol_start(ctx, buf, end);");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ }
+ println(out, "buf = flatcc_json_parser_object_end(ctx, buf, end , &more);");
+ unindent(); println(out, "}");
+ println(out, "return buf;");
+ if (n > 0) {
+ /* Set runtime error if no other error was set already. */
+ margin();
+ println(out, "failed:");
+ unmargin();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ }
+ unindent(); println(out, "}");
+ println(out, "");
+ clear_dict(trie.dict);
+ return 0;
+}
+
+static int gen_struct_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ assert(ct->symbol.kind == fb_is_struct);
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_struct(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result)", snt.text);
+ println(out, "{"); indent();
+ println(out, "void *pval;");
+ println(out, "");
+ println(out, "*result = 0;");
+ println(out, "if (!(pval = flatcc_builder_start_struct(ctx->ctx, %"PRIu64", %"PRIu16"))) goto failed;",
+ (uint64_t)ct->size, (uint16_t)ct->align);
+ println(out, "buf = %s_parse_json_struct_inline(ctx, buf, end, pval);", snt.text);
+ println(out, "if (ctx->error || !(*result = flatcc_builder_end_struct(ctx->ctx))) goto failed;");
+ println(out, "return buf;");
+ margin();
+ println(out, "failed:");
+ unmargin();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ unindent(); println(out, "}");
+ println(out, "");
+ println(out, "static inline int %s_parse_json_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx, const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid)", snt.text);
+ println(out, "{"); indent();
+ println(out, "return flatcc_json_parser_struct_as_root(B, ctx, buf, bufsiz, flags, fid, %s_parse_json_struct);",
+ snt.text);
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static int gen_table_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ fb_member_t *member;
+ int first, i, n;
+ int is_union, is_required;
+ trie_t trie;
+
+ fb_clear(trie);
+ assert(ct->symbol.kind == fb_is_table);
+ if ((trie.dict = build_compound_dict(ct, &n)) == 0 && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ trie.ct = ct;
+ trie.type = table_trie;
+ trie.gen_match = gen_field_match;
+ trie.gen_unmatched = gen_field_unmatched;
+
+ trie.union_total = 0;
+ for (i = 0; i < n; ++i) {
+ trie.union_total += !!trie.dict[i].hint;
+ }
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result)", snt.text);
+ println(out, "{"); indent();
+ println(out, "int more;");
+
+ if (n > 0) {
+ println(out, "void *pval;");
+ println(out, "flatcc_builder_ref_t ref, *pref;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ }
+ if (trie.union_total) {
+ println(out, "size_t h_unions;");
+ }
+ println(out, "");
+ println(out, "*result = 0;");
+ println(out, "if (flatcc_builder_start_table(ctx->ctx, %"PRIu64")) goto failed;",
+ ct->count);
+ if (trie.union_total) {
+ println(out, "if (end == flatcc_json_parser_prepare_unions(ctx, buf, end, %"PRIu64", &h_unions)) goto failed;", (uint64_t)trie.union_total);
+ }
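+ /*
+ * Union fields are handled in two phases: prepare_unions reserves
+ * state for all union fields up front and finalize_unions resolves
+ * them once the object is fully parsed, presumably because the union
+ * type and value members may appear in any order in the JSON object.
+ */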
+ println(out, "buf = flatcc_json_parser_object_start(ctx, buf, end, &more);");
+ println(out, "while (more) {"); indent();
+ println(out, "buf = flatcc_json_parser_symbol_start(ctx, buf, end);");
+ if (n > 0) {
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ } else {
+ println(out, "/* Table has no fields. */");
+ println(out, "buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);");
+ }
+ println(out, "buf = flatcc_json_parser_object_end(ctx, buf, end, &more);");
+ unindent(); println(out, "}");
+ println(out, "if (ctx->error) goto failed;");
+ for (first = 1, i = 0; i < n; ++i) {
+ member = trie.dict[i].data;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ is_union = is_union_member(member);
+ is_required = member->metadata_flags & fb_f_required;
+ if (is_required) {
+ if (first) {
+ println(out, "if (!flatcc_builder_check_required_field(ctx->ctx, %"PRIu64")", member->id - !!is_union);
+ indent();
+ } else {
+ println(out, "|| !flatcc_builder_check_required_field(ctx->ctx, %"PRIu64")", member->id - !!is_union);
+ }
+ first = 0;
+ }
+ }
+ if (!first) {
+ unindent(); println(out, ") {"); indent();
+ println(out, "buf = flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_required);");
+ println(out, "goto failed;");
+ unindent(); println(out, "}");
+ }
+ if (trie.union_total) {
+ println(out, "buf = flatcc_json_parser_finalize_unions(ctx, buf, end, h_unions);");
+ }
+ println(out, "if (!(*result = flatcc_builder_end_table(ctx->ctx))) goto failed;");
+ println(out, "return buf;");
+ /* Set runtime error if no other error was set already. */
+ margin();
+ println(out, "failed:");
+ unmargin();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ unindent(); println(out, "}");
+ println(out, "");
+ println(out, "static inline int %s_parse_json_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx, const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid)", snt.text);
+ println(out, "{"); indent();
+ println(out, "return flatcc_json_parser_table_as_root(B, ctx, buf, bufsiz, flags, fid, %s_parse_json_table);",
+ snt.text);
+ unindent(); println(out, "}");
+ println(out, "");
+ clear_dict(trie.dict);
+ return 0;
+}
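+
+/*
+ * Usage sketch of the generated `_parse_json_as_root` wrapper (the table
+ * name `Monster` and the input `json` are illustrative only):
+ *
+ *     flatcc_builder_t builder;
+ *     flatcc_json_parser_t parser;
+ *     flatcc_builder_init(&builder);
+ *     int err = Monster_parse_json_as_root(&builder, &parser,
+ *             json, strlen(json), 0, Monster_file_identifier);
+ *
+ * where a zero return presumably indicates success and the finished
+ * buffer can then be extracted from the builder.
+ */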
+
+static int gen_union_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt, snref;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_union(flatcc_json_parser_t *ctx, const char *buf, const char *end, uint8_t type, flatcc_builder_ref_t *result)", snt.text);
+ println(out, "{"); indent();
+ println(out, "");
+ println(out, "*result = 0;");
+ println(out, "switch (type) {");
+ println(out, "case 0: /* NONE */"); indent();
+ println(out, "return flatcc_json_parser_none(ctx, buf, end);");
+ unindent();
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_missing:
+ /* NONE is of type vt_missing and already handled. */
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ println(out, "case %u: /* %.*s */", (unsigned)member->value.u, n, s); indent();
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ println(out, "buf = %s_parse_json_table(ctx, buf, end, result);", snref.text);
+ break;
+ case fb_is_struct:
+ println(out, "buf = %s_parse_json_struct(ctx, buf, end, result);", snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound union member type\n");
+ return -1;
+ }
+ println(out, "break;");
+ unindent();
+ continue;
+ case vt_string_type:
+ println(out, "case %u: /* %.*s */", (unsigned)member->value.u, n, s); indent();
+ println(out, "buf = flatcc_json_parser_build_string(ctx, buf, end, result);");
+ println(out, "break;");
+ unindent();
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected union member type\n");
+ return -1;
+ }
+ }
+ /* Unknown union, but not an error if we allow schema forwarding. */
+ println(out, "default:"); indent();
+ println(out, "if (!(ctx->flags & flatcc_json_parser_f_skip_unknown)) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_union);");
+ unindent(); println(out, "} else {"); indent();
+ println(out, "return flatcc_json_parser_generic_json(ctx, buf, end);");
+ unindent(); println(out, "}");
+ unindent(); println(out, "}");
+ println(out, "if (ctx->error) return buf;");
+ println(out, "if (!*result) {");
+ indent(); println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ unindent(); println(out, "}");
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static int gen_union_accept_type(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt, snref;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+ println(out, "static int %s_json_union_accept_type(uint8_t type)", snt.text);
+ println(out, "{"); indent();
+ println(out, "switch (type) {");
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ if (member->type.type == vt_missing) {
+ println(out, "case 0: return 1; /* NONE */");
+ continue;
+ }
+ println(out, "case %u: return 1; /* %.*s */", (unsigned)member->value.u, n, s);
+ }
+ /* Unknown union, but not an error if we allow schema forwarding. */
+ println(out, "default: return 0;"); indent();
+ unindent(); println(out, "}");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static void gen_local_scope_prototype(void *context, fb_scope_t *scope)
+{
+ fb_output_t *out = context;
+ fb_symbol_text_t scope_name;
+
+ fb_copy_scope(scope, scope_name);
+
+ println(out, "static const char *%s_local_%sjson_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,",
+ out->S->basename, scope_name);
+ println(out, "int *value_type, uint64_t *value, int *aggregate);");
+}
+
+static int gen_root_table_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ println(out, "static int %s_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,", out->S->basename);
+ indent(); indent();
+ println(out, "const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ println(out, "flatcc_json_parser_t parser;");
+ println(out, "flatcc_builder_ref_t root;");
+ println(out, "");
+ println(out, "ctx = ctx ? ctx : &parser;");
+ println(out, "flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);");
+ if (out->S->file_identifier.type == vt_string) {
+ println(out, "if (flatcc_builder_start_buffer(B, \"%.*s\", 0, 0)) return -1;",
+ out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ println(out, "if (flatcc_builder_start_buffer(B, 0, 0, 0)) return -1;");
+ }
+ println(out, "%s_parse_json_table(ctx, buf, buf + bufsiz, &root);", snt.text);
+ println(out, "if (ctx->error) {"); indent();
+ println(out, "return ctx->error;");
+ unindent(); println(out, "}");
+ println(out, "if (!flatcc_builder_end_buffer(B, root)) return -1;");
+ println(out, "ctx->end_loc = buf;");
+ println(out, "return 0;");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static int gen_root_struct_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ println(out, "static int %s_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,", out->S->basename);
+ indent(); indent();
+ println(out, "const char *buf, size_t bufsiz, int flags)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ println(out, "flatcc_json_parser_t ctx_;");
+ println(out, "flatcc_builder_ref_t root;");
+ println(out, "");
+ println(out, "ctx = ctx ? ctx : &ctx_;");
+ println(out, "flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);");
+ if (out->S->file_identifier.type == vt_string) {
+ println(out, "if (flatcc_builder_start_buffer(B, \"%.*s\", 0, 0)) return -1;",
+ out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ println(out, "if (flatcc_builder_start_buffer(B, 0, 0, 0)) return -1;");
+ }
+ println(out, "buf = %s_parse_json_struct(ctx, buf, buf + bufsiz, &root);", snt.text);
+ println(out, "if (ctx->error) {"); indent();
+ println(out, "return ctx->error;");
+ unindent(); println(out, "}");
+ println(out, "if (!flatcc_builder_end_buffer(B, root)) return -1;");
+ println(out, "ctx->end_loc = buf;");
+ println(out, "return 0;");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+
+static int gen_root_parser(fb_output_t *out)
+{
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ if (!root_type) {
+ return 0;
+ }
+ if (root_type) {
+ switch (root_type->kind) {
+ case fb_is_table:
+ return gen_root_table_parser(out, (fb_compound_type_t *)root_type);
+ case fb_is_struct:
+ return gen_root_struct_parser(out, (fb_compound_type_t *)root_type);
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int gen_json_parser_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ fb_clear(snt);
+
+ if (root_type)
+ switch (root_type->kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ println(out, "/*");
+ println(out, " * Parses the default root table or struct of the schema and constructs a FlatBuffer.");
+ println(out, " *");
+ println(out, " * Builder `B` must be initialized. `ctx` can be null but will hold");
+ println(out, " * hold detailed error info on return when available.");
+ println(out, " * Returns 0 on success, or error code.");
+ println(out, " * `flags` : 0 by default, `flatcc_json_parser_f_skip_unknown` silently");
+ println(out, " * ignores unknown table and structs fields, and union types.");
+ println(out, " */");
+ println(out, "static int %s_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,",
+ out->S->basename);
+ indent(); indent();
+ println(out, "const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags);");
+ unindent(); unindent();
+ println(out, "");
+ break;
+ default:
+ break;
+ }
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_union(flatcc_json_parser_t *ctx, const char *buf, const char *end, uint8_t type, flatcc_builder_ref_t *pref);", snt.text);
+ println(out, "static int %s_json_union_accept_type(uint8_t type);", snt.text);
+ /* A union also has an enum parser to get the type. */
+ println(out, "static const char *%s_parse_json_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", snt.text);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate);");
+ unindent(); unindent();
+ break;
+ case fb_is_struct:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_struct_inline(flatcc_json_parser_t *ctx, const char *buf, const char *end, void *struct_base);", snt.text);
+ println(out, "static const char *%s_parse_json_struct(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result);", snt.text);
+ break;
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result);", snt.text);
+ break;
+ case fb_is_enum:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", snt.text);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate);", snt.text);
+ unindent(); unindent();
+ break;
+ }
+ }
+ fb_scope_table_visit(&out->S->root_schema->scope_index, gen_local_scope_prototype, out);
+ println(out, "static const char *%s_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", out->S->basename);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate);");
+ unindent(); unindent();
+ println(out, "");
+ return 0;
+}
+
+static int gen_json_parsers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_union_parser(out, (fb_compound_type_t *)sym);
+ gen_union_accept_type(out, (fb_compound_type_t *)sym);
+ gen_enum_parser(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_struct:
+ gen_struct_parser_inline(out, (fb_compound_type_t *)sym);
+ gen_struct_parser(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_table:
+ gen_table_parser(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_enum:
+ gen_enum_parser(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ fb_scope_table_visit(&out->S->root_schema->scope_index, gen_local_scope_parser, out);
+ gen_global_scope_parser(out);
+ gen_root_parser(out);
+ return 0;
+}
+
+int fb_gen_c_json_parser(fb_output_t *out)
+{
+ gen_json_parser_pretext(out);
+ gen_json_parser_prototypes(out);
+ gen_json_parsers(out);
+ gen_json_parser_footer(out);
+ return 0;
+}
diff --git a/src/compiler/codegen_c_json_printer.c b/src/compiler/codegen_c_json_printer.c
new file mode 100644
index 0000000..efc4c3d
--- /dev/null
+++ b/src/compiler/codegen_c_json_printer.c
@@ -0,0 +1,732 @@
+#include "codegen_c.h"
+#include "flatcc/flatcc_types.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+static int gen_json_printer_pretext(fb_output_t *out)
+{
+ fprintf(out->fp,
+ "#ifndef %s_JSON_PRINTER_H\n"
+ "#define %s_JSON_PRINTER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "#include \"flatcc/flatcc_json_printer.h\"\n");
+ fb_gen_c_includes(out, "_json_printer.h", "_JSON_PRINTER_H");
+ gen_prologue(out);
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_json_printer_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_JSON_PRINTER_H */\n",
+ out->S->basenameup);
+ return 0;
+}
+
+static int gen_json_printer_enum(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ const char *tp, *tn, *ns;
+ int bit_flags;
+ uint64_t mask = 0;
+ char *constwrap = "";
+ char *ut = "";
+ fb_scalar_type_t st = ct->type.st;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+ tp = scalar_type_prefix(st);
+ tn = scalar_type_name(st);
+ ns = scalar_type_ns(st, out->nsc);
+
+ bit_flags = !!(ct->metadata_flags & fb_f_bit_flags);
+ if (bit_flags) {
+ switch (ct->size) {
+ case 1:
+ mask = UINT8_MAX, constwrap = "UINT8_C", ut = "uint8_t";
+ break;
+ case 2:
+ mask = UINT16_MAX, constwrap = "UINT16_C", ut = "uint16_t";
+ break;
+ case 4:
+ mask = UINT32_MAX, constwrap = "UINT32_C", ut = "uint32_t";
+ break;
+ default:
+ mask = UINT64_MAX, constwrap = "UINT64_C", ut = "uint64_t";
+ break;
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->value.type) {
+ case vt_uint:
+ mask &= ~(uint64_t)member->value.u;
+ break;
+ case vt_int:
+ mask &= ~(uint64_t)member->value.i;
+ break;
+ case vt_bool:
+ mask &= ~(uint64_t)member->value.b;
+ break;
+ }
+ }
+ }
+
+ fprintf(out->fp,
+ "static void %s_print_json_enum(flatcc_json_printer_t *ctx, %s%s v)\n{\n",
+ snt.text, ns, tn);
+ if (bit_flags) {
+ if (strcmp(ut, tn)) {
+ fprintf(out->fp, " %s x = (%s)v;\n", ut, ut);
+ } else {
+ fprintf(out->fp, " %s x = v;\n", ut);
+ }
+ fprintf(out->fp,
+ " int multiple = 0 != (x & (x - 1));\n"
+ " int i = 0;\n");
+
+ fprintf(out->fp, "\n");
+ /*
+ * If the value is not entirely within the known bit flags, print as
+ * a number.
+ */
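+ /*
+ * Worked sketch: for a hypothetical uint8 bit-flags enum with members
+ * 1, 2 and 4 the mask computed above becomes 0xf8, so the emitted
+ * test is roughly
+ *
+ *     if ((x & UINT8_C(0xf8)) || x == 0) { <print as number>; return; }
+ *
+ * e.g. 9 (unknown bit 0x08 set) or 0 prints as a plain number, while
+ * 3 falls through and prints as flag names.
+ */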
+ if (mask) {
+ fprintf(out->fp,
+ " if ((x & %s(0x%"PRIx64")) || x == 0) {\n"
+ " flatcc_json_printer_%s(ctx, v);\n"
+ " return;\n"
+ " }\n",
+ constwrap, mask, tp);
+ }
+ /*
+ * Test if multiple bits are set. We may have a configuration option
+ * that requires multiple flags to be quoted like `color: "Red Green"`
+ * but unquoted if just a single value like `color: Green`.
+ *
+ * The index `i` is used to add space separators much like an
+ * index is provided for struct members to handle commas.
+ */
+ fprintf(out->fp, " flatcc_json_printer_delimit_enum_flags(ctx, multiple);\n");
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->value.type) {
+ case vt_uint:
+ fprintf(out->fp, " if (x & %s(0x%"PRIx64")) flatcc_json_printer_enum_flag(ctx, i++, \"%.*s\", %ld);\n",
+ constwrap, member->value.u, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_int:
+ fprintf(out->fp, " if (x & %s(0x%"PRIx64")) flatcc_json_printer_enum_flag(ctx, i++, \"%.*s\", %ld);\n",
+ constwrap, (uint64_t)member->value.i, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_bool:
+ fprintf(out->fp, " if (x & %s(0x%"PRIx64")) flatcc_json_printer_enum_flag(ctx, i++, \"%.*s\", %ld);\n",
+ constwrap, (uint64_t)member->value.b, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected value type for enum json_print");
+ break;
+ }
+ }
+ fprintf(out->fp, " flatcc_json_printer_delimit_enum_flags(ctx, multiple);\n");
+ } else {
+ fprintf(out->fp, "\n switch (v) {\n");
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->value.type) {
+ case vt_uint:
+ fprintf(out->fp, " case %s(%"PRIu64"): flatcc_json_printer_enum(ctx, \"%.*s\", %ld); break;\n",
+ constwrap, member->value.u, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_int:
+ fprintf(out->fp, " case %s(%"PRId64"): flatcc_json_printer_enum(ctx, \"%.*s\", %ld); break;\n",
+ constwrap, member->value.i, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_bool:
+ fprintf(out->fp, " case %s(%u): flatcc_json_printer_enum(ctx, \"%.*s\", %ld); break;\n",
+ constwrap, member->value.b, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected value type for enum json_print");
+ break;
+ }
+ }
+ fprintf(out->fp,
+ " default: flatcc_json_printer_%s(ctx, v); break;\n"
+ " }\n",
+ tp);
+ }
+ fprintf(out->fp, "}\n\n");
+ return 0;
+}
+
+static int gen_json_printer_union_type(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_print_json_union_type(flatcc_json_printer_t *ctx, flatbuffers_utype_t type)\n"
+ "{\n switch (type) {\n",
+ snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->type.type == vt_missing) {
+ continue;
+ }
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_enum(ctx, \"%.*s\", %ld);\n"
+ " break;\n",
+ (unsigned)member->value.u, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ }
+ fprintf(out->fp,
+ " default:\n"
+ " flatcc_json_printer_enum(ctx, \"NONE\", 4);\n"
+ " break;\n");
+ fprintf(out->fp,
+ " }\n}\n\n");
+ return 0;
+}
+
+static int gen_json_printer_union_member(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_print_json_union(flatcc_json_printer_t *ctx, flatcc_json_printer_union_descriptor_t *ud)\n"
+ "{\n switch (ud->type) {\n",
+ snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_missing:
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_union_table(ctx, ud, %s_print_json_table);\n"
+ " break;\n",
+ (unsigned)member->value.u, snref.text);
+ continue;
+ case fb_is_struct:
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_union_struct(ctx, ud, %s_print_json_struct);\n"
+ " break;\n",
+ (unsigned)member->value.u, snref.text);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected union type\n");
+ return -1;
+ }
+ case vt_string_type:
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_union_string(ctx, ud);\n"
+ " break;\n",
+ (unsigned)member->value.u);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected union type\n");
+ return -1;
+ }
+ }
+ fprintf(out->fp,
+ " default:\n"
+ " break;\n");
+ fprintf(out->fp,
+ " }\n}\n\n");
+ return 0;
+}
+
+static int gen_json_printer_union(fb_output_t *out, fb_compound_type_t *ct)
+{
+ gen_json_printer_union_type(out, ct);
+ gen_json_printer_union_member(out, ct);
+ return 0;
+}
+
+static int gen_json_printer_struct(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int index = 0;
+ const char *tp;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_print_json_struct(flatcc_json_printer_t *ctx, const void *p)\n"
+ "{\n",
+ snt.text);
+ for (sym = ct->members; sym; ++index, sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tp = scalar_type_prefix(member->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_fixed_array_type:
+ tp = scalar_type_prefix(member->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_array_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %d);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, member->type.len);
+ break;
+ case vt_fixed_array_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(out->fp,
+ " flatcc_json_printer_%s_enum_array_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %d, %s_print_json_enum);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, member->type.len, snref.text);
+ break;
+#else
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_array_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %d);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, member->type.len);
+ break;
+#endif
+ case fb_is_struct:
+ fprintf(out->fp,
+ " flatcc_json_printer_embedded_struct_array_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %"PRIu64", %"PRIu64", %s_print_json_struct);\n",
+ index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len,
+ (uint64_t)member->type.ct->size, (uint64_t)member->type.len, snref.text);
+ }
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(out->fp,
+ " flatcc_json_printer_%s_enum_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %s_print_json_enum);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+#else
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+#endif
+ case fb_is_struct:
+ fprintf(out->fp,
+ " flatcc_json_printer_embedded_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %s_print_json_struct);\n",
+ index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ }
+ break;
+ }
+ }
+ fprintf(out->fp, "}\n\n");
+ fprintf(out->fp,
+ "static inline int %s_print_json_as_root(flatcc_json_printer_t *ctx, const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_json_printer_struct_as_root(ctx, buf, bufsiz, fid, %s_print_json_struct);\n}\n\n",
+ snt.text, snt.text);
+ return 0;
+}
+
+static int gen_json_printer_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ const char *tp;
+ int is_optional;
+ int ret = 0;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ /* Fields are printed in field id order for consistency across schema versions. */
+ fprintf(out->fp,
+ "static void %s_print_json_table(flatcc_json_printer_t *ctx, flatcc_json_printer_table_descriptor_t *td)\n"
+ "{",
+ snt.text);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ sym = &member->symbol;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ is_optional = !!(member->flags & fb_fm_optional);
+ fprintf(out->fp, "\n ");
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tp = scalar_type_prefix(member->type.st);
+ if (is_optional) {
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_optional_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ } else {
+ fb_literal_t literal;
+ if (!print_literal(member->type.st, &member->value, literal)) return -1;
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, literal);
+ }
+ break;
+ case vt_vector_type:
+ if (member->metadata_flags & (fb_f_base64 | fb_f_base64url)) {
+ fprintf(out->fp,
+ "flatcc_json_printer_uint8_vector_base64_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %u);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len,
+ !(member->metadata_flags & fb_f_base64));
+ } else if (member->nest) {
+ fb_compound_name((fb_compound_type_t *)&member->nest->symbol, &snref);
+ if (member->nest->symbol.kind == fb_is_table) {
+ /*
+ * Always set fid to 0 since it is difficult to know what is right.
+ * We do know the type from the field attribute.
+ */
+ fprintf(out->fp,
+ "flatcc_json_printer_table_as_nested_root(ctx, td, %"PRIu64", \"%.*s\", %ld, 0, %s_print_json_table);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ } else {
+ /*
+ * Always set fid to 0 since it is difficult to know what is right.
+ * We do know the type from the field attribute.
+ */
+ fprintf(out->fp,
+ "flatcc_json_printer_struct_as_nested_root(ctx, td, %"PRIu64", \"%.*s\", %ld, 0, %s_print_json_struct);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ }
+ } else {
+ tp = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "flatcc_json_printer_string_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "flatcc_json_printer_string_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ tp = scalar_type_prefix(member->type.ct->type.st);
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ if (is_optional) {
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_enum_optional_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_enum);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ } else {
+ fb_literal_t literal;
+ if (!print_literal(member->type.ct->type.st, &member->value, literal)) return -1;
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_enum_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s, %s_print_json_enum);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, literal, snref.text);
+ }
+#else
+ if (is_optional) {
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_optional_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ } else {
+ fb_literal_t literal;
+ if (!print_literal(member->type.ct->type.st, &member->value, literal)) return -1;
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, literal);
+ }
+#endif
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_json_printer_struct_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_struct);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_json_printer_table_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_table);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_json_printer_union_field(ctx, td, %"PRIu64", \"%.*s\", %ld, "
+ "%s_print_json_union_type, %s_print_json_union);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type for table json_print");
+ goto fail;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_json_printer_table_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_table);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ case fb_is_enum:
+ tp = scalar_type_prefix(member->type.ct->type.st);
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_enum_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_enum);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+#else
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+#endif
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_json_printer_struct_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %"PRIu64", %s_print_json_struct);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, (uint64_t)member->size, snref.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_json_printer_union_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, "
+ "%s_print_json_union_type, %s_print_json_union);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text, snref.text);
+ break;
+
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type for table json_print");
+ goto fail;
+ }
+ break;
+ }
+ }
+ fprintf(out->fp, "\n}\n\n");
+ fprintf(out->fp,
+ "static inline int %s_print_json_as_root(flatcc_json_printer_t *ctx, const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_json_printer_table_as_root(ctx, buf, bufsiz, fid, %s_print_json_table);\n}\n\n",
+ snt.text, snt.text);
+done:
+ return ret;
+fail:
+ ret = -1;
+ goto done;
+}
+
+/*
+ * Only tables are mutually recursive. Structs are sorted and unions are
+ * defined earlier, depending on the table prototypes.
+ */
+static int gen_json_printer_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ fb_clear(snt);
+
+ if (root_type)
+ switch (root_type->kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ fprintf(out->fp,
+ "/*\n"
+ " * Prints the default root table or struct from a buffer which must have\n"
+ " * the schema declared file identifier, if any. It is also possible to\n"
+ " * call the type specific `print_json_as_root` function wich accepts an\n"
+ " * optional identifier (or 0) as argument. The printer `ctx` object must\n"
+ " * be initialized with the appropriate output type, or it can be 0 which\n"
+ " * defaults to stdout. NOTE: `ctx` is not generally allowed to be null, only\n"
+ " * here for a simplified interface.\n"
+ " */\n");
+ fprintf(out->fp,
+ "static int %s_print_json(flatcc_json_printer_t *ctx, const char *buf, size_t bufsiz);\n\n",
+ out->S->basename);
+ break;
+ default:
+ break;
+ }
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static void %s_print_json_union_type(flatcc_json_printer_t *ctx, flatbuffers_utype_t type);\n"
+ "static void %s_print_json_union(flatcc_json_printer_t *ctx, flatcc_json_printer_union_descriptor_t *ud);\n",
+ snt.text, snt.text);
+ break;
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static void %s_print_json_table(flatcc_json_printer_t *ctx, flatcc_json_printer_table_descriptor_t *td);\n",
+ snt.text);
+ break;
+ case fb_is_struct:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static void %s_print_json_struct(flatcc_json_printer_t *ctx, const void *p);\n",
+ snt.text);
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_json_printer_enums(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ gen_json_printer_enum(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_json_printer_unions(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_json_printer_union(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_json_printer_structs(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ gen_json_printer_struct(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_json_printer_tables(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_json_printer_table(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+/* Same for structs and tables. */
+static int gen_root_type_printer(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static int %s_print_json(flatcc_json_printer_t *ctx, const char *buf, size_t bufsiz)\n",
+ out->S->basename);
+ fprintf(out->fp,
+ "{\n"
+ " flatcc_json_printer_t printer;\n"
+ "\n"
+ " if (ctx == 0) {\n"
+ " ctx = &printer;\n"
+ " flatcc_json_printer_init(ctx, 0);\n"
+ " }\n"
+ " return %s_print_json_as_root(ctx, buf, bufsiz, ",
+ snt.text);
+ if (out->S->file_identifier.type == vt_string) {
+ fprintf(out->fp,
+ "\"%.*s\");\n",
+ out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ fprintf(out->fp,
+ "0);");
+ }
+ fprintf(out->fp,
+ "}\n\n");
+ return 0;
+}
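+
+/*
+ * Usage sketch of the generated root printer (the `myschema` basename is
+ * illustrative): passing 0 for `ctx` prints to stdout.
+ *
+ *     myschema_print_json(0, (const char *)buf, bufsiz);
+ */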
+
+static int gen_json_root_printer(fb_output_t *out)
+{
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ if (!root_type) {
+ return 0;
+ }
+ if (root_type) {
+ switch (root_type->kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ return gen_root_type_printer(out, (fb_compound_type_t *)root_type);
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+int fb_gen_c_json_printer(fb_output_t *out)
+{
+ gen_json_printer_pretext(out);
+ gen_json_printer_prototypes(out);
+ gen_json_printer_enums(out);
+ gen_json_printer_unions(out);
+ gen_json_printer_structs(out);
+ gen_json_printer_tables(out);
+ gen_json_root_printer(out);
+ gen_json_printer_footer(out);
+ return 0;
+}
diff --git a/src/compiler/codegen_c_reader.c b/src/compiler/codegen_c_reader.c
new file mode 100644
index 0000000..6de0f21
--- /dev/null
+++ b/src/compiler/codegen_c_reader.c
@@ -0,0 +1,1928 @@
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "codegen_c.h"
+#include "codegen_c_sort.h"
+
+static inline int match_kw_identifier(fb_symbol_t *sym)
+{
+ return (sym->ident->len == 10 &&
+ memcmp(sym->ident->text, "identifier", 10) == 0);
+}
+
+/*
+ * Use of file identifiers for undeclared roots is fuzzy, but we need an
+ * identifier for all, so we use the one defined for the current schema
+ * file and allow the user to override. This avoids tedious runtime file
+ * id arguments to all create calls.
+ *
+ * As later addition to FlatBuffers, type hashes may replace file
+ * identifiers when explicitly stated. These are FNV-1a hashes of the
+ * fully qualified type name (dot separated).
+ *
+ * We generate the type hash both as a native integer constants for use
+ * in switch statements, and encoded as a little endian C string for use
+ * as a file identifier.
+ */
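+/*
+ * Sketch of the emitted macros for a made-up type hash 0x330ef481 on a
+ * type `MyGame_Monster` in the default namespace:
+ *
+ *     #define MyGame_Monster_type_hash ((flatbuffers_thash_t)0x330ef481)
+ *     #define MyGame_Monster_type_identifier "\x81\xf4\x0e\x33"
+ *
+ * i.e. the identifier string is the same hash in little-endian byte order.
+ */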
+static void print_type_identifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ uint8_t buf[17];
+ uint8_t *p;
+ uint8_t x;
+ int i;
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+ const char *name;
+ uint32_t type_hash;
+ int conflict = 0;
+ fb_symbol_t *sym;
+ const char *file_identifier;
+ int file_identifier_len;
+ const char *quote;
+
+ fb_clear(snt);
+
+ fb_compound_name(ct, &snt);
+ name = snt.text;
+ type_hash = ct->type_hash;
+
+ /*
+ * It's not practical to detect all possible name conflicts, but
+ * 'identifier' is common enough to require special handling.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ if (match_kw_identifier(sym)) {
+ conflict = 1;
+ break;
+ }
+ }
+ if (out->S->file_identifier.type == vt_string) {
+ quote = "\"";
+ file_identifier = out->S->file_identifier.s.s;
+ file_identifier_len = out->S->file_identifier.s.len;
+ } else {
+ quote = "";
+ file_identifier = "0";
+ file_identifier_len = 1;
+ }
+ fprintf(out->fp,
+ "#ifndef %s_file_identifier\n"
+ "#define %s_file_identifier %s%.*s%s\n"
+ "#endif\n",
+ name, name, quote, file_identifier_len, file_identifier, quote);
+ if (!conflict) {
+ /* For backwards compatibility. */
+ fprintf(out->fp,
+ "/* deprecated, use %s_file_identifier */\n"
+ "#ifndef %s_identifier\n"
+ "#define %s_identifier %s%.*s%s\n"
+ "#endif\n",
+ name, name, name, quote, file_identifier_len, file_identifier, quote);
+ }
+ fprintf(out->fp,
+ "#define %s_type_hash ((%sthash_t)0x%lx)\n",
+ name, nsc, (unsigned long)(type_hash));
+ p = buf;
+ i = 4;
+ while (i--) {
+ *p++ = '\\';
+ *p++ = 'x';
+ x = type_hash & 0x0f;
+ x += x > 9 ? 'a' - 10 : '0';
+ type_hash >>= 4;
+ p[1] = x;
+ x = type_hash & 0x0f;
+ x += x > 9 ? 'a' - 10 : '0';
+ type_hash >>= 4;
+ p[0] = x;
+ p += 2;
+ }
+ *p = '\0';
+ fprintf(out->fp,
+ "#define %s_type_identifier \"%s\"\n",
+ name, buf);
+}
+
+static void print_file_extension(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ const char *name;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ name = snt.text;
+
+ if (out->S->file_extension.type == vt_string) {
+ fprintf(out->fp,
+ "#ifndef %s_file_extension\n"
+ "#define %s_file_extension \"%.*s\"\n"
+ "#endif\n",
+ name, name, out->S->file_extension.s.len, out->S->file_extension.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %s_file_extension\n"
+ "#define %s_file_extension \"%s\"\n"
+ "#endif\n",
+ name, name, out->opts->default_bin_ext);
+ }
+}
+
+/* Finds first occurrence of matching key when vector is sorted on the named field. */
+static void gen_find(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ /*
+ * E: Element accessor (elem = E(vector, index)).
+ * L: Length accessor (length = L(vector)).
+ * A: Field accessor (or the identity function), result must match the diff function D's first arg.
+ * V: The vector to search (assuming sorted).
+ * T: The scalar, enum or string key type, (either the element, or a field of the element).
+ * K: The search key.
+ * Kn: optional key length so external strings do not have to be zero terminated.
+ * D: the diff function D(v, K, Kn) :: v - <K, Kn>
+ *
+ * returns index (0..len - 1), or not_found (-1).
+ */
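+ /*
+ * Usage sketch of the generated wrappers (names are illustrative): for
+ * a table `Monster` keyed on a string field `name`:
+ *
+ *     size_t i = Monster_vec_find_by_name(monsters, "Orc");
+ *     if (i != flatbuffers_not_found) { ... }
+ *
+ * This binary-searches the vector, so it is only meaningful when the
+ * vector was built sorted on that field.
+ */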
+ fprintf(out->fp,
+ "#include <string.h>\n"
+ "static const size_t %snot_found = (size_t)-1;\n"
+ "static const size_t %send = (size_t)-1;\n"
+ "#define __%sidentity(n) (n)\n"
+ "#define __%smin(a, b) ((a) < (b) ? (a) : (b))\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "/* Subtraction doesn't work for unsigned types. */\n"
+ "#define __%sscalar_cmp(x, y, n) ((x) < (y) ? -1 : (x) > (y))\n"
+ "static inline int __%sstring_n_cmp(%sstring_t v, const char *s, size_t n)\n"
+ "{ size_t nv = %sstring_len(v); int x = strncmp(v, s, nv < n ? nv : n);\n"
+ " return x != 0 ? x : nv < n ? -1 : nv > n; }\n"
+ "/* `n` arg unused, but needed by string find macro expansion. */\n"
+ "static inline int __%sstring_cmp(%sstring_t v, const char *s, size_t n) { (void)n; return strcmp(v, s); }\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "/* A = identity if searching scalar vectors rather than key fields. */\n"
+ "/* Returns lowest matching index or not_found. */\n"
+ "#define __%sfind_by_field(A, V, E, L, K, Kn, T, D)\\\n"
+ "{ T v__tmp; size_t a__tmp = 0, b__tmp, m__tmp; if (!(b__tmp = L(V))) { return %snot_found; }\\\n"
+ " --b__tmp;\\\n"
+ " while (a__tmp < b__tmp) {\\\n"
+ " m__tmp = a__tmp + ((b__tmp - a__tmp) >> 1);\\\n"
+ " v__tmp = A(E(V, m__tmp));\\\n"
+ " if ((D(v__tmp, (K), (Kn))) < 0) {\\\n"
+ " a__tmp = m__tmp + 1;\\\n"
+ " } else {\\\n"
+ " b__tmp = m__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " if (a__tmp == b__tmp) {\\\n"
+ " v__tmp = A(E(V, a__tmp));\\\n"
+ " if (D(v__tmp, (K), (Kn)) == 0) {\\\n"
+ " return a__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " return %snot_found;\\\n"
+ "}\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sfind_by_scalar_field(A, V, E, L, K, T)\\\n"
+ "__%sfind_by_field(A, V, E, L, K, 0, T, __%sscalar_cmp)\n"
+ "#define __%sfind_by_string_field(A, V, E, L, K)\\\n"
+ "__%sfind_by_field(A, V, E, L, K, 0, %sstring_t, __%sstring_cmp)\n"
+ "#define __%sfind_by_string_n_field(A, V, E, L, K, Kn)\\\n"
+ "__%sfind_by_field(A, V, E, L, K, Kn, %sstring_t, __%sstring_n_cmp)\n",
+ nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_find_by_scalar_field(N, NK, TK)\\\n"
+ "static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "__%sfind_by_scalar_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, TK)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_find(N, T)\\\n"
+ "static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%sfind_by_scalar_field(__%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_find_by_string_field(N, NK) \\\n"
+ "/* Note: find only works on vectors sorted by this field. */\\\n"
+ "static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "__%sfind_by_string_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_find_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%sfind_by_string_n_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_find_by_scalar_field(N, NK, TK)\\\n"
+ "static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_find_by_ ## NK(vec__tmp, key__tmp); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_find_by_string_field(N, NK) \\\n"
+ "static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_find_by_ ## NK(vec__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_find_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_find_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\n",
+ nsc);
+}
+
+static void gen_union(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ fprintf(out->fp,
+ "typedef struct %sunion {\n"
+ " %sunion_type_t type;\n"
+ " %sgeneric_t value;\n"
+ "} %sunion_t;\n"
+ "typedef struct %sunion_vec {\n"
+ " const %sunion_type_t *type;\n"
+ " const %suoffset_t *value;\n"
+ "} %sunion_vec_t;\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "typedef struct %smutable_union {\n"
+ " %sunion_type_t type;\n"
+ " %smutable_generic_t value;\n"
+ "} %smutable_union_t;\n"
+ "typedef struct %smutable_union_vec {\n"
+ " %sunion_type_t *type;\n"
+ " %suoffset_t *value;\n"
+ "} %smutable_union_vec_t;\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline %smutable_union_t %smutable_union_cast(%sunion_t u__tmp)\\\n"
+ "{ %smutable_union_t mu = { u__tmp.type, (%smutable_generic_t)u__tmp.value };\\\n"
+ " return mu; }\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline %smutable_union_vec_t %smutable_union_vec_cast(%sunion_vec_t uv__tmp)\\\n"
+ "{ %smutable_union_vec_t muv =\\\n"
+ " { (%sunion_type_t *)uv__tmp.type, (%suoffset_t *)uv__tmp.value }; return muv; }\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sunion_type_field(ID, t)\\\n"
+ "{\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " return offset__tmp ? __%sread_scalar_at_byteoffset(__%sutype, t, offset__tmp) : 0;\\\n"
+ "}\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline %sstring_t %sstring_cast_from_union(const %sunion_t u__tmp)\\\n"
+ "{ return %sstring_cast_from_generic(u__tmp.value); }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union_field(NS, ID, N, NK, T, r)\\\n"
+ "static inline T ## _union_type_t N ## _ ## NK ## _type_get(N ## _table_t t__tmp)\\\n"
+ "__## NS ## union_type_field(((ID) - 1), t__tmp)\\\n"
+ "static inline NS ## generic_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\\\n", nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "static inline T ## _union_type_t N ## _ ## NK ## _type(N ## _table_t t__tmp)\\\n"
+ "__## NS ## union_type_field(((ID) - 1), t__tmp)\\\n"
+ "static inline NS ## generic_t N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\\\n");
+ }
+ fprintf(out->fp,
+ "static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__## NS ## field_present(ID, t__tmp)\\\n"
+ "static inline T ## _union_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\\\n"
+ "{ T ## _union_t u__tmp = { 0, 0 }; u__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\\\n"
+ " if (u__tmp.type == 0) return u__tmp; u__tmp.value = N ## _ ## NK ## _get(t__tmp); return u__tmp; }\\\n"
+ "static inline NS ## string_t N ## _ ## NK ## _as_string(N ## _table_t t__tmp)\\\n"
+ "{ return NS ## string_cast_from_generic(N ## _ ## NK ## _get(t__tmp)); }\\\n"
+ "\n");
+ fprintf(out->fp,
+ "#define __%sdefine_union_vector_ops(NS, T)\\\n"
+ "static inline size_t T ## _union_vec_len(T ## _union_vec_t uv__tmp)\\\n"
+ "{ return NS ## vec_len(uv__tmp.type); }\\\n"
+ "static inline T ## _union_t T ## _union_vec_at(T ## _union_vec_t uv__tmp, size_t i__tmp)\\\n"
+ "{ T ## _union_t u__tmp = { 0, 0 }; size_t n__tmp = NS ## vec_len(uv__tmp.type);\\\n"
+ " FLATCC_ASSERT(n__tmp > (i__tmp) && \"index out of range\"); u__tmp.type = uv__tmp.type[i__tmp];\\\n"
+ " /* Unknown type is treated as NONE for schema evolution. */\\\n"
+ " if (u__tmp.type == 0) return u__tmp;\\\n"
+ " u__tmp.value = NS ## generic_vec_at(uv__tmp.value, i__tmp); return u__tmp; }\\\n"
+ "static inline NS ## string_t T ## _union_vec_at_as_string(T ## _union_vec_t uv__tmp, size_t i__tmp)\\\n"
+ "{ return (NS ## string_t) NS ## generic_vec_at_as_string(uv__tmp.value, i__tmp); }\\\n"
+ "\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union_vector(NS, T)\\\n"
+ "typedef NS ## union_vec_t T ## _union_vec_t;\\\n"
+ "typedef NS ## mutable_union_vec_t T ## _mutable_union_vec_t;\\\n"
+ "static inline T ## _mutable_union_vec_t T ## _mutable_union_vec_cast(T ## _union_vec_t u__tmp)\\\n"
+ "{ return NS ## mutable_union_vec_cast(u__tmp); }\\\n"
+ "__## NS ## define_union_vector_ops(NS, T)\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union(NS, T)\\\n"
+ "typedef NS ## union_t T ## _union_t;\\\n"
+ "typedef NS ## mutable_union_t T ## _mutable_union_t;\\\n"
+ "static inline T ## _mutable_union_t T ## _mutable_union_cast(T ## _union_t u__tmp)\\\n"
+ "{ return NS ## mutable_union_cast(u__tmp); }\\\n"
+ "__## NS ## define_union_vector(NS, T)\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union_vector_field(NS, ID, N, NK, T, r)\\\n"
+ "__## NS ## define_vector_field(ID - 1, N, NK ## _type, T ## _vec_t, r)\\\n"
+ "__## NS ## define_vector_field(ID, N, NK, flatbuffers_generic_vec_t, r)\\\n"
+ "static inline T ## _union_vec_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\\\n"
+ "{ T ## _union_vec_t uv__tmp; uv__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\\\n"
+ " uv__tmp.value = N ## _ ## NK(t__tmp);\\\n"
+ " FLATCC_ASSERT(NS ## vec_len(uv__tmp.type) == NS ## vec_len(uv__tmp.value)\\\n"
+ " && \"union vector type length mismatch\"); return uv__tmp; }\n",
+ nsc);
+}
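+
+/*
+ * Sketch of the generated union accessors (table and member names are
+ * illustrative): for a table `Monster` with a union field `equipped` of
+ * union type `Equipment` the macros above expand to roughly
+ *
+ *     Equipment_union_t u = Monster_equipped_union(monster);
+ *     if (u.type != 0) { ... use u.value ... }
+ *
+ * where a zero type yields an empty union for schema evolution.
+ */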
+
+/* Linearly finds first occurrence of matching key, doesn't require vector to be sorted. */
+static void gen_scan(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ /*
+ * E: Element accessor (elem = E(vector, index)).
+ * L: Length accessor (length = L(vector)).
+ * A: Field accessor (or the identity function), result must match the diff function D's first arg.
+ * V: The vector to search (need not be sorted).
+ * T: The scalar, enum or string key type, (either the element, or a field of the element).
+ * K: The search key.
+ * Kn: optional key length so external strings do not have to be zero terminated.
+ * D: the diff function D(v, K, Kn) :: v - <K, Kn>
+ *
+ * returns index (0..len - 1), or not_found (-1).
+ */
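+ /*
+ * Usage sketch (illustrative names): unlike find, scan is a linear
+ * search that also works on unsorted vectors, with _ex and rscan
+ * variants for ranges and reverse order:
+ *
+ *     size_t i = Monster_vec_scan_by_name(monsters, "Orc");
+ *     size_t j = Monster_vec_rscan_ex_by_name(monsters, 0, i, "Orc");
+ */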
+ fprintf(out->fp,
+ "/* A = identity if searching scalar vectors rather than key fields. */\n"
+ "/* Returns lowest matching index or not_found. */\n"
+ "#define __%sscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\\\n"
+ "{ T v__tmp; size_t i__tmp;\\\n"
+ " for (i__tmp = b; i__tmp < e; ++i__tmp) {\\\n"
+ " v__tmp = A(E(V, i__tmp));\\\n"
+ " if (D(v__tmp, (K), (Kn)) == 0) {\\\n"
+ " return i__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " return %snot_found;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%srscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\\\n"
+ "{ T v__tmp; size_t i__tmp = e;\\\n"
+ " while (i__tmp-- > b) {\\\n"
+ " v__tmp = A(E(V, i__tmp));\\\n"
+ " if (D(v__tmp, (K), (Kn)) == 0) {\\\n"
+ " return i__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " return %snot_found;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sscan_by_scalar_field(b, e, A, V, E, L, K, T)\\\n"
+ "__%sscan_by_field(b, e, A, V, E, L, K, 0, T, __%sscalar_cmp)\n"
+ "#define __%sscan_by_string_field(b, e, A, V, E, L, K)\\\n"
+ "__%sscan_by_field(b, e, A, V, E, L, K, 0, %sstring_t, __%sstring_cmp)\n"
+ "#define __%sscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\\\n"
+ "__%sscan_by_field(b, e, A, V, E, L, K, Kn, %sstring_t, __%sstring_n_cmp)\n",
+ nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%srscan_by_scalar_field(b, e, A, V, E, L, K, T)\\\n"
+ "__%srscan_by_field(b, e, A, V, E, L, K, 0, T, __%sscalar_cmp)\n"
+ "#define __%srscan_by_string_field(b, e, A, V, E, L, K)\\\n"
+ "__%srscan_by_field(b, e, A, V, E, L, K, 0, %sstring_t, __%sstring_cmp)\n"
+ "#define __%srscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\\\n"
+ "__%srscan_by_field(b, e, A, V, E, L, K, Kn, %sstring_t, __%sstring_n_cmp)\n",
+ nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scan_by_scalar_field(N, NK, T)\\\n"
+ "static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_scan(N, T)\\\n"
+ "static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scan_by_string_field(N, NK) \\\n"
+ "static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "__%sscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_scan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%sscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\\\n"
+ "static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "__%sscan_by_string_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_scan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%sscan_by_string_n_field(begin__tmp, __%smin( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "__%srscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%srscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "__%srscan_by_string_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%srscan_by_string_n_field(begin__tmp, __%smin( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_scan_by_scalar_field(N, NK, TK)\\\n"
+ "static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_scan_by_ ## NK(vec__tmp, key__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_rscan_by_ ## NK(vec__tmp, key__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_scan_by_string_field(N, NK) \\\n"
+ "static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_scan_by_ ## NK(vec__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_scan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_scan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_rscan_by_ ## NK(vec__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_rscan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_rscan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\n",
+ nsc);
+}
+
+static void gen_helpers(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ fprintf(out->fp,
+ /*
+ * Include the basic primitives for accessing flatbuffer data types independent
+ * of endianness.
+ *
+ * The included file must define the basic types and accessors
+ * prefixed with the common namespace which by default is
+ * "flatbuffers_".
+ */
+ "#include \"flatcc/flatcc_flatbuffers.h\"\n"
+ "\n\n");
+ /*
+ * The remapping of basic types to the common namespace makes it
+ * possible to have different definitions. The generic
+ * `flatbuffers_uoffset_t` etc. cannot be trusted to have one specific
+ * size since it depends on the included `flatcc/flatcc_types.h`
+ * file, but the namespace-prefixed types can be trusted if used carefully.
+ * For example the common namespace could be `flatbuffers_large_`
+ * when allowing for 64 bit offsets.
+ */
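+ /*
+ * Illustrative sketch (not verbatim generator output): with a custom
+ * common namespace such as "myns_" (a hypothetical prefix chosen for
+ * this example), the block below would emit lines along the lines of
+ *
+ * typedef flatbuffers_uoffset_t myns_uoffset_t;
+ * typedef flatbuffers_bool_t myns_bool_t;
+ * #define myns_endian flatbuffers_endian
+ *
+ * so generated readers can consistently use the "myns_" names while the
+ * underlying definitions remain the flatbuffers_ primitives.
+ */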
+ if (strcmp(nsc, "flatbuffers_")) {
+ fprintf(out->fp,
+ "typedef flatbuffers_uoffset_t %suoffset_t;\n"
+ "typedef flatbuffers_soffset_t %ssoffset_t;\n"
+ "typedef flatbuffers_voffset_t %svoffset_t;\n"
+ "typedef flatbuffers_utype_t %sutype_t;\n"
+ "typedef flatbuffers_bool_t %sbool_t;\n"
+ "\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define %sendian flatbuffers_endian\n"
+ "__flatcc_define_basic_scalar_accessors(%s, flatbuffers_endian)"
+ "__flatcc_define_integer_accessors(%sbool, flatbuffers_bool_t,\\\n"
+ " FLATBUFFERS_BOOL_WIDTH, flatbuffers_endian)\\\n"
+ "__flatcc_define_integer_accessors(%sunion_type, flatbuffers_union_type_t,\n"
+ " FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)\\\n",
+ "\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "__flatcc_define_integer_accessors(__%suoffset, flatbuffers_uoffset_t,\n"
+ " FLATBUFFERS_UOFFSET_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%ssoffset, flatbuffers_soffset_t,\n"
+ " FLATBUFFERS_SOFFSET_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%svoffset, flatbuffers_voffset_t,\n"
+ " FLATBUFFERS_VOFFSET_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%sutype, flatbuffers_utype_t,\n"
+ " FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%sthash, flatbuffers_thash_t,\n"
+ " FLATBUFFERS_THASH_WIDTH, flatbuffers_endian)\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#ifndef %s_WRAP_NAMESPACE\n"
+ "#define %s_WRAP_NAMESPACE(ns, x) ns ## _ ## x\n"
+ "#endif\n",
+ out->nscup, out->nscup);
+ }
+ /* Build out a more elaborate type system based on the included primitives. */
+ fprintf(out->fp,
+ "#define __%sread_scalar_at_byteoffset(N, p, o) N ## _read_from_pe((uint8_t *)(p) + (o))\n"
+ "#define __%sread_scalar(N, p) N ## _read_from_pe(p)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sread_vt(ID, offset, t)\\\n"
+ "%svoffset_t offset = 0;\\\n"
+ "{ %svoffset_t id__tmp, *vt__tmp;\\\n"
+ " FLATCC_ASSERT(t != 0 && \"null pointer table access\");\\\n"
+ " id__tmp = ID;\\\n"
+ " vt__tmp = (%svoffset_t *)((uint8_t *)(t) -\\\n"
+ " __%ssoffset_read_from_pe(t));\\\n"
+ " if (__%svoffset_read_from_pe(vt__tmp) >= sizeof(vt__tmp[0]) * (id__tmp + 3u)) {\\\n"
+ " offset = __%svoffset_read_from_pe(vt__tmp + id__tmp + 2);\\\n"
+ " }\\\n"
+ "}\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc);
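+ /*
+ * Descriptive note on the __<ns>read_vt macro emitted above (a sketch,
+ * assuming the standard FlatBuffers table layout): a table begins with a
+ * signed offset back to its vtable; vtable slot 0 holds the vtable size
+ * in bytes, slot 1 the table size, and field ID lives in slot ID + 2.
+ * The size check against slot 0 makes fields missing from shorter
+ * (older) vtables resolve to offset 0, i.e. "not present".
+ */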
+ fprintf(out->fp,
+ "#define __%sfield_present(ID, t) { __%sread_vt(ID, offset__tmp, t) return offset__tmp != 0; }\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sscalar_field(T, ID, t)\\\n"
+ "{\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " if (offset__tmp) {\\\n"
+ " return (const T *)((uint8_t *)(t) + offset__tmp);\\\n"
+ " }\\\n"
+ " return 0;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_field(ID, N, NK, TK, T, V)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "{ __%sread_vt(ID, offset__tmp, t__tmp)\\\n"
+ " return offset__tmp ? __%sread_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\\\n"
+ "}\\\n", nsc, nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "static inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "{ __%sread_vt(ID, offset__tmp, t__tmp)\\\n"
+ " return offset__tmp ? __%sread_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\\\n"
+ "}\\\n", nsc, nsc);
+ }
+ fprintf(out->fp,
+ "static inline const T *N ## _ ## NK ## _get_ptr(N ## _table_t t__tmp)\\\n"
+ "__%sscalar_field(T, ID, t__tmp)\\\n", nsc);
+ fprintf(out->fp,
+ "static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)",nsc);
+ if (out->opts->allow_scan_for_all_fields) {
+ fprintf(out->fp, "\\\n__%sdefine_scan_by_scalar_field(N, NK, T)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_optional_field(ID, N, NK, TK, T, V)\\\n"
+ "__%sdefine_scalar_field(ID, N, NK, TK, T, V)\\\n"
+ "static inline TK ## _option_t N ## _ ## NK ## _option(N ## _table_t t__tmp)\\\n"
+ "{ TK ## _option_t ret; __%sread_vt(ID, offset__tmp, t__tmp)\\\n"
+ " ret.is_null = offset__tmp == 0; ret.value = offset__tmp ?\\\n"
+ " __%sread_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\\\n"
+ " return ret; }\n", nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sstruct_field(T, ID, t, r)\\\n"
+ "{\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " if (offset__tmp) {\\\n"
+ " return (T)((uint8_t *)(t) + offset__tmp);\\\n"
+ " }\\\n"
+ " FLATCC_ASSERT(!(r) && \"required field missing\");\\\n"
+ " return 0;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%soffset_field(T, ID, t, r, adjust)\\\n"
+ "{\\\n"
+ " %suoffset_t *elem__tmp;\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " if (offset__tmp) {\\\n"
+ " elem__tmp = (%suoffset_t *)((uint8_t *)(t) + offset__tmp);\\\n"
+ " /* Add sizeof so C api can have raw access past header field. */\\\n"
+ " return (T)((uint8_t *)(elem__tmp) + adjust +\\\n"
+ " __%suoffset_read_from_pe(elem__tmp));\\\n"
+ " }\\\n"
+ " FLATCC_ASSERT(!(r) && \"required field missing\");\\\n"
+ " return 0;\\\n"
+ "}\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%svector_field(T, ID, t, r) __%soffset_field(T, ID, t, r, sizeof(%suoffset_t))\n"
+ "#define __%stable_field(T, ID, t, r) __%soffset_field(T, ID, t, r, 0)\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_struct_field(ID, N, NK, T, r)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%sstruct_field(T, ID, t__tmp, r)", nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%sstruct_field(T, ID, t__tmp, r)", nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)\n", nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_vector_field(ID, N, NK, T, r)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(T, ID, t__tmp, r)", nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(T, ID, t__tmp, r)", nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)\n", nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_table_field(ID, N, NK, T, r)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%stable_field(T, ID, t__tmp, r)", nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%stable_field(T, ID, t__tmp, r)", nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)\n", nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_string_field(ID, N, NK, r)\\\n"
+ "static inline %sstring_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(%sstring_t, ID, t__tmp, r)", nsc, nsc, nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline %sstring_t N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(%sstring_t, ID, t__tmp, r)", nsc, nsc, nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)", nsc);
+ if (out->opts->allow_scan_for_all_fields) {
+ fprintf(out->fp, "\\\n__%sdefine_scan_by_string_field(N, NK)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "#define __%svec_len(vec)\\\n"
+ "{ return (vec) ? (size_t)__%suoffset_read_from_pe((flatbuffers_uoffset_t *)vec - 1) : 0; }\n"
+ "#define __%sstring_len(s) __%svec_len(s)\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline size_t %svec_len(const void *vec)\n"
+ "__%svec_len(vec)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ /* N is the base scalar type prefix used to select the load (read_from_pe). */
+ "#define __%sscalar_vec_at(N, vec, i)\\\n"
+ "{ FLATCC_ASSERT(%svec_len(vec) > (i) && \"index out of range\");\\\n"
+ " return __%sread_scalar(N, &(vec)[i]); }\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sstruct_vec_at(vec, i)\\\n"
+ "{ FLATCC_ASSERT(%svec_len(vec) > (i) && \"index out of range\"); return (vec) + (i); }\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "/* `adjust` skips past the header for string vectors. */\n"
+ "#define __%soffset_vec_at(T, vec, i, adjust)\\\n"
+ "{ const %suoffset_t *elem__tmp = (vec) + (i);\\\n"
+ " FLATCC_ASSERT(%svec_len(vec) > (i) && \"index out of range\");\\\n"
+ " return (T)((uint8_t *)(elem__tmp) + (size_t)__%suoffset_read_from_pe(elem__tmp) + (adjust)); }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_vec_len(N)\\\n"
+ "static inline size_t N ## _vec_len(N ##_vec_t vec__tmp)\\\n"
+ "{ return %svec_len(vec__tmp); }\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_vec_at(N, T) \\\n"
+ "static inline T N ## _vec_at(N ## _vec_t vec__tmp, size_t i__tmp)\\\n"
+ "__%sscalar_vec_at(N, vec__tmp, i__tmp)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "typedef const char *%sstring_t;\n"
+ "static inline size_t %sstring_len(%sstring_t s)\n"
+ "__%sstring_len(s)\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "typedef const %suoffset_t *%sstring_vec_t;\n"
+ "typedef %suoffset_t *%sstring_mutable_vec_t;\n"
+ "static inline size_t %sstring_vec_len(%sstring_vec_t vec)\n"
+ "__%svec_len(vec)\n"
+ "static inline %sstring_t %sstring_vec_at(%sstring_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%sstring_t, vec, i, sizeof(vec[0]))\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp, "typedef const void *%sgeneric_t;\n", nsc);
+ fprintf(out->fp, "typedef void *%smutable_generic_t;\n", nsc);
+ fprintf(out->fp,
+ "static inline %sstring_t %sstring_cast_from_generic(const %sgeneric_t p)\n"
+ "{ return p ? ((const char *)p) + __%suoffset__size() : 0; }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "typedef const %suoffset_t *%sgeneric_vec_t;\n"
+ "typedef %suoffset_t *%sgeneric_table_mutable_vec_t;\n"
+ "static inline size_t %sgeneric_vec_len(%sgeneric_vec_t vec)\n"
+ "__%svec_len(vec)\n"
+ "static inline %sgeneric_t %sgeneric_vec_at(%sgeneric_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%sgeneric_t, vec, i, 0)\n"
+ "static inline %sgeneric_t %sgeneric_vec_at_as_string(%sgeneric_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%sgeneric_t, vec, i, sizeof(vec[0]))\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ gen_union(out);
+ gen_find(out);
+ gen_scan(out);
+ if (out->opts->cgen_sort) {
+ gen_sort(out);
+ fprintf(out->fp,
+ "#define __%ssort_vector_field(N, NK, T, t)\\\n"
+ "{ T ## _mutable_vec_t v__tmp = (T ## _mutable_vec_t) N ## _ ## NK ## _get(t);\\\n"
+ " if (v__tmp) T ## _vec_sort(v__tmp); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_table_field(N, NK, T, t)\\\n"
+ "{ T ## _sort((T ## _mutable_table_t)N ## _ ## NK ## _get(t)); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_union_field(N, NK, T, t)\\\n"
+ "{ T ## _sort(T ## _mutable_union_cast(N ## _ ## NK ## _union(t))); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_table_vector_field_elements(N, NK, T, t)\\\n"
+ "{ T ## _vec_t v__tmp = N ## _ ## NK ## _get(t); size_t i__tmp, n__tmp;\\\n"
+ " n__tmp = T ## _vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\\\n"
+ " T ## _sort((T ## _mutable_table_t)T ## _vec_at(v__tmp, i__tmp)); }}\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_union_vector_field_elements(N, NK, T, t)\\\n"
+ "{ T ## _union_vec_t v__tmp = N ## _ ## NK ## _union(t); size_t i__tmp, n__tmp;\\\n"
+ " n__tmp = T ## _union_vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\\\n"
+ " T ## _sort(T ## _mutable_union_cast(T ## _union_vec_at(v__tmp, i__tmp))); }}\n",
+ nsc);
+ } else {
+ fprintf(out->fp, "/* sort disabled */\n");
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_vector(N, T)\\\n"
+ "typedef const T *N ## _vec_t;\\\n"
+ "typedef T *N ## _mutable_vec_t;\\\n"
+ "__%sdefine_scalar_vec_len(N)\\\n"
+ "__%sdefine_scalar_vec_at(N, T)\\\n"
+ "__%sdefine_scalar_find(N, T)\\\n"
+ "__%sdefine_scalar_scan(N, T)",
+ nsc, nsc, nsc, nsc, nsc);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp, "\\\n__%sdefine_scalar_sort(N, T)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp, "\n");
+ /* Elaborate on the included basic type system. */
+ fprintf(out->fp,
+ "#define __%sdefine_integer_type(N, T, W)\\\n"
+ "__flatcc_define_integer_accessors(N, T, W, %sendian)\\\n"
+ "__%sdefine_scalar_vector(N, T)\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "__%sdefine_scalar_vector(%sbool, %sbool_t)\n"
+ "__%sdefine_scalar_vector(%schar, char)\n"
+ "__%sdefine_scalar_vector(%suint8, uint8_t)\n"
+ "__%sdefine_scalar_vector(%sint8, int8_t)\n"
+ "__%sdefine_scalar_vector(%suint16, uint16_t)\n"
+ "__%sdefine_scalar_vector(%sint16, int16_t)\n"
+ "__%sdefine_scalar_vector(%suint32, uint32_t)\n"
+ "__%sdefine_scalar_vector(%sint32, int32_t)\n"
+ "__%sdefine_scalar_vector(%suint64, uint64_t)\n"
+ "__%sdefine_scalar_vector(%sint64, int64_t)\n"
+ "__%sdefine_scalar_vector(%sfloat, float)\n"
+ "__%sdefine_scalar_vector(%sdouble, double)\n"
+ "__%sdefine_scalar_vector(%sunion_type, %sunion_type_t)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline size_t %sstring_vec_find(%sstring_vec_t vec, const char *s)\n"
+ "__%sfind_by_string_field(__%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_find_n(%sstring_vec_t vec, const char *s, size_t n)\n"
+ "__%sfind_by_string_n_field(__%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline size_t %sstring_vec_scan(%sstring_vec_t vec, const char *s)\n"
+ "__%sscan_by_string_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_scan_n(%sstring_vec_t vec, const char *s, size_t n)\n"
+ "__%sscan_by_string_n_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n"
+ "static inline size_t %sstring_vec_scan_ex(%sstring_vec_t vec, size_t begin, size_t end, const char *s)\n"
+ "__%sscan_by_string_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_scan_ex_n(%sstring_vec_t vec, size_t begin, size_t end, const char *s, size_t n)\n"
+ "__%sscan_by_string_n_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n"
+ "static inline size_t %sstring_vec_rscan(%sstring_vec_t vec, const char *s)\n"
+ "__%srscan_by_string_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_rscan_n(%sstring_vec_t vec, const char *s, size_t n)\n"
+ "__%srscan_by_string_n_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n"
+ "static inline size_t %sstring_vec_rscan_ex(%sstring_vec_t vec, size_t begin, size_t end, const char *s)\n"
+ "__%srscan_by_string_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_rscan_ex_n(%sstring_vec_t vec, size_t begin, size_t end, const char *s, size_t n)\n"
+ "__%srscan_by_string_n_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp, "__%sdefine_string_sort()\n", nsc);
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_struct_scalar_fixed_array_field(N, NK, TK, T, L)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ if (!t__tmp || i__tmp >= L) return 0;\\\n"
+ " return __%sread_scalar(TK, &(t__tmp->NK[i__tmp])); }\\\n"
+ "static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? t__tmp->NK : 0; }\\\n"
+ "static inline size_t N ## _ ## NK ## _get_len(void) { return L; }",
+ nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK (N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ return N ## _ ## NK ## _get(t__tmp, i__tmp); }");
+ }
+ fprintf(out->fp, "\n");;
+ fprintf(out->fp,
+ "#define __%sdefine_struct_struct_fixed_array_field(N, NK, T, L)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }"
+ "static inline T N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? t__tmp->NK : 0; }\\\n"
+ "static inline size_t N ## _ ## NK ## _get_len(void) { return L; }",
+ nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }");
+ }
+ fprintf(out->fp, "\n");
+ fprintf(out->fp,
+ "#define __%sdefine_struct_scalar_field(N, NK, TK, T)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? __%sread_scalar(TK, &(t__tmp->NK)) : 0; }\\\n"
+ "static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? &(t__tmp->NK) : 0; }",
+ nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK (N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? __%sread_scalar(TK, &(t__tmp->NK)) : 0; }",
+ nsc);
+ }
+ if (out->opts->allow_scan_for_all_fields) {
+ fprintf(out->fp, "\\\n__%sdefine_scan_by_scalar_field(N, NK, T)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_struct_struct_field(N, NK, T)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }",
+ nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK (N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }\n");
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "/* If fid is null, the function returns true without testing as buffer is not expected to have any id. */\n"
+ "static inline int %shas_identifier(const void *buffer, const char *fid)\n"
+ "{ %sthash_t id, id2 = 0; if (fid == 0) { return 1; };\n"
+ " id2 = %stype_hash_from_string(fid);\n"
+ " id = __%sthash_read_from_pe(((%suoffset_t *)buffer) + 1);\n"
+ " return id2 == 0 || id == id2; }\n"
+ "static inline int %shas_type_hash(const void *buffer, %sthash_t thash)\n"
+ "{ return thash == 0 || (__%sthash_read_from_pe((%suoffset_t *)buffer + 1) == thash); }\n\n"
+ "static inline %sthash_t %sget_type_hash(const void *buffer)\n"
+ "{ return __%sthash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1); }\n\n"
+ "#define %sverify_endian() %shas_identifier(\"\\x00\\x00\\x00\\x00\" \"1234\", \"1234\")\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline void *%sread_size_prefix(void *b, size_t *size_out)\n"
+ "{ if (size_out) { *size_out = (size_t)__%suoffset_read_from_pe(b); }\n"
+ " return (uint8_t *)b + sizeof(%suoffset_t); }\n", nsc, nsc, nsc);
+ fprintf(out->fp,
+ "/* Null file identifier accepts anything, otherwise fid should be 4 characters. */\n"
+ "#define __%sread_root(T, K, buffer, fid)\\\n"
+ " ((!buffer || !%shas_identifier(buffer, fid)) ? 0 :\\\n"
+ " ((T ## _ ## K ## t)(((uint8_t *)buffer) +\\\n"
+ " __%suoffset_read_from_pe(buffer))))\n"
+ "#define __%sread_typed_root(T, K, buffer, thash)\\\n"
+ " ((!buffer || !%shas_type_hash(buffer, thash)) ? 0 :\\\n"
+ " ((T ## _ ## K ## t)(((uint8_t *)buffer) +\\\n"
+ " __%suoffset_read_from_pe(buffer))))\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%snested_buffer_as_root(C, N, T, K)\\\n"
+ "static inline T ## _ ## K ## t C ## _ ## N ## _as_root_with_identifier(C ## _ ## table_t t__tmp, const char *fid__tmp)\\\n"
+ "{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __%sread_root(T, K, buffer__tmp, fid__tmp); }\\\n"
+ "static inline T ## _ ## K ## t C ## _ ## N ## _as_typed_root(C ## _ ## table_t t__tmp)\\\n"
+ "{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __%sread_root(T, K, buffer__tmp, C ## _ ## type_identifier); }\\\n"
+ "static inline T ## _ ## K ## t C ## _ ## N ## _as_root(C ## _ ## table_t t__tmp)\\\n"
+ "{ const char *fid__tmp = T ## _file_identifier;\\\n"
+ " const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __%sread_root(T, K, buffer__tmp, fid__tmp); }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sbuffer_as_root(N, K)\\\n"
+ "static inline N ## _ ## K ## t N ## _as_root_with_identifier(const void *buffer__tmp, const char *fid__tmp)\\\n"
+ "{ return __%sread_root(N, K, buffer__tmp, fid__tmp); }\\\n"
+ "static inline N ## _ ## K ## t N ## _as_root_with_type_hash(const void *buffer__tmp, %sthash_t thash__tmp)\\\n"
+ "{ return __%sread_typed_root(N, K, buffer__tmp, thash__tmp); }\\\n"
+ "static inline N ## _ ## K ## t N ## _as_root(const void *buffer__tmp)\\\n"
+ "{ const char *fid__tmp = N ## _file_identifier;\\\n"
+ " return __%sread_root(N, K, buffer__tmp, fid__tmp); }\\\n"
+ "static inline N ## _ ## K ## t N ## _as_typed_root(const void *buffer__tmp)\\\n"
+ "{ return __%sread_typed_root(N, K, buffer__tmp, N ## _type_hash); }\n"
+ "#define __%sstruct_as_root(N) __%sbuffer_as_root(N, struct_)\n"
+ "#define __%stable_as_root(N) __%sbuffer_as_root(N, table_)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp, "\n");
+}
+
+int fb_gen_common_c_header(fb_output_t *out)
+{
+ const char *nscup = out->nscup;
+
+ fprintf(out->fp,
+ "#ifndef %s_COMMON_READER_H\n"
+ "#define %s_COMMON_READER_H\n",
+ nscup, nscup);
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "/* Common FlatBuffers read functionality for C. */\n\n");
+ if (!out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "/*"
+ " * This code is generated without support for vector sort operations\n"
+ " * but find operations are supported on pre-sorted vectors.\n"
+ " */\n");
+ }
+ gen_prologue(out);
+ gen_helpers(out);
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_COMMON_H */\n",
+ nscup);
+ return 0;
+}
+
+static void gen_pretext(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ const char *nscup = out->nscup;
+
+ int do_pad = out->opts->cgen_pad;
+
+ fprintf(out->fp,
+ "#ifndef %s_READER_H\n"
+ "#define %s_READER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ if (do_pad) {
+ fprintf(out->fp,
+ "/*\n"
+ " * Generated with 'pad' option which expects #pragma pack(1) and\n"
+ " * #pragma pack() to be supported, and which adds extra padding\n"
+ " * fields to structs.\n"
+ " *\n"
+ " * This is mostly relevant for some micro controller platforms, but\n"
+ " * may also be needed with 'force_align' attributes > 16.\n"
+ " *\n"
+ " * The default output uses C11 <stdalign.h> alignas(n) which can be\n"
+ " * defined as `__attribute__((aligned (n)))` or similar on many\n"
+ " * older platforms.\n"
+ " */\n"
+ "\n");
+ }
+
+ fprintf(out->fp,
+ "#ifndef %s_COMMON_READER_H\n"
+ "#include \"%scommon_reader.h\"\n"
+ "#endif\n",
+ nscup, nsc);
+ fb_gen_c_includes(out, "_reader.h", "_READER_H");
+
+ /*
+ * Must be in included in every file using static_assert to ensure
+ * static_assert_scope.h counter can avoid conflicts.
+ */
+ fprintf(out->fp,
+ "#include \"flatcc/flatcc_flatbuffers.h\"\n");
+ if (!do_pad) {
+ fprintf(out->fp,
+ "#ifndef __alignas_is_defined\n"
+ "#include <stdalign.h>\n"
+ "#endif\n");
+ }
+ gen_prologue(out);
+ if (out->S->file_identifier.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sidentifier\n"
+ "#define %sidentifier \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sidentifier\n"
+ "#define %sidentifier 0\n"
+ "#endif\n",
+ nsc, nsc);
+ }
+ if (out->S->file_extension.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sextension\n"
+ "#define %sextension \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_extension.s.len, out->S->file_extension.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sextension\n"
+ "#define %sextension \"%s\"\n"
+ "#endif\n",
+ nsc, nsc, out->opts->default_bin_ext);
+ }
+ fprintf(out->fp, "\n");
+}
+
+static void gen_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp, "#endif /* %s_READER_H */\n", out->S->basenameup);
+}
+
+static void gen_forward_decl(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ const char *nsc = out->nsc;
+
+ fb_clear(snt);
+
+ assert(ct->symbol.kind == fb_is_struct || ct->symbol.kind == fb_is_table);
+
+ fb_compound_name(ct, &snt);
+ if (ct->symbol.kind == fb_is_struct) {
+ if (ct->size == 0) {
+ gen_panic(out, "internal error: unexpected empty struct");
+ return;
+ } else {
+ fprintf(out->fp, "typedef struct %s %s_t;\n",
+ snt.text, snt.text);
+ }
+ fprintf(out->fp, "typedef const %s_t *%s_struct_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef %s_t *%s_mutable_struct_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef const %s_t *%s_vec_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef %s_t *%s_mutable_vec_t;\n",
+ snt.text, snt.text);
+ } else {
+ fprintf(out->fp, "typedef const struct %s_table *%s_table_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef struct %s_table *%s_mutable_table_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef const %suoffset_t *%s_vec_t;\n", nsc, snt.text);
+ fprintf(out->fp, "typedef %suoffset_t *%s_mutable_vec_t;\n", nsc, snt.text);
+ }
+}
+
+static inline void print_doc(fb_output_t *out, const char *indent, fb_doc_t *doc)
+{
+ long ln = 0;
+ int first = 1;
+ if (doc == 0) {
+ return;
+ }
+ while (doc) {
+ if (ln != doc->ident->linenum) {
+ if (first) {
+ /* Not all C compilers understand // comments. */
+ fprintf(out->fp, "%s/** ", indent);
+ ln = doc->ident->linenum;
+ } else {
+ fprintf(out->fp, "\n%s * ", indent);
+ }
+ }
+ first = 0;
+ fprintf(out->fp, "%.*s", (int)doc->ident->len, doc->ident->text);
+ ln = doc->ident->linenum;
+ doc = doc->link;
+ }
+ fprintf(out->fp, " */\n");
+}
+
+static void gen_struct(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ unsigned align;
+ size_t offset = 0;
+ const char *tname, *tname_ns, *tname_prefix;
+ int n, len;
+ const char *s;
+ unsigned pad_index = 0, deprecated_index = 0, pad;
+ const char *kind;
+ int do_pad = out->opts->cgen_pad;
+ int is_primary_key, current_key_processed;
+ const char *nsc = out->nsc;
+
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+
+ assert(ct->symbol.kind == fb_is_struct);
+ assert(ct->align > 0 || ct->count == 0);
+ assert(ct->size > 0 || ct->count == 0);
+
+ fb_compound_name(ct, &snt);
+ print_doc(out, "", ct->doc);
+ if (ct->size == 0) {
+ gen_panic(out, "internal error: unexpected empty struct");
+ } else {
+ if (do_pad) {
+ fprintf(out->fp, "#pragma pack(1)\n");
+ }
+ /*
+ * Unfortunately the following is not valid in C11:
+ *
+ * struct alignas(4) mystruct { ... };
+ *
+ * we can only use alignas on members (unlike C++, and unlike
+ * non-portable C compiler variants).
+ *
+ * By padding the first element to the struct size we get around
+ * this problem. It shouldn't strictly be necessary to add padding
+ * fields, but compilers might not support alignment above 16 bytes,
+ * so we do that as a precaution with an optional compiler flag.
+ */
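+ /*
+ * Illustrative sketch of the two output styles for a hypothetical struct
+ * with a ubyte field `tag` followed by a uint field `x` (8 bytes total):
+ *
+ * 'pad' option: struct S { uint8_t tag; uint8_t __padding0[3]; uint32_t x; };
+ * default: struct S { alignas(4) uint8_t tag; alignas(4) uint32_t x; };
+ *
+ * Both describe the same 8-byte wire layout; only the mechanism that
+ * enforces alignment differs.
+ */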
+ fprintf(out->fp, "struct %s {\n", snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ current_key_processed = 0;
+ member = (fb_member_t *)sym;
+ is_primary_key = ct->primary_key == member;
+ print_doc(out, " ", member->doc);
+ symbol_name(sym, &n, &s);
+ align = offset == 0 ? ct->align : member->align;
+ if (do_pad && (pad = (unsigned)(member->offset - offset))) {
+ fprintf(out->fp, " uint8_t __padding%u[%u];\n",
+ pad_index++, pad);
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ pad = (unsigned)member->size;
+ if (do_pad) {
+ fprintf(out->fp, " uint8_t __deprecated%u[%u]; /* was: '%.*s' */\n",
+ deprecated_index++, pad, n, s);
+ } else {
+ fprintf(out->fp, " alignas(%u) uint8_t __deprecated%u[%u]; /* was: '%.*s' */\n",
+ align, deprecated_index++, pad, n, s);
+ }
+ offset = (unsigned)(member->offset + member->size);
+ continue;
+ }
+ switch (member->type.type) {
+ case vt_fixed_array_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ len = (int)member->type.len;
+ if (do_pad) {
+ fprintf(out->fp, " %s%s ", tname_ns, tname);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s%s ", align, tname_ns, tname);
+ }
+ fprintf(out->fp, "%.*s[%d];\n", n, s, len);
+ break;
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ if (do_pad) {
+ fprintf(out->fp, " %s%s ", tname_ns, tname);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s%s ", align, tname_ns, tname);
+ }
+ fprintf(out->fp, "%.*s;\n", n, s);
+ break;
+ case vt_fixed_array_compound_type_ref:
+ assert(member->type.ct->symbol.kind == fb_is_struct || member->type.ct->symbol.kind == fb_is_enum);
+ kind = member->type.ct->symbol.kind == fb_is_struct ? "" : "enum_";
+ fb_compound_name(member->type.ct, &snref);
+ len = (int)member->type.len;
+ if (do_pad) {
+ fprintf(out->fp, " %s_%st ", snref.text, kind);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s_%st ", align, snref.text, kind);
+ }
+ fprintf(out->fp, "%.*s[%d];\n", n, s, len);
+ break;
+ case vt_compound_type_ref:
+ assert(member->type.ct->symbol.kind == fb_is_struct || member->type.ct->symbol.kind == fb_is_enum);
+ kind = member->type.ct->symbol.kind == fb_is_struct ? "" : "enum_";
+ fb_compound_name(member->type.ct, &snref);
+ if (do_pad) {
+ fprintf(out->fp, " %s_%st ", snref.text, kind);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s_%st ", align, snref.text, kind);
+ }
+ fprintf(out->fp, "%.*s;\n", n, s);
+ break;
+ default:
+ fprintf(out->fp, " %s ", __FLATCC_ERROR_TYPE);
+ fprintf(out->fp, "%.*s;\n", n, s);
+ gen_panic(out, "internal error: unexpected type during code generation");
+ break;
+ }
+ offset = (unsigned)(member->offset + member->size);
+ }
+ if (do_pad && (pad = (unsigned)(ct->size - offset))) {
+ fprintf(out->fp, " uint8_t __padding%u[%u];\n",
+ pad_index, pad);
+ }
+ fprintf(out->fp, "};\n");
+ if (do_pad) {
+ fprintf(out->fp, "#pragma pack()\n");
+ }
+ fprintf(out->fp,
+ "static_assert(sizeof(%s_t) == %"PRIu64", \"struct size mismatch\");\n\n",
+ snt.text, (uint64_t)ct->size);
+ fprintf(out->fp,
+ "static inline const %s_t *%s__const_ptr_add(const %s_t *p, size_t i) { return p + i; }\n", snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline %s_t *%s__ptr_add(%s_t *p, size_t i) { return p + i; }\n", snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline %s_struct_t %s_vec_at(%s_vec_t vec, size_t i)\n"
+ "__%sstruct_vec_at(vec, i)\n",
+ snt.text, snt.text, snt.text,
+ nsc);
+ }
+ fprintf(out->fp, "static inline size_t %s__size(void) { return %"PRIu64"; }\n",
+ snt.text, (uint64_t)ct->size);
+ fprintf(out->fp,
+ "static inline size_t %s_vec_len(%s_vec_t vec)\n"
+ "__%svec_len(vec)\n",
+ snt.text, snt.text, nsc);
+ fprintf(out->fp,
+ "__%sstruct_as_root(%s)\n",
+ nsc, snt.text);
+ fprintf(out->fp, "\n");
+
+ /* Create accessors which respect endianness and which return 0 on null struct access. */
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ is_primary_key = ct->primary_key == member;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(&member->symbol, &n, &s);
+ switch (member->type.type) {
+ case vt_fixed_array_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tname_prefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_fixed_array_field(%s, %.*s, %s%s, %s%s, %d)\n",
+ nsc, snt.text, n, s, nsc, tname_prefix, tname_ns, tname, member->type.len);
+ /* TODO: if member->type.st == fb_char add string specific methods. */
+ break;
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tname_prefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_field(%s, %.*s, %s%s, %s%s)\n",
+ nsc, snt.text, n, s, nsc, tname_prefix, tname_ns, tname);
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key field on this struct. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_struct_sort_by_scalar_field(%s, %.*s, %s%s, %s_t)\n",
+ nsc, snt.text, n, s, tname_ns, tname, snt.text);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case vt_fixed_array_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_fixed_array_field(%s, %.*s, %s, %s_enum_t, %d)\n",
+ nsc, snt.text, n, s, snref.text, snref.text, member->type.len);
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sdefine_struct_struct_fixed_array_field(%s, %.*s, %s_struct_t, %d)\n",
+ nsc, snt.text, n, s, snref.text, member->type.len);
+ break;
+ }
+ break;
+
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_field(%s, %.*s, %s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text, snref.text);
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_struct_sort_by_scalar_field(%s, %.*s, %s_enum_t, %s_t)\n",
+ nsc, snt.text, n, s, snref.text, snt.text);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case fb_is_struct:
+ /*
+ * For completeness provide an accessor which returns member pointer
+ * or null if container struct is null.
+ */
+ fprintf(out->fp,
+ "__%sdefine_struct_struct_field(%s, %.*s, %s_struct_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ }
+
+ }
+ if ((member->metadata_flags & fb_f_key) && !current_key_processed) {
+ fprintf(out->fp,
+ "/* Note: field has key, but there is no support for find by fields of this type. */\n");
+ /*
+ * If the first key already exists, but was for an unsupported
+ * type, we do not map the next possible key to generic find.
+ */
+ }
+ }
+ fprintf(out->fp, "\n");
+}
+
+/*
+ * Enums are integers, but we cannot control their size in C.
+ * To produce a type-safe and portable result, we generate constants
+ * instead.
+ */
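+/*
+ * Illustrative sketch for a hypothetical enum `Color { Red = 0, Green = 1 }`
+ * with a byte underlying type in namespace `MyGame` (example names only):
+ *
+ * typedef int8_t MyGame_Color_enum_t;
+ * #define MyGame_Color_Red ((MyGame_Color_enum_t)0)
+ * #define MyGame_Color_Green ((MyGame_Color_enum_t)1)
+ *
+ * The exact literal spelling comes from print_literal() below, but the
+ * define-based shape is what allows the values to be used in switch cases.
+ */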
+static void gen_enum(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *tname, *tname_ns, *s, *kind;
+ fb_literal_t literal;
+ int n, w;
+ int is_union;
+ fb_scoped_name_t snt;
+ const char *nsc = out->nsc;
+
+ fb_clear(snt);
+
+ assert(ct->symbol.kind == fb_is_enum || ct->symbol.kind == fb_is_union);
+ assert(ct->type.type == vt_scalar_type);
+
+ tname_ns = scalar_type_ns(ct->type.st, nsc);
+ tname = scalar_type_name(ct->type.st);
+
+ w = (int)ct->size * 8;
+
+ is_union = ct->symbol.kind != fb_is_enum;
+ kind = is_union ? "union_type" : "enum";
+ fb_compound_name(ct, &snt);
+ print_doc(out, "", ct->doc);
+ fprintf(out->fp,
+ "typedef %s%s %s_%s_t;\n",
+ tname_ns, tname, snt.text, kind);
+ fprintf(out->fp,
+ "__%sdefine_integer_type(%s, %s_%s_t, %u)\n",
+ nsc, snt.text, snt.text, kind, w);
+ if (is_union) {
+ fprintf(out->fp,
+ "__%sdefine_union(%s, %s)\n",
+ nsc, nsc, snt.text);
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ print_doc(out, "", member->doc);
+ symbol_name(&member->symbol, &n, &s);
+ print_literal(ct->type.st, &member->value, literal);
+ /*
+ * This must be a define, not a static const integer, otherwise it
+ * won't work in switch statements - except with GNU extensions.
+ */
+ fprintf(out->fp,
+ "#define %s_%.*s ((%s_%s_t)%s)\n",
+ snt.text, n, s, snt.text, kind, literal);
+ }
+ fprintf(out->fp, "\n");
+
+ if (is_union) {
+ fprintf(out->fp, "static inline const char *%s_type_name(%s_union_type_t type)\n"
+ "{\n",
+ snt.text, snt.text);
+ } else {
+ fprintf(out->fp, "static inline const char *%s_name(%s_enum_t value)\n"
+ "{\n",
+ snt.text, snt.text);
+ }
+
+
+ if (is_union) {
+ fprintf(out->fp, " switch (type) {\n");
+ } else {
+ fprintf(out->fp, " switch (value) {\n");
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(&member->symbol, &n, &s);
+ if (sym->flags & fb_duplicate) {
+ fprintf(out->fp,
+ " /* case %s_%.*s: return \"%.*s\"; (duplicate) */\n",
+ snt.text, n, s, n, s);
+ } else {
+ fprintf(out->fp,
+ " case %s_%.*s: return \"%.*s\";\n",
+ snt.text, n, s, n, s);
+ }
+ }
+ fprintf(out->fp,
+ " default: return \"\";\n"
+ " }\n"
+ "}\n");
+ fprintf(out->fp, "\n");
+
+ if (is_union) {
+ fprintf(out->fp, "static inline int %s_is_known_type(%s_union_type_t type)\n"
+ "{\n",
+ snt.text, snt.text);
+ } else {
+ fprintf(out->fp, "static inline int %s_is_known_value(%s_enum_t value)\n"
+ "{\n",
+ snt.text, snt.text);
+ }
+ if (is_union) {
+ fprintf(out->fp, " switch (type) {\n");
+ } else {
+ fprintf(out->fp, " switch (value) {\n");
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(&member->symbol, &n, &s);
+ if (sym->flags & fb_duplicate) {
+ fprintf(out->fp,
+ " /* case %s_%.*s: return 1; (duplicate) */\n",
+ snt.text, n, s);
+ } else {
+ fprintf(out->fp,
+ " case %s_%.*s: return 1;\n",
+ snt.text, n, s);
+ }
+ }
+ fprintf(out->fp,
+ " default: return 0;\n"
+ " }\n"
+ "}\n");
+ fprintf(out->fp, "\n");
+
+}
+
+static void gen_nested_root(fb_output_t *out, fb_symbol_t *root_type, fb_symbol_t *container, fb_symbol_t *member)
+{
+ const char *s;
+ int n;
+ const char *kind;
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snc;
+
+ fb_clear(snt);
+ fb_clear(snc);
+ if (!root_type) {
+ return;
+ }
+ /*
+ * Current flatc compiler only accepts tables, but here we support
+ * both tables and structs insofar as the parser and analyzer
+ * allow for it.
+ */
+ switch (root_type->kind) {
+ case fb_is_table:
+ kind = "table_";
+ break;
+ case fb_is_struct:
+ kind = "struct_";
+ break;
+ default:
+ gen_panic(out, "internal error: roots can only be structs or tables");
+ return;
+ }
+ fb_compound_name((fb_compound_type_t *)root_type, &snt);
+ assert(container->kind == fb_is_table);
+ fb_compound_name((fb_compound_type_t *)container, &snc);
+ symbol_name(member, &n, &s);
+ fprintf(out->fp, "__%snested_buffer_as_root(%s, %.*s, %s, %s)\n", nsc, snc.text, n, s, snt.text, kind);
+}
+
+static void gen_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s, *tname, *tname_ns, *tname_prefix;
+ int n, r;
+ int is_primary_key, current_key_processed;
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+ fb_literal_t literal;
+ int is_optional;
+
+ assert(ct->symbol.kind == fb_is_table);
+
+ fb_clear(snt);
+ fb_clear(snref);
+
+ fprintf(out->fp, "\n");
+ fb_compound_name(ct, &snt);
+ print_doc(out, "", ct->doc);
+ fprintf(out->fp,
+ /*
+ * We don't really need the struct, but it provides better
+ * type safety than a typedef void *.
+ */
+ "struct %s_table { uint8_t unused__; };\n"
+ "\n",
+ snt.text);
+ fprintf(out->fp,
+ "static inline size_t %s_vec_len(%s_vec_t vec)\n"
+ "__%svec_len(vec)\n",
+ snt.text, snt.text, nsc);
+ fprintf(out->fp,
+ "static inline %s_table_t %s_vec_at(%s_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%s_table_t, vec, i, 0)\n",
+ snt.text, snt.text, snt.text, nsc, snt.text);
+ fprintf(out->fp,
+ "__%stable_as_root(%s)\n",
+ nsc, snt.text);
+ fprintf(out->fp, "\n");
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ current_key_processed = 0;
+ member = (fb_member_t *)sym;
+ is_primary_key = ct->primary_key == member;
+ is_optional = !!(member->flags & fb_fm_optional);
+ print_doc(out, "", member->doc);
+ /*
+ * In flatc, there can be at most one key field, and it should be
+ * scalar or string. Here we export all keys using the
+ * <table>_vec_find_by_<fieldname> convention and let the parser deal with
+ * semantics. Keys on unsupported fields are ignored. The first
+ * valid find operation is also mapped to just <table>_vec_find.
+ */
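+ /*
+ * Illustrative sketch (hypothetical table `Monster` with a string key
+ * field `name`; example names only): the generated macros provide
+ *
+ * size_t i = Monster_vec_find_by_name(vec, "Orc");
+ * size_t j = Monster_vec_find(vec, "Orc"); // alias for the primary key
+ *
+ * where `vec` must be sorted by that key for find to return a valid index.
+ */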
+ symbol_name(&member->symbol, &n, &s);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "/* Skipping deprecated field: '%s_%.*s' */\n\n", snt.text, n, s);
+ continue;
+ }
+ r = (member->metadata_flags & fb_f_required) != 0;
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tname_prefix = scalar_type_prefix(member->type.st);
+ print_literal(member->type.st, &member->value, literal);
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sdefine_scalar_optional_field(%"PRIu64", %s, %.*s, %s%s, %s%s, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, nsc, tname_prefix, tname_ns, tname, literal);
+ } else {
+ fprintf(out->fp,
+ "__%sdefine_scalar_field(%"PRIu64", %s, %.*s, %s%s, %s%s, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, nsc, tname_prefix, tname_ns, tname, literal);
+ }
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_table_sort_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case vt_vector_type:
+ /* They all use a namespace. */
+ tname = scalar_vector_type_name(member->type.st);
+ tname_ns = nsc;
+ fprintf(out->fp,
+ "__%sdefine_vector_field(%"PRIu64", %s, %.*s, %s%s, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, tname_ns, tname, r);
+ if (member->nest) {
+ gen_nested_root(out, &member->nest->symbol, &ct->symbol, &member->symbol);
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "__%sdefine_string_field(%"PRIu64", %s, %.*s, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, r);
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp,
+ "__%sdefine_find_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_table_sort_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "__%sdefine_vector_field(%"PRIu64", %s, %.*s, %sstring_vec_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, nsc, r);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sdefine_struct_field(%"PRIu64", %s, %.*s, %s_struct_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "__%sdefine_table_field(%"PRIu64", %s, %.*s, %s_table_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ break;
+ case fb_is_enum:
+ print_literal(member->type.ct->type.st, &member->value, literal);
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sdefine_scalar_optional_field(%"PRIu64", %s, %.*s, %s, %s_enum_t, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, snref.text, literal);
+ } else {
+ fprintf(out->fp,
+ "__%sdefine_scalar_field(%"PRIu64", %s, %.*s, %s, %s_enum_t, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, snref.text, literal);
+ }
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_table_sort_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "__%sdefine_union_field(%s, %"PRIu64", %s, %.*s, %s, %u)\n",
+ nsc, nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type in table during code generation");
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ break;
+ case fb_is_table:
+ break;
+ case fb_is_enum:
+ break;
+ case fb_is_union:
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type in table during code generation");
+ break;
+ }
+ if (member->type.ct->symbol.kind == fb_is_union) {
+ fprintf(out->fp,
+ "__%sdefine_union_vector_field(%s, %"PRIu64", %s, %.*s, %s, %u)\n",
+ nsc, nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ } else {
+ fprintf(out->fp,
+ "__%sdefine_vector_field(%"PRIu64", %s, %.*s, %s_vec_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type during code generation");
+ break;
+ }
+ if ((member->metadata_flags & fb_f_key) && !current_key_processed) {
+ fprintf(out->fp,
+ "/* Note: field has key, but there is no support for find by fields of this type. */\n");
+ /*
+ * If the first key already exists, but was for an unsupported
+ * type, we do not map the next possible key to generic find.
+ */
+ }
+ }
+}
+
+int fb_gen_c_reader(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ gen_pretext(out);
+
+ for (ct = out->S->ordered_structs; ct; ct = ct->order) {
+ gen_forward_decl(out, ct);
+ }
+ fprintf(out->fp, "\n");
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_forward_decl(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ /* Must be placed early due to nested buffer circular references. */
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ /* Fall through. */
+ case fb_is_table:
+ print_type_identifier(out, (fb_compound_type_t *)sym);
+ print_file_extension(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ /* Enums must come before structs in case they are referenced. */
+ case fb_is_enum:
+ gen_enum(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ /* Generate structs in topologically sorted order. */
+ for (ct = out->S->ordered_structs; ct; ct = ct->order) {
+ gen_struct(out, ct);
+ }
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ case fb_is_struct:
+ /* Already generated. */
+ break;
+ case fb_is_union:
+ gen_enum(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_table:
+ gen_table(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_rpc_service:
+ /* Ignore. */
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected schema component");
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+
+ if (out->opts->cgen_sort) {
+ fb_gen_c_sorter(out);
+ }
+
+ gen_footer(out);
+ return 0;
+}
diff --git a/src/compiler/codegen_c_sort.c b/src/compiler/codegen_c_sort.c
new file mode 100644
index 0000000..4319f96
--- /dev/null
+++ b/src/compiler/codegen_c_sort.c
@@ -0,0 +1,171 @@
+#include "codegen_c_sort.h"
+
+/*
+ * We choose heapsort because it is about as fast as quicksort, avoids
+ * recursion, the code is compact which makes it practical to specialize for
+ * different vector types, it can sort the flatbuffer arrays in-place,
+ * and it has only a few places with comparisons. Furthermore, heapsort
+ * has worst case (n log n) upperbound where quicksort has O(n^2) which
+ * is an attack vector, and could be a problem with large datasets
+ * The sort is not stable.
+ *
+ * Some arguments are similar to those of the __%sfind_by_field macro.
+ *
+ * NS: The namespace
+ * N: the name of the vector type
+ * X: the name suffix when there are multiple sorts for same vector type.
+ * E: Element accessor (elem = E(vector, index)).
+ * L: Vector length.
+ * A: Field accessor (or the identity function), result must match the diff function D.
+ * TK: The scalar, enum or string key type (either the element, or a field of the element).
+ * TE: The raw element type - uoffset_t for tables and strings - used for swap.
+ * D: The diff function, but unlike __find_by_field, the second
+ * argument is returned by A, not a search key, and there is no third argument.
+ * S: Swap operation - must handle offset change when offset elements are moved.
+ */
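+/*
+ * Illustrative example (hypothetical schema names): for a table `Monster`
+ * with a sorted scalar key field `hp`, the generated reader header would
+ * contain an invocation along the lines of
+ *
+ *   __flatbuffers_define_table_sort_by_scalar_field(MyGame_Monster, hp, int32_t)
+ *
+ * which instantiates the heap sort with A = MyGame_Monster_hp_get,
+ * E = MyGame_Monster_vec_at, L = MyGame_Monster_vec_len,
+ * D = __flatbuffers_scalar_diff and S = __flatbuffers_table_swap, and
+ * finally defines MyGame_Monster_vec_sort_by_hp().
+ */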
+
+int gen_sort(fb_output_t *out)
+{
+ fprintf(out->fp,
+ "#define __%sheap_sort(N, X, A, E, L, TK, TE, D, S)\\\n"
+ "static inline void __ ## N ## X ## __heap_sift_down(\\\n"
+ " N ## _mutable_vec_t vec__tmp, size_t start__tmp, size_t end__tmp)\\\n"
+ "{ size_t child__tmp, root__tmp; TK v1__tmp, v2__tmp, vroot__tmp;\\\n"
+ " root__tmp = start__tmp;\\\n"
+ " while ((root__tmp << 1) <= end__tmp) {\\\n"
+ " child__tmp = root__tmp << 1;\\\n"
+ " if (child__tmp < end__tmp) {\\\n"
+ " v1__tmp = A(E(vec__tmp, child__tmp));\\\n"
+ " v2__tmp = A(E(vec__tmp, child__tmp + 1));\\\n"
+ " if (D(v1__tmp, v2__tmp) < 0) {\\\n"
+ " child__tmp++;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " vroot__tmp = A(E(vec__tmp, root__tmp));\\\n"
+ " v1__tmp = A(E(vec__tmp, child__tmp));\\\n"
+ " if (D(vroot__tmp, v1__tmp) < 0) {\\\n"
+ " S(vec__tmp, root__tmp, child__tmp, TE);\\\n"
+ " root__tmp = child__tmp;\\\n"
+ " } else {\\\n"
+ " return;\\\n"
+ " }\\\n"
+ " }\\\n"
+ "}\\\n"
+ "static inline void __ ## N ## X ## __heap_sort(N ## _mutable_vec_t vec__tmp)\\\n"
+ "{ size_t start__tmp, end__tmp, size__tmp;\\\n"
+ " size__tmp = L(vec__tmp); if (size__tmp == 0) return; end__tmp = size__tmp - 1; start__tmp = size__tmp >> 1;\\\n"
+ " do { __ ## N ## X ## __heap_sift_down(vec__tmp, start__tmp, end__tmp); } while (start__tmp--);\\\n"
+ " while (end__tmp > 0) { \\\n"
+ " S(vec__tmp, 0, end__tmp, TE);\\\n"
+ " __ ## N ## X ## __heap_sift_down(vec__tmp, 0, --end__tmp); } }\n",
+ out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_sort_by_field(N, NK, TK, TE, D, S)\\\n"
+ " __%sheap_sort(N, _sort_by_ ## NK, N ## _ ## NK ## _get, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\\\n"
+ "static inline void N ## _vec_sort_by_ ## NK(N ## _mutable_vec_t vec__tmp)\\\n"
+ "{ __ ## N ## _sort_by_ ## NK ## __heap_sort(vec__tmp); }\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_sort(N, TK, TE, D, S)\\\n"
+ "__%sheap_sort(N, , __%sidentity, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\\\n"
+ "static inline void N ## _vec_sort(N ## _mutable_vec_t vec__tmp) { __ ## N ## __heap_sort(vec__tmp); }\n",
+ out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+        /* Subtraction doesn't work for unsigned types. */
+ "#define __%sscalar_diff(x, y) ((x) < (y) ? -1 : (x) > (y))\n"
+ "#define __%sstring_diff(x, y) __%sstring_n_cmp((x), (const char *)(y), %sstring_len(y))\n",
+ out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%svalue_swap(vec, a, b, TE) { TE x__tmp = vec[b]; vec[b] = vec[a]; vec[a] = x__tmp; }\n"
+ "#define __%suoffset_swap(vec, a, b, TE)\\\n"
+ "{ TE ta__tmp, tb__tmp, d__tmp;\\\n"
+ " d__tmp = (TE)((a - b) * sizeof(vec[0]));\\\n"
+ " ta__tmp = __flatbuffers_uoffset_read_from_pe(vec + b) - d__tmp;\\\n"
+ " tb__tmp = __flatbuffers_uoffset_read_from_pe(vec + a) + d__tmp;\\\n"
+ " __flatbuffers_uoffset_write_to_pe(vec + a, ta__tmp);\\\n"
+ " __flatbuffers_uoffset_write_to_pe(vec + b, tb__tmp); }\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sscalar_swap(vec, a, b, TE) __%svalue_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sstring_swap(vec, a, b, TE) __%suoffset_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sstruct_swap(vec, a, b, TE) __%svalue_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%stable_swap(vec, a, b, TE) __%suoffset_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_struct_sort_by_scalar_field(N, NK, TK, TE)\\\n"
+ " __%sdefine_sort_by_field(N, NK, TK, TE, __%sscalar_diff, __%sstruct_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_table_sort_by_scalar_field(N, NK, TK)\\\n"
+ " __%sdefine_sort_by_field(N, NK, TK, %suoffset_t, __%sscalar_diff, __%stable_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_table_sort_by_string_field(N, NK)\\\n"
+ " __%sdefine_sort_by_field(N, NK, %sstring_t, %suoffset_t, __%sstring_diff, __%stable_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_sort(N, T) __%sdefine_sort(N, T, T, __%sscalar_diff, __%sscalar_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_string_sort() __%sdefine_sort(%sstring, %sstring_t, %suoffset_t, __%sstring_diff, __%sstring_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc, out->nsc, out->nsc, out->nsc);
+ return 0;
+}
+
+/* reference implementation */
+#if 0
+
+/* from github swenson/sort */
+/* heap sort: based on wikipedia */
+static __inline void HEAP_SIFT_DOWN(SORT_TYPE *dst, const int64_t start, const int64_t end) {
+ int64_t root = start;
+
+ while ((root << 1) <= end) {
+ int64_t child = root << 1;
+
+ if ((child < end) && (SORT_CMP(dst[child], dst[child + 1]) < 0)) {
+ child++;
+ }
+
+ if (SORT_CMP(dst[root], dst[child]) < 0) {
+ SORT_SWAP(dst[root], dst[child]);
+ root = child;
+ } else {
+ return;
+ }
+ }
+}
+
+static __inline void HEAPIFY(SORT_TYPE *dst, const size_t size) {
+ int64_t start = size >> 1;
+
+ while (start >= 0) {
+ HEAP_SIFT_DOWN(dst, start, size - 1);
+ start--;
+ }
+}
+
+void HEAP_SORT(SORT_TYPE *dst, const size_t size) {
+ /* don't bother sorting an array of size 0 */
+ if (size == 0) {
+ return;
+ }
+
+ int64_t end = size - 1;
+ HEAPIFY(dst, size);
+
+ while (end > 0) {
+ SORT_SWAP(dst[end], dst[0]);
+ HEAP_SIFT_DOWN(dst, 0, end - 1);
+ end--;
+ }
+}
+
+#endif
diff --git a/src/compiler/codegen_c_sort.h b/src/compiler/codegen_c_sort.h
new file mode 100644
index 0000000..27f79c5
--- /dev/null
+++ b/src/compiler/codegen_c_sort.h
@@ -0,0 +1,9 @@
+#ifndef CODEGEN_SORT_C_H
+#define CODEGEN_SORT_C_H
+
+#include "codegen_c.h"
+
+int __flatcc_gen_sort(fb_output_t *out);
+#define gen_sort __flatcc_gen_sort
+
+#endif /* CODEGEN_SORT_C_H */
diff --git a/src/compiler/codegen_c_sorter.c b/src/compiler/codegen_c_sorter.c
new file mode 100644
index 0000000..3c40b1a
--- /dev/null
+++ b/src/compiler/codegen_c_sorter.c
@@ -0,0 +1,355 @@
+#include "codegen_c.h"
+
+#include "flatcc/flatcc_types.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+/* Used internally to identify sortable objects. */
+enum {
+ /* object contains at least one direct vector that needs sorting */
+ direct_sortable = 1,
+ /* object contains at least one indirect vector that needs sorting */
+    indirect_sortable = 2,
+ /* object contains at least one direct or indirect vector that needs sorting */
+ sortable = 3,
+};
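+/*
+ * Example: a table with its own vector marked `sorted` and a member table
+ * that (directly or indirectly) needs sorting ends up with
+ * export_index == (direct_sortable | indirect_sortable) == sortable.
+ */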
+
+static int gen_union_sorter(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_sort(%s_mutable_union_t u)\n{\n switch (u.type) {\n",
+ snt.text, snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (member->type.ct->export_index & sortable) {
+ fprintf(out->fp,
+ " case %s_%.*s: %s_sort(u.value); break;\n",
+ snt.text, n, s, snref.text);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ fprintf(out->fp,
+ " default: break;\n }\n}\n\n");
+ return 0;
+}
+
+static int gen_table_sorter(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ const char *tname_prefix;
+ const char *nsc = out->nsc;
+ const char *s;
+ int n;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_sort(%s_mutable_table_t t)\n{\n",
+ snt.text, snt.text);
+
+ fprintf(out->fp, " if (!t) return;\n");
+    /* Sort all children before sorting the current table. */
+ if (ct->export_index & indirect_sortable)
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_table_field(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ case fb_is_union:
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_union_field(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ default:
+ continue;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_table_vector_field_elements(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ case fb_is_union:
+ /* Although union vectors cannot be sorted, their content can be. */
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_union_vector_field_elements(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ default:
+ continue;
+ }
+ break;
+ }
+ }
+ if (ct->export_index & direct_sortable)
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ if (!(member->metadata_flags & fb_f_sorted)) continue;
+ switch (member->type.type) {
+ case vt_vector_type:
+ tname_prefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ " __%ssort_vector_field(%s, %.*s, %s%s, t)\n",
+ nsc, snt.text, n, s, nsc, tname_prefix);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ " __%ssort_vector_field(%s, %.*s, %s%s, t)\n",
+ nsc, snt.text, n, s, nsc, "string");
+ break;
+ case vt_vector_compound_type_ref:
+ if (!member->type.ct->primary_key) {
+ gen_panic(out, "internal error: unexpected type during code generation");
+ return -1;
+ }
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ fprintf(out->fp,
+ " __%ssort_vector_field(%s, %.*s, %s, t)\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ /* Union vectors cannot be sorted. */
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ fprintf(out->fp, "}\n\n");
+ return 0;
+}
+
+static int gen_table_sorter_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+ fb_compound_type_t *ct;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ if (ct->export_index & sortable) {
+ fb_compound_name(ct, &snt);
+ fprintf(out->fp,
+ "static void %s_sort(%s_mutable_table_t t);\n",
+ snt.text, snt.text);
+ }
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_union_sorters(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ ct = (fb_compound_type_t *)sym;
+ if (ct->export_index & sortable) {
+ gen_union_sorter(out, ct);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int gen_table_sorters(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ if (ct->export_index & sortable) {
+ gen_table_sorter(out, ct);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return 1 if the table or union is known to be sortable,
+ * and 0 if that information is not available.
+ *
+ * Note that if neither a table nor its direct children have
+ * sortable vectors, the table might still be sortable via a
+ * union member or via deeper nested tables. By iterating
+ * repeatedly over all objects, the indirect_sortable
+ * property eventually propagates to all affected objects.
+ * At that point no object will change its return value
+ * on repeated calls.
+ */
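+/*
+ * Example: for tables A -> B -> C where only C has a vector marked `sorted`,
+ * one pass marks C direct_sortable, a later pass marks B indirect_sortable
+ * (via C), and a pass after that marks A indirect_sortable (via B),
+ * depending on declaration order.
+ */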
+static int mark_member_sortable(fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ if (member->metadata_flags & fb_f_sorted) {
+ ct->export_index |= direct_sortable;
+ }
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ case fb_is_union:
+ break;
+ default:
+ continue;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ case fb_is_union:
+ break;
+ default:
+ continue;
+ }
+ break;
+ default:
+ continue;
+ }
+ if (member->type.ct->export_index & (sortable | indirect_sortable)) {
+ ct->export_index |= indirect_sortable;
+ }
+ }
+ return !!(ct->export_index & sortable);
+}
+
+static void init_sortable(fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ case vt_vector_compound_type_ref:
+ member->type.ct->export_index = 0;
+ break;
+ default:
+ continue;
+ }
+ }
+ ct->export_index = 0;
+}
+
+/*
+ * Use fixed-point iteration to implement a breadth-first
+ * search for tables and unions that can be sorted. The
+ * problem is slightly tricky due to self-referential types:
+ * a graph-colored depth-first search might terminate before
+ * it is known whether any non-direct descendants are
+ * sortable.
+ */
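+/*
+ * Note on termination: an object, once marked sortable, stays sortable, so
+ * `count` is non-decreasing and bounded by the number of tables and unions;
+ * the loop below therefore runs at most that many passes plus one.
+ */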
+static int mark_sortable(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ int old_count = -1, count = 0;
+
+ /* Initialize state kept in the custom export_index symbol table field. */
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ case fb_is_union:
+ init_sortable((fb_compound_type_t *)sym);
+ break;
+ }
+ }
+    /* Perform the fixed-point iteration search. */
+ while (old_count != count) {
+ old_count = count;
+ count = 0;
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ case fb_is_union:
+ count += mark_member_sortable((fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+/* To be generated towards the end of _reader.h when sort option is active. */
+int fb_gen_c_sorter(fb_output_t *out)
+{
+ mark_sortable(out);
+ gen_table_sorter_prototypes(out);
+ gen_union_sorters(out);
+ gen_table_sorters(out);
+ return 0;
+}
diff --git a/src/compiler/codegen_c_verifier.c b/src/compiler/codegen_c_verifier.c
new file mode 100644
index 0000000..9b1a048
--- /dev/null
+++ b/src/compiler/codegen_c_verifier.c
@@ -0,0 +1,327 @@
+#include "codegen_c.h"
+
+#include "flatcc/flatcc_types.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+static int gen_verifier_pretext(fb_output_t *out)
+{
+ fprintf(out->fp,
+ "#ifndef %s_VERIFIER_H\n"
+ "#define %s_VERIFIER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ /* Needed to get the file identifiers */
+ fprintf(out->fp, "#ifndef %s_READER_H\n", out->S->basenameup);
+ fprintf(out->fp, "#include \"%s_reader.h\"\n", out->S->basename);
+ fprintf(out->fp, "#endif\n");
+ fprintf(out->fp, "#include \"flatcc/flatcc_verifier.h\"\n");
+ fb_gen_c_includes(out, "_verifier.h", "_VERIFIER_H");
+ gen_prologue(out);
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_verifier_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_VERIFIER_H */\n",
+ out->S->basenameup);
+ return 0;
+}
+
+static int gen_union_verifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static int %s_union_verifier(flatcc_union_verifier_descriptor_t *ud)\n{\n switch (ud->type) {\n",
+ snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_missing:
+ /* NONE is of type vt_missing and already handled. */
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ " case %u: return flatcc_verify_union_table(ud, %s_verify_table); /* %.*s */\n",
+ (unsigned)member->value.u, snref.text, n, s);
+ continue;
+ case fb_is_struct:
+ fprintf(out->fp,
+ " case %u: return flatcc_verify_union_struct(ud, %"PRIu64", %"PRIu16"); /* %.*s */\n",
+ (unsigned)member->value.u, member->type.ct->size, member->type.ct->align, n, s);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected compound type for union verifier");
+ return -1;
+ }
+ case vt_string_type:
+ fprintf(out->fp,
+ " case %u: return flatcc_verify_union_string(ud); /* %.*s */\n",
+ (unsigned)member->value.u, n, s);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected type for union verifier");
+ return -1;
+ }
+ }
+ fprintf(out->fp,
+ " default: return flatcc_verify_ok;\n }\n}\n\n");
+ return 0;
+}
+
+static int gen_table_verifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int required, first = 1;
+ const char *nsc = out->nsc;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static int %s_verify_table(flatcc_table_verifier_descriptor_t *td)\n{\n",
+ snt.text);
+
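+    /*
+     * Each non-deprecated field emits one verifier call in a chain of
+     * `if ((ret = flatcc_verify_...(td, ...))) return ret;` statements;
+     * `first` tracks whether the `int ret;` declaration has been emitted.
+     */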
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+
+ if (first) {
+ fprintf(out->fp, " int ret;\n if ((ret = ");
+ } else {
+ fprintf(out->fp, ")) return ret;\n if ((ret = ");
+ }
+ first = 0;
+ required = (member->metadata_flags & fb_f_required) != 0;
+ switch (member->type.type) {
+ case vt_scalar_type:
+ fprintf(
+ out->fp,
+ "flatcc_verify_field(td, %"PRIu64", %"PRIu64", %"PRIu16")",
+ member->id, member->size, member->align);
+ break;
+ case vt_vector_type:
+ if (member->nest) {
+ fb_compound_name((fb_compound_type_t *)&member->nest->symbol, &snref);
+ if (member->nest->symbol.kind == fb_is_table) {
+ fprintf(out->fp,
+ "flatcc_verify_table_as_nested_root(td, %"PRIu64", "
+ "%u, 0, %"PRIu16", %s_verify_table)",
+ member->id, required, member->align, snref.text);
+ } else {
+ fprintf(out->fp,
+ "flatcc_verify_struct_as_nested_root(td, %"PRIu64", "
+ "%u, 0, %"PRIu64", %"PRIu16")",
+ member->id, required, member->size, member->align);
+ }
+ } else {
+ fprintf(out->fp,
+ "flatcc_verify_vector_field(td, %"PRIu64", %d, %"PRIu64", %"PRIu16", INT64_C(%"PRIu64"))",
+ member->id, required, member->size, member->align, (uint64_t)FLATBUFFERS_COUNT_MAX(member->size));
+            }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "flatcc_verify_string_field(td, %"PRIu64", %d)",
+ member->id, required);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "flatcc_verify_string_vector_field(td, %"PRIu64", %d)",
+ member->id, required);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_verify_field(td, %"PRIu64", %"PRIu64", %"PRIu16")",
+ member->id, member->size, member->align);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_verify_table_field(td, %"PRIu64", %d, &%s_verify_table)",
+ member->id, required, snref.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_verify_union_field(td, %"PRIu64", %d, &%s_union_verifier)",
+ member->id, required, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type for table verifier");
+ return -1;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_verify_table_vector_field(td, %"PRIu64", %d, &%s_verify_table)",
+ member->id, required, snref.text);
+ break;
+ case fb_is_enum:
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_verify_vector_field(td, %"PRIu64", %d, %"PRIu64", %"PRIu16", INT64_C(%"PRIu64"))",
+ member->id, required, member->size, member->align, (uint64_t)FLATBUFFERS_COUNT_MAX(member->size));
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_verify_union_vector_field(td, %"PRIu64", %d, &%s_union_verifier)",
+ member->id, required, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type for table verifier");
+ return -1;
+ }
+ break;
+ }
+ fprintf(out->fp, " /* %.*s */", (int)sym->ident->len, sym->ident->text);
+ }
+ if (!first) {
+ fprintf(out->fp, ")) return ret;\n");
+ }
+ fprintf(out->fp, " return flatcc_verify_ok;\n");
+ fprintf(out->fp, "}\n\n");
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_table_as_root(buf, bufsiz, %s_identifier, &%s_verify_table);\n}\n\n",
+ snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_typed_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_table_as_root(buf, bufsiz, %s_type_identifier, &%s_verify_table);\n}\n\n",
+ snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_verify_table_as_root(buf, bufsiz, fid, &%s_verify_table);\n}\n\n",
+ snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, %sthash_t thash)\n"
+ "{\n return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &%s_verify_table);\n}\n\n",
+ snt.text, nsc, snt.text);
+ return 0;
+}
+
+static int gen_struct_verifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_struct_as_root(buf, bufsiz, %s_identifier, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, snt.text, ct->size, ct->align);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_typed_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_struct_as_typed_root(buf, bufsiz, %s_type_hash, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, snt.text, ct->size, ct->align);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, %sthash_t thash)\n"
+ "{\n return flatcc_verify_struct_as_typed_root(buf, bufsiz, thash, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, out->nsc, ct->size, ct->align);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_verify_struct_as_root(buf, bufsiz, fid, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, ct->size, ct->align);
+ return 0;
+}
+
+static int gen_verifier_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static int %s_verify_table(flatcc_table_verifier_descriptor_t *td);\n",
+ snt.text);
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_union_verifiers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_union_verifier(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_struct_verifiers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ gen_struct_verifier(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_table_verifiers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_table_verifier(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+int fb_gen_c_verifier(fb_output_t *out)
+{
+ gen_verifier_pretext(out);
+ gen_verifier_prototypes(out);
+ gen_union_verifiers(out);
+ gen_struct_verifiers(out);
+ gen_table_verifiers(out);
+ gen_verifier_footer(out);
+ return 0;
+}
diff --git a/src/compiler/codegen_schema.c b/src/compiler/codegen_schema.c
new file mode 100644
index 0000000..d0c9fde
--- /dev/null
+++ b/src/compiler/codegen_schema.c
@@ -0,0 +1,581 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "flatcc/reflection/reflection_builder.h"
+#include "symbols.h"
+#include "parser.h"
+#include "codegen.h"
+#include "fileio.h"
+/* Needed to store length prefix. */
+#include "catalog.h"
+
+#define BaseType(x) FLATBUFFERS_WRAP_NAMESPACE(reflection_BaseType, x)
+
+static flatbuffers_bool_t is_optional_type(fb_value_t type, int optional, int required)
+{
+ if (required) return 0;
+ if (optional) return 1;
+ if (type.type == vt_scalar_type) return 0;
+ if (type.type == vt_compound_type_ref && type.ct->symbol.kind == fb_is_enum) return 0;
+ return 1;
+}
+
+static reflection_Type_ref_t export_type(flatcc_builder_t *B, fb_value_t type)
+{
+ fb_scalar_type_t st = fb_missing_type;
+ int32_t index = -1;
+ reflection_BaseType_enum_t base_type = BaseType(None);
+ reflection_BaseType_enum_t element = BaseType(None);
+ reflection_BaseType_enum_t primitive = BaseType(None);
+ uint16_t fixed_length = 0;
+
+ switch (type.type) {
+ case vt_scalar_type:
+ st = type.st;
+ break;
+ case vt_vector_type:
+ st = type.st;
+ base_type = BaseType(Vector);
+ break;
+ case vt_vector_string_type:
+ element = BaseType(String);
+ base_type = BaseType(Vector);
+ break;
+ case vt_vector_compound_type_ref:
+ index = (int32_t)type.ct->export_index;
+ switch (type.ct->symbol.kind) {
+ case fb_is_enum:
+ st = type.ct->type.st;
+ base_type = BaseType(Vector);
+ break;
+ case fb_is_struct:
+ case fb_is_table:
+ base_type = BaseType(Vector);
+ element = BaseType(Obj);
+ break;
+ case fb_is_union:
+ base_type = BaseType(Vector);
+ element = BaseType(Union);
+ break;
+ default:
+ break;
+ }
+ break;
+ case vt_string_type:
+ base_type = BaseType(String);
+ break;
+ case vt_compound_type_ref:
+ index = (int32_t)type.ct->export_index;
+ switch (type.ct->symbol.kind) {
+ case fb_is_enum:
+ st = type.ct->type.st;
+ break;
+ case fb_is_struct:
+ case fb_is_table:
+ base_type = BaseType(Obj);
+ break;
+ case fb_is_union:
+ base_type = BaseType(Union);
+ break;
+ default:
+ index = -1;
+ break;
+ }
+ break;
+ case vt_fixed_array_type:
+ st = type.st;
+ base_type = BaseType(Array);
+ fixed_length = (uint16_t)type.len;
+ break;
+ case vt_fixed_array_string_type:
+ element = BaseType(Byte);
+ base_type = BaseType(Array);
+ fixed_length = (uint16_t)type.len;
+ break;
+ case vt_fixed_array_compound_type_ref:
+ index = (int32_t)type.ct->export_index;
+ switch (type.ct->symbol.kind) {
+ case fb_is_enum:
+ st = type.ct->type.st;
+ break;
+ case fb_is_struct:
+ case fb_is_table:
+ element = BaseType(Obj);
+ break;
+ case fb_is_union:
+ element = BaseType(Union);
+ break;
+ default:
+ break;
+ }
+ base_type = BaseType(Array);
+ fixed_length = (uint16_t)type.len;
+ break;
+ default:
+ break;
+ }
+ /* If st is set, resolve scalar type and set it to base_type or element. */
+ switch (st) {
+ case fb_missing_type: break;
+ case fb_ulong: primitive = BaseType(ULong); break;
+ case fb_uint: primitive = BaseType(UInt); break;
+ case fb_ushort: primitive = BaseType(UShort); break;
+ case fb_ubyte: primitive = BaseType(UByte); break;
+ case fb_bool: primitive = BaseType(Bool); break;
+ case fb_long: primitive = BaseType(Long); break;
+ case fb_int: primitive = BaseType(Int); break;
+ case fb_short: primitive = BaseType(Short); break;
+ case fb_byte: primitive = BaseType(Byte); break;
+ case fb_double: primitive = BaseType(Double); break;
+ case fb_float: primitive = BaseType(Float); break;
+    /* TODO: Google's flatc tool does not have char arrays, so we use Byte as the element type. */
+ case fb_char: primitive = BaseType(Byte); break;
+ default: break;
+ }
+
+ if (base_type == BaseType(None)) {
+ base_type = primitive;
+ } else if (base_type == BaseType(Vector) || base_type == BaseType(Array)) {
+ if (element == BaseType(None)) {
+ element = primitive;
+ }
+ }
+ return reflection_Type_create(B, base_type, element, index, fixed_length);
+}
+
+static void export_attributes(flatcc_builder_t *B, fb_metadata_t *m)
+{
+ for (; m; m = m->link) {
+ reflection_KeyValue_vec_push_start(B);
+ reflection_KeyValue_key_create_strn(B, m->ident->text, (size_t)m->ident->len);
+ if (m->value.type == vt_string) {
+ reflection_KeyValue_value_create_strn(B, m->value.s.s, (size_t)m->value.s.len);
+ }
+ reflection_KeyValue_vec_push_end(B);
+ }
+}
+
+static void export_fields(flatcc_builder_t *B, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ flatbuffers_bool_t has_key, deprecated, required, optional, key_processed = 0;
+ int64_t default_integer;
+ double default_real;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ /*
+ * Unlike `flatc` we allow multiple keys in the parser, but
+ * there is no way to tell which key is default in the
+ * reflection schema because the fields are sorted, so we only
+ * export the default (first) key.
+ */
+ has_key = !key_processed && (member->metadata_flags & fb_f_key) != 0;
+ required = (member->metadata_flags & fb_f_required) != 0;
+ default_integer = 0;
+ default_real = 0.0;
+ deprecated = (member->metadata_flags & fb_f_deprecated) != 0;
+ /*
+         * The flag is only set when `= null` is used in the schema, but
+         * non-scalar types are optional by default and therefore also
+         * marked optional in the binary schema.
+ */
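+        /* Example: a plain `name: string;` field is exported as optional,
+         * while `name: string (required);` is not. */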
+ optional = is_optional_type(member->type, !!(member->flags & fb_fm_optional), required);
+
+ if ((member->type.type == vt_compound_type_ref || member->type.type == vt_vector_compound_type_ref)
+ && member->type.ct->symbol.kind == fb_is_union) {
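+            /*
+             * A union (or union vector) member is exported as two reflection
+             * fields: a hidden `<name>_type` field occupying the preceding
+             * field id, followed by the union value field itself below.
+             */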
+ reflection_Field_vec_push_start(B);
+ reflection_Field_name_start(B);
+ reflection_Field_name_append(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ reflection_Field_name_append(B, "_type", 5);
+ reflection_Field_name_end(B);
+ switch(member->type.type) {
+ case vt_compound_type_ref:
+ reflection_Field_type_create(B, BaseType(UType), BaseType(None), -1, 0);
+ break;
+ case vt_vector_compound_type_ref:
+ reflection_Field_type_create(B, BaseType(Vector), BaseType(UType), -1, 0);
+ break;
+ }
+ reflection_Field_offset_add(B, (uint16_t)(member->id - 1 + 2) * sizeof(flatbuffers_voffset_t));
+ reflection_Field_id_add(B, (uint16_t)(member->id - 1));
+ reflection_Field_deprecated_add(B, deprecated);
+ reflection_Field_vec_push_end(B);
+ }
+ reflection_Field_vec_push_start(B);
+ reflection_Field_name_create(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ reflection_Field_type_add(B, export_type(B, member->type));
+ switch (ct->symbol.kind) {
+ case fb_is_table:
+ switch (member->value.type) {
+ case vt_uint:
+ default_integer = (int64_t)member->value.u;
+ break;
+ case vt_int:
+ default_integer = (int64_t)member->value.i;
+ break;
+ case vt_bool:
+ default_integer = (int64_t)member->value.b;
+ break;
+ case vt_float:
+ default_real = member->value.f;
+ break;
+ }
+ reflection_Field_default_integer_add(B, default_integer);
+ reflection_Field_default_real_add(B, default_real);
+ reflection_Field_id_add(B, (uint16_t)member->id);
+ reflection_Field_offset_add(B, (uint16_t)(member->id + 2) * sizeof(flatbuffers_voffset_t));
+ reflection_Field_key_add(B, has_key);
+ reflection_Field_required_add(B, required);
+ reflection_Field_optional_add(B, optional);
+ break;
+ case fb_is_struct:
+ reflection_Field_offset_add(B, (uint16_t)member->offset);
+ break;
+ default: break;
+ }
+        /* Deprecated struct fields are not supported by `flatc`, but the flag is exported here as an option. */
+ reflection_Field_deprecated_add(B, deprecated);
+ if (member->metadata) {
+ reflection_Field_attributes_start(B);
+ export_attributes(B, member->metadata);
+ reflection_Field_attributes_end(B);
+ }
+ reflection_Field_vec_push_end(B);
+ key_processed |= has_key;
+ }
+}
+
+/* `object_map` is filled with references to the constructed objects, and the schema `objects` vector is created from it. */
+static void export_objects(flatcc_builder_t *B, object_entry_t *objects, int nobjects,
+ reflection_Object_ref_t *object_map)
+{
+ int i, is_struct;
+ fb_compound_type_t *ct;
+
+ for (i = 0; i < nobjects; ++i) {
+ ct = objects[i].ct;
+ reflection_Object_start(B);
+ reflection_Object_name_create_str(B, objects[i].name);
+ /*
+         * We can sort fields after building because their index is not used,
+         * unlike objects and enums.
+ */
+ reflection_Object_fields_start(B);
+ export_fields(B, ct);
+ reflection_Object_fields_end(B);
+ is_struct = ct->symbol.kind == fb_is_struct;
+ if (is_struct) {
+ reflection_Object_bytesize_add(B, (int32_t)ct->size);
+ }
+ reflection_Object_is_struct_add(B, (flatbuffers_bool_t)is_struct);
+ reflection_Object_minalign_add(B, ct->align);
+ if (ct->metadata) {
+ reflection_Object_attributes_start(B);
+ export_attributes(B, ct->metadata);
+ reflection_Object_attributes_end(B);
+ }
+ object_map[i] = reflection_Object_end(B);
+ }
+ reflection_Schema_objects_create(B, object_map, (size_t)nobjects);
+}
+
+static void export_enumval(flatcc_builder_t *B, fb_member_t *member, reflection_Object_ref_t *object_map)
+{
+ int is_union = object_map != 0;
+
+ reflection_EnumVal_vec_push_start(B);
+ reflection_EnumVal_name_create(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ if (is_union) {
+ if (member->type.type == vt_compound_type_ref) {
+ /* object is deprecated in favor of union_type to support mixed union types. */
+ reflection_EnumVal_object_add(B, object_map[member->type.ct->export_index]);
+ }
+ reflection_EnumVal_union_type_add(B, export_type(B, member->type));
+ }
+ reflection_EnumVal_value_add(B, (int64_t)member->value.u);
+ reflection_EnumVal_vec_push_end(B);
+}
+
+static void export_enums(flatcc_builder_t *B, enum_entry_t *enums, int nenums,
+ reflection_Object_ref_t *object_map)
+{
+ int i, is_union;
+ fb_compound_type_t *ct;
+ fb_symbol_t *sym;
+
+ reflection_Schema_enums_start(B);
+ for (i = 0; i < nenums; ++i) {
+ ct = enums[i].ct;
+ is_union = ct->symbol.kind == fb_is_union;
+ reflection_Enum_vec_push_start(B);
+ reflection_Enum_name_create_str(B, enums[i].name);
+ reflection_Enum_values_start(B);
+ for (sym = ct->members; sym; sym = sym->link) {
+ export_enumval(B, (fb_member_t *)sym, is_union ? object_map : 0);
+ }
+ reflection_Enum_values_end(B);
+ reflection_Enum_is_union_add(B, (flatbuffers_bool_t)is_union);
+ reflection_Enum_underlying_type_add(B, export_type(B, ct->type));
+ if (ct->metadata) {
+ reflection_Enum_attributes_start(B);
+ export_attributes(B, ct->metadata);
+ reflection_Enum_attributes_end(B);
+ }
+ reflection_Enum_vec_push_end(B);
+ }
+ reflection_Schema_enums_end(B);
+}
+
+static void export_root_type(flatcc_builder_t *B, fb_symbol_t * root_type,
+ reflection_Object_ref_t *object_map)
+{
+ fb_compound_type_t *ct;
+ if (root_type) {
+ /*
+ * We could also store a struct object here, but since the
+         * binary schema says root_table, not root_type as in the text
+ * schema, it would be misleading.
+ */
+ if (root_type->kind == fb_is_table) {
+ ct = (fb_compound_type_t *)root_type;
+ reflection_Schema_root_table_add(B, object_map[ct->export_index]);
+ }
+ }
+}
+
+static void export_call(flatcc_builder_t *B, fb_member_t *member, reflection_Object_ref_t *object_map)
+{
+ reflection_RPCCall_vec_push_start(B);
+ reflection_RPCCall_name_create(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ reflection_RPCCall_request_add(B, object_map[member->req_type.ct->export_index]);
+ reflection_RPCCall_response_add(B, object_map[member->type.ct->export_index]);
+ if (member->metadata) {
+ reflection_RPCCall_attributes_start(B);
+ export_attributes(B, member->metadata);
+ reflection_RPCCall_attributes_end(B);
+ }
+ reflection_RPCCall_vec_push_end(B);
+}
+
+static void export_services(flatcc_builder_t *B, service_entry_t *services, int nservices,
+ reflection_Object_ref_t *object_map)
+{
+ int i;
+ fb_compound_type_t *ct;
+ fb_symbol_t *sym;
+
+ reflection_Schema_services_start(B);
+ for (i = 0; i < nservices; ++i) {
+ ct = services[i].ct;
+ reflection_Service_vec_push_start(B);
+ reflection_Service_name_create_str(B, services[i].name);
+ reflection_Service_calls_start(B);
+ for (sym = ct->members; sym; sym = sym->link) {
+ export_call(B, (fb_member_t *)sym, object_map);
+ }
+ reflection_Service_calls_end(B);
+ if (ct->metadata) {
+ reflection_Service_attributes_start(B);
+ export_attributes(B, ct->metadata);
+ reflection_Service_attributes_end(B);
+ }
+ reflection_Service_vec_push_end(B);
+ }
+ reflection_Schema_services_end(B);
+}
+
+static int export_schema(flatcc_builder_t *B, fb_options_t *opts, fb_schema_t *S)
+{
+ catalog_t catalog;
+ reflection_Object_ref_t *object_map = 0;
+
+ if (build_catalog(&catalog, S, opts->bgen_qualify_names, &S->root_schema->scope_index)) {
+ return -1;
+ }
+
+ if (catalog.nobjects > 0 && !(object_map = malloc((size_t)catalog.nobjects * sizeof(object_map[0])))) {
+ clear_catalog(&catalog);
+ return -1;
+ }
+
+ /* Build the schema. */
+
+ if (opts->bgen_length_prefix) {
+ reflection_Schema_start_as_root_with_size(B);
+ } else {
+ reflection_Schema_start_as_root(B);
+ }
+ if (S->file_identifier.type == vt_string) {
+ reflection_Schema_file_ident_create(B,
+ S->file_identifier.s.s, (size_t)S->file_identifier.s.len);
+ }
+ if (S->file_extension.type == vt_string) {
+ reflection_Schema_file_ext_create(B,
+ S->file_extension.s.s, (size_t)S->file_extension.s.len);
+ }
+ export_objects(B, catalog.objects, catalog.nobjects, object_map);
+ export_enums(B, catalog.enums, catalog.nenums, object_map);
+ export_root_type(B, S->root_type.type, object_map);
+ export_services(B, catalog.services, catalog.nservices, object_map);
+
+ reflection_Schema_end_as_root(B);
+
+    /* Clean up support data structures. */
+
+ clear_catalog(&catalog);
+ if (object_map) {
+ free(object_map);
+ }
+ return 0;
+}
+
+/*
+ * We do not sort attributes because we would lose ordering
+ * information between different attributes, and between identically
+ * named attributes, because the sort is not stable.
+ *
+ * The C bindings have a scan interface that can find attributes
+ * in order of appearance.
+ *
+ * Field sorting is done on the finished buffer.
+ */
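+/*
+ * Note: reflection_Field_vec_sort sorts by the vector's default key, which
+ * is the field name in the reflection schema, so fields can later be found
+ * by name with the generated binary search.
+ */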
+static void sort_objects(void *buffer)
+{
+ size_t i;
+ reflection_Schema_table_t schema;
+ reflection_Object_vec_t objects;
+ reflection_Object_table_t object;
+ reflection_Field_vec_t fields;
+ reflection_Field_mutable_vec_t mfields;
+
+ schema = reflection_Schema_as_root(buffer);
+ objects = reflection_Schema_objects(schema);
+ for (i = 0; i < reflection_Object_vec_len(objects); ++i) {
+ object = reflection_Object_vec_at(objects, i);
+ fields = reflection_Object_fields(object);
+ if (fields) {
+ mfields = (reflection_Field_mutable_vec_t)fields;
+ reflection_Field_vec_sort(mfields);
+ }
+ }
+}
+
+static FILE *open_file(fb_options_t *opts, fb_schema_t *S)
+{
+ FILE *fp = 0;
+ char *path = 0, *ext = 0;
+ const char *prefix = opts->outpath ? opts->outpath : "";
+ size_t len, prefix_len = strlen(prefix);
+ const char *name;
+
+ name = S->basename;
+ len = strlen(name);
+
+ ext = fb_create_path_ext(".", flatbuffers_extension);
+ /* We generally should not use cgen options here, but in this case it makes sense. */
+ if (opts->gen_stdout) {
+ return stdout;
+ }
+ checkmem((path = fb_create_join_path_n(prefix, prefix_len, name, len, ext, 1)));
+ fp = fopen(path, "wb");
+ if (!fp) {
+ fprintf(stderr, "error opening file for writing binary schema: %s\n", path);
+ }
+ free(path);
+ free(ext);
+ return fp;
+}
+
+static void close_file(FILE *fp)
+{
+ if (fp && fp != stdout) {
+ fclose(fp);
+ }
+}
+
+/*
+ * Normally enums are required to be ascending in the schema and
+ * therefore there is no need to sort enums. If not, we export them in
+ * the order defined anyway because there is no well-defined ordering
+ * and blindly sorting the content would just lose more information.
+ *
+ * In conclusion: find by enum value is only supported when enums are
+ * defined in consecutive order.
+ *
+ * Refers to: `opts->ascending_enum`.
+ *
+ * `size` must hold the maximum buffer size.
+ * Returns the input buffer and updates the size argument with the size
+ * of the generated schema; if that size exceeds the given maximum, the
+ * content is not copied.
+ */
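+/*
+ * Usage sketch (illustrative):
+ *
+ *   char buf[8192];
+ *   size_t size = sizeof(buf);
+ *   fb_codegen_bfbs_to_buffer(opts, S, buf, &size);
+ *   if (size > sizeof(buf)) {
+ *       ... the schema did not fit; size holds the required byte count ...
+ *   }
+ */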
+void *fb_codegen_bfbs_to_buffer(fb_options_t *opts, fb_schema_t *S, void *buffer, size_t *size)
+{
+ flatcc_builder_t builder, *B;
+
+ B = &builder;
+ flatcc_builder_init(B);
+ export_schema(B, opts, S);
+ if (!flatcc_builder_copy_buffer(B, buffer, *size)) {
+ goto done;
+ }
+ sort_objects(buffer);
+done:
+ *size = flatcc_builder_get_buffer_size(B);
+ flatcc_builder_clear(B);
+ return buffer;
+}
+
+/*
+ * Like to_buffer, but returns allocated buffer.
+ * Updates size argument with buffer size if not null.
+ * Returned buffer must be deallocated with `free`.
+ * The buffer is malloc-aligned, which should suffice for reflection buffers.
+ */
+void *fb_codegen_bfbs_alloc_buffer(fb_options_t *opts, fb_schema_t *S, size_t *size)
+{
+ flatcc_builder_t builder, *B;
+ void *buffer = 0;
+
+ B = &builder;
+ flatcc_builder_init(B);
+ if (export_schema(B, opts, S)) {
+ goto done;
+ }
+ if (!(buffer = flatcc_builder_finalize_buffer(B, size))) {
+ goto done;
+ }
+ sort_objects(buffer);
+done:
+ flatcc_builder_clear(B);
+ return buffer;
+}
+
+int fb_codegen_bfbs_to_file(fb_options_t *opts, fb_schema_t *S)
+{
+ void *buffer;
+ size_t size;
+ FILE *fp;
+ int ret = -1;
+
+ fp = open_file(opts, S);
+ if (!fp) {
+ return -1;
+ }
+ buffer = fb_codegen_bfbs_alloc_buffer(opts, S, &size);
+ if (!buffer) {
+ fprintf(stderr, "failed to generate binary schema\n");
+ goto done;
+ }
+ if (size != fwrite(buffer, 1, size, fp)) {
+ fprintf(stderr, "could not write binary schema to file\n");
+ goto done;
+ }
+ ret = 0;
+done:
+ if (buffer) {
+ free(buffer);
+ }
+ close_file(fp);
+ return ret;
+}
diff --git a/src/compiler/coerce.c b/src/compiler/coerce.c
new file mode 100644
index 0000000..6ab12a6
--- /dev/null
+++ b/src/compiler/coerce.c
@@ -0,0 +1,266 @@
+#include "coerce.h"
+
+/*
+ * Be aware that some value variants represent actual values (e.g.
+ * vt_int), and others represent a type (e.g. vt_scalar) which holds a
+ * type identifier token. Here we implicitly expect a vt_scalar type as
+ * the first argument, but only receive the token. The second argument is
+ * a value literal. Our job is to decide if the value fits within the
+ * given type. Our internal representation already ensures that a value
+ * fits within a 64-bit signed or unsigned integer, or a double; otherwise
+ * the parser would have set vt_invalid type on the value.
+ *
+ * If the value is invalid, success is returned because the
+ * error is presumably already generated. If the value is of some other
+ * type than expected, an error is generated.
+ *
+ * Symbolic names are not allowed as values here.
+ *
+ * Converts non-negative signed integers to unsigned integers, unsigned
+ * integers to signed integers when the target type is signed, and
+ * integers to floats, all subject to range checks.
+ *
+ * Optionally allows 1 to be assigned as true and 0 as false, and vice
+ * versa when allow_boolean_conversion is enabled.
+ *
+ * Returns 0 on success, -1 on error.
+ */
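+/*
+ * Example: for a schema field `x: float = 1` the literal parses as vt_uint
+ * and is coerced here to vt_float 1.0, while `y: ubyte = 300` fails with an
+ * "8-bit unsigned byte overflow" error.
+ */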
+int fb_coerce_scalar_type(fb_parser_t *P, fb_symbol_t *sym, fb_scalar_type_t st, fb_value_t *value)
+{
+ double d;
+ float f;
+
+ if (!value->type) {
+ return 0;
+ }
+ /*
+ * The parser only produces negative vt_int values, which simplifies
+ * the logic, but to make this operation robust against multiple
+ * coercion steps, we first convert back to uint if the assumption turns
+ * out false.
+ */
+ if (value->type == vt_int && value->i >= 0) {
+ value->type = vt_uint;
+ value->u = (uint64_t)value->i;
+ }
+ if (value->type == vt_invalid) {
+ /* Silently ignore past errors. */
+ return 0;
+ }
+ if (value->type == vt_bool && st != fb_bool && P->opts.allow_boolean_conversion) {
+ value->type = vt_uint;
+ value->u = (uint64_t)value->b;
+ assert(value->u == 1 || value->u == 0);
+ }
+ switch (st) {
+ case fb_ulong:
+ if (value->type != vt_uint) {
+            error_sym(P, sym, "64-bit unsigned long type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_uint:
+ if (value->type != vt_uint) {
+ error_sym(P, sym, "32-bit unsigned int type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ if (value->u > UINT32_MAX) {
+ error_sym(P, sym, "32-bit unsigned int overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_ushort:
+ if (value->type != vt_uint) {
+ error_sym(P, sym, "16-bit unsigned short type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ if (value->u > UINT16_MAX) {
+ error_sym(P, sym, "16-bit unsigned short overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_char:
+    /* Whether plain char is signed is implementation-defined in C, but flatcc treats it as unsigned. */
+ case fb_ubyte:
+ if (value->type != vt_uint) {
+ error_sym(P, sym, "8-bit unsigned byte type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ if (value->u > UINT8_MAX) {
+ error_sym(P, sym, "8-bit unsigned byte overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_long:
+ if (value->type == vt_int) {
+ /* Native format is always ok, or parser would have failed. */
+ return 0;
+ }
+ if (value->type == vt_uint) {
+ if (value->u >= (1ULL << 63)) {
+                error_sym(P, sym, "64-bit signed long overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+        error_sym(P, sym, "64-bit signed long type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_int:
+ if (value->type == vt_int) {
+ if (value->i < INT32_MIN) {
+ error_sym(P, sym, "32-bit signed int underflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ }
+ if (value->type == vt_uint) {
+            if (value->u > INT32_MAX) {
+ error_sym(P, sym, "32-bit signed int overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+ error_sym(P, sym, "32-bit signed int type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_short:
+ if (value->type == vt_int) {
+ if (value->i < INT16_MIN) {
+ error_sym(P, sym, "16-bit signed short underflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ }
+ if (value->type == vt_uint) {
+            if (value->u > INT16_MAX) {
+ error_sym(P, sym, "16-bit signed short overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+ error_sym(P, sym, "16-bit signed short type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_byte:
+ if (value->type == vt_int) {
+ if (value->i < INT8_MIN) {
+ error_sym(P, sym, "8-bit signed byte underflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ }
+ if (value->type == vt_uint) {
+            if (value->u > INT8_MAX) {
+ error_sym(P, sym, "8-bit signed byte overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+ error_sym(P, sym, "8-bit signed byte type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_bool:
+ if (value->type == vt_uint && P->opts.allow_boolean_conversion) {
+ if (value->u > 1) {
+ error_sym(P, sym, "boolean integer conversion only accepts 0 (false) or 1 (true)");
+ value->type = vt_invalid;
+ return -1;
+ }
+ } else if (value->type != vt_bool) {
+ error_sym(P, sym, "boolean type only accepts 'true' or 'false' as values");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_double:
+ switch (value->type) {
+ case vt_int:
+ d = (double)value->i;
+ if ((int64_t)d != value->i) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 64-bit double type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = d;
+ value->type = vt_float;
+ return 0;
+ case vt_uint:
+ d = (double)value->u;
+ if ((uint64_t)d != value->u) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 64-bit double type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = d;
+ value->type = vt_float;
+ return 0;
+ case vt_float:
+            /* Double is our internal representation, so there is no loss at this point. */
+ return 0;
+ default:
+ error_sym(P, sym, "64-bit double type only accepts integer and float values");
+ value->type = vt_invalid;
+ return -1;
+ }
+ case fb_float:
+ switch (value->type) {
+ case vt_int:
+ f = (float)value->i;
+ if ((int64_t)f != value->i) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 32-bit float type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = f;
+ value->type = vt_float;
+ return 0;
+ case vt_uint:
+ f = (float)value->u;
+ if ((uint64_t)f != value->u) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 32-bit float type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = f;
+ value->type = vt_float;
+ return 0;
+ case vt_float:
+ return 0;
+ default:
+ error_sym(P, sym, "32-bit float type only accepts integer and float values");
+ value->type = vt_invalid;
+ return -1;
+ }
+ default:
+ error_sym(P, sym, "scalar type expected");
+ value->type = vt_invalid;
+ return -1;
+ }
+}
+
diff --git a/src/compiler/coerce.h b/src/compiler/coerce.h
new file mode 100644
index 0000000..91c43f2
--- /dev/null
+++ b/src/compiler/coerce.h
@@ -0,0 +1,13 @@
+#ifndef COERCE_H
+#define COERCE_H
+
+#include <assert.h>
+
+#include "symbols.h"
+#include "parser.h"
+
+int __flatcc_fb_coerce_scalar_type(fb_parser_t *P,
+ fb_symbol_t *sym, fb_scalar_type_t st, fb_value_t *value);
+#define fb_coerce_scalar_type __flatcc_fb_coerce_scalar_type
+
+#endif /* COERCE_H */
diff --git a/src/compiler/fileio.c b/src/compiler/fileio.c
new file mode 100644
index 0000000..56d88c1
--- /dev/null
+++ b/src/compiler/fileio.c
@@ -0,0 +1,225 @@
+#include <string.h>
+#include <stdio.h>
+
+/* Ensures portable headers are included such as inline. */
+#include "config.h"
+#include "fileio.h"
+#include "pstrutil.h"
+
+char *fb_copy_path_n(const char *path, size_t len)
+{
+ size_t n;
+ char *s;
+
+ n = strnlen(path, len);
+ if ((s = malloc(n + 1))) {
+ memcpy(s, path, n);
+ s[n] = '\0';
+ }
+ return s;
+}
+
+char *fb_copy_path(const char *path)
+{
+ size_t n;
+ char *s;
+
+ n = strlen(path);
+ if ((s = malloc(n + 1))) {
+ memcpy(s, path, n);
+ s[n] = '\0';
+ }
+ return s;
+}
+
+size_t fb_chomp(const char *path, size_t len, const char *ext)
+{
+ size_t ext_len = ext ? strlen(ext) : 0;
+ if (len > ext_len && 0 == strncmp(path + len - ext_len, ext, ext_len)) {
+ len -= ext_len;
+ }
+ return len;
+}
+
+char *fb_create_join_path_n(const char *prefix, size_t prefix_len,
+ const char *suffix, size_t suffix_len, const char *ext, int path_sep)
+{
+ char *path;
+ size_t ext_len = ext ? strlen(ext) : 0;
+ size_t n;
+
+ if (!prefix ||
+ (suffix_len > 0 && (suffix[0] == '/' || suffix[0] == '\\')) ||
+ (suffix_len > 1 && suffix[1] == ':')) {
+ prefix_len = 0;
+ }
+ if (path_sep && (prefix_len == 0 ||
+ (prefix[prefix_len - 1] == '/' || prefix[prefix_len - 1] == '\\'))) {
+ path_sep = 0;
+ }
+ path = malloc(prefix_len + !!path_sep + suffix_len + ext_len + 1);
+ if (!path) {
+ return 0;
+ }
+ n = 0;
+ if (prefix_len > 0) {
+ memcpy(path, prefix, prefix_len);
+ n += prefix_len;
+ }
+ if (path_sep) {
+ path[n++] = '/';
+ }
+ memcpy(path + n, suffix, suffix_len);
+ n += suffix_len;
+ memcpy(path + n, ext, ext_len);
+ n += ext_len;
+ path[n] = '\0';
+ return path;
+}
+
+char *fb_create_join_path(const char *prefix, const char *suffix, const char *ext, int path_sep)
+{
+ return fb_create_join_path_n(prefix, prefix ? strlen(prefix) : 0,
+ suffix, suffix ? strlen(suffix) : 0, ext, path_sep);
+}
+
+char *fb_create_path_ext_n(const char *path, size_t path_len, const char *ext)
+{
+ return fb_create_join_path_n(0, 0, path, path_len, ext, 0);
+}
+
+char *fb_create_path_ext(const char *path, const char *ext)
+{
+ return fb_create_join_path(0, path, ext, 0);
+}
+
+char *fb_create_make_path_n(const char *path, size_t len)
+{
+ size_t i, j, n;
+ char *s;
+
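+    /*
+     * Two passes: the first counts the extra backslashes needed for
+     * escaping, the second writes the escaped copy.
+     */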
+ if (len == 1 && (path[0] == ' ' || path[0] == '\\')) {
+ if (!(s = malloc(3))) {
+ return 0;
+ }
+ s[0] = '\\';
+ s[1] = path[0];
+ s[2] = '\0';
+ return s;
+ }
+ if (len <= 1) {
+ return fb_copy_path_n(path, len);
+ }
+ for (i = 0, n = len; i < len - 1; ++i) {
+ if (path[i] == '\\' && path[i + 1] == ' ') {
+ ++n;
+ }
+ n += path[i] == ' ';
+ }
+ n += path[i] == ' ';
+ if (!(s = malloc(n + 1))) {
+ return 0;
+ }
+ for (i = 0, j = 0; i < len - 1; ++i, ++j) {
+ if (path[i] == '\\' && path[i + 1] == ' ') {
+ s[j++] = '\\';
+ }
+ if (path[i] == ' ') {
+ s[j++] = '\\';
+ }
+ s[j] = path[i];
+ }
+ if (path[i] == ' ') {
+ s[j++] = '\\';
+ }
+ s[j++] = path[i];
+ s[j] = 0;
+ return s;
+}
+
+char *fb_create_make_path(const char *path)
+{
+ return fb_create_make_path_n(path, strlen(path));
+}
+
+size_t fb_find_basename(const char *path, size_t len)
+{
+ char *p = (char *)path;
+
+ p += len;
+ while(p != path) {
+ --p;
+ if (*p == '/' || *p == '\\') {
+ ++p;
+ break;
+ }
+ }
+ return (size_t)(p - path);
+}
+
+char *fb_create_basename(const char *path, size_t len, const char *ext)
+{
+ size_t pos;
+ char *s;
+
+ pos = fb_find_basename(path, len);
+ path += pos;
+ len -= pos;
+ len = fb_chomp(path, len, ext);
+ if ((s = malloc(len + 1))) {
+ memcpy(s, path, len);
+ s[len] = '\0';
+ }
+ return s;
+}
+
+char *fb_read_file(const char *filename, size_t max_size, size_t *size_out)
+{
+ FILE *fp;
+ long k;
+ size_t size, pos, n, _out;
+ char *buf;
+
+ size_out = size_out ? size_out : &_out;
+
+ fp = fopen(filename, "rb");
+ size = 0;
+ buf = 0;
+
+ if (!fp) {
+ goto fail;
+ }
+ fseek(fp, 0L, SEEK_END);
+ k = ftell(fp);
+ if (k < 0) goto fail;
+ size = (size_t)k;
+ *size_out = size;
+ if (max_size > 0 && size > max_size) {
+ goto fail;
+ }
+ rewind(fp);
+ buf = malloc(size ? size : 1);
+ if (!buf) {
+ goto fail;
+ }
+ pos = 0;
+ while ((n = fread(buf + pos, 1, size - pos, fp))) {
+ pos += n;
+ }
+ if (pos != size) {
+ goto fail;
+ }
+ fclose(fp);
+ *size_out = size;
+ return buf;
+
+fail:
+ if (fp) {
+ fclose(fp);
+ }
+ if (buf) {
+ free(buf);
+ }
+ *size_out = size;
+ return 0;
+}
diff --git a/src/compiler/fileio.h b/src/compiler/fileio.h
new file mode 100644
index 0000000..5a46f6d
--- /dev/null
+++ b/src/compiler/fileio.h
@@ -0,0 +1,86 @@
+#ifndef FILES_H
+#define FILES_H
+
+#include <stdlib.h>
+
+/*
+ * Returns an allocated copy of the path truncated to len if len is
+ * shorter. Free returned string subsequently. Also truncates to less
+ * than len if path contains null characters.
+ */
+char *__flatcc_fb_copy_path_n(const char *path, size_t len);
+#define fb_copy_path_n __flatcc_fb_copy_path_n
+
+/* Returns an allocated copy of path. Free returned string subsequently. */
+char *__flatcc_fb_copy_path(const char *path);
+#define fb_copy_path __flatcc_fb_copy_path
+
+/*
+ * Joins two paths. The prefix can optionally be null.
+ * Free returned string subsequently. If `path_sep` is true, prefix is
+ * separated from suffix with a path separator if not already present.
+ */
+char *__flatcc_fb_create_join_path_n(const char *prefix, size_t prefix_len,
+ const char *suffix, size_t suffix_len, const char *ext, int path_sep);
+#define fb_create_join_path_n __flatcc_fb_create_join_path_n
+
+char *__flatcc_fb_create_join_path(const char *prefix, const char * suffix, const char *ext, int path_sep);
+#define fb_create_join_path __flatcc_fb_create_join_path
+
+/* Adds extension to path in a new copy. */
+char *__flatcc_fb_create_path_ext_n(const char *path, size_t path_len, const char *ext);
+#define fb_create_path_ext_n __flatcc_fb_create_path_ext_n
+
+char *__flatcc_fb_create_path_ext(const char *path, const char *ext);
+#define fb_create_path_ext __flatcc_fb_create_path_ext
+
+/*
+ * Creates a path with spaces escaped in a sort of gcc/GNU Make
+ * compatible way, primarily for use with dependency files.
+ *
+ * http://clang.llvm.org/doxygen/DependencyFile_8cpp_source.html
+ *
+ * We should escape a backslash only if followed by space.
+ * We should escape a space in all cases.
+ * We ought to also handle '#', but don't because gcc fails to do so.
+ *
+ * This is dictated by how clang and gcc generate makefile
+ * dependency rules for GNU Make.
+ *
+ * This is not intended for strings used for system calls, but rather
+ * for writing to files where a quoted format is not supported.
+ *
+ */
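+/*
+ * For illustration (hypothetical file names): "my schema.fbs" becomes
+ * "my\ schema.fbs", and "odd\ name.fbs" becomes "odd\\\ name.fbs"
+ * (the backslash is doubled because it precedes a space, and the
+ * space itself is escaped as well).
+ */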
+char *__flatcc_fb_create_make_path_n(const char *path, size_t path_len);
+#define fb_create_make_path_n __flatcc_fb_create_make_path_n
+
+char *__flatcc_fb_create_make_path(const char *path);
+#define fb_create_make_path __flatcc_fb_create_make_path
+
+/*
+ * Creates a new filename stripped of its path prefix and optional ext
+ * suffix. Free returned string subsequently.
+ */
+char *__flatcc_fb_create_basename(const char *path, size_t len, const char *ext);
+#define fb_create_basename __flatcc_fb_create_basename
+
+/* Free returned buffer subsequently. Stores the file size in the `size_out` arg.
+ * If `max_size` is 0 the file is read regardless of size, otherwise,
+ * if the file size exceeds `max_size`, then `size_out` is set to the
+ * actual size and null is returned. */
+char *__flatcc_fb_read_file(const char *filename, size_t max_size, size_t *size_out);
+#define fb_read_file __flatcc_fb_read_file
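+
+/*
+ * Usage sketch ("monster.fbs" is a hypothetical file name):
+ *
+ *     size_t size;
+ *     char *buf = fb_read_file("monster.fbs", 0, &size);
+ *     if (buf) {
+ *         ...use buf[0..size-1]...
+ *         free(buf);
+ *     }
+ */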
+
+
+/*
+ * Returns offset into source path representing the longest suffix
+ * string with no path separator.
+ */
+size_t __flatcc_fb_find_basename(const char *path, size_t len);
+#define fb_find_basename __flatcc_fb_find_basename
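+
+/*
+ * For example, fb_find_basename("src/fileio.c", 12) returns 4, so
+ * path + 4 points to "fileio.c".
+ */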
+
+/* Returns input length or length reduced by ext len if ext is a proper suffix. */
+size_t __flatcc_fb_chomp(const char *path, size_t len, const char *ext);
+#define fb_chomp __flatcc_fb_chomp
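+
+/*
+ * For example, fb_chomp("monster.fbs", 11, ".fbs") returns 7, while
+ * fb_chomp("monster.fbs", 11, ".json") returns 11.
+ */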
+
+#endif /* FILES_H */
diff --git a/src/compiler/flatcc.c b/src/compiler/flatcc.c
new file mode 100644
index 0000000..3111b4c
--- /dev/null
+++ b/src/compiler/flatcc.c
@@ -0,0 +1,511 @@
+#include <assert.h>
+#include "config.h"
+#include "parser.h"
+#include "semantics.h"
+#include "fileio.h"
+#include "codegen.h"
+#include "flatcc/flatcc.h"
+
+#define checkfree(s) if (s) { free(s); s = 0; }
+
+void flatcc_init_options(flatcc_options_t *opts)
+{
+ memset(opts, 0, sizeof(*opts));
+
+ opts->max_schema_size = FLATCC_MAX_SCHEMA_SIZE;
+ opts->max_include_depth = FLATCC_MAX_INCLUDE_DEPTH;
+ opts->max_include_count = FLATCC_MAX_INCLUDE_COUNT;
+ opts->allow_boolean_conversion = FLATCC_ALLOW_BOOLEAN_CONVERSION;
+ opts->allow_enum_key = FLATCC_ALLOW_ENUM_KEY;
+ opts->allow_enum_struct_field = FLATCC_ALLOW_ENUM_STRUCT_FIELD;
+ opts->allow_multiple_key_fields = FLATCC_ALLOW_MULTIPLE_KEY_FIELDS;
+ opts->allow_primary_key = FLATCC_ALLOW_PRIMARY_KEY;
+ opts->allow_scan_for_all_fields = FLATCC_ALLOW_SCAN_FOR_ALL_FIELDS;
+ opts->allow_string_key = FLATCC_ALLOW_STRING_KEY;
+ opts->allow_struct_field_deprecate = FLATCC_ALLOW_STRUCT_FIELD_DEPRECATE;
+ opts->allow_struct_field_key = FLATCC_ALLOW_STRUCT_FIELD_KEY;
+ opts->allow_struct_root = FLATCC_ALLOW_STRUCT_ROOT;
+ opts->ascending_enum = FLATCC_ASCENDING_ENUM;
+ opts->hide_later_enum = FLATCC_HIDE_LATER_ENUM;
+ opts->hide_later_struct = FLATCC_HIDE_LATER_STRUCT;
+ opts->offset_size = FLATCC_OFFSET_SIZE;
+ opts->voffset_size = FLATCC_VOFFSET_SIZE;
+ opts->utype_size = FLATCC_UTYPE_SIZE;
+ opts->bool_size = FLATCC_BOOL_SIZE;
+
+ opts->require_root_type = FLATCC_REQUIRE_ROOT_TYPE;
+ opts->strict_enum_init = FLATCC_STRICT_ENUM_INIT;
+ /*
+ * Index 0 is the table elem count, and index 1 is the table size,
+ * so the max count is reduced by 2, meaning field ids
+ * must be between 0 and vt_max_count - 1.
+ * Usually the vtable entries are 16-bit, so FLATCC_VOFFSET_SIZE = 2.
+ * The expression is written this way to avoid shift overflow for a
+ * 64-bit voffset size.
+ */
+ opts->vt_max_count = ((1LL << (FLATCC_VOFFSET_SIZE * 8 - 1)) - 1) * 2;
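+ /* With FLATCC_VOFFSET_SIZE == 2 this yields ((1LL << 15) - 1) * 2 = 65534. */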
+
+ opts->default_schema_ext = FLATCC_DEFAULT_SCHEMA_EXT;
+ opts->default_bin_schema_ext = FLATCC_DEFAULT_BIN_SCHEMA_EXT;
+ opts->default_bin_ext = FLATCC_DEFAULT_BIN_EXT;
+
+ opts->cgen_no_conflicts = FLATCC_CGEN_NO_CONFLICTS;
+
+ opts->cgen_pad = FLATCC_CGEN_PAD;
+ opts->cgen_sort = FLATCC_CGEN_SORT;
+ opts->cgen_pragmas = FLATCC_CGEN_PRAGMAS;
+
+ opts->cgen_common_reader = 0;
+ opts->cgen_common_builder = 0;
+ opts->cgen_reader = 0;
+ opts->cgen_builder = 0;
+ opts->cgen_json_parser = 0;
+ opts->cgen_spacing = FLATCC_CGEN_SPACING;
+
+ opts->bgen_bfbs = FLATCC_BGEN_BFBS;
+ opts->bgen_qualify_names = FLATCC_BGEN_QUALIFY_NAMES;
+ opts->bgen_length_prefix = FLATCC_BGEN_LENGTH_PREFIX;
+}
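+
+/*
+ * Typical driver usage sketch (error handling elided; "monster.fbs" and
+ * my_error_out are hypothetical - any flatcc_error_fun callback works):
+ *
+ *     flatcc_options_t opts;
+ *     flatcc_context_t ctx;
+ *
+ *     flatcc_init_options(&opts);
+ *     ctx = flatcc_create_context(&opts, "monster.fbs", my_error_out, 0);
+ *     if (ctx && !flatcc_parse_file(ctx, "monster.fbs")) {
+ *         flatcc_generate_files(ctx);
+ *     }
+ *     flatcc_destroy_context(ctx);
+ */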
+
+flatcc_context_t flatcc_create_context(flatcc_options_t *opts, const char *name,
+ flatcc_error_fun error_out, void *error_ctx)
+{
+ fb_parser_t *P;
+
+ if (!(P = malloc(sizeof(*P)))) {
+ return 0;
+ }
+ if (fb_init_parser(P, opts, name, error_out, error_ctx, 0)) {
+ free(P);
+ return 0;
+ }
+ return P;
+}
+
+static flatcc_context_t __flatcc_create_child_context(flatcc_options_t *opts, const char *name,
+ fb_parser_t *P_parent)
+{
+ fb_parser_t *P;
+
+ if (!(P = malloc(sizeof(*P)))) {
+ return 0;
+ }
+ if (fb_init_parser(P, opts, name, P_parent->error_out, P_parent->error_ctx, P_parent->schema.root_schema)) {
+ free(P);
+ return 0;
+ }
+ return P;
+}
+
+/* TODO: handle include files via some sort of buffer read callback
+ * and possibly transfer the file based parser to this logic. */
+int flatcc_parse_buffer(flatcc_context_t ctx, const char *buf, size_t buflen)
+{
+ fb_parser_t *P = ctx;
+
+ /* Currently includes cannot be handled by buffers, so they are disabled. */
+ P->opts.disable_includes = 1;
+ if ((size_t)buflen > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ return -1;
+ }
+ /* Add self to set of visible schema. */
+ ptr_set_insert_item(&P->schema.visible_schema, &P->schema, ht_keep);
+ return fb_parse(P, buf, buflen, 0) || fb_build_schema(P) ? -1 : 0;
+}
+
+static void visit_dep(void *context, void *ptr)
+{
+ fb_schema_t *parent = context;
+ fb_schema_t *dep = ptr;
+
+ ptr_set_insert_item(&parent->visible_schema, dep, ht_keep);
+}
+
+static void add_visible_schema(fb_schema_t *parent, fb_schema_t *dep)
+{
+ ptr_set_visit(&dep->visible_schema, visit_dep, parent);
+}
+
+static int __parse_include_file(fb_parser_t *P_parent, const char *filename)
+{
+ flatcc_context_t *ctx = 0;
+ fb_parser_t *P = 0;
+ fb_root_schema_t *rs;
+ flatcc_options_t *opts = &P_parent->opts;
+ fb_schema_t *dep;
+
+ rs = P_parent->schema.root_schema;
+ if (rs->include_depth >= opts->max_include_depth && opts->max_include_depth > 0) {
+ fb_print_error(P_parent, "include nesting level too deep\n");
+ return -1;
+ }
+ if (rs->include_count >= opts->max_include_count && opts->max_include_count > 0) {
+ fb_print_error(P_parent, "include count limit exceeded\n");
+ return -1;
+ }
+ if (!(ctx = __flatcc_create_child_context(opts, filename, P_parent))) {
+ return -1;
+ }
+ P = (fb_parser_t *)ctx;
+ /* Don't parse the same file twice, or any other file with the same name. */
+ if ((dep = fb_schema_table_find_item(&rs->include_index, &P->schema))) {
+ add_visible_schema(&P_parent->schema, dep);
+ flatcc_destroy_context(ctx);
+ return 0;
+ }
+ P->dependencies = P_parent->dependencies;
+ P_parent->dependencies = P;
+ P->referer_path = P_parent->path;
+ /* Each parser has a root schema instance, but only the root parser's instance is used. */
+ rs->include_depth++;
+ rs->include_count++;
+ if (flatcc_parse_file(ctx, filename)) {
+ return -1;
+ }
+ add_visible_schema(&P_parent->schema, &P->schema);
+ return 0;
+}
+
+/*
+ * The depends file format is a make rule:
+ *
+ * <outputfile> : <dep1-file> <dep2-file> ...
+ *
+ * like the -MMD option for gcc/clang:
+ * lib.o.d generated with content:
+ *
+ * lib.o : header1.h header2.h
+ *
+ * We use a file name <basename>.depends for schema <basename>.fbs with content:
+ *
+ * <basename>_reader.h : <included-schema-1> ...
+ *
+ * The .d extension could be mistaken for the D language and we don't
+ * have a sensible .o.d name because of multiple outputs, so .depends
+ * is better.
+ *
+ * (The above is subject to the configuration of extensions.)
+ *
+ * TODO:
+ * perhaps we should optionally add a dependency to the common reader
+ * and builder files when they are generated separately as they should in
+ * concurrent builds.
+ *
+ * TODO:
+ * 1. we should have a file for every output we produce (_builder.h etc.)
+ * 2. reader might not even be in the output, e.g. verifier only.
+ * 3. multiple outputs don't work with ninja build 1.7.1, so just
+ * use reader for now, and possibly add an option for multiple
+ * outputs later.
+ *
+ * http://stackoverflow.com/questions/11855386/using-g-with-mmd-in-makefile-to-automatically-generate-dependencies
+ * https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
+ *
+ * Spaces in gnu make:
+ * https://www.cmcrossroads.com/article/gnu-make-meets-file-names-spaces-them
+ * See comments on gnu make handling of spaces.
+ * http://clang.llvm.org/doxygen/DependencyFile_8cpp_source.html
+ */
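+/*
+ * Illustrative example with hypothetical schema names: a root schema
+ * "monster.fbs" that includes "weapon.fbs" and "common.fbs" would,
+ * with default options, get a "monster.depends" file containing a
+ * single rule along the lines of:
+ *
+ *     monster_reader.h: weapon.fbs common.fbs
+ */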
+static int __flatcc_gen_depends_file(fb_parser_t *P)
+{
+ FILE *fp = 0;
+ const char *outpath, *basename;
+ const char *depfile, *deproot, *depext;
+ const char *targetfile, *targetsuffix, *targetroot;
+ char *path = 0, *deppath = 0, *tmppath = 0, *targetpath = 0;
+ int ret = -1;
+
+ /*
+ * The dependencies list is only correct for root files as it is a
+ * linear list. To deal with children, we would have to filter via
+ * the visible schema hash table, but we don't really need that.
+ */
+ assert(P->referer_path == 0);
+
+ outpath = P->opts.outpath ? P->opts.outpath : "";
+ basename = P->schema.basename;
+ targetfile = P->opts.gen_deptarget;
+
+
+ /* The following is mostly considering build tools generating
+ * a depfile as Ninja build would use it. It is a bit strict
+ * on path variations and currently doesn't accept multiple
+ * build products in a build rule (Ninja 1.7.1).
+ *
+ * Make depfile relative to cwd so the user can add output if
+ * needed, otherwise it is not possible, or at least difficult, to use
+ * a path given by a build tool, relative to the cwd. If --depfile is
+ * not given,
+ * then -d is given or we would not be here. In that case we add an
+ * extension "<basename>.fbs.d" in the outpath.
+ *
+ * A general problem is that the outpath may be a build root dir or
+ * a current subdir for a custom build rule while the dep file
+ * content needs the same path every time, not just an equivalent
+ * path. For dependencies, we can rely on the input schema path.
+ * The input search paths may cause confusion, but we choose the
+ * discovered path relative to cwd consistently for each schema file
+ * encountered.
+ *
+ * The target file (<target>: <include1.fbs> <include2.fbs> ...)
+ * is tricky because it is not unique - but we can choose <schema>_reader.h
+ * or <schema>.bfbs prefixed with outpath. The user should choose an
+ * outpath relative to cwd or an absolute path depending on what the
+ * build system prefers. This may not be so easy in practice, but what
+ * can we do?
+ *
+ * It is important to note the default target and the default
+ * depfile name is not just a convenience. Sometimes it is much
+ * simpler to use this version over an explicit path, sometimes
+ * perhaps not so much.
+ */
+
+ if (P->opts.gen_depfile) {
+ depfile = P->opts.gen_depfile;
+ deproot = "";
+ depext = "";
+ } else {
+ depfile = basename;
+ deproot = outpath;
+ depext = FLATCC_DEFAULT_DEP_EXT;
+ }
+ if (targetfile) {
+ targetsuffix = "";
+ targetroot = "";
+ } else {
+ targetsuffix = P->opts.bgen_bfbs
+ ? FLATCC_DEFAULT_BIN_SCHEMA_EXT
+ : FLATCC_DEFAULT_DEP_TARGET_SUFFIX;
+ targetfile = basename;
+ targetroot = outpath;
+ }
+
+ checkmem(path = fb_create_join_path(deproot, depfile, depext, 1));
+
+ checkmem(tmppath = fb_create_join_path(targetroot, targetfile, targetsuffix, 1));
+ /* Handle spaces in dependency file. */
+ checkmem((targetpath = fb_create_make_path(tmppath)));
+ checkfree(tmppath);
+
+ fp = fopen(path, "wb");
+ if (!fp) {
+ fb_print_error(P, "could not open dependency file for output: %s\n", path);
+ goto done;
+ }
+ fprintf(fp, "%s:", targetpath);
+
+ /* Don't depend on root schema. */
+ P = P->dependencies;
+ while (P) {
+ checkmem((deppath = fb_create_make_path(P->path)));
+ fprintf(fp, " %s", deppath);
+ P = P->dependencies;
+ checkfree(deppath);
+ }
+ fprintf(fp, "\n");
+ ret = 0;
+
+done:
+ checkfree(path);
+ checkfree(tmppath);
+ checkfree(targetpath);
+ checkfree(deppath);
+ if (fp) {
+ fclose(fp);
+ }
+ return ret;
+}
+
+int flatcc_parse_file(flatcc_context_t ctx, const char *filename)
+{
+ fb_parser_t *P = ctx;
+ size_t inpath_len, filename_len;
+ char *buf, *path, *include_file;
+ const char *inpath;
+ size_t size;
+ fb_name_t *inc;
+ int i, ret, is_root;
+
+ filename_len = strlen(filename);
+ /* Don't parse the same file twice, or any other file with the same basename. */
+ if (fb_schema_table_insert_item(&P->schema.root_schema->include_index, &P->schema, ht_keep)) {
+ return 0;
+ }
+ buf = 0;
+ path = 0;
+ include_file = 0;
+ ret = -1;
+ is_root = !P->referer_path;
+
+ /*
+ * For root files, read file relative to working dir first. For
+ * included files (`referer_path` set), first try include paths
+ * in order, then path relative to including file.
+ */
+ if (is_root) {
+ if (!(buf = fb_read_file(filename, P->opts.max_schema_size, &size))) {
+ if (size + P->schema.root_schema->total_source_size > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ goto done;
+ }
+ } else {
+ checkmem((path = fb_copy_path(filename)));
+ }
+ }
+ for (i = 0; !buf && i < P->opts.inpath_count; ++i) {
+ inpath = P->opts.inpaths[i];
+ inpath_len = strlen(inpath);
+ checkmem((path = fb_create_join_path_n(inpath, inpath_len, filename, filename_len, "", 1)));
+ if (!(buf = fb_read_file(path, P->opts.max_schema_size, &size))) {
+ free(path);
+ path = 0;
+ if (size > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ goto done;
+ }
+ }
+ }
+ if (!buf && !is_root) {
+ inpath = P->referer_path;
+ inpath_len = fb_find_basename(inpath, strlen(inpath));
+ checkmem((path = fb_create_join_path_n(inpath, inpath_len, filename, filename_len, "", 1)));
+ if (!(buf = fb_read_file(path, P->opts.max_schema_size, &size))) {
+ free(path);
+ path = 0;
+ if (size > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ goto done;
+ }
+ }
+ }
+ if (!buf) {
+ fb_print_error(P, "error reading included schema file: %s\n", filename);
+ goto done;
+ }
+ P->schema.root_schema->total_source_size += size;
+ P->path = path;
+ /* Parser owns path. */
+ path = 0;
+ /*
+ * Even if we do not have the recursive option set, we still
+ * need to parse all include files to make sense of the current
+ * file.
+ */
+ if (!fb_parse(P, buf, size, 1)) {
+ /* Parser owns buffer. */
+ buf = 0;
+ inc = P->schema.includes;
+ while (inc) {
+ checkmem((include_file = fb_copy_path_n(inc->name.s.s, (size_t)inc->name.s.len)));
+ if (__parse_include_file(P, include_file)) {
+ goto done;
+ }
+ free(include_file);
+ include_file = 0;
+ inc = inc->link;
+ }
+ /* Add self to set of visible schema. */
+ ptr_set_insert_item(&P->schema.visible_schema, &P->schema, ht_keep);
+ if (fb_build_schema(P)) {
+ goto done;
+ }
+ /*
+ * We choose to only generate optional .depends files for root level
+ * files. These will contain all nested files regardless of
+ * recursive file generation flags.
+ */
+ if (P->opts.gen_dep && is_root) {
+ if (__flatcc_gen_depends_file(P)) {
+ goto done;
+ }
+ }
+ ret = 0;
+ }
+
+done:
+ /* Parser owns buffer so don't free it here. */
+ checkfree(path);
+ checkfree(include_file);
+ return ret;
+}
+
+#if FLATCC_REFLECTION
+int flatcc_generate_binary_schema_to_buffer(flatcc_context_t ctx, void *buf, size_t bufsiz)
+{
+ fb_parser_t *P = ctx;
+
+ if (fb_codegen_bfbs_to_buffer(&P->opts, &P->schema, buf, &bufsiz)) {
+ return (int)bufsiz;
+ }
+ return -1;
+}
+
+void *flatcc_generate_binary_schema(flatcc_context_t ctx, size_t *size)
+{
+ fb_parser_t *P = ctx;
+
+ return fb_codegen_bfbs_alloc_buffer(&P->opts, &P->schema, size);
+}
+#endif
+
+int flatcc_generate_files(flatcc_context_t ctx)
+{
+ fb_parser_t *P = ctx, *P_leaf;
+ fb_output_t *out, output;
+ int ret = 0;
+ out = &output;
+
+ if (!P || P->failed) {
+ return -1;
+ }
+ P_leaf = 0;
+ while (P) {
+ P->inverse_dependencies = P_leaf;
+ P_leaf = P;
+ P = P->dependencies;
+ }
+ P = ctx;
+#if FLATCC_REFLECTION
+ if (P->opts.bgen_bfbs) {
+ if (fb_codegen_bfbs_to_file(&P->opts, &P->schema)) {
+ return -1;
+ }
+ }
+#endif
+
+ if (fb_init_output_c(out, &P->opts)) {
+ return -1;
+ }
+ /* This does not require a parse first. */
+ if (!P->opts.gen_append && (ret = fb_codegen_common_c(out))) {
+ goto done;
+ }
+ /* If no file was parsed, only the common files are generated, if at all. */
+ if (!P->has_schema) {
+ goto done;
+ }
+ if (!P->opts.cgen_recursive) {
+ ret = fb_codegen_c(out, &P->schema);
+ goto done;
+ }
+ /* Make sure stdout and outfile output is generated in the right order. */
+ P = P_leaf;
+ while (!ret && P) {
+ ret = P->failed || fb_codegen_c(out, &P->schema);
+ P = P->inverse_dependencies;
+ }
+done:
+ fb_end_output_c(out);
+ return ret;
+}
+
+void flatcc_destroy_context(flatcc_context_t ctx)
+{
+ fb_parser_t *P = ctx, *dep = 0;
+
+ while (P) {
+ dep = P->dependencies;
+ fb_clear_parser(P);
+ free(P);
+ P = dep;
+ }
+}
diff --git a/src/compiler/hash_tables/README.txt b/src/compiler/hash_tables/README.txt
new file mode 100644
index 0000000..dc71a59
--- /dev/null
+++ b/src/compiler/hash_tables/README.txt
@@ -0,0 +1,2 @@
+Each generic hash table type requires its own, often small, compilation
+unit, so we keep these here.
diff --git a/src/compiler/hash_tables/name_table.c b/src/compiler/hash_tables/name_table.c
new file mode 100644
index 0000000..ec0f7c2
--- /dev/null
+++ b/src/compiler/hash_tables/name_table.c
@@ -0,0 +1,21 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_name_table)
+
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_name_t *name)
+{
+ return len == (size_t)name->name.s.len && memcmp(key, name->name.s.s, len) == 0;
+}
+
+static inline const void *ht_key(fb_name_t *name)
+{
+ return name->name.s.s;
+}
+
+static inline size_t ht_key_len(fb_name_t *name)
+{
+ return (size_t)name->name.s.len;
+}
diff --git a/src/compiler/hash_tables/schema_table.c b/src/compiler/hash_tables/schema_table.c
new file mode 100644
index 0000000..2a7e322
--- /dev/null
+++ b/src/compiler/hash_tables/schema_table.c
@@ -0,0 +1,21 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_schema_table)
+
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_schema_t *schema)
+{
+ return len == (size_t)schema->name.name.s.len && memcmp(key, schema->name.name.s.s, len) == 0;
+}
+
+static inline const void *ht_key(fb_schema_t *schema)
+{
+ return schema->name.name.s.s;
+}
+
+static inline size_t ht_key_len(fb_schema_t *schema)
+{
+ return (size_t)schema->name.name.s.len;
+}
diff --git a/src/compiler/hash_tables/scope_table.c b/src/compiler/hash_tables/scope_table.c
new file mode 100644
index 0000000..7a7df3b
--- /dev/null
+++ b/src/compiler/hash_tables/scope_table.c
@@ -0,0 +1,177 @@
+ /* Note: only one hash table can be implemented in a single file. */
+
+
+/*
+ * The generic hash table is designed to make the key length optional
+ * and we do not need it because our key is a terminated token list.
+ *
+ * The token list avoids having to allocate a new string and the
+ * associated issues of memory management. In most cases the search key
+ * is also a similar token list.
+ *
+ * However, on occasion we need to look up an unparsed string of dot
+ * separated scopes (nested_flatbuffer attributes). This is not
+ * trivially possible without resorting to allocating the strings.
+ * We could parse the attribute into tokens but it is also non-trivial
+ * because the token buffer breaks pointers when reallocating and
+ * the parse output is considered read-only at this point.
+ *
+ * We can however, use a trick to overcome this because the hash table
+ * does not enforce that the search key has same representation as the
+ * stored key. We can use the key length to switch between key types.
+ *
+ * When the key is parsed to a token list:
+ *
+ * enemy: MyGame . Example.Monster
+ *
+ * the spaces around the dots may be ignored by the parser.
+ * Spaces must be handled explicitly or disallowed when the key is
+ * parsed as an attribute string (only the quoted content):
+ *
+ * (nested_flatbuffer:"MyGame.Example.Monster")
+ *
+ * vs
+ *
+ * (nested_flatbuffer:"MyGame . Example.Monster")
+ *
+ * Google's flatc allows spaces in the token stream where dots are
+ * operators, but not in attribute strings, which are supposed to
+ * be unique, so we follow that convention.
+ *
+ * On both key representations, preprocessing must strip the trailing
+ * symbol stored within the scope before lookup - minding that this
+ * lookup only finds the scope itself. For token lists this can be
+ * done by either zero terminating the list early, or by issuing
+ * a negative length (after cast to int) of elements to consider. For
+ * string keys the key length should be set to the length to be
+ * considered.
+ *
+ * If the scope string is zero length, a null key should be issued
+ * with zero length. This is indistinguishable from a zero-length token
+ * list - both indicating the global scope - null thus being a valid key.
+ *
+ * Note: it is important to not use a non-null zero length string
+ * as key.
+ */
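+/*
+ * Illustrative example (hypothetical scope): to look up the scope part
+ * of "MyGame.Example.Monster", the trailing symbol "Monster" is
+ * stripped first. With a token list key, the length argument encodes a
+ * negative token count, e.g. -2 when only the first two tokens should
+ * be considered; with a string key, the key is "MyGame.Example" with
+ * length 14.
+ */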
+
+#include "../symbols.h"
+
+static inline size_t scope_hash(const void *s, size_t len);
+#define HT_HASH_FUNCTION scope_hash
+
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_scope_table)
+#include "hash/hash_table_impl.h"
+
+/* Null is a valid key used for root scopes. */
+static inline int ht_match(const void *key, size_t len, fb_scope_t *scope)
+{
+ const fb_ref_t *name = scope->name;
+ int count = (int)len;
+ size_t n1, n2, i;
+
+ /* Note: `name` may be null here - this is the global scope name. */
+ if (count <= 0) {
+ const fb_ref_t *keyname = key;
+ /*
+ * If count is negative, this is the token count of the key
+ * which may have suffix to be ignored, otherwise the key is the
+ * full list.
+ */
+ /* `key` is a ref list (a list of tokens). */
+ while (name && keyname) {
+ n1 = (size_t)name->ident->len;
+ n2 = (size_t)keyname->ident->len;
+ if (n1 != n2 || strncmp(name->ident->text, keyname->ident->text, n1)) {
+ return 0;
+ }
+ name = name->link;
+ keyname = keyname->link;
+ if (++count == 0) {
+ return name == 0;
+ }
+ }
+ if (name || keyname) {
+ return 0;
+ }
+ return 1;
+ } else {
+ /* `key` is a dotted string. */
+ const char *s1, *s2 = key;
+ while (name) {
+ s1 = name->ident->text;
+ n1 = (size_t)name->ident->len;
+ if (n1 > len) {
+ return 0;
+ }
+ for (i = 0; i < n1; ++i) {
+ if (s1[i] != s2[i]) {
+ return 0;
+ }
+ }
+ if (n1 == len) {
+ return name->link == 0;
+ }
+ if (s2[i] != '.') {
+ return 0;
+ }
+ len -= n1 + 1;
+ s2 += n1 + 1;
+ name = name->link;
+ }
+ return 0;
+ }
+}
+
+static inline const void *ht_key(fb_scope_t *scope)
+{
+ return scope->name;
+}
+
+static inline size_t ht_key_len(fb_scope_t *scope)
+{
+ (void)scope;
+ /*
+ * Must be zero because the result is passed to ht_match
+ * when comparing two stored items for hash conflicts.
+ * Only external lookup keys can be non-zero.
+ */
+ return 0;
+}
+
+static inline size_t scope_hash(const void *key, size_t len)
+{
+ size_t h = 0, i;
+ int count = (int)len;
+
+ if (count <= 0) {
+ const fb_ref_t *name = key;
+
+ while (name) {
+ h ^= ht_strn_hash_function(name->ident->text, (size_t)name->ident->len);
+ h = ht_int_hash_function((void *)h, 0);
+ name = name->link;
+ if (++count == 0) {
+ break;
+ }
+ }
+ return h;
+ } else {
+ const char *s = key;
+ for (;;) {
+ for (i = 0; i < len; ++i) {
+ if (s[i] == '.') {
+ break;
+ }
+ }
+ h ^= ht_strn_hash_function(s, i);
+ h = ht_int_hash_function((void *)h, 0);
+ if (i == len) {
+ break;
+ }
+ len -= i + 1;
+ s += i + 1;
+ }
+ return h;
+ }
+}
diff --git a/src/compiler/hash_tables/symbol_table.c b/src/compiler/hash_tables/symbol_table.c
new file mode 100644
index 0000000..bc13d8a
--- /dev/null
+++ b/src/compiler/hash_tables/symbol_table.c
@@ -0,0 +1,22 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_symbol_table)
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_symbol_t *sym)
+{
+ return len == ht_key_len(sym) && memcmp(key, ht_key(sym), len) == 0;
+}
+
+static inline const void *ht_key(fb_symbol_t *sym)
+{
+ return sym->ident->text;
+}
+
+static inline size_t ht_key_len(fb_symbol_t *sym)
+{
+ fb_token_t *ident = sym->ident;
+
+ return (size_t)ident->len;
+}
diff --git a/src/compiler/hash_tables/value_set.c b/src/compiler/hash_tables/value_set.c
new file mode 100644
index 0000000..d623c36
--- /dev/null
+++ b/src/compiler/hash_tables/value_set.c
@@ -0,0 +1,60 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/ht_hash_function.h"
+
+static size_t value_hash_function(const void *key, size_t key_len)
+{
+ const fb_value_t *value = key;
+
+ (void)key_len;
+
+ switch (value->type) {
+ case vt_int:
+ return ht_int_hash_function((void *)(size_t)(value->i ^ value->type), sizeof(value->i));
+ case vt_uint:
+ return ht_int_hash_function((void *)(size_t)(value->u ^ value->type), sizeof(value->u));
+ case vt_bool:
+ return ht_int_hash_function((void *)(size_t)(value->b ^ value->type), sizeof(value->b));
+ default:
+ return 0;
+ }
+}
+
+#define HT_HASH_FUNCTION value_hash_function
+
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_value_set)
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_value_t *item)
+{
+ const fb_value_t *value = key;
+
+ (void)len;
+
+ if (value->type != item->type) {
+ return 0;
+ }
+ switch (value->type) {
+ case vt_int:
+ return value->i == item->i;
+ case vt_uint:
+ return value->u == item->u;
+ case vt_bool:
+ return value->b == item->b;
+ default:
+ return 0;
+ }
+}
+
+static inline const void *ht_key(fb_value_t *value)
+{
+ return value;
+}
+
+static inline size_t ht_key_len(fb_value_t *value)
+{
+ (void)value;
+
+ return 0;
+}
diff --git a/src/compiler/keywords.h b/src/compiler/keywords.h
new file mode 100644
index 0000000..51e0ae8
--- /dev/null
+++ b/src/compiler/keywords.h
@@ -0,0 +1,56 @@
+/*
+ * FlatBuffers keyword table
+ *
+ * See luthor project test files for more details on keyword table
+ * syntax.
+ *
+ * In brief: Keywords are assigned a hash key that is easy
+ * for the lexer to test.
+ *
+ * The first char is the length of the keyword, the next two chars are
+ * the leading two characters of the keyword, and the last char is the
+ * last char of the keyword. For keywords longer than 9, add the length
+ * to '0' in the first character. For keywords shorter than 3
+ * characters, see the luthor project - we don't need it. The keywords
+ * should be sorted.
+ */
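+/*
+ * For example, "table" (length 5) gets the key '5', 't', 'a', 'e', and
+ * "root_type" (length 9) gets '9', 'r', 'o', 'e', matching the entries
+ * below.
+ */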
+
+LEX_KW_TABLE_BEGIN
+ lex_kw(int, '3', 'i', 'n', 't')
+ lex_kw(bool, '4', 'b', 'o', 'l')
+ lex_kw(byte, '4', 'b', 'y', 'e')
+ lex_kw(char, '4', 'c', 'h', 'r')
+ lex_kw(enum, '4', 'e', 'n', 'm')
+ lex_kw(int8, '4', 'i', 'n', '8')
+ lex_kw(long, '4', 'l', 'o', 'g')
+ lex_kw(null, '4', 'n', 'u', 'l')
+ lex_kw(true, '4', 't', 'r', 'e')
+ lex_kw(uint, '4', 'u', 'i', 't')
+ lex_kw(false, '5', 'f', 'a', 'e')
+ lex_kw(float, '5', 'f', 'l', 't')
+ lex_kw(int32, '5', 'i', 'n', '2')
+ lex_kw(int16, '5', 'i', 'n', '6')
+ lex_kw(int64, '5', 'i', 'n', '4')
+ lex_kw(table, '5', 't', 'a', 'e')
+ lex_kw(ubyte, '5', 'u', 'b', 'e')
+ lex_kw(uint8, '5', 'u', 'i', '8')
+ lex_kw(ulong, '5', 'u', 'l', 'g')
+ lex_kw(union, '5', 'u', 'n', 'n')
+ lex_kw(short, '5', 's', 'h', 't')
+ lex_kw(double, '6', 'd', 'o', 'e')
+ lex_kw(string, '6', 's', 't', 'g')
+ lex_kw(struct, '6', 's', 't', 't')
+ lex_kw(uint32, '6', 'u', 'i', '2')
+ lex_kw(uint16, '6', 'u', 'i', '6')
+ lex_kw(uint64, '6', 'u', 'i', '4')
+ lex_kw(ushort, '6', 'u', 's', 't')
+ lex_kw(float32, '7', 'f', 'l', '2')
+ lex_kw(float64, '7', 'f', 'l', '4')
+ lex_kw(include, '7', 'i', 'n', 'e')
+ lex_kw(attribute, '9', 'a', 't', 'e')
+ lex_kw(namespace, '9', 'n', 'a', 'e')
+ lex_kw(root_type, '9', 'r', 'o', 'e')
+ lex_kw(rpc_service, '0' + 11, 'r', 'p', 'e')
+ lex_kw(file_extension, '0' + 14, 'f', 'i', 'n')
+ lex_kw(file_identifier, '0' + 15, 'f', 'i', 'r')
+LEX_KW_TABLE_END
+
diff --git a/src/compiler/parser.c b/src/compiler/parser.c
new file mode 100644
index 0000000..4f31e0b
--- /dev/null
+++ b/src/compiler/parser.c
@@ -0,0 +1,1550 @@
+/*
+ * FlatBuffers IDL parser.
+ *
+ * Originally based on the numeric parser in the Luthor lexer project.
+ *
+ * We are moving away from the TDOP approach because the grammar doesn't
+ * really benefit from it. We use the same overall framework.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <stdarg.h>
+
+#include "semantics.h"
+#include "codegen.h"
+#include "fileio.h"
+#include "pstrutil.h"
+#include "flatcc/portable/pparseint.h"
+
+void fb_default_error_out(void *err_ctx, const char *buf, size_t len)
+{
+ (void)err_ctx;
+
+ fwrite(buf, 1, len, stderr);
+}
+
+int fb_print_error(fb_parser_t *P, const char * format, ...)
+{
+ int n;
+ va_list ap;
+ char buf[ERROR_BUFSIZ];
+
+ va_start (ap, format);
+ n = vsnprintf (buf, ERROR_BUFSIZ, format, ap);
+ va_end (ap);
+ if (n >= ERROR_BUFSIZ) {
+ strcpy(buf + ERROR_BUFSIZ - 5, "...\n");
+ n = ERROR_BUFSIZ - 1;
+ }
+ P->error_out(P->error_ctx, buf, (size_t)n);
+ return n;
+}
+
+const char *error_find_file_of_token(fb_parser_t *P, fb_token_t *t)
+{
+ /*
+ * Search token in dependent buffers if not in current token
+ * buffer. We can do this as a linear search because we limit the
+ * number of output errors.
+ */
+ while (P) {
+ if (P->ts <= t && P->te > t) {
+ return P->schema.errorname;
+ }
+ P = P->dependencies;
+ }
+ return "";
+}
+
+void error_report(fb_parser_t *P, fb_token_t *t, const char *msg, fb_token_t *peer, const char *s, size_t len)
+{
+ const char *file, *peer_file;
+
+ if (t && !s) {
+ s = t->text;
+ len = (size_t)t->len;
+ }
+ if (!msg) {
+ msg = "";
+ }
+ if (!s) {
+ s = "";
+ len = 0;
+ }
+ if (t && !peer) {
+ file = error_find_file_of_token(P, t);
+ fb_print_error(P, "%s:%ld:%ld: error: '%.*s': %s\n",
+ file, (long)t->linenum, (long)t->pos, len, s, msg);
+ } else if (t && peer) {
+ file = error_find_file_of_token(P, t);
+ peer_file = error_find_file_of_token(P, peer);
+ fb_print_error(P, "%s:%ld:%ld: error: '%.*s': %s: %s:%ld:%ld: '%.*s'\n",
+ file, (long)t->linenum, (long)t->pos, len, s, msg,
+ peer_file, (long)peer->linenum, (long)peer->pos, (int)peer->len, peer->text);
+ } else if (!t && !peer) {
+ fb_print_error(P, "error: %s\n", msg);
+ } else if (peer) {
+ peer_file = error_find_file_of_token(P, peer);
+ fb_print_error(P, "error: %s: %s:%ld:%ld: '%.*s'\n",
+ msg,
+ peer_file, (long)peer->linenum, (long)peer->pos, (int)peer->len, peer->text);
+ } else {
+ fb_print_error(P, "internal error: unexpected state\n");
+ }
+ ++P->failed;
+}
+
+void error_ref_sym(fb_parser_t *P, fb_ref_t *ref, const char *msg, fb_symbol_t *s2)
+{
+ fb_ref_t *p;
+ char buf[FLATCC_MAX_IDENT_SHOW + 1];
+ size_t k = FLATCC_MAX_IDENT_SHOW;
+ size_t n = 0;
+ size_t n0 = 0;
+ int truncated = 0;
+
+ p = ref;
+ while (p && k > 0) {
+ if (n0 > 0) {
+ buf[n0] = '.';
+ --k;
+ ++n0;
+ }
+ n = (size_t)p->ident->len;
+ if (k < n) {
+ n = k;
+ truncated = 1;
+ }
+ memcpy(buf + n0, p->ident->text, n);
+ k -= n;
+ n0 += n;
+ p = p->link;
+ }
+ if (p) truncated = 1;
+ buf[n0] = '\0';
+ if (n0 > 0) {
+ --n0;
+ }
+ if (truncated) {
+ memcpy(buf + FLATCC_MAX_IDENT_SHOW + 1 - 4, "...\0", 4);
+ n0 = FLATCC_MAX_IDENT_SHOW;
+ }
+ error_report(P, ref->ident, msg, s2 ? s2->ident : 0, buf, n0);
+}
+
+//#define LEX_DEBUG
+
+/* FlatBuffers reserved keywords. */
+#define LEX_KEYWORDS
+
+#define LEX_C_BLOCK_COMMENT
+/*
+ * FlatBuffers also supports /// on a single line for documentation, but
+ * we can handle that within the normal line comment parsing logic.
+ */
+#define LEX_C99_LINE_COMMENT
+/*
+ * String escapes are not defined in fb schema but it only uses strings
+ * for attribute, namespace, file ext, and file id. For JSON objects we
+ * use C string escapes but control characters must be detected.
+ */
+#define LEX_C_STRING
+
+/* Accept numbers like -0x42 as integer literals. */
+#define LEX_HEX_NUMERIC
+
+#define lex_isblank(c) ((c) == ' ' || (c) == '\t')
+
+#include "parser.h"
+
+#ifdef LEX_DEBUG
+
+static void print_token(fb_token_t *t)
+{
+ lex_fprint_token(stderr, t->id, t->text, t->text + t->len, t->linenum, t->pos);
+}
+
+static void debug_token(const char *info, fb_token_t *t)
+{
+ fprintf(stderr, "%s\n ", info);
+ print_token(t);
+}
+#else
+#define debug_token(info, t) ((void)0)
+#endif
+
+static void revert_metadata(fb_metadata_t **list)
+{
+ REVERT_LIST(fb_metadata_t, link, list);
+}
+
+static void revert_symbols(fb_symbol_t **list)
+{
+ REVERT_LIST(fb_symbol_t, link, list);
+}
+
+static void revert_names(fb_name_t **list)
+{
+ REVERT_LIST(fb_name_t, link, list);
+}
+
+static inline fb_doc_t *fb_add_doc(fb_parser_t *P, fb_token_t *t)
+{
+ fb_doc_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->ident = t;
+ p->link = P->doc;
+ P->doc = p;
+ return p;
+}
+
+#define fb_assign_doc(P, p) {\
+ revert_symbols(&P->doc); p->doc = P->doc; P->doc = 0; }
+
+static inline fb_compound_type_t *fb_add_table(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_table;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_struct(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_struct;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_rpc_service(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_rpc_service;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_enum(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_enum;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_union(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_union;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_ref_t *fb_add_ref(fb_parser_t *P, fb_token_t *t)
+{
+ fb_ref_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->ident = t;
+ return p;
+}
+
+static inline fb_attribute_t *fb_add_attribute(fb_parser_t *P)
+{
+ fb_attribute_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->name.link = P->schema.attributes;
+ P->schema.attributes = &p->name;
+ return p;
+}
+
+static inline fb_include_t *fb_add_include(fb_parser_t *P)
+{
+ fb_include_t *p;
+ p = new_elem(P, sizeof(*p));
+ p->link = P->schema.includes;
+ return P->schema.includes = p;
+}
+
+static inline fb_scope_t *fb_add_scope(fb_parser_t *P, fb_ref_t *name)
+{
+ fb_scope_t *p;
+
+ p = fb_scope_table_find(&P->schema.root_schema->scope_index, name, 0);
+ if (p) {
+ return p;
+ }
+ p = new_elem(P, sizeof(*p));
+ p->name = name;
+ p->prefix = P->schema.prefix;
+
+ fb_scope_table_insert_item(&P->schema.root_schema->scope_index, p, ht_keep);
+ return p;
+}
+
+static inline fb_metadata_t *fb_add_metadata(fb_parser_t *P, fb_metadata_t **metadata)
+{
+ fb_metadata_t *p;
+ p = new_elem(P, sizeof(*p));
+ p->link = *metadata;
+ return *metadata = p;
+}
+
+static inline fb_member_t *fb_add_member(fb_parser_t *P, fb_symbol_t **members)
+{
+ fb_member_t *p;
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = *members;
+ p->symbol.kind = fb_is_member;
+ *members = (fb_symbol_t *)p;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline int is_end(fb_token_t *t)
+{
+ return t->id == LEX_TOK_EOF;
+}
+
+static fb_token_t *next(fb_parser_t *P)
+{
+again:
+ ++P->token;
+ if (P->token == P->te) {
+ /* We keep returning the end token to help binary operators etc., if any. */
+ --P->token;
+ assert(0);
+ switch (P->token->id) {
+ case LEX_TOK_EOS: case LEX_TOK_EOB: case LEX_TOK_EOF:
+ P->token->id = LEX_TOK_EOF;
+ return P->token;
+ }
+ error_tok(P, P->token, "unexpected end of input");
+ }
+ if (P->token->id == tok_kw_doc_comment) {
+ /* Note: we can have blanks that are control characters here, such as \t. */
+ fb_add_doc(P, P->token);
+ goto again;
+ }
+ debug_token("next", P->token);
+ return P->token;
+}
+
+static void recover(fb_parser_t *P, long token_id, int consume)
+{
+ while (!is_end(P->token)) {
+ if (P->token->id == token_id) {
+ if (consume) {
+ next(P);
+ }
+ P->doc = 0;
+ return;
+ }
+ next(P);
+ }
+}
+
+static void recover2(fb_parser_t *P, long token_id, int consume, long token_id_2, int consume_2)
+{
+ while (!is_end(P->token)) {
+ if (P->token->id == token_id) {
+ if (consume) {
+ next(P);
+ }
+ P->doc = 0;
+ return;
+ }
+ if (P->token->id == token_id_2) {
+ if (consume_2) {
+ next(P);
+ }
+ P->doc = 0;
+ return;
+ }
+ next(P);
+ }
+}
+
+static inline fb_token_t *optional(fb_parser_t *P, long id) {
+ fb_token_t *t = 0;
+ if (P->token->id == id) {
+ t = P->token;
+ next(P);
+ }
+ return t;
+}
+
+static inline fb_token_t *match(fb_parser_t *P, long id, char *msg) {
+ fb_token_t *t = 0;
+ if (P->token->id == id) {
+ t = P->token;
+ next(P);
+ } else {
+ error_tok(P, P->token, msg);
+ }
+ return t;
+}
+
+/*
+ * When a keyword should also be accepted as an identifier.
+ * This is useful for JSON where field names are visible.
+ * Since field names are not referenced within the schema,
+ * this is generally safe. Enum members can also be remapped but
+ * they can then not be used as default values. Table names
+ * and other type names should not be remapped as they can then
+ * not be used as a type name for other fields.
+ */
+#if FLATCC_ALLOW_KW_FIELDS
+static inline void remap_field_ident(fb_parser_t *P)
+{
+ if (P->token->id >= LEX_TOK_KW_BASE && P->token->id < LEX_TOK_KW_END) {
+ P->token->id = LEX_TOK_ID;
+ }
+}
+#else
+static inline void remap_field_ident(fb_parser_t *P) { (void)P; }
+#endif
+
+#if FLATCC_ALLOW_KW_ENUMS
+static inline void remap_enum_ident(fb_parser_t *P)
+{
+ if (P->token->id >= LEX_TOK_KW_BASE && P->token->id < LEX_TOK_KW_END) {
+ P->token->id = LEX_TOK_ID;
+ }
+}
+#else
+static inline void remap_enum_ident(fb_parser_t *P) { (void)P; }
+#endif
+
+static fb_token_t *advance(fb_parser_t *P, long id, const char *msg, fb_token_t *peer)
+{
+ /*
+ * `advance` is generally used at end of statements so it is a
+ * convenient place to get rid of rogue doc comments we can't attach
+ * to anything meaningful.
+ */
+ P->doc = 0;
+ if (P->token->id != id) {
+ error_tok_2(P, P->token, msg, peer);
+ return P->token;
+ }
+ return next(P);
+}
+
+static void read_integer_value(fb_parser_t *P, fb_token_t *t, fb_value_t *v, int sign)
+{
+ int status;
+
+ v->type = vt_uint;
+ /* The token does not store the sign internally. */
+ parse_integer(t->text, (size_t)t->len, &v->u, &status);
+ if (status != PARSE_INTEGER_UNSIGNED) {
+ v->type = vt_invalid;
+ error_tok(P, t, "invalid integer format");
+ }
+ if (sign) {
+ v->i = -(int64_t)v->u;
+ v->type = vt_int;
+#ifdef FLATCC_FAIL_ON_INT_SIGN_OVERFLOW
+ /* Sometimes we might want this, so don't fail by default. */
+ if (v->i > 0) {
+ v->type = vt_invalid;
+ error_tok(P, t, "sign overflow in integer format");
+ }
+#endif
+ }
+}
+
+static void read_hex_value(fb_parser_t *P, fb_token_t *t, fb_value_t *v, int sign)
+{
+ int status;
+
+ v->type = vt_uint;
+ /* The token does not store the sign internally. */
+ parse_hex_integer(t->text, (size_t)t->len, &v->u, &status);
+ if (status != PARSE_INTEGER_UNSIGNED) {
+ v->type = vt_invalid;
+ error_tok(P, t, "invalid hex integer format");
+ }
+ if (sign) {
+ v->i = -(int64_t)v->u;
+ v->type = vt_int;
+#ifdef FLATCC_FAIL_ON_INT_SIGN_OVERFLOW
+ /* Sometimes we might want this, so don't fail by default. */
+ if (v->i > 0) {
+ v->type = vt_invalid;
+ error_tok(P, t, "sign overflow in hex integer format");
+ }
+#endif
+ }
+}
+
+static void read_float_value(fb_parser_t *P, fb_token_t *t, fb_value_t *v, int sign)
+{
+ char *end;
+
+ v->type = vt_float;
+ v->f = strtod(t->text, &end);
+ if (end != t->text + t->len) {
+ v->type = vt_invalid;
+ error_tok(P, t, "invalid float format");
+ } else if (t->text[0] == '.') {
+ v->type = vt_invalid;
+ /* The FB spec requires this, in line with the JSON format. */
+ error_tok(P, t, "numeric values must start with a digit");
+ } else if (sign) {
+ v->f = -v->f;
+ }
+}
+
+/*
+ * We disallow escape characters, newlines and other control characters,
+ * but especially escape characters because they would require us to
+ * reallocate the string and convert the escaped characters. We also
+ * disallow non-utf8 characters, but we do not check for it. The tab
+ * character could meaningfully be accepted, but we don't.
+ *
+ * String literals are only used to name attributes, namespaces,
+ * file identifiers and file extensions, so we really have no need
+ * for these extra escape features.
+ *
+ * JSON strings should be handled separately, if or when supported -
+ * either by converting escapes and reallocating the string, or
+ * simply by ignoring the escape errors and use the string unmodified.
+ */
+static void parse_string_literal(fb_parser_t *P, fb_value_t *v)
+{
+ fb_token_t *t;
+
+ v->type = vt_string;
+ v->s.s = 0;
+ v->s.len = 0;
+
+ for (;;) {
+ t = P->token;
+ switch (t->id) {
+ case LEX_TOK_STRING_PART:
+ if (v->s.s == 0) {
+ v->s.s = (char *)t->text;
+ }
+ break;
+ case LEX_TOK_STRING_ESCAPE:
+ v->type = vt_invalid;
+ error_tok(P, t, "escape not allowed in strings");
+ break;
+ case LEX_TOK_STRING_CTRL:
+ v->type = vt_invalid;
+ error_tok_as_string(P, t, "control characters not allowed in strings", "?", 1);
+ break;
+ case LEX_TOK_STRING_NEWLINE:
+ v->type = vt_invalid;
+ error_tok(P, t, "newline not allowed in strings");
+ break;
+ case LEX_TOK_STRING_UNTERMINATED:
+ case LEX_TOK_STRING_END:
+ goto done;
+
+ default:
+ error_tok(P, t, "internal error: unexpected token in string");
+ v->type = vt_invalid;
+ goto done;
+ }
+ next(P);
+ }
+done:
+ /*
+ * If we were to ignore all errors, we would get the full
+ * string as is excluding delimiting quotes.
+ */
+ if (v->s.s) {
+ v->s.len = (int)(P->token->text - v->s.s);
+ }
+ if (!match(P, LEX_TOK_STRING_END, "unterminated string")) {
+ v->type = vt_invalid;
+ }
+}
+
+/* Current token must be an identifier. */
+static void parse_ref(fb_parser_t *P, fb_ref_t **ref)
+{
+ *ref = fb_add_ref(P, P->token);
+ next(P);
+ ref = &((*ref)->link);
+ while (optional(P, '.')) {
+ if (P->token->id != LEX_TOK_ID) {
+ error_tok(P, P->token, "namespace prefix expected identifier");
+ break;
+ }
+ *ref = fb_add_ref(P, P->token);
+ ref = &((*ref)->link);
+ next(P);
+ }
+}
+
+/* `flags` */
+enum { allow_string_value = 1, allow_id_value = 2, allow_null_value = 4 };
+static void parse_value(fb_parser_t *P, fb_value_t *v, int flags, const char *error_msg)
+{
+ fb_token_t *t;
+ fb_token_t *sign;
+
+ sign = optional(P, '-');
+ t = P->token;
+
+ switch (t->id) {
+ case LEX_TOK_HEX:
+ read_hex_value(P, t, v, sign != 0);
+ break;
+ case LEX_TOK_INT:
+ read_integer_value(P, t, v, sign != 0);
+ break;
+ case LEX_TOK_FLOAT:
+ read_float_value(P, t, v, sign != 0);
+ break;
+ case tok_kw_true:
+ v->b = 1;
+ v->type = vt_bool;
+ break;
+ case tok_kw_false:
+ v->b = 0;
+ v->type = vt_bool;
+ break;
+ case tok_kw_null:
+ if (!(flags & allow_null_value)) {
+ v->type = vt_invalid;
+ error_tok(P, t, error_msg);
+ return;
+ }
+ v->type = vt_null;
+ break;
+ case LEX_TOK_STRING_BEGIN:
+ next(P);
+ parse_string_literal(P, v);
+ if (!(flags & allow_string_value)) {
+ v->type = vt_invalid;
+ error_tok(P, t, error_msg);
+ return;
+ }
+ if (sign) {
+ v->type = vt_invalid;
+ error_tok(P, t, "string constants cannot be signed");
+ return;
+ }
+ return;
+ case LEX_TOK_ID:
+ parse_ref(P, &v->ref);
+ v->type = vt_name_ref;
+ if (sign) {
+ v->type = vt_invalid;
+ /* Technically they could, but we do not allow it. */
+ error_tok(P, t, "named values cannot be signed");
+ }
+ return;
+ default:
+ /* We might have consumed a sign, but never mind that. */
+ error_tok(P, t, error_msg);
+ return;
+ }
+ if (sign && v->type == vt_bool) {
+ v->type = vt_invalid;
+ error_tok(P, t, "boolean constants cannot be signed");
+ }
+ next(P);
+}
+
+static void parse_fixed_array_size(fb_parser_t *P, fb_token_t *ttype, fb_value_t *v)
+{
+ const char *error_msg = "fixed length array length expected to be an unsigned integer";
+ fb_value_t vsize;
+ fb_token_t *tlen = P->token;
+
+ parse_value(P, &vsize, 0, error_msg);
+ if (vsize.type != vt_uint) {
+ error_tok(P, tlen, error_msg);
+ v->type = vt_invalid;
+ return;
+ }
+ if (v->type == vt_invalid) return;
+ switch (v->type) {
+ case vt_vector_type:
+ v->type = vt_fixed_array_type;
+ break;
+ case vt_vector_type_ref:
+ v->type = vt_fixed_array_type_ref;
+ break;
+ case vt_vector_string_type:
+ v->type = vt_fixed_array_string_type;
+ break;
+ case vt_invalid:
+ return;
+ default:
+ error_tok(P, ttype, "invalid fixed length array type");
+ v->type = vt_invalid;
+ return;
+ }
+ if (vsize.u == 0) {
+ error_tok(P, tlen, "fixed length array length cannot be 0");
+ v->type = vt_invalid;
+ return;
+ }
+ /*
+ * This allows for safe 64-bit multiplication by elements no
+ * larger than 2^32-1 and also fits into the value len field
+ * without extra size cost.
+ */
+ if (vsize.u > UINT32_MAX) {
+ error_tok(P, tlen, "fixed length array length overflow");
+ v->type = vt_invalid;
+ return;
+ }
+ v->len = (uint32_t)vsize.u;
+}
+
+/* ':' must already be matched */
+static void parse_type(fb_parser_t *P, fb_value_t *v)
+{
+ fb_token_t *t = 0;
+ fb_token_t *ttype = 0;
+ fb_token_t *t0 = P->token;
+ int vector = 0;
+
+ v->len = 1;
+ v->type = vt_invalid;
+ while ((t = optional(P, '['))) {
+ ++vector;
+ }
+ if (vector > 1) {
+ error_tok(P, t0, "vector type can only be one-dimensional");
+ }
+ ttype = P->token;
+ switch (ttype->id) {
+ case tok_kw_int:
+ case tok_kw_bool:
+ case tok_kw_byte:
+ case tok_kw_long:
+ case tok_kw_uint:
+ case tok_kw_float:
+ case tok_kw_short:
+ case tok_kw_char:
+ case tok_kw_ubyte:
+ case tok_kw_ulong:
+ case tok_kw_ushort:
+ case tok_kw_double:
+ case tok_kw_int8:
+ case tok_kw_int16:
+ case tok_kw_int32:
+ case tok_kw_int64:
+ case tok_kw_uint8:
+ case tok_kw_uint16:
+ case tok_kw_uint32:
+ case tok_kw_uint64:
+ case tok_kw_float32:
+ case tok_kw_float64:
+ v->t = P->token;
+ v->type = vector ? vt_vector_type : vt_scalar_type;
+ next(P);
+ break;
+ case tok_kw_string:
+ v->t = P->token;
+ v->type = vector ? vt_vector_string_type : vt_string_type;
+ next(P);
+ break;
+ case LEX_TOK_ID:
+ parse_ref(P, &v->ref);
+ v->type = vector ? vt_vector_type_ref : vt_type_ref;
+ break;
+ case ']':
+ error_tok(P, t, "vector type cannot be empty");
+ break;
+ default:
+ error_tok(P, ttype, "invalid type specifier");
+ break;
+ }
+ if (vector && optional(P, ':')) {
+ parse_fixed_array_size(P, ttype, v);
+ }
+ while (optional(P, ']') && vector--) {
+ }
+ if (vector) {
+ error_tok_2(P, t, "vector type missing ']' to match", t0);
+ }
+ if ((t = optional(P, ']'))) {
+ error_tok_2(P, t, "extra ']' not matching", t0);
+ while (optional(P, ']')) {
+ }
+ }
+ if (ttype->id == tok_kw_char && v->type != vt_invalid) {
+ if (v->type != vt_fixed_array_type) {
+ error_tok(P, ttype, "char can only be used as a fixed length array type [char:<n>]");
+ v->type = vt_invalid;
+ }
+ }
+}
+
+static fb_metadata_t *parse_metadata(fb_parser_t *P)
+{
+ fb_token_t *t, *t0;
+ fb_metadata_t *md = 0;
+
+ if (!(t0 = optional(P, '('))) {
+ return 0;
+ }
+ if ((t = optional(P, LEX_TOK_ID)))
+ for (;;) {
+ fb_add_metadata(P, &md);
+ md->ident = t;
+ if (optional(P, ':')) {
+ parse_value(P, &md->value, allow_string_value, "scalar or string value expected");
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ return md;
+ }
+ if (!optional(P, ',')) {
+ break;
+ }
+ if (!(t = match(P, LEX_TOK_ID, "attribute name expected identifier after ','"))) {
+ break;
+ }
+ }
+ advance(P, ')', "metadata expected ')' to match", t0);
+ revert_metadata(&md);
+ return md;
+}
+
+static void parse_field(fb_parser_t *P, fb_member_t *fld)
+{
+ fb_token_t *t;
+
+ remap_field_ident(P);
+ if (!(t = match(P, LEX_TOK_ID, "field expected identifier"))) {
+ goto fail;
+ }
+ fld->symbol.ident = t;
+ if (!match(P, ':', "field expected ':' before mandatory type")) {
+ goto fail;
+ }
+ parse_type(P, &fld->type);
+ if (optional(P, '=')) {
+ /*
+ * Because types can be named references, we do not check the
+ * default assignment before the schema is fully parsed.
+ * We allow the initializer to be a name in case it is an enum
+ * name.
+ */
+ parse_value(P, &fld->value, allow_id_value | allow_null_value, "initializer must be of scalar type or null");
+ }
+ fld->metadata = parse_metadata(P);
+ advance(P, ';', "field must be terminated with ';'", 0);
+ return;
+fail:
+ recover2(P, ';', 1, '}', 0);
+}
+
+static void parse_method(fb_parser_t *P, fb_member_t *fld)
+{
+ fb_token_t *t;
+ if (!(t = match(P, LEX_TOK_ID, "method expected identifier"))) {
+ goto fail;
+ }
+ fld->symbol.ident = t;
+ if (!match(P, '(', "method expected '(' after identifier")) {
+ goto fail;
+ }
+ parse_type(P, &fld->req_type);
+ if (!match(P, ')', "method expected ')' after request type")) {
+ goto fail;
+ }
+ if (!match(P, ':', "method expected ':' before mandatory response type")) {
+ goto fail;
+ }
+ parse_type(P, &fld->type);
+ if ((t = optional(P, '='))) {
+ error_tok(P, t, "method does not accept an initializer");
+ goto fail;
+ }
+ fld->metadata = parse_metadata(P);
+ advance(P, ';', "method must be terminated with ';'", 0);
+ return;
+fail:
+ recover2(P, ';', 1, '}', 0);
+}
+
+/* `enum` must already be matched. */
+static void parse_enum_decl(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_token_t *t, *t0;
+ fb_member_t *member;
+
+ if (!(ct->symbol.ident = match(P, LEX_TOK_ID, "enum declaration expected identifier"))) {
+ goto fail;
+ }
+ if (optional(P, ':')) {
+ parse_type(P, &ct->type);
+ if (ct->type.type != vt_scalar_type) {
+ error_tok(P, ct->type.t, "integral type expected");
+ } else {
+ switch (ct->type.t->id) {
+ case tok_kw_float:
+ case tok_kw_double:
+ case tok_kw_float32:
+ case tok_kw_float64:
+ error_tok(P, ct->type.t, "integral type expected");
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ ct->metadata = parse_metadata(P);
+ if (!((t0 = match(P, '{', "enum declaration expected '{'")))) {
+ goto fail;
+ }
+ for (;;) {
+ remap_enum_ident(P);
+ if (!(t = match(P, LEX_TOK_ID,
+ "member identifier expected"))) {
+ goto fail;
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ goto fail;
+ }
+ member = fb_add_member(P, &ct->members);
+ member->symbol.ident = t;
+ if (optional(P, '=')) {
+ t = P->token;
+ parse_value(P, &member->value, 0, "integral constant expected");
+ /* Leave detailed type (e.g. no floats) and range checking to a later stage. */
+ }
+ /*
+ * Trailing comma is optional in flatc but not in the grammar, so we
+ * follow flatc.
+ */
+ if (!optional(P, ',') || P->token->id == '}') {
+ break;
+ }
+ P->doc = 0;
+ }
+ if (t0) {
+ advance(P, '}', "enum missing closing '}' to match", t0);
+ }
+ revert_symbols(&ct->members);
+ return;
+fail:
+ recover(P, '}', 1);
+}
+
+/* `union` must already be matched. */
+static void parse_union_decl(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_token_t *t0;
+ fb_member_t *member;
+ fb_ref_t *ref;
+ fb_token_t *t;
+
+ if (!(ct->symbol.ident = match(P, LEX_TOK_ID, "union declaration expected identifier"))) {
+ goto fail;
+ }
+ ct->metadata = parse_metadata(P);
+ if (!((t0 = match(P, '{', "union declaration expected '{'")))) {
+ goto fail;
+ }
+ for (;;) {
+ if (P->token->id != LEX_TOK_ID) {
+ error_tok(P, P->token, "union expects an identifier");
+ goto fail;
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ goto fail;
+ }
+ t = P->token;
+ member = fb_add_member(P, &ct->members);
+ parse_ref(P, &ref);
+ member->type.ref = ref;
+ member->type.type = vt_type_ref;
+ while (ref->link) {
+ ref = ref->link;
+ }
+ /* The union member name is the unqualified reference. */
+ member->symbol.ident = ref->ident;
+ if (optional(P, ':')) {
+ if (member->type.ref->link) {
+ error_tok(P, t, "qualified union member name cannot have an explicit type");
+ }
+ parse_type(P, &member->type);
+ /* Leave type checking to later stage. */
+ }
+ if (optional(P, '=')) {
+ parse_value(P, &member->value, 0, "integral constant expected");
+ /* Leave detailed type (e.g. no floats) and range checking to a later stage. */
+ }
+ if (!optional(P, ',') || P->token->id == '}') {
+ break;
+ }
+ P->doc = 0;
+ }
+ advance(P, '}', "union missing closing '}' to match", t0);
+ revert_symbols(&ct->members);
+ /* Add implicit `NONE` member first in the list. */
+ member = fb_add_member(P, &ct->members);
+ member->symbol.ident = &P->t_none;
+ return;
+fail:
+ recover2(P, ';', 1, '}', 0);
+}
+
+/* `struct`, `table`, or `rpc_service` must already be matched. */
+static void parse_compound_type(fb_parser_t *P, fb_compound_type_t *ct, long token)
+{
+ fb_token_t *t = 0;
+
+ if (!(t = match(P, LEX_TOK_ID, "Declaration expected an identifier"))) {
+ goto fail;
+ }
+ ct->symbol.ident = t;
+ ct->metadata = parse_metadata(P);
+ if (!(match(P, '{', "Declaration expected '{'"))) {
+ goto fail;
+ }
+ t = P->token;
+
+/* Allow empty tables and structs. */
+#if 0
+ if (P->token->id == '}') {
+ error_tok(P, t, "table / struct declaration cannot be empty");
+ }
+#endif
+ while (P->token->id != '}') {
+ if (token == tok_kw_rpc_service) {
+ parse_method(P, fb_add_member(P, &ct->members));
+ } else {
+ parse_field(P, fb_add_member(P, &ct->members));
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ goto fail;
+ }
+ }
+ if (!optional(P, '}') && t) {
+ error_tok_2(P, P->token, "Declaration missing closing '}' to match", t);
+ }
+ revert_symbols(&ct->members);
+ return;
+fail:
+ recover(P, '}', 1);
+}
+
+static void parse_namespace(fb_parser_t *P)
+{
+ fb_ref_t *ref = 0;
+ fb_token_t *t = P->token;
+
+ if (optional(P, ';') && t) {
+ /* Revert to global namespace. */
+ P->current_scope = 0;
+ return;
+ }
+ if (P->token->id != LEX_TOK_ID) {
+ error_tok(P, P->token, "namespace expects an identifier");
+ recover(P, ';', 1);
+ return;
+ }
+ parse_ref(P, &ref);
+ advance(P, ';', "missing ';' expected by namespace at", t);
+ P->current_scope = fb_add_scope(P, ref);
+}
+
+static void parse_root_type(fb_parser_t *P, fb_root_type_t *rt)
+{
+ fb_token_t *t = P->token;
+
+ if (rt->name) {
+ error_tok(P, P->token, "root_type already set");
+ }
+ parse_ref(P, &rt->name);
+ rt->scope = P->current_scope;
+ advance(P, ';', "missing ';' expected by root_type at", t);
+}
+
+static void parse_include(fb_parser_t *P)
+{
+ fb_token_t *t = P->token;
+
+ while (optional(P, tok_kw_include)) {
+ if (P->opts.disable_includes) {
+ error_tok(P, t, "include statements not supported by current environment");
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ return;
+ }
+ if (!match(P, LEX_TOK_STRING_BEGIN,
+ "include expected a string literal as filename")) {
+ recover(P, ';', 1);
+ }
+ parse_string_literal(P, &fb_add_include(P)->name);
+ match(P, ';', "include statement expected ';'");
+ }
+}
+
+static void parse_attribute(fb_parser_t *P, fb_attribute_t *a)
+{
+ fb_token_t *t = P->token;
+
+ if (match(P, LEX_TOK_STRING_BEGIN, "attribute expected string literal")) {
+ parse_string_literal(P, &a->name.name);
+ if (a->name.name.s.len == 0) {
+ error_tok_as_string(P, t, "attribute name cannot be empty", 0, 0);
+ }
+ }
+ match(P, ';', "attribute expected ';'");
+}
+
+static void parse_file_extension(fb_parser_t *P, fb_value_t *v)
+{
+ if (v->type == vt_string) {
+ error_tok_as_string(P, P->token, "file extension already set", v->s.s, (size_t)v->s.len);
+ }
+ if (!match(P, LEX_TOK_STRING_BEGIN, "file_extension expected string literal")) {
+ goto fail;
+ }
+ parse_string_literal(P, v);
+ match(P, ';', "file_extension expected ';'");
+ return;
+fail:
+ recover(P, ';', 1);
+}
+
+static void parse_file_identifier(fb_parser_t *P, fb_value_t *v)
+{
+ fb_token_t *t;
+ if (v->type != vt_missing) {
+ error_tok_as_string(P, P->token, "file identifier already set", v->s.s, (size_t)v->s.len);
+ }
+ if (!match(P, LEX_TOK_STRING_BEGIN, "file_identifier expected string literal")) {
+ goto fail;
+ }
+ t = P->token;
+ parse_string_literal(P, v);
+ if (v->s.s && v->s.len != 4) {
+ v->type = vt_invalid;
+ error_tok(P, t, "file_identifier must be 4 characters");
+ }
+ match(P, ';', "file_identifier expected ';'");
+ return;
+fail:
+ recover(P, ';', 1);
+}
+
+static void parse_schema_decl(fb_parser_t *P)
+{
+ switch(P->token->id) {
+ case tok_kw_namespace:
+ next(P);
+ parse_namespace(P);
+ break;
+ case tok_kw_file_extension:
+ next(P);
+ parse_file_extension(P, &P->schema.file_extension);
+ break;
+ case tok_kw_file_identifier:
+ next(P);
+ parse_file_identifier(P, &P->schema.file_identifier);
+ break;
+ case tok_kw_root_type:
+ next(P);
+ parse_root_type(P, &P->schema.root_type);
+ break;
+ case tok_kw_attribute:
+ next(P);
+ parse_attribute(P, fb_add_attribute(P));
+ break;
+ case tok_kw_struct:
+ next(P);
+ parse_compound_type(P, fb_add_struct(P), tok_kw_struct);
+ break;
+ case tok_kw_table:
+ next(P);
+ parse_compound_type(P, fb_add_table(P), tok_kw_table);
+ break;
+ case tok_kw_rpc_service:
+ next(P);
+ parse_compound_type(P, fb_add_rpc_service(P), tok_kw_rpc_service);
+ break;
+ case tok_kw_enum:
+ next(P);
+ parse_enum_decl(P, fb_add_enum(P));
+ break;
+ case tok_kw_union:
+ next(P);
+ parse_union_decl(P, fb_add_union(P));
+ break;
+ case tok_kw_include:
+ error_tok(P, P->token, "include statements must be placed first in the schema");
+ break;
+ case '{':
+ error_tok(P, P->token, "JSON objects in schema file is not supported - but a schema specific JSON parser can be generated");
+ break;
+ case LEX_TOK_CTRL:
+ error_tok_as_string(P, P->token, "unexpected control character in schema definition", "?", 1);
+ break;
+ case LEX_TOK_COMMENT_CTRL:
+ error_tok_as_string(P, P->token, "unexpected control character in comment", "?", 1);
+ break;
+ case LEX_TOK_COMMENT_UNTERMINATED:
+ error_tok_as_string(P, P->token, "unterminated comment", "<eof>", 5);
+ break;
+ default:
+ error_tok(P, P->token, "unexpected token in schema definition");
+ break;
+ }
+}
+
+static int parse_schema(fb_parser_t *P)
+{
+ fb_token_t *t, *t0;
+ parse_include(P);
+ t = P->token;
+ for (;;) {
+ if (is_end(t)) {
+ break;
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ return -1;
+ }
+ t0 = t;
+ parse_schema_decl(P);
+ t = P->token;
+ if (t == t0) {
+ if (P->failed) {
+ return -1;
+ }
+ error_tok(P, t, "extra tokens in input");
+ return -1;
+ }
+ }
+ revert_names(&P->schema.attributes);
+ revert_symbols(&P->schema.symbols);
+ return 0;
+}
+
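+/*
+ * Element buffers are allocated by new_elem (parser.h) as a singly linked
+ * chain: the first pointer-sized slot of each buffer points to the
+ * previously allocated buffer, so we walk the chain and free every block.
+ */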
+static inline void clear_elem_buffers(fb_parser_t *P)
+{
+ void **p, **p2;
+
+ p = P->elem_buffers;
+ while (p) {
+ p2 = *((void**)p);
+ free(p);
+ p = p2;
+ }
+}
+
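+/*
+ * Append a token to the token array, growing it by doubling as needed.
+ * `pos` records the 1-based column of the token on its source line.
+ */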
+static void push_token(fb_parser_t *P, long id, const char *first, const char *last)
+{
+ size_t offset;
+ fb_token_t *t;
+
+ P->te = P->ts + P->tcapacity;
+ if (P->token == P->te) {
+ offset = (size_t)(P->token - P->ts);
+ P->tcapacity = P->tcapacity ? 2 * P->tcapacity : 1024;
+ P->ts = realloc(P->ts, (size_t)P->tcapacity * sizeof(fb_token_t));
+ checkmem(P->ts);
+ P->te = P->ts + P->tcapacity;
+ P->token = P->ts + offset;
+ }
+ t = P->token;
+ t->id = id;
+ t->text = first;
+ t->len = (long)(last - first);
+ t->linenum = P->linenum;
+ t->pos = (long)(first - P->line + 1);
+ ++P->token;
+}
+
+/*
+ * If the file contains a control character, we can get multiple
+ * comments per line.
+ */
+static inline void push_comment(fb_parser_t *P, const char *first, const char *last)
+{
+ if (P->doc_mode) {
+ push_token(P, tok_kw_doc_comment, first, last);
+ }
+}
+
+static void inject_token(fb_token_t *t, const char *lex, long id)
+{
+ t->id = id;
+ t->text = lex;
+ t->len = (long)strlen(lex);
+ t->pos = 0;
+ t->linenum = 0;
+}
+
+/* --- Customize lexer --- */
+
+/* Depends on the `context` argument given to the lex function. */
+#define ctx(name) (((fb_parser_t *)context)->name)
+
+#define lex_emit_newline(first, last) (ctx(linenum)++, ctx(line) = last)
+
+#define lex_emit_string_newline(first, last) \
+ (ctx(linenum)++, ctx(line) = last, \
+ push_token((fb_parser_t*)context, LEX_TOK_STRING_NEWLINE, first, last))
+
+/*
+ * Add an empty comment on comment start - otherwise we miss empty lines.
+ * Save is_doc because comment_part does not remember it.
+ */
+#define lex_emit_comment_begin(first, last, is_doc) \
+ { ctx(doc_mode) = is_doc; push_comment((fb_parser_t*)context, last, last); }
+#define lex_emit_comment_part(first, last) push_comment((fb_parser_t*)context, first, last)
+#define lex_emit_comment_end(first, last) (ctx(doc_mode) = 0)
+
+/* By default this is emitted as lex_emit_other, which would be ignored. */
+#define lex_emit_comment_unterminated(pos) \
+ push_token((fb_parser_t*)context, LEX_TOK_COMMENT_UNTERMINATED, pos, pos)
+
+#define lex_emit_comment_ctrl(pos) \
+ if (lex_isblank(*pos)) { \
+ push_comment((fb_parser_t*)context, pos, pos + 1); \
+ } else { \
+ push_token((fb_parser_t*)context, LEX_TOK_COMMENT_CTRL, \
+ pos, pos + 1); \
+ }
+
+/*
+ * Provide hook to lexer for emitting tokens. We can override many
+ * things, but most default to calling lex_emit, so that is all we need
+ * to handle.
+ *
+ * `context` is a magic name available to macros in the lexer.
+ */
+#define lex_emit(token, first, last) \
+ push_token((fb_parser_t*)context, token, first, last)
+
+/*
+ * We could just let eos default to emit, but formally we should use
+ * the eof marker, which is always zero, so the parser can check for it
+ * easily if needed.
+ */
+#define lex_emit_eos(first, last) \
+ push_token((fb_parser_t*)context, LEX_TOK_EOF, first, last)
+
+/*
+ * This event happens in place of eos if we exhaust the input buffer.
+ * We treat this as end of input, but this choice prevents
+ * us from parsing across multiple buffers.
+ */
+#define lex_emit_eob(pos) \
+ push_token((fb_parser_t*)context, LEX_TOK_EOF, pos, pos)
+
+/*
+ * Luthor is our speedy generic lexer - it knows most common operators
+ * and therefore allows us to fail meaningfully on those that we don't
+ * support here, which is most.
+ */
+#include "lex/luthor.c"
+
+#include "keywords.h"
+
+/* Root schema `rs` is null for top level parser. */
+int fb_init_parser(fb_parser_t *P, fb_options_t *opts, const char *name,
+ fb_error_fun error_out, void *error_ctx, fb_root_schema_t *rs)
+{
+ size_t n, name_len;
+ char *s;
+
+ memset(P, 0, sizeof(*P));
+
+ if (error_out) {
+ P->error_out = error_out;
+ P->error_ctx = error_ctx;
+ } else {
+ P->error_out = fb_default_error_out;
+ }
+ if (opts) {
+ memcpy(&P->opts, opts, sizeof(*opts));
+ } else {
+ flatcc_init_options(&P->opts);
+ }
+ P->schema.root_schema = rs ? rs : &P->schema.root_schema_instance;
+ switch (P->opts.offset_size) {
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ error(P, "invalid offset configured, must be 2, 4 (default), or 8");
+ return -1;
+ }
+ switch (P->opts.voffset_size) {
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ error(P, "invalid voffset configured, must be 2 (default), 4, or 8");
+ return -1;
+ }
+ if (!name) {
+ /* Mostly for testing, just so we always have a name. */
+ name = FLATCC_DEFAULT_FILENAME;
+ }
+ if (name == 0) {
+ name = "";
+ }
+ name_len = strlen(name);
+ checkmem((P->schema.basename = fb_create_basename(name, name_len, opts->default_schema_ext)));
+ n = strlen(P->schema.basename);
+ checkmem(s = fb_copy_path_n(P->schema.basename, n));
+ pstrntoupper(s, n);
+ P->schema.basenameup = s;
+ P->schema.name.name.s.s = s;
+ P->schema.name.name.s.len = (int)n;
+ checkmem((P->schema.errorname = fb_create_basename(name, name_len, "")));
+ if (opts->ns) {
+ P->schema.prefix.s = (char *)opts->ns;
+ P->schema.prefix.len = (int)strlen(opts->ns);
+ }
+ P->current_scope = fb_add_scope(P, 0);
+ assert(P->current_scope == fb_scope_table_find(&P->schema.root_schema->scope_index, 0, 0));
+ return 0;
+}
+
+/*
+ * Main entry function for this specific parser type.
+ * We expect a zero terminated string.
+ *
+ * The parser structure is uninitialized upon entry, and should be
+ * cleared with `fb_clear_parser` subsequently.
+ *
+ * Data structures point into the token buffer and into the input
+ * buffer, so the parser and input should not be cleared prematurely.
+ *
+ * The input buffer must remain valid until the parser is cleared
+ * because the internal representation stores pointers into the buffer.
+ *
+ * `own_buffer` indicates that the buffer should be deallocated when
+ * the parser is cleaned up.
+ */
+int fb_parse(fb_parser_t *P, const char *input, size_t len, int own_buffer)
+{
+ static const char *id_none = "NONE";
+ static const char *id_ubyte = "ubyte";
+
+ P->line = input;
+ P->linenum = 1;
+
+ /* Used with union defaults. */
+ inject_token(&P->t_none, id_none, LEX_TOK_ID);
+ inject_token(&P->t_ubyte, id_ubyte, tok_kw_ubyte);
+
+ if (own_buffer) {
+ P->managed_input = input;
+ }
+ lex(input, len, 0, P);
+
+ P->te = P->token;
+ P->token = P->ts;
+ /* Only used while processing table id's. */
+ checkmem((P->tmp_field_marker = malloc(sizeof(P->tmp_field_marker[0]) * (size_t)P->opts.vt_max_count)));
+ checkmem((P->tmp_field_index = malloc(sizeof(P->tmp_field_index[0]) * (size_t)P->opts.vt_max_count)));
+ if (P->token->id == tok_kw_doc_comment) {
+ next(P);
+ }
+ parse_schema(P);
+ return P->failed;
+}
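+
+/*
+ * Minimal usage sketch (illustrative only; `schema_text` and the file
+ * name are hypothetical, and a real driver additionally handles include
+ * files and P->dependencies):
+ *
+ *     fb_options_t opts;
+ *     fb_parser_t parser;
+ *     flatcc_init_options(&opts);
+ *     if (fb_init_parser(&parser, &opts, "monster.fbs", 0, 0, 0) == 0) {
+ *         if (fb_parse(&parser, schema_text, strlen(schema_text), 0) != 0) {
+ *             // errors were reported through the error callback
+ *         }
+ *         fb_clear_parser(&parser);
+ *     }
+ */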
+
+static void __destroy_scope_item(void *item, fb_scope_t *scope)
+{
+ /* Each scope points into a table that is cleared separately. */
+ (void)item;
+
+ fb_symbol_table_clear(&scope->symbol_index);
+}
+
+void fb_clear_parser(fb_parser_t *P)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_rpc_service:
+ case fb_is_enum:
+ case fb_is_union:
+ ct = (fb_compound_type_t *)sym;
+ fb_symbol_table_clear(&ct->index);
+ fb_value_set_clear(&ct->value_set);
+ }
+ }
+ fb_schema_table_clear(&P->schema.root_schema_instance.include_index);
+ fb_name_table_clear(&P->schema.root_schema_instance.attribute_index);
+ ptr_set_clear(&P->schema.visible_schema);
+ if (P->tmp_field_marker) {
+ free(P->tmp_field_marker);
+ }
+ if (P->tmp_field_index) {
+ free(P->tmp_field_index);
+ }
+ if (P->ts) {
+ free(P->ts);
+ }
+ if (P->schema.basename) {
+ free((void *)P->schema.basename);
+ }
+ if (P->schema.basenameup) {
+ free((void *)P->schema.basenameup);
+ }
+ if (P->schema.errorname) {
+ free((void *)P->schema.errorname);
+ }
+ /*
+ * P->referer_path in included files points to parent P->path, so
+ * don't free it, and don't access it after this point.
+ */
+ if (P->path) {
+ free((void *)P->path);
+ }
+ fb_scope_table_destroy(&P->schema.root_schema_instance.scope_index,
+ __destroy_scope_item, 0);
+ /* Destroy last since destructor has references into elem buffer. */
+ clear_elem_buffers(P);
+ if (P->managed_input) {
+ free((void *)P->managed_input);
+ }
+ memset(P, 0, sizeof(*P));
+}
diff --git a/src/compiler/parser.h b/src/compiler/parser.h
new file mode 100644
index 0000000..ef2ecc1
--- /dev/null
+++ b/src/compiler/parser.h
@@ -0,0 +1,213 @@
+#ifndef PARSER_H
+#define PARSER_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "../../config/config.h"
+#include "flatcc/flatcc.h"
+#include "symbols.h"
+
+#define ELEM_BUFSIZ (64 * 1024)
+#define ERROR_BUFSIZ 200
+
+#define REVERT_LIST(TYPE, FIELD, HEAD) \
+ do { \
+ TYPE *tmp__next, *tmp__prev = 0, *tmp__link = *(HEAD); \
+ while (tmp__link) { \
+ tmp__next = tmp__link->FIELD; \
+ tmp__link->FIELD = tmp__prev; \
+ tmp__prev = tmp__link; \
+ tmp__link = tmp__next; \
+ } \
+ *(HEAD) = tmp__prev; \
+ } while (0)
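+
+/*
+ * Illustrative use (hypothetical `head` variable): reverse a singly
+ * linked symbol list in place after prepend-style construction:
+ *
+ *     fb_symbol_t *head = ...;
+ *     REVERT_LIST(fb_symbol_t, link, &head);
+ */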
+
+typedef struct fb_parser fb_parser_t;
+typedef flatcc_options_t fb_options_t;
+
+typedef void (*fb_error_fun)(void *err_ctx, const char *buf, size_t len);
+
+void __flatcc_fb_default_error_out(void *err_ctx, const char *buf, size_t len);
+#define fb_default_error_out __flatcc_fb_default_error_out
+
+int __flatcc_fb_print_error(fb_parser_t *P, const char * format, ...);
+#define fb_print_error __flatcc_fb_print_error
+
+struct fb_parser {
+ fb_parser_t *dependencies;
+ fb_parser_t *inverse_dependencies;
+ fb_error_fun error_out;
+ void *error_ctx;
+
+ const char *managed_input;
+
+ fb_token_t *ts, *te;
+ int tcapacity;
+ int doc_mode;
+ fb_doc_t *doc;
+ fb_token_t *token;
+
+ size_t elem_end;
+ void *elem_buffers;
+ size_t elem;
+ size_t offset_size;
+
+ const char *line;
+ long linenum;
+
+ /* Internal id (not a pointer into token stream). */
+ fb_token_t t_none;
+ fb_token_t t_ubyte;
+
+ int failed;
+
+ unsigned char *tmp_field_marker;
+ fb_symbol_t **tmp_field_index;
+ int nesting_level;
+
+ int has_schema;
+ fb_options_t opts;
+ fb_schema_t schema;
+ fb_scope_t *current_scope;
+ char *path;
+ char *referer_path;
+};
+
+static inline void checkmem(const void *p)
+{
+ if (!p) {
+ fprintf(stderr, "error: out of memory, aborting...\n");
+ exit(1);
+ }
+}
+
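+/*
+ * Bump allocator for parser elements: requests are rounded up to
+ * 16-byte multiples and carved out of chained, zeroed ELEM_BUFSIZ
+ * blocks; the first 16 bytes of each block hold a pointer to the
+ * previous block so clear_elem_buffers (parser.c) can free them all.
+ */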
+static inline void *new_elem(fb_parser_t *P, size_t size)
+{
+ size_t elem;
+ void *buf;
+
+ size = (size + 15) & ~(size_t)15;
+ elem = P->elem;
+ if (elem + size > P->elem_end) {
+ buf = calloc(ELEM_BUFSIZ, 1);
+ checkmem(buf);
+ *(void**)buf = P->elem_buffers;
+ P->elem_buffers = buf;
+ elem = P->elem = (size_t)buf + 16;
+ P->elem_end = (size_t)buf + ELEM_BUFSIZ;
+ }
+ P->elem += size;
+ return (void*)elem;
+}
+
+int __flatcc_fb_print_error(fb_parser_t *P, const char * format, ...);
+#define fb_print_error __flatcc_fb_print_error
+
+const char *__flatcc_error_find_file_of_token(fb_parser_t *P, fb_token_t *t);
+#define error_find_file_of_token __flatcc_error_find_file_of_token
+
+/*
+ * This is the primary error reporting function.
+ * The parser is flagged as failed and error count incremented.
+ *
+ * If s is not null, then s, len replaces the token text of `t` but
+ * still reports the location of t. `peer` is optional and prints the
+ * token location and text at the end of the message.
+ * `msg` may be the only non-zero argument besides `P`.
+ *
+ * Various helper functions are available for the various cases.
+ *
+ * `fb_print_error` may be called instead to generate text to the error
+ * output that is not counted as an error.
+ */
+void __flatcc_error_report(fb_parser_t *P, fb_token_t *t, const char *msg, fb_token_t *peer, const char *s, size_t len);
+#define error_report __flatcc_error_report
+
+static void error_tok_2(fb_parser_t *P, fb_token_t *t, const char *msg, fb_token_t *peer)
+{
+ error_report(P, t, msg, peer, 0, 0);
+}
+
+static inline void error_tok(fb_parser_t *P, fb_token_t *t, const char *msg)
+{
+ error_tok_2(P, t, msg, 0);
+}
+
+/* Only use the token location. */
+static inline void error_tok_as_string(fb_parser_t *P, fb_token_t *t, const char *msg, char *s, size_t len)
+{
+ error_report(P, t, msg, 0, s, len);
+}
+
+static inline void error(fb_parser_t *P, const char *msg)
+{
+ error_tok(P, 0, msg);
+}
+
+static inline void error_name(fb_parser_t *P, fb_name_t *name, const char *msg)
+{
+ if (!name) {
+ error(P, msg);
+ } else {
+ error_report(P, 0, msg, 0, name->name.s.s, (size_t)name->name.s.len);
+ }
+}
+
+static inline void error_sym(fb_parser_t *P, fb_symbol_t *s, const char *msg)
+{
+ error_tok(P, s->ident, msg);
+}
+
+static inline void error_sym_2(fb_parser_t *P, fb_symbol_t *s, const char *msg, fb_symbol_t *s2)
+{
+ error_tok_2(P, s->ident, msg, s2->ident);
+}
+
+static inline void error_sym_tok(fb_parser_t *P, fb_symbol_t *s, const char *msg, fb_token_t *t2)
+{
+ error_tok_2(P, s->ident, msg, t2);
+}
+
+void error_ref_sym(fb_parser_t *P, fb_ref_t *ref, const char *msg, fb_symbol_t *s2);
+
+static inline void error_ref(fb_parser_t *P, fb_ref_t *ref, const char *msg)
+{
+ error_ref_sym(P, ref, msg, 0);
+}
+
+/*
+ * If `opts` is null, default options are used, otherwise opts is
+ * copied into the parser's options. The name may be a path; the basename
+ * without the default extension will be extracted. The `error_out` function
+ * is optional, otherwise output is printed to stderr, truncated to a
+ * reasonable size per error. `error_ctx` is provided as argument to
+ * `error_out` if non-zero, and otherwise ignored.
+ *
+ * This api only deals with a single schema file so a parent level
+ * driver must handle file inclusion and update P->dependencies but
+ * order is not significant (parse order is, but this is handled by
+ * updating the `include_index` in the root schema).
+ *
+ * P->dependencies must be cleared by the callee in any order, but once one
+ * is cleared the entire structure should be taken down because symbol
+ * trees point everywhere. For parses without file inclusion,
+ * dependencies will be null. Dependencies are not handled at this
+ * level. P->inverse_dependencies is just the reverse list.
+ *
+ * The file at the head of the dependencies list is the root and the
+ * one that provides the root schema. Other root schemas are not used.
+ */
+int __flatcc_fb_init_parser(fb_parser_t *P, fb_options_t *opts, const char *name,
+ fb_error_fun error_out, void *error_ctx, fb_root_schema_t *rs);
+#define fb_init_parser __flatcc_fb_init_parser
+
+int __flatcc_fb_parse(fb_parser_t *P, const char *input, size_t len, int own_buffer);
+#define fb_parse __flatcc_fb_parse
+
+void __flatcc_fb_clear_parser(fb_parser_t *P);
+#define fb_clear_parser __flatcc_fb_clear_parser
+
+#endif /* PARSER_H */
diff --git a/src/compiler/pstrutil.h b/src/compiler/pstrutil.h
new file mode 100644
index 0000000..40795a6
--- /dev/null
+++ b/src/compiler/pstrutil.h
@@ -0,0 +1,58 @@
+#ifndef PSTRUTIL_H
+#define PSTRUTIL_H
+
+#include <ctype.h> /* toupper */
+
+
+/*
+ * NOTE: unlike strncpy, we return the first character, and we do not
+ * pad up to n. Same applies to related functions.
+ */
+
+/* `strnlen` not widely supported. */
+static inline size_t pstrnlen(const char *s, size_t max_len)
+{
+ const char *end = memchr (s, 0, max_len);
+ return end ? (size_t)(end - s) : max_len;
+}
+
+static inline char *pstrcpyupper(char *dst, const char *src) {
+ char *p = dst;
+ while (*src) {
+ *p++ = (char)toupper(*src++);
+ }
+ *p = '\0';
+ return dst;
+}
+
+static inline char *pstrncpyupper(char *dst, const char *src, size_t n) {
+ size_t i;
+ for (i = 0; i < n && src[i]; ++i) {
+ dst[i] = (char)toupper(src[i]);
+ }
+ if (i < n) {
+ dst[i] = '\0';
+ }
+ return dst;
+}
+
+static inline char *pstrtoupper(char *dst) {
+ char *p;
+ for (p = dst; *p; ++p) {
+ *p = (char)toupper(*p);
+ }
+ return dst;
+}
+
+static inline char *pstrntoupper(char *dst, size_t n) {
+ size_t i;
+ for (i = 0; i < n && dst[i]; ++i) {
+ dst[i] = (char)toupper(dst[i]);
+ }
+ return dst;
+}
+
+#undef strnlen
+#define strnlen pstrnlen
+
+#endif /* PSTRUTIL_H */
diff --git a/src/compiler/semantics.c b/src/compiler/semantics.c
new file mode 100644
index 0000000..d0a766a
--- /dev/null
+++ b/src/compiler/semantics.c
@@ -0,0 +1,1962 @@
+#include <string.h>
+#include <assert.h>
+
+#include "semantics.h"
+#include "parser.h"
+#include "coerce.h"
+#include "stdio.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+/* Same order as enum! */
+static const char *fb_known_attribute_names[] = {
+ "",
+ "id",
+ "deprecated",
+ "original_order",
+ "force_align",
+ "bit_flags",
+ "nested_flatbuffer",
+ "key",
+ "required",
+ "hash",
+ "base64",
+ "base64url",
+ "primary_key",
+ "sorted",
+};
+
+static const int fb_known_attribute_types[] = {
+ vt_invalid, /* Unknowns have arbitrary types. */
+ vt_uint,
+ vt_missing,
+ vt_missing,
+ vt_uint,
+ vt_missing,
+ vt_string,
+ vt_missing,
+ vt_missing,
+ vt_string,
+ vt_missing,
+ vt_missing,
+ vt_missing,
+ vt_missing,
+};
+
+static fb_scalar_type_t map_scalar_token_type(fb_token_t *t)
+{
+ switch (t->id) {
+ case tok_kw_uint64:
+ case tok_kw_ulong:
+ return fb_ulong;
+ case tok_kw_uint32:
+ case tok_kw_uint:
+ return fb_uint;
+ case tok_kw_uint16:
+ case tok_kw_ushort:
+ return fb_ushort;
+ case tok_kw_uint8:
+ case tok_kw_ubyte:
+ return fb_ubyte;
+ case tok_kw_char:
+ return fb_char;
+ case tok_kw_bool:
+ return fb_bool;
+ case tok_kw_int64:
+ case tok_kw_long:
+ return fb_long;
+ case tok_kw_int32:
+ case tok_kw_int:
+ return fb_int;
+ case tok_kw_int16:
+ case tok_kw_short:
+ return fb_short;
+ case tok_kw_int8:
+ case tok_kw_byte:
+ return fb_byte;
+ case tok_kw_float64:
+ case tok_kw_double:
+ return fb_double;
+ case tok_kw_float32:
+ case tok_kw_float:
+ return fb_float;
+ default:
+ return fb_missing_type;
+ }
+}
+
+/*
+ * The flatc compiler currently has a 256 limit.
+ *
+ * Some target C compilers might not respect alignments above
+ * 16 and may require the PAD option of the C code generator.
+ */
+static inline int is_valid_align(uint64_t align)
+{
+ uint64_t n = 1;
+ if (align == 0 || align > FLATCC_FORCE_ALIGN_MAX) {
+ return 0;
+ }
+ while (n <= align) {
+ if (n == align) {
+ return 1;
+ }
+ n *= 2;
+ }
+ return 0;
+}
+
+static inline uint64_t fb_align(uint64_t size, uint64_t align)
+{
+ assert(is_valid_align(align));
+
+ return (size + align - 1) & ~(align - 1);
+}
+
+/*
+ * The FNV-1a 32-bit little endian hash is a FlatBuffers standard for
+ * transmission of type identifiers in a compact form, in particular as
+ * alternative file identifiers. Note that if hash becomes 0, we map it
+ * to hash("").
+ */
+static inline void set_type_hash(fb_compound_type_t *ct)
+{
+ fb_ref_t *name;
+ fb_symbol_t *sym;
+ uint32_t hash;
+
+ hash = fb_hash_fnv1a_32_init();
+ if (ct->scope) {
+ for (name = ct->scope->name; name; name = name->link) {
+ hash = fb_hash_fnv1a_32_append(hash, name->ident->text, (size_t)name->ident->len);
+ hash = fb_hash_fnv1a_32_append(hash, ".", 1);
+ }
+ }
+ sym = &ct->symbol;
+ hash = fb_hash_fnv1a_32_append(hash, sym->ident->text, (size_t)sym->ident->len);
+ if (hash == 0) {
+ hash = fb_hash_fnv1a_32_init();
+ }
+ ct->type_hash = hash;
+}
+
+static inline fb_scope_t *fb_find_scope_by_string(fb_schema_t *S, const char *name, size_t len)
+{
+ if (!S || !S->root_schema) {
+ return 0;
+ }
+ if (len == 0) {
+ /* Global scope. */
+ name = 0;
+ }
+ return fb_scope_table_find(&S->root_schema->scope_index, name, len);
+}
+
+/* count = 0 indicates zero-terminated ref list, name = 0 indicates global scope. */
+static inline fb_scope_t *fb_find_scope_by_ref(fb_schema_t *S, const fb_ref_t *name, int count)
+{
+ if (!S || !S->root_schema) {
+ return 0;
+ }
+ return fb_scope_table_find(&S->root_schema->scope_index, name, (size_t)(-count));
+}
+
+static inline fb_symbol_t *define_fb_symbol(fb_symbol_table_t *si, fb_symbol_t *sym)
+{
+ return fb_symbol_table_insert_item(si, sym, ht_keep);
+}
+
+static inline fb_symbol_t *find_fb_symbol_by_token(fb_symbol_table_t *si, fb_token_t *token)
+{
+ return fb_symbol_table_find(si, token->text, (size_t)token->len);
+}
+
+static inline fb_name_t *define_fb_name(fb_name_table_t *ni, fb_name_t *name)
+{
+ return fb_name_table_insert_item(ni, name, ht_keep);
+}
+
+static inline fb_name_t *find_fb_name_by_token(fb_name_table_t *ni, fb_token_t *token)
+{
+ return fb_name_table_find(ni, token->text, (size_t)token->len);
+}
+
+/* Returns 1 if the value already exists, 0 otherwise. */
+static inline int add_to_value_set(fb_value_set_t *vs, fb_value_t *value)
+{
+ return fb_value_set_insert_item(vs, value, ht_keep) != 0;
+}
+
+static inline int is_in_value_set(fb_value_set_t *vs, fb_value_t *value)
+{
+ return 0 != fb_value_set_find_item(vs, value);
+}
+
+/*
+ * An immediate parent scope does not necessarily exist and it might
+ * appear in a later search, so we return the nearest existing parent
+ * and do not cache the parent.
+ */
+static inline fb_scope_t *find_parent_scope(fb_parser_t *P, fb_scope_t *scope)
+{
+ fb_ref_t *p;
+ int count;
+ fb_scope_t *parent;
+
+ parent = 0;
+ count = 0;
+ if (scope == 0) {
+ return 0;
+ }
+ p = scope->name;
+ while (p) {
+ ++count;
+ p = p->link;
+ }
+ if (count == 0) {
+ return 0;
+ }
+ while (count-- > 1) {
+ if ((parent = fb_find_scope_by_ref(&P->schema, scope->name, count))) {
+ return parent;
+ }
+ }
+ /* Root scope. */
+ return fb_find_scope_by_ref(&P->schema, 0, 0);
+}
+
+static inline fb_symbol_t *lookup_string_reference(fb_parser_t *P, fb_scope_t *local, const char *s, size_t len)
+{
+ fb_symbol_t *sym;
+ fb_scope_t *scope;
+ const char *name, *basename;
+ size_t k;
+
+ name = s;
+ basename = s;
+ k = len;
+ while (k > 0) {
+ if (s[--k] == '.') {
+ basename = s + k + 1;
+ --len;
+ break;
+ }
+ }
+ len -= k;
+ if (local && k == 0) {
+ do {
+ if ((sym = fb_symbol_table_find(&local->symbol_index, basename, len))) {
+ if (get_compound_if_visible(&P->schema, sym)) {
+ return sym;
+ }
+ }
+ local = find_parent_scope(P, local);
+ } while (local);
+ return 0;
+ }
+ if (!(scope = fb_find_scope_by_string(&P->schema, name, k))) {
+ return 0;
+ }
+ return fb_symbol_table_find(&scope->symbol_index, basename, len);
+}
+
+/*
+ * First search the optional local scope, then the scope of the namespace prefix if any.
+ * If `enumval` is non-zero, the last name part is stored in that
+ * pointer and the lookup stops before that part.
+ *
+ * If the reference is prefixed with a namespace then the scope is
+ * looked up relative to root then the basename is searched in that
+ * scope.
+ *
+ * If the reference is not prefixed with a namespace then the name is
+ * searched in the local symbol table (which may be the root if null) and
+ * if that fails, the nearest existing parent scope is used as the new
+ * local scope and the process is repeated until local is root.
+ *
+ * This means that namespace prefixes cannot be relative to a parent
+ * namespace or to the current scope, but simple names can be found in a
+ * parent namespace.
+ */
+static inline fb_symbol_t *lookup_reference(fb_parser_t *P, fb_scope_t *local, fb_ref_t *name, fb_ref_t **enumval)
+{
+ fb_ref_t *basename, *last, *p;
+ fb_scope_t *scope;
+ fb_symbol_t *sym;
+ int count;
+
+ count = 0;
+ scope = 0;
+ p = name;
+ last = 0;
+ basename = 0;
+ while (p) {
+ basename = last;
+ last = p;
+ p = p->link;
+ ++count;
+ }
+ if (enumval) {
+ --count;
+ *enumval = last;
+ } else {
+ basename = last;
+ }
+ if (!basename) {
+ return 0;
+ }
+ if (local && count == 1) {
+ do {
+ if ((sym = find_fb_symbol_by_token(&local->symbol_index, basename->ident))) {
+ if (get_compound_if_visible(&P->schema, sym)) {
+ return sym;
+ }
+ }
+ local = find_parent_scope(P, local);
+ } while (local);
+ return 0;
+ }
+ /* Null name is valid in scope lookup, indicating global scope. */
+ if (count == 1) {
+ name = 0;
+ }
+ if (!(scope = fb_find_scope_by_ref(&P->schema, name, count - 1))) {
+ return 0;
+ }
+ sym = find_fb_symbol_by_token(&scope->symbol_index, basename->ident);
+ if (sym && get_compound_if_visible(&P->schema, sym)) {
+ return sym;
+ }
+ return 0;
+}
+
+static inline fb_symbol_t *lookup_type_reference(fb_parser_t *P, fb_scope_t *local, fb_ref_t *name)
+{
+ return lookup_reference(P, local, name, 0);
+}
+
+/*
+ * `ct` is null when looking up names for scalar types and otherwise it is
+ * the enum type being assigned. The provided reference may reference
+ * an enum value in the `ct` type, or another enum if a scope/type is
+ * given.
+ */
+static inline int lookup_enum_name(fb_parser_t *P, fb_scope_t *local, fb_compound_type_t *ct, fb_ref_t *ref, fb_value_t *value)
+{
+ fb_symbol_t *sym;
+ fb_ref_t *enumval;
+ fb_member_t *member;
+
+ enumval = 0;
+ assert(ref);
+ assert(ct == 0 || ct->symbol.kind == fb_is_enum);
+ sym = lookup_reference(P, local, ref, &enumval);
+ if (sym && sym->kind == fb_is_enum) {
+ ct = (fb_compound_type_t *)sym;
+ } else if (ref->link) {
+ /* If there was a scope / type prefix, it was not found, or it was not an enum type. */
+ return -1;
+ }
+ if (!ct) {
+ return -1;
+ }
+ sym = find_fb_symbol_by_token(&ct->index, enumval->ident);
+ if (!sym) {
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ *value = member->value;
+ return 0;
+}
+
+/* This is repeated for every include file, but this poses no problem. */
+static void install_known_attributes(fb_parser_t *P)
+{
+ unsigned int i;
+ fb_attribute_t *a;
+
+ for (i = 0; i < KNOWN_ATTR_COUNT; ++i) {
+ /* Don't put it in the parsed list, just the index. */
+ a = new_elem(P, sizeof(*a));
+ a->known = i;
+ a->name.name.s.s = (char *)fb_known_attribute_names[i];
+ a->name.name.s.len = (int)strlen(fb_known_attribute_names[i]);
+ a->name.name.type = vt_string;
+ a->name.link = 0;
+ if ((a = (fb_attribute_t *)define_fb_name(&P->schema.root_schema->attribute_index, &a->name))) {
+ /*
+ * If the user already defined the attribute, keep that instead.
+ * (Memory leak is ok here.)
+ */
+ a->known = i;
+ }
+ }
+}
+
+static void revert_order(fb_compound_type_t **list) {
+ fb_compound_type_t *next, *prev = 0, *link = *list;
+
+ while (link) {
+ next = link->order;
+ link->order = prev;
+ prev = link;
+ link = next;
+ }
+ *list = prev;
+}
+
+static inline unsigned short process_metadata(fb_parser_t *P, fb_metadata_t *m,
+ uint16_t expect, fb_metadata_t *out[KNOWN_ATTR_COUNT])
+{
+ uint16_t flags;
+ unsigned int i, n = FLATCC_ATTR_MAX;
+ int type;
+ fb_attribute_t *a;
+
+ memset(out, 0, sizeof(out[0]) * KNOWN_ATTR_COUNT);
+ for (flags = 0; m && n; --n, m = m->link) {
+ a = (fb_attribute_t *)find_fb_name_by_token(&P->schema.root_schema->attribute_index, m->ident);
+ if (!a) {
+ error_tok(P, m->ident, "unknown attribute not declared");
+ continue;
+ }
+ if (!(i = a->known)) {
+ continue;
+ }
+ if (!((1 << i) & expect)) {
+ error_tok(P, m->ident, "known attribute not expected in this context");
+ continue;
+ }
+ flags |= 1 << i;
+ if (out[i]) {
+ error_tok(P, m->ident, "known attribute listed multiple times");
+ continue;
+ }
+ out[i] = m;
+ type = fb_known_attribute_types[i];
+ if (type == vt_missing && m->value.type != vt_missing) {
+ error_tok(P, m->ident, "known attribute does not expect a value");
+ continue;
+ }
+ if (type == vt_string && m->value.type != vt_string) {
+ error_tok(P, m->ident, "known attribute expects a string");
+ continue;
+ }
+ if (type == vt_uint && m->value.type != vt_uint) {
+ error_tok(P, m->ident, "known attribute expects an unsigned integer");
+ continue;
+ }
+ if (type == vt_int && m->value.type != vt_uint && m->value.type != vt_int) {
+ error_tok(P, m->ident, "known attribute expects an integer");
+ continue;
+ }
+ if (type == vt_bool && m->value.type != vt_bool) {
+ error_tok(P, m->ident, "known attribute expects 'true' or 'false'");
+ continue;
+ }
+ }
+ if (m) {
+ error_tok(P, m->ident, "too many attributes");
+ }
+ return flags;
+}
+
+/*
+ * Recursive types are allowed, according to FlatBuffers Internals doc.,
+ * but this cannot be possible for structs because they have no default
+ * value or null option, and can only hold scalars and other structs, so
+ * recursion would never terminate. Enums are simple types and cannot be
+ * recursive either. Unions reference tables which may reference unions,
+ * and recursion works well here. Tables allow any other table, union,
+ * or scalar value to be optional or default, so recursion is possible.
+ * In conclusion, Unions and Table members may reference all other
+ * types, and self. Enums are trivially checked because they only allow
+ * scalars, which leaves structs as the only type that can build illegal forms.
+ *
+ * Object instances cannot be recursive, meaning the object graph is
+ * always a tree, but this isn't really a concern for the schema
+ * compiler, and for the builder it happens naturally as it only adds to
+ * the buffer (though a compressor might reuse old data without
+ * violating the tree?).
+ *
+ * Conclusion: check structs for circular references and allow
+ * everything else to unfold, provided they otherwise pass type checks.
+ *
+ * Algorithm:
+ *
+ * Depth first search of the struct reference tree. We maintain flags to
+ * find back-links. We prune sub-trees already fully analyzed by using
+ * the closed flag. This operation is O(N) since each struct member is
+ * visited once.
+ *
+ * Recursion is limited to prevent blowing the stack and to protect
+ * against abuse.
+ */
+static int analyze_struct(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *type;
+ fb_member_t *member;
+ int ret = 0;
+ uint64_t size;
+ uint16_t align;
+ fb_token_t *t;
+
+ assert(ct->symbol.kind == fb_is_struct);
+
+ assert(!(ct->symbol.flags & fb_circular_open));
+ if (ct->symbol.flags & fb_circular_closed) {
+ return 0;
+ }
+ assert(!ct->order);
+ ct->symbol.flags |= fb_circular_open;
+ align = 1;
+ for (sym = ct->members; sym; sym = sym->link) {
+ type = 0;
+ if (P->nesting_level >= FLATCC_NESTING_MAX) {
+ error(P, "maximum allowed nesting level exceeded while processing struct hierarchy");
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_fixed_array_type:
+ case vt_scalar_type:
+ t = member->type.t;
+ member->type.st = map_scalar_token_type(t);
+ size = sizeof_scalar_type(member->type.st);
+ if (size < 1) {
+ error_sym_tok(P, sym, "unexpected type", t);
+ return -1;
+ }
+ member->align = (uint16_t)size;
+ member->size = size * member->type.len;
+ break;
+ case vt_fixed_array_compound_type_ref:
+ case vt_compound_type_ref:
+ /* Enums might not be valid, but then it would be detected earlier. */
+ if (member->type.ct->symbol.kind == fb_is_enum) {
+ type = member->type.ct;
+ size = type->size;
+ member->align = (uint16_t)size;
+ member->size = member->type.len * type->size;
+ break;
+ } else if (member->type.ct->symbol.kind == fb_is_struct) {
+ type = member->type.ct;
+ if (type->symbol.flags & fb_circular_open) {
+ error_sym_2(P, sym, "circular reference to struct at", &type->symbol);
+ return -1;
+ }
+ if (!(type->symbol.flags & fb_circular_closed)) {
+ if (P->opts.hide_later_struct) {
+ error_sym_2(P, sym, "dependency on later defined struct not permitted with current settings", &type->symbol);
+ }
+ ++P->nesting_level;
+ ret = analyze_struct(P, type);
+ --P->nesting_level;
+ if (ret) {
+ return ret;
+ }
+ }
+ member->align = type->align;
+ member->size = member->type.len * type->size;
+ break;
+ } else {
+ error_sym(P, sym, "unexpected compound type for field");
+ return -1;
+ }
+ break;
+ case vt_invalid:
+ /* Old error. */
+ return -1;
+ default:
+ error_sym(P, sym, "unexpected type");
+ return -1;
+ }
+ member->offset = fb_align(ct->size, member->align);
+ if (member->offset < ct->size || member->offset + member->size < member->offset) {
+ error_sym(P, sym, "struct size overflow");
+ return -1;
+ }
+ size = member->offset + member->size;
+ if (size < ct->size || size > FLATCC_STRUCT_MAX_SIZE) {
+ error_sym(P, sym, "struct field overflows maximum allowed struct size");
+ }
+ ct->size = size;
+ /*
+ * FB spec is not very clear on this - but experimentally a
+ * force aligned member struct will force that alignment upon a
+ * containing struct if the alignment would otherwise be
+ * smaller. In other words, a struct is aligned to the alignment
+ * of the largest member, not just the largest scalar member
+ * (directly or indirectly).
+ */
+ if (align < member->align) {
+ align = member->align;
+ }
+ }
+ if (ct->align > 0) {
+ if (align > ct->align) {
+ error_sym(P, &ct->symbol, "'force_align' cannot be smaller than natural alignment for");
+ ct->align = align;
+ }
+ } else {
+ ct->align = align;
+ }
+ /* Add trailing padding if necessary. */
+ ct->size = fb_align(ct->size, ct->align);
+
+ if (ct->size == 0) {
+ error_sym(P, &ct->symbol, "struct cannot be empty");
+ return -1;
+ }
+
+ ct->symbol.flags |= fb_circular_closed;
+ ct->symbol.flags &= (uint16_t)~fb_circular_open;
+ ct->order = P->schema.ordered_structs;
+ P->schema.ordered_structs = ct;
+ return ret;
+}
+
+static int define_nested_table(fb_parser_t *P, fb_scope_t *local, fb_member_t *member, fb_metadata_t *m)
+{
+ fb_symbol_t *type_sym;
+
+ if (member->type.type != vt_vector_type || member->type.st != fb_ubyte) {
+ error_tok(P, m->ident, "'nested_flatbuffer' attribute requires a [ubyte] vector type");
+ return -1;
+ }
+ if (m->value.type != vt_string) {
+ /* All known attributes get automatically type checked, so just ignore. */
+ return -1;
+ }
+ type_sym = lookup_string_reference(P, local, m->value.s.s, (size_t)m->value.s.len);
+ if (!type_sym) {
+ error_tok_as_string(P, m->ident, "nested reference not found", m->value.s.s, (size_t)m->value.s.len);
+ return -1;
+ }
+ if (type_sym->kind != fb_is_table) {
+ if (!P->opts.allow_struct_root) {
+ error_tok_2(P, m->ident, "nested reference does not refer to a table", type_sym->ident);
+ return -1;
+ }
+ if (type_sym->kind != fb_is_struct) {
+ error_tok_2(P, m->ident, "nested reference does not refer to a table or a struct", type_sym->ident);
+ return -1;
+ }
+ }
+ member->nest = (fb_compound_type_t *)type_sym;
+ return 0;
+}
+
+static int process_struct(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT], *m;
+ uint16_t allow_flags;
+ int key_count = 0, primary_count = 0, key_ok = 0;
+
+ if (ct->type.type) {
+ error_sym(P, &ct->symbol, "internal error: struct cannot have a type");
+ return -1;
+ }
+ ct->metadata_flags = process_metadata(P, ct->metadata, fb_f_force_align, knowns);
+ if ((m = knowns[fb_attr_force_align])) {
+ if (!is_valid_align(m->value.u)) {
+ error_sym(P, &ct->symbol, "'force_align' exceeds maximum permitted alignment or is not a power of 2");
+ } else {
+ /* This may still fail on natural alignment size. */
+ ct->align = (uint16_t)m->value.u;
+ }
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ error_sym_2(P, sym, "struct field already defined here", old);
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: field type expected");
+ return -1;
+ }
+ key_ok = 1;
+ member = (fb_member_t *)sym;
+ allow_flags = 0;
+ /* Notice the difference between fb_f_ and fb_attr_ (flag vs index). */
+ if (P->opts.allow_struct_field_key) {
+ allow_flags |= fb_f_key;
+ if (P->opts.allow_primary_key) {
+ allow_flags |= fb_f_primary_key;
+ }
+ }
+ if (P->opts.allow_struct_field_deprecate) {
+ allow_flags |= fb_f_deprecated;
+ }
+ member->metadata_flags = process_metadata(P, member->metadata, allow_flags, knowns);
+ switch (member->type.type) {
+ case vt_fixed_array_type_ref:
+ key_ok = 0;
+ goto lbl_type_ref;
+ case vt_type_ref:
+lbl_type_ref:
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with struct field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ member->type.type = member->type.type == vt_fixed_array_type_ref ?
+ vt_fixed_array_compound_type_ref : vt_compound_type_ref;
+ if (type_sym->kind != fb_is_struct) {
+ if (P->opts.allow_enum_struct_field) {
+ if (type_sym->kind != fb_is_enum) {
+ error_sym_2(P, sym, "struct fields can only be scalars, structs, and enums, or arrays of these, but has type", type_sym);
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ if (!P->opts.allow_enum_key) {
+ key_ok = 0;
+ break;
+ }
+ } else {
+ error_sym_2(P, sym, "struct fields can only be scalars and structs, or arrays of these, but has type", type_sym);
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ } else {
+ key_ok = 0;
+ }
+ break;
+ case vt_fixed_array_string_type:
+ error_sym(P, sym, "fixed length arrays cannot have string elements");
+ member->type.type = vt_invalid;
+ return -1;
+ case vt_fixed_array_type:
+ key_ok = 0;
+ break;
+ case vt_scalar_type:
+ break;
+ default:
+ error_sym(P, sym, "struct member member can only be of struct scalar, or fixed length scalar array type");
+ return -1;
+ }
+ if (!key_ok) {
+ if (member->metadata_flags & fb_f_key) {
+ member->type.type = vt_invalid;
+ error_sym(P, sym, "key attribute now allowed for this kind of field");
+ return -1;
+ }
+ if (member->metadata_flags & fb_f_primary_key) {
+ member->type.type = vt_invalid;
+ error_sym(P, sym, "primary_key attribute now allowed for this kind of field");
+ return -1;
+ }
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ if (member->metadata_flags & fb_f_key) {
+ error_sym(P, sym, "key attribute not allowed for deprecated struct member");
+ return -1;
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute not allowed for deprecated struct member");
+ return -1;
+ }
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute conflicts with key attribute");
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ key_count++;
+ if (!ct->primary_key) {
+ /* First key is primary key if no primary key is given explicitly. */
+ ct->primary_key = member;
+ }
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ if (primary_count++) {
+ error_sym(P, sym, "at most one struct member can have a primary_key attribute");
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ key_count++;
+ /* Allow backends to treat primary key as an ordinary key. */
+ member->metadata_flags |= fb_f_key;
+ ct->primary_key = member;
+ }
+ if (member->value.type) {
+ error_sym(P, sym, "struct member member cannot have a default value");
+ continue;
+ }
+ }
+ if (key_count) {
+ ct->symbol.flags |= fb_indexed;
+ }
+ /* Set primary key flag for backends even if chosen by default. */
+ if (ct->primary_key) {
+ ct->primary_key->metadata_flags |= fb_f_primary_key;
+ }
+ if (key_count > 1 && !P->opts.allow_multiple_key_fields) {
+ error_sym(P, &ct->symbol, "table has multiple key fields, but at most one is permitted");
+ return -1;
+ }
+ return 0;
+}
+
+static fb_member_t *original_order_members(fb_parser_t *P, fb_member_t *next)
+{
+ fb_member_t *head = 0;
+ fb_member_t **tail = &head;
+
+ /* Not used for now, but in case we need error messages etc. */
+ (void)P;
+
+ while (next) {
+ *tail = next;
+ tail = &next->order;
+ next = (fb_member_t *)(((fb_symbol_t *)next)->link);
+ }
+ *tail = 0;
+ return head;
+}
+
+/*
+ * Alignment of table offset fields is generally not stored, and
+ * vectors store the element alignment for scalar types, so we
+ * detect alignment based on type also. Unions are tricky since they
+ * use a single byte type followed by an offset, but it is impractical
+ * to store these separately so we sort by the type field.
+ */
+static fb_member_t *align_order_members(fb_parser_t *P, fb_member_t *members)
+{
+ uint16_t i, j, k;
+ fb_member_t *heads[9] = {0};
+ fb_member_t **tails[9] = {0};
+ fb_member_t *next = members;
+
+ while (next) {
+ k = next->align;
+ switch (next->type.type) {
+ case vt_compound_type_ref:
+ switch (next->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_enum:
+ k = next->type.ct->align;
+ break;
+ case fb_is_union:
+ /*
+ * Unions align to their offsets because the type can
+ * always be added last in a second pass since it is 1
+ * byte.
+ */
+ k = (uint16_t)P->opts.offset_size;
+ break;
+ default:
+ k = (uint16_t)P->opts.offset_size;
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ case vt_string_type:
+ case vt_vector_type:
+ case vt_vector_string_type:
+ k = (uint16_t)P->opts.offset_size;
+ break;
+ case vt_invalid:
+ /* Just to have some sane behavior. */
+ return original_order_members(P, members);
+ default:
+ k = next->align;
+ break;
+ }
+ assert(k > 0);
+ i = 0;
+ while (k >>= 1) {
+ ++i;
+ }
+ /* Normally the largest alignment is 256, but otherwise we group them together. */
+ if (i > 7) {
+ i = 7;
+ }
+ if (!heads[i]) {
+ heads[i] = next;
+ } else {
+ *tails[i] = next;
+ }
+ tails[i] = &next->order;
+ next = (fb_member_t *)(((fb_symbol_t *)next)->link);
+ }
+ i = j = 8;
+ tails[8] = &heads[8];
+ while (j) {
+ while (i && !heads[--i]) {
+ }
+ *tails[j] = heads[i];
+ j = i;
+ }
+ return heads[8];
+}
+
+/* Temporary markers only used during assignment of field identifiers. */
+enum { unused_field = 0, normal_field, type_field };
+
+static int process_table(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ char msg_buf [100];
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT], *m;
+ int ret = 0;
+ uint64_t count = 0;
+ int need_id = 0, id_failed = 0;
+ uint64_t max_id = 0;
+ int key_ok, key_count = 0, primary_count = 0;
+ int is_union_vector, is_vector;
+ uint64_t i, j;
+ int max_id_errors = 10;
+ int allow_flags = 0;
+
+ /*
+ * This just tracks the presence of a `normal_field` or a hidden
+ * `type_field`. The size is limited to 16-bit unsigned offsets.
+ * It is only of relevance for the optional `id` table field
+ * attribute.
+ */
+ uint8_t *field_marker = 0;
+ fb_symbol_t **field_index = 0;
+
+ assert(ct->symbol.kind == fb_is_table);
+ assert(!ct->type.type);
+
+ ct->metadata_flags = process_metadata(P, ct->metadata, fb_f_original_order, knowns);
+ /*
+ * `original_order` now lives as a flag, we need not consider it
+ * further until code generation.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ key_ok = 0;
+ type_sym = 0;
+ is_vector = 0;
+ is_union_vector = 0;
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ error_sym_2(P, sym, "table member already defined here", old);
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: member type expected");
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ if (member->type.type == vt_invalid) {
+ continue;
+ }
+ if (member->type.type == vt_scalar_type || member->type.type == vt_vector_type) {
+ member->type.st = map_scalar_token_type(member->type.t);
+ }
+ allow_flags =
+ fb_f_id | fb_f_nested_flatbuffer | fb_f_deprecated | fb_f_key |
+ fb_f_required | fb_f_hash | fb_f_base64 | fb_f_base64url | fb_f_sorted;
+
+ if (P->opts.allow_primary_key) {
+ allow_flags |= fb_f_primary_key;
+ }
+ member->metadata_flags = process_metadata(P, member->metadata, (uint16_t)allow_flags, knowns);
+ if ((m = knowns[fb_attr_nested_flatbuffer])) {
+ define_nested_table(P, ct->scope, member, m);
+ }
+ /* Note: we allow base64 and base64url with nested attribute. */
+ if ((member->metadata_flags & fb_f_base64) &&
+ (member->type.type != vt_vector_type || member->type.st != fb_ubyte)) {
+ error_sym(P, sym, "'base64' attribute is only allowed on vectors of type ubyte");
+ }
+ if ((member->metadata_flags & fb_f_base64url) &&
+ (member->type.type != vt_vector_type || member->type.st != fb_ubyte)) {
+ error_sym(P, sym, "'base64url' attribute is only allowed on vectors of type ubyte");
+ }
+ if ((member->metadata_flags & (fb_f_base64 | fb_f_base64url)) ==
+ (fb_f_base64 | fb_f_base64url)) {
+ error_sym(P, sym, "'base64' and 'base64url' attributes cannot both be set");
+ }
+ m = knowns[fb_attr_id];
+ if (m && count == 0) {
+ need_id = 1;
+ field_marker = P->tmp_field_marker;
+ memset(field_marker, 0, (size_t)P->opts.vt_max_count);
+ }
+ if (!id_failed) {
+ if (count >= P->opts.vt_max_count) {
+ error_sym(P, sym, "too many fields for vtable size");
+ id_failed = 1;
+ } else if (!need_id) {
+ member->id = (unsigned short)count;
+ }
+ ++count;
+ }
+ switch (member->type.type) {
+ case vt_scalar_type:
+ if (member->value.type == vt_null) {
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ member->flags |= fb_fm_optional;
+ }
+ if (member->metadata_flags & fb_f_required) {
+ if (member->flags & fb_fm_optional) {
+ error_sym(P, sym, "'required' attribute is incompatible with optional table field (= null)");
+ } else {
+ error_sym(P, sym, "'required' attribute is redundant on scalar table field");
+ }
+ }
+ key_ok = 1;
+ if (member->value.type == vt_name_ref) {
+ if (lookup_enum_name(P, ct->scope, 0, member->value.ref, &member->value)) {
+ error_ref_sym(P, member->value.ref, "unknown name used as initializer for scalar field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ }
+ if (!member->value.type) {
+ /*
+ * Simplifying by ensuring we always have a default
+ * value where an initializer is possible (also goes for enum).
+ */
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ }
+ if (fb_coerce_scalar_type(P, sym, member->type.st, &member->value)) {
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->size = sizeof_scalar_type(member->type.st);
+ member->align = (uint16_t)member->size;
+ break;
+ case vt_vector_type:
+ is_vector = 1;
+ member->size = sizeof_scalar_type(member->type.st);
+ member->align = (uint16_t)member->size;
+ if (member->value.type) {
+ error_sym(P, sym, "scalar vectors cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case vt_fixed_array_type_ref:
+ case vt_fixed_array_string_type:
+ case vt_fixed_array_type:
+ error_sym(P, sym, "fixed length arrays can only be used with structs");
+ member->type.type = vt_invalid;
+ return -1;
+ case vt_string_type:
+ /* `size` or `align` not defined - these are implicit uoffset types. */
+ key_ok = P->opts.allow_string_key;
+ if (member->value.type) {
+ error_sym(P, sym, "strings cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case vt_vector_string_type:
+ is_vector = 1;
+ /* `size` or `align` not defined - these are implicit uoffset types. */
+ if (member->value.type) {
+ error_sym(P, sym, "string vectors cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case vt_type_ref:
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with table field", sym);
+ member->type.type = vt_invalid;
+ /* We cannot count id's without knowing if it is a union. */
+ id_failed = 1;
+ continue;
+ }
+ switch (type_sym->kind) {
+ case fb_is_enum:
+ /*
+ * Note that enums without a 0 element require an
+ * initializer in the schema, but that cannot happen
+ * with a null value, so in this case the value is forced
+ * to 0. This is only relevant when using the `_get()`
+ * accessor instead of `_option()`.
+ */
+ if (member->value.type == vt_null) {
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ member->flags |= fb_fm_optional;
+ }
+ if (member->metadata_flags & fb_f_required) {
+ if (member->flags & fb_fm_optional) {
+ error_sym(P, sym, "'required' attribute is incompatible with optional enum table field (= null)");
+ } else {
+ error_sym(P, sym, "'required' attribute is redundant on enum table field");
+ }
+ }
+ key_ok = P->opts.allow_enum_key;
+ break;
+ case fb_is_table:
+ case fb_is_struct:
+ case fb_is_union:
+ break;
+ case fb_is_rpc_service:
+ error_sym_2(P, sym, "rpc service is not a valid table field type", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ default:
+ error_sym_2(P, sym, "internal error: unexpected field type", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.type = vt_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ /*
+ * Note: this information transfer won't always work because
+ * structs do not know their full size at this point so
+ * codegen must use the member->type.ct values.
+ */
+ member->size = member->type.ct->size;
+ member->align = member->type.ct->align;
+
+ if (type_sym->kind == fb_is_union && !id_failed) {
+ /* Hidden union type field. */
+ if (!need_id) {
+ member->id = (unsigned short)count;
+ }
+ ++count;
+ }
+ if (member->value.type) {
+ if (type_sym->kind != fb_is_enum) {
+ error_sym(P, sym, "non-scalar field cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (member->value.type == vt_name_ref) {
+ if (lookup_enum_name(P, ct->scope, member->type.ct, member->value.ref, &member->value)) {
+ error_ref_sym(P, member->value.ref, "unknown name used as initializer for enum field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ } else {
+ if (fb_coerce_scalar_type(P, sym, ((fb_compound_type_t *)type_sym)->type.st, &member->value)) {
+ member->type.type = vt_invalid;
+ continue;
+ }
+ /* Bitflags can have complex combinations of values, and do not natively have a 0 value. */
+ if (P->opts.strict_enum_init && !(member->type.ct->metadata_flags & fb_f_bit_flags)
+ && !(member->flags & fb_fm_optional)) {
+ if (!is_in_value_set(&member->type.ct->value_set, &member->value)) {
+ error_sym(P, sym, "initializer does not match a defined enum value");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ } else {
+ /* Enum is the only type that cannot always default to 0. */
+ if (type_sym->kind == fb_is_enum) {
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ if (fb_coerce_scalar_type(P, type_sym, ((fb_compound_type_t *)type_sym)->type.st, &member->value)) {
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (P->opts.strict_enum_init) {
+ /* TODO: consider if this error is necessary for bit_flags - flatc 2.0.0 errors on this. */
+ if (!is_in_value_set(&member->type.ct->value_set, &member->value)) {
+ error_sym_2(P, sym,
+ "enum type requires an explicit initializer because it has no 0 value", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ }
+ break;
+ case vt_vector_type_ref:
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown vector type reference used with table field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ switch (type_sym->kind) {
+ case fb_is_enum:
+ case fb_is_table:
+ case fb_is_struct:
+ case fb_is_union:
+ break;
+ default:
+ /* Vectors of strings are handled separately but this is irrelevant to the user. */
+ error_sym_tok(P, sym, "vectors can only hold scalars, structs, enums, strings, tables, and unions", member->type.t);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ is_vector = 1;
+ is_union_vector = type_sym->kind == fb_is_union;
+ if (member->value.type) {
+ error_sym(P, sym, "non-scalar field cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ /* Size of the vector element, not of the vector itself. */
+ member->type.type = vt_vector_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ member->size = member->type.ct->size;
+ member->align = member->type.ct->align;
+ if (type_sym->kind == fb_is_union && !id_failed) {
+ /* Hidden union type field. */
+ if (!need_id) {
+ member->id = (unsigned short)count;
+ }
+ ++count;
+ }
+ break;
+ default:
+ error_sym(P, sym, "unexpected table field type encountered");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (!id_failed) {
+ if (m && !need_id && !id_failed) {
+ error_tok(P, m->ident, "unexpected id attribute, must be used on all fields, or none");
+ id_failed = 1;
+ } else if (!m && need_id && !id_failed) {
+ error_sym(P, sym, "id attribute missing, must be used on all fields, or none");
+ id_failed = 1;
+ } else if (m) {
+ if (m->value.type == vt_uint) {
+ if (m->value.u >= P->opts.vt_max_count) {
+ error_sym(P, sym, "id too large to fit in vtable");
+ id_failed = 1;
+ } else {
+ member->id = (unsigned short)m->value.u;
+ if (member->id > max_id) {
+ max_id = member->id;
+ }
+ }
+ } else if (m->value.type == vt_int) {
+ error_tok(P, m->ident, "id attribute cannot be negative");
+ id_failed = 1;
+ } else {
+ error_tok(P, m->ident, "unexpecte id attribute type");
+ id_failed = 1;
+ }
+ }
+ }
+ if (need_id && !id_failed) {
+ if (field_marker[member->id] == type_field) {
+ error_tok(P, m->ident, "id attribute value conflicts with a hidden type field");
+ id_failed = normal_field;
+ } else if (field_marker[member->id]) {
+ error_tok(P, m->ident, "id attribute value conflicts with another field");
+ } else {
+ field_marker[member->id] = normal_field;
+ }
+ if (!id_failed && type_sym && type_sym->kind == fb_is_union) {
+ if (member->id <= 1) {
+ error_tok(P, m->ident, is_union_vector ?
+ "id attribute value should be larger to accommodate hidden union vector type field" :
+ "id attribute value should be larger to accommodate hidden union type field");
+ id_failed = 1;
+ } else if (field_marker[member->id - 1] == type_field) {
+ error_tok(P, m->ident, is_union_vector ?
+ "hidden union vector type field below attribute id value conflicts with another hidden type field" :
+ "hidden union type field below attribute id value conflicts with another hidden type field");
+ id_failed = 1;
+ } else if (field_marker[member->id - 1]) {
+ error_tok(P, m->ident, is_union_vector ?
+ "hidden union vector type field below attribute id value conflicts with another field" :
+ "hidden union type field below attribute id value conflicts with another field");
+ id_failed = 1;
+ } else {
+ field_marker[member->id - 1] = type_field;
+ }
+ }
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ if (member->metadata_flags & fb_f_key) {
+ error_sym(P, sym, "key attribute not allowed for deprecated field");
+ return -1;
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute not allowed for deprecated field");
+ return -1;
+ }
+ }
+ if (member->metadata_flags & fb_f_key) {
+ ++key_count;
+ if (!key_ok) {
+ error_sym(P, sym, "key attribute not allowed for this kind of field");
+ member->type.type = vt_invalid;
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute conflicts with key attribute");
+ member->type.type = vt_invalid;
+ } else if (!ct->primary_key ||
+ (primary_count == 0 && ct->primary_key->id > member->id)) {
+ /*
+ * Set key field with lowest id as default primary key
+ * unless a key field also has a primary_key attribute.
+ */
+ ct->primary_key = member;
+ }
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ if (primary_count++) {
+ error_sym(P, sym, "at most one field can have a primary_key attribute");
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ ct->primary_key = member;
+ }
+ key_count++;
+ /* Allow backends to treat primary key as an ordinary key. */
+ member->metadata_flags |= fb_f_key;
+ }
+ if (member->metadata_flags & fb_f_sorted) {
+ if (is_union_vector) {
+ error_sym(P, sym, "sorted attribute not allowed for union vectors");
+ member->type.type = vt_invalid;
+ return -1;
+ } else if (!is_vector) {
+ error_sym(P, sym, "sorted attribute only allowed for vectors");
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ /*
+ * A subsequent call to validate_table_attr will verify that a
+ * sorted vector of tables or structs has a defined key field.
+ * This cannot be done before all types have been processed.
+ */
+ }
+ }
+ if (!id_failed) {
+ ct->count = count;
+ }
+ if (!id_failed && need_id) {
+ if (count && max_id >= count) {
+ for (i = 0; i < max_id; ++i) {
+ if (field_marker[i] == 0) {
+ if (!max_id_errors--) {
+ error_sym(P, &ct->symbol, "... more id's missing");
+ break;
+ }
+ sprintf(msg_buf, "id range not consequtive from 0, missing id: %"PRIu64"", i);
+ error_sym(P, &ct->symbol, msg_buf);
+ }
+ }
+ id_failed = 1;
+ }
+ }
+ /* Order in which fields are laid out in the binary buffer. */
+ if (ct->metadata_flags & fb_f_original_order) {
+ ct->ordered_members = original_order_members(P, (fb_member_t *)ct->members);
+ } else {
+ /* Size efficient ordering. */
+ ct->ordered_members = align_order_members(P, (fb_member_t *)ct->members);
+ }
+ if (!id_failed && need_id && count > 0) {
+ field_index = P->tmp_field_index;
+ memset(field_index, 0, sizeof(field_index[0]) * (size_t)P->opts.vt_max_count);
+ /*
+ * Reorder by id so table constructor arguments in code
+ * generators always use same ordering across versions.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ field_index[member->id] = sym;
+ }
+ j = 0;
+ if (field_index[0] == 0) {
+ j = 1; /* Adjust for union type. */
+ }
+ ct->members = field_index[j];
+ for (i = j + 1; i <= max_id; ++i) {
+ if (field_index[i] == 0) ++i; /* Adjust for union type. */
+ field_index[j]->link = field_index[i];
+ j = i;
+ }
+ field_index[max_id]->link = 0;
+ }
+ if (key_count) {
+ ct->symbol.flags |= fb_indexed;
+ }
+ /* Set primary key flag for backends even if chosen by default. */
+ if (ct->primary_key) {
+ ct->primary_key->metadata_flags |= fb_f_primary_key;
+ }
+ if (key_count > 1 && !P->opts.allow_multiple_key_fields) {
+ error_sym(P, &ct->symbol, "table has multiple key fields, but at most one is permitted");
+ ret = -1;
+ }
+ if (id_failed) {
+ ret = -1;
+ }
+ return ret;
+}
+
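+/*
+ * For illustration only (names below are made up, not taken from any real
+ * schema): explicit ids must cover 0..max_id without gaps, and a union
+ * field with id N implicitly reserves id N-1 for its hidden type field.
+ * So a schema along the lines of
+ *
+ *     union Weapon { Sword, Axe }
+ *     table Monster {
+ *         name: string (id: 0);
+ *         equipped: Weapon (id: 2);
+ *     }
+ *
+ * is accepted, while giving `equipped` id 1 would be rejected because the
+ * hidden type field would collide with `name` at id 0.
+ */
+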
+/*
+ * Post processing of process_table because some information is only
+ * available when all types have been processed.
+ */
+static int validate_table_attr(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+
+ if (member->type.type == vt_vector_compound_type_ref &&
+ member->metadata_flags & fb_f_sorted) {
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (!member->type.ct->primary_key) {
+ error_sym(P, sym, "sorted table vector only valid when table has a key field");
+ return -1;
+ }
+ break;
+ case fb_is_struct:
+ if (!member->type.ct->primary_key) {
+ error_sym(P, sym, "sorted struct vector only valid when struct has a key field");
+ return -1;
+ }
+ break;
+ /* Other cases already handled in process_table. */
+ default:
+ continue;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * The parser already makes sure we have exactly one request type,
+ * one response type, and no initializer.
+ *
+ * We are a bit heavy on flagging attributes because their behavior
+ * isn't really specified at this point.
+ */
+static int process_rpc_service(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+#if FLATCC_ALLOW_RPC_SERVICE_ATTRIBUTES || FLATCC_ALLOW_RPC_METHOD_ATTRIBUTES
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT];
+#endif
+
+ assert(ct->symbol.kind == fb_is_rpc_service);
+ assert(!ct->type.type);
+
+ /*
+ * Deprecated is defined for fields - but it is unclear if this also
+ * covers rpc services. Anyway, we accept it since it may be useful
+ * and does no harm.
+ */
+#if FLATCC_ALLOW_RPC_SERVICE_ATTRIBUTES
+ /* But we have no known attributes to support. */
+ ct->metadata_flags = process_metadata(P, ct->metadata, 0, knowns);
+#else
+ if (ct->metadata) {
+ error_sym(P, &ct->symbol, "rpc services cannot have attributes");
+ /* Non-fatal. */
+ }
+#endif
+
+ /*
+ * `original_order` now lives as a flag; we need not consider it
+ * further until code generation.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ type_sym = 0;
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ error_sym_2(P, sym, "rpc method already defined here", old);
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: member type expected");
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ if (member->value.type) {
+ error_sym(P, sym, "internal error: initializer should have been rejected by parser");
+ }
+ if (member->type.type == vt_invalid) {
+ continue;
+ }
+ if (member->type.type != vt_type_ref) {
+ error_sym(P, sym, "internal error: request type expected to be a type reference");
+ }
+ type_sym = lookup_type_reference(P, ct->scope, member->req_type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->req_type.ref, "unknown type reference used with rpc request type", sym);
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ if (type_sym->kind != fb_is_table) {
+ error_sym_2(P, sym, "rpc request type must reference a table, defined here", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->req_type.type = vt_compound_type_ref;
+ member->req_type.ct = (fb_compound_type_t*)type_sym;
+ }
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with rpc response type", sym);
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ if (type_sym->kind != fb_is_table) {
+ error_sym_2(P, sym, "rpc response type must reference a table, defined here", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.type = vt_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ /* Symbols have no size. */
+ member->size = 0;
+ }
+#if FLATCC_ALLOW_RPC_METHOD_ATTRIBUTES
+#if FLATCC_ALLOW_DEPRECATED_RPC_METHOD
+ member->metadata_flags = process_metadata(P, member->metadata, fb_f_deprecated, knowns);
+#else
+ member->metadata_flags = process_metadata(P, member->metadata, 0, knowns);
+#endif
+#else
+ if (member->metadata) {
+ error_sym(P, sym, "rpc methods cannot have attributes");
+ /* Non-fatal. */
+ continue;
+ }
+#endif
+ }
+ return 0;
+}
+
+static int process_enum(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT];
+ fb_value_t index = { { { 0 } }, 0, 0 };
+ fb_value_t old_index;
+ int first = 1;
+ int bit_flags = 0;
+ int is_union = ct->symbol.kind == fb_is_union;
+
+ if (!is_union) {
+ assert(ct->symbol.kind == fb_is_enum);
+ if (!ct->type.type) {
+ ct->type.type = vt_invalid;
+ error_sym(P, &ct->symbol, "enum must have a type");
+ return -1;
+ }
+ if (ct->type.type == vt_missing) {
+ /*
+ * Enums normally require a type, but the parser may have an
+ * option to allow missing type, and then we provide a
+ * sensible default.
+ */
+ ct->type.st = fb_int;
+ ct->type.type = vt_scalar_type;
+ } else if (ct->type.type == vt_scalar_type) {
+ ct->type.st = map_scalar_token_type(ct->type.t);
+ } else {
+ /* Spec does not mention boolean type in enum, but we allow it. */
+ error_sym(P, &ct->symbol, "enum type must be a scalar integral type or bool");
+ return -1;
+ }
+ ct->size = sizeof_scalar_type(ct->type.st);
+ ct->align = (uint16_t)ct->size;
+ } else {
+ if (ct->type.type) {
+ error_sym(P, &ct->symbol, "unions cannot have a type, they are always enumerated as ubyte");
+ return -1;
+ }
+ /*
+ * We preprocess unions as enums to get the value assignments.
+ * The type field is not documented, but generated output from
+ * flatc suggests ubyte. We use an injected token to represent
+ * the ubyte type so we do not have to hardcode elsewhere.
+ */
+ ct->type.type = vt_scalar_type;
+ ct->type.st = fb_ubyte;
+ /*
+ * The union field uses the type field and not the offset field
+ * to define its size because type.type is scalar.
+ */
+ ct->size = sizeof_scalar_type(fb_ubyte);
+ ct->align = (uint16_t)ct->size;
+ }
+
+ ct->metadata_flags = process_metadata(P, ct->metadata, fb_f_bit_flags, knowns);
+ if (ct->metadata_flags & fb_f_bit_flags) {
+ bit_flags = 1;
+ index.type = vt_uint;
+ index.u = 0;
+ }
+
+ if (ct->type.st == fb_bool) {
+ index.b = 0;
+ index.type = vt_bool;
+ } else {
+ index.i = 0;
+ index.type = vt_int;
+ if (fb_coerce_scalar_type(P, (fb_symbol_t *)ct, ct->type.st, &index)) {
+ error(P, "internal error: unexpected conversion failure on enum 0 index");
+ return -1;
+ }
+ }
+ old_index = index;
+
+ for (sym = ct->members; sym; sym = sym->link, first = 0) {
+ member = (fb_member_t *)sym;
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ if (old->ident == &P->t_none) {
+ /*
+ * Parser injects `NONE` as the first union member and
+ * it therefore gets index 0. Additional use of NONE
+ * will fail.
+ */
+ error_sym(P, sym, "'NONE' is a predefined value");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ error_sym_2(P, sym, "value already defined here", old);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: enum value type expected");
+ return -1;
+ }
+ /* Enum / union values cannot have metadata. */
+ assert(member->metadata == 0);
+ if (is_union) {
+ if (member->symbol.ident == &P->t_none) {
+ /* Handle implicit NONE specially. */
+ member->type.type = vt_missing;
+ } else if (member->type.type == vt_string_type) {
+ member->size = 0;
+ } else if (member->type.type != vt_type_ref) {
+ if (member->type.type != vt_invalid) {
+ error_sym(P, sym, "union member type must be string or a reference to a table or a struct");
+ member->type.type = vt_invalid;
+ }
+ continue;
+ } else {
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with union member", sym);
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ if (type_sym->kind != fb_is_table && type_sym->kind != fb_is_struct) {
+ error_sym_2(P, sym, "union member type reference must be a table or a struct, defined here", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.type = vt_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ /* Symbols have no size. */
+ member->size = 0;
+ }
+ }
+ }
+ if (!member->value.type && !first) {
+ if (index.type == vt_uint) {
+ if (ct->type.st == fb_long && index.u == UINT64_MAX) {
+ /* Not captured by range check. */
+ error_sym(P, sym, "64-bit unsigned int overflow");
+ }
+ index.u = index.u + 1;
+ } else if (index.type == vt_int && !first) {
+ if (ct->type.st == fb_long && index.i == INT64_MAX) {
+ /* Not captured by range check. */
+ error_sym(P, sym, "64-bit signed int overflow");
+ }
+ index.i = index.i + 1;
+ } else if (index.type == vt_bool && !first) {
+ if (index.b == 1) {
+ error_sym(P, sym, "boolean overflow: cannot enumerate past true");
+ }
+ index.b = 1;
+ }
+ }
+ if (bit_flags) {
+ if (member->value.type) {
+ if (member->value.type != vt_uint) {
+ error_sym(P, sym, "enum value must be unsigned int when used with 'bit_flags'");
+ return -1;
+ } else {
+ index = member->value;
+ }
+ }
+ if (index.u >= sizeof_scalar_type(ct->type.st) * 8) {
+ error_sym(P, sym, "enum value out of range when used with 'bit_flags'");
+ return -1;
+ }
+ member->value.u = UINT64_C(1) << index.u;
+ member->value.type = vt_uint;
+ if (fb_coerce_scalar_type(P, sym, ct->type.st, &member->value)) {
+ /* E.g. enumval = 15 causes signed overflow with short. */
+ error_sym(P, sym, "enum value out of range when used with 'bit_flags'");
+ return -1;
+ }
+ } else {
+ if (member->value.type) {
+ index = member->value;
+ }
+ /*
+ * Captures errors in user assigned values. Also captures
+ * overflow on auto-increment on all types except maximum
+ * representation size, i.e. long or ulong which we handled
+ * above.
+ */
+ if (fb_coerce_scalar_type(P, sym, ct->type.st, &index)) {
+ return -1;
+ }
+ member->value = index;
+ }
+ if (!first && P->opts.ascending_enum) {
+ /* Without ascending enum we also allow duplicate values (but not names). */
+ if ((index.type == vt_uint && index.u <= old_index.u) ||
+ (index.type == vt_int && index.i <= old_index.i)) {
+ if (is_union && old_index.u == 0) {
+ /*
+ * The user explicitly assigned zero, or less, to the first
+ * element (here the second element, after the parser injects
+ * the NONE element).
+ */
+ error_sym(P, sym, "union values must be positive, 0 is reserved for implicit 'NONE'");
+ member->value.type = vt_invalid;
+ return -1;
+ }
+ error_sym(P, sym, "enum values must be in ascending order");
+ member->value.type = vt_invalid;
+ return -1;
+ }
+ if (index.type == vt_bool && index.b <= old_index.b) {
+ error_sym(P, sym, "enum of type bool can only enumerate from false (0) to true (1)");
+ member->value.type = vt_invalid;
+ return -1;
+ }
+ }
+ old_index = index;
+ if (add_to_value_set(&ct->value_set, &member->value)) {
+ if (is_union) {
+ error_sym(P, sym, "union members require unique positive values (0 being reserved for 'NONE'");
+ member->value.type = vt_invalid;
+ return -1;
+ } else {
+ /*
+ * With ascending enums this won't happen, but
+ * otherwise flag secondary values so we can remove them
+ * from inverse name mappings in code gen.
+ */
+ member->symbol.flags |= fb_duplicate;
+ }
+ }
+ if (member->metadata) {
+ error_sym(P, sym, "enum values cannot have attributes");
+ /* Non-fatal. */
+ continue;
+ }
+ }
+ return 0;
+}
+
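+/*
+ * For illustration only (example names, not from any real schema): with
+ * the `bit_flags` attribute the declared or auto-incremented indices
+ * select bit positions rather than values, so
+ *
+ *     enum Flags : ubyte (bit_flags) { A, B, C = 4 }
+ *
+ * yields A = 1, B = 2 and C = 16. Without `bit_flags`, values simply
+ * auto-increment and, when `ascending_enum` is enabled, must be strictly
+ * increasing. For unions the parser injects NONE = 0, so user supplied
+ * values must be unique and strictly positive.
+ */
+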
+static inline int process_union(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ return process_enum(P, ct);
+}
+
+int fb_build_schema(fb_parser_t *P)
+{
+ fb_schema_t *S = &P->schema;
+ fb_symbol_t *sym, *old_sym;
+ fb_name_t *old_name;
+ fb_compound_type_t *ct;
+ fb_attribute_t *a;
+
+ /* Make sure self is visible at this point in time. */
+ assert(ptr_set_exists(&P->schema.visible_schema, &P->schema));
+ for (sym = S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ case fb_is_enum:
+ case fb_is_union:
+ case fb_is_struct:
+ case fb_is_rpc_service:
+ ct = (fb_compound_type_t *)sym;
+ set_type_hash(ct);
+ ct->schema = &P->schema;
+ if (ct->scope && (old_sym = define_fb_symbol(&ct->scope->symbol_index, sym))) {
+ error_sym_2(P, sym, "symbol already defined here", old_sym);
+ }
+ }
+ }
+
+ /*
+ * Known attributes will be pre-defined if not provided by the
+ * user. After that point, all attribute references must be
+ * defined.
+ */
+ for (a = (fb_attribute_t *)S->attributes; a; a = (fb_attribute_t *)a->name.link) {
+ if ((old_name = define_fb_name(&S->root_schema->attribute_index, &a->name))) {
+ /*
+ * Allow attributes to be defined multiple times, including
+ * known attributes.
+ */
+#if 0
+ error_name(P, &a->name, "attribute already defined");
+#endif
+ }
+ }
+ install_known_attributes(P);
+
+ if (!P->opts.hide_later_enum) {
+ for (sym = S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ ct = (fb_compound_type_t *)sym;
+ if (process_enum(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ default:
+ continue;
+ }
+ }
+ }
+
+ /*
+ * Resolve type references both earlier and later than point of
+ * reference. This also supports recursion for tables and unions.
+ */
+ for (sym = S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ ct = (fb_compound_type_t *)sym;
+ if (process_struct(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case fb_is_table:
+ /* Handle after structs and enums. */
+ continue;
+ case fb_is_rpc_service:
+ /*
+ * Also handle rpc_service later like tables, just in case
+ * we allow non-table types in request/response type.
+ */
+ continue;
+ case fb_is_enum:
+ if (P->opts.hide_later_enum) {
+ ct = (fb_compound_type_t *)sym;
+ if (process_enum(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ }
+ break;
+ case fb_is_union:
+ ct = (fb_compound_type_t *)sym;
+ if (process_union(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ default:
+ error_sym(P, sym, "internal error: unexpected symbol at schema level");
+ return -1;
+ }
+ }
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ /*
+ * Structs need two stages: first process symbols, then
+ * analyze for size, alignment, and circular references.
+ */
+ ct = (fb_compound_type_t *)sym;
+ if (ct->type.type != vt_invalid && analyze_struct(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ default:
+ continue;
+ }
+ }
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ /* Only now is the full struct size available. */
+ if (ct->type.type != vt_invalid && process_table(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case fb_is_rpc_service:
+ ct = (fb_compound_type_t *)sym;
+ /* Only now is the full struct size available. */
+ if (ct->type.type != vt_invalid && process_rpc_service(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ /*
+ * Some table attributes depend on attributes on members and
+ * therefore can only be validated after processing.
+ */
+ if (ct->type.type != vt_invalid && validate_table_attr(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ revert_order(&P->schema.ordered_structs);
+ if (!S->root_type.name) {
+ if (P->opts.require_root_type) {
+ error(P, "root type not declared");
+ }
+ } else {
+ sym = S->root_type.type = lookup_type_reference(P,
+ S->root_type.scope, S->root_type.name);
+ if (!sym) {
+ error_ref(P, S->root_type.name, "root type not found");
+ } else if (P->opts.allow_struct_root) {
+ if (sym->kind != fb_is_struct && sym->kind != fb_is_table) {
+ error_ref(P, S->root_type.name, "root type must be a struct or a table");
+ } else {
+ S->root_type.type = sym;
+ }
+ } else {
+ if (sym->kind != fb_is_table) {
+ error_ref(P, S->root_type.name, "root type must be a table");
+ } else {
+ S->root_type.type = sym;
+ }
+ }
+ S->root_type.name = 0;
+ }
+ P->has_schema = !P->failed;
+ return P->failed;
+}
diff --git a/src/compiler/semantics.h b/src/compiler/semantics.h
new file mode 100644
index 0000000..f4b0ec3
--- /dev/null
+++ b/src/compiler/semantics.h
@@ -0,0 +1,12 @@
+#ifndef SCHEMA_H
+#define SCHEMA_H
+
+#include "parser.h"
+
+int __flatcc_fb_build_schema(fb_parser_t *P);
+#define fb_build_schema __flatcc_fb_build_schema
+
+
+fb_scope_t *fb_find_scope(fb_schema_t *S, fb_ref_t *name);
+
+#endif /* SCHEMA_H */
diff --git a/src/compiler/symbols.h b/src/compiler/symbols.h
new file mode 100644
index 0000000..143a785
--- /dev/null
+++ b/src/compiler/symbols.h
@@ -0,0 +1,457 @@
+/* Flatbuffers parser attributes and symbols. */
+
+#ifndef SYMBOLS_H
+#define SYMBOLS_H
+
+#include <stdint.h>
+
+#include "config.h"
+#include "lex/tokens.h"
+#include "hash/hash_table.h"
+#include "hash/ptr_set.h"
+
+typedef struct fb_token fb_token_t;
+typedef struct fb_string fb_string_t;
+typedef struct fb_value fb_value_t;
+typedef struct fb_symbol fb_symbol_t;
+
+typedef struct fb_metadata fb_metadata_t;
+
+typedef struct fb_name fb_name_t;
+typedef fb_symbol_t fb_namespace_t;
+typedef fb_symbol_t fb_ref_t;
+/* Doc is not strictly a symbol, just a chained token list, but close enough. */
+typedef fb_symbol_t fb_doc_t;
+typedef fb_name_t fb_include_t;
+typedef struct fb_attribute fb_attribute_t;
+
+typedef struct fb_member fb_member_t;
+typedef struct fb_compound_type fb_compound_type_t;
+
+typedef struct fb_scope fb_scope_t;
+typedef struct fb_root_schema fb_root_schema_t;
+typedef struct fb_root_type fb_root_type_t;
+typedef struct fb_schema fb_schema_t;
+
+enum {
+ tok_kw_base = LEX_TOK_KW_BASE,
+ tok_kw_bool,
+ tok_kw_byte,
+ tok_kw_char,
+ tok_kw_enum,
+ tok_kw_float32,
+ tok_kw_float64,
+ tok_kw_int,
+ tok_kw_int8,
+ tok_kw_int16,
+ tok_kw_int32,
+ tok_kw_int64,
+ tok_kw_long,
+ tok_kw_null,
+ tok_kw_true,
+ tok_kw_uint,
+ tok_kw_false,
+ tok_kw_float,
+ tok_kw_short,
+ tok_kw_table,
+ tok_kw_ubyte,
+ tok_kw_uint8,
+ tok_kw_uint16,
+ tok_kw_uint32,
+ tok_kw_uint64,
+ tok_kw_ulong,
+ tok_kw_union,
+ tok_kw_double,
+ tok_kw_string,
+ tok_kw_struct,
+ tok_kw_ushort,
+ tok_kw_include,
+ tok_kw_attribute,
+ tok_kw_namespace,
+ tok_kw_root_type,
+ tok_kw_rpc_service,
+ tok_kw_file_extension,
+ tok_kw_file_identifier,
+ LEX_TOK_KW_END,
+ /* Pseudo keywords. */
+ tok_kw_doc_comment
+};
+
+struct fb_token {
+ const char *text;
+ long len;
+ long linenum;
+ long pos;
+ long id;
+};
+
+enum fb_scalar_type {
+ fb_missing_type = 0,
+ fb_ulong,
+ fb_uint,
+ fb_ushort,
+ fb_ubyte,
+ fb_bool,
+ fb_long,
+ fb_int,
+ fb_short,
+ fb_byte,
+ fb_double,
+ fb_float,
+ fb_char,
+};
+
+typedef enum fb_scalar_type fb_scalar_type_t;
+
+static inline size_t sizeof_scalar_type(fb_scalar_type_t st)
+{
+ static const size_t scalar_type_size[] = {
+ 0, 8, 4, 2, 1, 1, 8, 4, 2, 1, 8, 4, 1
+ };
+
+ return scalar_type_size[st];
+}
+
+enum fb_value_type {
+ vt_missing = 0,
+ vt_invalid = 1,
+ vt_null,
+ vt_string,
+ vt_float,
+ vt_int,
+ vt_uint,
+ vt_bool,
+ vt_vector_type,
+ vt_scalar_type,
+ vt_vector_string_type,
+ vt_string_type,
+ vt_vector_type_ref,
+ vt_type_ref,
+ vt_name_ref,
+ vt_compound_type_ref,
+ vt_vector_compound_type_ref,
+ vt_fixed_array_type,
+ vt_fixed_array_type_ref,
+ vt_fixed_array_string_type,
+ vt_fixed_array_compound_type_ref
+};
+
+struct fb_string {
+ char *s;
+ /* printf statements rely on this being int. */
+ int len;
+};
+
+struct fb_value {
+ union {
+ fb_string_t s;
+ double f;
+ int64_t i;
+ uint64_t u;
+ uint8_t b;
+ fb_token_t *t;
+ fb_compound_type_t *ct;
+ fb_scalar_type_t st;
+ fb_ref_t *ref;
+ };
+ unsigned short type;
+ uint32_t len;
+};
+
+enum fb_kind {
+ fb_is_table,
+ fb_is_struct,
+ fb_is_rpc_service,
+ fb_is_enum,
+ fb_is_union,
+ fb_is_member
+};
+
+/*
+ * Used for white, gray, black graph coloring while detecting circular
+ * references.
+ */
+enum fb_symbol_flags {
+ fb_circular_open = 1,
+ fb_circular_closed = 2,
+ fb_duplicate = 4,
+ fb_indexed = 8,
+};
+
+enum fb_member_flags {
+ fb_fm_optional = 1
+};
+
+/*
+ * We keep the link first in all structs so that we can use a
+ * generic list reverse function after all symbols have been pushed
+ * within a scope.
+ */
+struct fb_symbol {
+ fb_symbol_t *link;
+ fb_token_t *ident;
+ uint16_t kind;
+ uint16_t flags;
+};
+
+struct fb_name {
+ fb_name_t *link;
+ fb_value_t name;
+};
+
+#define fb_name_table __flatcc_fb_name_table
+#define fb_value_set __flatcc_fb_value_set
+#define fb_symbol_table __flatcc_fb_symbol_table
+#define fb_scope_table __flatcc_fb_scope_table
+
+DECLARE_HASH_TABLE(fb_name_table, fb_name_t *)
+DECLARE_HASH_TABLE(fb_schema_table, fb_schema_t *)
+DECLARE_HASH_TABLE(fb_value_set, fb_value_t *)
+DECLARE_HASH_TABLE(fb_symbol_table, fb_symbol_t *)
+DECLARE_HASH_TABLE(fb_scope_table, fb_scope_t *)
+
+struct fb_member {
+ fb_symbol_t symbol;
+ /* Struct or table field type, or method response type. */
+ fb_value_t type;
+ /* Method request type only used for methods. */
+ fb_value_t req_type;
+ fb_value_t value;
+ fb_metadata_t *metadata;
+ fb_doc_t *doc;
+ uint16_t metadata_flags;
+ /*
+ * `align`, `offset` are for structs only. 64-bit allows for
+ * dynamically configured 64-bit file offsets. Align is restricted to
+ * at most 256 and must be a power of 2.
+ */
+ uint16_t align;
+ uint16_t flags;
+ uint64_t offset;
+ uint64_t size;
+
+ /* `id` is for table fields only. */
+ uint64_t id;
+ /*
+ * Resolved `nested_flatbuffer` attribute type. Always a table if
+ * set, and only on struct and table fields.
+ */
+ fb_compound_type_t *nest;
+ /* Used to generate table fields in sorted order. */
+ fb_member_t *order;
+
+ /*
+ * Used by code generators. Only valid during export and may hold
+ * garbage from a previous export.
+ */
+ size_t export_index;
+};
+
+struct fb_metadata {
+ fb_metadata_t *link;
+ fb_token_t *ident;
+ fb_value_t value;
+};
+
+struct fb_compound_type {
+ fb_symbol_t symbol;
+ /* `scope` may span multiple input files, but has a unique namespace. */
+ fb_scope_t *scope;
+ /* Identifies the schema the symbol belongs to. */
+ fb_schema_t *schema;
+ fb_symbol_t *members;
+ fb_member_t *ordered_members;
+ fb_member_t *primary_key;
+ fb_metadata_t *metadata;
+ fb_doc_t *doc;
+ fb_value_t type;
+ fb_symbol_table_t index;
+ /* Only for enums. */
+ fb_value_set_t value_set;
+ /* FNV-1a 32 bit hash of fully qualified name, accidental 0 maps to hash(""). */
+ uint32_t type_hash;
+ uint16_t metadata_flags;
+ /* `count` is for tables only. */
+ uint64_t count;
+ /* `align`, `size` is for structs only. */
+ uint16_t align;
+ uint64_t size;
+ /* Sort structs with forward references. */
+ fb_compound_type_t *order;
+ /*
+ * Used by code generators. Only valid during export and may hold
+ * garbage from a previous export.
+ */
+ size_t export_index;
+};
+
+enum fb_known_attributes {
+ fb_attr_unknown = 0,
+ fb_attr_id = 1,
+ fb_attr_deprecated = 2,
+ fb_attr_original_order = 3,
+ fb_attr_force_align = 4,
+ fb_attr_bit_flags = 5,
+ fb_attr_nested_flatbuffer = 6,
+ fb_attr_key = 7,
+ fb_attr_required = 8,
+ fb_attr_hash = 9,
+ fb_attr_base64 = 10,
+ fb_attr_base64url = 11,
+ fb_attr_primary_key = 12,
+ fb_attr_sorted = 13,
+ KNOWN_ATTR_COUNT
+};
+
+enum fb_known_attribute_flags {
+ fb_f_unknown = 1 << fb_attr_unknown,
+ fb_f_id = 1 << fb_attr_id,
+ fb_f_deprecated = 1 << fb_attr_deprecated,
+ fb_f_original_order = 1 << fb_attr_original_order,
+ fb_f_force_align = 1 << fb_attr_force_align,
+ fb_f_bit_flags = 1 << fb_attr_bit_flags,
+ fb_f_nested_flatbuffer = 1 << fb_attr_nested_flatbuffer,
+ fb_f_key = 1 << fb_attr_key,
+ fb_f_required = 1 << fb_attr_required,
+ fb_f_hash = 1 << fb_attr_hash,
+ fb_f_base64 = 1 << fb_attr_base64,
+ fb_f_base64url = 1 << fb_attr_base64url,
+ fb_f_primary_key = 1 << fb_attr_primary_key,
+ fb_f_sorted = 1 << fb_attr_sorted,
+};
+
+struct fb_attribute {
+ fb_name_t name;
+ unsigned int known;
+};
+
+struct fb_scope {
+ fb_ref_t *name;
+ fb_symbol_table_t symbol_index;
+ fb_string_t prefix;
+};
+
+struct fb_root_schema {
+ fb_scope_table_t scope_index;
+ fb_name_table_t attribute_index;
+ fb_schema_table_t include_index;
+ int include_count;
+ int include_depth;
+ size_t total_source_size;
+};
+
+struct fb_root_type {
+ /* Root decl. before symbol is visible. */
+ fb_ref_t *name;
+ /* Resolved symbol. */
+ fb_symbol_t *type;
+ fb_scope_t *scope;
+};
+
+/*
+ * We store the parsed structure as token references. Tokens are stored
+ * in a single array pointing into the source buffer.
+ *
+ * Strings may contain multiple tokens when holding control characters
+ * and line breaks, but for our purposes the first string part is
+ * sufficient.
+ */
+
+struct fb_schema {
+ fb_include_t *includes;
+ fb_name_t *attributes;
+ fb_value_t file_identifier;
+ fb_value_t file_extension;
+ fb_symbol_t *symbols;
+ /* Topologically sorted structs. */
+ fb_compound_type_t *ordered_structs;
+ fb_root_type_t root_type;
+ fb_root_schema_t *root_schema;
+ /* Only used if schema is root. */
+ fb_root_schema_t root_schema_instance;
+
+ /* An optional scope prefix for generated code. */
+ fb_string_t prefix;
+
+ /* The uppercase basename in a form that can be used as an index. */
+ fb_name_t name;
+
+ /* These are allocated strings that must be freed. */
+
+ /* Name of schema being processed without path or default extension. */
+ char *basename;
+ /* Uppercase basename for codegen and for case insensitive file inclusion check. */
+ char *basenameup;
+ /* Basename with extension. */
+ char *errorname;
+
+ /*
+ * The dependency schemas visible to this schema (includes self).
+ * Compound symbols have a link to their schema which can be checked
+ * against this set to see if the symbol is visible in this
+ * context.
+ */
+ ptr_set_t visible_schema;
+};
+
+/*
+ * Helpers to ensure a symbol is actually visible because a scope
+ * (namespace) may be extended when a parent includes another file
+ * first.
+ */
+static inline fb_compound_type_t *get_enum_if_visible(fb_schema_t *schema, fb_symbol_t *sym)
+{
+ fb_compound_type_t *ct = 0;
+
+ switch (sym->kind) {
+ case fb_is_union:
+ /* Fall through. */
+ case fb_is_enum:
+ ct = (fb_compound_type_t *)sym;
+ if (!ptr_set_exists(&schema->visible_schema, ct->schema)) {
+ ct = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return ct;
+}
+
+static inline fb_compound_type_t *get_compound_if_visible(fb_schema_t *schema, fb_symbol_t *sym)
+{
+ fb_compound_type_t *ct = 0;
+
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_rpc_service:
+ case fb_is_union:
+ case fb_is_enum:
+ ct = (fb_compound_type_t *)sym;
+ if (!ptr_set_exists(&schema->visible_schema, ct->schema)) {
+ ct = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return ct;
+}
+
+/* Constants are specific to 32-bit FNV-1a hash. It is important to use unsigned integers. */
+static inline uint32_t fb_hash_fnv1a_32_init(void)
+{
+ return 2166136261UL;
+}
+
+static inline uint32_t fb_hash_fnv1a_32_append(uint32_t hash, const char *data, size_t len)
+{
+ while (len--) {
+ hash ^= *(uint8_t *)data++;
+ hash = hash * 16777619UL;
+ }
+ return hash;
+}
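+
+/*
+ * Usage sketch (illustrative only): the type hash of a fully qualified
+ * name such as "MyGame.Monster" would be computed as
+ *
+ *     uint32_t hash = fb_hash_fnv1a_32_init();
+ *     hash = fb_hash_fnv1a_32_append(hash, "MyGame.Monster", 14);
+ *
+ * where an accidental 0 result is mapped to hash("") as noted above.
+ */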
+
+#endif /* SYMBOLS_H */
diff --git a/src/runtime/CMakeLists.txt b/src/runtime/CMakeLists.txt
new file mode 100644
index 0000000..127e2a4
--- /dev/null
+++ b/src/runtime/CMakeLists.txt
@@ -0,0 +1,16 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/include"
+)
+
+add_library(flatccrt
+ builder.c
+ emitter.c
+ refmap.c
+ verifier.c
+ json_parser.c
+ json_printer.c
+)
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatccrt DESTINATION ${lib_dir})
+endif()
diff --git a/src/runtime/builder.c b/src/runtime/builder.c
new file mode 100644
index 0000000..b62c2b6
--- /dev/null
+++ b/src/runtime/builder.c
@@ -0,0 +1,2035 @@
+/*
+ * Code generator for C, building FlatBuffers.
+ *
+ * There are several approaches, some light, some requiring a library,
+ * some with vectored I/O etc.
+ *
+ * Here we focus on a reasonable balance of light code and efficiency.
+ *
+ * Builder code is generated to a separate file that includes the
+ * generated read-only code.
+ *
+ * Mutable buffers are not supported in this version.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_builder.h"
+#include "flatcc/flatcc_emitter.h"
+
+/*
+ * `check` is designed to handle incorrect use errors that can be
+ * ignored in production of a tested product.
+ *
+ * `check_error` fails if condition is false and is designed to return an
+ * error code in production.
+ */
+
+#if FLATCC_BUILDER_ASSERT_ON_ERROR
+#define check(cond, reason) FLATCC_BUILDER_ASSERT(cond, reason)
+#else
+#define check(cond, reason) ((void)0)
+#endif
+
+#if FLATCC_BUILDER_SKIP_CHECKS
+#define check_error(cond, err, reason) ((void)0)
+#else
+#define check_error(cond, err, reason) if (!(cond)) { check(cond, reason); return err; }
+#endif
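+
+/*
+ * For example, check_error(count <= max_count, -1, "vector too large")
+ * returns -1 from the enclosing function when the condition fails, and
+ * compiles to a no-op when FLATCC_BUILDER_SKIP_CHECKS is set.
+ */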
+
+/* `strnlen` not widely supported. */
+static inline size_t pstrnlen(const char *s, size_t max_len)
+{
+ const char *end = memchr(s, 0, max_len);
+ return end ? (size_t)(end - s) : max_len;
+}
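+/* For example, pstrnlen("abc", 2) == 2 and pstrnlen("abc", 8) == 3. */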
+#undef strnlen
+#define strnlen pstrnlen
+
+/* Padding can be up to 255 zeroes, and 1 zero string termination byte.
+ * When two paddings are combined at nested buffers, we need twice that.
+ * Visible to emitter so it can test for zero padding in iov. */
+const uint8_t flatcc_builder_padding_base[512] = { 0 };
+#define _pad flatcc_builder_padding_base
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define write_uoffset __flatbuffers_uoffset_write_to_pe
+#define write_voffset __flatbuffers_voffset_write_to_pe
+#define write_identifier __flatbuffers_uoffset_write_to_pe
+#define write_utype __flatbuffers_utype_write_to_pe
+
+#define field_size sizeof(uoffset_t)
+#define max_offset_count FLATBUFFERS_COUNT_MAX(field_size)
+#define union_size sizeof(flatcc_builder_union_ref_t)
+#define max_union_count FLATBUFFERS_COUNT_MAX(union_size)
+#define utype_size sizeof(utype_t)
+#define max_utype_count FLATBUFFERS_COUNT_MAX(utype_size)
+
+#define max_string_len FLATBUFFERS_COUNT_MAX(1)
+#define identifier_size FLATBUFFERS_IDENTIFIER_SIZE
+
+
+#define iovec_t flatcc_iovec_t
+#define frame_size sizeof(__flatcc_builder_frame_t)
+#define frame(x) (B->frame[0].x)
+
+
+/* `align` must be a power of 2. */
+static inline uoffset_t alignup_uoffset(uoffset_t x, size_t align)
+{
+ return (x + (uoffset_t)align - 1u) & ~((uoffset_t)align - 1u);
+}
+
+static inline size_t alignup_size(size_t x, size_t align)
+{
+ return (x + align - 1u) & ~(align - 1u);
+}
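+/* For example, alignup_size(13, 8) == 16 and alignup_size(16, 8) == 16. */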
+
+
+typedef struct vtable_descriptor vtable_descriptor_t;
+struct vtable_descriptor {
+ /* Where the vtable is emitted. */
+ flatcc_builder_ref_t vt_ref;
+ /* Which buffer it was emitted to. */
+ uoffset_t nest_id;
+ /* Where the vtable is cached. */
+ uoffset_t vb_start;
+ /* Hash table collision chain. */
+ uoffset_t next;
+};
+
+typedef struct flatcc_iov_state flatcc_iov_state_t;
+struct flatcc_iov_state {
+ size_t len;
+ int count;
+ flatcc_iovec_t iov[FLATCC_IOV_COUNT_MAX];
+};
+
+#define iov_state_t flatcc_iov_state_t
+
+/* This assumes `iov_state_t iov;` has been declared in scope */
+#define push_iov_cond(base, size, cond) if ((size) > 0 && (cond)) { iov.len += size;\
+ iov.iov[iov.count].iov_base = (void *)(base); iov.iov[iov.count].iov_len = (size); ++iov.count; }
+#define push_iov(base, size) push_iov_cond(base, size, 1)
+#define init_iov() { iov.len = 0; iov.count = 0; }
+
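+/*
+ * The macros above are used in the emit paths below roughly as:
+ *
+ *     iov_state_t iov;
+ *     init_iov();
+ *     push_iov(data, size);
+ *     push_iov(_pad, pad);
+ *     ref = emit_front(B, &iov);
+ */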
+
+int flatcc_builder_default_alloc(void *alloc_context, iovec_t *b, size_t request, int zero_fill, int hint)
+{
+ void *p;
+ size_t n;
+
+ (void)alloc_context;
+
+ if (request == 0) {
+ if (b->iov_base) {
+ FLATCC_BUILDER_FREE(b->iov_base);
+ b->iov_base = 0;
+ b->iov_len = 0;
+ }
+ return 0;
+ }
+ switch (hint) {
+ case flatcc_builder_alloc_ds:
+ n = 256;
+ break;
+ case flatcc_builder_alloc_ht:
+ /* Should be exact size, or space is just wasted. */
+ n = request;
+ break;
+ case flatcc_builder_alloc_fs:
+ n = sizeof(__flatcc_builder_frame_t) * 8;
+ break;
+ case flatcc_builder_alloc_us:
+ n = 64;
+ break;
+ default:
+ /*
+ * We have many small structures - e.g. the vs stack for tables with
+ * few elements, and few offset fields in the patch log. No need to
+ * overallocate in case of busy small messages.
+ */
+ n = 32;
+ break;
+ }
+ while (n < request) {
+ n *= 2;
+ }
+ if (request <= b->iov_len && b->iov_len / 2 >= n) {
+ /* Add hysteresis to shrink. */
+ return 0;
+ }
+ if (!(p = FLATCC_BUILDER_REALLOC(b->iov_base, n))) {
+ return -1;
+ }
+ /* Realloc might also shrink. */
+ if (zero_fill && b->iov_len < n) {
+ memset((uint8_t *)p + b->iov_len, 0, n - b->iov_len);
+ }
+ b->iov_base = p;
+ b->iov_len = n;
+ return 0;
+}
+
+#define T_ptr(base, pos) ((void *)((uint8_t *)(base) + (uoffset_t)(pos)))
+#define ds_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_ds].iov_base, (pos)))
+#define vs_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vs].iov_base, (pos)))
+#define pl_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_pl].iov_base, (pos)))
+#define us_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_us].iov_base, (pos)))
+#define vd_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vd].iov_base, (pos)))
+#define vb_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vb].iov_base, (pos)))
+#define vs_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_vs].iov_base))
+#define pl_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_pl].iov_base))
+#define us_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_us].iov_base))
+
+#define table_limit (FLATBUFFERS_VOFFSET_MAX - field_size + 1)
+#define data_limit (FLATBUFFERS_UOFFSET_MAX - field_size + 1)
+
+#define set_identifier(id) memcpy(&B->identifier, (id) ? (void *)(id) : (void *)_pad, identifier_size)
+
+/* Must also return true when no buffer has been started. */
+#define is_top_buffer(B) (B->nest_id == 0)
+
+/*
+ * Tables use a stack representation better suited for quickly adding
+ * fields to tables, but it must occasionally be refreshed following
+ * reallocation or reentry from a child frame.
+ */
+static inline void refresh_ds(flatcc_builder_t *B, uoffset_t type_limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ B->ds = ds_ptr(B->ds_first);
+ B->ds_limit = (uoffset_t)buf->iov_len - B->ds_first;
+ /*
+ * So we don't allocate outside the table's representation size, nor our
+ * current buffer size.
+ */
+ if (B->ds_limit > type_limit) {
+ B->ds_limit = type_limit;
+ }
+ /* So exit frame can refresh fast. */
+ frame(type_limit) = type_limit;
+}
+
+static int reserve_ds(flatcc_builder_t *B, size_t need, uoffset_t limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ if (B->alloc(B->alloc_context, buf, B->ds_first + need, 1, flatcc_builder_alloc_ds)) {
+ return -1;
+ }
+ refresh_ds(B, limit);
+ return 0;
+}
+
+/*
+ * Make sure there is always an extra zero termination on stack
+ * even if it isn't emitted such that string updates may count
+ * on zero termination being present always.
+ */
+static inline void *push_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ size_t offset;
+
+ offset = B->ds_offset;
+ if ((B->ds_offset += size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, data_limit)) {
+ return 0;
+ }
+ }
+ return B->ds + offset;
+}
+
+static inline void unpush_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ B->ds_offset -= size;
+ memset(B->ds + B->ds_offset, 0, size);
+}
+
+static inline void *push_ds_copy(flatcc_builder_t *B, const void *data, uoffset_t size)
+{
+ void *p;
+
+ if (!(p = push_ds(B, size))) {
+ return 0;
+ }
+ memcpy(p, data, size);
+ return p;
+}
+
+static inline void *push_ds_field(flatcc_builder_t *B, uoffset_t size, uint16_t align, voffset_t id)
+{
+ uoffset_t offset;
+
+ /*
+ * We calculate table field alignment relative to the first entry, not
+ * the header field with the vtable offset.
+ *
+ * Note: >= comparison handles special case where B->ds is not
+ * allocated yet and size is 0 so the return value would be mistaken
+ * for an error.
+ */
+ offset = alignup_uoffset(B->ds_offset, align);
+ if ((B->ds_offset = offset + size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ return B->ds + offset;
+}
+
+static inline void *push_ds_offset_field(flatcc_builder_t *B, voffset_t id)
+{
+ uoffset_t offset;
+
+ offset = alignup_uoffset(B->ds_offset, field_size);
+ if ((B->ds_offset = offset + field_size) > B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ *B->pl++ = (flatbuffers_voffset_t)offset;
+ return B->ds + offset;
+}
+
+static inline void *reserve_buffer(flatcc_builder_t *B, int alloc_type, size_t used, size_t need, int zero_init)
+{
+ iovec_t *buf = B->buffers + alloc_type;
+
+ if (used + need > buf->iov_len) {
+ if (B->alloc(B->alloc_context, buf, used + need, zero_init, alloc_type)) {
+ check(0, "memory allocation failed");
+ return 0;
+ }
+ }
+ return (void *)((size_t)buf->iov_base + used);
+}
+
+static inline int reserve_fields(flatcc_builder_t *B, int count)
+{
+ size_t used, need;
+
+ /* Provide faster stack operations for common table operations. */
+ used = frame(container.table.vs_end) + frame(container.table.id_end) * sizeof(voffset_t);
+ need = (size_t)(count + 2) * sizeof(voffset_t);
+ if (!(B->vs = reserve_buffer(B, flatcc_builder_alloc_vs, used, need, 1))) {
+ return -1;
+ }
+ /* Move past header for convenience. */
+ B->vs += 2;
+ used = frame(container.table.pl_end);
+ /* Add one to handle special case of first table being empty. */
+ need = (size_t)count * sizeof(*(B->pl)) + 1;
+ if (!(B->pl = reserve_buffer(B, flatcc_builder_alloc_pl, used, need, 0))) {
+ return -1;
+ }
+ return 0;
+}
+
+static int alloc_ht(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ size_t size, k;
+ /* Allocate null entry so we can check for return errors. */
+ FLATCC_ASSERT(B->vd_end == 0);
+ if (!reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0)) {
+ return -1;
+ }
+ B->vd_end = sizeof(vtable_descriptor_t);
+ size = field_size * FLATCC_BUILDER_MIN_HASH_COUNT;
+ if (B->alloc(B->alloc_context, buf, size, 1, flatcc_builder_alloc_ht)) {
+ return -1;
+ }
+ while (size * 2 <= buf->iov_len) {
+ size *= 2;
+ }
+ size /= field_size;
+ for (k = 0; (((size_t)1) << k) < size; ++k) {
+ }
+ B->ht_width = k;
+ return 0;
+}
+
+static inline uoffset_t *lookup_ht(flatcc_builder_t *B, uint32_t hash)
+{
+ uoffset_t *T;
+
+ if (B->ht_width == 0) {
+ if (alloc_ht(B)) {
+ return 0;
+ }
+ }
+ T = B->buffers[flatcc_builder_alloc_ht].iov_base;
+
+ return &T[FLATCC_BUILDER_BUCKET_VT_HASH(hash, B->ht_width)];
+}
+
+void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ if (B->ht_width == 0) {
+ return;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ /* Reserve the null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ B->vb_end = 0;
+}
+
+int flatcc_builder_custom_init(flatcc_builder_t *B,
+ flatcc_builder_emit_fun *emit, void *emit_context,
+ flatcc_builder_alloc_fun *alloc, void *alloc_context)
+{
+ /*
+ * Do not allocate anything here. Only the required buffers will be
+ * allocated. For simple struct buffers, no allocation is required
+ * at all.
+ */
+ memset(B, 0, sizeof(*B));
+
+ if (emit == 0) {
+ B->is_default_emitter = 1;
+ emit = flatcc_emitter;
+ emit_context = &B->default_emit_context;
+ }
+ if (alloc == 0) {
+ alloc = flatcc_builder_default_alloc;
+ }
+ B->alloc_context = alloc_context;
+ B->alloc = alloc;
+ B->emit_context = emit_context;
+ B->emit = emit;
+ return 0;
+}
+
+int flatcc_builder_init(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_init(B, 0, 0, 0, 0);
+}
+
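+/*
+ * Typical lifecycle (an illustrative sketch, not part of the library API;
+ * see flatcc_builder.h for the full interface):
+ */
+#if 0
+ flatcc_builder_t builder;
+
+ flatcc_builder_init(&builder);
+ /* ... build one or more buffers ... */
+ flatcc_builder_reset(&builder); /* keep internal allocations for the next buffer */
+ /* ... build again ... */
+ flatcc_builder_clear(&builder); /* release all internal buffers */
+#endif
+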
+int flatcc_builder_custom_reset(flatcc_builder_t *B, int set_defaults, int reduce_buffers)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ if (buf->iov_base) {
+ /* Don't try to reduce the hash table. */
+ if (i != flatcc_builder_alloc_ht &&
+ reduce_buffers && B->alloc(B->alloc_context, buf, 1, 1, i)) {
+ return -1;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ } else {
+ FLATCC_ASSERT(buf->iov_len == 0);
+ }
+ }
+ B->vb_end = 0;
+ if (B->vd_end > 0) {
+ /* Reset past null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ }
+ B->min_align = 0;
+ B->emit_start = 0;
+ B->emit_end = 0;
+ B->level = 0;
+ B->limit_level = 0;
+ B->ds_offset = 0;
+ B->ds_limit = 0;
+ B->nest_count = 0;
+ B->nest_id = 0;
+ /* Needed for correct offset calculation. */
+ B->ds = B->buffers[flatcc_builder_alloc_ds].iov_base;
+ B->pl = B->buffers[flatcc_builder_alloc_pl].iov_base;
+ B->vs = B->buffers[flatcc_builder_alloc_vs].iov_base;
+ B->frame = 0;
+ if (set_defaults) {
+ B->vb_flush_limit = 0;
+ B->max_level = 0;
+ B->disable_vt_clustering = 0;
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_reset(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_reset(B->refmap);
+ }
+ return 0;
+}
+
+int flatcc_builder_reset(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_reset(B, 0, 0);
+}
+
+void flatcc_builder_clear(flatcc_builder_t *B)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ B->alloc(B->alloc_context, buf, 0, 0, i);
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_clear(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_clear(B->refmap);
+ }
+ memset(B, 0, sizeof(*B));
+}
+
+static inline void set_min_align(flatcc_builder_t *B, uint16_t align)
+{
+ if (B->min_align < align) {
+ B->min_align = align;
+ }
+}
+
+/*
+ * It's a max, but the minimum viable alignment is the largest observed
+ * alignment requirement and no larger.
+ */
+static inline void get_min_align(uint16_t *align, uint16_t b)
+{
+ if (*align < b) {
+ *align = b;
+ }
+}
+
+void *flatcc_builder_enter_user_frame_ptr(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return frame;
+}
+
+size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return B->user_frame_offset;
+}
+
+
+size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B)
+{
+ size_t *hdr;
+
+ FLATCC_ASSERT(B->user_frame_offset > 0);
+
+ hdr = us_ptr(B->user_frame_offset);
+ B->user_frame_end = B->user_frame_offset - sizeof(size_t);
+ return B->user_frame_offset = hdr[-1];
+}
+
+size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle)
+{
+ FLATCC_ASSERT(B->user_frame_offset >= handle);
+
+ B->user_frame_offset = handle;
+ return flatcc_builder_exit_user_frame(B);
+}
+
+size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B)
+{
+ return B->user_frame_offset;
+}
+
+void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle)
+{
+ return us_ptr(handle);
+}
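+
+/*
+ * Illustrative pairing (`my_state_t` is a made-up example type): a caller
+ * can reserve scratch space that survives nested builder operations with
+ *
+ *     size_t handle = flatcc_builder_enter_user_frame(B, sizeof(my_state_t));
+ *     my_state_t *state = flatcc_builder_get_user_frame_ptr(B, handle);
+ *     ...
+ *     flatcc_builder_exit_user_frame_at(B, handle);
+ */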
+
+static int enter_frame(flatcc_builder_t *B, uint16_t align)
+{
+ if (++B->level > B->limit_level) {
+ if (B->max_level > 0 && B->level > B->max_level) {
+ return -1;
+ }
+ if (!(B->frame = reserve_buffer(B, flatcc_builder_alloc_fs,
+ (size_t)(B->level - 1) * frame_size, frame_size, 0))) {
+ return -1;
+ }
+ B->limit_level = (int)(B->buffers[flatcc_builder_alloc_fs].iov_len / frame_size);
+ if (B->max_level > 0 && B->max_level < B->limit_level) {
+ B->limit_level = B->max_level;
+ }
+ } else {
+ ++B->frame;
+ }
+ frame(ds_offset) = B->ds_offset;
+ frame(align) = B->align;
+ B->align = align;
+ /* Note: do not assume padding before first has been allocated! */
+ frame(ds_first) = B->ds_first;
+ frame(type_limit) = data_limit;
+ B->ds_first = alignup_uoffset(B->ds_first + B->ds_offset, 8);
+ B->ds_offset = 0;
+ return 0;
+}
+
+static inline void exit_frame(flatcc_builder_t *B)
+{
+ memset(B->ds, 0, B->ds_offset);
+ B->ds_offset = frame(ds_offset);
+ B->ds_first = frame(ds_first);
+ refresh_ds(B, frame(type_limit));
+
+ /*
+ * Restore local alignment: e.g. a table should not change alignment
+ * because a child table was just created elsewhere in the buffer,
+ * but the overall alignment (min align), should be aware of it.
+ * Each buffer has its own min align that then migrates up without
+ * being affected by sibling or child buffers.
+ */
+ set_min_align(B, B->align);
+ B->align = frame(align);
+
+ --B->frame;
+ --B->level;
+}
+
+static inline uoffset_t front_pad(flatcc_builder_t *B, uoffset_t size, uint16_t align)
+{
+ return (uoffset_t)(B->emit_start - (flatcc_builder_ref_t)size) & (align - 1u);
+}
+
+static inline uoffset_t back_pad(flatcc_builder_t *B, uint16_t align)
+{
+ return (uoffset_t)(B->emit_end) & (align - 1u);
+}
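+/* E.g. with B->emit_end == 6 and align == 4, back_pad returns 2 so the back of the buffer ends on a 4 byte boundary. */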
+
+static inline flatcc_builder_ref_t emit_front(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ /*
+ * We might have overflow when including headers, but without
+ * headers we should have checks to prevent overflow in the
+ * uoffset_t range, hence we subtract 16 to be safe. With that
+ * guarantee we can also make a safe check on the soffset_t range.
+ *
+ * We only allow buffers half the theoretical size of
+ * FLATBUFFERS_UOFFSET_MAX so we can safely use signed references.
+ *
+ * NOTE: the vtable's vt_offset field is signed, and the check in create
+ * table only ensures the signed limit. The check would fail if the
+ * total buffer size could grow beyond UOFFSET_MAX, and we prevent
+ * that by limiting the lower end to SOFFSET_MIN, and the upper end
+ * at emit_back to SOFFSET_MAX.
+ */
+ ref = B->emit_start - (flatcc_builder_ref_t)iov->len;
+ if ((iov->len > 16 && iov->len - 16 > FLATBUFFERS_UOFFSET_MAX) || ref >= B->emit_start) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return B->emit_start = ref;
+}
+
+static inline flatcc_builder_ref_t emit_back(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ ref = B->emit_end;
+ B->emit_end = ref + (flatcc_builder_ref_t)iov->len;
+ /*
+ * Similar to emit_front check, but since we only emit vtables and
+ * padding at the back, we are not concerned with iov->len overflow,
+ * only total buffer overflow.
+ *
+ * With this check, vtable soffset references at table header can
+ * still overflow in extreme cases, so this must be checked
+ * separately.
+ */
+ if (B->emit_end < ref) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ /*
+ * Back references always return ref + 1 because ref == 0 is valid and
+ * should not be mistaken for an error. vtables understand this.
+ */
+ return ref + 1;
+}
+
+static int align_to_block(flatcc_builder_t *B, uint16_t *align, uint16_t block_align, int is_nested)
+{
+ size_t end_pad;
+ iov_state_t iov;
+
+ block_align = block_align ? block_align : B->block_align ? B->block_align : 1;
+ get_min_align(align, field_size);
+ get_min_align(align, block_align);
+ /* Pad end of buffer to multiple. */
+ if (!is_nested) {
+ end_pad = back_pad(B, block_align);
+ if (end_pad) {
+ init_iov();
+ push_iov(_pad, end_pad);
+ if (0 == emit_back(B, &iov)) {
+ check(0, "emitter rejected buffer content");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
+ uint16_t block_align,
+ const void *data, size_t size, uint16_t align, flatcc_builder_buffer_flags_t flags)
+{
+ uoffset_t size_field, pad;
+ iov_state_t iov;
+ int with_size = (flags & flatcc_builder_with_size) != 0;
+
+ if (align_to_block(B, &align, block_align, !is_top_buffer(B))) {
+ return 0;
+ }
+ pad = front_pad(B, (uoffset_t)(size + (with_size ? field_size : 0)), align);
+ write_uoffset(&size_field, (uoffset_t)size + pad);
+ init_iov();
+ /* Add ubyte vector size header if nested buffer. */
+ push_iov_cond(&size_field, field_size, !is_top_buffer(B));
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align,
+ flatcc_builder_ref_t object_ref, uint16_t align, flatcc_builder_buffer_flags_t flags)
+{
+ flatcc_builder_ref_t buffer_ref;
+ uoffset_t header_pad, id_size = 0;
+ uoffset_t object_offset, buffer_size, buffer_base;
+ iov_state_t iov;
+ flatcc_builder_identifier_t id_out = 0;
+ int is_nested = (flags & flatcc_builder_is_nested) != 0;
+ int with_size = (flags & flatcc_builder_with_size) != 0;
+
+ if (align_to_block(B, &align, block_align, is_nested)) {
+ return 0;
+ }
+ set_min_align(B, align);
+ if (identifier) {
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == identifier_size);
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == field_size);
+ memcpy(&id_out, identifier, identifier_size);
+ id_out = __flatbuffers_thash_read_from_le(&id_out);
+ write_identifier(&id_out, id_out);
+ }
+ id_size = id_out ? identifier_size : 0;
+ header_pad = front_pad(B, field_size + id_size + (uoffset_t)(with_size ? field_size : 0), align);
+ init_iov();
+ /* ubyte vectors size field wrapping nested buffer. */
+ push_iov_cond(&buffer_size, field_size, is_nested || with_size);
+ push_iov(&object_offset, field_size);
+ /* Identifiers are not always present in buffer. */
+ push_iov(&id_out, id_size);
+ push_iov(_pad, header_pad);
+ buffer_base = (uoffset_t)B->emit_start - (uoffset_t)iov.len + (uoffset_t)((is_nested || with_size) ? field_size : 0);
+ if (is_nested) {
+ write_uoffset(&buffer_size, (uoffset_t)B->buffer_mark - buffer_base);
+ } else {
+ /* Also include clustered vtables. */
+ write_uoffset(&buffer_size, (uoffset_t)B->emit_end - buffer_base);
+ }
+ write_uoffset(&object_offset, (uoffset_t)object_ref - buffer_base);
+ if (0 == (buffer_ref = emit_front(B, &iov))) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return buffer_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B, const void *data, size_t size, uint16_t align)
+{
+ size_t pad;
+ iov_state_t iov;
+
+ check(align >= 1, "align cannot be 0");
+ set_min_align(B, align);
+ pad = front_pad(B, (uoffset_t)size, align);
+ init_iov();
+ push_iov(data, size);
+ /*
+ * Normally structs will already be a multiple of their alignment,
+ * so this padding will not likely be emitted.
+ */
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+int flatcc_builder_start_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align, flatcc_builder_buffer_flags_t flags)
+{
+ /*
+ * This saves the parent `min_align` in the align field since we
+ * shouldn't use that for the current buffer. `exit_frame`
+ * automatically aggregates align up, so it is updated when the
+ * buffer frame exits.
+ */
+ if (enter_frame(B, B->min_align)) {
+ return -1;
+ }
+ /* B->align now has parent min_align, and child frames will save it. */
+ B->min_align = 1;
+ /* Save the parent block align, and set proper defaults for this buffer. */
+ frame(container.buffer.block_align) = B->block_align;
+ B->block_align = block_align;
+ frame(container.buffer.flags) = B->buffer_flags;
+ B->buffer_flags = (uint16_t)flags;
+ frame(container.buffer.mark) = B->buffer_mark;
+ frame(container.buffer.nest_id) = B->nest_id;
+ /*
+ * End of buffer when nested. Not defined for top-level because we
+ * here (and only here) permit strings etc. to be created before buffer start and
+ * because top-level buffer vtables can be clustered.
+ */
+ B->buffer_mark = B->emit_start;
+ /* Must be 0 before and after entering top-level buffer, and unique otherwise. */
+ B->nest_id = B->nest_count++;
+ frame(container.buffer.identifier) = B->identifier;
+ set_identifier(identifier);
+ frame(type) = flatcc_builder_buffer;
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root)
+{
+ flatcc_builder_ref_t buffer_ref;
+ flatcc_builder_buffer_flags_t flags;
+
+ flags = (flatcc_builder_buffer_flags_t)B->buffer_flags & flatcc_builder_with_size;
+ flags |= is_top_buffer(B) ? 0 : flatcc_builder_is_nested;
+ check(frame(type) == flatcc_builder_buffer, "expected buffer frame");
+ set_min_align(B, B->block_align);
+ if (0 == (buffer_ref = flatcc_builder_create_buffer(B, (void *)&B->identifier,
+ B->block_align, root, B->min_align, flags))) {
+ return 0;
+ }
+ B->buffer_mark = frame(container.buffer.mark);
+ B->nest_id = frame(container.buffer.nest_id);
+ B->identifier = frame(container.buffer.identifier);
+ B->buffer_flags = frame(container.buffer.flags);
+ exit_frame(B);
+ return buffer_ref;
+}
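+
+/*
+ * Illustrative usage sketch only (not part of the library): a minimal
+ * pairing of `flatcc_builder_start_buffer` and `flatcc_builder_end_buffer`
+ * around a root object, here a plain root struct. The "TEST" identifier
+ * and the struct contents are made up for the example, and error checks
+ * are omitted.
+ *
+ *     flatcc_builder_t builder, *B = &builder;
+ *     uint64_t data = 42;
+ *     flatcc_builder_ref_t root, buffer;
+ *
+ *     flatcc_builder_init(B);
+ *     flatcc_builder_start_buffer(B, "TEST", 0, 0);
+ *     root = flatcc_builder_create_struct(B, &data, sizeof(data), 8);
+ *     buffer = flatcc_builder_end_buffer(B, root);
+ *     // ... extract the result with a finalize or copy call, then:
+ *     flatcc_builder_clear(B);
+ */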
+
+void *flatcc_builder_start_struct(flatcc_builder_t *B, size_t size, uint16_t align)
+{
+ /* Allocate space for the struct on the ds stack. */
+ if (enter_frame(B, align)) {
+ return 0;
+ }
+ frame(type) = flatcc_builder_struct;
+ refresh_ds(B, data_limit);
+ return push_ds(B, (uoffset_t)size);
+}
+
+void *flatcc_builder_struct_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t object_ref;
+
+ check(frame(type) == flatcc_builder_struct, "expected struct frame");
+ if (0 == (object_ref = flatcc_builder_create_struct(B, B->ds, B->ds_offset, B->align))) {
+ return 0;
+ }
+ exit_frame(B);
+ return object_ref;
+}
+
+static inline int vector_count_add(flatcc_builder_t *B, uoffset_t count, uoffset_t max_count)
+{
+ uoffset_t n, n1;
+ n = frame(container.vector.count);
+ n1 = n + count;
+ /*
+ * This prevents elem_size * count from overflowing iff max_count
+ * has been set sensibly. Without this check we might allocate too
+ * little on the ds stack and return a buffer the user believes to be
+ * much larger, which would be bad even though the buffer would
+ * eventually fail anyway.
+ */
+ check_error(n <= n1 && n1 <= max_count, -1, "vector too large to represent");
+ frame(container.vector.count) = n1;
+ return 0;
+}
+
+void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) <= frame(container.vector.max_count), 0, "vector max count exceeded");
+ frame(container.vector.count) += 1;
+ return push_ds_copy(B, data, frame(container.vector.elem_size));
+}
+
+void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds_copy(B, data, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(field_size * count));
+}
+
+flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B, flatcc_builder_ref_t ref)
+{
+ flatcc_builder_ref_t *p;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (frame(container.vector.count) == max_offset_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, field_size))) {
+ return 0;
+ }
+ *p = ref;
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B, const flatcc_builder_ref_t *refs, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, refs, (uoffset_t)(field_size * count));
+}
+
+char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds_copy(B, s, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_append_string(B, s, strlen(s));
+}
+
+char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_append_string(B, s, strnlen(s, max_len));
+}
+
+int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) >= count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ check_error(frame(container.vector.count) >= len, -1, "cannot truncate string past empty");
+ frame(container.vector.count) -= (uoffset_t)len;
+ unpush_ds(B, (uoffset_t)len);
+ return 0;
+}
+
+int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size, uint16_t align, size_t max_count)
+{
+ get_min_align(&align, field_size);
+ if (enter_frame(B, align)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = (uoffset_t)elem_size;
+ frame(container.vector.count) = 0;
+ frame(container.vector.max_count) = (uoffset_t)max_count;
+ frame(type) = flatcc_builder_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+int flatcc_builder_start_offset_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = field_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_offset_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *vec, size_t count)
+{
+ flatcc_builder_ref_t *_vec;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return 0;
+ }
+ if (!(_vec = flatcc_builder_extend_offset_vector(B, count))) {
+ return 0;
+ }
+ memcpy(_vec, vec, count * field_size);
+ return flatcc_builder_end_offset_vector(B);
+}
+
+int flatcc_builder_start_string(flatcc_builder_t *B)
+{
+ if (enter_frame(B, 1)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = 1;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_string;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+int flatcc_builder_reserve_table(flatcc_builder_t *B, int count)
+{
+ check(count >= 0, "cannot reserve negative count");
+ return reserve_fields(B, count);
+}
+
+int flatcc_builder_start_table(flatcc_builder_t *B, int count)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.table.vs_end) = vs_offset(B->vs);
+ frame(container.table.pl_end) = pl_offset(B->pl);
+ frame(container.table.vt_hash) = B->vt_hash;
+ frame(container.table.id_end) = B->id_end;
+ B->vt_hash = 0;
+ FLATCC_BUILDER_INIT_VT_HASH(B->vt_hash);
+ B->id_end = 0;
+ frame(type) = flatcc_builder_table;
+ if (reserve_fields(B, count)) {
+ return -1;
+ }
+ refresh_ds(B, table_limit);
+ return 0;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size)
+{
+ flatcc_builder_vt_ref_t vt_ref;
+ iov_state_t iov;
+ voffset_t *vt_;
+ size_t i;
+
+ /*
+ * Only top-level buffer can cluster vtables because only it can
+ * extend beyond the end.
+ *
+ * We write the vtable after the referencing table to maintain
+ * the construction invariant that any offset reference has
+ * valid emitted data at a higher address, and also that any
+ * issued negative emit address represents an offset reference
+ * to some flatbuffer object or vector (or possibly a root
+ * struct).
+ *
+ * The vt_ref is stored as the reference + 1 to avoid having 0 as a
+ * valid reference (which usually means error). It also identifies
+ * vtable references as the only odd-valued references, and the only
+ * references that can be used multiple times in the same buffer.
+ *
+ * We do the vtable conversion here so cached vtables can be built
+ * hashed and compared more efficiently, and so end users with
+ * direct vtable construction don't have to worry about endianness.
+ * This also ensures the hash function works the same wrt.
+ * collision frequency.
+ */
+
+ if (!flatbuffers_is_native_pe()) {
+ /* Make space in vtable cache for temporary endian conversion. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+ return 0;
+ }
+ for (i = 0; i < vt_size / sizeof(voffset_t); ++i) {
+ write_voffset(&vt_[i], vt[i]);
+ }
+ vt = vt_;
+ /* We don't need to free the reservation since we don't advance any base pointer. */
+ }
+
+ init_iov();
+ push_iov(vt, vt_size);
+ if (is_top_buffer(B) && !B->disable_vt_clustering) {
+ /* Note that `emit_back` already returns ref + 1 as we require for vtables. */
+ if (0 == (vt_ref = emit_back(B, &iov))) {
+ return 0;
+ }
+ } else {
+ if (0 == (vt_ref = emit_front(B, &iov))) {
+ return 0;
+ }
+ /*
+ * We don't have a valid 0 ref here, but to be consistent with
+ * clustered vtables we offset by one. This cannot be zero
+ * either.
+ */
+ vt_ref += 1;
+ }
+ return vt_ref;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size, uint32_t vt_hash)
+{
+ vtable_descriptor_t *vd, *vd2;
+ uoffset_t *pvd, *pvd_head;
+ uoffset_t next;
+ voffset_t *vt_;
+
+ /* This just gets the hash table slot, we still have to inspect it. */
+ if (!(pvd_head = lookup_ht(B, vt_hash))) {
+ return 0;
+ }
+ pvd = pvd_head;
+ next = *pvd;
+ /* Tracks if there already is a cached copy. */
+ vd2 = 0;
+ while (next) {
+ vd = vd_ptr(next);
+ vt_ = vb_ptr(vd->vb_start);
+ if (vt_[0] != vt_size || 0 != memcmp(vt, vt_, vt_size)) {
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Can't share emitted vtables between buffers, */
+ if (vd->nest_id != B->nest_id) {
+ /* but we don't have to resubmit to cache. */
+ vd2 = vd;
+ /* See if there is a better match. */
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Move to front hash strategy. */
+ if (pvd != pvd_head) {
+ *pvd = vd->next;
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ }
+ /* vtable exists and has been emitted within current buffer. */
+ return vd->vt_ref;
+ }
+ /* Allocate new descriptor. */
+ if (!(vd = reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0))) {
+ return 0;
+ }
+ next = B->vd_end;
+ B->vd_end += (uoffset_t)sizeof(vtable_descriptor_t);
+
+ /* Identify the buffer this vtable descriptor belongs to. */
+ vd->nest_id = B->nest_id;
+
+ /* Move to front hash strategy. */
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ if (0 == (vd->vt_ref = flatcc_builder_create_vtable(B, vt, vt_size))) {
+ return 0;
+ }
+ if (vd2) {
+ /* Reuse cached copy. */
+ vd->vb_start = vd2->vb_start;
+ } else {
+ if (B->vb_flush_limit && B->vb_flush_limit < B->vb_end + vt_size) {
+ flatcc_builder_flush_vtable_cache(B);
+ } else {
+ /* Make space in vtable cache. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+ return -1;
+ }
+ vd->vb_start = B->vb_end;
+ B->vb_end += vt_size;
+ memcpy(vt_, vt, vt_size);
+ }
+ }
+ return vd->vt_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B, const void *data, size_t size, uint16_t align,
+ flatbuffers_voffset_t *offsets, int offset_count, flatcc_builder_vt_ref_t vt_ref)
+{
+ int i;
+ uoffset_t pad, vt_offset, vt_offset_field, vt_base, base, offset, *offset_field;
+ iov_state_t iov;
+
+ check(offset_count >= 0, "expected non-negative offset_count");
+ /*
+ * vtable references are offset by 1 to avoid confusion with
+ * 0 as an error reference. It also uniquely identifies them
+ * as vtables, which are the only odd-valued reference type.
+ */
+ check(vt_ref & 1, "invalid vtable reference");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ /* Alignment is calculated for the first element, not the header. */
+ pad = front_pad(B, (uoffset_t)size, align);
+ base = (uoffset_t)B->emit_start - (uoffset_t)(pad + size + field_size);
+ /* Adjust by 1 to get unencoded vtable reference. */
+ vt_base = (uoffset_t)(vt_ref - 1);
+ vt_offset = base - vt_base;
+ /* Avoid overflow. */
+ if (base - vt_offset != vt_base) {
+ return -1;
+ }
+ /* Protocol endian encoding. */
+ write_uoffset(&vt_offset_field, vt_offset);
+ for (i = 0; i < offset_count; ++i) {
+ offset_field = (uoffset_t *)((size_t)data + offsets[i]);
+ offset = *offset_field - base - offsets[i] - (uoffset_t)field_size;
+ write_uoffset(offset_field, offset);
+ }
+ init_iov();
+ push_iov(&vt_offset_field, field_size);
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return id < B->id_end && B->vs[id] != 0;
+}
+
+int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (id == 0 || id >= B->id_end) {
+ return 0;
+ }
+ if (B->vs[id - 1] == 0) {
+ return B->vs[id] == 0;
+ }
+ if (*(uint8_t *)(B->ds + B->vs[id - 1])) {
+ return B->vs[id] != 0;
+ }
+ return B->vs[id] == 0;
+}
+
+int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count)
+{
+ int i;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (B->id_end < count) {
+ return 0;
+ }
+ for (i = 0; i < count; ++i) {
+ if (B->vs[required[i]] == 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B)
+{
+ voffset_t *vt, vt_size;
+ flatcc_builder_ref_t table_ref, vt_ref;
+ int pl_count;
+ voffset_t *pl;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ /* We have `ds_limit`, so we should not have to check for overflow here. */
+
+ vt = B->vs - 2;
+ vt_size = (voffset_t)(sizeof(voffset_t) * (B->id_end + 2u));
+ /* Update vtable header fields, first vtable size, then object table size. */
+ vt[0] = vt_size;
+ /*
+ * The `ds` buffer is always at least `field_size` aligned but excludes the
+ * initial vtable offset field. Therefore `field_size` is added here
+ * to the total table size in the vtable.
+ */
+ vt[1] = (voffset_t)(B->ds_offset + field_size);
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)vt[0], (uint32_t)vt[1]);
+ /* Find already emitted vtable, or emit a new one. */
+ if (!(vt_ref = flatcc_builder_create_cached_vtable(B, vt, vt_size, B->vt_hash))) {
+ return 0;
+ }
+ /* Clear vs stack so it is ready for the next vtable (ds stack is cleared by exit frame). */
+ memset(vt, 0, vt_size);
+
+ pl = pl_ptr(frame(container.table.pl_end));
+ pl_count = (int)(B->pl - pl);
+ if (0 == (table_ref = flatcc_builder_create_table(B, B->ds, B->ds_offset, B->align, pl, pl_count, vt_ref))) {
+ return 0;
+ }
+ B->vt_hash = frame(container.table.vt_hash);
+ B->id_end = frame(container.table.id_end);
+ B->vs = vs_ptr(frame(container.table.vs_end));
+ B->pl = pl_ptr(frame(container.table.pl_end));
+ exit_frame(B);
+ return table_ref;
+}
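+
+/*
+ * Illustrative sketch only (field ids and values are invented, error
+ * checks omitted): given a flatcc_builder_t *B with an open buffer, a
+ * table with one scalar field (id 0) and one offset field (id 1) can be
+ * built between `start_table` and `end_table`; `end_table` derives and
+ * deduplicates the vtable automatically.
+ *
+ *     flatcc_builder_ref_t name, table;
+ *     uint32_t *age;
+ *
+ *     name = flatcc_builder_create_string_str(B, "example");
+ *     flatcc_builder_start_table(B, 2);
+ *     age = flatcc_builder_table_add(B, 0, sizeof(*age), sizeof(*age));
+ *     *age = 7;
+ *     *flatcc_builder_table_add_offset(B, 1) = name;
+ *     table = flatcc_builder_end_table(B);
+ */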
+
+flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
+ const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count)
+{
+ /*
+ * Note: it is important that vec_size is uoffset not size_t
+ * in case sizeof(uoffset_t) > sizeof(size_t) because max_count is
+ * defined in terms of uoffset_t representation size, and also
+ * because we risk accepting too large a vector even if max_count is
+ * not violated.
+ */
+ uoffset_t vec_size, vec_pad, length_prefix;
+ iov_state_t iov;
+
+ check_error(count <= max_count, 0, "vector max_count violated");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ vec_size = (uoffset_t)count * (uoffset_t)elem_size;
+ /*
+ * Overflow can happen on 32-bit systems when uoffset_t is defined as 64-bit.
+ * `emit_front/back` captures overflow, but not if our size type wraps first.
+ */
+#if FLATBUFFERS_UOFFSET_MAX > SIZE_MAX
+ check_error(vec_size < SIZE_MAX, 0, "vector larger than address space");
+#endif
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, align);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(data, vec_size);
+ push_iov(_pad, vec_pad);
+ return emit_front(B, &iov);
+}
+
+/*
+ * Note: FlatBuffers official documentation states that the size field of a
+ * vector is a 32-bit element count. It is not quite clear if the
+ * intention is to have the size field be of type uoffset_t since tables
+ * also have a uoffset_t sized header, or if the vector size should
+ * remain unchanged if uoffset is changed to 16- or 64-bits
+ * respectively. Since it makes most sense to have a vector compatible
+ * with the addressable space, we choose to use uoffset_t as size field,
+ * which remains compatible with the default 32-bit version of uoffset_t.
+ */
+flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+
+ if (0 == (vector_ref = flatcc_builder_create_vector(B, B->ds,
+ frame(container.vector.count), frame(container.vector.elem_size),
+ B->align, frame(container.vector.max_count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
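+
+/*
+ * Illustrative sketch only (values and the max count are arbitrary,
+ * error checks omitted): a scalar vector built incrementally with an
+ * initialized flatcc_builder_t *B. The max count argument feeds the
+ * overflow guard in `vector_count_add` above.
+ *
+ *     const uint16_t values[] = { 1, 2, 3 };
+ *     flatcc_builder_ref_t vec;
+ *
+ *     flatcc_builder_start_vector(B, sizeof(values[0]), sizeof(values[0]), 1000);
+ *     flatcc_builder_append_vector(B, values, 3);
+ *     vec = flatcc_builder_end_vector(B);
+ */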
+
+size_t flatcc_builder_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+/* This function destroys the source content but avoids stack allocation. */
+static flatcc_builder_ref_t _create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count, const utype_t *types)
+{
+ uoffset_t vec_size, vec_pad;
+ uoffset_t length_prefix, offset;
+ uoffset_t i;
+ soffset_t base;
+ iov_state_t iov;
+
+ if ((uoffset_t)count > max_offset_count) {
+ return 0;
+ }
+ set_min_align(B, field_size);
+ vec_size = (uoffset_t)(count * field_size);
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, field_size);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(vec, vec_size);
+ push_iov(_pad, vec_pad);
+ base = B->emit_start - (soffset_t)iov.len;
+ for (i = 0; i < (uoffset_t)count; ++i) {
+ /*
+ * 0 is either end of buffer, start of vtables, or start of
+ * buffer depending on the direction in which the buffer is
+ * built. None of these can create a valid 0 reference but it
+ * is easy to create by mistake when manually building offset
+ * vectors.
+ *
+ * Unions do permit nulls, but only when the type is NONE.
+ */
+ if (vec[i] != 0) {
+ offset = (uoffset_t)
+ (vec[i] - base - (soffset_t)(i * field_size) - (soffset_t)field_size);
+ write_uoffset(&vec[i], offset);
+ if (types) {
+ check(types[i] != 0, "union vector cannot have non-null element with type NONE");
+ }
+ } else {
+ if (types) {
+ check(types[i] == 0, "union vector cannot have null element without type NONE");
+ } else {
+ check(0, "offset vector cannot have null element");
+ }
+ }
+ }
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count)
+{
+ return _create_offset_vector_direct(B, vec, count, 0);
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = flatcc_builder_create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B, const utype_t *types)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = _create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count), types))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_ref_t *pref;
+ flatcc_builder_utype_t *putype;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error(uref.type != 0 || uref.value == 0, -1, "expected null value for type NONE");
+ if (uref.value != 0) {
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union value");
+ *pref = uref.value;
+ }
+ putype = flatcc_builder_table_add(B, id - 1, utype_size, utype_size);
+ check_error(putype != 0, -1, "unable to add union type");
+ write_utype(putype, uref.type);
+ return 0;
+}
+
+int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
+ flatcc_builder_union_vec_ref_t uvref)
+{
+ flatcc_builder_ref_t *pref;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error((uvref.type == 0) == (uvref.value == 0), -1, "expected both type and value vector, or neither");
+ if (uvref.type != 0) {
+ pref = flatcc_builder_table_add_offset(B, id - 1);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.type;
+
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.value;
+ }
+ return 0;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_ref_t *refs;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return uvref;
+ }
+ if (0 == flatcc_builder_extend_offset_vector(B, count)) {
+ return uvref;
+ }
+ if (0 == (types = push_ds(B, (uoffset_t)(utype_size * count)))) {
+ return uvref;
+ }
+
+ /* Safe even if push_ds caused stack reallocation. */
+ refs = flatcc_builder_offset_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B,
+ types, refs, count);
+ /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+
+ if (0 == (uvref.value = _create_offset_vector_direct(B, data, count, types))) {
+ return uvref;
+ }
+ if (0 == (uvref.type = flatcc_builder_create_type_vector(B, types, count))) {
+ return uvref;
+ }
+ return uvref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, size_t count)
+{
+ return flatcc_builder_create_vector(B, types, count,
+ utype_size, utype_size, max_utype_count);
+}
+
+int flatcc_builder_start_union_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = union_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_union_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_union_ref_t *urefs;
+ flatcc_builder_ref_t *refs;
+ size_t i, count;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+
+ /*
+ * We could split the union vector in-place, but then we would have
+ * to deal with strict pointer aliasing rules which is not worthwhile
+ * so we create a new offset and type vector on the stack.
+ *
+ * We assume the stack is sufficiently aligned as is.
+ */
+ count = flatcc_builder_union_vector_count(B);
+ if (0 == (refs = push_ds(B, (uoffset_t)(count * (utype_size + field_size))))) {
+ return uvref;
+ }
+ types = (flatcc_builder_utype_t *)(refs + count);
+
+ /* Safe even if push_ds caused stack reallocation. */
+ urefs = flatcc_builder_union_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B, types, refs, count);
+ /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+void *flatcc_builder_union_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_union_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(union_size * count));
+}
+
+int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_union_ref_t *p;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (frame(container.vector.count) == max_union_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, union_size))) {
+ return 0;
+ }
+ *p = uref;
+ return p;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, urefs, (uoffset_t)(union_size * count));
+}
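+
+/*
+ * Illustrative sketch only (the type code and `member_table` reference
+ * are hypothetical, error checks omitted): a union vector is collected
+ * as (type, value) pairs, and `end_union_vector` splits it into the
+ * separate type vector and offset vector the wire format requires.
+ *
+ *     flatcc_builder_union_ref_t uref;
+ *     flatcc_builder_union_vec_ref_t uvref;
+ *
+ *     flatcc_builder_start_union_vector(B);
+ *     uref.type = 1;              // hypothetical union member type code
+ *     uref.value = member_table;  // a previously created table reference
+ *     flatcc_builder_union_vector_push(B, uref);
+ *     uvref = flatcc_builder_end_union_vector(B);
+ *     // uvref.type and uvref.value are then stored as adjacent table fields,
+ *     // e.g. via flatcc_builder_table_add_union_vector.
+ */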
+
+flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ uoffset_t s_pad;
+ uoffset_t length_prefix;
+ iov_state_t iov;
+
+ if (len > max_string_len) {
+ return 0;
+ }
+ write_uoffset(&length_prefix, (uoffset_t)len);
+ /* Add 1 for zero termination. */
+ s_pad = front_pad(B, (uoffset_t)len + 1, field_size) + 1;
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(s, len);
+ push_iov(_pad, s_pad);
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_create_string(B, s, strlen(s));
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_create_string(B, s, strnlen(s, max_len));
+}
+
+flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t string_ref;
+
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ FLATCC_ASSERT(frame(container.vector.count) == B->ds_offset);
+ if (0 == (string_ref = flatcc_builder_create_string(B,
+ (const char *)B->ds, B->ds_offset))) {
+ return 0;
+ }
+ exit_frame(B);
+ return string_ref;
+}
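+
+/*
+ * Illustrative sketch only (contents invented, error checks omitted):
+ * strings can be created in one shot or built incrementally; both paths
+ * end in `flatcc_builder_create_string`, which length-prefixes and
+ * zero-terminates the data.
+ *
+ *     flatcc_builder_ref_t s1, s2;
+ *
+ *     s1 = flatcc_builder_create_string_str(B, "hello");
+ *
+ *     flatcc_builder_start_string(B);
+ *     flatcc_builder_append_string_str(B, "hello ");
+ *     flatcc_builder_append_string_str(B, "world");
+ *     s2 = flatcc_builder_end_string(B);
+ */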
+
+char *flatcc_builder_string_edit(flatcc_builder_t *B)
+{
+ return (char *)B->ds;
+}
+
+size_t flatcc_builder_string_len(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align)
+{
+ /*
+ * We align the offset relative to the first table field, excluding
+ * the header holding the vtable reference. On the stack, `ds_first`
+ * is aligned to 8 bytes thanks to the `enter_frame` logic, and this
+ * provides a safe way to update the fields on the stack, but here
+ * we are concerned with the target buffer alignment.
+ *
+ * We could also have aligned relative to the end of the table which
+ * would allow us to emit each field immediately, but it would be a
+ * confusing user experience wrt. field ordering, and it would add
+ * more variability to vtable layouts, thus reducing reuse, and
+ * frequent emissions to external emitter interface would be
+ * sub-optimal. Also, with that approach, the vtable offsets would
+ * have to be adjusted at table end.
+ *
+ * As we have it, each emit occurs at table end, vector end, string
+ * end, or buffer end, which might be helpful to various backend
+ * processors.
+ */
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+ if (align > B->align) {
+ B->align = align;
+ }
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)size);
+ return push_ds_field(B, (uoffset_t)size, align, (voffset_t)id);
+}
+
+void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return B->ds + B->ds_offset - size;
+}
+
+void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align)
+{
+ void *p;
+
+ if ((p = flatcc_builder_table_add(B, id, size, align))) {
+ memcpy(p, data, size);
+ }
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)field_size);
+ return push_ds_offset_field(B, (voffset_t)id);
+}
+
+uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B)
+{
+ uint16_t old_min_align = B->min_align;
+
+ B->min_align = field_size;
+ return old_min_align;
+}
+
+void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t pushed_align)
+{
+ set_min_align(B, pushed_align);
+}
+
+uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B)
+{
+ return B->min_align;
+}
+
+void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable)
+{
+ /* Inverted because we zero all memory in B on init. */
+ B->disable_vt_clustering = !enable;
+}
+
+void flatcc_builder_set_block_align(flatcc_builder_t *B, uint16_t align)
+{
+ B->block_align = align;
+}
+
+int flatcc_builder_get_level(flatcc_builder_t *B)
+{
+ return B->level;
+}
+
+void flatcc_builder_set_max_level(flatcc_builder_t *B, int max_level)
+{
+ B->max_level = max_level;
+ if (B->limit_level < B->max_level) {
+ B->limit_level = B->max_level;
+ }
+}
+
+size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B)
+{
+ return (size_t)(B->emit_end - B->emit_start);
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B)
+{
+ return B->emit_start;
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_end(flatcc_builder_t *B)
+{
+ return B->emit_end;
+}
+
+void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size)
+{
+ B->vb_flush_limit = size;
+}
+
+void flatcc_builder_set_identifier(flatcc_builder_t *B, const char identifier[identifier_size])
+{
+ set_identifier(identifier);
+}
+
+enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B)
+{
+ return B->frame ? frame(type) : flatcc_builder_empty;
+}
+
+enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level)
+{
+ if (level < 1 || level > B->level) {
+ return flatcc_builder_empty;
+ }
+ return B->frame[level - B->level].type;
+}
+
+void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ if (B->is_default_emitter) {
+ return flatcc_emitter_get_direct_buffer(&B->default_emit_context, size_out);
+ } else {
+ if (size_out) {
+ *size_out = 0;
+ }
+ }
+ return 0;
+}
+
+void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size)
+{
+ /* User is allowed to call tentatively to see if there is support. */
+ if (!B->is_default_emitter) {
+ return 0;
+ }
+ buffer = flatcc_emitter_copy_buffer(&B->default_emit_context, buffer, size);
+ check(buffer, "default emitter declined to copy buffer");
+ return buffer;
+}
+
+void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+
+ buffer = FLATCC_BUILDER_ALLOC(size);
+
+ if (!buffer) {
+ check(0, "failed to allocated memory for finalized buffer");
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ check(0, "default emitter declined to copy buffer");
+ FLATCC_BUILDER_FREE(buffer);
+ buffer = 0;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
+
+void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t align;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+ align = flatcc_builder_get_buffer_alignment(B);
+
+ size = (size + align - 1) & ~(align - 1);
+ buffer = FLATCC_BUILDER_ALIGNED_ALLOC(align, size);
+
+ if (!buffer) {
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ FLATCC_BUILDER_ALIGNED_FREE(buffer);
+ buffer = 0;
+ goto done;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
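+
+/*
+ * Illustrative sketch only (error checks omitted): with the default
+ * emitter, a finished buffer is typically extracted and released as
+ * shown. The aligned variant respects the buffer alignment computed
+ * during construction.
+ *
+ *     size_t size;
+ *     void *flatbuffer;
+ *
+ *     flatbuffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ *     // ... use the buffer ...
+ *     flatcc_builder_aligned_free(flatbuffer);
+ *     flatcc_builder_clear(B);
+ */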
+
+void *flatcc_builder_aligned_alloc(size_t alignment, size_t size)
+{
+ return FLATCC_BUILDER_ALIGNED_ALLOC(alignment, size);
+}
+
+void flatcc_builder_aligned_free(void *p)
+{
+ FLATCC_BUILDER_ALIGNED_FREE(p);
+}
+
+void *flatcc_builder_alloc(size_t size)
+{
+ return FLATCC_BUILDER_ALLOC(size);
+}
+
+void flatcc_builder_free(void *p)
+{
+ FLATCC_BUILDER_FREE(p);
+}
+
+void *flatcc_builder_get_emit_context(flatcc_builder_t *B)
+{
+ return B->emit_context;
+}
diff --git a/src/runtime/emitter.c b/src/runtime/emitter.c
new file mode 100644
index 0000000..089ea00
--- /dev/null
+++ b/src/runtime/emitter.c
@@ -0,0 +1,269 @@
+#include <stdlib.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_emitter.h"
+
+static int advance_front(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->front && E->front->prev != E->back) {
+ E->front->prev->page_offset = E->front->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ E->front = E->front->prev;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->front) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->front = p;
+ goto done;
+ }
+ /*
+ * The first page is shared between front and back to avoid
+ * an unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->front->page_offset = E->front->next->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int advance_back(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->back && E->back->next != E->front) {
+ E->back = E->back->next;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->back) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->back = p;
+ goto done;
+ }
+ /*
+ * The first page is shared between front and back to avoid
+ * an unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->back_cursor = E->back->page;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->back->page_offset = E->back->prev->page_offset + FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int copy_front(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ data += size;
+ while (size) {
+ k = size;
+ if (k > E->front_left) {
+ k = E->front_left;
+ if (k == 0) {
+ if (advance_front(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ E->front_cursor -= k;
+ E->front_left -= k;
+ data -= k;
+ size -= k;
+ memcpy(E->front_cursor, data, k);
+ }
+ return 0;
+}
+
+static int copy_back(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ while (size) {
+ k = size;
+ if (k > E->back_left) {
+ k = E->back_left;
+ if (k == 0) {
+ if (advance_back(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ memcpy(E->back_cursor, data, k);
+ size -= k;
+ data += k;
+ E->back_cursor += k;
+ E->back_left -= k;
+ }
+ return 0;
+}
+
+int flatcc_emitter_recycle_page(flatcc_emitter_t *E, flatcc_emitter_page_t *p)
+{
+ if (p == E->front || p == E->back) {
+ return -1;
+ }
+ p->next->prev = p->prev;
+ p->prev->next = p->next;
+ p->prev = E->front->prev;
+ p->next = E->front;
+ p->prev->next = p;
+ p->next->prev = p;
+ return 0;
+}
+
+void flatcc_emitter_reset(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!E->front) {
+ return;
+ }
+ E->back = E->front;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->front->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ /* Heuristic to reduce peak allocation over time. */
+ if (E->used_average == 0) {
+ E->used_average = E->used;
+ }
+ E->used_average = E->used_average * 3 / 4 + E->used / 4;
+ E->used = 0;
+ while (E->used_average * 2 < E->capacity && E->back->next != E->front) {
+ /* We deallocate the page after back since it is less likely to be hot in cache. */
+ p = E->back->next;
+ E->back->next = p->next;
+ p->next->prev = E->back;
+ FLATCC_EMITTER_FREE(p);
+ E->capacity -= FLATCC_EMITTER_PAGE_SIZE;
+ }
+}
+
+void flatcc_emitter_clear(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!p) {
+ return;
+ }
+ p->prev->next = 0;
+ while (p->next) {
+ p = p->next;
+ FLATCC_EMITTER_FREE(p->prev);
+ }
+ FLATCC_EMITTER_FREE(p);
+ memset(E, 0, sizeof(*E));
+}
+
+int flatcc_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len)
+{
+ flatcc_emitter_t *E = emit_context;
+ uint8_t *p;
+
+ E->used += len;
+ if (offset < 0) {
+ if (len <= E->front_left) {
+ E->front_cursor -= len;
+ E->front_left -= len;
+ p = E->front_cursor;
+ goto copy;
+ }
+ iov += iov_count;
+ while (iov_count--) {
+ --iov;
+ if (copy_front(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ }
+ } else {
+ if (len <= E->back_left) {
+ p = E->back_cursor;
+ E->back_cursor += len;
+ E->back_left -= len;
+ goto copy;
+ }
+ while (iov_count--) {
+ if (copy_back(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ ++iov;
+ }
+ }
+ return 0;
+copy:
+ while (iov_count--) {
+ memcpy(p, iov->iov_base, iov->iov_len);
+ p += iov->iov_len;
+ ++iov;
+ }
+ return 0;
+}
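+
+/*
+ * Illustrative sketch only: `flatcc_emitter` can be installed explicitly
+ * as a custom emitter so the paged output can be copied out afterwards.
+ * The `flatcc_builder_custom_init` signature used here (emit function,
+ * emit context, optional allocator and its context) is assumed from
+ * flatcc_builder.h; error checks are omitted.
+ *
+ *     flatcc_emitter_t emitter = { 0 };
+ *     flatcc_builder_t builder;
+ *     size_t size;
+ *     void *copy;
+ *
+ *     flatcc_builder_custom_init(&builder, flatcc_emitter, &emitter, 0, 0);
+ *     // ... build a buffer ...
+ *     size = flatcc_builder_get_buffer_size(&builder);
+ *     copy = malloc(size);
+ *     flatcc_emitter_copy_buffer(&emitter, copy, size);
+ *     flatcc_builder_clear(&builder);
+ *     flatcc_emitter_clear(&emitter);
+ */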
+
+void *flatcc_emitter_copy_buffer(flatcc_emitter_t *E, void *buf, size_t size)
+{
+ flatcc_emitter_page_t *p;
+ size_t len;
+
+ if (size < E->used) {
+ return 0;
+ }
+ if (!E->front) {
+ return 0;
+ }
+ if (E->front == E->back) {
+ memcpy(buf, E->front_cursor, E->used);
+ return buf;
+ }
+ len = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ memcpy(buf, E->front_cursor, len);
+ buf = (uint8_t *)buf + len;
+ p = E->front->next;
+ while (p != E->back) {
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE);
+ buf = (uint8_t *)buf + FLATCC_EMITTER_PAGE_SIZE;
+ p = p->next;
+ }
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE - E->back_left);
+ return buf;
+}
diff --git a/src/runtime/json_parser.c b/src/runtime/json_parser.c
new file mode 100644
index 0000000..4472af2
--- /dev/null
+++ b/src/runtime/json_parser.c
@@ -0,0 +1,1297 @@
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_json_parser.h"
+#include "flatcc/flatcc_assert.h"
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+#include "flatcc/portable/pparsefp.h"
+#include "flatcc/portable/pbase64.h"
+
+#if FLATCC_USE_SSE4_2
+#ifdef __SSE4_2__
+#define USE_SSE4_2
+#endif
+#endif
+
+#ifdef USE_SSE4_2
+#include <nmmintrin.h>
+#define cmpistri(end, haystack, needle, flags) \
+ if (end - haystack >= 16) do { \
+ int i; \
+ __m128i a = _mm_loadu_si128((const __m128i *)(needle)); \
+ do { \
+ __m128i b = _mm_loadu_si128((const __m128i *)(haystack)); \
+ i = _mm_cmpistri(a, b, flags); \
+ haystack += i; \
+ } while (i == 16 && end - haystack >= 16); \
+ } while(0)
+#endif
+
+const char *flatcc_json_parser_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_parser_error_##no: \
+ return str;
+ FLATCC_JSON_PARSE_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+const char *flatcc_json_parser_set_error(flatcc_json_parser_t *ctx, const char *loc, const char *end, int err)
+{
+ if (!ctx->error) {
+ ctx->error = err;
+ ctx->pos = (int)(loc - ctx->line_start + 1);
+ ctx->error_loc = loc;
+ }
+ return end;
+}
+
+const char *flatcc_json_parser_string_part(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+/*
+ * Disabled because it doesn't catch all control characters, but is
+ * useful for performance testing.
+ */
+#if 0
+//#ifdef USE_SSE4_2
+ cmpistri(end, buf, "\"\\\0\r\n\t\v\f", _SIDD_POSITIVE_POLARITY);
+#else
+ /*
+ * Testing for signed char >= 0x20 would also capture UTF-8
+ * encodings that we could verify, and also invalid encodings like
+ * 0xff, but we do not want to enforce strict UTF-8.
+ */
+ while (buf != end && *buf != '\"' && ((unsigned char)*buf) >= 0x20 && *buf != '\\') {
+ ++buf;
+ }
+#endif
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ if (*buf == '"') {
+ return buf;
+ }
+ if (*buf < 0x20) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_character);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_space_ext(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+again:
+#ifdef USE_SSE4_2
+ /*
+ * We can include line break, but then error reporting suffers and
+ * it really makes no big difference.
+ */
+ //cmpistri(end, buf, "\x20\t\v\f\r\n", _SIDD_NEGATIVE_POLARITY);
+ cmpistri(end, buf, "\x20\t\v\f", _SIDD_NEGATIVE_POLARITY);
+#else
+#if FLATCC_ALLOW_UNALIGNED_ACCESS
+ while (end - buf >= 16) {
+ if (*buf > 0x20) {
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ if (((uint64_t *)buf)[0] != 0x2020202020202020) {
+descend:
+ if (((uint32_t *)buf)[0] == 0x20202020) {
+ buf += 4;
+ }
+#endif
+ if (((uint16_t *)buf)[0] == 0x2020) {
+ buf += 2;
+ }
+ if (*buf == 0x20) {
+ ++buf;
+ }
+ if (*buf > 0x20) {
+ return buf;
+ }
+ break;
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ }
+ if (((uint64_t *)buf)[1] != 0x2020202020202020) {
+ buf += 8;
+ goto descend;
+ }
+ buf += 16;
+#endif
+ }
+#endif
+#endif
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ while (buf != end && *buf <= 0x20) {
+ switch (*buf) {
+ case 0x0d: buf += (end - buf > 1 && buf[1] == 0x0a);
+ /* Consume a following LF, or treat a lone CR as LF. */
+ ++ctx->line; ctx->line_start = ++buf; continue;
+ case 0x0a: ++ctx->line; ctx->line_start = ++buf; continue;
+ case 0x09: ++buf; continue;
+ case 0x20: goto again; /* Don't consume here, sync with power of 2 spaces. */
+ default: return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ return buf;
+}
+
+static int decode_hex4(const char *buf, uint32_t *result)
+{
+ uint32_t u, x;
+ char c;
+
+ u = 0;
+ c = buf[0];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u = x << 12;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 12;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[1];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 8;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 8;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 4;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x;
+ } else {
+ return -1;
+ }
+ }
+ *result = u;
+ return 0;
+}
+
+static int decode_unicode_char(uint32_t u, char *code)
+{
+ if (u <= 0x7f) {
+ code[0] = 1;
+ code[1] = (char)u;
+ } else if (u <= 0x7ff) {
+ code[0] = 2;
+ code[1] = (char)(0xc0 | (u >> 6));
+ code[2] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0xffff) {
+ code[0] = 3;
+ code[1] = (char)(0xe0 | (u >> 12));
+ code[2] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[3] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0x10ffff) {
+ code[0] = 4;
+ code[1] = (char)(0xf0 | (u >> 18));
+ code[2] = (char)(0x80 | ((u >> 12) & 0x3f));
+ code[3] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[4] = (char)(0x80 | (u & 0x3f));
+ } else {
+ code[0] = 0;
+ return -1;
+ }
+ return 0;
+}
+
+static inline uint32_t combine_utf16_surrogate_pair(uint32_t high, uint32_t low)
+{
+ return (high - 0xd800) * 0x400 + (low - 0xdc00) + 0x10000;
+}
+
+static inline int decode_utf16_surrogate_pair(uint32_t high, uint32_t low, char *code)
+{
+ return decode_unicode_char(combine_utf16_surrogate_pair(high, low), code);
+}
+
+
+/*
+ * UTF-8 code points can have up to 4 bytes but JSON can only
+ * encode up to 3 bytes via the \uXXXX syntax.
+ * To handle the range U+10000..U+10FFFF two UTF-16 surrogate
+ * pairs must be used. If this is not detected, the pairs
+ * survive in the output which is not valid but often tolerated.
+ * Emojis generally require such a pair, unless encoded
+ * unescaped in UTF-8.
+ *
+ * If a high surrogate pair is detected and a low surrogate pair
+ * follows, the combined sequence is decoded as a 4 byte
+ * UTF-8 sequence. Unpaired surrogate halves are decoded as is
+ * despite being an invalid UTF-8 value.
+ */
+
+const char *flatcc_json_parser_string_escape(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_json_parser_escape_buffer_t code)
+{
+ char c, v;
+ uint32_t u, u2;
+
+ if (end - buf < 2 || buf[0] != '\\') {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ switch (buf[1]) {
+ case 'x':
+ v = 0;
+ code[0] = 1;
+ if (end - buf < 4) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ v |= (c - '0') << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= (c - 'a' + 10) << 4;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ v |= c - '0';
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= c - 'a' + 10;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ code[1] = v;
+ return buf + 4;
+ case 'u':
+ if (end - buf < 6) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ if (decode_hex4(buf + 2, &u)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ };
+ /* If a high UTF-16 surrogate half pair was detected */
+ if (u >= 0xd800 && u <= 0xdbff &&
+ /* and there is space for a matching low half pair */
+ end - buf >= 12 &&
+ /* and there is a second escape following immediately */
+ buf[6] == '\\' && buf[7] == 'u' &&
+ /* and it is valid hex */
+ decode_hex4(buf + 8, &u2) == 0 &&
+ /* and it is a low UTF-16 surrogate pair */
+ u2 >= 0xdc00 && u2 <= 0xdfff) {
+ /* then decode the pair into a single 4 byte utf-8 sequence. */
+ if (decode_utf16_surrogate_pair(u, u2, code)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ return buf + 12;
+ /*
+ * Otherwise decode an unmatched surrogate half as if it were
+ * any other codepoint. Some systems might depend on these surviving.
+ * Leave ignored errors for the next parse step.
+ */
+ }
+ decode_unicode_char(u, code);
+ return buf + 6;
+ case 't':
+ code[0] = 1;
+ code[1] = '\t';
+ return buf + 2;
+ case 'n':
+ code[0] = 1;
+ code[1] = '\n';
+ return buf + 2;
+ case 'r':
+ code[0] = 1;
+ code[1] = '\r';
+ return buf + 2;
+ case 'b':
+ code[0] = 1;
+ code[1] = '\b';
+ return buf + 2;
+ case 'f':
+ code[0] = 1;
+ code[1] = '\f';
+ return buf + 2;
+ case '\"':
+ code[0] = 1;
+ code[1] = '\"';
+ return buf + 2;
+ case '\\':
+ code[0] = 1;
+ code[1] = '\\';
+ return buf + 2;
+ case '/':
+ code[0] = 1;
+ code[1] = '/';
+ return buf + 2;
+ default:
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+}
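+
+/*
+ * Worked example (illustrative only): the escaped surrogate pair
+ * "\ud83d\ude00" combines to U+1F600 via
+ * (0xd83d - 0xd800) * 0x400 + (0xde00 - 0xdc00) + 0x10000, and
+ * decode_unicode_char then emits the 4-byte UTF-8 sequence
+ * f0 9f 98 80 while the parser consumes the 12 input bytes.
+ */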
+
+/* Only applies to unquoted constants during generic parsing, otherwise it is skipped as a string. */
+const char *flatcc_json_parser_skip_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char c;
+ const char *k;
+
+ while (buf != end) {
+ c = *buf;
+ if ((c & 0x80) || (c == '_') || (c >= '0' && c <= '9') || c == '.') {
+ ++buf;
+ continue;
+ }
+ /* Upper case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ ++buf;
+ continue;
+ }
+ buf = flatcc_json_parser_space(ctx, (k = buf), end);
+ if (buf == k) {
+ return buf;
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_match_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos, int *more)
+{
+ const char *mark = buf, *k = buf + pos;
+
+ if (end - buf <= pos) {
+ *more = 0;
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ buf = flatcc_json_parser_space(ctx, k, end);
+ if (buf == end) {
+ /*
+ * We cannot make a decision on more.
+ * Just return end and let parser handle sync point in
+ * case it is able to resume parse later on.
+ * For the same reason we do not lower ctx->unquoted.
+ */
+ *more = 0;
+ return buf;
+ }
+ if (buf != k) {
+ char c = *buf;
+ /*
+ * Space was seen - and thus we have a valid match.
+ * If the next char is an identifier start symbol
+ * we raise the more flag to support syntax like:
+ *
+ * `flags: Hungry Sleepy Awake, ...`
+ */
+ if (c == '_' || (c & 0x80)) {
+ *more = 1;
+ return buf;
+ }
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ *more = 1;
+ return buf;
+ }
+ }
+ /*
+ * Space was not seen, so the match is only valid if followed
+ * by a JSON separator symbol, and there cannot be more values
+ * following so `more` is lowered.
+ */
+ *more = 0;
+ if (*buf == ',' || *buf == '}' || *buf == ']') {
+ return buf;
+ }
+ return mark;
+ }
+#endif
+ buf = k;
+ if (*buf == 0x20) {
+ ++buf;
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ /* We accept untrimmed space like " Green Blue ". */
+ if (*buf != '\"') {
+ *more = 1;
+ return buf;
+ }
+ }
+ switch (*buf) {
+ case '\\':
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ case '\"':
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ *more = 0;
+ return buf;
+ }
+ *more = 0;
+ return mark;
+}
+
+const char *flatcc_json_parser_unmatched_symbol(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (ctx->flags & flatcc_json_parser_f_skip_unknown) {
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf != end && *buf == ':') {
+ ++buf;
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ return flatcc_json_parser_generic_json(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_symbol);
+ }
+}
+
+static const char *__flatcc_json_parser_number(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end) {
+ return buf;
+ }
+ if (*buf == '-') {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ }
+ if (*buf == '0') {
+ ++buf;
+ } else {
+ if (*buf < '1' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ if (buf != end) {
+ if (*buf == '.') {
+ ++buf;
+ if (*buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E')) {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ if (*buf == '+' || *buf == '-') {
+ ++buf;
+ }
+ if (buf == end || *buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+
+ /*
+ * For strtod termination we must ensure the tail is not valid
+ * including non-json exponent types. The simplest approach is
+ * to accept anything that could be valid json successor
+ * characters and reject end of buffer since we expect a closing
+ * '}'.
+ *
+ * The ',' is actually not safe if strtod uses a non-POSIX locale.
+ */
+ if (buf != end) {
+ switch (*buf) {
+ case ',':
+ case ':':
+ case ']':
+ case '}':
+ case ' ':
+ case '\r':
+ case '\t':
+ case '\n':
+ case '\v':
+ return buf;
+ }
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+}
+
+const char *flatcc_json_parser_double(flatcc_json_parser_t *ctx, const char *buf, const char *end, double *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_double(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_double_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
+
+const char *flatcc_json_parser_float(flatcc_json_parser_t *ctx, const char *buf, const char *end, float *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_float(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_float_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
+
+const char *flatcc_json_parser_generic_json(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char stack[FLATCC_JSON_PARSE_GENERIC_MAX_NEST];
+ char *sp, *spend;
+ const char *k;
+ flatcc_json_parser_escape_buffer_t code;
+ int more = 0;
+
+ sp = stack;
+ spend = sp + FLATCC_JSON_PARSE_GENERIC_MAX_NEST;
+
+again:
+ if (buf == end) {
+ return buf;
+ }
+ if (sp != stack && sp[-1] == '}') {
+ /* Inside an object, about to read field name. */
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ }
+ if (*buf != ':') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ switch (*buf) {
+ case '\"':
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, buf, end);
+ if (buf != end && *buf == '\"') {
+ break;
+ }
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ }
+ buf = flatcc_json_parser_string_end(ctx, buf, end);
+ break;
+ case '-':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ buf = __flatcc_json_parser_number(ctx, buf, end);
+ break;
+#if !FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ case 't': case 'f':
+ {
+ uint8_t v;
+ buf = flatcc_json_parser_bool(ctx, (k = buf), end, &v);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ break;
+ case 'n':
+ buf = flatcc_json_parser_null((k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#endif
+ case '[':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = ']';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == ']') {
+ break;
+ }
+ goto again;
+ case '{':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = '}';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == '}') {
+ break;
+ }
+ goto again;
+
+ default:
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ buf = flatcc_json_parser_skip_constant(ctx, (k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#else
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+#endif
+ }
+ while (buf != end && sp != stack) {
+ --sp;
+ if (*sp == ']') {
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ } else {
+ buf = flatcc_json_parser_object_end(ctx, buf, end, &more);
+ }
+ if (more) {
+ ++sp;
+ goto again;
+ }
+ }
+ if (buf == end && sp != stack) {
+ return flatcc_json_parser_set_error(ctx, buf, end, sp[-1] == ']' ?
+ flatcc_json_parser_error_unbalanced_array :
+ flatcc_json_parser_error_unbalanced_object);
+ }
+ /* Any ',', ']', or '}' belongs to parent context. */
+ return buf;
+}
+
+const char *flatcc_json_parser_integer(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_sign, uint64_t *value)
+{
+ uint64_t x0, x = 0;
+ const char *k;
+
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ *value_sign = *buf == '-';
+ buf += *value_sign;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ x0 = x;
+ x = x * 10 + (uint64_t)(*buf - '0');
+ if (x0 > x) {
+ return flatcc_json_parser_set_error(ctx, buf, end, *value_sign ?
+ flatcc_json_parser_error_underflow : flatcc_json_parser_error_overflow);
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ /* Give up, but don't fail the parse just yet, it might be a valid symbol. */
+ return buf;
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E' || *buf == '.')) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_float_unexpected);
+ }
+ *value = x;
+ return buf;
+}
+
+/* Array Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_uint8_vector_base64(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref, int urlsafe)
+{
+ const char *mark;
+ uint8_t *pval;
+ size_t max_len;
+ size_t decoded_len, src_len;
+ int mode;
+ int ret;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end || *buf != '\"') {
+ goto base64_failed;
+ }
+ max_len = base64_decoded_size((size_t)(buf - mark));
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) {
+ goto failed;
+ }
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, max_len))) {
+ goto failed;
+ }
+ src_len = (size_t)(buf - mark);
+ decoded_len = max_len;
+ if ((ret = base64_decode(pval, (const uint8_t *)mark, &decoded_len, &src_len, mode))) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (src_len != (size_t)(buf - mark)) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (decoded_len < max_len) {
+ if (flatcc_builder_truncate_vector(ctx->ctx, max_len - decoded_len)) {
+ goto failed;
+ }
+ }
+ if (!(*ref = flatcc_builder_end_vector(ctx->ctx))) {
+ goto failed;
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+
+base64_failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end,
+ urlsafe ? flatcc_json_parser_error_base64url : flatcc_json_parser_error_base64);
+}
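+
+/*
+ * Illustrative input (field name is hypothetical, not from a schema):
+ * a [ubyte] field with a base64 attribute accepts `"data": "SGVsbG8="`,
+ * which decodes to the 5 bytes of "Hello" in the resulting uint8
+ * vector. With `urlsafe` set, '-' and '_' are accepted in place of
+ * '+' and '/'.
+ */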
+
+const char *flatcc_json_parser_char_array(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, char *s, size_t n)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+ size_t k = 0;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ if (buf != end)
+ while (*buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end) return end;
+ k = (size_t)(buf - mark);
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ if (*buf == '\"') break;
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (buf == end) return end;
+ k = (size_t)code[0];
+ mark = code + 1;
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ }
+ if (n != 0) {
+ if (ctx->flags & flatcc_json_parser_f_reject_array_underflow) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_underflow);
+ }
+ memset(s, 0, n);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+}
+
+
+/* String Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_string(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf != end && *buf == '\"') {
+ *ref = flatcc_builder_create_string(ctx->ctx, mark, (size_t)(buf - mark));
+ } else {
+ if (flatcc_builder_start_string(ctx->ctx) ||
+ 0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (0 == flatcc_builder_append_string(ctx->ctx, code + 1, (size_t)code[0])) goto failed;
+ if (end != (buf = flatcc_json_parser_string_part(ctx, (mark = buf), end))) {
+ if (0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ }
+ }
+ *ref = flatcc_builder_end_string(ctx->ctx);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return buf;
+}
+
+/* UNIONS */
+
+/*
+ * Unions are difficult to parse because the type field may appear after
+ * the union table and because having two fields opens up many more
+ * possible error scenarios. We must store each union of a table
+ * temporarily - this cannot be in the generated table parser function
+ * because there could be many unions (about 2^15 with default voffsets)
+ * although usually there will be only a few. Nor can we store the
+ * data encoded in the existing table buffer in the builder, because
+ * we may have to remove it due to schema forwarding, and removing it
+ * messes up the table layout. We also cannot naively allocate it
+ * dynamically for
+ * performance reasons. Instead we place the temporary union data in a
+ * separate frame from the table buffer, but on a similar stack. This is
+ * called the user stack and we manage one frame per table that is known
+ * to contain unions.
+ *
+ * Even with the temporary structures in place we still cannot parse
+ * a union before we know its type. Because JSON pretty printers often
+ * sort fields alphabetically, we are likely to receive the type late,
+ * with `<union_name>_type` following `<union_name>`.
+ * To deal with this we store a backtracking pointer, parse the table
+ * generically in a first pass, and reparse the table once the type
+ * is known. This can happen recursively with nested tables containing
+ * unions, which is why we need a stack frame per table.
+ *
+ * If the type field is stored first we just record the type in the
+ * custom frame and immediately parse the table with the right type
+ * once we see it. The parse will be much faster, so we strongly
+ * recommend that flatbuffer serializers emit the type first, but we
+ * cannot require it.
+ *
+ * The actual overhead of dealing with the custom stack frame is
+ * fairly small once we get past the first custom stack allocation.
+ *
+ * We cannot update the builder before both the table and the table
+ * type have been parsed because the type might have to be ignored due
+ * to schema forwarding. Therefore the union type must be cached or
+ * reread. This happens trivially by calling the union parser with the
+ * type as argument, but it is important to be aware of this before
+ * refactoring the code.
+ *
+ * The user frame is created at table start and remains valid until
+ * table exit, but we cannot assume the pointers to the frame remain
+ * valid. Specifically we cannot use frame pointers after calling
+ * the union parser. This means the union type must be cached or reread
+ * so it can be added to the table. Because the type is passed to
+ * the union parser this caching happens automatically but it is still
+ * important to be aware that it is required.
+ *
+ * The frame reserves temporary information for all unions the table
+ * holds, enumerated 0 <= `union_index` < `union_total`,
+ * where `union_total` is a fixed, type-specific number.
+ *
+ * The `type_present` flag is needed because union types range from
+ * 0..255 and we need an extra bit to distinguish "not present" from
+ * the union type `NONE = 0`.
+ */
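+
+/*
+ * Illustrative JSON (field names are hypothetical): with the type
+ * first, as in `"weapon_type": "Sword", "weapon": {...}`, the union
+ * table is parsed directly once seen; with the reverse order,
+ * `"weapon": {...}, "weapon_type": "Sword"`, the union value is first
+ * skipped as generic JSON and later reparsed from the stored
+ * backtracking pointer once the type is known.
+ */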
+
+typedef struct {
+ const char *backtrace;
+ const char *line_start;
+ int line;
+ uint8_t type_present;
+ uint8_t type;
+ /* Union vectors: */
+ uoffset_t count;
+ size_t h_types;
+} __flatcc_json_parser_union_entry_t;
+
+typedef struct {
+ size_t union_total;
+ size_t union_count;
+ __flatcc_json_parser_union_entry_t unions[1];
+} __flatcc_json_parser_union_frame_t;
+
+const char *flatcc_json_parser_prepare_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_total, size_t *handle)
+{
+ __flatcc_json_parser_union_frame_t *f;
+
+ if (!(*handle = flatcc_builder_enter_user_frame(ctx->ctx,
+ sizeof(__flatcc_json_parser_union_frame_t) + (union_total - 1) *
+ sizeof(__flatcc_json_parser_union_entry_t)))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+ }
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, *handle);
+ /* Frames have zeroed memory. */
+ f->union_total = union_total;
+ return buf;
+}
+
+const char *flatcc_json_parser_finalize_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t handle)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+
+ if (f->union_count) {
+ buf = flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_incomplete);
+ }
+ flatcc_builder_exit_user_frame_at(ctx->ctx, handle);
+ return buf;
+}
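+
+/*
+ * Call sequence sketch (normally emitted by generated table parsers):
+ * call flatcc_json_parser_prepare_unions once with the number of union
+ * fields in the table, then flatcc_json_parser_union and
+ * flatcc_json_parser_union_type (in either field order) as members are
+ * seen, and finally flatcc_json_parser_finalize_unions, which fails if
+ * a union value was seen without its type, or a non-NONE type without
+ * its value.
+ */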
+
+const char *flatcc_json_parser_union(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = &f->unions[union_index];
+ flatcc_builder_union_ref_t uref;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ /* If we supported `table: null` we should not count it here, but we don't support it. */
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ uref.type = e->type;
+ if (e->type == 0) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_none_present);
+ }
+ --f->union_count;
+ buf = union_parser(ctx, buf, end, e->type, &uref.value);
+ if (buf != end) {
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ flatcc_builder_union_ref_t uref;
+ const char *mark;
+ int line;
+ const char *line_start;
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &e->type);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, buf, end, type_parsers, &e->type);
+ }
+ /* Only count the union if the type is not NONE. */
+ if (e->backtrace == 0) {
+ f->union_count += e->type != 0;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ /*
+ * IMPORTANT: we cannot access any value through the frame or entry
+ * pointer after calling the union parser because the parse might
+ * cause the stack to reallocate. The frame pointer would have to be
+ * reread if needed - we don't need it again here, but remember this
+ * when refactoring the code.
+ *
+ * IMPORTANT 2: Do not assign buf here. We are backtracking.
+ */
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ uref.type = e->type;
+ if (end == union_parser(ctx, e->backtrace, end, e->type, &uref.value)) {
+ return end;
+ }
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+}
+
+static const char *_parse_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t h_types, uoffset_t count,
+ flatbuffers_voffset_t id, flatcc_json_parser_union_f *union_parser)
+{
+ flatcc_builder_ref_t ref = 0, *pref;
+ utype_t *types;
+ int more;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(ctx->ctx)) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ i = 0;
+ while (more) {
+ if (i == count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ buf = union_parser(ctx, buf, end, types[i], &ref);
+ if (buf == end) {
+ return buf;
+ }
+ if (!(pref = flatcc_builder_extend_offset_vector(ctx->ctx, 1))) goto failed;
+ *pref = ref;
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ ++i;
+ }
+ if (i != count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ if (!(ref = flatcc_builder_end_offset_vector_for_unions(ctx->ctx, types))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id))) goto failed;
+ *pref = ref;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+const char *flatcc_json_parser_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ --f->union_count;
+ buf = _parse_union_vector(ctx, buf, end, e->h_types, e->count, id, union_parser);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser,
+ flatcc_json_parser_is_known_type_f accept_type)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ const char *mark;
+ int line;
+ const char *line_start;
+ int more;
+ utype_t val;
+ void *pval;
+ flatcc_builder_ref_t ref, *pref;
+ utype_t *types;
+ size_t size;
+ size_t h_types;
+ uoffset_t count;
+
+#if FLATBUFFERS_UTYPE_MAX != UINT8_MAX
+#error "Update union vector parser to support current union type definition."
+#endif
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ while (more) {
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, 1))) goto failed;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, (mark = buf), end, type_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ /* Parse unknown types as NONE */
+ if (!accept_type(val)) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_unknown)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_union);
+ }
+ val = 0;
+ }
+ flatbuffers_uint8_write_to_pe(pval, val);
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ }
+ count = (uoffset_t)flatcc_builder_vector_count(ctx->ctx);
+ e->count = count;
+ size = count * utype_size;
+ /* Store type vector so it is accessible to the table vector parser. */
+ h_types = flatcc_builder_enter_user_frame(ctx->ctx, size);
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ memcpy(types, flatcc_builder_vector_edit(ctx->ctx), size);
+ if (!((ref = flatcc_builder_end_vector(ctx->ctx)))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id - 1))) goto failed;
+ *pref = ref;
+
+ /* Restore union frame after possible invalidation due to types frame allocation. */
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ e = f->unions + union_index;
+
+ e->h_types = h_types;
+ if (e->backtrace == 0) {
+ ++f->union_count;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ /* We must not assign buf here because we are backtracking. */
+ if (end == _parse_union_vector(ctx, e->backtrace, end, h_types, count, id, union_parser)) return end;
+ /*
+ * NOTE: We do not need the user frame anymore, but if we did, it
+ * would have to be restored from its handle due to the above parse.
+ */
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+int flatcc_json_parser_table_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ flatcc_builder_buffer_flags_t builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
+
+int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ flatcc_builder_buffer_flags_t builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
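+
+/*
+ * Usage sketch (assumes a generated table parser callback `parser` and
+ * omits error handling): initialize a flatcc_builder_t with
+ * flatcc_builder_init, call
+ * flatcc_json_parser_table_as_root(B, 0, json, json_len, 0, fid, parser),
+ * and on a zero return finalize the builder to obtain the flatbuffer.
+ * A non-zero return is a flatcc_json_parser_error_* code, or -1 if the
+ * builder failed.
+ */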
diff --git a/src/runtime/json_printer.c b/src/runtime/json_printer.c
new file mode 100644
index 0000000..4ebe1c1
--- /dev/null
+++ b/src/runtime/json_printer.c
@@ -0,0 +1,1486 @@
+/*
+ * Runtime support for printing flatbuffers to JSON.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_assert.h"
+
+/*
+ * Grisu significantly improves printing speed of floating point values
+ * and also the overall printing speed when floating point values are
+ * present in non-trivial amounts. (Also applies to parsing).
+ */
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_json_printer.h"
+#include "flatcc/flatcc_identifier.h"
+
+#include "flatcc/portable/pprintint.h"
+#include "flatcc/portable/pprintfp.h"
+#include "flatcc/portable/pbase64.h"
+
+
+#define RAISE_ERROR(err) flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_##err)
+
+const char *flatcc_json_printer_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_printer_error_##no: \
+ return str;
+ FLATCC_JSON_PRINT_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+#define flatcc_json_printer_utype_enum_f flatcc_json_printer_union_type_f
+#define flatbuffers_utype_read_from_pe __flatbuffers_utype_read_from_pe
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+
+#if FLATBUFFERS_UTYPE_MAX == UINT8_MAX
+#define print_utype print_uint8
+#else
+#ifdef FLATBUFFERS_UTYPE_MIN
+#define print_utype print_int64
+#else
+#define print_utype print_uint64
+#endif
+#endif
+
+static inline const void *read_uoffset_ptr(const void *p)
+{
+ return (uint8_t *)p + __flatbuffers_uoffset_read_from_pe(p);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline const void *get_field_ptr(flatcc_json_printer_table_descriptor_t *td, int id)
+{
+ uoffset_t vo = (uoffset_t)(id + 2) * (uoffset_t)sizeof(voffset_t);
+
+ if (vo >= (uoffset_t)td->vsize) {
+ return 0;
+ }
+ vo = read_voffset(td->vtable, vo);
+ if (vo == 0) {
+ return 0;
+ }
+ return (uint8_t *)td->table + vo;
+}
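+
+/*
+ * Note on get_field_ptr above: the first two voffsets of a vtable hold
+ * the vtable size and the table size, hence the (id + 2) scaling of the
+ * field id. A zero stored voffset means the field is absent, as does an
+ * id that falls outside the vtable.
+ */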
+
+#define print_char(c) *ctx->p++ = (c)
+
+#define print_null() do { \
+ print_char('n'); \
+ print_char('u'); \
+ print_char('l'); \
+ print_char('l'); \
+} while (0)
+
+#define print_start(c) do { \
+ ++ctx->level; \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_end(c) do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ --ctx->level; \
+ print_indent(ctx); \
+ } \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_space() do { \
+ *ctx->p = ' '; \
+ ctx->p += !!ctx->indent; \
+} while (0)
+
+#define print_nl() do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ print_indent(ctx); \
+ } else { \
+ flatcc_json_printer_flush_partial(ctx); \
+ } \
+} while (0)
+
+/* Call at the end so print_end does not have to check for level. */
+#define print_last_nl() do { \
+ if (ctx->indent && ctx->level == 0) { \
+ *ctx->p++ = '\n'; \
+ } \
+ ctx->flush(ctx, 1); \
+} while (0)
+
+int flatcc_json_printer_fmt_float(char *buf, float n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_float(buf, n);
+#else
+ return print_float(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_double(char *buf, double n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_double(buf, n);
+#else
+ return print_double(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_bool(char *buf, int n)
+{
+ if (n) {
+ memcpy(buf, "true", 4);
+ return 4;
+ }
+ memcpy(buf, "false", 5);
+ return 5;
+}
+
+static void print_ex(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memcpy(ctx->p, s, k);
+ ctx->p += k;
+ s += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+}
+
+static inline void print(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ if (ctx->p + n >= ctx->pflush) {
+ print_ex(ctx, s, n);
+ } else {
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+ }
+}
+
+static void print_escape(flatcc_json_printer_t *ctx, unsigned char c)
+{
+ unsigned char x;
+
+ print_char('\\');
+ switch (c) {
+ case '"': print_char('\"'); break;
+ case '\\': print_char('\\'); break;
+ case '\t' : print_char('t'); break;
+ case '\f' : print_char('f'); break;
+ case '\r' : print_char('r'); break;
+ case '\n' : print_char('n'); break;
+ case '\b' : print_char('b'); break;
+ default:
+ print_char('u');
+ print_char('0');
+ print_char('0');
+ x = c >> 4;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ x = c & 15;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ break;
+ }
+}
+
+/*
+ * Even though we know the string length, we need to scan for escape
+ * characters. There may be embedded zeroes. Because FlatBuffer strings
+ * are always zero terminated, we assume and optimize for this.
+ *
+ * We enforce \u00xx for control characters, but not for invalid
+ * characters like 0xff - this makes it possible to handle some other
+ * codepages transparently even though the result is then formally not
+ * valid. (Formally JSON also supports UTF-16/32 little/big endian, but
+ * flatbuffers only support UTF-8 and we expect the same in JSON
+ * input/output too).
+ */
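+/*
+ * Illustrative example (not tied to any schema): the input bytes
+ * 'a', TAB, 0x01, '"', 'b' print as "a\t\u0001\"b" - control
+ * characters below 0x20 without a short escape use the \u00xx form.
+ */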
+static void print_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c;
+ size_t k;
+
+ print_char('\"');
+ for (;;) {
+ c = (unsigned char)*p;
+ while (c >= 0x20 && c != '\"' && c != '\\') {
+ c = (unsigned char)*++p;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ n -= k;
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
+
+/*
+ * Similar to print_string, but null termination is not guaranteed, and
+ * trailing nulls are stripped.
+ */
+static void print_char_array(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c = 0;
+ size_t k;
+
+ while (n > 0 && s[n - 1] == '\0') --n;
+
+ print_char('\"');
+ for (;;) {
+ while (n) {
+ c = (unsigned char)*p;
+ if (c < 0x20 || c == '\"' || c == '\\') break;
+ ++p;
+ --n;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
+
+static void print_uint8_vector_base64_object(flatcc_json_printer_t *ctx, const void *p, int mode)
+{
+ const int unpadded_mode = mode & ~base64_enc_modifier_padding;
+ size_t k, n, len;
+ const uint8_t *data;
+ size_t data_len, src_len;
+
+ data_len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ data = (const uint8_t *)p + uoffset_size;
+
+ print_char('\"');
+
+ len = base64_encoded_size(data_len, mode);
+ if (ctx->p + len >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ while (ctx->p + len > ctx->pflush) {
+ /* Every 4 output chars consume exactly 3 input bytes before final padding. */
+ k = (size_t)(ctx->pflush - ctx->p) & ~(size_t)3;
+ n = k * 3 / 4;
+ FLATCC_ASSERT(n > 0);
+ src_len = k * 3 / 4;
+ base64_encode((uint8_t *)ctx->p, data, 0, &src_len, unpadded_mode);
+ ctx->p += k;
+ data += n;
+ data_len -= n;
+ ctx->flush(ctx, 0);
+ len = base64_encoded_size(data_len, mode);
+ }
+ base64_encode((uint8_t *)ctx->p, data, 0, &data_len, mode);
+ ctx->p += len;
+ print_char('\"');
+}
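+
+/*
+ * Note on the flush handling above: when the encoded output does not
+ * fit before the flush point, the data is encoded in chunks whose
+ * output length is a multiple of 4 characters (3 input bytes per 4
+ * output characters) using the unpadded mode, so padding is only
+ * emitted by the final base64_encode call.
+ */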
+
+static void print_indent_ex(flatcc_json_printer_t *ctx, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memset(ctx->p, ' ', k);
+ ctx->p += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+}
+
+static inline void print_indent(flatcc_json_printer_t *ctx)
+{
+ size_t n = (size_t)(ctx->level * ctx->indent);
+
+ if (ctx->p + n > ctx->pflush) {
+ print_indent_ex(ctx, n);
+ } else {
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+ }
+}
+
+/*
+ * Helpers for external use - they do not do automatic pretty printing,
+ * but they do escape strings.
+ */
+void flatcc_json_printer_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print_string(ctx, s, n);
+}
+
+void flatcc_json_printer_write(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print(ctx, s, n);
+}
+
+void flatcc_json_printer_nl(flatcc_json_printer_t *ctx)
+{
+ print_char('\n');
+ flatcc_json_printer_flush_partial(ctx);
+}
+
+void flatcc_json_printer_char(flatcc_json_printer_t *ctx, char c)
+{
+ print_char(c);
+}
+
+void flatcc_json_printer_indent(flatcc_json_printer_t *ctx)
+{
+ /*
+ * This is only needed when indent is 0 but helps external users
+ * to avoid flushing when indenting.
+ */
+ print_indent(ctx);
+}
+
+void flatcc_json_printer_add_level(flatcc_json_printer_t *ctx, int n)
+{
+ ctx->level += n;
+}
+
+int flatcc_json_printer_get_level(flatcc_json_printer_t *ctx)
+{
+ return ctx->level;
+}
+
+static inline void print_symbol(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+}
+
+static inline void print_name(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ print_nl();
+ print_symbol(ctx, name, len);
+ print_char(':');
+ print_space();
+}
+
+#define __flatcc_define_json_printer_scalar(TN, T) \
+void flatcc_json_printer_ ## TN( \
+ flatcc_json_printer_t *ctx, T v) \
+{ \
+ ctx->p += print_ ## TN(v, ctx->p); \
+}
+
+__flatcc_define_json_printer_scalar(uint8, uint8_t)
+__flatcc_define_json_printer_scalar(uint16, uint16_t)
+__flatcc_define_json_printer_scalar(uint32, uint32_t)
+__flatcc_define_json_printer_scalar(uint64, uint64_t)
+__flatcc_define_json_printer_scalar(int8, int8_t)
+__flatcc_define_json_printer_scalar(int16, int16_t)
+__flatcc_define_json_printer_scalar(int32, int32_t)
+__flatcc_define_json_printer_scalar(int64, int64_t)
+__flatcc_define_json_printer_scalar(float, float)
+__flatcc_define_json_printer_scalar(double, double)
+
+void flatcc_json_printer_enum(flatcc_json_printer_t *ctx, const char *symbol, size_t len)
+{
+ print_symbol(ctx, symbol, len);
+}
+
+void flatcc_json_printer_delimit_enum_flags(flatcc_json_printer_t *ctx, int multiple)
+{
+#if FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS
+ int quote = !ctx->unquote || multiple;
+#else
+ int quote = !ctx->unquote;
+#endif
+ *ctx->p = '"';
+ ctx->p += quote;
+}
+
+void flatcc_json_printer_enum_flag(flatcc_json_printer_t *ctx, int count, const char *symbol, size_t len)
+{
+ *ctx->p = ' ';
+ ctx->p += count > 0;
+ print(ctx, symbol, len);
+}
+
+static inline void print_string_object(flatcc_json_printer_t *ctx, const void *p)
+{
+ size_t len;
+ const char *s;
+
+ len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ s = (const char *)p + uoffset_size;
+ print_string(ctx, s, len);
+}
+
+#define __define_print_scalar_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _struct_field(flatcc_json_printer_t *ctx,\
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+void flatcc_json_printer_char_array_struct_field(
+ flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len, size_t count)
+{
+ p = (void *)((size_t)p + offset);
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_char_array(ctx, p, count);
+}
+
+#define __define_print_scalar_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count) \
+{ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_scalar_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+#define __define_print_scalar_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+
+#define __define_print_enum_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_enum_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+static inline void print_table_object(flatcc_json_printer_t *ctx,
+ const void *p, int ttl, flatcc_json_printer_table_f pf)
+{
+ flatcc_json_printer_table_descriptor_t td;
+
+ if (!--ttl) {
+ flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_deep_recursion);
+ return;
+ }
+ print_start('{');
+ td.count = 0;
+ td.ttl = ttl;
+ td.table = p;
+ td.vtable = (uint8_t *)p - __flatbuffers_soffset_read_from_pe(p);
+ td.vsize = __flatbuffers_voffset_read_from_pe(td.vtable);
+ pf(ctx, &td);
+ print_end('}');
+}
+
+void flatcc_json_printer_string_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+}
+
+void flatcc_json_printer_uint8_vector_base64_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len, int urlsafe)
+{
+ const void *p = get_field_ptr(td, id);
+ int mode;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ mode |= base64_enc_modifier_padding;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_uint8_vector_base64_object(ctx, read_uoffset_ptr(p), mode);
+ }
+}
+
+#define __define_print_scalar_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ const void *p = get_field_ptr(td, id); \
+ uoffset_t count; \
+ \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+#define __define_print_enum_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ const void *p; \
+ uoffset_t count; \
+ \
+ if (ctx->noenum) { \
+ flatcc_json_printer_ ## TN ## _vector_field(ctx, td, id, name, len);\
+ return; \
+ } \
+ p = get_field_ptr(td, id); \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+__define_print_scalar_field(uint8, uint8_t)
+__define_print_scalar_field(uint16, uint16_t)
+__define_print_scalar_field(uint32, uint32_t)
+__define_print_scalar_field(uint64, uint64_t)
+__define_print_scalar_field(int8, int8_t)
+__define_print_scalar_field(int16, int16_t)
+__define_print_scalar_field(int32, int32_t)
+__define_print_scalar_field(int64, int64_t)
+__define_print_scalar_field(bool, flatbuffers_bool_t)
+__define_print_scalar_field(float, float)
+__define_print_scalar_field(double, double)
+
+__define_print_enum_field(uint8, uint8_t)
+__define_print_enum_field(uint16, uint16_t)
+__define_print_enum_field(uint32, uint32_t)
+__define_print_enum_field(uint64, uint64_t)
+__define_print_enum_field(int8, int8_t)
+__define_print_enum_field(int16, int16_t)
+__define_print_enum_field(int32, int32_t)
+__define_print_enum_field(int64, int64_t)
+__define_print_enum_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_optional_field(uint8, uint8_t)
+__define_print_scalar_optional_field(uint16, uint16_t)
+__define_print_scalar_optional_field(uint32, uint32_t)
+__define_print_scalar_optional_field(uint64, uint64_t)
+__define_print_scalar_optional_field(int8, int8_t)
+__define_print_scalar_optional_field(int16, int16_t)
+__define_print_scalar_optional_field(int32, int32_t)
+__define_print_scalar_optional_field(int64, int64_t)
+__define_print_scalar_optional_field(bool, flatbuffers_bool_t)
+__define_print_scalar_optional_field(float, float)
+__define_print_scalar_optional_field(double, double)
+
+__define_print_enum_optional_field(uint8, uint8_t)
+__define_print_enum_optional_field(uint16, uint16_t)
+__define_print_enum_optional_field(uint32, uint32_t)
+__define_print_enum_optional_field(uint64, uint64_t)
+__define_print_enum_optional_field(int8, int8_t)
+__define_print_enum_optional_field(int16, int16_t)
+__define_print_enum_optional_field(int32, int32_t)
+__define_print_enum_optional_field(int64, int64_t)
+__define_print_enum_optional_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_struct_field(uint8, uint8_t)
+__define_print_scalar_struct_field(uint16, uint16_t)
+__define_print_scalar_struct_field(uint32, uint32_t)
+__define_print_scalar_struct_field(uint64, uint64_t)
+__define_print_scalar_struct_field(int8, int8_t)
+__define_print_scalar_struct_field(int16, int16_t)
+__define_print_scalar_struct_field(int32, int32_t)
+__define_print_scalar_struct_field(int64, int64_t)
+__define_print_scalar_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_struct_field(float, float)
+__define_print_scalar_struct_field(double, double)
+
+__define_print_scalar_array_struct_field(uint8, uint8_t)
+__define_print_scalar_array_struct_field(uint16, uint16_t)
+__define_print_scalar_array_struct_field(uint32, uint32_t)
+__define_print_scalar_array_struct_field(uint64, uint64_t)
+__define_print_scalar_array_struct_field(int8, int8_t)
+__define_print_scalar_array_struct_field(int16, int16_t)
+__define_print_scalar_array_struct_field(int32, int32_t)
+__define_print_scalar_array_struct_field(int64, int64_t)
+__define_print_scalar_array_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_array_struct_field(float, float)
+__define_print_scalar_array_struct_field(double, double)
+
+__define_print_enum_array_struct_field(uint8, uint8_t)
+__define_print_enum_array_struct_field(uint16, uint16_t)
+__define_print_enum_array_struct_field(uint32, uint32_t)
+__define_print_enum_array_struct_field(uint64, uint64_t)
+__define_print_enum_array_struct_field(int8, int8_t)
+__define_print_enum_array_struct_field(int16, int16_t)
+__define_print_enum_array_struct_field(int32, int32_t)
+__define_print_enum_array_struct_field(int64, int64_t)
+__define_print_enum_array_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_enum_struct_field(uint8, uint8_t)
+__define_print_enum_struct_field(uint16, uint16_t)
+__define_print_enum_struct_field(uint32, uint32_t)
+__define_print_enum_struct_field(uint64, uint64_t)
+__define_print_enum_struct_field(int8, int8_t)
+__define_print_enum_struct_field(int16, int16_t)
+__define_print_enum_struct_field(int32, int32_t)
+__define_print_enum_struct_field(int64, int64_t)
+__define_print_enum_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_vector_field(utype, flatbuffers_utype_t)
+__define_print_scalar_vector_field(uint8, uint8_t)
+__define_print_scalar_vector_field(uint16, uint16_t)
+__define_print_scalar_vector_field(uint32, uint32_t)
+__define_print_scalar_vector_field(uint64, uint64_t)
+__define_print_scalar_vector_field(int8, int8_t)
+__define_print_scalar_vector_field(int16, int16_t)
+__define_print_scalar_vector_field(int32, int32_t)
+__define_print_scalar_vector_field(int64, int64_t)
+__define_print_scalar_vector_field(bool, flatbuffers_bool_t)
+__define_print_scalar_vector_field(float, float)
+__define_print_scalar_vector_field(double, double)
+
+__define_print_enum_vector_field(utype, flatbuffers_utype_t)
+__define_print_enum_vector_field(uint8, uint8_t)
+__define_print_enum_vector_field(uint16, uint16_t)
+__define_print_enum_vector_field(uint32, uint32_t)
+__define_print_enum_vector_field(uint64, uint64_t)
+__define_print_enum_vector_field(int8, int8_t)
+__define_print_enum_vector_field(int16, int16_t)
+__define_print_enum_vector_field(int32, int32_t)
+__define_print_enum_vector_field(int64, int64_t)
+__define_print_enum_vector_field(bool, flatbuffers_bool_t)
+
+void flatcc_json_printer_struct_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ size_t size,
+ flatcc_json_printer_struct_f pf)
+{
+ const uint8_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ p += uoffset_size;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ --count;
+ }
+ while (count--) {
+ p += size;
+ print_char(',');
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_string_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_union_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const uoffset_t *pt = get_field_ptr(td, id - 1);
+ const uoffset_t *p = get_field_ptr(td, id);
+ utype_t *types, type;
+ uoffset_t count;
+ char type_name[FLATCC_JSON_PRINT_NAME_LEN_MAX + 5];
+ flatcc_json_printer_union_descriptor_t ud;
+
+ ud.ttl = td->ttl;
+ if (len > FLATCC_JSON_PRINT_NAME_LEN_MAX) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier too long");
+ return;
+ }
+ memcpy(type_name, name, len);
+ memcpy(type_name + len, "_type", 5);
+ if (p && pt) {
+ flatcc_json_printer_utype_enum_vector_field(ctx, td, id - 1,
+ type_name, len + 5, ptf);
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ pt = read_uoffset_ptr(pt);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ ++pt;
+ types = (utype_t *)pt;
+ print_name(ctx, name, len);
+ print_start('[');
+
+ if (count) {
+ type = __flatbuffers_utype_read_from_pe(types);
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ --count;
+ }
+ while (count--) {
+ ++p;
+ ++types;
+ type = __flatbuffers_utype_read_from_pe(types);
+ print_char(',');
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+}
+
+void flatcc_json_printer_union_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const void *pt = get_field_ptr(td, id - 1);
+ const void *p = get_field_ptr(td, id);
+ utype_t type;
+ flatcc_json_printer_union_descriptor_t ud;
+
+ if (!p || !pt) {
+ return;
+ }
+ type = __flatbuffers_utype_read_from_pe(pt);
+ if (td->count++) {
+ print_char(',');
+ }
+ print_nl();
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ print(ctx, "_type", 5);
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ print_char(':');
+ print_space();
+ if (ctx->noenum) {
+ ctx->p += print_utype(type, ctx->p);
+ } else {
+ ptf(ctx, type);
+ }
+ if (type != 0) {
+ print_char(',');
+ print_name(ctx, name, len);
+ ud.ttl = td->ttl;
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ }
+}
+
+void flatcc_json_printer_union_table(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_table_f pf)
+{
+ print_table_object(ctx, read_uoffset_ptr(ud->member), ud->ttl, pf);
+}
+
+void flatcc_json_printer_union_struct(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_struct_f pf)
+{
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(ud->member));
+ print_end('}');
+}
+
+void flatcc_json_printer_union_string(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud)
+{
+ print_string_object(ctx, read_uoffset_ptr(ud->member));
+}
+
+void flatcc_json_printer_embedded_struct_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ flatcc_json_printer_struct_f pf)
+{
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, (uint8_t *)p + offset);
+ print_end('}');
+}
+
+void flatcc_json_printer_embedded_struct_array_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ size_t size, size_t count,
+ flatcc_json_printer_struct_f pf)
+{
+ size_t i;
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('[');
+ for (i = 0; i < count; ++i) {
+ if (i > 0) {
+ print_char(',');
+ }
+ print_start('{');
+ pf(ctx, (uint8_t *)p + offset + i * size);
+ print_end('}');
+ }
+ print_end(']');
+}
+
+void flatcc_json_printer_struct_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_struct_f *pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+}
+
+/*
+ * Make sure the buffer identifier is valid before assuming the rest of
+ * the buffer is sane.
+ * NOTE: this won't work with type hashes because these can contain
+ * nulls in the fid string. In that case pass a null fid to disable the
+ * check.
+ */
+static int accept_header(flatcc_json_printer_t * ctx,
+ const void *buf, size_t bufsiz, const char *fid)
+{
+ flatbuffers_thash_t id, id2 = 0;
+
+ if (buf == 0 || bufsiz < offset_size + FLATBUFFERS_IDENTIFIER_SIZE) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "buffer header too small");
+ return 0;
+ }
+ if (fid != 0) {
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe((uint8_t *)buf + offset_size);
+ if (!(id2 == 0 || id == id2)) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier mismatch");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int flatcc_json_printer_struct_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
+
+int flatcc_json_printer_table_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid, flatcc_json_printer_table_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_table_object(ctx, read_uoffset_ptr(buf), FLATCC_JSON_PRINT_MAX_LEVELS, pf);
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
+
+void flatcc_json_printer_struct_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+}
+
+void flatcc_json_printer_table_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ ++buf;
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(buf), td->ttl, pf);
+}
+
+static void __flatcc_json_printer_flush(flatcc_json_printer_t *ctx, int all)
+{
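+ /*
+ * On a partial flush past the threshold, write one flush_size block and
+ * keep the spill; otherwise write out everything currently buffered.
+ */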
+ if (!all && ctx->p >= ctx->pflush) {
+ size_t spill = (size_t)(ctx->p - ctx->pflush);
+
+ fwrite(ctx->buf, ctx->flush_size, 1, ctx->fp);
+ memcpy(ctx->buf, ctx->buf + ctx->flush_size, spill);
+ ctx->p = ctx->buf + spill;
+ ctx->total += ctx->flush_size;
+ } else {
+ size_t len = (size_t)(ctx->p - ctx->buf);
+
+ fwrite(ctx->buf, len, 1, ctx->fp);
+ ctx->p = ctx->buf;
+ ctx->total += len;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init(flatcc_json_printer_t *ctx, void *fp)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->fp = fp ? fp : stdout;
+ ctx->flush = __flatcc_json_printer_flush;
+ if (!(ctx->buf = FLATCC_JSON_PRINTER_ALLOC(FLATCC_JSON_PRINT_BUFFER_SIZE))) {
+ return -1;
+ }
+ ctx->own_buffer = 1;
+ ctx->size = FLATCC_JSON_PRINT_BUFFER_SIZE;
+ ctx->flush_size = FLATCC_JSON_PRINT_FLUSH_SIZE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ /*
+ * Make sure we have space for primitive operations such as printing numbers
+ * without having to flush.
+ */
+ FLATCC_ASSERT(ctx->flush_size + FLATCC_JSON_PRINT_RESERVE <= ctx->size);
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ (void)all;
+
+ if (ctx->p >= ctx->pflush) {
+ RAISE_ERROR(overflow);
+ ctx->total += (size_t)(ctx->p - ctx->buf);
+ ctx->p = ctx->buf;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_buffer(flatcc_json_printer_t *ctx, char *buffer, size_t buffer_size)
+{
+ FLATCC_ASSERT(buffer_size >= FLATCC_JSON_PRINT_RESERVE);
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ return -1;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = buffer;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_buffer;
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_dynamic_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ size_t len = (size_t)(ctx->p - ctx->buf);
+ char *p;
+
+ (void)all;
+
+ *ctx->p = '\0';
+ if (ctx->p < ctx->pflush) {
+ return;
+ }
+ p = FLATCC_JSON_PRINTER_REALLOC(ctx->buf, ctx->size * 2);
+ if (!p) {
+ RAISE_ERROR(overflow);
+ ctx->total += len;
+ ctx->p = ctx->buf;
+ } else {
+ ctx->size *= 2;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->buf = p;
+ ctx->p = p + len;
+ ctx->pflush = p + ctx->flush_size;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_dynamic_buffer(flatcc_json_printer_t *ctx, size_t buffer_size)
+{
+ if (buffer_size == 0) {
+ buffer_size = FLATCC_JSON_PRINT_DYN_BUFFER_SIZE;
+ }
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ buffer_size = FLATCC_JSON_PRINT_RESERVE;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = FLATCC_JSON_PRINTER_ALLOC(buffer_size);
+ ctx->own_buffer = 1;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_dynamic_buffer;
+ if (!ctx->buf) {
+ RAISE_ERROR(overflow);
+ return -1;
+ }
+ return 0;
+}
+
+void *flatcc_json_printer_get_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ ctx->flush(ctx, 0);
+ if (buffer_size) {
+ *buffer_size = (size_t)(ctx->p - ctx->buf);
+ }
+ return ctx->buf;
+}
+
+void *flatcc_json_printer_finalize_dynamic_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ void *buffer;
+
+ buffer = flatcc_json_printer_get_buffer(ctx, buffer_size);
+ memset(ctx, 0, sizeof(*ctx));
+ return buffer;
+}
+
+void flatcc_json_printer_clear(flatcc_json_printer_t *ctx)
+{
+ if (ctx->own_buffer && ctx->buf) {
+ FLATCC_JSON_PRINTER_FREE(ctx->buf);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+}
diff --git a/src/runtime/refmap.c b/src/runtime/refmap.c
new file mode 100644
index 0000000..a2497f0
--- /dev/null
+++ b/src/runtime/refmap.c
@@ -0,0 +1,248 @@
+/*
+ * Optional file that can be included in runtime library to support DAG
+ * cloning with the builder and may also be used for custom purposes
+ * standalone. See also comments in `flatcc/flatcc_builder.h`.
+ *
+ * Note that dynamic construction takes place and that large offset
+ * vectors might consume significant space if there are not many shared
+ * references. In the basic use case no allocation takes place because a
+ * few references can be held using only a small stack allocated hash
+ * table.
+ */
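+
+/*
+ * Minimal usage sketch (illustrative only), given some source pointer `src`
+ * and a builder reference `ref`:
+ *
+ *     flatcc_refmap_t refmap;
+ *
+ *     flatcc_refmap_init(&refmap);
+ *     if (flatcc_refmap_find(&refmap, src) == flatcc_refmap_not_found) {
+ *         flatcc_refmap_insert(&refmap, src, ref);
+ *     }
+ *     flatcc_refmap_clear(&refmap);
+ *
+ * The self-test at the bottom of this file shows a complete example.
+ */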
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_refmap.h"
+#include "flatcc/flatcc_alloc.h"
+#include "flatcc/flatcc_assert.h"
+
+#define _flatcc_refmap_calloc FLATCC_CALLOC
+#define _flatcc_refmap_free FLATCC_FREE
+
+/* Can be used as a primitive defense against collision attacks. */
+#ifdef FLATCC_HASH_SEED
+#define _flatcc_refmap_seed FLATCC_HASH_SEED
+#else
+#define _flatcc_refmap_seed 0x2f693b52
+#endif
+
+static inline size_t _flatcc_refmap_above_load_factor(size_t count, size_t buckets)
+{
+ static const size_t d = 256;
+ static const size_t n = (size_t)((FLATCC_REFMAP_LOAD_FACTOR) * 256.0f);
+
+ return count >= buckets * n / d;
+}
+
+#define _flatcc_refmap_probe(k, i, N) ((k + i) & N)
+
+void flatcc_refmap_clear(flatcc_refmap_t *refmap)
+{
+ if (refmap->table && refmap->table != refmap->min_table) {
+ _flatcc_refmap_free(refmap->table);
+ }
+ flatcc_refmap_init(refmap);
+}
+
+static inline size_t _flatcc_refmap_hash(const void *src)
+{
+ /* MurmurHash3 64-bit finalizer */
+ uint64_t x;
+
+ x = (uint64_t)((size_t)src) ^ _flatcc_refmap_seed;
+
+ x ^= x >> 33;
+ x *= 0xff51afd7ed558ccdULL;
+ x ^= x >> 33;
+ x *= 0xc4ceb9fe1a85ec53ULL;
+ x ^= x >> 33;
+ return (size_t)x;
+}
+
+void flatcc_refmap_reset(flatcc_refmap_t *refmap)
+{
+ if (refmap->count) {
+ memset(refmap->table, 0, sizeof(refmap->table[0]) * refmap->buckets);
+ }
+ refmap->count = 0;
+}
+
+/*
+ * Technically resize also supports shrinking which may be useful for
+ * adaptations, but the current hash table never deletes individual items.
+ */
+int flatcc_refmap_resize(flatcc_refmap_t *refmap, size_t count)
+{
+ const size_t min_buckets = sizeof(refmap->min_table) / sizeof(refmap->min_table[0]);
+
+ size_t i;
+ size_t buckets;
+ size_t buckets_old;
+ struct flatcc_refmap_item *T_old;
+
+ if (count < refmap->count) {
+ count = refmap->count;
+ }
+ buckets = min_buckets;
+
+ while (_flatcc_refmap_above_load_factor(count, buckets)) {
+ buckets *= 2;
+ }
+ if (refmap->buckets == buckets) {
+ return 0;
+ }
+ T_old = refmap->table;
+ buckets_old = refmap->buckets;
+ if (buckets == min_buckets) {
+ memset(refmap->min_table, 0, sizeof(refmap->min_table));
+ refmap->table = refmap->min_table;
+ } else {
+ refmap->table = _flatcc_refmap_calloc(buckets, sizeof(refmap->table[0]));
+ if (refmap->table == 0) {
+ refmap->table = T_old;
+ FLATCC_ASSERT(0); /* out of memory */
+ return -1;
+ }
+ }
+ refmap->buckets = buckets;
+ refmap->count = 0;
+ for (i = 0; i < buckets_old; ++i) {
+ if (T_old[i].src) {
+ flatcc_refmap_insert(refmap, T_old[i].src, T_old[i].ref);
+ }
+ }
+ if (T_old && T_old != refmap->min_table) {
+ _flatcc_refmap_free(T_old);
+ }
+ return 0;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_insert(flatcc_refmap_t *refmap, const void *src, flatcc_refmap_ref_t ref)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (src == 0) return ref;
+ if (_flatcc_refmap_above_load_factor(refmap->count, refmap->buckets)) {
+ if (flatcc_refmap_resize(refmap, refmap->count * 2)) {
+ return flatcc_refmap_not_found; /* alloc failed */
+ }
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) {
+ return T[j].ref = ref;
+ }
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ ++refmap->count;
+ T[j].src = src;
+ return T[j].ref = ref;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_find(flatcc_refmap_t *refmap, const void *src)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (refmap->count == 0) {
+ return flatcc_refmap_not_found;
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) return T[j].ref;
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ return flatcc_refmap_not_found;
+}
+
+/*
+ * To run test from project root:
+ *
+ * cc -D FLATCC_REFMAP_TEST -I include src/runtime/refmap.c -o test_refmap && ./test_refmap
+ *
+ */
+#ifdef FLATCC_REFMAP_TEST
+
+#include <stdio.h>
+
+#ifndef FLATCC_REFMAP_H
+#include "flatcc/flatcc_refmap.h"
+#endif
+
+#define test(x) do { if (!(x)) { fprintf(stderr, "%02d: refmap test failed\n", __LINE__); exit(-1); } } while (0)
+#define test_start() fprintf(stderr, "starting refmap test ...\n")
+#define test_ok() fprintf(stderr, "refmap test succeeded\n")
+
+int main()
+{
+ int i;
+ int data[1000];
+ int a = 1;
+ int b = 2;
+ int c = 3;
+ flatcc_refmap_t refmap;
+
+ flatcc_refmap_init(&refmap);
+
+ test(flatcc_refmap_find(&refmap, &a) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, 0) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &a) == 0);
+
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &a, 43) == 43);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &b, -10) == -10);
+ test(flatcc_refmap_insert(&refmap, &c, 100) == 100);
+ test(refmap.count == 3);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(flatcc_refmap_find(&refmap, &b) == -10);
+ test(flatcc_refmap_find(&refmap, &c) == 100);
+
+ test(flatcc_refmap_insert(&refmap, 0, 1000) == 1000);
+ test(flatcc_refmap_find(&refmap, 0) == 0);
+ test(refmap.count == 3);
+
+ test(flatcc_refmap_insert(&refmap, &b, 0) == 0);
+ test(flatcc_refmap_find(&refmap, &b) == 0);
+ test(refmap.count == 3);
+
+ flatcc_refmap_reset(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets > 0);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_insert(&refmap, data + i, i + 42) == i + 42);
+ }
+ test(refmap.count == 1000);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_find(&refmap, data + i) == i + 42);
+ }
+ flatcc_refmap_clear(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets == 0);
+ test_ok();
+ return 0;
+}
+
+#endif /* FLATCC_REFMAP_TEST */
diff --git a/src/runtime/verifier.c b/src/runtime/verifier.c
new file mode 100644
index 0000000..9c43bf6
--- /dev/null
+++ b/src/runtime/verifier.c
@@ -0,0 +1,617 @@
+/*
+ * Runtime support for verifying flatbuffers.
+ *
+ * Depends mutually on generated verifier functions for table types that
+ * call into this library.
+ */
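+
+/*
+ * Typical entry point (illustrative sketch; the generated names are
+ * hypothetical): a generated `MyTable_verify_as_root()` wraps a call like
+ *
+ *     flatcc_verify_table_as_root(buf, bufsiz, identifier, MyTable_verify_table);
+ *
+ * where `MyTable_verify_table` is the generated `flatcc_table_verifier_f`
+ * callback for the root table type.
+ */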
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_identifier.h"
+
+/* Customization for testing. */
+#if FLATCC_DEBUG_VERIFY
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 1
+#include <stdio.h>
+#define FLATCC_VERIFIER_ASSERT(cond, reason) \
+ if (!(cond)) { fprintf(stderr, "verifier assert: %s\n", \
+ flatcc_verify_error_string(reason)); FLATCC_ASSERT(0); return reason; }
+#endif
+
+#if FLATCC_TRACE_VERIFY
+#include <stdio.h>
+#define trace_verify(s, p) \
+ fprintf(stderr, "trace verify: %s: 0x%02x\n", (s), (unsigned)(size_t)(p));
+#else
+#define trace_verify(s, p) ((void)0)
+#endif
+
+/* The runtime library does not use the global config file. */
+
+/* This is a guideline, not an exact measure. */
+#ifndef FLATCC_VERIFIER_MAX_LEVELS
+#define FLATCC_VERIFIER_MAX_LEVELS 100
+#endif
+
+#ifndef FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 0
+#endif
+
+/*
+ * Generally a check should report whether a buffer is valid or not so
+ * that the runtime can take appropriate action rather than crash, also
+ * in debug builds, but assertions are helpful when debugging a problem.
+ *
+ * This must be compiled into the debug runtime library to take effect.
+ */
+#ifndef FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 1
+#endif
+
+/* May be redefined for logging purposes. */
+#ifndef FLATCC_VERIFIER_ASSERT
+#define FLATCC_VERIFIER_ASSERT(cond, reason) FLATCC_ASSERT(cond)
+#endif
+
+#if FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define flatcc_verify(cond, reason) if (!(cond)) { FLATCC_VERIFIER_ASSERT(cond, reason); return reason; }
+#else
+#define flatcc_verify(cond, reason) if (!(cond)) { return reason; }
+#endif
+
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+#define thash_t flatbuffers_thash_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+#define thash_size sizeof(thash_t)
+#define offset_size uoffset_size
+
+const char *flatcc_verify_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_verify_error_##no: \
+ return str;
+ FLATCC_VERIFY_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+/* `cond` may have side effects. */
+#define verify(cond, reason) do { int c = (cond); flatcc_verify(c, reason); } while(0)
+
+/*
+ * Identify checks related to runtime conditions (buffer size and
+ * alignment) as separate from those related to buffer content.
+ */
+#define verify_runtime(cond, reason) verify(cond, reason)
+
+#define check_result(x) if (x) { return (x); }
+
+#define check_field(td, id, required, base) do { \
+ int ret = get_offset_field(td, id, required, &base); \
+ if (ret || !base) { return ret; }} while (0)
+
+static inline uoffset_t read_uoffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_uoffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline thash_t read_thash_identifier(const char *identifier)
+{
+ return flatbuffers_type_hash_from_string(identifier);
+}
+
+static inline thash_t read_thash(const void *p, uoffset_t base)
+{
+ return __flatbuffers_thash_read_from_pe((uint8_t *)p + base);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline int check_header(uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+
+ /* The `k > base` rather than `k >= base` is to avoid null offsets. */
+ return k > base && k + offset_size <= end && !(k & (offset_size - 1));
+}
+
+static inline int check_aligned_header(uoffset_t end, uoffset_t base, uoffset_t offset, uint16_t align)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+ /* Alignment refers to element 0 and header must also be aligned. */
+ align = align < uoffset_size ? uoffset_size : align;
+
+ /* Note to self: the builder can also use the mask OR trick to propagate `min_align`. */
+ return k > base && k + offset_size <= end && !((k + offset_size) & ((offset_size - 1) | (align - 1u)));
+}
+
+static inline int verify_struct(uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t size, uint16_t align)
+{
+ /* Structs can have zero size so `end` is a valid value. */
+ if (offset == 0 || base + offset > end) {
+ return flatcc_verify_error_offset_out_of_range;
+ }
+ base += offset;
+ verify(base + size >= base, flatcc_verify_error_struct_size_overflow);
+ verify(base + size <= end, flatcc_verify_error_struct_out_of_range);
+ verify (!(base & (align - 1u)), flatcc_verify_error_struct_unaligned);
+ return flatcc_verify_ok;
+}
+
+static inline voffset_t read_vt_entry(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vo = (id + 2u) * sizeof(voffset_t);
+
+ /* Assumes tsize has been verified for alignment. */
+ if (vo >= td->vsize) {
+ return 0;
+ }
+ return read_voffset(td->vtable, vo);
+}
+
+static inline const void *get_field_ptr(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vte = read_vt_entry(td, id);
+ return vte ? (const uint8_t *)td->buf + td->table + vte : 0;
+}
+
+static int verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, uoffset_t size, uint16_t align)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+ uoffset_t base = (uoffset_t)(size_t)td->buf;
+
+
+ /*
+ * If the assertions below do not hold, range check assumptions break, and normal access code likely does too.
+ * We don't require voffset_size < uoffset_size, but some checks are faster if true.
+ */
+ FLATCC_ASSERT(uoffset_size >= voffset_size);
+ FLATCC_ASSERT(soffset_size == uoffset_size);
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ verify(!required, flatcc_verify_error_required_field_missing);
+ return flatcc_verify_ok;
+ }
+ trace_verify("table buffer", td->buf);
+ trace_verify("table", td->table);
+ trace_verify("id", id);
+ trace_verify("vte", vte);
+
+ /*
+ * Note that we don't add td.table to k and we test against table
+ * size, not table end or buffer end. Otherwise it would not be safe
+ * to optimize out the k <= k2 check for normal uoffset and voffset
+ * configurations.
+ */
+ k = vte;
+ k2 = k + size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ trace_verify("table + vte", vte + td->table);
+ k += td->table + base;
+ trace_verify("entry: buf + table + vte", k);
+ trace_verify("align", align);
+ trace_verify("align masked entry", k & (align - 1u));
+ verify(!(k & (align - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ return flatcc_verify_ok;
+}
+
+static int get_offset_field(flatcc_table_verifier_descriptor_t *td, voffset_t id, int required, uoffset_t *out)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ *out = 0;
+ if (required) {
+ return flatcc_verify_error_required_field_missing;
+ }
+ /* Missing, but not invalid. */
+ return flatcc_verify_ok;
+ }
+ /*
+ * Note that we don't add td.table to k and we test against table
+ * size, not table end or buffer end. Otherwise it would not be safe
+ * to optimize out the k <= k2 check for normal uoffset and voffset
+ * configurations.
+ */
+ k = vte;
+ k2 = k + offset_size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ k += td->table;
+ verify(!(k & (offset_size - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ *out = k;
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t n;
+
+ verify(check_header(end, base, offset), flatcc_verify_error_string_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ verify(end - base > n, flatcc_verify_error_string_out_of_range);
+ verify(((uint8_t *)buf + base)[n] == 0, flatcc_verify_error_string_not_zero_terminated);
+ return flatcc_verify_ok;
+}
+
+/*
+ * Keep the interface somewhat similar to flatcc_builder_start_vector.
+ * `max_count` is a precomputed division used for the overflow check on the vector length.
+ */
+static inline int verify_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t elem_size, uint16_t align, uoffset_t max_count)
+{
+ uoffset_t n;
+
+ verify(check_aligned_header(end, base, offset, align), flatcc_verify_error_vector_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ /* `n * elem_size` can overflow uncontrollably otherwise. */
+ verify(n <= max_count, flatcc_verify_error_vector_count_exceeds_representable_vector_size);
+ verify(end - base >= n * elem_size, flatcc_verify_error_vector_out_of_range);
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t i, n;
+
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_string(buf, end, base, read_uoffset(buf, base)));
+ }
+ return flatcc_verify_ok;
+}
+
+static inline int verify_table(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t vbase, vend;
+ flatcc_table_verifier_descriptor_t td;
+
+ verify((td.ttl = ttl - 1), flatcc_verify_error_max_nesting_level_reached);
+ verify(check_header(end, base, offset), flatcc_verify_error_table_header_out_of_range_or_unaligned);
+ td.table = base + offset;
+ /* Read vtable offset - it is signed, but we want it unsigned, assuming 2's complement works. */
+ vbase = td.table - read_uoffset(buf, td.table);
+ verify((soffset_t)vbase >= 0 && !(vbase & (voffset_size - 1)), flatcc_verify_error_vtable_offset_out_of_range_or_unaligned);
+ verify(vbase + voffset_size <= end, flatcc_verify_error_vtable_header_out_of_range);
+ /* Read vtable size. */
+ td.vsize = read_voffset(buf, vbase);
+ vend = vbase + td.vsize;
+ verify(vend <= end && !(td.vsize & (voffset_size - 1)), flatcc_verify_error_vtable_size_out_of_range_or_unaligned);
+ /* Optimizes away overflow check if uoffset_t is large enough. */
+ verify(uoffset_size > voffset_size || vend >= vbase, flatcc_verify_error_vtable_size_overflow);
+
+ verify(td.vsize >= 2 * voffset_size, flatcc_verify_error_vtable_header_too_small);
+ /* Read table size. */
+ td.tsize = read_voffset(buf, vbase + voffset_size);
+ verify(end - td.table >= td.tsize, flatcc_verify_error_table_size_out_of_range);
+ td.vtable = (uint8_t *)buf + vbase;
+ td.buf = buf;
+ td.end = end;
+ return tvf(&td);
+}
+
+static inline int verify_table_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t i, n;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_table(buf, end, base, read_uoffset(buf, base), ttl, tvf));
+ }
+ return flatcc_verify_ok;
+}
+
+static inline int verify_union_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ uoffset_t count, const utype_t *types, int ttl, flatcc_union_verifier_f uvf)
+{
+ uoffset_t i, n, elem;
+ flatcc_union_verifier_descriptor_t ud;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ verify(n == count, flatcc_verify_error_union_vector_length_mismatch);
+ base += offset_size;
+
+ ud.buf = buf;
+ ud.end = end;
+ ud.ttl = ttl;
+
+ for (i = 0; i < n; ++i, base += offset_size) {
+ /* Table vectors can never be null, but unions can when the type is NONE. */
+ elem = read_uoffset(buf, base);
+ if (elem == 0) {
+ verify(types[i] == 0, flatcc_verify_error_union_element_absent_without_type_NONE);
+ } else {
+ verify(types[i] != 0, flatcc_verify_error_union_element_present_with_type_NONE);
+ ud.type = types[i];
+ ud.base = base;
+ ud.offset = elem;
+ check_result(uvf(&ud));
+ }
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, size_t size, uint16_t align)
+{
+ check_result(verify_field(td, id, 0, (uoffset_t)size, align));
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_string_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
+
+int flatcc_verify_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, size_t elem_size, uint16_t align, size_t max_count)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ (uoffset_t)elem_size, align, (uoffset_t)max_count);
+}
+
+int flatcc_verify_string_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string_vector(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
+
+int flatcc_verify_table_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_table_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table_vector(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_union_table(flatcc_union_verifier_descriptor_t *ud, flatcc_table_verifier_f *tvf)
+{
+ return verify_table(ud->buf, ud->end, ud->base, ud->offset, ud->ttl, tvf);
+}
+
+int flatcc_verify_union_struct(flatcc_union_verifier_descriptor_t *ud, size_t size, uint16_t align)
+{
+ return verify_struct(ud->end, ud->base, ud->offset, (uoffset_t)size, align);
+}
+
+int flatcc_verify_union_string(flatcc_union_verifier_descriptor_t *ud)
+{
+ return verify_string(ud->buf, ud->end, ud->base, ud->offset);
+}
+
+int flatcc_verify_buffer_header(const void *buf, size_t bufsiz, const char *fid)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+ * Even if we specify no fid, the user might later. Therefore
+ * require space for it. Not all buffer generators will take this
+ * into account, so it is possible to fail an otherwise valid buffer
+ * - but such buffers aren't safe.
+ */
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (fid != 0) {
+ id2 = read_thash_identifier(fid);
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_typed_buffer_header(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+ * Even if we specify no fid, the user might later. Therefore
+ * require space for it. Not all buffer generators will take this
+ * into account, so it is possible to fail an otherwise valid buffer
+ * - but such buffers aren't safe.
+ */
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (thash != 0) {
+ id2 = thash;
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_struct_as_root(const void *buf, size_t bufsiz, const char *fid, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_struct_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, bufsiz, thash));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_table_as_root(const void *buf, size_t bufsiz, const char *fid, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_buffer_header(buf, (uoffset_t)bufsiz, fid));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
+
+int flatcc_verify_table_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, (uoffset_t)bufsiz, thash));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
+
+int flatcc_verify_struct_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid, size_t size, uint16_t align)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ check_result(flatcc_verify_vector_field(td, id, required, align, 1, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ return flatcc_verify_struct_as_root(buf, bufsiz, fid, size, align);
+}
+
+int flatcc_verify_table_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid,
+ uint16_t align, flatcc_table_verifier_f tvf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ check_result(flatcc_verify_vector_field(td, id, required, align, 1, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ /*
+ * Don't verify the nested buffer's identifier - the information is difficult
+ * to get and might not be what is desired anyway. The user can do it later.
+ */
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_table(buf, bufsiz, 0, read_uoffset(buf, 0), td->ttl, tvf);
+}
+
+int flatcc_verify_union_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uint8_t *type;
+ uoffset_t base;
+ flatcc_union_verifier_descriptor_t ud;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ vte_table = read_vt_entry(td, id);
+ verify(vte_table == 0, flatcc_verify_error_union_cannot_have_a_table_without_a_type);
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_field);
+ return flatcc_verify_ok;
+ }
+ /* No need to check required here. */
+ check_result(verify_field(td, id - 1, 0, 1, 1));
+ /* Only now is it safe to read the type. */
+ vte_table = read_vt_entry(td, id);
+ type = (const uint8_t *)td->buf + td->table + vte_type;
+ verify(*type || vte_table == 0, flatcc_verify_error_union_type_NONE_cannot_have_a_value);
+
+ if (*type == 0) {
+ return flatcc_verify_ok;
+ }
+ check_field(td, id, required, base);
+ ud.buf = td->buf;
+ ud.end = td->end;
+ ud.ttl = td->ttl;
+ ud.base = base;
+ ud.offset = read_uoffset(td->buf, base);
+ ud.type = *type;
+ return uvf(&ud);
+}
+
+int flatcc_verify_union_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uoffset_t *buf;
+ const utype_t *types;
+ uoffset_t count, base;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ if (0 == (vte_table = read_vt_entry(td, id))) {
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_vector_field);
+ }
+ }
+ check_result(flatcc_verify_vector_field(td, id - 1, required,
+ utype_size, utype_size, FLATBUFFERS_COUNT_MAX(utype_size)));
+ if (0 == (buf = get_field_ptr(td, id - 1))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ count = read_uoffset(buf, 0);
+ ++buf;
+ types = (utype_t *)buf;
+
+ check_field(td, id, required, base);
+ return verify_union_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ count, types, td->ttl, uvf);
+}
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
new file mode 100644
index 0000000..59a6132
--- /dev/null
+++ b/test/CMakeLists.txt
@@ -0,0 +1,27 @@
+# Note: some files under source control may be tested with binary comparison.
+# Under git such files are protected with the `.gitattributes` file.
+# Incorrect line endings may lead to failed tests.
+if (FLATCC_TEST)
+if (FLATCC_CXX_TEST)
+ # This test is primarily for making sure C++ users can use
+ # generated FlatCC code. It fails for pre GCC 4.7 C++ because both
+ # stdint.h and stdalign.h are not sufficiently supported and it
+ # is not worth attempting to support in flatcc/portable.
+ add_subdirectory(monster_test_cpp)
+endif()
+add_subdirectory(cgen_test)
+add_subdirectory(monster_test)
+add_subdirectory(monster_test_solo)
+add_subdirectory(monster_test_concat)
+add_subdirectory(monster_test_prefix)
+add_subdirectory(flatc_compat)
+add_subdirectory(json_test)
+add_subdirectory(emit_test)
+add_subdirectory(load_test)
+add_subdirectory(optional_scalars_test)
+# Reflection can break during development, so it is necessary
+# to disable it until new reflection code generates cleanly.
+if (FLATCC_REFLECTION)
+ add_subdirectory(reflection_test)
+endif()
+endif()
diff --git a/test/README.md b/test/README.md
new file mode 100644
index 0000000..4c0b485
--- /dev/null
+++ b/test/README.md
@@ -0,0 +1,19 @@
+NOTE: shell scripts driven by flatcc/test/test.sh have been ported to CMake.
+Use flatcc/scripts/test.sh to drive the CMake tests.
+
+Run `leakcheck.sh` and `leakcheck-full.sh` for memory checks.
+
+To install valgrind on OS-X Yosemite use `brew install --HEAD valgrind`
+
+For decoding valgrind error messages:
+<http://derickrethans.nl/valgrind-null.html>
+
+clang has a built-in memory check, but only for `x86_64 Linux`:
+<http://clang.llvm.org/docs/LeakSanitizer.html>
+
+On OS-X Yosemite, where valgrind isn't officially supported, a few
+spurious uninitialized memory access errors are reported when printing
+the file extension in `codegen_c.c` and in the equivalent builder.
+After inspection, nothing suggests this is an actual bug - more
+likely it relates to a strnlen optimization in fprintf `"%.*s"` syntax
+that valgrind doesn't catch.
diff --git a/test/benchmark/README.md b/test/benchmark/README.md
new file mode 100644
index 0000000..c98dbbb
--- /dev/null
+++ b/test/benchmark/README.md
@@ -0,0 +1,68 @@
+# FlatBench
+
+This is based on the Google FlatBuffer benchmark schema, but the
+benchmark itself is independent and not directly comparable, although
+roughly the same operations are being executed.
+
+The `benchflatc` folder contains C++ headers and code generated by Google's
+`flatc` compiler while `benchflatcc` contains material from this project.
+
+The `benchraw` folder contains structs similar to those used in Google's
+benchmark, but again the benchmark isn't directly comparable.
+
+It should be noted that allocation strategies differ. The C++ builder
+is constructed on each build iteration whereas the C version resets a
+reusable builder instead, as sketched below. The benchmark is designed
+such that the C++ version could do the same if the builder supports it.
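+
+For reference, the C-side reset pattern is roughly the following (a minimal
+sketch against the public `flatcc_builder` API; the actual benchmark code
+lives in the `benchflatcc` folder):
+
+    flatcc_builder_t builder;
+
+    flatcc_builder_init(&builder);
+    for (i = 0; i < iterations; ++i) {
+        flatcc_builder_reset(&builder);  /* reuse allocations from the previous iteration */
+        /* ... encode and decode one buffer ... */
+    }
+    flatcc_builder_clear(&builder);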
+
+## Execution
+
+Build and run each benchmark individually:
+
+ benchmark/benchflatc/run.sh
+ benchmark/benchflatcc/run.sh
+ benchmark/benchraw/run.sh
+ benchmark/benchflatccjson/run.sh
+
+Note that each benchmark runs in both debug and optimized versions!
+
+
+## Environment
+
+The benchmarks are designed for a `*nix` environment.
+
+- A C compiler named `cc` supporting -std=c11 is required for flatcc.
+- A C++ compiler named `c++` supporting -std=c++11 is required for
+ flatc.
+- A C compiler named `cc` supporting <stdint.h> is required for raw benchmark.
+- Test is driven by a shell script.
+
+The time measurements in `elapsed.h` ought to work with Windows, but it
+has not been tested. The tests could be compiled for Windows with a
+separate set of `.bat` files that adapt to the relevant compiler settings
+(not provided).
+
+
+## Output
+
+The source and generated files and compiled binaries are placed in a
+dedicated folder under:
+
+ build/tmp/test/benchmark/
+
+Only flatcc includes files from the containing project - other
+benchmarks copy any relevant files into place.
+
+The optimized flatc C++ benchmark is 24K vs 35K for the flatcc C version.
+
+
+## JSON numeric conversion
+
+The JSON printer benchmark is significantly impacted by floating point
+conversion performance. Using the grisu3 algorithm, printing speed more
+than doubles compared to the sprintf "%.17g" method with clang and glibc.
+Parsing, on the other hand, slows down slightly because floats are always
+printed as double, which increases the JSON text from 700 to 722 bytes.
+For comparison, RapidJSON also only supports double precision because the
+JSON spec does not specifically mention precision.
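+
+The baseline conversion referred to above is roughly the following
+(a hypothetical snippet, not taken from the benchmark source):
+
+    char buf[32];
+    /* 17 significant digits are enough to round-trip any double. */
+    int n = sprintf(buf, "%.17g", value);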
diff --git a/test/benchmark/benchall.sh b/test/benchmark/benchall.sh
new file mode 100755
index 0000000..87d6983
--- /dev/null
+++ b/test/benchmark/benchall.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd `dirname $0`
+
+echo "running all benchmarks (raw, flatc C++, flatcc C)"
+
+echo "building and benchmarking raw structs"
+benchraw/run.sh
+echo "building and benchmarking flatc generated C++"
+benchflatc/run.sh
+echo "building and benchmarking flatcc generated C"
+benchflatcc/run.sh
+echo "building and benchmarking flatcc json generated C"
+benchflatccjson/run.sh
diff --git a/test/benchmark/benchflatc/benchflatc.cpp b/test/benchmark/benchflatc/benchflatc.cpp
new file mode 100644
index 0000000..ae24abd
--- /dev/null
+++ b/test/benchmark/benchflatc/benchflatc.cpp
@@ -0,0 +1,70 @@
+#define BENCH_TITLE "flatc for C++"
+
+#define BENCHMARK_BUFSIZ 1000
+#define DECLARE_BENCHMARK(BM)\
+ void *BM = 0
+#define CLEAR_BENCHMARK(BM)
+
+#include <string.h>
+#include "flatbench_generated.h"
+
+using namespace flatbuffers;
+using namespace benchfb;
+
+/* The builder is created each time - perhaps fbb can be reused somehow? */
+int encode(void *bench, void *buffer, size_t *size)
+{
+ const int veclen = 3;
+ Offset<FooBar> vec[veclen];
+ FlatBufferBuilder fbb;
+
+ (void)bench;
+
+ for (int i = 0; i < veclen; i++) {
+ // We add + i to not make these identical copies for a more realistic
+ // compression test.
+ auto const &foo = Foo(0xABADCAFEABADCAFE + i, 10000 + i, '@' + i, 1000000 + i);
+ auto const &bar = Bar(foo, 123456 + i, 3.14159f + i, 10000 + i);
+ auto name = fbb.CreateString("Hello, World!");
+ auto foobar = CreateFooBar(fbb, &bar, name, 3.1415432432445543543 + i, '!' + i);
+ vec[i] = foobar;
+ }
+ auto location = fbb.CreateString("https://www.example.com/myurl/");
+ auto foobarvec = fbb.CreateVector(vec, veclen);
+ auto foobarcontainer = CreateFooBarContainer(fbb, foobarvec, true, Enum_Bananas, location);
+ fbb.Finish(foobarcontainer);
+ if (*size < fbb.GetSize()) {
+ return -1;
+ }
+ *size = fbb.GetSize();
+ memcpy(buffer, fbb.GetBufferPointer(), *size);
+ return 0;
+}
+
+int64_t decode(void *bench, void *buffer, size_t size, int64_t sum)
+{
+ auto foobarcontainer = GetFooBarContainer(buffer);
+
+ (void)bench;
+ sum += foobarcontainer->initialized();
+ sum += foobarcontainer->location()->Length();
+ sum += foobarcontainer->fruit();
+ for (unsigned int i = 0; i < foobarcontainer->list()->Length(); i++) {
+ auto foobar = foobarcontainer->list()->Get(i);
+ sum += foobar->name()->Length();
+ sum += foobar->postfix();
+ sum += static_cast<int64_t>(foobar->rating());
+ auto bar = foobar->sibling();
+ sum += static_cast<int64_t>(bar->ratio());
+ sum += bar->size();
+ sum += bar->time();
+ auto &foo = bar->parent();
+ sum += foo.count();
+ sum += foo.id();
+ sum += foo.length();
+ sum += foo.prefix();
+ }
+ return sum + 2 * sum;
+}
+
+#include "benchmain.h"
diff --git a/test/benchmark/benchflatc/flatbench_generated.h b/test/benchmark/benchflatc/flatbench_generated.h
new file mode 100644
index 0000000..0b2abc5
--- /dev/null
+++ b/test/benchmark/benchflatc/flatbench_generated.h
@@ -0,0 +1,166 @@
+// automatically generated by the FlatBuffers compiler, do not modify
+
+#ifndef FLATBUFFERS_GENERATED_FLATBENCH_BENCHFB_H_
+#define FLATBUFFERS_GENERATED_FLATBENCH_BENCHFB_H_
+
+#include "flatbuffers/flatbuffers.h"
+
+
+namespace benchfb {
+
+struct Foo;
+struct Bar;
+struct FooBar;
+struct FooBarContainer;
+
+enum Enum {
+ Enum_Apples = 0,
+ Enum_Pears = 1,
+ Enum_Bananas = 2
+};
+
+inline const char **EnumNamesEnum() {
+ static const char *names[] = { "Apples", "Pears", "Bananas", nullptr };
+ return names;
+}
+
+inline const char *EnumNameEnum(Enum e) { return EnumNamesEnum()[static_cast<int>(e)]; }
+
+MANUALLY_ALIGNED_STRUCT(8) Foo FLATBUFFERS_FINAL_CLASS {
+ private:
+ uint64_t id_;
+ int16_t count_;
+ int8_t prefix_;
+ int8_t __padding0;
+ uint32_t length_;
+
+ public:
+ Foo(uint64_t id, int16_t count, int8_t prefix, uint32_t length)
+ : id_(flatbuffers::EndianScalar(id)), count_(flatbuffers::EndianScalar(count)), prefix_(flatbuffers::EndianScalar(prefix)), __padding0(0), length_(flatbuffers::EndianScalar(length)) { (void)__padding0; }
+
+ uint64_t id() const { return flatbuffers::EndianScalar(id_); }
+ int16_t count() const { return flatbuffers::EndianScalar(count_); }
+ int8_t prefix() const { return flatbuffers::EndianScalar(prefix_); }
+ uint32_t length() const { return flatbuffers::EndianScalar(length_); }
+};
+STRUCT_END(Foo, 16);
+
+MANUALLY_ALIGNED_STRUCT(8) Bar FLATBUFFERS_FINAL_CLASS {
+ private:
+ Foo parent_;
+ int32_t time_;
+ float ratio_;
+ uint16_t size_;
+ int16_t __padding0;
+ int32_t __padding1;
+
+ public:
+ Bar(const Foo &parent, int32_t time, float ratio, uint16_t size)
+ : parent_(parent), time_(flatbuffers::EndianScalar(time)), ratio_(flatbuffers::EndianScalar(ratio)), size_(flatbuffers::EndianScalar(size)), __padding0(0), __padding1(0) { (void)__padding0; (void)__padding1; }
+
+ const Foo &parent() const { return parent_; }
+ int32_t time() const { return flatbuffers::EndianScalar(time_); }
+ float ratio() const { return flatbuffers::EndianScalar(ratio_); }
+ uint16_t size() const { return flatbuffers::EndianScalar(size_); }
+};
+STRUCT_END(Bar, 32);
+
+struct FooBar FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ const Bar *sibling() const { return GetStruct<const Bar *>(4); }
+ const flatbuffers::String *name() const { return GetPointer<const flatbuffers::String *>(6); }
+ double rating() const { return GetField<double>(8, 0); }
+ uint8_t postfix() const { return GetField<uint8_t>(10, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<Bar>(verifier, 4 /* sibling */) &&
+ VerifyField<flatbuffers::uoffset_t>(verifier, 6 /* name */) &&
+ verifier.Verify(name()) &&
+ VerifyField<double>(verifier, 8 /* rating */) &&
+ VerifyField<uint8_t>(verifier, 10 /* postfix */) &&
+ verifier.EndTable();
+ }
+};
+
+struct FooBarBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_sibling(const Bar *sibling) { fbb_.AddStruct(4, sibling); }
+ void add_name(flatbuffers::Offset<flatbuffers::String> name) { fbb_.AddOffset(6, name); }
+ void add_rating(double rating) { fbb_.AddElement<double>(8, rating, 0); }
+ void add_postfix(uint8_t postfix) { fbb_.AddElement<uint8_t>(10, postfix, 0); }
+ FooBarBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
+ FooBarBuilder &operator=(const FooBarBuilder &);
+ flatbuffers::Offset<FooBar> Finish() {
+ auto o = flatbuffers::Offset<FooBar>(fbb_.EndTable(start_, 4));
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FooBar> CreateFooBar(flatbuffers::FlatBufferBuilder &_fbb,
+ const Bar *sibling = 0,
+ flatbuffers::Offset<flatbuffers::String> name = 0,
+ double rating = 0,
+ uint8_t postfix = 0) {
+ FooBarBuilder builder_(_fbb);
+ builder_.add_rating(rating);
+ builder_.add_name(name);
+ builder_.add_sibling(sibling);
+ builder_.add_postfix(postfix);
+ return builder_.Finish();
+}
+
+struct FooBarContainer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ const flatbuffers::Vector<flatbuffers::Offset<FooBar>> *list() const { return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<FooBar>> *>(4); }
+ uint8_t initialized() const { return GetField<uint8_t>(6, 0); }
+ Enum fruit() const { return static_cast<Enum>(GetField<int16_t>(8, 0)); }
+ const flatbuffers::String *location() const { return GetPointer<const flatbuffers::String *>(10); }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<flatbuffers::uoffset_t>(verifier, 4 /* list */) &&
+ verifier.Verify(list()) &&
+ verifier.VerifyVectorOfTables(list()) &&
+ VerifyField<uint8_t>(verifier, 6 /* initialized */) &&
+ VerifyField<int16_t>(verifier, 8 /* fruit */) &&
+ VerifyField<flatbuffers::uoffset_t>(verifier, 10 /* location */) &&
+ verifier.Verify(location()) &&
+ verifier.EndTable();
+ }
+};
+
+struct FooBarContainerBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_list(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FooBar>>> list) { fbb_.AddOffset(4, list); }
+ void add_initialized(uint8_t initialized) { fbb_.AddElement<uint8_t>(6, initialized, 0); }
+ void add_fruit(Enum fruit) { fbb_.AddElement<int16_t>(8, static_cast<int16_t>(fruit), 0); }
+ void add_location(flatbuffers::Offset<flatbuffers::String> location) { fbb_.AddOffset(10, location); }
+ FooBarContainerBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); }
+ FooBarContainerBuilder &operator=(const FooBarContainerBuilder &);
+ flatbuffers::Offset<FooBarContainer> Finish() {
+ auto o = flatbuffers::Offset<FooBarContainer>(fbb_.EndTable(start_, 4));
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FooBarContainer> CreateFooBarContainer(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FooBar>>> list = 0,
+ uint8_t initialized = 0,
+ Enum fruit = Enum_Apples,
+ flatbuffers::Offset<flatbuffers::String> location = 0) {
+ FooBarContainerBuilder builder_(_fbb);
+ builder_.add_location(location);
+ builder_.add_list(list);
+ builder_.add_fruit(fruit);
+ builder_.add_initialized(initialized);
+ return builder_.Finish();
+}
+
+inline const benchfb::FooBarContainer *GetFooBarContainer(const void *buf) { return flatbuffers::GetRoot<benchfb::FooBarContainer>(buf); }
+
+inline bool VerifyFooBarContainerBuffer(flatbuffers::Verifier &verifier) { return verifier.VerifyBuffer<benchfb::FooBarContainer>(); }
+
+inline void FinishFooBarContainerBuffer(flatbuffers::FlatBufferBuilder &fbb, flatbuffers::Offset<benchfb::FooBarContainer> root) { fbb.Finish(root); }
+
+} // namespace benchfb
+
+#endif // FLATBUFFERS_GENERATED_FLATBENCH_BENCHFB_H_
diff --git a/test/benchmark/benchflatc/flatbuffers/flatbuffers.h b/test/benchmark/benchflatc/flatbuffers/flatbuffers.h
new file mode 100644
index 0000000..3482cbe
--- /dev/null
+++ b/test/benchmark/benchflatc/flatbuffers/flatbuffers.h
@@ -0,0 +1,1189 @@
+/*
+ * Copyright 2014 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_H_
+#define FLATBUFFERS_H_
+
+#include <assert.h>
+
+#include <cstdint>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <type_traits>
+#include <vector>
+#include <algorithm>
+#include <functional>
+#include <memory>
+
+#if __cplusplus <= 199711L && \
+ (!defined(_MSC_VER) || _MSC_VER < 1600) && \
+ (!defined(__GNUC__) || \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40603))
+ #error A C++11 compatible compiler is required for FlatBuffers.
+ #error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
+#endif
+
+// The wire format uses a little endian encoding (since that's efficient for
+// the common platforms).
+#if !defined(FLATBUFFERS_LITTLEENDIAN)
+ #if defined(__GNUC__) || defined(__clang__)
+ #ifdef __BIG_ENDIAN__
+ #define FLATBUFFERS_LITTLEENDIAN 0
+ #else
+ #define FLATBUFFERS_LITTLEENDIAN 1
+ #endif // __BIG_ENDIAN__
+ #elif defined(_MSC_VER)
+ #if defined(_M_PPC)
+ #define FLATBUFFERS_LITTLEENDIAN 0
+ #else
+ #define FLATBUFFERS_LITTLEENDIAN 1
+ #endif
+ #else
+ #error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN.
+ #endif
+#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
+
+#define FLATBUFFERS_VERSION_MAJOR 1
+#define FLATBUFFERS_VERSION_MINOR 0
+#define FLATBUFFERS_VERSION_REVISION 0
+#define FLATBUFFERS_STRING_EXPAND(X) #X
+#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
+
+#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \
+ (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407))
+ #define FLATBUFFERS_FINAL_CLASS final
+#else
+ #define FLATBUFFERS_FINAL_CLASS
+#endif
+
+namespace flatbuffers {
+
+// Our default offset / size type, 32bit on purpose on 64bit systems.
+// Also, using a consistent offset type maintains compatibility of serialized
+// offset values between 32bit and 64bit systems.
+typedef uint32_t uoffset_t;
+
+// Signed offsets for references that can go in both directions.
+typedef int32_t soffset_t;
+
+// Offset/index used in v-tables, can be changed to uint8_t in
+// format forks to save a bit of space if desired.
+typedef uint16_t voffset_t;
+
+typedef uintmax_t largest_scalar_t;
+
+// Pointer to relinquished memory.
+typedef std::unique_ptr<uint8_t, std::function<void(uint8_t * /* unused */)>>
+ unique_ptr_t;
+
+// Wrapper for uoffset_t to allow safe template specialization.
+template<typename T> struct Offset {
+ uoffset_t o;
+ Offset() : o(0) {}
+ Offset(uoffset_t _o) : o(_o) {}
+ Offset<void> Union() const { return Offset<void>(o); }
+};
+
+inline void EndianCheck() {
+ int endiantest = 1;
+ // If this fails, see FLATBUFFERS_LITTLEENDIAN above.
+ assert(*reinterpret_cast<char *>(&endiantest) == FLATBUFFERS_LITTLEENDIAN);
+ (void)endiantest;
+}
+
+template<typename T> T EndianScalar(T t) {
+ #if FLATBUFFERS_LITTLEENDIAN
+ return t;
+ #else
+ #if defined(_MSC_VER)
+ #pragma push_macro("__builtin_bswap16")
+ #pragma push_macro("__builtin_bswap32")
+ #pragma push_macro("__builtin_bswap64")
+ #define __builtin_bswap16 _byteswap_ushort
+ #define __builtin_bswap32 _byteswap_ulong
+ #define __builtin_bswap64 _byteswap_uint64
+ #endif
+ // If you're on the few remaining big endian platforms, we make the bold
+ // assumption you're also on gcc/clang, and thus have bswap intrinsics:
+ if (sizeof(T) == 1) { // Compile-time if-then's.
+ return t;
+ } else if (sizeof(T) == 2) {
+ auto r = __builtin_bswap16(*reinterpret_cast<uint16_t *>(&t));
+ return *reinterpret_cast<T *>(&r);
+ } else if (sizeof(T) == 4) {
+ auto r = __builtin_bswap32(*reinterpret_cast<uint32_t *>(&t));
+ return *reinterpret_cast<T *>(&r);
+ } else if (sizeof(T) == 8) {
+ auto r = __builtin_bswap64(*reinterpret_cast<uint64_t *>(&t));
+ return *reinterpret_cast<T *>(&r);
+ } else {
+ assert(0);
+ }
+ #if defined(_MSC_VER)
+ #pragma pop_macro("__builtin_bswap16")
+ #pragma pop_macro("__builtin_bswap32")
+ #pragma pop_macro("__builtin_bswap64")
+ #endif
+ #endif
+}
+
+template<typename T> T ReadScalar(const void *p) {
+ return EndianScalar(*reinterpret_cast<const T *>(p));
+}
+
+template<typename T> void WriteScalar(void *p, T t) {
+ *reinterpret_cast<T *>(p) = EndianScalar(t);
+}
+
+template<typename T> size_t AlignOf() {
+ #ifdef _MSC_VER
+ return __alignof(T);
+ #else
+ return alignof(T);
+ #endif
+}
+
+// When we read serialized data from memory, in the case of most scalars,
+// we want to just read T, but in the case of Offset, we want to actually
+// perform the indirection and return a pointer.
+// The template specialization below does just that.
+// It is wrapped in a struct since function templates can't overload on the
+// return type like this.
+// The typedef is for the convenience of callers of this function
+// (avoiding the need for a trailing return decltype)
+template<typename T> struct IndirectHelper {
+ typedef T return_type;
+ static const size_t element_stride = sizeof(T);
+ static return_type Read(const uint8_t *p, uoffset_t i) {
+ return EndianScalar((reinterpret_cast<const T *>(p))[i]);
+ }
+};
+template<typename T> struct IndirectHelper<Offset<T>> {
+ typedef const T *return_type;
+ static const size_t element_stride = sizeof(uoffset_t);
+ static return_type Read(const uint8_t *p, uoffset_t i) {
+ p += i * sizeof(uoffset_t);
+ return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
+ }
+};
+template<typename T> struct IndirectHelper<const T *> {
+ typedef const T *return_type;
+ static const size_t element_stride = sizeof(T);
+ static return_type Read(const uint8_t *p, uoffset_t i) {
+ return reinterpret_cast<const T *>(p + i * sizeof(T));
+ }
+};
+
+// An STL compatible iterator implementation for Vector below, effectively
+// calling Get() for every element.
+template<typename T, bool bConst>
+struct VectorIterator : public
+ std::iterator < std::input_iterator_tag,
+ typename std::conditional < bConst,
+ const typename IndirectHelper<T>::return_type,
+ typename IndirectHelper<T>::return_type > ::type, uoffset_t > {
+
+ typedef std::iterator<std::input_iterator_tag,
+ typename std::conditional<bConst,
+ const typename IndirectHelper<T>::return_type,
+ typename IndirectHelper<T>::return_type>::type, uoffset_t> super_type;
+
+public:
+ VectorIterator(const uint8_t *data, uoffset_t i) :
+ data_(data + IndirectHelper<T>::element_stride * i) {};
+ VectorIterator(const VectorIterator &other) : data_(other.data_) {}
+ VectorIterator(VectorIterator &&other) : data_(std::move(other.data_)) {}
+
+ VectorIterator &operator=(const VectorIterator &other) {
+ data_ = other.data_;
+ return *this;
+ }
+
+ VectorIterator &operator=(VectorIterator &&other) {
+ data_ = other.data_;
+ return *this;
+ }
+
+ bool operator==(const VectorIterator& other) const {
+ return data_ == other.data_;
+ }
+
+ bool operator!=(const VectorIterator& other) const {
+ return data_ != other.data_;
+ }
+
+ ptrdiff_t operator-(const VectorIterator& other) const {
+ return (data_ - other.data_) / IndirectHelper<T>::element_stride;
+ }
+
+ typename super_type::value_type operator *() const {
+ return IndirectHelper<T>::Read(data_, 0);
+ }
+
+ typename super_type::value_type operator->() const {
+ return IndirectHelper<T>::Read(data_, 0);
+ }
+
+ VectorIterator &operator++() {
+ data_ += IndirectHelper<T>::element_stride;
+ return *this;
+ }
+
+ VectorIterator operator++(int) {
+    VectorIterator temp(data_, 0);
+ data_ += IndirectHelper<T>::element_stride;
+ return temp;
+ }
+
+private:
+ const uint8_t *data_;
+};
+
+// This is used as a helper type for accessing vectors.
+// Vector::data() assumes the vector elements start after the length field.
+template<typename T> class Vector {
+public:
+ typedef VectorIterator<T, false> iterator;
+ typedef VectorIterator<T, true> const_iterator;
+
+ uoffset_t size() const { return EndianScalar(length_); }
+
+ // Deprecated: use size(). Here for backwards compatibility.
+ uoffset_t Length() const { return size(); }
+
+ typedef typename IndirectHelper<T>::return_type return_type;
+
+ return_type Get(uoffset_t i) const {
+ assert(i < size());
+ return IndirectHelper<T>::Read(Data(), i);
+ }
+
+ return_type operator[](uoffset_t i) const { return Get(i); }
+
+ // If this is a Vector of enums, T will be its storage type, not the enum
+  // type. This function makes it convenient to retrieve values with enum
+ // type E.
+ template<typename E> E GetEnum(uoffset_t i) const {
+ return static_cast<E>(Get(i));
+ }
+
+ const void *GetStructFromOffset(size_t o) const {
+ return reinterpret_cast<const void *>(Data() + o);
+ }
+
+ iterator begin() { return iterator(Data(), 0); }
+ const_iterator begin() const { return const_iterator(Data(), 0); }
+
+ iterator end() { return iterator(Data(), size()); }
+ const_iterator end() const { return const_iterator(Data(), size()); }
+
+ // Change elements if you have a non-const pointer to this object.
+ // Scalars only. See reflection_reader.h, and the documentation.
+ void Mutate(uoffset_t i, T val) {
+ assert(i < size());
+ WriteScalar(data() + i, val);
+ }
+
+ // Change an element of a vector of tables (or strings).
+ // "val" points to the new table/string, as you can obtain from
+ // e.g. reflection::AddFlatBuffer().
+ void MutateOffset(uoffset_t i, const uint8_t *val) {
+ assert(i < size());
+ assert(sizeof(T) == sizeof(uoffset_t));
+ WriteScalar(data() + i, val - (Data() + i * sizeof(uoffset_t)));
+ }
+
+ // The raw data in little endian format. Use with care.
+ const uint8_t *Data() const {
+ return reinterpret_cast<const uint8_t *>(&length_ + 1);
+ }
+
+ uint8_t *Data() {
+ return reinterpret_cast<uint8_t *>(&length_ + 1);
+ }
+
+ // Similarly, but typed, much like std::vector::data
+ const T *data() const { return reinterpret_cast<const T *>(Data()); }
+ T *data() { return reinterpret_cast<T *>(Data()); }
+
+ template<typename K> return_type LookupByKey(K key) const {
+ void *search_result = std::bsearch(&key, Data(), size(),
+ IndirectHelper<T>::element_stride, KeyCompare<K>);
+
+ if (!search_result) {
+ return nullptr; // Key not found.
+ }
+
+ const uint8_t *data = reinterpret_cast<const uint8_t *>(search_result);
+
+ return IndirectHelper<T>::Read(data, 0);
+ }
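+  // Note: LookupByKey performs a binary search, so it assumes the vector was
+  // stored sorted on the key (e.g. built with CreateVectorOfSortedTables
+  // below) and that the element type provides KeyCompareWithValue().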
+
+protected:
+ // This class is only used to access pre-existing data. Don't ever
+ // try to construct these manually.
+ Vector();
+
+ uoffset_t length_;
+
+private:
+ template<typename K> static int KeyCompare(const void *ap, const void *bp) {
+ const K *key = reinterpret_cast<const K *>(ap);
+ const uint8_t *data = reinterpret_cast<const uint8_t *>(bp);
+ auto table = IndirectHelper<T>::Read(data, 0);
+
+ // std::bsearch compares with the operands transposed, so we negate the
+ // result here.
+ return -table->KeyCompareWithValue(*key);
+ }
+};
+
+// Represent a vector much like the template above, but in this case we
+// don't know what the element types are (used with reflection.h).
+class VectorOfAny {
+public:
+ uoffset_t size() const { return EndianScalar(length_); }
+
+ const uint8_t *Data() const {
+ return reinterpret_cast<const uint8_t *>(&length_ + 1);
+ }
+ uint8_t *Data() {
+ return reinterpret_cast<uint8_t *>(&length_ + 1);
+ }
+protected:
+ VectorOfAny();
+
+ uoffset_t length_;
+};
+
+// Convenient helper function to get the length of any vector, regardless
+// of whether it is null or not (the field is not set).
+template<typename T> static inline size_t VectorLength(const Vector<T> *v) {
+ return v ? v->Length() : 0;
+}
+
+struct String : public Vector<char> {
+ const char *c_str() const { return reinterpret_cast<const char *>(Data()); }
+ std::string str() const { return c_str(); }
+
+ bool operator <(const String &o) const {
+ return strcmp(c_str(), o.c_str()) < 0;
+ }
+};
+
+// Simple indirection for buffer allocation, to allow this to be overridden
+// with custom allocation (see the FlatBufferBuilder constructor).
+class simple_allocator {
+ public:
+ virtual ~simple_allocator() {}
+ virtual uint8_t *allocate(size_t size) const { return new uint8_t[size]; }
+ virtual void deallocate(uint8_t *p) const { delete[] p; }
+};
+
+// This is a minimal replication of std::vector<uint8_t> functionality,
+// except growing from higher to lower addresses, i.e. push_back() inserts data
+// at the lowest address in the vector.
+class vector_downward {
+ public:
+ explicit vector_downward(size_t initial_size,
+ const simple_allocator &allocator)
+ : reserved_(initial_size),
+ buf_(allocator.allocate(reserved_)),
+ cur_(buf_ + reserved_),
+ allocator_(allocator) {
+ assert((initial_size & (sizeof(largest_scalar_t) - 1)) == 0);
+ }
+
+ ~vector_downward() {
+ if (buf_)
+ allocator_.deallocate(buf_);
+ }
+
+ void clear() {
+ if (buf_ == nullptr)
+ buf_ = allocator_.allocate(reserved_);
+
+ cur_ = buf_ + reserved_;
+ }
+
+ // Relinquish the pointer to the caller.
+ unique_ptr_t release() {
+ // Actually deallocate from the start of the allocated memory.
+ std::function<void(uint8_t *)> deleter(
+ std::bind(&simple_allocator::deallocate, allocator_, buf_));
+
+ // Point to the desired offset.
+ unique_ptr_t retval(data(), deleter);
+
+ // Don't deallocate when this instance is destroyed.
+ buf_ = nullptr;
+ cur_ = nullptr;
+
+ return retval;
+ }
+
+ size_t growth_policy(size_t bytes) {
+ return (bytes / 2) & ~(sizeof(largest_scalar_t) - 1);
+ }
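+  // Used by make_space() below: on reallocation the reservation grows by
+  // max(len, growth_policy(reserved_)), i.e. by roughly half the current
+  // reservation rounded down to the largest scalar alignment.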
+
+ uint8_t *make_space(size_t len) {
+ if (len > static_cast<size_t>(cur_ - buf_)) {
+ auto old_size = size();
+ auto largest_align = AlignOf<largest_scalar_t>();
+ reserved_ += std::max(len, growth_policy(reserved_));
+ // Round up to avoid undefined behavior from unaligned loads and stores.
+ reserved_ = (reserved_ + (largest_align - 1)) & ~(largest_align - 1);
+ auto new_buf = allocator_.allocate(reserved_);
+ auto new_cur = new_buf + reserved_ - old_size;
+ memcpy(new_cur, cur_, old_size);
+ cur_ = new_cur;
+ allocator_.deallocate(buf_);
+ buf_ = new_buf;
+ }
+ cur_ -= len;
+ // Beyond this, signed offsets may not have enough range:
+ // (FlatBuffers > 2GB not supported).
+ assert(size() < (1UL << (sizeof(soffset_t) * 8 - 1)) - 1);
+ return cur_;
+ }
+
+ uoffset_t size() const {
+ assert(cur_ != nullptr && buf_ != nullptr);
+ return static_cast<uoffset_t>(reserved_ - (cur_ - buf_));
+ }
+
+ uint8_t *data() const {
+ assert(cur_ != nullptr);
+ return cur_;
+ }
+
+ uint8_t *data_at(size_t offset) { return buf_ + reserved_ - offset; }
+
+ // push() & fill() are most frequently called with small byte counts (<= 4),
+ // which is why we're using loops rather than calling memcpy/memset.
+ void push(const uint8_t *bytes, size_t num) {
+ auto dest = make_space(num);
+ for (size_t i = 0; i < num; i++) dest[i] = bytes[i];
+ }
+
+ void fill(size_t zero_pad_bytes) {
+ auto dest = make_space(zero_pad_bytes);
+ for (size_t i = 0; i < zero_pad_bytes; i++) dest[i] = 0;
+ }
+
+ void pop(size_t bytes_to_remove) { cur_ += bytes_to_remove; }
+
+ private:
+ // You shouldn't really be copying instances of this class.
+ vector_downward(const vector_downward &);
+ vector_downward &operator=(const vector_downward &);
+
+ size_t reserved_;
+ uint8_t *buf_;
+ uint8_t *cur_; // Points at location between empty (below) and used (above).
+ const simple_allocator &allocator_;
+};
+
+// Converts a Field ID to a virtual table offset.
+inline voffset_t FieldIndexToOffset(voffset_t field_id) {
+ // Should correspond to what EndTable() below builds up.
+ const int fixed_fields = 2; // Vtable size and Object Size.
+ return (field_id + fixed_fields) * sizeof(voffset_t);
+}
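+// For example, with the default 16bit voffset_t, field_id 0 maps to byte
+// offset 4 within the vtable, because the first two voffset_t slots hold the
+// vtable size and the object size.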
+
+// Computes how many bytes you'd have to pad to be able to write a
+// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
+// memory).
+inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
+ return ((~buf_size) + 1) & (scalar_size - 1);
+}
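+// E.g. PaddingBytes(13, 4) == 3: three bytes of padding grow the buffer to 16,
+// which is 4-byte aligned. The expression is equivalent to
+// (-buf_size) & (scalar_size - 1) for power-of-two scalar sizes.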
+
+// Helper class to hold data needed in creation of a flat buffer.
+// To serialize data, you typically call one of the Create*() functions in
+// the generated code, which in turn call a sequence of StartTable/PushElement/
+// AddElement/EndTable, or the builtin CreateString/CreateVector functions.
+// Do this in depth-first order to build up a tree to the root.
+// Finish() wraps up the buffer ready for transport.
+class FlatBufferBuilder FLATBUFFERS_FINAL_CLASS {
+ public:
+ explicit FlatBufferBuilder(uoffset_t initial_size = 1024,
+ const simple_allocator *allocator = nullptr)
+ : buf_(initial_size, allocator ? *allocator : default_allocator),
+ minalign_(1), force_defaults_(false) {
+ offsetbuf_.reserve(16); // Avoid first few reallocs.
+ vtables_.reserve(16);
+ EndianCheck();
+ }
+
+ // Reset all the state in this FlatBufferBuilder so it can be reused
+ // to construct another buffer.
+ void Clear() {
+ buf_.clear();
+ offsetbuf_.clear();
+ vtables_.clear();
+ minalign_ = 1;
+ }
+
+ // The current size of the serialized buffer, counting from the end.
+ uoffset_t GetSize() const { return buf_.size(); }
+
+ // Get the serialized buffer (after you call Finish()).
+ uint8_t *GetBufferPointer() const { return buf_.data(); }
+
+ // Get the released pointer to the serialized buffer.
+ // Don't attempt to use this FlatBufferBuilder afterwards!
+ // The unique_ptr returned has a special allocator that knows how to
+ // deallocate this pointer (since it points to the middle of an allocation).
+ // Thus, do not mix this pointer with other unique_ptr's, or call release() /
+ // reset() on it.
+ unique_ptr_t ReleaseBufferPointer() { return buf_.release(); }
+
+ void ForceDefaults(bool fd) { force_defaults_ = fd; }
+
+ void Pad(size_t num_bytes) { buf_.fill(num_bytes); }
+
+ void Align(size_t elem_size) {
+ if (elem_size > minalign_) minalign_ = elem_size;
+ buf_.fill(PaddingBytes(buf_.size(), elem_size));
+ }
+
+ void PushBytes(const uint8_t *bytes, size_t size) {
+ buf_.push(bytes, size);
+ }
+
+ void PopBytes(size_t amount) { buf_.pop(amount); }
+
+ template<typename T> void AssertScalarT() {
+ // The code assumes power of 2 sizes and endian-swap-ability.
+ static_assert(std::is_scalar<T>::value
+ // The Offset<T> type is essentially a scalar but fails is_scalar.
+ || sizeof(T) == sizeof(Offset<void>),
+ "T must be a scalar type");
+ }
+
+ // Write a single aligned scalar to the buffer
+ template<typename T> uoffset_t PushElement(T element) {
+ AssertScalarT<T>();
+    T little_endian_element = EndianScalar(element);
+    Align(sizeof(T));
+    PushBytes(reinterpret_cast<uint8_t *>(&little_endian_element), sizeof(T));
+ return GetSize();
+ }
+
+ template<typename T> uoffset_t PushElement(Offset<T> off) {
+ // Special case for offsets: see ReferTo below.
+ return PushElement(ReferTo(off.o));
+ }
+
+ // When writing fields, we track where they are, so we can create correct
+ // vtables later.
+ void TrackField(voffset_t field, uoffset_t off) {
+ FieldLoc fl = { off, field };
+ offsetbuf_.push_back(fl);
+ }
+
+ // Like PushElement, but additionally tracks the field this represents.
+ template<typename T> void AddElement(voffset_t field, T e, T def) {
+ // We don't serialize values equal to the default.
+ if (e == def && !force_defaults_) return;
+ auto off = PushElement(e);
+ TrackField(field, off);
+ }
+
+ template<typename T> void AddOffset(voffset_t field, Offset<T> off) {
+ if (!off.o) return; // An offset of 0 means NULL, don't store.
+ AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
+ }
+
+ template<typename T> void AddStruct(voffset_t field, const T *structptr) {
+ if (!structptr) return; // Default, don't store.
+ Align(AlignOf<T>());
+ PushBytes(reinterpret_cast<const uint8_t *>(structptr), sizeof(T));
+ TrackField(field, GetSize());
+ }
+
+ void AddStructOffset(voffset_t field, uoffset_t off) {
+ TrackField(field, off);
+ }
+
+ // Offsets initially are relative to the end of the buffer (downwards).
+ // This function converts them to be relative to the current location
+ // in the buffer (when stored here), pointing upwards.
+ uoffset_t ReferTo(uoffset_t off) {
+ Align(sizeof(uoffset_t)); // To ensure GetSize() below is correct.
+ assert(off <= GetSize()); // Must refer to something already in buffer.
+ return GetSize() - off + sizeof(uoffset_t);
+ }
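+  // Example: if the target was written when the buffer held 60 bytes
+  // (off == 60) and it now holds 100 bytes, the uoffset_t about to be pushed
+  // will end up 104 bytes from the end of the buffer, so the stored value is
+  // 100 - 60 + 4 == 44, pointing forward to the target.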
+
+ void NotNested() {
+ // If you hit this, you're trying to construct an object when another
+ // hasn't finished yet.
+ assert(!offsetbuf_.size());
+ }
+
+ // From generated code (or from the parser), we call StartTable/EndTable
+ // with a sequence of AddElement calls in between.
+ uoffset_t StartTable() {
+ NotNested();
+ return GetSize();
+ }
+
+ // This finishes one serialized object by generating the vtable if it's a
+ // table, comparing it against existing vtables, and writing the
+ // resulting vtable offset.
+ uoffset_t EndTable(uoffset_t start, voffset_t numfields) {
+ // Write the vtable offset, which is the start of any Table.
+    // We fill in its value later.
+ auto vtableoffsetloc = PushElement<soffset_t>(0);
+ // Write a vtable, which consists entirely of voffset_t elements.
+ // It starts with the number of offsets, followed by a type id, followed
+ // by the offsets themselves. In reverse:
+ buf_.fill(numfields * sizeof(voffset_t));
+ auto table_object_size = vtableoffsetloc - start;
+    assert(table_object_size < 0x10000); // Vtables use 16bit offsets.
+ PushElement<voffset_t>(static_cast<voffset_t>(table_object_size));
+ PushElement<voffset_t>(FieldIndexToOffset(numfields));
+ // Write the offsets into the table
+ for (auto field_location = offsetbuf_.begin();
+ field_location != offsetbuf_.end();
+ ++field_location) {
+ auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
+ // If this asserts, it means you've set a field twice.
+ assert(!ReadScalar<voffset_t>(buf_.data() + field_location->id));
+ WriteScalar<voffset_t>(buf_.data() + field_location->id, pos);
+ }
+ offsetbuf_.clear();
+ auto vt1 = reinterpret_cast<voffset_t *>(buf_.data());
+ auto vt1_size = ReadScalar<voffset_t>(vt1);
+ auto vt_use = GetSize();
+ // See if we already have generated a vtable with this exact same
+ // layout before. If so, make it point to the old one, remove this one.
+ for (auto it = vtables_.begin(); it != vtables_.end(); ++it) {
+ auto vt2 = reinterpret_cast<voffset_t *>(buf_.data_at(*it));
+ auto vt2_size = *vt2;
+ if (vt1_size != vt2_size || memcmp(vt2, vt1, vt1_size)) continue;
+ vt_use = *it;
+ buf_.pop(GetSize() - vtableoffsetloc);
+ break;
+ }
+ // If this is a new vtable, remember it.
+ if (vt_use == GetSize()) {
+ vtables_.push_back(vt_use);
+ }
+ // Fill the vtable offset we created above.
+ // The offset points from the beginning of the object to where the
+ // vtable is stored.
+    // The default offset direction is downward in memory, for future format
+    // flexibility (storing all vtables at the start of the file).
+ WriteScalar(buf_.data_at(vtableoffsetloc),
+ static_cast<soffset_t>(vt_use) -
+ static_cast<soffset_t>(vtableoffsetloc));
+ return vtableoffsetloc;
+ }
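+  // After EndTable() the memory for a table reads, from low to high address:
+  // [vtable: vtable size, object size, per-field voffsets...]
+  // [table: soffset_t back to the vtable, then the field data].
+  // If an identical vtable already existed, the new one is popped again and
+  // the table's soffset_t points at the shared, earlier vtable instead.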
+
+  // This checks that a required field has been set in a given table that has
+ // just been constructed.
+ template<typename T> void Required(Offset<T> table, voffset_t field) {
+ auto table_ptr = buf_.data_at(table.o);
+ auto vtable_ptr = table_ptr - ReadScalar<soffset_t>(table_ptr);
+ bool ok = ReadScalar<voffset_t>(vtable_ptr + field) != 0;
+ // If this fails, the caller will show what field needs to be set.
+ assert(ok);
+ (void)ok;
+ }
+
+ uoffset_t StartStruct(size_t alignment) {
+ Align(alignment);
+ return GetSize();
+ }
+
+ uoffset_t EndStruct() { return GetSize(); }
+
+ void ClearOffsets() { offsetbuf_.clear(); }
+
+ // Aligns such that when "len" bytes are written, an object can be written
+ // after it with "alignment" without padding.
+ void PreAlign(size_t len, size_t alignment) {
+ buf_.fill(PaddingBytes(GetSize() + len, alignment));
+ }
+ template<typename T> void PreAlign(size_t len) {
+ AssertScalarT<T>();
+ PreAlign(len, sizeof(T));
+ }
+
+ // Functions to store strings, which are allowed to contain any binary data.
+ Offset<String> CreateString(const char *str, size_t len) {
+ NotNested();
+ PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
+ buf_.fill(1);
+ PushBytes(reinterpret_cast<const uint8_t *>(str), len);
+ PushElement(static_cast<uoffset_t>(len));
+ return Offset<String>(GetSize());
+ }
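+  // Note: since the buffer grows downwards the terminator is written first,
+  // then the characters, then the length, so the finished string reads
+  // [uoffset_t length][chars...][0] from low to high address, with the
+  // returned offset identifying the length field.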
+
+ Offset<String> CreateString(const char *str) {
+ return CreateString(str, strlen(str));
+ }
+
+ Offset<String> CreateString(const std::string &str) {
+ return CreateString(str.c_str(), str.length());
+ }
+
+ Offset<String> CreateString(const String *str) {
+ return CreateString(str->c_str(), str->Length());
+ }
+
+ uoffset_t EndVector(size_t len) {
+ return PushElement(static_cast<uoffset_t>(len));
+ }
+
+ void StartVector(size_t len, size_t elemsize) {
+ PreAlign<uoffset_t>(len * elemsize);
+ PreAlign(len * elemsize, elemsize); // Just in case elemsize > uoffset_t.
+ }
+
+ uint8_t *ReserveElements(size_t len, size_t elemsize) {
+ return buf_.make_space(len * elemsize);
+ }
+
+ template<typename T> Offset<Vector<T>> CreateVector(const T *v, size_t len) {
+ NotNested();
+ StartVector(len, sizeof(T));
+ for (auto i = len; i > 0; ) {
+ PushElement(v[--i]);
+ }
+ return Offset<Vector<T>>(EndVector(len));
+ }
+
+ template<typename T> Offset<Vector<T>> CreateVector(const std::vector<T> &v) {
+ return CreateVector(v.data(), v.size());
+ }
+
+ template<typename T> Offset<Vector<const T *>> CreateVectorOfStructs(
+ const T *v, size_t len) {
+ NotNested();
+ StartVector(len * sizeof(T) / AlignOf<T>(), AlignOf<T>());
+ PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
+ return Offset<Vector<const T *>>(EndVector(len));
+ }
+
+ template<typename T> Offset<Vector<const T *>> CreateVectorOfStructs(
+ const std::vector<T> &v) {
+ return CreateVectorOfStructs(v.data(), v.size());
+ }
+
+ template<typename T> Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(
+ Offset<T> *v, size_t len) {
+ std::sort(v, v + len,
+ [this](const Offset<T> &a, const Offset<T> &b) -> bool {
+ auto table_a = reinterpret_cast<T *>(buf_.data_at(a.o));
+ auto table_b = reinterpret_cast<T *>(buf_.data_at(b.o));
+ return table_a->KeyCompareLessThan(table_b);
+ }
+ );
+ return CreateVector(v, len);
+ }
+
+ template<typename T> Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(
+ std::vector<Offset<T>> *v) {
+ return CreateVectorOfSortedTables(v->data(), v->size());
+ }
+
+ // Specialized version for non-copying use cases. Write the data any time
+ // later to the returned buffer pointer `buf`.
+ uoffset_t CreateUninitializedVector(size_t len, size_t elemsize,
+ uint8_t **buf) {
+ NotNested();
+ StartVector(len, elemsize);
+ *buf = buf_.make_space(len * elemsize);
+ return EndVector(len);
+ }
+
+ template<typename T> Offset<Vector<T>> CreateUninitializedVector(
+ size_t len, T **buf) {
+ return CreateUninitializedVector(len, sizeof(T),
+ reinterpret_cast<uint8_t **>(buf));
+ }
+
+ static const size_t kFileIdentifierLength = 4;
+
+ // Finish serializing a buffer by writing the root offset.
+  // If a file_identifier is given, the buffer will be prefixed with a standard
+ // FlatBuffers file header.
+ template<typename T> void Finish(Offset<T> root,
+ const char *file_identifier = nullptr) {
+ // This will cause the whole buffer to be aligned.
+ PreAlign(sizeof(uoffset_t) + (file_identifier ? kFileIdentifierLength : 0),
+ minalign_);
+ if (file_identifier) {
+ assert(strlen(file_identifier) == kFileIdentifierLength);
+ buf_.push(reinterpret_cast<const uint8_t *>(file_identifier),
+ kFileIdentifierLength);
+ }
+ PushElement(ReferTo(root.o)); // Location of root.
+ }
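+  // The finished buffer thus starts with the root uoffset_t, optionally
+  // followed by the 4-character file identifier, and then the serialized
+  // objects; GetBufferPointer() returns the address of that root offset.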
+
+ private:
+ // You shouldn't really be copying instances of this class.
+ FlatBufferBuilder(const FlatBufferBuilder &);
+ FlatBufferBuilder &operator=(const FlatBufferBuilder &);
+
+ struct FieldLoc {
+ uoffset_t off;
+ voffset_t id;
+ };
+
+ simple_allocator default_allocator;
+
+ vector_downward buf_;
+
+ // Accumulating offsets of table members while it is being built.
+ std::vector<FieldLoc> offsetbuf_;
+
+ std::vector<uoffset_t> vtables_; // todo: Could make this into a map?
+
+ size_t minalign_;
+
+ bool force_defaults_; // Serialize values equal to their defaults anyway.
+};
+
+// Helpers to get a typed pointer to the root object contained in the buffer.
+template<typename T> T *GetMutableRoot(void *buf) {
+ EndianCheck();
+ return reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(buf) +
+ EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
+}
+
+template<typename T> const T *GetRoot(const void *buf) {
+ return GetMutableRoot<T>(const_cast<void *>(buf));
+}
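+// Typical read-side usage, with a hypothetical generated root type
+// "MyGame::Monster":
+//   auto monster = GetRoot<MyGame::Monster>(buffer_pointer);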
+
+// Helper to see if the identifier in a buffer has the expected value.
+inline bool BufferHasIdentifier(const void *buf, const char *identifier) {
+ return strncmp(reinterpret_cast<const char *>(buf) + sizeof(uoffset_t),
+ identifier, FlatBufferBuilder::kFileIdentifierLength) == 0;
+}
+
+// Helper class to verify the integrity of a FlatBuffer
+class Verifier FLATBUFFERS_FINAL_CLASS {
+ public:
+ Verifier(const uint8_t *buf, size_t buf_len, size_t _max_depth = 64,
+ size_t _max_tables = 1000000)
+ : buf_(buf), end_(buf + buf_len), depth_(0), max_depth_(_max_depth),
+ num_tables_(0), max_tables_(_max_tables)
+ {}
+
+ // Central location where any verification failures register.
+ bool Check(bool ok) const {
+ #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
+ assert(ok);
+ #endif
+ return ok;
+ }
+
+ // Verify any range within the buffer.
+ bool Verify(const void *elem, size_t elem_len) const {
+ return Check(elem_len <= (size_t) (end_ - buf_) && elem >= buf_ && elem <= end_ - elem_len);
+ }
+
+ // Verify a range indicated by sizeof(T).
+ template<typename T> bool Verify(const void *elem) const {
+ return Verify(elem, sizeof(T));
+ }
+
+ // Verify a pointer (may be NULL) of a table type.
+ template<typename T> bool VerifyTable(const T *table) {
+ return !table || table->Verify(*this);
+ }
+
+ // Verify a pointer (may be NULL) of any vector type.
+ template<typename T> bool Verify(const Vector<T> *vec) const {
+ const uint8_t *end;
+ return !vec ||
+ VerifyVector(reinterpret_cast<const uint8_t *>(vec), sizeof(T),
+ &end);
+ }
+
+  // Verify a pointer (may be NULL) to a string.
+ bool Verify(const String *str) const {
+ const uint8_t *end;
+ return !str ||
+ (VerifyVector(reinterpret_cast<const uint8_t *>(str), 1, &end) &&
+ Verify(end, 1) && // Must have terminator
+ Check(*end == '\0')); // Terminating byte must be 0.
+ }
+
+ // Common code between vectors and strings.
+ bool VerifyVector(const uint8_t *vec, size_t elem_size,
+ const uint8_t **end) const {
+ // Check we can read the size field.
+ if (!Verify<uoffset_t>(vec)) return false;
+ // Check the whole array. If this is a string, the byte past the array
+ // must be 0.
+ auto size = ReadScalar<uoffset_t>(vec);
+ auto byte_size = sizeof(size) + elem_size * size;
+ *end = vec + byte_size;
+ return Verify(vec, byte_size);
+ }
+
+ // Special case for string contents, after the above has been called.
+ bool VerifyVectorOfStrings(const Vector<Offset<String>> *vec) const {
+ if (vec) {
+ for (uoffset_t i = 0; i < vec->size(); i++) {
+ if (!Verify(vec->Get(i))) return false;
+ }
+ }
+ return true;
+ }
+
+ // Special case for table contents, after the above has been called.
+ template<typename T> bool VerifyVectorOfTables(const Vector<Offset<T>> *vec) {
+ if (vec) {
+ for (uoffset_t i = 0; i < vec->size(); i++) {
+ if (!vec->Get(i)->Verify(*this)) return false;
+ }
+ }
+ return true;
+ }
+
+ // Verify this whole buffer, starting with root type T.
+ template<typename T> bool VerifyBuffer() {
+ // Call T::Verify, which must be in the generated code for this type.
+ return Verify<uoffset_t>(buf_) &&
+ reinterpret_cast<const T *>(buf_ + ReadScalar<uoffset_t>(buf_))->
+ Verify(*this);
+ }
+
+  // Called at the start of a table to increase counters measuring data
+  // structure depth and amount; returns false if limits set by the
+  // constructor have been hit. Needs to be balanced with EndTable().
+ bool VerifyComplexity() {
+ depth_++;
+ num_tables_++;
+ return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_);
+ }
+
+ // Called at the end of a table to pop the depth count.
+ bool EndTable() {
+ depth_--;
+ return true;
+ }
+
+ private:
+ const uint8_t *buf_;
+ const uint8_t *end_;
+ size_t depth_;
+ size_t max_depth_;
+ size_t num_tables_;
+ size_t max_tables_;
+};
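+// Minimal usage sketch, assuming a hypothetical generated root type
+// "MyGame::Monster" with generated Verify() support:
+//   Verifier verifier(buf, buf_len);
+//   bool ok = verifier.VerifyBuffer<MyGame::Monster>();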
+
+// "structs" are flat structures that do not have an offset table, thus
+// always have all members present and do not support forwards/backwards
+// compatible extensions.
+
+class Struct FLATBUFFERS_FINAL_CLASS {
+ public:
+ template<typename T> T GetField(uoffset_t o) const {
+ return ReadScalar<T>(&data_[o]);
+ }
+
+ template<typename T> T GetPointer(uoffset_t o) const {
+ auto p = &data_[o];
+ return reinterpret_cast<T>(p + ReadScalar<uoffset_t>(p));
+ }
+
+ template<typename T> T GetStruct(uoffset_t o) const {
+ return reinterpret_cast<T>(&data_[o]);
+ }
+
+ const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; }
+ uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; }
+
+ private:
+ uint8_t data_[1];
+};
+
+// "tables" use an offset table (possibly shared) that allows fields to be
+// omitted and added at will, but uses an extra indirection to read.
+class Table {
+ public:
+ // This gets the field offset for any of the functions below it, or 0
+ // if the field was not present.
+ voffset_t GetOptionalFieldOffset(voffset_t field) const {
+ // The vtable offset is always at the start.
+ auto vtable = data_ - ReadScalar<soffset_t>(data_);
+ // The first element is the size of the vtable (fields + type id + itself).
+ auto vtsize = ReadScalar<voffset_t>(vtable);
+ // If the field we're accessing is outside the vtable, we're reading older
+ // data, so it's the same as if the offset was 0 (not present).
+ return field < vtsize ? ReadScalar<voffset_t>(vtable + field) : 0;
+ }
+
+ template<typename T> T GetField(voffset_t field, T defaultval) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
+ }
+
+ template<typename P> P GetPointer(voffset_t field) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = data_ + field_offset;
+ return field_offset
+ ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
+ : nullptr;
+ }
+ template<typename P> P GetPointer(voffset_t field) const {
+ return const_cast<Table *>(this)->GetPointer<P>(field);
+ }
+
+ template<typename P> P GetStruct(voffset_t field) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = const_cast<uint8_t *>(data_ + field_offset);
+ return field_offset ? reinterpret_cast<P>(p) : nullptr;
+ }
+
+ template<typename T> bool SetField(voffset_t field, T val) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset) return false;
+ WriteScalar(data_ + field_offset, val);
+ return true;
+ }
+
+ bool SetPointer(voffset_t field, const uint8_t *val) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset) return false;
+ WriteScalar(data_ + field_offset, val - (data_ + field_offset));
+ return true;
+ }
+
+ uint8_t *GetAddressOf(voffset_t field) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return field_offset ? data_ + field_offset : nullptr;
+ }
+ const uint8_t *GetAddressOf(voffset_t field) const {
+ return const_cast<Table *>(this)->GetAddressOf(field);
+ }
+
+ uint8_t *GetVTable() { return data_ - ReadScalar<soffset_t>(data_); }
+
+ bool CheckField(voffset_t field) const {
+ return GetOptionalFieldOffset(field) != 0;
+ }
+
+ // Verify the vtable of this table.
+ // Call this once per table, followed by VerifyField once per field.
+ bool VerifyTableStart(Verifier &verifier) const {
+ // Check the vtable offset.
+ if (!verifier.Verify<soffset_t>(data_)) return false;
+ auto vtable = data_ - ReadScalar<soffset_t>(data_);
+ // Check the vtable size field, then check vtable fits in its entirety.
+ return verifier.VerifyComplexity() &&
+ verifier.Verify<voffset_t>(vtable) &&
+ verifier.Verify(vtable, ReadScalar<voffset_t>(vtable));
+ }
+
+ // Verify a particular field.
+ template<typename T> bool VerifyField(const Verifier &verifier,
+ voffset_t field) const {
+ // Calling GetOptionalFieldOffset should be safe now thanks to
+ // VerifyTable().
+ auto field_offset = GetOptionalFieldOffset(field);
+ // Check the actual field.
+ return !field_offset || verifier.Verify<T>(data_ + field_offset);
+ }
+
+ // VerifyField for required fields.
+ template<typename T> bool VerifyFieldRequired(const Verifier &verifier,
+ voffset_t field) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return verifier.Check(field_offset != 0) &&
+ verifier.Verify<T>(data_ + field_offset);
+ }
+
+ private:
+ // private constructor & copy constructor: you obtain instances of this
+ // class by pointing to existing data only
+ Table();
+ Table(const Table &other);
+
+ uint8_t data_[1];
+};
+
+// Utility function for reverse lookups on the EnumNames*() functions
+// (in the generated C++ code).
+// The names array must be NULL terminated.
+inline int LookupEnum(const char **names, const char *name) {
+ for (const char **p = names; *p; p++)
+ if (!strcmp(*p, name))
+ return static_cast<int>(p - names);
+ return -1;
+}
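+// Example, with a hypothetical NULL terminated names array:
+//   const char *names[] = { "Apples", "Pears", "Bananas", nullptr };
+//   LookupEnum(names, "Pears");  // returns 1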
+
+// These macros allow us to lay out a struct with a guarantee that it'll end
+// up looking the same on different compilers and platforms.
+// They do this by preventing the compiler from inserting any padding, and
+// then pad manually by inserting extra padding fields that make every
+// element aligned to its own size.
+// Additionally, it manually sets the alignment of the struct as a whole,
+// which is typically its largest element, or a custom size set in the schema
+// by the force_align attribute.
+// These are used in the generated code only.
+
+#if defined(_MSC_VER)
+ #define MANUALLY_ALIGNED_STRUCT(alignment) \
+ __pragma(pack(1)); \
+ struct __declspec(align(alignment))
+ #define STRUCT_END(name, size) \
+ __pragma(pack()); \
+ static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#elif defined(__GNUC__) || defined(__clang__)
+ #define MANUALLY_ALIGNED_STRUCT(alignment) \
+ _Pragma("pack(1)") \
+ struct __attribute__((aligned(alignment)))
+ #define STRUCT_END(name, size) \
+ _Pragma("pack()") \
+ static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#else
+ #error Unknown compiler, please define structure alignment macros
+#endif
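+// Usage sketch with a hypothetical 8-byte struct:
+//   MANUALLY_ALIGNED_STRUCT(4) Vec2 { float x_; float y_; };
+//   STRUCT_END(Vec2, 8);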
+
+// String which identifies the current version of FlatBuffers.
+// flatbuffer_version_string is used by Google developers to identify which
+// applications uploaded to Google Play are using this library. This allows
+// the development team at Google to determine the popularity of the library.
+// How it works: Applications that are uploaded to the Google Play Store are
+// scanned for this version string. We track which applications are using it
+// to measure popularity. You are free to remove it (of course) but we would
+// appreciate if you left it in.
+
+// Weak linkage is culled by VS & doesn't work on cygwin.
+#if !defined(_WIN32) && !defined(__CYGWIN__)
+
+extern volatile __attribute__((weak)) const char *flatbuffer_version_string;
+volatile __attribute__((weak)) const char *flatbuffer_version_string =
+ "FlatBuffers "
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
+
+#endif // !defined(_WIN32) && !defined(__CYGWIN__)
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_H_
diff --git a/test/benchmark/benchflatc/run.sh b/test/benchmark/benchflatc/run.sh
new file mode 100755
index 0000000..4aff0b8
--- /dev/null
+++ b/test/benchmark/benchflatc/run.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchflatc
+INC=$ROOT/include
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+
+CXX=${CXX:-c++}
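+# The compiler may be overridden via the environment, e.g.:
+#   CXX=clang++ test/benchmark/benchflatc/run.sh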
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchflatc/* ${TMP}
+#include include at root as it may conflict
+cp -r ${ROOT}/include/flatcc/support ${TMP}
+
+cd ${TMP}
+$CXX -g -std=c++11 benchflatc.cpp -o benchflatc_d -I $INC
+$CXX -O3 -DNDEBUG -std=c++11 benchflatc.cpp -o benchflatc -I $INC
+echo "running flatbench flatc for C++ (debug)"
+./benchflatc_d
+echo "running flatbench flatc for C++ (optimized)"
+./benchflatc
diff --git a/test/benchmark/benchflatcc/benchflatcc.c b/test/benchmark/benchflatcc/benchflatcc.c
new file mode 100644
index 0000000..682418a
--- /dev/null
+++ b/test/benchmark/benchflatcc/benchflatcc.c
@@ -0,0 +1,98 @@
+#define BENCH_TITLE "flatcc for C"
+
+#define BENCHMARK_BUFSIZ 1000
+#define DECLARE_BENCHMARK(BM)\
+ flatcc_builder_t builder, *BM;\
+ BM = &builder;\
+ flatcc_builder_init(BM);
+
+#define CLEAR_BENCHMARK(BM) flatcc_builder_clear(BM);
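+
+/*
+ * BENCHMARK_BUFSIZ, DECLARE_BENCHMARK and CLEAR_BENCHMARK, together with the
+ * encode() and decode() functions below, form the interface expected by the
+ * shared benchmark driver in benchmain.h included at the bottom of this file.
+ */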
+
+
+#include "flatbench_builder.h"
+
+#define C(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBarContainer, x)
+#define FooBar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBar, x)
+#define Bar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Bar, x)
+#define Foo(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Foo, x)
+#define Enum(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Enum, x)
+#define True flatbuffers_true
+#define False flatbuffers_false
+#define StringLen flatbuffers_string_len
+
+int encode(flatcc_builder_t *B, void *buffer, size_t *size)
+{
+ int i, veclen = 3;
+ void *buffer_ok;
+
+ flatcc_builder_reset(B);
+
+ C(start_as_root(B));
+ C(list_start(B));
+ for (i = 0; i < veclen; ++i) {
+ /*
+ * By using push_start instead of push_create we can construct
+ * the sibling field (of Bar type) in-place on the stack,
+ * otherwise we would need to create a temporary Bar struct.
+ */
+ C(list_push_start(B));
+ FooBar(sibling_create(B,
+ 0xABADCAFEABADCAFE + i, 10000 + i, '@' + i, 1000000 + i,
+ 123456 + i, 3.14159f + i, 10000 + i));
+ FooBar(name_create_str(B, "Hello, World!"));
+ FooBar(rating_add(B, 3.1415432432445543543 + i));
+ FooBar(postfix_add(B, '!' + i));
+ C(list_push_end(B));
+ }
+ C(list_end(B));
+ C(location_create_str(B, "https://www.example.com/myurl/"));
+ C(fruit_add(B, Enum(Bananas)));
+ C(initialized_add(B, True));
+ C(end_as_root(B));
+
+ /*
+ * This only works with the default emitter and only if the buffer
+     * is large enough. Otherwise use whatever custom operation the
+ * emitter provides.
+ */
+ buffer_ok = flatcc_builder_copy_buffer(B, buffer, *size);
+ *size = flatcc_builder_get_buffer_size(B);
+ return !buffer_ok;
+}
+
+int64_t decode(flatcc_builder_t *B, void *buffer, size_t size, int64_t sum)
+{
+ unsigned int i;
+ C(table_t) foobarcontainer;
+ FooBar(vec_t) list;
+ FooBar(table_t) foobar;
+ Bar(struct_t) bar;
+ Foo(struct_t) foo;
+
+ (void)B;
+
+ foobarcontainer = C(as_root(buffer));
+ sum += C(initialized(foobarcontainer));
+ sum += StringLen(C(location(foobarcontainer)));
+ sum += C(fruit(foobarcontainer));
+ list = C(list(foobarcontainer));
+ for (i = 0; i < FooBar(vec_len(list)); ++i) {
+ foobar = FooBar(vec_at(list, i));
+ sum += StringLen(FooBar(name(foobar)));
+ sum += FooBar(postfix(foobar));
+ sum += (int64_t)FooBar(rating(foobar));
+ bar = FooBar(sibling(foobar));
+ sum += (int64_t)Bar(ratio(bar));
+ sum += Bar(size(bar));
+ sum += Bar(time(bar));
+ foo = Bar(parent(bar));
+ sum += Foo(count(foo));
+ sum += Foo(id(foo));
+ sum += Foo(length(foo));
+ sum += Foo(prefix(foo));
+ }
+ return sum + 2 * sum;
+}
+
+/* Copy to same folder before compilation or use include directive. */
+#include "benchmain.h"
diff --git a/test/benchmark/benchflatcc/run.sh b/test/benchmark/benchflatcc/run.sh
new file mode 100755
index 0000000..2d63dae
--- /dev/null
+++ b/test/benchmark/benchflatcc/run.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchflatcc
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+#bin/flatcc -a -o ${TMP} test/benchmark/schema/flatbench.fbs
+bin/flatcc --json-printer -a -o ${TMP} test/benchmark/schema/flatbench.fbs
+
+CC=${CC:-cc}
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchflatcc/* ${TMP}
+cd ${TMP}
+$CC -g -std=c11 -I ${ROOT}/include benchflatcc.c \
+ ${ROOT}/lib/libflatccrt_d.a -o benchflatcc_d
+$CC -O3 -DNDEBUG -std=c11 -I ${ROOT}/include benchflatcc.c \
+ ${ROOT}/lib/libflatccrt.a -o benchflatcc
+echo "running flatbench flatcc for C (debug)"
+./benchflatcc_d
+echo "running flatbench flatcc for C (optimized)"
+./benchflatcc
diff --git a/test/benchmark/benchflatccjson/benchflatccjson.c b/test/benchmark/benchflatccjson/benchflatccjson.c
new file mode 100644
index 0000000..26ee291
--- /dev/null
+++ b/test/benchmark/benchflatccjson/benchflatccjson.c
@@ -0,0 +1,182 @@
+#define BENCH_TITLE "flatcc json parser and printer for C"
+
+/*
+ * NOTE:
+ *
+ * Using dtoa_grisu3.c over sprintf("%.17g") more than doubles the
+ * encoding performance of this benchmark from 3.3 us/op to 1.3 us/op.
+ */
+
+#include <stdlib.h>
+
+/*
+ * Builder is only needed so we can create the initial buffer to encode
+ * json from, but it also includes the reader which is needed to calculate
+ * the decoded checksum after parsing.
+ */
+#include "flatbench_builder.h"
+
+#include "flatbench_json_parser.h"
+#include "flatbench_json_printer.h"
+
+#define C(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBarContainer, x)
+#define FooBar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_FooBar, x)
+#define Bar(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Bar, x)
+#define Foo(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Foo, x)
+#define Enum(x) FLATBUFFERS_WRAP_NAMESPACE(benchfb_Enum, x)
+#define True flatbuffers_true
+#define False flatbuffers_false
+#define StringLen flatbuffers_string_len
+
+typedef struct flatcc_jsonbench {
+ flatcc_builder_t builder;
+ flatcc_json_parser_t parser;
+ flatcc_json_printer_t printer;
+
+ /* Holds the source data to print (encode) from. */
+ char bin[1000];
+ size_t bin_size;
+    /* Extra buffer to extract the parsed (decoded) buffer into. */
+ char decode_buffer[1000];
+ /*
+ * The target encode / source decode buffer is provided by the
+ * benchmark framework.
+ */
+} flatcc_jsonbench_t;
+
+int flatcc_jsonbench_init(flatcc_jsonbench_t *bench)
+{
+ int i, veclen = 3;
+ void *buffer_ok;
+ flatcc_builder_t *B = &bench->builder;
+
+ flatcc_builder_init(B);
+
+ /* Generate the data needed to print from, just once. */
+ C(start_as_root(B));
+ C(list_start(B));
+ for (i = 0; i < veclen; ++i) {
+ /*
+ * By using push_start instead of push_create we can construct
+ * the sibling field (of Bar type) in-place on the stack,
+ * otherwise we would need to create a temporary Bar struct.
+ */
+ C(list_push_start(B));
+ FooBar(sibling_create(B,
+ 0xABADCAFEABADCAFE + i, 10000 + i, '@' + i, 1000000 + i,
+ 123456 + i, 3.14159f + i, 10000 + i));
+ FooBar(name_create_str(B, "Hello, World!"));
+ FooBar(rating_add(B, 3.1415432432445543543 + i));
+ FooBar(postfix_add(B, '!' + i));
+ C(list_push_end(B));
+ }
+ C(list_end(B));
+ C(location_create_str(B, "https://www.example.com/myurl/"));
+ C(fruit_add(B, Enum(Bananas)));
+ C(initialized_add(B, True));
+ C(end_as_root(B));
+
+ buffer_ok = flatcc_builder_copy_buffer(B, bench->bin, sizeof(bench->bin));
+ bench->bin_size = flatcc_builder_get_buffer_size(B);
+
+ flatcc_builder_reset(&bench->builder);
+ return !buffer_ok;
+}
+
+void flatcc_jsonbench_clear(flatcc_jsonbench_t *bench)
+{
+ flatcc_json_printer_clear(&bench->printer);
+ flatcc_builder_clear(&bench->builder);
+ // parser does not need to be cleared.
+}
+
+/*
+ * Size of a buffer large enough to hold the encoded representation.
+ *
+ * 1000 is enough for compact json, but for pretty printing we must increase it.
+ */
+#define BENCHMARK_BUFSIZ 10000
+
+/* Interface to main benchmark logic. */
+#define DECLARE_BENCHMARK(BM) \
+ flatcc_jsonbench_t flatcc_jsonbench, *BM; \
+ BM = &flatcc_jsonbench; \
+ flatcc_jsonbench_init(BM);
+
+#define CLEAR_BENCHMARK(BM) flatcc_jsonbench_clear(BM);
+
+int encode(flatcc_jsonbench_t *bench, void *buffer, size_t *size)
+{
+ int ret;
+
+ flatcc_json_printer_init_buffer(&bench->printer, buffer, *size);
+ /*
+     * Normally avoid setting indentation - this yields compact,
+     * spaceless json, which is what you want in resource critical
+     * parsing and printing. That said, it doesn't get that much slower,
+     * so it is interesting to benchmark. It can be improved by enabling
+     * SSE4_2, but that is generally not worth the trouble.
+ */
+ //flatcc_json_printer_set_indent(&bench->printer, 8);
+
+ /*
+ * Unquoted makes it slightly slower, noenum hardly makes a
+ * difference - for this particular data set.
+ */
+ // flatcc_json_printer_set_noenum(&bench->printer, 1);
+ // flatcc_json_printer_set_unquoted(&bench->printer, 1);
+ ret = flatbench_print_json(&bench->printer, bench->bin, bench->bin_size);
+ *size = flatcc_json_printer_flush(&bench->printer);
+
+ return ret < 0 ? ret : 0;
+}
+
+int64_t decode(flatcc_jsonbench_t *bench, void *buffer, size_t size, int64_t sum)
+{
+ unsigned int i;
+ int ret;
+ flatcc_builder_t *B = &bench->builder;
+
+ C(table_t) foobarcontainer;
+ FooBar(vec_t) list;
+ FooBar(table_t) foobar;
+ Bar(struct_t) bar;
+ Foo(struct_t) foo;
+
+ flatcc_builder_reset(B);
+ ret = flatbench_parse_json(B, &bench->parser, buffer, size, 0);
+ if (ret) {
+ return 0;
+ }
+ if (!flatcc_builder_copy_buffer(B,
+ bench->decode_buffer, sizeof(bench->decode_buffer))) {
+ return 0;
+ }
+
+ /* Traverse parsed result to calculate checksum. */
+
+ foobarcontainer = C(as_root(bench->decode_buffer));
+ sum += C(initialized(foobarcontainer));
+ sum += StringLen(C(location(foobarcontainer)));
+ sum += C(fruit(foobarcontainer));
+ list = C(list(foobarcontainer));
+ for (i = 0; i < FooBar(vec_len(list)); ++i) {
+ foobar = FooBar(vec_at(list, i));
+ sum += StringLen(FooBar(name(foobar)));
+ sum += FooBar(postfix(foobar));
+ sum += (int64_t)FooBar(rating(foobar));
+ bar = FooBar(sibling(foobar));
+ sum += (int64_t)Bar(ratio(bar));
+ sum += Bar(size(bar));
+ sum += Bar(time(bar));
+ foo = Bar(parent(bar));
+ sum += Foo(count(foo));
+ sum += Foo(id(foo));
+ sum += Foo(length(foo));
+ sum += Foo(prefix(foo));
+ }
+ return sum + 2 * sum;
+}
+
+/* Copy to same folder before compilation or use include directive. */
+#include "benchmain.h"
diff --git a/test/benchmark/benchflatccjson/run.sh b/test/benchmark/benchflatccjson/run.sh
new file mode 100755
index 0000000..c24e02e
--- /dev/null
+++ b/test/benchmark/benchflatccjson/run.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchflatccjson
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+bin/flatcc --json -crw -o ${TMP} test/benchmark/schema/flatbench.fbs
+
+CC=${CC:-cc}
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchflatccjson/* ${TMP}
+cd ${TMP}
+$CC -g -std=c11 -I ${ROOT}/include benchflatccjson.c \
+ ${ROOT}/lib/libflatccrt_d.a -o benchflatccjson_d
+$CC -O3 -DNDEBUG -std=c11 -I ${ROOT}/include benchflatccjson.c \
+ ${ROOT}/lib/libflatccrt.a -o benchflatccjson
+echo "running flatbench flatcc json parse and print for C (debug)"
+./benchflatccjson_d
+echo "running flatbench flatcc json parse and print for C (optimized)"
+./benchflatccjson
diff --git a/test/benchmark/benchmain/benchmain.h b/test/benchmark/benchmain/benchmain.h
new file mode 100644
index 0000000..f29c548
--- /dev/null
+++ b/test/benchmark/benchmain/benchmain.h
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "flatcc/support/elapsed.h"
+
+#ifdef NDEBUG
+#define COMPILE_TYPE "(optimized)"
+#else
+#define COMPILE_TYPE "(debug)"
+#endif
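+
+/*
+ * The including benchmark implementation must define BENCH_TITLE,
+ * BENCHMARK_BUFSIZ, DECLARE_BENCHMARK and CLEAR_BENCHMARK, and provide
+ * encode() and decode() with the signatures used below.
+ */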
+
+int main(int argc, char *argv[])
+{
+ /*
+ * The size must be large enough to hold different representations,
+ * including printed json, but we know the printed json is close to
+ * 700 bytes.
+ */
+ const int bufsize = BENCHMARK_BUFSIZ, rep = 1000000;
+ void *buf;
+ size_t size, old_size;
+ double t1, t2, t3;
+    /* Use volatile to prevent over-optimization. */
+ volatile int64_t total = 0;
+ int i, ret = 0;
+ DECLARE_BENCHMARK(BM);
+
+ buf = malloc(bufsize);
+
+ /* Warmup to preallocate internal buffers. */
+ size = bufsize;
+ old_size = size;
+ encode(BM, buf, &size);
+ t1 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ size = bufsize;
+ ret |= encode(BM, buf, &size);
+ assert(ret == 0);
+ if (i > 0 && size != old_size) {
+ printf("abort on inconsistent encoding size\n");
+ goto done;
+ }
+ old_size = size;
+ }
+ t2 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ total = decode(BM, buf, size, total);
+ }
+ t3 = elapsed_realtime();
+ if (total != -8725036910085654784LL) {
+ printf("ABORT ON CHECKSUM FAILURE\n");
+ goto done;
+ }
+ printf("----\n");
+ show_benchmark(BENCH_TITLE " encode " COMPILE_TYPE, t1, t2, size, rep, "1M");
+ printf("\n");
+ show_benchmark(BENCH_TITLE " decode/traverse " COMPILE_TYPE, t2, t3, size, rep, "1M");
+ printf("----\n");
+ ret = 0;
+done:
+ if (buf) {
+ free(buf);
+ }
+ CLEAR_BENCHMARK(BM);
+ return 0;
+}
diff --git a/test/benchmark/benchout-osx.txt b/test/benchmark/benchout-osx.txt
new file mode 100644
index 0000000..ab0ec63
--- /dev/null
+++ b/test/benchmark/benchout-osx.txt
@@ -0,0 +1,169 @@
+running all benchmarks (raw, flatc C++, flatcc C)
+building and benchmarking raw strucs
+running flatbench for raw C structs (debug)
+----
+operation: flatbench for raw C structs encode (debug)
+elapsed time: 0.106 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 2956.926 (MB/s)
+throughput in ops per sec: 9477325.499
+throughput in 1M ops per sec: 9.477
+time per op: 105.515 (ns)
+
+operation: flatbench for raw C structs decode/traverse (debug)
+elapsed time: 0.074 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 4222.379 (MB/s)
+throughput in ops per sec: 13533264.765
+throughput in 1M ops per sec: 13.533
+time per op: 73.892 (ns)
+----
+running flatbench for raw C structs (optimized)
+----
+operation: flatbench for raw C structs encode (optimized)
+elapsed time: 0.052 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 5991.474 (MB/s)
+throughput in ops per sec: 19203441.257
+throughput in 1M ops per sec: 19.203
+time per op: 52.074 (ns)
+
+operation: flatbench for raw C structs decode/traverse (optimized)
+elapsed time: 0.012 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 26342.452 (MB/s)
+throughput in ops per sec: 84430935.495
+throughput in 1M ops per sec: 84.431
+time per op: 11.844 (ns)
+----
+building and benchmarking flatc generated C++
+running flatbench flatc for C++ (debug)
+----
+operation: flatc for C++ encode (debug)
+elapsed time: 5.338 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 64.444 (MB/s)
+throughput in ops per sec: 187337.801
+throughput in 1M ops per sec: 0.187
+time per op: 5.338 (us)
+
+operation: flatc for C++ decode/traverse (debug)
+elapsed time: 0.798 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 430.966 (MB/s)
+throughput in ops per sec: 1252809.425
+throughput in 1M ops per sec: 1.253
+time per op: 798.206 (ns)
+----
+running flatbench flatc for C++ (optimized)
+----
+operation: flatc for C++ encode (optimized)
+elapsed time: 0.716 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 480.630 (MB/s)
+throughput in ops per sec: 1397180.769
+throughput in 1M ops per sec: 1.397
+time per op: 715.727 (ns)
+
+operation: flatc for C++ decode/traverse (optimized)
+elapsed time: 0.029 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 12058.751 (MB/s)
+throughput in ops per sec: 35054509.763
+throughput in 1M ops per sec: 35.055
+time per op: 28.527 (ns)
+----
+building and benchmarking flatcc generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc for C (debug)
+----
+operation: flatcc for C encode (debug)
+elapsed time: 1.975 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 170.157 (MB/s)
+throughput in ops per sec: 506418.346
+throughput in 1M ops per sec: 0.506
+time per op: 1.975 (us)
+
+operation: flatcc for C decode/traverse (debug)
+elapsed time: 0.566 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 593.408 (MB/s)
+throughput in ops per sec: 1766094.864
+throughput in 1M ops per sec: 1.766
+time per op: 566.221 (ns)
+----
+running flatbench flatcc for C (optimized)
+----
+operation: flatcc for C encode (optimized)
+elapsed time: 0.606 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 554.266 (MB/s)
+throughput in ops per sec: 1649601.539
+throughput in 1M ops per sec: 1.650
+time per op: 606.207 (ns)
+
+operation: flatcc for C decode/traverse (optimized)
+elapsed time: 0.029 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 11740.452 (MB/s)
+throughput in ops per sec: 34941821.867
+throughput in 1M ops per sec: 34.942
+time per op: 28.619 (ns)
+----
+building and benchmarking flatcc json generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc json parse and print for C (debug)
+----
+operation: flatcc json parser and printer for C encode (debug)
+elapsed time: 4.633 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 155.855 (MB/s)
+throughput in ops per sec: 215866.116
+throughput in 1M ops per sec: 0.216
+time per op: 4.633 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (debug)
+elapsed time: 6.957 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 103.781 (MB/s)
+throughput in ops per sec: 143740.882
+throughput in 1M ops per sec: 0.144
+time per op: 6.957 (us)
+----
+running flatbench flatcc json parse and print for C (optimized)
+----
+operation: flatcc json parser and printer for C encode (optimized)
+elapsed time: 1.358 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 531.528 (MB/s)
+throughput in ops per sec: 736188.912
+throughput in 1M ops per sec: 0.736
+time per op: 1.358 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (optimized)
+elapsed time: 2.224 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 324.572 (MB/s)
+throughput in ops per sec: 449546.295
+throughput in 1M ops per sec: 0.450
+time per op: 2.224 (us)
+----
diff --git a/test/benchmark/benchout-ubuntu.txt b/test/benchmark/benchout-ubuntu.txt
new file mode 100644
index 0000000..b551901
--- /dev/null
+++ b/test/benchmark/benchout-ubuntu.txt
@@ -0,0 +1,169 @@
+running all benchmarks (raw, flatc C++, flatcc C)
+building and benchmarking raw strucs
+running flatbench for raw C structs (debug)
+----
+operation: flatbench for raw C structs encode (debug)
+elapsed time: 0.065 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 4781.609 (MB/s)
+throughput in ops per sec: 15325670.498
+throughput in 1M ops per sec: 15.326
+time per op: 65.250 (ns)
+
+operation: flatbench for raw C structs decode/traverse (debug)
+elapsed time: 0.063 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 4931.325 (MB/s)
+throughput in ops per sec: 15805528.774
+throughput in 1M ops per sec: 15.806
+time per op: 63.269 (ns)
+----
+running flatbench for raw C structs (optimized)
+----
+operation: flatbench for raw C structs encode (optimized)
+elapsed time: 0.030 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 10521.346 (MB/s)
+throughput in ops per sec: 33722263.438
+throughput in 1M ops per sec: 33.722
+time per op: 29.654 (ns)
+
+operation: flatbench for raw C structs decode/traverse (optimized)
+elapsed time: 0.012 (s)
+iterations: 1000000
+size: 312 (bytes)
+bandwidth: 25409.235 (MB/s)
+throughput in ops per sec: 81439856.666
+throughput in 1M ops per sec: 81.440
+time per op: 12.279 (ns)
+----
+building and benchmarking flatc generated C++
+running flatbench flatc for C++ (debug)
+----
+operation: flatc for C++ encode (debug)
+elapsed time: 5.577 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 61.679 (MB/s)
+throughput in ops per sec: 179300.638
+throughput in 1M ops per sec: 0.179
+time per op: 5.577 (us)
+
+operation: flatc for C++ decode/traverse (debug)
+elapsed time: 0.892 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 385.522 (MB/s)
+throughput in ops per sec: 1120703.084
+throughput in 1M ops per sec: 1.121
+time per op: 892.297 (ns)
+----
+running flatbench flatc for C++ (optimized)
+----
+operation: flatc for C++ encode (optimized)
+elapsed time: 0.516 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 667.104 (MB/s)
+throughput in ops per sec: 1939254.783
+throughput in 1M ops per sec: 1.939
+time per op: 515.662 (ns)
+
+operation: flatc for C++ decode/traverse (optimized)
+elapsed time: 0.030 (s)
+iterations: 1000000
+size: 344 (bytes)
+bandwidth: 11479.294 (MB/s)
+throughput in ops per sec: 33370040.378
+throughput in 1M ops per sec: 33.370
+time per op: 29.967 (ns)
+----
+building and benchmarking flatcc generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc for C (debug)
+----
+operation: flatcc for C encode (debug)
+elapsed time: 1.893 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 177.461 (MB/s)
+throughput in ops per sec: 528159.065
+throughput in 1M ops per sec: 0.528
+time per op: 1.893 (us)
+
+operation: flatcc for C decode/traverse (debug)
+elapsed time: 0.643 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 522.374 (MB/s)
+throughput in ops per sec: 1554685.277
+throughput in 1M ops per sec: 1.555
+time per op: 643.217 (ns)
+----
+running flatbench flatcc for C (optimized)
+----
+operation: flatcc for C encode (optimized)
+elapsed time: 0.531 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 632.498 (MB/s)
+throughput in ops per sec: 1882434.440
+throughput in 1M ops per sec: 1.882
+time per op: 531.227 (ns)
+
+operation: flatcc for C decode/traverse (optimized)
+elapsed time: 0.028 (s)
+iterations: 1000000
+size: 336 (bytes)
+bandwidth: 12200.879 (MB/s)
+throughput in ops per sec: 36312139.148
+throughput in 1M ops per sec: 36.312
+time per op: 27.539 (ns)
+----
+building and benchmarking flatcc json generated C
+[1/1] Linking C executable ../../bin/flatcc_d
+[1/1] Linking C executable ../../bin/flatcc
+running flatbench flatcc json parse and print for C (debug)
+----
+operation: flatcc json parser and printer for C encode (debug)
+elapsed time: 3.931 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 183.674 (MB/s)
+throughput in ops per sec: 254396.609
+throughput in 1M ops per sec: 0.254
+time per op: 3.931 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (debug)
+elapsed time: 6.874 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 105.031 (MB/s)
+throughput in ops per sec: 145472.171
+throughput in 1M ops per sec: 0.145
+time per op: 6.874 (us)
+----
+running flatbench flatcc json parse and print for C (optimized)
+----
+operation: flatcc json parser and printer for C encode (optimized)
+elapsed time: 1.210 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 596.609 (MB/s)
+throughput in ops per sec: 826328.137
+throughput in 1M ops per sec: 0.826
+time per op: 1.210 (us)
+
+operation: flatcc json parser and printer for C decode/traverse (optimized)
+elapsed time: 1.772 (s)
+iterations: 1000000
+size: 722 (bytes)
+bandwidth: 407.372 (MB/s)
+throughput in ops per sec: 564227.736
+throughput in 1M ops per sec: 0.564
+time per op: 1.772 (us)
+----
diff --git a/test/benchmark/benchraw/benchraw.c b/test/benchmark/benchraw/benchraw.c
new file mode 100644
index 0000000..fd6a9ea
--- /dev/null
+++ b/test/benchmark/benchraw/benchraw.c
@@ -0,0 +1,117 @@
+#define BENCH_TITLE "flatbench for raw C structs"
+
+#define BENCHMARK_BUFSIZ 1000
+#define DECLARE_BENCHMARK(BM)\
+ void *BM = 0
+#define CLEAR_BENCHMARK(BM)
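+
+/*
+ * The title, buffer size and benchmark hooks above are presumably
+ * consumed by the shared benchmain.h driver included at the end of
+ * this file; the raw struct benchmark needs no per-run context, so the
+ * handle is just a dummy null pointer and teardown is a no-op.
+ */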
+
+#include <string.h>
+#include <stdint.h>
+
+#define STRING_LEN 32
+#define VEC_LEN 3
+#define fb_bool uint8_t
+
+enum Enum { Apples, Pears, Bananas };
+
+struct Foo {
+ int64_t id;
+ short count;
+ char prefix;
+ int length;
+};
+
+struct Bar {
+ struct Foo parent;
+ int time;
+ float ratio;
+ unsigned short size;
+};
+
+struct FooBar {
+ struct Bar sibling;
+ int name_len;
+ char name[STRING_LEN];
+ double rating;
+ unsigned char postfix;
+};
+
+struct FooBarContainer {
+ struct FooBar list[VEC_LEN];
+ fb_bool initialized;
+ enum Enum fruit;
+ int location_len;
+ char location[STRING_LEN];
+};
+
+int encode(void *bench, void *buffer, size_t *size)
+{
+ int i;
+ struct FooBarContainer fbc;
+ struct FooBar *foobar;
+ struct Foo *foo;
+ struct Bar *bar;
+
+ (void)bench;
+
+ strcpy(fbc.location, "https://www.example.com/myurl/");
+ fbc.location_len = strlen(fbc.location);
+ fbc.fruit = Bananas;
+ fbc.initialized = 1;
+ for (i = 0; i < VEC_LEN; ++i) {
+ foobar = &fbc.list[i];
+ foobar->rating = 3.1415432432445543543 + i;
+ foobar->postfix = '!' + i;
+ strcpy(foobar->name, "Hello, World!");
+ foobar->name_len = strlen(foobar->name);
+ bar = &foobar->sibling;
+ bar->ratio = 3.14159f + i;
+ bar->size = 10000 + i;
+ bar->time = 123456 + i;
+ foo = &bar->parent;
+ foo->id = 0xABADCAFEABADCAFE + i;
+ foo->count = 10000 + i;
+ foo->length = 1000000 + i;
+ foo->prefix = '@' + i;
+ }
+ if (*size < sizeof(struct FooBarContainer)) {
+ return -1;
+ }
+ *size = sizeof(struct FooBarContainer);
+ memcpy(buffer, &fbc, *size);
+ return 0;
+}
+
+int64_t decode(void *bench, void *buffer, size_t size, int64_t sum)
+{
+ int i;
+ struct FooBarContainer *foobarcontainer;
+ struct FooBar *foobar;
+ struct Foo *foo;
+ struct Bar *bar;
+
+ (void)bench;
+
+ foobarcontainer = buffer;
+ sum += foobarcontainer->initialized;
+ sum += foobarcontainer->location_len;
+ sum += foobarcontainer->fruit;
+ for (i = 0; i < VEC_LEN; ++i) {
+ foobar = &foobarcontainer->list[i];
+ sum += foobar->name_len;
+ sum += foobar->postfix;
+ sum += (int64_t)foobar->rating;
+ bar = &foobar->sibling;
+ sum += (int64_t)bar->ratio;
+ sum += bar->size;
+ sum += bar->time;
+ foo = &bar->parent;
+ sum += foo->count;
+ sum += foo->id;
+ sum += foo->length;
+ sum += foo->prefix;
+ }
+ return sum + 2 * sum;
+}
+
+#include "benchmain.h"
diff --git a/test/benchmark/benchraw/run.sh b/test/benchmark/benchraw/run.sh
new file mode 100755
index 0000000..13e3333
--- /dev/null
+++ b/test/benchmark/benchraw/run.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../../..
+ROOT=`pwd`
+TMP=build/tmp/test/benchmark/benchraw
+INC=$ROOT/include
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+
+CC=${CC:-cc}
+cp -r test/benchmark/benchmain/* ${TMP}
+cp -r test/benchmark/benchraw/* ${TMP}
+
+cd ${TMP}
+$CC -g benchraw.c -o benchraw_d -I $INC
+$CC -O3 -DNDEBUG benchraw.c -o benchraw -I $INC
+echo "running flatbench for raw C structs (debug)"
+./benchraw_d
+echo "running flatbench for raw C structs (optimized)"
+./benchraw
diff --git a/test/benchmark/schema/flatbench.fbs b/test/benchmark/schema/flatbench.fbs
new file mode 100644
index 0000000..34bd2df
--- /dev/null
+++ b/test/benchmark/schema/flatbench.fbs
@@ -0,0 +1,37 @@
+// trying to represent a typical mix of datatypes:
+// 1 array of 3 elements, each element: 1 string, 3 nested objects, 9 scalars
+// root element has the array, additional string and an enum
+
+namespace benchfb;
+
+enum Enum : short { Apples, Pears, Bananas }
+
+struct Foo {
+ id:ulong;
+ count:short;
+ prefix:byte;
+ length:uint;
+}
+
+struct Bar {
+ parent:Foo;
+ time:int;
+ ratio:float;
+ size:ushort;
+}
+
+table FooBar {
+ sibling:Bar;
+ name:string;
+ rating:double;
+ postfix:ubyte;
+}
+
+table FooBarContainer {
+ list:[FooBar]; // 3 copies of the above
+ initialized:bool;
+ fruit:Enum;
+ location:string;
+}
+
+root_type FooBarContainer;
diff --git a/test/cgen_test/CMakeLists.txt b/test/cgen_test/CMakeLists.txt
new file mode 100644
index 0000000..2edc040
--- /dev/null
+++ b/test/cgen_test/CMakeLists.txt
@@ -0,0 +1,43 @@
+include(CTest)
+
+include_directories (
+ "${PROJECT_SOURCE_DIR}/include"
+)
+
+add_executable(cgen_test
+ cgen_test.c
+)
+
+target_link_libraries(cgen_test
+ flatcc
+)
+
+add_test(cgen_test cgen_test${CMAKE_EXECUTABLE_SUFFIX})
+
+
+# Compilation of the generated code tests many important edge cases
+# in the parser and code generator but due to CMake limitations,
+# custom target dependencies only work for Make build targets.
+#
+# Expansion of flags results in quotes the compiler won't eat;
+# separating arguments should fix this, but it is not clear how portable that is.
+# see also http://stackoverflow.com/questions/9870162/avoid-quoting-in-cmake-add-custom-command
+separate_arguments(CUSTOM_C_FLAGS UNIX_COMMAND "${CMAKE_C_FLAGS}")
+
+add_custom_target(test_generated
+ COMMAND ./cgen_test${CMAKE_EXECUTABLE_SUFFIX} > test_generated${CMAKE_EXECUTABLE_SUFFIX}.c
+ COMMAND ${CMAKE_C_COMPILER} ${CUSTOM_C_FLAGS} test_generated${CMAKE_EXECUTABLE_SUFFIX}.c -c
+ -I${CMAKE_SOURCE_DIR}/include WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/test/cgen_test
+ )
+add_dependencies(test_generated cgen_test)
+
+# Might be related to:
+# https://cmake.org/Bug/view.php?id=14963#c37230
+# https://github.com/ninja-build/ninja/issues/760
+if(${CMAKE_MAKE_PROGRAM} MATCHES make)
+# this is now also broken for make - the system include path is not
+# visible, so the build fails with <assert.h> not found in the custom
+# build stage, where CMAKE_C_COMPILER uses a compiler call that has
+# this behavior
+#add_test(test_generated ${CMAKE_MAKE_PROGRAM} test_generated)
+endif(${CMAKE_MAKE_PROGRAM} MATCHES make)
diff --git a/test/cgen_test/cgen_test.c b/test/cgen_test/cgen_test.c
new file mode 100644
index 0000000..6d58ed1
--- /dev/null
+++ b/test/cgen_test/cgen_test.c
@@ -0,0 +1,163 @@
+/*
+ * Parse and verify a complex nonsense schema, then generate common
+ * and schema specific files for reader and builder, all concatenated to
+ * stdout, followed by the schema source in a comment.
+ *
+ * Notes:
+ *
+ * Later type declarations are visible in the same file regardless of
+ * namespace. Earlier type declarations in included files are also
+ * visible. Included files cannot see types in later included or
+ * including files. (We do not test file inclusion here though). This
+ * behaviour is chosen both because it seems sensible and because it
+ * allows for independent file generation of each schema file.
+ *
+ * Google's flatc compiler does in some cases allow later references,
+ * e.g. Monster referencing itself, but requires structs to be ordered.
+ * We do not require that, so structs are sorted topologically before
+ * being given to the code generator. Tables use multiple layers of
+ * forward declarations in the generated C code.
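+ *
+ * For example, the test input below declares "struct c2 { y : c3; }"
+ * before c3 is declared; the topological sort ensures c3 is handed to
+ * the code generator ahead of c2.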
+ *
+ * Note: the Google flatc compiler does support multiple namespaces
+ * but apparently does not currently generate C++ for them correctly.
+ * As such it is not well-defined what exactly a namespace means. We
+ * have chosen it to mean everything from the declaration until the
+ * next one in the same file. Anything before a namespace declaration
+ * is global. Namespace declarations only affect the local file.
+ * Namespace references allow spaces between dots when used as an
+ * operator, but not when used as a string attribute - this matches
+ * flatc behavior. When a namespace prefix is not specified, the
+ * current scope is searched first, then the global scope.
+ *
+ * There is no way to specify the global scope once a namespace has been
+ * declared, other than starting a new file. It could be considered to
+ * allow for "namespace ;".
+ *
+ * The flatc compiler does not allow namespace prefixes in root_type,
+ * but that is likely an oversight. We support it.
+ *
+ */
+#include <stdio.h>
+#include <string.h>
+#include "flatcc/flatcc.h"
+
+int main(void)
+{
+ const char *name = "../xyzzy.fbs";
+
+ char input[] =
+ "\t//buffers do not support include statements\n"
+ "//\tinclude \"foobar.fbs\";\n"
+ "// in flatc, only one field can have a key, but we have no issues\n"
+        "// as long as the vector is sorted accordingly. The first key\n"
+ "// gets a shorter find alias method.\n"
+ "// (all scalar vectors can also be searched - they have find defined)\n"
+ "/* block comments are also allowed.\n */\n"
+ "//////////////////////////////////////////////////////////\n"
+ "//////////////////////////////////////////////////////////\n"
+ "table point { x : float (key); y: float; z: float (key); }\n"
+ "namespace mystic;\n"
+ "/************ ANOTHER DOC CASE *************/\n"
+ "table island { lattitude : int; longitude : int; }\n"
+ " /// There are two different point tables\n"
+ "// we test multi line doc comments here - this line should be ignored.\n"
+ " /// - one in each name space.\n"
+ "table point { interest: agent; blank: string; geo: mystic.island; }\n"
+ "enum agent:int { lot, pirate, vessel, navy, parrot }\n"
+ "\tnamespace the;\n"
+ "//root_type point;\n"
+ "attribute \"foo\";\n"
+ "//attribute \"\"; // ensure empty strings are accepted.\n"
+ "/// shade is for CG applications\n"
+ "struct shade (force_align:2) { x: byte; y: byte; z: byte;\n"
+ "/// alpha is unsigned!\n"
+ "alpha: ubyte (key); }\n"
+ "/// the.ui is a union\n"
+ "///\n"
+ "/// We got one blank comment line above.\n"
+ "union u1 { /// Note that the.point is different from mystic.point in other namespace.\n"
+ "point\n"
+ "= /// meaningless doc comment that should be stripped\n"
+ "2, foo = 4, mystic.island = 17, } enum e1:short { z = -2, one , two , three = 3, }\n"
+ "// key on enum not supported by flatc\n"
+ "table foo { m: u1; e: e1 = z (key); x : int = mystic.agent.vessel; interest: mystic.agent = pirate; strange: mystic.agent = flags2.zulu; }\n"
+ "// Unknown attributes can be repeated with varying types since behavior is unspecified.\n"
+ "enum flags : short (bit_flags, \n"
+ "/// foos are plentiful - here it is an enum of value 42\n"
+ "foo: 42, foo, foo: \"hello\") { f1 = 1, f2 = 13, f3 }\n"
+ "enum flags2 : uint (bit_flags) { zulu, alpha, bravo, charlie, delta, echo, foxtrot }\n"
+ "/// A boolean enum - all enums must be type.\n"
+ "enum confirm : bool { no, yes }\n"
+ "// enums are not formally permitted in structs, but can be enabled.\n"
+ "// This is advanced: boolean enum binary search on struct vector ...\n"
+ "struct notify { primary_recipient: confirm (key); secondary_recipient: confirm; flags : flags; }\n"
+ "// duplicates are disallowed by default, but can be enabled\n"
+ "// enum dupes : byte { large = 2, great = 2, small = 0, other }\n"
+ "table goo { hello: string (key, required); confirmations: [confirm];\n"
+ " never_mind: double = 3.1415 (deprecated);\n"
+ " embedded_t: [ubyte] (nested_flatbuffer: \"foo\");\n"
+ " embedded_s: [ubyte] (nested_flatbuffer: \"little.whale.c2\");\n"
+ " shady: shade;\n"
+ "}\n"
+ "struct s1 (force_align:4) { index: int (key); }\n"
+ "struct c1 { a: ubyte; x1 : little.whale.c2; x2:uint; x3: short; light: shade (deprecated); }\n"
+ "/// not all constructs support doc comments - this one doesn't\n"
+ "namespace little.whale;\n"
+ "struct c2 { y : c3; }\n"
+ "//struct c3 { z : c1; }\n"
+ "struct c3 { z : the.s1; }\n"
+ "file_identifier \"fbuz\";\n"
+ "file_extension \"cgen_test\";\n"
+ "root_type little.whale.c2;\n"
+ "//root_type c2;\n"
+ "//root_type the.goo;\n"
+ "table hop { einhorn: c3 (required); jupiter: c2; names: [string] (required); ehlist: [c3]; k2: the.goo; k2vec: [the.goo]; lunar: the.flags2 = bravo; }\n"
+ "table TestOrder { x0 : byte; x1: bool = true; x2: short; x3: the.shade; x4: string; x5 : the.u1; x6 : [string]; x7: double; }\n"
+ "table TestOrder2 (original_order) { x0 : byte; x1: bool = true; x1a : bool = 1; x2: short; x3: the.shade; x4: string; x5: the.u1; x6 : [string]; x7: double; }\n"
+ "table StoreResponse {}\n"
+ "rpc_service MonsterStorage {\n"
+ " Store(Monster):StoreResponse;\n"
+ " Retrieve(MonsterId):Monster;\n"
+ " RetrieveOne(MonsterId):Monster (deprecated);\n"
+ "}\n"
+ "/* \n"
+ "*/table Monster {}\ntable MonsterId{ id: int; }\n"
+ "/* \t/ */\n"; /* '\v' would give an error. */
+
+ flatcc_options_t opts;
+ flatcc_context_t ctx = 0;
+ int ret = -1;
+
+ flatcc_init_options(&opts);
+ opts.cgen_common_reader = 1;
+ opts.cgen_reader = 1;
+ opts.cgen_common_builder = 1;
+ opts.cgen_builder = 1;
+ opts.gen_stdout = 1;
+
+ /* The basename xyzzy is derived from path. */
+ if (!(ctx = flatcc_create_context(&opts, name, 0, 0))) {
+ fprintf(stderr, "unexpected: could not initialize context\n");
+ return -1;
+ }
+ if ((ret = flatcc_parse_buffer(ctx, input, sizeof(input)))) {
+ fprintf(stderr, "sorry, parse failed\n");
+ goto done;
+ } else {
+ fprintf(stderr, "schema is valid!\n");
+ fprintf(stderr, "now generating code for C ...\n\n");
+ if (flatcc_generate_files(ctx)) {
+ fprintf(stderr, "failed to generate output for C\n");
+ goto done;
+ };
+ fprintf(stdout,
+ "\n#if 0 /* FlatBuffers Schema Source */\n"
+ "%s\n"
+ "#endif /* FlatBuffers Schema Source */\n",
+ input);
+ }
+ ret = 0;
+done:
+ flatcc_destroy_context(ctx);
+ return ret;
+}
diff --git a/test/cgen_test/cgen_test.sh b/test/cgen_test/cgen_test.sh
new file mode 100755
index 0000000..0280e9d
--- /dev/null
+++ b/test/cgen_test/cgen_test.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+
+CC=${CC:-cc}
+TMP=${ROOT}/build/tmp/test/cgen_test
+INC=${ROOT}/include
+
+${ROOT}/scripts/build.sh
+
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+
+echo "generating test source for Debug" 1>&2
+${ROOT}/build/Debug/test/cgen_test/cgen_test_d > ${TMP}/test_generated_d.c
+cd ${TMP} && $CC test_generated_d.c -c -I${INC}
+
+echo "generating test source for Release" 1>&2
+${ROOT}/build/Release/test/cgen_test/cgen_test > ${TMP}/test_generated.c
+cd ${TMP} && $CC test_generated.c -c -Wall -O3 -I${INC}
diff --git a/test/debug.sh b/test/debug.sh
new file mode 100755
index 0000000..0efbcbe
--- /dev/null
+++ b/test/debug.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/..
+mkdir -p build/tmp/out
+lldb -- \
+ bin/flatcc_d -a -o build/tmp/out --prefix zzz --common-prefix hello \
+ test/monster_test/monster_test.fbs
diff --git a/test/emit_test/CMakeLists.txt b/test/emit_test/CMakeLists.txt
new file mode 100644
index 0000000..54f3b28
--- /dev/null
+++ b/test/emit_test/CMakeLists.txt
@@ -0,0 +1,20 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/emit_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_emit_test ALL)
+add_custom_command (
+ TARGET gen_emit_test
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -a -o "${GEN_DIR}" "${FBS_DIR}/emit_test.fbs"
+  DEPENDS flatcc_cli "${FBS_DIR}/emit_test.fbs"
+)
+add_executable(emit_test emit_test.c)
+add_dependencies(emit_test gen_emit_test)
+target_link_libraries(emit_test flatccrt)
+
+add_test(emit_test emit_test${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/emit_test/emit_test.c b/test/emit_test/emit_test.c
new file mode 100644
index 0000000..ddb973d
--- /dev/null
+++ b/test/emit_test/emit_test.c
@@ -0,0 +1,137 @@
+#include <stdio.h>
+#include <assert.h>
+#include "emit_test_builder.h"
+#include "flatcc/support/hexdump.h"
+#include "flatcc/portable/pparsefp.h"
+
+#define test_assert(x) do { if (!(x)) { assert(0); return -1; }} while(0)
+/* Direct floating point comparisons are not always reliable,
+ * especially not with GCC 32-bit compilers. */
+#define test_assert_floateq(x, y) test_assert(parse_float_is_equal((x), (y)))
+#define test_assert_doubleeq(x, y) test_assert(parse_double_is_equal((x), (y)))
+
+int dbg_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len)
+{
+ int i;
+
+ (void)emit_context;
+
+ printf("dbg: emit: iov_count: %d, offset: %ld, len: %ld\n",
+ (int)iov_count, (long)offset, (long)len);
+
+ for (i = 0; i < iov_count; ++i) {
+ if (iov[i].iov_base == flatcc_builder_padding_base) {
+ printf("dbg: padding at: %ld, len: %ld\n",
+ (long)offset, (long)iov[i].iov_len);
+ }
+ if (iov[i].iov_base == 0) {
+ printf("dbg: null vector reserved at: %ld, len: %ld\n",
+ (long)offset, (long)iov[i].iov_len);
+ }
+ offset += (flatbuffers_soffset_t)iov[i].iov_len;
+ }
+ return 0;
+}
+
+int debug_test(void)
+{
+ flatcc_builder_t builder, *B;
+ float x[10] = { 0 };
+
+ B = &builder;
+ printf("dbg: output is generated by a custom emitter that doesn't actually build a buffer\n");
+ flatcc_builder_custom_init(B, dbg_emitter, 0, 0, 0);
+ /* We can create a null vector because we have a custom emitter. */
+ main_create_as_root(B, 42, 1, flatbuffers_float_vec_create(B, x, 10));
+ flatcc_builder_clear(B);
+ return 0;
+}
+
+/*
+ * this assumes a very simple schema:
+ * "table { time: long; device: ubyte; samples: [float]; }"
+ */
+int emit_test(void)
+{
+ /*
+     * Note that there is some apparently unnecessary padding after 0x01.
+     * It is caused by the end of the buffer content, excluding vtables,
+     * being forced to buffer alignment due to clustering, and because
+     * alignment happens before the buffer is fully generated.
+ */
+ unsigned char expect[] =
+#if FLATBUFFERS_PROTOCOL_IS_LE
+ "\x04\x00\x00\x00\xd4\xff\xff\xff\x2a\x00\x00\x00\x00\x00\x00\x00"
+ "\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00"
+ "\x00\x00\x80\x3f\xcd\xcc\x8c\x3f\x9a\x99\x99\x3f\x66\x66\xa6\x3f"
+ "\x0a\x00\x11\x00\x04\x00\x10\x00\x0c\x00";
+#else
+
+ "\x00\x00\x00\x04\xff\xff\xff\xd4\x00\x00\x00\x00\x00\x00\x00\x2a"
+ "\x00\x00\x00\x0c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04"
+ "\x3f\x80\x00\x00\x3f\x8c\xcc\xcd\x3f\x99\x99\x9a\x3f\xa6\x66\x66"
+ "\x00\x0a\x00\x11\x00\x04\x00\x10\x00\x0c";
+#endif
+
+ size_t size;
+ uint8_t *buf;
+ flatcc_emitter_t *E;
+ flatcc_builder_t builder, *B;
+ flatbuffers_float_vec_ref_t vref;
+ float data[4] = { 1.0f, 1.1f, 1.2f, 1.3f };
+
+ main_table_t mt;
+ int64_t time;
+
+ (void)expect;
+ B = &builder;
+
+ flatcc_builder_init(B);
+
+ /* Get the default emitter. */
+ E = flatcc_builder_get_emit_context(B);
+
+ vref = flatbuffers_float_vec_create(B, data, 4);
+ //vref = 0;
+ main_create_as_root(B, 42, 1, vref);
+
+ /* We could also have used flatcc_builder API wrapper for this. */
+ buf = flatcc_emitter_get_direct_buffer(E, &size);
+ if (!buf) {
+ return -1;
+ }
+ test_assert(size == flatcc_emitter_get_buffer_size(E));
+ test_assert(size == flatcc_builder_get_buffer_size(B));
+
+ fprintf(stderr, "buffer size: %d\n", (int)size);
+ hexdump("emit_test", buf, size, stderr);
+
+ test_assert(size == 58);
+ test_assert(sizeof(expect) - 1 == size);
+ test_assert(0 == memcmp(buf, expect, size));
+
+ mt = main_as_root(buf);
+ time = main_time(mt);
+ test_assert(time == 42);
+ test_assert(main_device(mt) == 1);
+ test_assert(flatbuffers_float_vec_len(main_samples(mt)) == 4);
+ test_assert_floateq(flatbuffers_float_vec_at(main_samples(mt), 2), 1.2f);
+
+ /* We use get_direct_buffer, so we can't clear the builder until last. */
+ flatcc_builder_clear(B);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+
+ (void)argc;
+ (void)argv;
+
+ ret |= debug_test();
+ ret |= emit_test();
+ return ret;
+}
diff --git a/test/emit_test/emit_test.fbs b/test/emit_test/emit_test.fbs
new file mode 100644
index 0000000..973c111
--- /dev/null
+++ b/test/emit_test/emit_test.fbs
@@ -0,0 +1,6 @@
+
+table main {
+ time: long;
+ device: ubyte;
+ samples: [float];
+}
diff --git a/test/emit_test/emit_test.sh b/test/emit_test/emit_test.sh
new file mode 100755
index 0000000..addecfc
--- /dev/null
+++ b/test/emit_test/emit_test.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+TMP=build/tmp/test
+${ROOT}/scripts/build.sh
+
+mkdir -p ${TMP}/emit_test
+rm -rf ${TMP}/emit_test/*
+bin/flatcc -a -o ${TMP}/emit_test test/emit_test/emit_test.fbs \
+ test/monster_test/monster_test.fbs
+cp test/emit_test/*.c ${TMP}/emit_test
+cd ${TMP}/emit_test
+cc -g -I ${ROOT}/include emit_test.c \
+ ${ROOT}/lib/libflatccrt.a -o emit_test_d
+echo "running emit test"
+./emit_test_d
+
diff --git a/test/flatc_compat/.gitattributes b/test/flatc_compat/.gitattributes
new file mode 100644
index 0000000..cf2b141
--- /dev/null
+++ b/test/flatc_compat/.gitattributes
@@ -0,0 +1,2 @@
+# We do a binary comparison test, so we need it to be unchanged on Windows.
+monsterdata_test.golden -text
diff --git a/test/flatc_compat/CMakeLists.txt b/test/flatc_compat/CMakeLists.txt
new file mode 100644
index 0000000..a472979
--- /dev/null
+++ b/test/flatc_compat/CMakeLists.txt
@@ -0,0 +1,21 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_flatc_compat ALL)
+add_custom_command (
+ TARGET gen_flatc_compat
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/monsterdata_test.mon" ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND flatcc_cli -a -o "${GEN_DIR}" "${FBS_DIR}/monster_test.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs"
+)
+add_executable(flatc_compat flatc_compat.c)
+add_dependencies(flatc_compat gen_flatc_compat)
+target_link_libraries(flatc_compat flatccrt)
+
+add_test(flatc_compat flatc_compat${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/flatc_compat/README.md b/test/flatc_compat/README.md
new file mode 100644
index 0000000..ffc0bf0
--- /dev/null
+++ b/test/flatc_compat/README.md
@@ -0,0 +1,10 @@
+Basic sanity check to verify that a `flatcc` generated reader can read
+binaries generated by Google's `flatc` compiler.
+
+`monsterdata_test.mon` is generated by Google's flatc compiler using the
+`monsterdata_test.golden` JSON as input and `monster_test.fbs` as schema.
+
+`monsterdata_test.json` was also copied for completeness - it apparently
+relates to an undocumented hash attribute that converts JSON strings into
+hashes, whereas the golden file has the correct target types with respect
+to the schema.
diff --git a/test/flatc_compat/flatc_compat.c b/test/flatc_compat/flatc_compat.c
new file mode 100644
index 0000000..24aae19
--- /dev/null
+++ b/test/flatc_compat/flatc_compat.c
@@ -0,0 +1,226 @@
+#include <stdlib.h>
+#include <assert.h>
+
+#include "monster_test_reader.h"
+#include "monster_test_verifier.h"
+#include "flatcc/support/readfile.h"
+#include "flatcc/support/hexdump.h"
+
+#define align_up(alignment, size) \
+ (((size_t)(size) + (size_t)(alignment) - 1) & ~((size_t)(alignment) - 1))
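+/*
+ * align_up rounds size up to the next multiple of a power-of-two
+ * alignment, e.g. align_up(256, 300) == 512 and align_up(256, 256) == 256.
+ */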
+
+const char *filename = "monsterdata_test.mon";
+
+#undef ns
+#define ns(x) MyGame_Example_ ## x
+
+int verify_monster(void *buffer)
+{
+ ns(Monster_table_t) monster, mini;
+ ns(Vec3_struct_t) vec;
+ ns(Test_struct_t) test;
+ ns(Test_vec_t) testvec;
+ ns(Any_union_type_t) mini_type;
+ flatbuffers_string_t name;
+ size_t offset;
+ flatbuffers_uint8_vec_t inv;
+ flatbuffers_string_vec_t aofs;
+ flatbuffers_string_t s;
+ size_t i;
+
+ if (!(monster = ns(Monster_as_root(buffer)))) {
+ printf("Monster not available\n");
+ return -1;
+ }
+ if (ns(Monster_hp(monster)) != 80) {
+ printf("Health points are not as expected\n");
+ return -1;
+ }
+ if (!(vec = ns(Monster_pos(monster)))) {
+ printf("Position is absent\n");
+ return -1;
+ }
+ offset = (size_t)((char *)vec - (char *)buffer);
+ if (offset & 15) {
+ printf("Force align of Vec3 struct not correct\n");
+ return -1;
+ }
+ if (ns(Vec3_x(vec)) != 1) {
+ printf("Position failing on x coordinate\n");
+ return -1;
+ }
+ if (ns(Vec3_y(vec)) != 2) {
+ printf("Position failing on y coordinate\n");
+ return -1;
+ }
+ if (ns(Vec3_z(vec)) != 3) {
+ printf("Position failing on z coordinate\n");
+ return -1;
+ }
+ if (ns(Vec3_test1(vec)) != 3) {
+ printf("Vec3 test1 is not 3\n");
+ return -1;
+ }
+ if (ns(Vec3_test2(vec)) != ns(Color_Green)) {
+ printf("Vec3 test2 not Green\n");
+ return -1;
+ }
+ test = ns(Vec3_test3(vec));
+ if (ns(Test_a(test)) != 5 || ns(Test_b(test) != 6)) {
+ printf("test3 not valid in Vec3\n");
+ return -1;
+ }
+ name = ns(Monster_name(monster));
+ if (flatbuffers_string_len(name) != 9) {
+ printf("Name length is not correct\n");
+ return -1;
+ }
+ if (!name || strcmp(name, "MyMonster")) {
+ printf("Name is not correct\n");
+ return -1;
+ }
+ inv = ns(Monster_inventory(monster));
+ if (flatbuffers_uint8_vec_len(inv) != 5) {
+ printf("Inventory has wrong length\n");
+ return -1;
+ }
+ for (i = 0; i < 5; ++i) {
+ if (flatbuffers_uint8_vec_at(inv, i) != i) {
+ printf("Inventory item #%d is wrong\n", (int)i);
+ return -1;
+ }
+ }
+ if (!(aofs = ns(Monster_testarrayofstring(monster)))) {
+ printf("Array of string not present\n");
+ return -1;
+ }
+ if (flatbuffers_string_vec_len(aofs) != 2) {
+ printf("Array of string has wrong vector length\n");
+ return -1;
+ }
+ s = flatbuffers_string_vec_at(aofs, 0);
+ if (strcmp(s, "test1")) {
+ printf("First string array element is wrong\n");
+ return -1;
+ }
+ s = flatbuffers_string_vec_at(aofs, 1);
+ if (strcmp(s, "test2")) {
+ printf("Second string array element is wrong\n");
+ return -1;
+ }
+ mini_type = ns(Monster_test_type(monster));
+ if (mini_type != ns(Any_Monster)) {
+ printf("Not any monster\n");
+ return -1;
+ }
+ mini = ns(Monster_test(monster));
+ if (!mini) {
+ printf("test monster not there\n");
+ return -1;
+ }
+ if (strcmp(ns(Monster_name(mini)), "Fred")) {
+ printf("test monster isn't Fred\n");
+ return -1;
+ }
+ testvec = ns(Monster_test4(monster));
+ if (ns(Test_vec_len(testvec)) != 2) {
+ printf("Test struct vector has wrong length\n");
+ return -1;
+ }
+ test = ns(Test_vec_at(testvec, 0));
+ if (ns(Test_a(test) != 10)) {
+ printf("Testvec[0].a is wrong\n");
+ return -1;
+ }
+ if (ns(Test_b(test) != 20)) {
+ printf("Testvec[0].b is wrong\n");
+ return -1;
+ }
+ test = ns(Test_vec_at(testvec, 1));
+ if (ns(Test_a(test) != 30)) {
+ printf("Testvec[1].a is wrong\n");
+ return -1;
+ }
+ if (ns(Test_b(test) != 40)) {
+ printf("Testvec[1].b is wrong\n");
+ return -1;
+ }
+ assert(ns(Monster_testhashs32_fnv1(monster)) == -579221183L);
+ assert(ns(Monster_testhashu32_fnv1(monster)) == 3715746113L);
+ assert(ns(Monster_testhashs64_fnv1(monster)) == 7930699090847568257LL);
+ assert(ns(Monster_testhashu64_fnv1(monster)) == 7930699090847568257LL);
+ assert(ns(Monster_testhashs32_fnv1a(monster)) == -1904106383L);
+ assert(ns(Monster_testhashu32_fnv1a(monster)) == 2390860913L);
+ assert(ns(Monster_testhashs64_fnv1a(monster)) == 4898026182817603057LL);
+ assert(ns(Monster_testhashu64_fnv1a(monster)) == 4898026182817603057LL);
+ return 0;
+}
+
+
+/* We take arguments so test can run without copying sources. */
+#define usage \
+"wrong number of arguments:\n" \
+"usage: <program> [<input-filename>]\n"
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ size_t size;
+ void *buffer, *raw_buffer;
+
+ if (argc != 1 && argc != 2) {
+ fprintf(stderr, usage);
+ exit(1);
+ }
+ if (argc == 2) {
+ filename = argv[1];
+ }
+
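+    /*
+     * The file content is copied into an over-aligned allocation so that
+     * direct struct access (the schema uses force_align) does not depend
+     * on whatever alignment readfile's underlying allocation provides.
+     */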
+    raw_buffer = readfile(filename, 1024, &size);
+    if (!raw_buffer) {
+        fprintf(stderr, "could not read binary test file: %s\n", filename);
+        return -1;
+    }
+    buffer = aligned_alloc(256, align_up(256, size));
+    if (!buffer) {
+        fprintf(stderr, "could not allocate aligned buffer\n");
+        free(raw_buffer);
+        return -1;
+    }
+    memcpy(buffer, raw_buffer, size);
+    free(raw_buffer);
+ hexdump("monsterdata_test.mon", buffer, size, stderr);
+ /*
+ * Not automated, but verifying size - 3 fails as expected because the last
+ * object in the file is a string, and the zero termination fails.
+     * size - 1 and size - 2 verify because the buffer contains
+ * padding. Note that `flatcc` does not pad at the end beyond whatever
+ * is stored (normally a vtable), but this is generated with `flatc
+ * v1.1`.
+ */
+ if (flatcc_verify_ok != ns(Monster_verify_as_root_with_identifier(buffer, size, "MONS"))) {
+#if FLATBUFFERS_PROTOCOL_IS_BE
+        fprintf(stderr, "flatc golden reference buffer was correctly rejected by flatcc verification\n"
+ "because flatc is little endian and flatcc has been compiled for big endian protocol format\n");
+ ret = 0;
+ goto done;
+#else
+ fprintf(stderr, "could not verify foreign monster file\n");
+ ret = -1;
+ goto done;
+#endif
+ }
+
+#if FLATBUFFERS_PROTOCOL_IS_BE
+ fprintf(stderr, "flatcc compiled with big endian protocol failed to reject reference little endian buffer\n");
+ ret = -1;
+ goto done;
+#else
+ if (flatcc_verify_ok != ns(Monster_verify_as_root(buffer, size))) {
+ fprintf(stderr, "could not verify foreign monster file with default identifier\n");
+ ret = -1;
+ goto done;
+ }
+ ret = verify_monster(buffer);
+#endif
+
+done:
+ aligned_free(buffer);
+ return ret;
+}
diff --git a/test/flatc_compat/flatc_compat.sh b/test/flatc_compat/flatc_compat.sh
new file mode 100755
index 0000000..1b2dbb9
--- /dev/null
+++ b/test/flatc_compat/flatc_compat.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+TMP=build/tmp/test
+
+${ROOT}/scripts/build.sh
+
+mkdir -p ${TMP}/flatc_compat
+rm -rf ${TMP}/flatc_compat/*
+bin/flatcc -a -o ${TMP}/flatc_compat test/monster_test/monster_test.fbs
+
+cp test/flatc_compat/*.{json,mon,c} ${TMP}/flatc_compat/
+cd ${TMP}/flatc_compat
+cc -g -I ${ROOT}/include flatc_compat.c \
+ ${ROOT}/lib/libflatccrt.a -o flatc_compat_d
+echo "Google FPL flatc compatibility test - reading flatc generated binary"
+./flatc_compat_d
+
diff --git a/test/flatc_compat/monsterdata_test.golden b/test/flatc_compat/monsterdata_test.golden
new file mode 100644
index 0000000..73afc42
--- /dev/null
+++ b/test/flatc_compat/monsterdata_test.golden
@@ -0,0 +1,48 @@
+{
+ pos: {
+ x: 1,
+ y: 2,
+ z: 3,
+ test1: 3,
+ test2: Green,
+ test3: {
+ a: 5,
+ b: 6
+ }
+ },
+ hp: 80,
+ name: "MyMonster",
+ inventory: [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ test_type: Monster,
+ test: {
+ name: "Fred"
+ },
+ test4: [
+ {
+ a: 10,
+ b: 20
+ },
+ {
+ a: 30,
+ b: 40
+ }
+ ],
+ testarrayofstring: [
+ "test1",
+ "test2"
+ ],
+ testhashs32_fnv1: -579221183,
+ testhashu32_fnv1: 3715746113,
+ testhashs64_fnv1: 7930699090847568257,
+ testhashu64_fnv1: 7930699090847568257,
+ testhashs32_fnv1a: -1904106383,
+ testhashu32_fnv1a: 2390860913,
+ testhashs64_fnv1a: 4898026182817603057,
+ testhashu64_fnv1a: 4898026182817603057
+}
diff --git a/test/flatc_compat/monsterdata_test.json b/test/flatc_compat/monsterdata_test.json
new file mode 100755
index 0000000..a718efa
--- /dev/null
+++ b/test/flatc_compat/monsterdata_test.json
@@ -0,0 +1,51 @@
+{
+ pos: {
+ x: 1,
+ y: 2,
+ z: 3,
+ test1: 3,
+ test2: Green,
+ test3: {
+ a: 5,
+ b: 6
+ }
+ },
+ hp: 80,
+ name: "MyMonster",
+ inventory: [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ test_type: Monster,
+ test: {
+ name: "Fred"
+ },
+ test4: [
+ {
+ a: 10,
+ b: 20
+ },
+ {
+ a: 30,
+ b: 40
+ }
+ ],
+ testarrayofstring: [
+ "test1",
+ "test2"
+ ],
+ testarrayofbools:[
+ true, false, true
+ ],
+ testhashs32_fnv1: "This string is being hashed!",
+ testhashu32_fnv1: "This string is being hashed!",
+ testhashs64_fnv1: "This string is being hashed!",
+ testhashu64_fnv1: "This string is being hashed!",
+ testhashs32_fnv1a: "This string is being hashed!",
+ testhashu32_fnv1a: "This string is being hashed!",
+ testhashs64_fnv1a: "This string is being hashed!",
+ testhashu64_fnv1a: "This string is being hashed!",
+}
diff --git a/test/flatc_compat/monsterdata_test.mon b/test/flatc_compat/monsterdata_test.mon
new file mode 100644
index 0000000..08646d4
--- /dev/null
+++ b/test/flatc_compat/monsterdata_test.mon
Binary files differ
diff --git a/test/json_test/CMakeLists.txt b/test/json_test/CMakeLists.txt
new file mode 100644
index 0000000..332905d
--- /dev/null
+++ b/test/json_test/CMakeLists.txt
@@ -0,0 +1,64 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+set(DATA_DST "${CMAKE_CURRENT_BINARY_DIR}")
+set(DATA_SRC "${PROJECT_SOURCE_DIR}/test/flatc_compat")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_test_json ALL)
+add_custom_command (
+ TARGET gen_monster_test_json
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${DATA_SRC}/monsterdata_test.golden" "${DATA_DST}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${DATA_SRC}/monsterdata_test.mon" "${DATA_DST}"
+ COMMAND flatcc_cli -av --json -o "${GEN_DIR}" "${FBS_DIR}/monster_test.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs"
+)
+
+add_executable(test_basic_parse test_basic_parse.c)
+add_executable(test_json_parser test_json_parser.c)
+add_executable(test_json_printer test_json_printer.c)
+add_executable(test_json test_json.c)
+
+add_dependencies(test_basic_parse gen_monster_test_json)
+add_dependencies(test_json_parser gen_monster_test_json)
+add_dependencies(test_json_printer gen_monster_test_json)
+add_dependencies(test_json gen_monster_test_json)
+
+target_link_libraries(test_basic_parse flatccrt)
+target_link_libraries(test_json_parser flatccrt)
+target_link_libraries(test_json_printer flatccrt)
+target_link_libraries(test_json flatccrt)
+
+add_test(test_basic_parse test_basic_parse${CMAKE_EXECUTABLE_SUFFIX})
+add_test(test_json_parser test_json_parser${CMAKE_EXECUTABLE_SUFFIX})
+add_test(test_json_printer test_json_printer${CMAKE_EXECUTABLE_SUFFIX})
+add_test(test_json test_json${CMAKE_EXECUTABLE_SUFFIX})
+
+# Compile without default library in order to test various runtime flags
+set(RTPATH "${PROJECT_SOURCE_DIR}/src/runtime")
+set(RTSRC
+ "${RTPATH}/builder.c"
+ "${RTPATH}/emitter.c"
+ "${RTPATH}/refmap.c"
+ "${RTPATH}/verifier.c"
+ "${RTPATH}/json_parser.c"
+ "${RTPATH}/json_printer.c"
+)
+
+macro(jstest trg flags)
+ add_executable(${trg} test_json.c ${RTSRC})
+ add_dependencies(${trg} gen_monster_test_json)
+ add_test(${trg} ${trg}${CMAKE_EXECUTABLE_SUFFIX})
+ set_target_properties(${trg} PROPERTIES COMPILE_FLAGS ${flags})
+endmacro()
+
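+# Each jstest() call below builds test_json.c directly against the runtime
+# sources above with one compile time flag combination, so the same test
+# exercises several FLATCC_JSON_PARSE_* configurations.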
+jstest(json_test_uql "-DFLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST=1")
+jstest(json_test_uql_off "-DFLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST=0")
+jstest(json_test_uq "-DFLATCC_JSON_PARSE_ALLOW_UNQUOTED=1")
+jstest(json_test_uq_off "-DFLATCC_JSON_PARSE_ALLOW_UNQUOTED=0")
+jstest(json_test "-DFLATCC_JSON_PARSE_WIDE_SPACE=1")
diff --git a/test/json_test/flatcc_golden.c b/test/json_test/flatcc_golden.c
new file mode 100644
index 0000000..a22e3d3
--- /dev/null
+++ b/test/json_test/flatcc_golden.c
@@ -0,0 +1,45 @@
+/*
+ * Flatcc generated monster test binary based on parsing Google flatc's
+ * golden json file.
+ */
+static const unsigned char flatcc_golden_le[] = {
+ 0x0c, 0x00, 0x00, 0x00, 0x4d, 0x4f, 0x4e, 0x53, 0x00, 0x00, 0x00, 0x00, 0x20, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x02, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x41, 0xc9, 0x79, 0xdd,
+ 0x41, 0xc9, 0x79, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x81, 0x91, 0x7b, 0xf2, 0xcd, 0x80, 0x0f, 0x6e,
+ 0x81, 0x91, 0x7b, 0xf2, 0xcd, 0x80, 0x0f, 0x6e, 0x71, 0xa4, 0x81, 0x8e, 0x71, 0xa4, 0x81, 0x8e,
+ 0xf1, 0xdd, 0x67, 0xc7, 0xdc, 0x48, 0xf9, 0x43, 0xf1, 0xdd, 0x67, 0xc7, 0xdc, 0x48, 0xf9, 0x43,
+ 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x74, 0x65, 0x73, 0x74, 0x32, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74,
+ 0x31, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x14, 0x00, 0x1e, 0x00, 0x28, 0x00,
+ 0xd0, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x46, 0x72, 0x65, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x4d, 0x79, 0x4d, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x72, 0x00, 0x00, 0x00,
+ 0x0c, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x34, 0x00, 0x74, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x24, 0x00, 0x28, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x34, 0x00,
+ 0x30, 0x00, 0x38, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x44, 0x00, 0x4c, 0x00, 0x54, 0x00, 0x5c, 0x00, 0x60, 0x00, 0x64, 0x00, 0x6c, 0x00,
+};
+
+static const unsigned char flatcc_golden_be[] = {
+ 0x00, 0x00, 0x00, 0x0c, 0x53, 0x4e, 0x4f, 0x4d, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x20,
+ 0x3f, 0x80, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x05, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, 0x74,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x38, 0xdd, 0x79, 0xc9, 0x41,
+ 0xdd, 0x79, 0xc9, 0x41, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x0f, 0x80, 0xcd, 0xf2, 0x7b, 0x91, 0x81,
+ 0x6e, 0x0f, 0x80, 0xcd, 0xf2, 0x7b, 0x91, 0x81, 0x8e, 0x81, 0xa4, 0x71, 0x8e, 0x81, 0xa4, 0x71,
+ 0x43, 0xf9, 0x48, 0xdc, 0xc7, 0x67, 0xdd, 0xf1, 0x43, 0xf9, 0x48, 0xdc, 0xc7, 0x67, 0xdd, 0xf1,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x05,
+ 0x74, 0x65, 0x73, 0x74, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x74, 0x65, 0x73, 0x74,
+ 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x0a, 0x14, 0x00, 0x00, 0x1e, 0x28, 0x00,
+ 0xff, 0xff, 0xff, 0xd0, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x46, 0x72, 0x65, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x01, 0x02, 0x03, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x09, 0x4d, 0x79, 0x4d, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x72, 0x00, 0x00, 0x00,
+ 0x00, 0x0c, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x34, 0x00, 0x74,
+ 0x00, 0x04, 0x00, 0x00, 0x00, 0x24, 0x00, 0x28, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x34,
+ 0x00, 0x30, 0x00, 0x38, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x44, 0x00, 0x4c, 0x00, 0x54, 0x00, 0x5c, 0x00, 0x60, 0x00, 0x64, 0x00, 0x6c,
+};
diff --git a/test/json_test/json_test.sh b/test/json_test/json_test.sh
new file mode 100755
index 0000000..e08bab5
--- /dev/null
+++ b/test/json_test/json_test.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+TMP=${ROOT}/build/tmp/test/json_test
+
+CC=${CC:-cc}
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+
+# could also use --json to generate both at once
+bin/flatcc -av --json -o ${TMP} test/monster_test/monster_test.fbs
+
+cp test/json_test/*.c ${TMP}
+cp test/flatc_compat/monsterdata_test.golden ${TMP}
+cp test/flatc_compat/monsterdata_test.mon ${TMP}
+
+cd ${TMP}
+
+$CC -g -I ${ROOT}/include test_basic_parse.c \
+ ${ROOT}/lib/libflatccrt_d.a -o test_basic_parse_d
+
+$CC -g -I ${ROOT}/include test_json_parser.c \
+ ${ROOT}/lib/libflatccrt_d.a -o test_json_parser_d
+
+$CC -g -I ${ROOT}/include test_json_printer.c \
+ ${ROOT}/lib/libflatccrt_d.a -o test_json_printer_d
+
+$CC -g -I ${ROOT}/include test_json.c\
+ ${ROOT}/lib/libflatccrt_d.a -o test_json_d
+
+
+echo "running json basic parse test debug"
+./test_basic_parse_d
+
+echo "running json parser test debug"
+./test_json_parser_d
+
+echo "running json printer test debug"
+./test_json_printer_d
+
+echo "running json test debug"
+./test_json_d
+
+$CC -O2 -DNDEBUG -I ${ROOT}/include test_basic_parse.c \
+ ${ROOT}/lib/libflatccrt.a -o test_basic_parse
+
+#$CC -O3 -DNDEBUG -I ${ROOT}/include test_json_parser.c \
+#$CC -Os -save-temps -DNDEBUG -I ${ROOT}/include test_json_parser.c \
+
+$CC -O2 -DNDEBUG -I ${ROOT}/include test_json_parser.c \
+ ${ROOT}/lib/libflatccrt.a -o test_json_parser
+
+$CC -O2 -DNDEBUG -I ${ROOT}/include test_json_printer.c\
+ ${ROOT}/lib/libflatccrt.a -o test_json_printer
+
+$CC -O2 -DNDEBUG -I ${ROOT}/include test_json.c\
+ ${ROOT}/lib/libflatccrt.a -o test_json
+
+echo "running json basic parse test optimized"
+./test_basic_parse
+
+echo "running json parser test optimized"
+./test_json_parser
+
+echo "running json printer test optimimized"
+./test_json_printer
+
+echo "running json test optimized"
+./test_json
+
+echo "json tests passed"
diff --git a/test/json_test/test_basic_parse.c b/test/json_test/test_basic_parse.c
new file mode 100644
index 0000000..7b8f4ba
--- /dev/null
+++ b/test/json_test/test_basic_parse.c
@@ -0,0 +1,291 @@
+#include <stdio.h>
+#include "flatcc/flatcc_builder.h"
+#include "flatcc/flatcc_json_parser.h"
+
+/*
+ * Helper macros for generating compile time tries.
+ *
+ * - this is for prototyping - the code generator does this without macros.
+ */
+#define __FLATCC_CHARW(x, p) (((uint64_t)(x)) << ((p) * 8))
+#define __FLATCC_KW1(s) (__FLATCC_CHARW(s[0], 7))
+#define __FLATCC_KW2(s) (__FLATCC_KW1(s) | __FLATCC_CHARW(s[1], 6))
+#define __FLATCC_KW3(s) (__FLATCC_KW2(s) | __FLATCC_CHARW(s[2], 5))
+#define __FLATCC_KW4(s) (__FLATCC_KW3(s) | __FLATCC_CHARW(s[3], 4))
+#define __FLATCC_KW5(s) (__FLATCC_KW4(s) | __FLATCC_CHARW(s[4], 3))
+#define __FLATCC_KW6(s) (__FLATCC_KW5(s) | __FLATCC_CHARW(s[5], 2))
+#define __FLATCC_KW7(s) (__FLATCC_KW6(s) | __FLATCC_CHARW(s[6], 1))
+#define __FLATCC_KW8(s) (__FLATCC_KW7(s) | __FLATCC_CHARW(s[7], 0))
+#define __FLATCC_KW(s, n) __FLATCC_KW ## n(s)
+
+#define __FLATCC_MASKKW(n) ((~(uint64_t)0) << ((8 - (n)) * 8))
+#define __FLATCC_MATCHKW(w, s, n) ((__FLATCC_MASKKW(n) & (w)) == __FLATCC_KW(s, n))
+#define __FLATCC_LTKW(w, s, n) ((__FLATCC_MASKKW(n) & (w)) < __FLATCC_KW(s, n))
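+
+/*
+ * For example, __FLATCC_KW("first", 5) packs 'f','i','r','s','t' into the
+ * five most significant bytes of a 64-bit word, and
+ * __FLATCC_MATCHKW(w, "first", 5) masks away the three least significant
+ * bytes of the scanned word before comparing, so trailing input after the
+ * keyword is ignored.
+ */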
+
+
+const char g_data[] = " \
+ \
+{ \r\n \
+ \"first\": 1, \
+ \"second\": 2.0, \
+ \"seconds left\": 42, \
+ \"seconds lead\": 1, \n \
+ \"zulu\": \"really\" \n \
+} \
+";
+
+/*
+ * This is a proof of concept test written before code generation to
+ * evaluate efficient parsing and buffer construction principles while
+ * scanning text such as JSON. We do not use a schema per se, but implicitly
+ * define one in the way that we construct the parser.
+ */
+
+#define match(x) if (end > buf && buf[0] == x) { ++buf; } \
+ else { fprintf(stderr, "failed to match '%c'\n", x); \
+ buf = flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_invalid_character); goto fail; }
+
+/* Space is optional, but we do expect more input. */
+#define space() { \
+ buf = flatcc_json_parser_space(ctx, buf, end); \
+ if (buf == end) { fprintf(stderr, "parse failed\n"); goto fail; }} \
+
+#ifdef FLATCC_JSON_ALLOW_UNKNOWN_FIELD
+#define ignore_field() { \
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end); \
+ space(); match(':'); space(); \
+ buf = flatcc_json_parser_generic_json(ctx, buf, end); \
+ if (buf == end) { \
+ goto fail; \
+ }}
+#else
+#define ignore_field() { \
+ buf = flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_symbol);\
+ goto fail; }
+#endif
+
+
+/*
+ * We build a flatbuffer dynamically without a schema, but we still need
+ * to assign vtable entries.
+ */
+enum {
+ id_first = 0,
+ id_second = 1,
+ id_seconds_left = 2,
+ id_seconds_lead = 3,
+ id_zulu = 10
+};
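+/*
+ * These ids play the role of schema assigned vtable slots: the table is
+ * started with id_zulu + 1 slots below, so ids 4 through 9 are simply
+ * left unused in this hand written example.
+ */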
+
+enum {
+ ctx_done = 0, ctx_t1_start, ctx_t1_again
+};
+
+const char *test(flatcc_builder_t *B, const char *buf, const char *end, int *ret)
+{
+ flatcc_json_parser_t parse_ctx, *ctx;
+ flatcc_builder_ref_t root = 0, ref, *p_ref;
+ uint64_t w;
+ const char *k;
+ char *s;
+ flatcc_json_parser_escape_buffer_t code;
+
+ void *p;
+
+ ctx = &parse_ctx;
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->line = 1;
+ ctx->line_start = buf;
+
+ flatcc_builder_start_buffer(B, "TEST", 0, 0);
+
+ space(); match('{'); space();
+ flatcc_builder_start_table(B, id_zulu + 1);
+
+t1_again:
+
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+ w = flatcc_json_parser_symbol_part(buf, end);
+ k = end - buf > 8 ? buf + 8 : end;
+ /*
+     * We implement a trie here. Because we compare big endian,
+     * any trailing garbage in a word is least significant and is
+     * masked out in the MATCH tests.
+ *
+ * When a keyword is a prefix of another, the shorter keyword
+ * must be tested first because any trailing "garbage" will
+ * be larger (or equal if at buffer end or invalid nulls are
+ * contained) than the short keyword, but if testing the long
+ * keyword, the shorter keyword may be either larger or smaller
+ * depending on what content follows.
+ *
+ * Errors result in `buf` being set to `end` so we need not test
+ * for errors all the time. We use space as a convenient bailout
+ * point.
+ */
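+    /*
+     * For example, when parsing the key "seconds lead" the first 8-byte
+     * word equals "seconds ", so the code below loads a second word and
+     * then distinguishes "lead" from "left", which first differ at the
+     * third character.
+     */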
+ if (__FLATCC_LTKW(w, "second", 6)) {
+ if (!__FLATCC_MATCHKW(w, "first", 5)) {
+ ignore_field();
+ } else {
+ buf = flatcc_json_parser_symbol_end(ctx, buf + 5, end);
+ space(); match(':'); space();
+ p = flatcc_builder_table_add(B, id_first, 1, 1);
+ if (!p) { goto fail; }
+ k = buf;
+ buf = flatcc_json_parser_uint8(ctx, buf, end, p);
+ /* Here we could optionally parse for symbolic constants. */
+ if (k == buf) { goto fail; };
+ /* Successfully parsed field. */
+ }
+ } else {
+ if (__FLATCC_LTKW(w, "zulu", 4)) {
+ if (__FLATCC_LTKW(w, "seconds ", 8)) {
+ if (!__FLATCC_MATCHKW(w, "second", 6)) {
+ ignore_field();
+ } else {
+ buf = flatcc_json_parser_symbol_end(ctx, buf + 6, end);
+ space(); match(':'); space();
+ p = flatcc_builder_table_add(B, id_second, 8, 8);
+ if (!p) { goto fail; }
+ k = buf;
+ buf = flatcc_json_parser_double(ctx, buf, end, p);
+ /* Here we could optionally parse for symbolic constants. */
+ if (k == buf) { goto fail; };
+ /* Successfully parsed field. */
+ }
+ } else {
+ if (!__FLATCC_MATCHKW(w, "seconds ", 8)) {
+ ignore_field();
+ } else {
+ /* We have multiple keys matching the first word, so we load another. */
+ buf = k;
+ w = flatcc_json_parser_symbol_part(buf, end);
+ k = end - buf > 8 ? buf + 8 : end;
+ if (__FLATCC_LTKW(w, "left", 4)) {
+ if (!__FLATCC_MATCHKW(w, "lead", 4)) {
+ ignore_field();
+ } else {
+ buf = flatcc_json_parser_symbol_end(ctx, buf + 4, end);
+ space(); match(':'); space();
+ p = flatcc_builder_table_add(B, id_seconds_lead, 8, 8);
+ if (!p) { goto fail; }
+ k = buf;
+ buf = flatcc_json_parser_int64(ctx, buf, end, p);
+ /* Here we could optionally parse for symbolic constants. */
+ if (k == buf) { goto fail; };
+ /* Successfully parsed field. */
+ }
+ } else {
+ if (!__FLATCC_MATCHKW(w, "left", 4)) {
+ ignore_field();
+ } else {
+ buf = flatcc_json_parser_symbol_end(ctx, buf + 4, end);
+ space(); match(':'); space();
+ p = flatcc_builder_table_add(B, id_seconds_left, 4, 4);
+ if (!p) { goto fail; }
+ k = buf;
+ buf = flatcc_json_parser_uint32(ctx, buf, end, p);
+ /* Here we could optionally parse for symbolic constants. */
+ if (k == buf) { goto fail; };
+ /* Successfully parsed field. */
+ }
+ }
+ }
+ }
+ } else {
+ if (!__FLATCC_MATCHKW(w, "zulu", 4)) {
+ ignore_field();
+ } else {
+ buf = flatcc_json_parser_symbol_end(ctx, buf + 4, end);
+ space(); match(':'); space();
+ /*
+ * Parse field as string. If we are lucky, we can
+ * create the string in one go, which is faster.
+ * We can't if the string contains escape codes.
+ */
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ k = buf;
+ buf = flatcc_json_parser_string_part(ctx, buf, end);
+ if (buf == end) {
+ goto fail;
+ }
+ if (buf[0] == '\"') {
+ ref = flatcc_builder_create_string(B, k, (size_t)(buf - k));
+ } else {
+ /* Start string with enough space for what we have. */
+ flatcc_builder_start_string(B);
+ s = flatcc_builder_extend_string(B, (size_t)(buf - k));
+ if (!s) { goto fail; }
+ memcpy(s, k, (size_t)(buf - k));
+ do {
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ flatcc_builder_append_string(B, code + 1, (size_t)code[0]);
+ k = buf;
+ buf = flatcc_json_parser_string_part(ctx, buf, end);
+ if (buf == end) {
+ goto fail;
+ }
+ flatcc_builder_append_string(B, k, (size_t)(buf - k));
+ } while (buf[0] != '\"');
+ ref = flatcc_builder_end_string(B);
+ }
+ if (!ref) {
+ goto fail;
+ }
+ /* Duplicate fields may fail or assert. */
+ p_ref = flatcc_builder_table_add_offset(B, id_zulu);
+ if (!p_ref) {
+ goto fail;
+ }
+ *p_ref = ref;
+ buf = flatcc_json_parser_string_end(ctx, buf, end);
+ /* Successfully parsed field. */
+ }
+ }
+ }
+ space();
+ if (*buf == ',') {
+ ++buf;
+ space();
+ if (*buf != '}') {
+ goto t1_again;
+ }
+#if !FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_trailing_comma);
+#endif
+ }
+ match('}');
+ root = flatcc_builder_end_table(B);
+
+ flatcc_builder_end_buffer(B, root);
+#if !FLATCC_JSON_PARSE_IGNORE_TRAILING_DATA
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf != end) {
+ fprintf(stderr, "extra characters in input\n");
+ goto fail;
+ }
+#endif
+fail:
+ if (ctx->error) {
+ fprintf(stderr, "%d:%d: %s\n", (int)ctx->line, (int)(ctx->error_loc - ctx->line_start + 1), flatcc_json_parser_error_string(ctx->error));
+ flatcc_builder_reset(B);
+ } else {
+ fprintf(stderr, "parse accepted\n");
+ }
+ *ret = ctx->error;
+ return buf;
+}
+
+int main(void)
+{
+ int ret = -1;
+ flatcc_builder_t builder;
+
+ flatcc_builder_init(&builder);
+
+ test(&builder, g_data, g_data + sizeof(g_data) - 1, &ret);
+
+ flatcc_builder_clear(&builder);
+ return ret;
+}
diff --git a/test/json_test/test_json.c b/test/json_test/test_json.c
new file mode 100644
index 0000000..aeee13a
--- /dev/null
+++ b/test/json_test/test_json.c
@@ -0,0 +1,882 @@
+#include <stdio.h>
+#include "monster_test_json_parser.h"
+#include "monster_test_json_printer.h"
+#include "monster_test_verifier.h"
+
+#include "flatcc/support/hexdump.h"
+
+#define UQL FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST
+#define UQ FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+
+#undef ns
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Example, x)
+
+#undef nsf
+#define nsf(x) FLATBUFFERS_WRAP_NAMESPACE(Fantasy, x)
+
+struct test_scope {
+ const char *identifier;
+ flatcc_json_parser_table_f *parser;
+ flatcc_json_printer_table_f *printer;
+ flatcc_table_verifier_f *verifier;
+};
+
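+/*
+ * Each scope bundles a buffer identifier with the generated parser,
+ * printer and verifier entry points for one root table so that
+ * test_json() can run the same parse/verify/print round trip against
+ * different schema roots.
+ */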
+static const struct test_scope Monster = {
+    /* This is the schema global file identifier. */
+ ns(Monster_file_identifier),
+ ns(Monster_parse_json_table),
+ ns(Monster_print_json_table),
+ ns(Monster_verify_table)
+};
+
+static const struct test_scope Alt = {
+ /* This is the type hash identifier. */
+ ns(Alt_type_identifier),
+ ns(Alt_parse_json_table),
+ ns(Alt_print_json_table),
+ ns(Alt_verify_table)
+};
+
+static const struct test_scope Movie = {
+ /* This is the type hash identifier. */
+ nsf(Movie_type_identifier),
+ nsf(Movie_parse_json_table),
+ nsf(Movie_print_json_table),
+ nsf(Movie_verify_table)
+};
+
+int test_json(const struct test_scope *scope, char *json,
+ char *expect, int expect_err,
+ flatcc_json_parser_flags_t parse_flags, flatcc_json_printer_flags_t print_flags, int line)
+{
+ int ret = -1;
+ int err;
+ void *flatbuffer = 0;
+ char *buf = 0;
+ size_t flatbuffer_size, buf_size;
+ flatcc_builder_t builder, *B;
+ flatcc_json_parser_t parser_ctx;
+ flatcc_json_printer_t printer_ctx;
+ int i;
+
+ B = &builder;
+ flatcc_builder_init(B);
+ flatcc_json_printer_init_dynamic_buffer(&printer_ctx, 0);
+ flatcc_json_printer_set_flags(&printer_ctx, print_flags);
+ err = flatcc_json_parser_table_as_root(B, &parser_ctx, json, strlen(json), parse_flags,
+ scope->identifier, scope->parser);
+ if (err != expect_err) {
+ if (expect_err) {
+ if (err) {
+ fprintf(stderr, "%d: json test: parse failed with: %s\n",
+ line, flatcc_json_parser_error_string(err));
+ fprintf(stderr, "but expected to fail with: %s\n",
+ flatcc_json_parser_error_string(expect_err));
+ fprintf(stderr, "%s\n", json);
+ } else {
+ fprintf(stderr, "%d: json test: parse successful, but expected to fail with: %s\n",
+ line, flatcc_json_parser_error_string(expect_err));
+ fprintf(stderr, "%s\n", json);
+ }
+ } else {
+ fprintf(stderr, "%d: json test: parse failed: %s\n", line, flatcc_json_parser_error_string(err));
+ fprintf(stderr, "%s\n", json);
+ }
+ for (i = 0; i < parser_ctx.pos - 1; ++i) {
+ fprintf(stderr, " ");
+ }
+ fprintf(stderr, "^\n");
+ goto failed;
+ }
+ if (expect_err) {
+ ret = 0;
+ goto done;
+ }
+ flatbuffer = flatcc_builder_finalize_aligned_buffer(B, &flatbuffer_size);
+ if ((ret = flatcc_verify_table_as_root(flatbuffer, flatbuffer_size, scope->identifier, scope->verifier))) {
+ fprintf(stderr, "%s:%d: buffer verification failed: %s\n",
+ __FILE__, line, flatcc_verify_error_string(ret));
+ goto failed;
+ }
+
+ flatcc_json_printer_table_as_root(&printer_ctx, flatbuffer, flatbuffer_size, scope->identifier, scope->printer);
+ buf = flatcc_json_printer_get_buffer(&printer_ctx, &buf_size);
+ if (!buf || strcmp(expect, buf)) {
+ fprintf(stderr, "%d: json test: printed buffer not as expected, got:\n", line);
+ fprintf(stderr, "%s\n", buf);
+ fprintf(stderr, "expected:\n");
+ fprintf(stderr, "%s\n", expect);
+ goto failed;
+ }
+ ret = 0;
+
+done:
+ flatcc_builder_aligned_free(flatbuffer);
+ flatcc_builder_clear(B);
+ flatcc_json_printer_clear(&printer_ctx);
+ return ret;
+
+failed:
+ if (flatbuffer) {
+ hexdump("parsed buffer", flatbuffer, flatbuffer_size, stderr);
+ }
+ ret = -1;
+ goto done;
+}
+
+#define BEGIN_TEST(name) int ret = 0; const struct test_scope *scope = &name
+#define END_TEST() return ret;
+
+#define TEST(x, y) \
+ ret |= test_json(scope, (x), (y), 0, 0, 0, __LINE__);
+
+#define TEST_ERROR(x, err) \
+ ret |= test_json(scope, (x), 0, err, 0, 0, __LINE__);
+
+#define TEST_FLAGS(fparse, fprint, x, y) \
+ ret |= test_json(scope, (x), (y), 0, (fparse), (fprint), __LINE__);
+
+#define TEST_ERROR_FLAGS(fparse, fprint, x, err) \
+ ret |= test_json(scope, (x), 0, err, (fparse), (fprint), __LINE__);
+
+int edge_case_tests(void)
+{
+ BEGIN_TEST(Monster);
+/*
+ * Each symbolic value is type coerced and added. One might expect
+ * or'ing flags together, but it doesn't work with signed values
+ * and floating point target values. We would either need a much
+ * more complicated parser or restrict the places where symbols are
+ * allowed.
+ */
+#if 0
+ TEST( "{ name: \"Monster\", color: \"Green Blue Red Blue\"}",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+#else
+#if UQ
+ TEST( "{ name: \"Monster\", color: \"Green Blue Red Blue\"}",
+ "{\"name\":\"Monster\",\"color\":19}");
+#else
+ TEST( "{ \"name\": \"Monster\", \"color\": \"Green Blue Red Blue\"}",
+ "{\"name\":\"Monster\",\"color\":19}");
+#endif
+#endif
+
+/*
+ * If a value is stored, even if default, it is also printed.
+ * This option can also be flagged compile time for better performance.
+ */
+ TEST_FLAGS(flatcc_json_parser_f_force_add, 0,
+ "{ \"name\": \"Monster\", \"color\": 8}",
+ "{\"name\":\"Monster\",\"color\":\"Blue\"}");
+
+ TEST_FLAGS(0, flatcc_json_printer_f_noenum,
+ "{ \"name\": \"Monster\", \"color\": \"Green\"}",
+ "{\"name\":\"Monster\",\"color\":2}");
+
+ TEST_FLAGS(flatcc_json_parser_f_force_add, flatcc_json_printer_f_skip_default,
+ "{ \"name\": \"Monster\", \"color\": 8}",
+ "{\"name\":\"Monster\"}");
+
+ TEST_FLAGS(0, flatcc_json_printer_f_force_default,
+ "{ \"name\": \"Monster\", \"testf\":3.0}",
+"{\"mana\":150,\"hp\":100,\"name\":\"Monster\",\"color\":\"Blue\",\"testbool\":true,\"testhashs32_fnv1\":0,\"testhashu32_fnv1\":0,\"testhashs64_fnv1\":0,\"testhashu64_fnv1\":0,\"testhashs32_fnv1a\":0,\"testhashu32_fnv1a\":0,\"testhashs64_fnv1a\":0,\"testhashu64_fnv1a\":0,\"testf\":3,\"testf2\":3,\"testf3\":0}");
+
+
+ /*
+ * Cannot test the default of testf field because float is printed as double with
+ * configuration dependent precision.
+ */
+#if 0
+ TEST_FLAGS(0, flatcc_json_printer_f_force_default,
+ "{ \"name\": \"Monster\", \"testf3\":3.14159012}",
+"{\"mana\":150,\"hp\":100,\"name\":\"Monster\",\"color\":\"Blue\",\"testbool\":true,\"testhashs32_fnv1\":0,\"testhashu32_fnv1\":0,\"testhashs64_fnv1\":0,\"testhashu64_fnv1\":0,\"testhashs32_fnv1a\":0,\"testhashu32_fnv1a\":0,\"testhashs64_fnv1a\":0,\"testhashu64_fnv1a\":0,\"testf\":3.14159,\"testf2\":3,\"testf3\":0}");
+#endif
+
+ TEST_FLAGS(flatcc_json_parser_f_force_add, 0,
+ "{ \"name\": \"Monster\", \"color\": \"Blue\"}",
+ "{\"name\":\"Monster\",\"color\":\"Blue\"}");
+
+ TEST_FLAGS(flatcc_json_parser_f_skip_unknown, 0,
+ "{ \"name\": \"Monster\", \"xcolor\": \"Green\", \"hp\": 42}",
+ "{\"hp\":42,\"name\":\"Monster\"}");
+
+ TEST_FLAGS(flatcc_json_parser_f_skip_unknown, flatcc_json_printer_f_unquote,
+ "{ \"name\": \"Monster\", \"xcolor\": \"Green\", \"hp\": 42}",
+ "{hp:42,name:\"Monster\"}");
+
+ /* Also test the generic parser used for unions where the type field comes late. */
+ TEST_FLAGS(flatcc_json_parser_f_skip_unknown, 0,
+ "{ \"name\": \"Monster\", \"xcolor\": \"Green\", "
+ "\"foobar\": { \"a\": [1, 2.0, ], \"a1\": {}, \"b\": null, \"c\":[], }, \"hp\": 42 }",
+ "{\"hp\":42,\"name\":\"Monster\"}");
+#if UQ
+/*
+ * If a value is stored, even if it is the default, it is also printed.
+ * This option can also be set at compile time for better performance.
+ */
+ TEST_FLAGS(flatcc_json_parser_f_force_add, 0,
+ "{ name: \"Monster\", color: 8}",
+ "{\"name\":\"Monster\",\"color\":\"Blue\"}");
+
+ TEST_FLAGS(0, flatcc_json_printer_f_noenum,
+ "{ name: \"Monster\", color: Green}",
+ "{\"name\":\"Monster\",\"color\":2}");
+
+ TEST_FLAGS(flatcc_json_parser_f_force_add, flatcc_json_printer_f_skip_default,
+ "{ name: \"Monster\", color: 8}",
+ "{\"name\":\"Monster\"}");
+
+ TEST_FLAGS(0, flatcc_json_printer_f_force_default,
+ "{ name: \"Monster\"}",
+"{\"mana\":150,\"hp\":100,\"name\":\"Monster\",\"color\":\"Blue\",\"testbool\":true,\"testhashs32_fnv1\":0,\"testhashu32_fnv1\":0,\"testhashs64_fnv1\":0,\"testhashu64_fnv1\":0,\"testhashs32_fnv1a\":0,\"testhashu32_fnv1a\":0,\"testhashs64_fnv1a\":0,\"testhashu64_fnv1a\":0,\"testf\":314159,\"testf2\":3,\"testf3\":0}");
+
+ TEST_FLAGS(flatcc_json_parser_f_force_add, 0,
+ "{ name: \"Monster\", color: Blue}",
+ "{\"name\":\"Monster\",\"color\":\"Blue\"}");
+
+ TEST_FLAGS(flatcc_json_parser_f_skip_unknown, 0,
+ "{ name: \"Monster\", xcolor: Green, hp: 42}",
+ "{\"hp\":42,\"name\":\"Monster\"}");
+
+ TEST_FLAGS(flatcc_json_parser_f_skip_unknown, flatcc_json_printer_f_unquote,
+ "{ name: \"Monster\", xcolor: Green, hp: 42}",
+ "{hp:42,name:\"Monster\"}");
+
+ /* Also test the generic parser used for unions where the type field comes late. */
+ TEST_FLAGS(flatcc_json_parser_f_skip_unknown, 0,
+ "{ name: \"Monster\", xcolor: Green, "
+ "foobar: { a: [1, 2.0, ], a1: {}, b: null, c:[], }, hp: 42 }",
+ "{\"hp\":42,\"name\":\"Monster\"}");
+#endif
+
+/* Without skip unknown, we should expect failure. */
+#if 0
+ TEST( "{ name: \"Monster\", xcolor: Green}",
+ "{\"name\":\"Monster\"}");
+#endif
+
+/* We do not support null. */
+#if 0
+ TEST(
+ "{ name: \"Monster\", test_type: null }",
+ "{\"name\":\"Monster\"}");
+#endif
+
+/*
+ * We do not allow empty flag strings because they might mean
+ * either the default value or 0.
+ */
+#if 0
+ /* Questionable if this really is an error. */
+ TEST( "{ name: \"Monster\", color: \"\"}",
+ "{\"name\":\"Monster\",\"color\":0}"); // TODO: should this be color:"" ?
+
+ TEST( "{ name: \"Monster\", color: \" \"}",
+ "{\"name\":\"Monster\",\"color\":0}");
+
+#endif
+
+ END_TEST();
+}
+
+int error_case_tests(void)
+{
+ BEGIN_TEST(Monster);
+
+ TEST_ERROR( "{ \"nickname\": \"Monster\" }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"test_type\": \"Monster\", \"test\": { \"nickname\": \"Joker\", \"color\": \"Red\" } } }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"test_type\": \"Monster\", \"test\": { \"name\": \"Joker\", \"colour\": \"Red\" } } }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testarrayoftables\": [ { \"nickname\": \"Joker\", \"color\": \"Red\" } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testarrayoftables\": [ { \"name\": \"Joker\", \"colour\": \"Red\" } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testarrayoftables\": ["
+ "{ \"name\": \"Joker\", \"color\": \"Red\", \"test_type\": \"Monster\", \"test\": { \"nickname\": \"Harley\", \"color\": \"Blue\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testarrayoftables\": ["
+ "{ \"name\": \"Joker\", \"color\": \"Red\", \"test_type\": \"Monster\", \"test\": { \"name\": \"Harley\", \"colour\": \"Blue\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testarrayoftables\": ["
+ "{ \"name\": \"Joker\", \"test_type\": \"Monster\", \"test\": { \"nickname\": \"Harley\" } },"
+ "{ \"name\": \"Bonnie\", \"test_type\": \"Monster\", \"test\": { \"name\": \"Clyde\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testarrayoftables\": ["
+ "{ \"name\": \"Joker\", \"test_type\": \"Monster\", \"test\": { \"name\": \"Harley\" } },"
+ "{ \"name\": \"Bonnie\", \"test_type\": \"Monster\", \"test\": { \"nickname\": \"Clyde\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+
+#if !UQ
+ TEST_ERROR( "{ nickname: \"Monster\" }",
+ flatcc_json_parser_error_unexpected_character );
+
+ TEST_ERROR( "{ \"name\": \"Monster\", \"color\": Green }",
+ flatcc_json_parser_error_unexpected_character );
+
+ TEST_ERROR( "{ \"name\": \"Monster\", \"color\": Green Red Blue }",
+ flatcc_json_parser_error_unexpected_character );
+#endif
+
+#if UQ
+ TEST_ERROR( "{ nickname: \"Monster\" }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", test_type: Monster, test: { nickname: \"Joker\", color: \"Red\" } } }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", test_type: Monster, test: { name: \"Joker\", colour: \"Red\" } } }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", testarrayoftables: [ { nickname: \"Joker\", color: \"Red\" } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", testarrayoftables: [ { name: \"Joker\", colour: \"Red\" } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", testarrayoftables: ["
+ "{ name: \"Joker\", color: \"Red\", test_type: Monster, test: { nickname: \"Harley\", color: \"Blue\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", testarrayoftables: ["
+ "{ name: \"Joker\", color: \"Red\", test_type: Monster, test: { name: \"Harley\", colour: \"Blue\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", testarrayoftables: ["
+ "{ name: \"Joker\", test_type: Monster, test: { nickname: \"Harley\" } },"
+ "{ name: \"Bonnie\", test_type: Monster, test: { name: \"Clyde\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+ TEST_ERROR( "{ name: \"Monster\", testarrayoftables: ["
+ "{ name: \"Joker\", test_type: Monster, test: { name: \"Harley\" } },"
+ "{ name: \"Bonnie\", test_type: Monster, test: { nickname: \"Clyde\" } } ] }",
+ flatcc_json_parser_error_unknown_symbol );
+
+#endif
+
+ END_TEST();
+}
+
+#define RANDOM_BASE64 "zLOuiUjH49tz4Ap2JnmpTX5NqoiMzlD8hSw45QCS2yaSp7UYoA" \
+ "oE8KpY/5pKYmk+54NI40hyeyZ1zRUE4vKQT0hEdVl0iXq2fqPamkVD1AZlVvQJ1m00PaoXOSgG+64Zv+Uygw=="
+
+#define RANDOM_BASE64_NOPAD "zLOuiUjH49tz4Ap2JnmpTX5NqoiMzlD8hSw45QCS2yaSp7UYoA" \
+ "oE8KpY/5pKYmk+54NI40hyeyZ1zRUE4vKQT0hEdVl0iXq2fqPamkVD1AZlVvQJ1m00PaoXOSgG+64Zv+Uygw"
+
+#define RANDOM_BASE64URL "zLOuiUjH49tz4Ap2JnmpTX5NqoiMzlD8hSw45QCS2yaSp7UYoA" \
+ "oE8KpY_5pKYmk-54NI40hyeyZ1zRUE4vKQT0hEdVl0iXq2fqPamkVD1AZlVvQJ1m00PaoXOSgG-64Zv-Uygw=="
+
+#define RANDOM_BASE64URL_NOPAD "zLOuiUjH49tz4Ap2JnmpTX5NqoiMzlD8hSw45QCS2yaSp7UYoA" \
+ "oE8KpY_5pKYmk-54NI40hyeyZ1zRUE4vKQT0hEdVl0iXq2fqPamkVD1AZlVvQJ1m00PaoXOSgG-64Zv-Uygw"
+
+int base64_tests(void)
+{
+ BEGIN_TEST(Monster);
+
+ /* Reference */
+ TEST( "{ \"name\": \"Monster\" }",
+ "{\"name\":\"Monster\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"testbase64\":{} }",
+ "{\"name\":\"Monster\",\"testbase64\":{}}");
+
+
+ TEST( "{ \"name\": \"Monster\", \"testbase64\":{ \"data\":\"" RANDOM_BASE64 "\"} }",
+ "{\"name\":\"Monster\",\"testbase64\":{\"data\":\"" RANDOM_BASE64 "\"}}");
+
+ TEST( "{ \"name\": \"Monster\", \"testbase64\":{ \"urldata\":\"" RANDOM_BASE64URL "\"} }",
+ "{\"name\":\"Monster\",\"testbase64\":{\"urldata\":\"" RANDOM_BASE64URL "\"}}");
+
+ TEST( "{ \"name\": \"Monster\", \"testbase64\":{ \"data\":\"" RANDOM_BASE64_NOPAD "\"} }",
+ "{\"name\":\"Monster\",\"testbase64\":{\"data\":\"" RANDOM_BASE64 "\"}}");
+
+ TEST( "{ \"name\": \"Monster\", \"testbase64\":{ \"urldata\":\"" RANDOM_BASE64URL_NOPAD "\"} }",
+ "{\"name\":\"Monster\",\"testbase64\":{\"urldata\":\"" RANDOM_BASE64URL "\"}}");
+
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testbase64\":{ \"data\":\"" RANDOM_BASE64URL "\"} }",
+ flatcc_json_parser_error_base64);
+
+ TEST_ERROR( "{ \"name\": \"Monster\", \"testbase64\":{ \"urldata\":\"" RANDOM_BASE64 "\"} }",
+ flatcc_json_parser_error_base64url);
+
+/* Test case from Google's flatc implementation. */
+#if UQ
+ TEST( "{name: \"Monster\","
+ "testbase64: {"
+ "data: \"23A/47d450+sdfx9+wRYIS09ckas/asdFBQ=\","
+ "urldata: \"23A_47d450-sdfx9-wRYIS09ckas_asdFBQ=\","
+ "nested: \"FAAAAE1PTlMMAAwAAAAEAAYACAAMAAAAAAAAAAQAAAANAAAATmVzdGVkTW9uc3RlcgAAAA==\""
+ "}}",
+ "{\"name\":\"Monster\","
+ "\"testbase64\":{"
+ "\"data\":\"23A/47d450+sdfx9+wRYIS09ckas/asdFBQ=\","
+ "\"urldata\":\"23A_47d450-sdfx9-wRYIS09ckas_asdFBQ=\","
+ "\"nested\":\"FAAAAE1PTlMMAAwAAAAEAAYACAAMAAAAAAAAAAQAAAANAAAATmVzdGVkTW9uc3RlcgAAAA==\""
+ "}}");
+
+ TEST( "{name: \"Monster\","
+ "testbase64: {"
+ "data: \"23A/47d450+sdfx9+wRYIS09ckas/asdFBQ\","
+ "urldata: \"23A_47d450-sdfx9-wRYIS09ckas_asdFBQ\","
+ "nested: \"FAAAAE1PTlMMAAwAAAAEAAYACAAMAAAAAAAAAAQAAAANAAAATmVzdGVkTW9uc3RlcgAAAA\""
+ "}}",
+ "{\"name\":\"Monster\","
+ "\"testbase64\":{"
+ "\"data\":\"23A/47d450+sdfx9+wRYIS09ckas/asdFBQ=\","
+ "\"urldata\":\"23A_47d450-sdfx9-wRYIS09ckas_asdFBQ=\","
+ "\"nested\":\"FAAAAE1PTlMMAAwAAAAEAAYACAAMAAAAAAAAAAQAAAANAAAATmVzdGVkTW9uc3RlcgAAAA==\""
+ "}}");
+#endif
+
+ END_TEST();
+}
+
+int mixed_type_union_tests(void)
+{
+ BEGIN_TEST(Movie);
+
+ /* Reference */
+
+ TEST( "{ \"main_character_type\": \"Rapunzel\", \"main_character\": { \"hair_length\": 19 } }",
+ "{\"main_character_type\":\"Rapunzel\",\"main_character\":{\"hair_length\":19}}");
+
+ TEST( "{ \"main_character_type\": \"Rapunzel\", \"main_character\": { \"hair_length\": 19 },"
+ " \"side_kick_type\": \"Other\", \"side_kick\": \"a donkey\"}",
+ "{\"main_character_type\":\"Rapunzel\",\"main_character\":{\"hair_length\":19},"
+ "\"side_kick_type\":\"Other\",\"side_kick\":\"a donkey\"}");
+
+ TEST( "{ \"main_character_type\": \"Rapunzel\", \"main_character\": { \"hair_length\": 19 },"
+ " \"side_kick_type\": \"Fantasy.Character.Other\", \"side_kick\": \"a donkey\"}}",
+ "{\"main_character_type\":\"Rapunzel\",\"main_character\":{\"hair_length\":19},"
+ "\"side_kick_type\":\"Other\",\"side_kick\":\"a donkey\"}");
+
+ TEST( "{ \"main_character_type\": \"Rapunzel\", \"main_character\": { \"hair_length\": 19 },"
+ " \"side_kick_type\": \"Fantasy.Character.Other\", \"side_kick\": \"a donkey\","
+ " \"antagonist_type\": \"MuLan\", \"antagonist\": {\"sword_attack_damage\": 42}}",
+ "{\"main_character_type\":\"Rapunzel\",\"main_character\":{\"hair_length\":19},"
+ "\"antagonist_type\":\"MuLan\",\"antagonist\":{\"sword_attack_damage\":42},"
+ "\"side_kick_type\":\"Other\",\"side_kick\":\"a donkey\"}");
+
+ TEST( "{ \"main_character_type\": \"Rapunzel\", \"main_character\": { \"hair_length\": 19 },"
+ " \"side_kick_type\": \"Fantasy.Character.Other\", \"side_kick\": \"a donkey\","
+ " \"antagonist_type\": \"MuLan\", \"antagonist\": {\"sword_attack_damage\": 42},"
+ " \"characters_type\": [], \"characters\": []}",
+ "{\"main_character_type\":\"Rapunzel\",\"main_character\":{\"hair_length\":19},"
+ "\"antagonist_type\":\"MuLan\",\"antagonist\":{\"sword_attack_damage\":42},"
+ "\"side_kick_type\":\"Other\",\"side_kick\":\"a donkey\","
+ "\"characters_type\":[],\"characters\":[]}")
+
+ TEST( "{ \"main_character_type\": \"Rapunzel\", \"main_character\": { \"hair_length\": 19 },"
+ " \"side_kick_type\": \"Fantasy.Character.Other\", \"side_kick\": \"a donkey\","
+ " \"antagonist_type\": \"MuLan\", \"antagonist\": {\"sword_attack_damage\": 42},"
+ " \"characters_type\": [\"Fantasy.Character.Rapunzel\", \"Other\", 0, \"MuLan\"],"
+ " \"characters\": [{\"hair_length\":19}, \"unattributed extras\", null, {\"sword_attack_damage\":2}]}",
+ "{\"main_character_type\":\"Rapunzel\",\"main_character\":{\"hair_length\":19},"
+ "\"antagonist_type\":\"MuLan\",\"antagonist\":{\"sword_attack_damage\":42},"
+ "\"side_kick_type\":\"Other\",\"side_kick\":\"a donkey\","
+ "\"characters_type\":[\"Rapunzel\",\"Other\",\"NONE\",\"MuLan\"],"
+ "\"characters\":[{\"hair_length\":19},\"unattributed extras\",null,{\"sword_attack_damage\":2}]}")
+
+ TEST( "{ \"main_character_type\": \"Rapunzel\", \"main_character\": { \"hair_length\": 19 },"
+ " \"side_kick_type\": \"Character.Other\", \"side_kick\": \"a donkey\"}",
+ "{\"main_character_type\":\"Rapunzel\",\"main_character\":{\"hair_length\":19},"
+ "\"side_kick_type\":\"Other\",\"side_kick\":\"a donkey\"}");
+
+ END_TEST();
+}
+
+int union_vector_tests(void)
+{
+ BEGIN_TEST(Alt);
+ /* Union vector */
+
+ TEST( "{ \"manyany_type\": [ \"Monster\" ], \"manyany\": [{\"name\": \"Joe\"}] }",
+ "{\"manyany_type\":[\"Monster\"],\"manyany\":[{\"name\":\"Joe\"}]}");
+
+ TEST( "{\"manyany_type\": [ \"NONE\" ], \"manyany\": [ null ] }",
+ "{\"manyany_type\":[\"NONE\"],\"manyany\":[null]}");
+
+ TEST( "{\"manyany_type\": [ \"Monster\", \"NONE\" ], \"manyany\": [{\"name\": \"Joe\"}, null] }",
+ "{\"manyany_type\":[\"Monster\",\"NONE\"],\"manyany\":[{\"name\":\"Joe\"},null]}");
+
+ TEST( "{\"manyany_type\": [ \"Monster\" ], \"manyany\": [ { \"name\":\"Joe\", \"test_type\": \"Monster\", \"test\": { \"name\": \"any Monster\" } } ] }",
+ "{\"manyany_type\":[\"Monster\"],\"manyany\":[{\"name\":\"Joe\",\"test_type\":\"Monster\",\"test\":{\"name\":\"any Monster\"}}]}");
+
+ TEST( "{\"manyany\": [{\"name\": \"Joe\"}], \"manyany_type\": [ \"Monster\" ] }",
+ "{\"manyany_type\":[\"Monster\"],\"manyany\":[{\"name\":\"Joe\"}]}");
+
+ TEST( "{\"manyany\": [{\"manyany\":[null, null], \"manyany_type\": [\"NONE\", \"NONE\"]}], \"manyany_type\": [ \"Alt\" ] }",
+ "{\"manyany_type\":[\"Alt\"],\"manyany\":[{\"manyany_type\":[\"NONE\",\"NONE\"],\"manyany\":[null,null]}]}");
+
+ END_TEST();
+}
+
+int fixed_array_tests(void)
+{
+ BEGIN_TEST(Alt);
+ /* Fixed array */
+
+#if UQ
+ TEST( "{ \"fixed_array\": { \"foo\": [ 1.0, 2.0, 0, 0, 0, 0, 0,"
+ " 0, 0, 0, 0, 0, 0, 0, 0, 16.0], col:[\"Blue Red\", Green, Red],"
+ "tests:[ {b:4}, {a:1, b:2}],"
+ " \"bar\": [ 100, 0, 0, 0, 0, 0, 0, 0, 0, 1000],"
+ " \"text\":\"hello\"}}",
+ "{\"fixed_array\":{\"foo\":[1,2,0,0,0,0,0,"
+ "0,0,0,0,0,0,0,0,16],"
+ "\"bar\":[100,0,0,0,0,0,0,0,0,1000],"
+ "\"col\":[\"Red Blue\",\"Green\",\"Red\"],"
+ "\"tests\":[{\"a\":0,\"b\":4},{\"a\":1,\"b\":2}],"
+ "\"text\":\"hello\"}}");
+#else
+ TEST( "{ \"fixed_array\": { \"foo\": [ 1.0, 2.0, 0, 0, 0, 0, 0,"
+ " 0, 0, 0, 0, 0, 0, 0, 0, 16.0], \"col\":[\"Blue Red\", \"Green\", \"Red\"],"
+ "\"tests\":[ {\"b\":4}, {\"a\":1, \"b\":2}],"
+ " \"bar\": [ 100, 0, 0, 0, 0, 0, 0, 0, 0, 1000],"
+ " \"text\":\"hello\"}}",
+ "{\"fixed_array\":{\"foo\":[1,2,0,0,0,0,0,"
+ "0,0,0,0,0,0,0,0,16],"
+ "\"bar\":[100,0,0,0,0,0,0,0,0,1000],"
+ "\"col\":[\"Red Blue\",\"Green\",\"Red\"],"
+ "\"tests\":[{\"a\":0,\"b\":4},{\"a\":1,\"b\":2}],"
+ "\"text\":\"hello\"}}");
+#endif
+
+ TEST_FLAGS(flatcc_json_parser_f_skip_array_overflow, 0,
+ "{ \"fixed_array\": { \"foo\": [ 1.0, 2.0, 0, 0, 0, 0, 0,"
+ " 0, 0, 0, 0, 0, 0, 0, 0, 16.0, 99],"
+ " \"bar\": [ 100, 0, 0, 0, 0, 0, 0, 0, 0, 1000, 99],"
+ " \"text\":\"hello, world\"}}",
+ "{\"fixed_array\":{\"foo\":[1,2,0,0,0,0,0,"
+ "0,0,0,0,0,0,0,0,16],"
+ "\"bar\":[100,0,0,0,0,0,0,0,0,1000],"
+ "\"col\":[0,0,0],"
+ "\"tests\":[{\"a\":0,\"b\":0},{\"a\":0,\"b\":0}],"
+ "\"text\":\"hello\"}}");
+
+ TEST( "{ \"fixed_array\": { \"foo\": [ 1.0, 2.0 ],"
+ " \"bar\": [ 100 ], \"text\": \"K\\x00A\\x00\" }}",
+ "{\"fixed_array\":{\"foo\":[1,2,0,0,0,0,0,"
+ "0,0,0,0,0,0,0,0,0],"
+ "\"bar\":[100,0,0,0,0,0,0,0,0,0],"
+ "\"col\":[0,0,0],"
+ "\"tests\":[{\"a\":0,\"b\":0},{\"a\":0,\"b\":0}],"
+ "\"text\":\"K\\u0000A\"}}");
+
+ TEST_ERROR_FLAGS(flatcc_json_parser_f_reject_array_underflow, 0,
+ "{ \"fixed_array\": { \"foo\": [ 1.0, 2.0 ] }}",
+ flatcc_json_parser_error_array_underflow);
+
+ TEST_ERROR_FLAGS(flatcc_json_parser_f_reject_array_underflow, 0,
+ "{ \"fixed_array\": { \"text\": \"K\\x00A\\x00\" }}",
+ flatcc_json_parser_error_array_underflow);
+
+ TEST_ERROR(
+ "{ \"fixed_array\": { \"foo\": [ 1.0, 2.0, 0, 0, 0, 0, 0,"
+ " 0, 0, 0, 0, 0, 0, 0, 0, 16.0, 99],"
+ " \"bar\": [ 100, 0, 0, 0, 0, 0, 0, 0, 0, 1000, 99] }}",
+ flatcc_json_parser_error_array_overflow);
+
+ END_TEST();
+}
+
+/*
+ * Here we cover some corner cases around unions, flag
+ * enumerations, and nested buffers.
+ *
+ * More complex objects with struct members etc. are reasonably
+ * covered in the printer and parser tests using the golden data
+ * set.
+ */
+int main(void)
+{
+ BEGIN_TEST(Monster);
+
+ ret |= edge_case_tests();
+ ret |= error_case_tests();
+ ret |= union_vector_tests();
+ ret |= fixed_array_tests();
+ ret |= base64_tests();
+ ret |= mixed_type_union_tests();
+
+ /* Allow trailing comma. */
+ TEST( "{ \"name\": \"Monster\", }",
+ "{\"name\":\"Monster\"}");
+
+ TEST( "{\"color\": \"Red\", \"name\": \"Monster\", }",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": \"Green\" }",
+ "{\"name\":\"Monster\",\"color\":\"Green\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": \"Green Red Blue\" }",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": \" Green Red Blue \" }",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": \"Red\" }",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\" :\"Green\" }",
+ "{\"name\":\"Monster\",\"color\":\"Green\"}");
+
+ /* Default value. */
+ TEST( "{ \"name\": \"Monster\", \"color\": \"Blue\" }",
+ "{\"name\":\"Monster\"}");
+
+ /* Default value. */
+ TEST( "{ \"name\": \"Monster\", \"color\": 8}",
+ "{\"name\":\"Monster\"}");
+#if UQ
+ /* Allow trailing comma. */
+ TEST( "{ name: \"Monster\", }",
+ "{\"name\":\"Monster\"}");
+
+ TEST( "{color: \"Red\", name: \"Monster\", }",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ name: \"Monster\", color: \"Green\" }",
+ "{\"name\":\"Monster\",\"color\":\"Green\"}");
+
+ TEST( "{ name: \"Monster\", color: \"Green Red Blue\" }",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+
+ TEST( "{ name: \"Monster\", color: \" Green Red Blue \" }",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+
+ TEST( "{ name: \"Monster\", color: Red }",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ name: \"Monster\", color: Green }",
+ "{\"name\":\"Monster\",\"color\":\"Green\"}");
+
+ /* Default value. */
+ TEST( "{ name: \"Monster\", color: Blue }",
+ "{\"name\":\"Monster\"}");
+
+ /* Default value. */
+ TEST( "{ name: \"Monster\", color: 8}",
+ "{\"name\":\"Monster\"}");
+#endif
+#if UQL
+ TEST( "{ name: \"Monster\", color: Green Red }",
+ "{\"name\":\"Monster\",\"color\":\"Red Green\"}");
+#endif
+
+#if UQL
+ /* No leading space in unquoted flag. */
+ TEST( "{ name: \"Monster\", color:Green Red }",
+ "{\"name\":\"Monster\",\"color\":\"Red Green\"}");
+
+ TEST( "{ name: \"Monster\", color: Green Red}",
+ "{\"name\":\"Monster\",\"color\":\"Red Green\"}");
+
+ TEST( "{ name: \"Monster\", color:Green Blue Red }",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+#endif
+
+ TEST( "{ \"name\": \"Monster\", \"color\": 1}",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": 2}",
+ "{\"name\":\"Monster\",\"color\":\"Green\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": 9}",
+ "{\"name\":\"Monster\",\"color\":\"Red Blue\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": 11}",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": 12}",
+ "{\"name\":\"Monster\",\"color\":12}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": 15}",
+ "{\"name\":\"Monster\",\"color\":15}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": 0}",
+ "{\"name\":\"Monster\",\"color\":0}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": \"Color.Red\"}",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"color\": \"MyGame.Example.Color.Red\"}",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"hp\": \"Color.Green\"}",
+ "{\"hp\":2,\"name\":\"Monster\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"hp\": \"Color.Green\"}",
+ "{\"hp\":2,\"name\":\"Monster\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"test_type\": \"Monster\", \"test\": { \"name\": \"any Monster\" } }",
+ "{\"name\":\"Monster\",\"test_type\":\"Monster\",\"test\":{\"name\":\"any Monster\"}}");
+
+ /* This is tricky because the test field must be reparsed after discovering the test type. */
+ TEST( "{ \"name\": \"Monster\", \"test\": { \"name\": \"second Monster\" }, \"test_type\": \"Monster\" }",
+ "{\"name\":\"Monster\",\"test_type\":\"Monster\",\"test\":{\"name\":\"second Monster\"}}");
+
+ /* Also test that parsing can continue after reparse. */
+ TEST( "{ \"name\": \"Monster\", \"test\": { \"name\": \"second Monster\" }, \"hp\":17, \"test_type\":\n \"Monster\", \"color\":\"Green\" }",
+ "{\"hp\":17,\"name\":\"Monster\",\"color\":\"Green\",\"test_type\":\"Monster\",\"test\":{\"name\":\"second Monster\"}}");
+
+ /* Test that NONE is recognized, and that we do not get a missing table error. */
+ TEST( "{ \"name\": \"Monster\", \"test_type\": \"NONE\" }",
+ "{\"name\":\"Monster\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"test_type\": 0 }",
+ "{\"name\":\"Monster\"}");
+
+#if UQ
+ TEST( "{ name: \"Monster\", color: 1}",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ name: \"Monster\", color: 2}",
+ "{\"name\":\"Monster\",\"color\":\"Green\"}");
+
+ TEST( "{ name: \"Monster\", color: 9}",
+ "{\"name\":\"Monster\",\"color\":\"Red Blue\"}");
+
+ TEST( "{ name: \"Monster\", color: 11}",
+ "{\"name\":\"Monster\",\"color\":\"Red Green Blue\"}");
+
+ TEST( "{ name: \"Monster\", color: 12}",
+ "{\"name\":\"Monster\",\"color\":12}");
+
+ TEST( "{ name: \"Monster\", color: 15}",
+ "{\"name\":\"Monster\",\"color\":15}");
+
+ TEST( "{ name: \"Monster\", color: 0}",
+ "{\"name\":\"Monster\",\"color\":0}");
+
+ TEST( "{ name: \"Monster\", color: Color.Red}",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ name: \"Monster\", color: MyGame.Example.Color.Red}",
+ "{\"name\":\"Monster\",\"color\":\"Red\"}");
+
+ TEST( "{ name: \"Monster\", hp: Color.Green}",
+ "{\"hp\":2,\"name\":\"Monster\"}");
+
+ TEST( "{ name: \"Monster\", hp: Color.Green}",
+ "{\"hp\":2,\"name\":\"Monster\"}");
+
+ TEST( "{ name: \"Monster\", test_type: Monster, test: { name: \"any Monster\" } }",
+ "{\"name\":\"Monster\",\"test_type\":\"Monster\",\"test\":{\"name\":\"any Monster\"}}");
+
+ /* This is tricky because the test field must be reparsed after discovering the test type. */
+ TEST( "{ name: \"Monster\", test: { name: \"second Monster\" }, test_type: Monster }",
+ "{\"name\":\"Monster\",\"test_type\":\"Monster\",\"test\":{\"name\":\"second Monster\"}}");
+
+ /* Also test that parsing can continue after reparse. */
+ TEST( "{ name: \"Monster\", test: { name: \"second Monster\" }, hp:17, test_type:\n Monster, color:Green }",
+ "{\"hp\":17,\"name\":\"Monster\",\"color\":\"Green\",\"test_type\":\"Monster\",\"test\":{\"name\":\"second Monster\"}}");
+
+ /* Test that NONE is recognized, and that we do not get a missing table error. */
+ TEST( "{ name: \"Monster\", test_type: NONE }",
+ "{\"name\":\"Monster\"}");
+
+ TEST( "{ name: \"Monster\", test_type: 0 }",
+ "{\"name\":\"Monster\"}");
+
+#endif
+
+#if UQL
+ /*
+ * Test that generic parsing handles multiple flags correctly during
+ * first pass before backtracking.
+ */
+ TEST( "{ name: \"Monster\", test: { name: \"second Monster\", color: Red Green }, test_type: Monster }",
+ "{\"name\":\"Monster\",\"test_type\":\"Monster\",\"test\":{\"name\":\"second Monster\",\"color\":\"Red Green\"}}");
+#endif
+
+ /* Ditto quoted flags. */
+ TEST( "{ \"name\": \"Monster\", \"test\": { \"name\": \"second Monster\", \"color\": \" Red Green \" }, \"test_type\": \"Monster\" }",
+ "{\"name\":\"Monster\",\"test_type\":\"Monster\",\"test\":{\"name\":\"second Monster\",\"color\":\"Red Green\"}}");
+
+ /*
+ * Note that '\/' becomes just '/', and that '/' also works in input.
+ *
+ * The json printer does not have a concept of \x; it always uses
+ * unicode escapes.
+ *
+ * We use the json extension \x to inject the control character 03,
+ * which is printed as a unicode escape, while \u00F8 is a valid
+ * character after encoding and is thus not escaped after printing
+ * but rather becomes the two-byte utf-8 encoding of 'ø'; here we use
+ * C escapes to form the utf-8 bytes C3 B8 == \u00F8.
+ */
+ TEST( "{ \"name\": \"Mon\xfF\xFf\\x03s\\xC3\\xF9\\u00F8ter\\b\\f\\n\\r\\t\\\"\\\\\\/'/\", }",
+ "{\"name\":\"Mon\xff\xff\\u0003s\xc3\xf9\xc3\xb8ter\\b\\f\\n\\r\\t\\\"\\\\/'/\"}");
+
+ TEST( "{ \"name\": \"\\u168B\\u1691\"}",
+ "{\"name\":\"\xe1\x9a\x8b\xe1\x9a\x91\"}");
+
+ /* Nested flatbuffer, either is a known object, or as a vector. */
+ TEST( "{ \"name\": \"Monster\", \"testnestedflatbuffer\":{ \"name\": \"sub Monster\" } }",
+ "{\"name\":\"Monster\",\"testnestedflatbuffer\":{\"name\":\"sub Monster\"}}");
+
+#if FLATBUFFERS_PROTOCOL_IS_LE
+ TEST( "{ \"name\": \"Monster\", \"testnestedflatbuffer\":"
+ "[" /* start of nested flatbuffer, implicit size: 40 */
+ "4,0,0,0," /* header: object offset = 4, no identifier */
+ "248,255,255,255," /* vtable offset */
+ "16,0,0,0," /* offset to name */
+ "12,0,8,0,0,0,0,0,0,0,4,0," /* vtable */
+ "11,0,0,0,115,117,98,32,77,111,110,115,116,101,114,0" /* name = "sub Monster" */
+ "]" /* end of nested flatbuffer */
+ "}",
+ "{\"name\":\"Monster\",\"testnestedflatbuffer\":{\"name\":\"sub Monster\"}}");
+#else
+ TEST( "{ \"name\": \"Monster\", \"testnestedflatbuffer\":"
+ "[" /* start of nested flatbuffer, implicit size: 40 */
+ "0,0,0,4," /* header: object offset = 4, no identifier */
+ "255,255,255,248," /* vtable offset */
+ "0,0,0,16," /* offset to name */
+ "0,12,0,8,0,0,0,0,0,0,0,4," /* vtable */
+ "0,0,0,11,115,117,98,32,77,111,110,115,116,101,114,0" /* name = "sub Monster" */
+ "]" /* end of nested flatbuffer */
+ "}",
+ "{\"name\":\"Monster\",\"testnestedflatbuffer\":{\"name\":\"sub Monster\"}}");
+#endif
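+
+ /*
+ * The two branches above encode the same nested buffer; only the
+ * byte order of multi-byte values differs between the little- and
+ * big-endian protocol variants.
+ */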
+
+ /* Test empty table */
+ TEST( "{ \"name\": \"Monster\", \"testempty\": {} }",
+ "{\"name\":\"Monster\",\"testempty\":{}}");
+
+ /* Test empty array */
+ TEST( "{ \"name\": \"Monster\", \"testarrayoftables\": [] }",
+ "{\"name\":\"Monster\",\"testarrayoftables\":[]}");
+
+ /* Test JSON prefix parsing */
+ TEST( "{ \"name\": \"Monster\", \"test_type\":\"Alt\", \"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\": { \"aaaa\": \"test\", \"aaaa12345\": 17 } }}}",
+ "{\"name\":\"Monster\",\"test_type\":\"Alt\",\"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\":{\"aaaa\":\"test\",\"aaaa12345\":17}}}}");
+
+ /* TODO: this parses with the last two }} missing, although it does not add the broken objects. */
+ TEST( "{ \"name\": \"Monster\", \"test_type\":\"Alt\", \"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\": { \"bbbb\": \"test\", \"bbbb1234\": 19 } }",
+ "{\"name\":\"Monster\"}");
+
+ TEST( "{ \"name\": \"Monster\", \"test_type\":\"Alt\", \"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\": { \"bbbb\": \"test\", \"bbbb1234\": 19 } }}}",
+ "{\"name\":\"Monster\",\"test_type\":\"Alt\",\"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\":{\"bbbb\":\"test\",\"bbbb1234\":19}}}}");
+
+ TEST( "{ \"name\": \"Monster\", \"test_type\":\"Alt\", \"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\": { \"cccc\": \"test\", \"cccc1234\": 19, \"cccc12345\": 17 } }}}",
+ "{\"name\":\"Monster\",\"test_type\":\"Alt\",\"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\":{\"cccc\":\"test\",\"cccc1234\":19,\"cccc12345\":17}}}}");
+
+ TEST( "{ \"name\": \"Monster\", \"test_type\":\"Alt\", \"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\": { \"dddd1234\": 19, \"dddd12345\": 17 } }}}",
+ "{\"name\":\"Monster\",\"test_type\":\"Alt\",\"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing\":{\"dddd1234\":19,\"dddd12345\":17}}}}");
+
+ TEST( "{ \"name\": \"Monster\", \"test_type\":\"Alt\", \"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing2\": { \"aaaa_bbbb_steps\": 19, \"aaaa_bbbb_start_\": 17 } }}}",
+ "{\"name\":\"Monster\",\"test_type\":\"Alt\",\"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing2\":{\"aaaa_bbbb_steps\":19,\"aaaa_bbbb_start_\":17}}}}");
+
+ TEST( "{ \"name\": \"Monster\", \"test_type\":\"Alt\", \"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing3\": { \"aaaa_bbbb_steps\": 19, \"aaaa_bbbb_start_steps\": 17 } }}}",
+ "{\"name\":\"Monster\",\"test_type\":\"Alt\",\"test\":{\"prefix\":{"
+ "\"testjsonprefixparsing3\":{\"aaaa_bbbb_steps\":19,\"aaaa_bbbb_start_steps\":17}}}}");
+
+ return ret ? -1: 0;
+}
diff --git a/test/json_test/test_json_parser.c b/test/json_test/test_json_parser.c
new file mode 100644
index 0000000..a985cfa
--- /dev/null
+++ b/test/json_test/test_json_parser.c
@@ -0,0 +1,164 @@
+#include <stdio.h>
+
+#ifndef FLATCC_BENCHMARK
+#define FLATCC_BENCHMARK 0
+#endif
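+
+/*
+ * When FLATCC_BENCHMARK is defined non-zero at compile time, test_parse()
+ * also runs warmup and timing loops over the same golden JSON input.
+ */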
+
+/* Only needed for verification. */
+#include "monster_test_reader.h"
+#include "monster_test_json_parser.h"
+#include "flatcc/support/hexdump.h"
+#include "flatcc/support/cdump.h"
+#include "flatcc/support/readfile.h"
+
+#if FLATCC_BENCHMARK
+#include "flatcc/support/elapsed.h"
+#endif
+
+const char *filename = "monsterdata_test.golden";
+
+#define BENCH_TITLE "monsterdata_test.golden"
+
+#ifdef NDEBUG
+#define COMPILE_TYPE "(optimized)"
+#else
+#define COMPILE_TYPE "(debug)"
+#endif
+
+#define FILE_SIZE_MAX (1024 * 10)
+
+#undef ns
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Example, x)
+
+#define test_assert(x) do { if (!(x)) { assert(0); return -1; }} while(0)
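+/* test_assert aborts via assert() in debug builds; with NDEBUG it returns -1 from the enclosing function instead. */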
+
+/* A helper to simplify creating buffers vectors from C-arrays. */
+#define c_vec_len(V) (sizeof(V)/sizeof((V)[0]))
+
+int verify_parse(void *buffer)
+{
+ ns(Test_struct_t) test;
+ ns(Vec3_struct_t) pos;
+ ns(Monster_table_t) monster = ns(Monster_as_root_with_identifier)(buffer, ns(Monster_file_identifier));
+
+ pos = ns(Monster_pos(monster));
+ test_assert(pos);
+ test_assert(ns(Vec3_x(pos) == 1));
+ test_assert(ns(Vec3_y(pos) == 2));
+ test_assert(ns(Vec3_z(pos) == 3));
+ test_assert(ns(Vec3_test1(pos) == 3.0));
+ test_assert(ns(Vec3_test2(pos) == ns(Color_Green)));
+ test = ns(Vec3_test3(pos));
+ test_assert(test);
+ test_assert(ns(Test_a(test)) == 5);
+ test_assert(ns(Test_b(test)) == 6);
+
+ // TODO: hp and further fields
+
+ return 0;
+
+}
+// TODO:
+// When running the benchmark with the wrong size argument (output size
+// instead of input size), the warmup loop iterates indefinitely in the
+// first iteration. This suggests there is an end check missing somewhere
+// and this needs to be debugged. The input size as of this writing is 701
+// bytes, and the output size is 288 bytes.
+int test_parse(void)
+{
+#if FLATCC_BENCHMARK
+ double t1, t2;
+ int i;
+ int rep = 1000000;
+ int warmup_rep = 1000000;
+#endif
+
+ const char *buf;
+ void *flatbuffer = 0;
+ size_t in_size, out_size;
+ flatcc_json_parser_t ctx;
+ flatcc_builder_t builder;
+ flatcc_builder_t *B = &builder;
+ int ret = -1;
+ flatcc_json_parser_flags_t flags = 0;
+
+ flatcc_builder_init(B);
+
+ buf = readfile(filename, FILE_SIZE_MAX, &in_size);
+ if (!buf) {
+ fprintf(stderr, "%s: could not read input json file\n", filename);
+ return -1;
+ }
+
+ if (monster_test_parse_json(B, &ctx, buf, in_size, flags)) {
+ goto failed;
+ }
+ fprintf(stderr, "%s: successfully parsed %d lines\n", filename, ctx.line);
+ flatbuffer = flatcc_builder_finalize_aligned_buffer(B, &out_size);
+ hexdump("parsed monsterdata_test.golden", flatbuffer, out_size, stdout);
+ fprintf(stderr, "input size: %lu, output size: %lu\n",
+ (unsigned long)in_size, (unsigned long)out_size);
+ verify_parse(flatbuffer);
+
+ cdump("golden", flatbuffer, out_size, stdout);
+
+ flatcc_builder_reset(B);
+#if FLATCC_BENCHMARK
+ fprintf(stderr, "Now warming up\n");
+ for (i = 0; i < warmup_rep; ++i) {
+ if (monster_test_parse_json(B, &ctx, buf, in_size, flags)) {
+ goto failed;
+ }
+ flatcc_builder_reset(B);
+ }
+
+ fprintf(stderr, "Now benchmarking\n");
+ t1 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ if (monster_test_parse_json(B, &ctx, buf, in_size, flags)) {
+ goto failed;
+ }
+ flatcc_builder_reset(B);
+ }
+ t2 = elapsed_realtime();
+
+ printf("----\n");
+ show_benchmark(BENCH_TITLE " C generated JSON parse " COMPILE_TYPE, t1, t2, in_size, rep, "1M");
+#endif
+ ret = 0;
+
+done:
+ if (flatbuffer) {
+ flatcc_builder_aligned_free(flatbuffer);
+ }
+ if (buf) {
+ free((void *)buf);
+ }
+ flatcc_builder_clear(B);
+ return ret;
+
+failed:
+ fprintf(stderr, "%s:%d:%d: %s\n",
+ filename, (int)ctx.line, (int)(ctx.error_loc - ctx.line_start + 1),
+ flatcc_json_parser_error_string(ctx.error));
+ goto done;
+}
+
+/* We take arguments so the test can run without copying sources. */
+#define usage \
+"wrong number of arguments:\n" \
+"usage: <program> [<input-filename>]\n"
+
+int main(int argc, const char *argv[])
+{
+ fprintf(stderr, "JSON parse test\n");
+
+ if (argc != 1 && argc != 2) {
+ fprintf(stderr, usage);
+ exit(1);
+ }
+ if (argc == 2) {
+ filename = argv[1];
+ }
+ return test_parse();
+}
diff --git a/test/json_test/test_json_printer.c b/test/json_test/test_json_printer.c
new file mode 100644
index 0000000..efbd572
--- /dev/null
+++ b/test/json_test/test_json_printer.c
@@ -0,0 +1,129 @@
+#include <stdio.h>
+
+/* Only needed for verification. */
+#include "monster_test_json_printer.h"
+#include "flatcc/support/readfile.h"
+#include "flatcc_golden.c"
+
+#ifdef NDEBUG
+#define COMPILE_TYPE "(optimized)"
+#else
+#define COMPILE_TYPE "(debug)"
+#endif
+
+#define FILE_SIZE_MAX (1024 * 10)
+
+#undef ns
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Example, x)
+
+/* A helper to simplify creating buffer vectors from C-arrays. */
+#define c_vec_len(V) (sizeof(V)/sizeof((V)[0]))
+
+const char *filename = 0; /* "monsterdata_test.mon"; */
+const char *golden_filename = "monsterdata_test.golden";
+const char *target_filename = "monsterdata_test.json.txt";
+
+int test_print(void)
+{
+ int ret = 0;
+ const char *buf = 0;
+ const char *golden = 0;
+ const char *target = 0;
+ size_t size = 0, golden_size = 0, target_size = 0;
+ flatcc_json_printer_t ctx_obj, *ctx;
+ FILE *fp = 0;
+
+ ctx = &ctx_obj;
+
+ fp = fopen(target_filename, "wb");
+ if (!fp) {
+ fprintf(stderr, "%s: could not open output file\n", target_filename);
+ /* ctx not ready for cleanup, so exit directly. */
+ return -1;
+ }
+ flatcc_json_printer_init(ctx, fp);
+ /* Uses same formatting as golden reference file. */
+ flatcc_json_printer_set_nonstrict(ctx);
+
+ if (filename && strcmp(filename, "-")) {
+ buf = readfile(filename, FILE_SIZE_MAX, &size);
+ } else {
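+ /* Use the embedded golden buffer matching the configured flatbuffer wire endianness. */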
+#if FLATBUFFERS_PROTOCOL_IS_LE
+ buf = (const char *)flatcc_golden_le;
+ size = sizeof(flatcc_golden_le);
+#else
+ buf = (const char *)flatcc_golden_be;
+ size = sizeof(flatcc_golden_be);
+#endif
+ }
+
+ if (!buf) {
+ fprintf(stderr, "%s: could not read input flatbuffer file\n", filename);
+ goto fail;
+ }
+ golden = readfile(golden_filename, FILE_SIZE_MAX, &golden_size);
+ if (!golden) {
+ fprintf(stderr, "%s: could not read verification json file\n", golden_filename);
+ goto fail;
+ }
+ ns(Monster_print_json_as_root(ctx, buf, size, "MONS"));
+ flatcc_json_printer_flush(ctx);
+ if (flatcc_json_printer_get_error(ctx)) {
+ printf("could not print monster data\n");
+ }
+ fclose(fp);
+ fp = 0;
+ target = readfile(target_filename, FILE_SIZE_MAX, &target_size);
+ if (!target) {
+ fprintf(stderr, "%s: could not read back output file\n", target_filename);
+ goto fail;
+ }
+ if (target_size != golden_size || memcmp(target, golden, target_size)) {
+ fprintf(stderr, "generated output file did not match verification file\n");
+ goto fail;
+ }
+ fprintf(stderr, "json print test succeeded\n");
+
+done:
+ flatcc_json_printer_clear(ctx);
+ if (!filename || !strcmp(filename, "-")) {
+ buf = 0;
+ }
+ if (buf) {
+ free((void *)buf);
+ }
+ if (golden) {
+ free((void *)golden);
+ }
+ if (target) {
+ free((void *)target);
+ }
+ if (fp) {
+ fclose(fp);
+ }
+ return ret;
+fail:
+ ret = -1;
+ goto done;
+}
+
+/* We take arguments so the output file can be generated in the build directory without copying sources. */
+#define usage \
+"wrong number of arguments:\n" \
+"usage: <program> [(<input-filename>|'-') <reference-filename> <output-filename>]\n" \
+" noargs, or '-' use default binary buffer matching endianness of flatbuffer format\n"
+
+int main(int argc, const char *argv[])
+{
+ fprintf(stderr, "running json print test\n");
+ if (argc != 1 && argc != 4) {
+ fprintf(stderr, usage);
+ exit(1);
+ }
+ if (argc == 4) {
+ filename = argv[1];
+ golden_filename = argv[2];
+ target_filename = argv[3];
+ }
+ return test_print();
+}
diff --git a/test/leakcheck-full.sh b/test/leakcheck-full.sh
new file mode 100755
index 0000000..db2f452
--- /dev/null
+++ b/test/leakcheck-full.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -e
+../build.sh
+cd `dirname $0`/..
+mkdir -p build/tmp/leakcheck-full
+valgrind --leak-check=full --show-leak-kinds=all \
+ bin/flatcc_d -a -o build/tmp/leakcheck-full --prefix zzz --common-prefix \
+ hello test/monster_test/monster_test.fbs
diff --git a/test/leakcheck.sh b/test/leakcheck.sh
new file mode 100755
index 0000000..e77705d
--- /dev/null
+++ b/test/leakcheck.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -e
+../build.sh
+cd `dirname $0`/..
+mkdir -p build/tmp/leakcheck
+valgrind --leak-check=yes \
+ bin/flatcc_d -a -o build/tmp/leakcheck --prefix zzz --common-prefix hello \
+ test/monster_test/monster_test.fbs
diff --git a/test/load_test/CMakeLists.txt b/test/load_test/CMakeLists.txt
new file mode 100644
index 0000000..0c146d1
--- /dev/null
+++ b/test/load_test/CMakeLists.txt
@@ -0,0 +1,20 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_load_test ALL)
+add_custom_command (
+ TARGET gen_load_test
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -a -o "${GEN_DIR}" "${FBS_DIR}/monster_test.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs"
+)
+add_executable(load_test load_test.c)
+add_dependencies(load_test gen_load_test)
+target_link_libraries(load_test flatccrt)
+
+add_test(load_test load_test${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/load_test/load_test.c b/test/load_test/load_test.c
new file mode 100644
index 0000000..466f5cc
--- /dev/null
+++ b/test/load_test/load_test.c
@@ -0,0 +1,164 @@
+#include <stdio.h>
+#include "monster_test_builder.h"
+#include "flatcc/support/elapsed.h"
+
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Example, x)
+#define nsc(x) FLATBUFFERS_WRAP_NAMESPACE(flatbuffers, x)
+#define c_vec_len(V) (sizeof(V)/sizeof((V)[0]))
+
+#define MEASURE_DECODE 1
+#define MONSTER_REP 1000
+#define NAME_REP 100
+#define INVENTORY_REP 100
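+
+/*
+ * The load test builds MONSTER_REP monsters, each with a name made of
+ * NAME_REP repetitions of "Monster" and an inventory of INVENTORY_REP
+ * copies of invdata; with MEASURE_DECODE enabled each monster is also
+ * verified during the timed loop.
+ */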
+
+static uint8_t invdata[1000];
+
+static ns(Monster_ref_t) create_monster(flatcc_builder_t *B)
+{
+ size_t i;
+
+ ns(Monster_start(B));
+ ns(Monster_name_start(B));
+ for (i = 0; i < NAME_REP; ++i) {
+ nsc(string_append(B, "Monster", 7));
+ }
+ ns(Monster_name_end(B));
+ ns(Monster_inventory_start(B));
+ for (i = 0; i < INVENTORY_REP; ++i) {
+ nsc(uint8_vec_append(B, invdata, c_vec_len(invdata)));
+ }
+ ns(Monster_inventory_end(B));
+ return ns(Monster_end(B));
+}
+
+static ns(Monster_vec_ref_t) create_monsters(flatcc_builder_t *B)
+{
+ size_t i;
+ ns(Monster_ref_t) m;
+
+ ns(Monster_vec_start(B));
+ for (i = 0; i < MONSTER_REP; ++i) {
+ m = create_monster(B);
+ assert(m);
+ ns(Monster_vec_push(B, m));
+ }
+ return ns(Monster_vec_end(B));
+}
+
+static int create_root_monster(flatcc_builder_t *B)
+{
+ ns(Monster_vec_ref_t) mv;
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "root_monster"));
+ mv = create_monsters(B);
+ assert(mv);
+ ns(Monster_testarrayoftables_add(B, mv));
+ ns(Monster_end_as_root(B));
+ return 0;
+}
+
+#if MEASURE_DECODE
+static int verify_monster(const char *base, ns(Monster_table_t) mon)
+{
+ size_t i;
+ nsc(string_t) s = ns(Monster_name(mon));
+ /*
+ * This only works because the element type is a byte; otherwise
+ * vec_at should be used to convert the endian format.
+ */
+ const uint8_t *inv = ns(Monster_inventory(mon));
+
+ if (nsc(string_len(s)) != NAME_REP * 7) {
+ assert(0);
+ return -1;
+ }
+ if (nsc(uint8_vec_len(inv)) != INVENTORY_REP * c_vec_len(invdata)) {
+ assert(0);
+ return -1;
+ }
+ for (i = 0; i < NAME_REP; ++i) {
+ if (memcmp(s + i * 7, "Monster", 7)) {
+ printf("failed monster name at %lu: %s\n", (unsigned long)i, s ? s : "NULL");
+ printf("offset: %ld\n", (long)(s + i * 7 - base));
+ assert(0);
+ return -1;
+ }
+ }
+ for (i = 0; i < INVENTORY_REP; ++i) {
+ if (memcmp(inv + i * c_vec_len(invdata), invdata, c_vec_len(invdata))) {
+ assert(0);
+ return -1;
+ }
+ }
+ return 0;
+}
+#endif
+
+int main(int argc, char *argv[])
+{
+ FILE *fp;
+ void *buffer;
+ size_t size;
+ flatcc_builder_t builder, *B;
+ ns(Monster_table_t) mon;
+ ns(Monster_vec_t) mv;
+ double t1, t2;
+ int rep = 10, i;
+ int ret = 0;
+
+#if MEASURE_DECODE
+ size_t j;
+#endif
+
+ (void)argc;
+ (void)argv;
+
+ B = &builder;
+ flatcc_builder_init(B);
+ create_root_monster(B);
+ buffer = flatcc_builder_finalize_buffer(B, &size);
+ fp = fopen("monster_load_test.dat", "wb");
+ if (!fp) {
+ ret = -1;
+ goto done;
+ }
+ ret |= size != fwrite(buffer, 1, size, fp);
+ fclose(fp);
+ if (ret) {
+ goto done;
+ }
+ printf("buffer size: %lu\n", (unsigned long)size);
+ printf("start timing ...\n");
+ t1 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ create_root_monster(B);
+ flatcc_builder_copy_buffer(B, buffer, size);
+ mon = ns(Monster_as_root(buffer));
+ ret |= strcmp(ns(Monster_name(mon)), "root_monster");
+ assert(ret == 0);
+ mv = ns(Monster_testarrayoftables(mon));
+ /* Negated logic, 0 is OK. */
+ ret |= ns(Monster_vec_len(mv)) != MONSTER_REP;
+ assert(ret == 0);
+#if MEASURE_DECODE
+ for (j = 0; j < MONSTER_REP; ++j) {
+ ret |= verify_monster(buffer, ns(Monster_vec_at(mv, j)));
+ assert(ret == 0);
+ }
+#endif
+ if (ret) {
+ goto done;
+ }
+ }
+ t2 = elapsed_realtime();
+ show_benchmark("encode and partially decode large buffer", t1, t2, size, rep, 0);
+done:
+ flatcc_builder_clear(B);
+ free(buffer);
+ if (ret) {
+ printf("load test failed\n");
+ }
+ return ret;
+}
diff --git a/test/load_test/load_test.sh b/test/load_test/load_test.sh
new file mode 100755
index 0000000..d94fbb0
--- /dev/null
+++ b/test/load_test/load_test.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+TMP=build/tmp/test/load_test
+
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+bin/flatcc -a -o ${TMP} test/monster_test/monster_test.fbs
+
+cp test/load_test/*.c ${TMP}
+cd ${TMP}
+cc -g -I ${ROOT}/include load_test.c \
+ ${ROOT}/lib/libflatccrt.a -o load_test_d
+cc -O3 -DNDEBUG -I ${ROOT}/include load_test.c \
+ ${ROOT}/lib/libflatccrt.a -o load_test
+echo "running load test debug"
+./load_test_d
+echo "running load test optimized"
+./load_test
diff --git a/test/monster_test/CMakeLists.txt b/test/monster_test/CMakeLists.txt
new file mode 100644
index 0000000..5860a37
--- /dev/null
+++ b/test/monster_test/CMakeLists.txt
@@ -0,0 +1,20 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_test ALL)
+add_custom_command (
+ TARGET gen_monster_test
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -a -o "${GEN_DIR}" "${FBS_DIR}/monster_test.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs"
+)
+add_executable(monster_test monster_test.c)
+add_dependencies(monster_test gen_monster_test)
+target_link_libraries(monster_test flatccrt)
+
+add_test(monster_test monster_test${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/monster_test/attributes.fbs b/test/monster_test/attributes.fbs
new file mode 100644
index 0000000..bbf5c43
--- /dev/null
+++ b/test/monster_test/attributes.fbs
@@ -0,0 +1,6 @@
+// Attributes not supported by flatcc
+attribute "flexbuffer";
+attribute "csharp_partial";
+attribute "cpp_type";
+attribute "streaming";
+attribute "idempotent";
diff --git a/test/monster_test/include_test1.fbs b/test/monster_test/include_test1.fbs
new file mode 100644
index 0000000..11aebe8
--- /dev/null
+++ b/test/monster_test/include_test1.fbs
@@ -0,0 +1,5 @@
+include "include_test2.fbs";
+include "include_test2.fbs"; // should be skipped
+include "include_test1.fbs"; // should be skipped
+
+
diff --git a/test/monster_test/include_test2.fbs b/test/monster_test/include_test2.fbs
new file mode 100644
index 0000000..ff23e93
--- /dev/null
+++ b/test/monster_test/include_test2.fbs
@@ -0,0 +1,11 @@
+include "include_test2.fbs"; // should be skipped
+
+attribute "included_attribute";
+
+namespace MyGame.OtherNameSpace;
+
+enum FromInclude:long { IncludeVal, Foo = 17 }
+
+struct Unused { unused: byte; }
+
+
diff --git a/test/monster_test/monster_test.c b/test/monster_test/monster_test.c
new file mode 100644
index 0000000..f3150d8
--- /dev/null
+++ b/test/monster_test/monster_test.c
@@ -0,0 +1,2919 @@
+#include <stdio.h>
+
+#include "monster_test_builder.h"
+#include "monster_test_verifier.h"
+
+#include "flatcc/support/hexdump.h"
+#include "flatcc/support/elapsed.h"
+#include "flatcc/portable/pparsefp.h"
+#include "../../config/config.h"
+
+/*
+ * Convenience macro to deal with long namespace names,
+ * and to make code reusable with other namespaces.
+ *
+ * Note: we could also use
+ *
+ * #define ns(x) MyGame_Example_ ## x
+ *
+ * but it wouldn't handle nested ns calls.
+ *
+ * For historic reasons some of this test does not use the ns macro,
+ * and some parts avoid nesting ns calls by placing parentheses
+ * differently, although this isn't required with this wrapper macro.
+ */
+#undef ns
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(MyGame_Example, x)
+
+#undef nsf
+#define nsf(x) FLATBUFFERS_WRAP_NAMESPACE(Fantasy, x)
+
+/*
+ * Wrap the common namespace (flatbuffers_). Many operations in the
+ * common namespace such as `flatbuffers_string_create` are also mapped
+ * to member fields such as `MyGame_Example_Monster_name_create` and
+ * this macro provides a consistent interface to namespaces with
+ * `nsc(string_create)` similar to `ns(Monster_name_create)`.
+ */
+#undef nsc
+#define nsc(x) FLATBUFFERS_WRAP_NAMESPACE(flatbuffers, x)
+
+/* A helper to simplify creating buffer vectors from C-arrays. */
+#define c_vec_len(V) (sizeof(V)/sizeof((V)[0]))
+
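+/* Compared against struct members that are expected to stay zero-initialized (see verify_monster). */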
+static const char zero_pad[100];
+
+int verify_empty_monster(void *buffer)
+{
+ /* Proper id given. */
+ ns(Monster_table_t) monster = ns(Monster_as_root_with_identifier)(buffer, ns(Monster_file_identifier));
+ /* Invalid id. */
+ ns(Monster_table_t) monster2 = ns(Monster_as_root_with_identifier(buffer, "1234"));
+ /* `with_id` can also mean ignore id when given a null argument. */
+ ns(Monster_table_t) monster3 = ns(Monster_as_root_with_identifier(buffer, 0));
+ /* Excessive text in identifier is ignored. */
+ ns(Monster_table_t) monster4 = ns(Monster_as_root_with_identifier(buffer, "MONSX"));
+ /* Default id should match proper id. */
+ ns(Monster_table_t) monster5 = ns(Monster_as_root(buffer));
+
+ if (!monster) {
+ printf("Monster not available\n");
+ return -1;
+ }
+ if (monster2) {
+ printf("Monster should not accept invalid identifier\n");
+ return -1;
+ }
+ if (monster3 != monster) {
+ printf("Monster should ignore identifier when given a null id\n");
+ return -1;
+ }
+ if (monster4 != monster) {
+ printf("Monster should accept a string as valid identifier");
+ return -1;
+ }
+ if (monster5 != monster) {
+ printf("Monster with default id should be accepted");
+ return -1;
+ }
+ if (ns(Monster_hp(monster)) != 100) {
+ printf("Health points are not as expected\n");
+ return -1;
+ }
+ if (ns(Monster_hp_is_present(monster))) {
+ printf("Health Points should default\n");
+ return -1;
+ }
+ if (ns(Monster_pos_is_present(monster))) {
+ printf("Position should be present\n");
+ return -1;
+ }
+ if (ns(Monster_pos(monster)) != 0) {
+ printf("Position shouldn't be available\n");
+ return -1;
+ }
+ return 0;
+}
+
+int test_enums(flatcc_builder_t *B)
+{
+ (void)B;
+
+ if (ns(neg_enum_neg1) != -12) {
+ printf("neg_enum_neg1 should be -12, was %d\n", ns(neg_enum_neg1));
+ return -1;
+ }
+ if (ns(neg_enum_neg2) != -11) {
+ printf("neg_enum_neg1 should be -11, was %d\n", ns(neg_enum_neg2));
+ return -1;
+ }
+ if (ns(int_enum_int1) != 2) {
+ printf("int_enum_int1 should be 2\n");
+ return -1;
+ }
+ if (ns(int_enum_int2) != 42) {
+ printf("int_enum_int2 should be 42\n");
+ return -1;
+ }
+ if (ns(hex_enum_hexneg) != -2) {
+ printf("enum hexneg should be -2\n");
+ return -1;
+ }
+ if (ns(hex_enum_hex1) != 3) {
+ printf("hex_enum_hex1 should be 3\n");
+ return -1;
+ }
+ if (ns(hex_enum_hex2) != INT32_C(0x7eafbeaf)) {
+ printf("hex_enum_hex2 should be 0x7eafbeaf\n");
+ return -1;
+ }
+ return 0;
+}
+
+int test_type_aliases(flatcc_builder_t *B)
+{
+ int ret = 0;
+ void *buffer = 0;
+ size_t size;
+ ns(TypeAliases_table_t) ta;
+ flatbuffers_uint8_vec_ref_t v8_ref;
+ flatbuffers_double_vec_ref_t vf64_ref;
+
+ flatcc_builder_reset(B);
+
+ v8_ref = flatbuffers_uint8_vec_create(B, 0, 0);
+ vf64_ref = flatbuffers_double_vec_create(B, 0, 0);
+ ns(TypeAliases_create_as_root(B,
+ INT8_MIN, UINT8_MAX, INT16_MIN, UINT16_MAX,
+ INT32_MIN, UINT32_MAX, INT64_MIN, UINT64_MAX, 2.3f, 2.3, v8_ref, vf64_ref));
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ if ((ret = ns(TypeAliases_verify_as_root(buffer, size)))) {
+
+ hexdump("TypeAliases buffer", buffer, size, stderr);
+ printf("could not verify TypeAliases table, got %s\n", flatcc_verify_error_string(ret));
+ goto done;
+ }
+ ta = ns(TypeAliases_as_root(buffer));
+
+ if (ns(TypeAliases_i8(ta)) != INT8_MIN) goto failed;
+ if (ns(TypeAliases_i16(ta)) != INT16_MIN) goto failed;
+ if (ns(TypeAliases_i32(ta)) != INT32_MIN) goto failed;
+ if (ns(TypeAliases_i64(ta)) != INT64_MIN) goto failed;
+ if (ns(TypeAliases_u8(ta)) != UINT8_MAX) goto failed;
+ if (ns(TypeAliases_u16(ta)) != UINT16_MAX) goto failed;
+ if (ns(TypeAliases_u32(ta)) != UINT32_MAX) goto failed;
+ if (ns(TypeAliases_u64(ta)) != UINT64_MAX) goto failed;
+ if (!parse_float_is_equal(ns(TypeAliases_f32(ta)), 2.3f)) goto failed;
+ if (!parse_double_is_equal(ns(TypeAliases_f64(ta)), 2.3)) goto failed;
+ if (sizeof(ns(TypeAliases_i8(ta))) != 1) goto failed;
+ if (sizeof(ns(TypeAliases_i16(ta))) != 2) goto failed;
+ if (sizeof(ns(TypeAliases_i32(ta))) != 4) goto failed;
+ if (sizeof(ns(TypeAliases_i64(ta))) != 8) goto failed;
+ if (sizeof(ns(TypeAliases_u8(ta))) != 1) goto failed;
+ if (sizeof(ns(TypeAliases_u16(ta))) != 2) goto failed;
+ if (sizeof(ns(TypeAliases_u32(ta))) != 4) goto failed;
+ if (sizeof(ns(TypeAliases_u64(ta))) != 8) goto failed;
+ if (sizeof(ns(TypeAliases_f32(ta))) != 4) goto failed;
+ if (sizeof(ns(TypeAliases_f64(ta))) != 8) goto failed;
+
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+
+failed:
+ ret = -1;
+ printf("Scalar type alias has unexpected value or size\n");
+ goto done;
+}
+
+int test_empty_monster(flatcc_builder_t *B)
+{
+ int ret;
+ ns(Monster_ref_t) root;
+ void *buffer;
+ size_t size;
+
+ flatcc_builder_reset(B);
+
+ flatbuffers_buffer_start(B, ns(Monster_file_identifier));
+ ns(Monster_start(B));
+ /* Cannot make monster empty as name is required. */
+ ns(Monster_name_create_str(B, "MyMonster"));
+ root = ns(Monster_end(B));
+ flatbuffers_buffer_end(B, root);
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+
+ hexdump("empty monster table", buffer, size, stderr);
+ if ((ret = verify_empty_monster(buffer))) {
+ goto done;
+ }
+
+ if ((ret = ns(Monster_verify_as_root_with_identifier(buffer, size, ns(Monster_file_identifier))))) {
+ printf("could not verify empty monster, got %s\n", flatcc_verify_error_string(ret));
+ return -1;
+ }
+
+ /*
+ * Note: this will assert if the verifier is set to assert during
+ * debugging. Also note that a buffer of size - 1 is not necessarily
+ * invalid, but because we pack vtables tightly at the end, we expect
+ * failure in this case.
+ */
+ if (flatcc_verify_ok == ns(Monster_verify_as_root(
+ buffer, size - 1))) {
+ printf("Monster verify failed to detect short buffer\n");
+ return -1;
+ }
+
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int test_typed_empty_monster(flatcc_builder_t *B)
+{
+ int ret = -1;
+ ns(Monster_ref_t) root;
+ void *buffer;
+ size_t size;
+ flatbuffers_fid_t fid = { 0 };
+
+ flatcc_builder_reset(B);
+
+ flatbuffers_buffer_start(B, ns(Monster_type_identifier));
+ ns(Monster_start(B));
+ /* Cannot make monster empty as name is required. */
+ ns(Monster_name_create_str(B, "MyMonster"));
+ root = ns(Monster_end(B));
+ flatbuffers_buffer_end(B, root);
+
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+
+ hexdump("empty typed monster table", buffer, size, stderr);
+
+ if (flatbuffers_get_type_hash(buffer) != flatbuffers_type_hash_from_name("MyGame.Example.Monster")) {
+
+ printf("Monster does not have the expected type, got %lx\n", (unsigned long)flatbuffers_get_type_hash(buffer));
+ goto done;
+ }
+
+ if (!flatbuffers_has_type_hash(buffer, ns(Monster_type_hash))) {
+ printf("Monster does not have the expected type\n");
+ goto done;
+ }
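+ /* The literal 0x330ef481 is expected to equal ns(Monster_type_hash). */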
+ if (!flatbuffers_has_type_hash(buffer, 0x330ef481)) {
+ printf("Monster does not have the expected type\n");
+ goto done;
+ }
+
+ if (!verify_empty_monster(buffer)) {
+ printf("typed empty monster should not verify with default identifier\n");
+ goto done;
+ }
+
+ if ((ret = ns(Monster_verify_as_root_with_identifier(buffer, size, ns(Monster_type_identifier))))) {
+ printf("could not verify typed empty monster, got %s\n", flatcc_verify_error_string(ret));
+ goto done;
+ }
+
+ if ((ret = ns(Monster_verify_as_typed_root(buffer, size)))) {
+ printf("could not verify typed empty monster, got %s\n", flatcc_verify_error_string(ret));
+ goto done;
+ }
+
+ if ((ret = ns(Monster_verify_as_root_with_type_hash(buffer, size, ns(Monster_type_hash))))) {
+ printf("could not verify empty monster with type hash, got %s\n", flatcc_verify_error_string(ret));
+ goto done;
+ }
+
+ if ((ret = ns(Monster_verify_as_root_with_type_hash(buffer, size, flatbuffers_type_hash_from_name("MyGame.Example.Monster"))))) {
+ printf("could not verify empty monster with explicit type hash, got %s\n", flatcc_verify_error_string(ret));
+ goto done;
+ }
+
+ flatbuffers_identifier_from_type_hash(0x330ef481, fid);
+ if ((ret = ns(Monster_verify_as_root_with_identifier(buffer, size, fid)))) {
+ printf("could not verify typed empty monster, got %s\n", flatcc_verify_error_string(ret));
+ goto done;
+ }
+
+ if (!ns(Monster_verify_as_root(buffer, size))) {
+ printf("should not have verified with the original identifier since we use types\n");
+ goto done;
+ }
+ ret = 0;
+
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int verify_monster(void *buffer)
+{
+ ns(Monster_table_t) monster, mon, mon2;
+ ns(Monster_vec_t) monsters;
+ ns(Any_union_type_t) test_type;
+ ns(Any_union_t) test_union;
+ /* This is an encoded struct pointer. */
+ ns(Vec3_struct_t) vec;
+ const char *name;
+ /* This is a more precise type as there is a length field prefix. */
+ nsc(string_t) name2;
+ /* This is a native struct type. */
+ ns(Vec3_t) v;
+ ns(Test_vec_t) testvec;
+ ns(Test_t) testvec_data[] = {
+ {0x10, 0x20}, {0x30, 0x40}, {0x50, 0x60}, {0x70, (int8_t)0x80}, {0x191, (int8_t)0x91}
+ };
+ ns(Test_struct_t) test;
+ nsc(string_vec_t) strings;
+ nsc(string_t) s;
+ nsc(bool_vec_t) bools;
+ ns(Stat_table_t) stat;
+ int booldata[] = { 0, 1, 1, 0 };
+ const uint8_t *inv;
+ size_t i;
+
+ if (!nsc(has_identifier(buffer, 0))) {
+ printf("wrong monster identifier (when ignoring)\n");
+ return -1;
+ }
+ if (!nsc(has_identifier(buffer, "MONS"))) {
+ printf("wrong monster identifier (when explicit)\n");
+ return -1;
+ }
+ if (!nsc(has_identifier(buffer, "MONSTER"))) {
+ printf("extra characters in identifier should be ignored\n");
+ return -1;
+ }
+ if (nsc(has_identifier(buffer, "MON1"))) {
+ printf("accepted wrong monster identifier (when explicit)\n");
+ return -1;
+ }
+ if (!nsc(has_identifier(buffer, ns(Monster_file_identifier)))) {
+ printf("wrong monster identifier (via defined identifier)\n");
+ return -1;
+ }
+
+ if (!(monster = ns(Monster_as_root(buffer)))) {
+ printf("Monster not available\n");
+ return -1;
+ }
+ if (ns(Monster_hp(monster)) != 80) {
+ printf("Health points are not as expected\n");
+ return -1;
+ }
+ if (!(vec = ns(Monster_pos(monster)))) {
+ printf("Position is absent\n");
+ return -1;
+ }
+ if ((size_t)vec & 15) {
+ printf("Force align of Vec3 struct not correct\n");
+ }
+ /* -3.2f is actually -3.20000005 and not -3.2 due to representation loss.
+ * For 32-bit GCC compilers, -3.2f might be another value, so use lower
+ * precision portable comparison. */
+ if (!parse_float_is_equal(ns(Vec3_z(vec)), -3.2f)) {
+ printf("Position failing on z coordinate\n");
+ return -1;
+ }
+ if (nsc(is_native_pe())) {
+ if (!parse_float_is_equal(vec->x, 1.0f) ||
+ !parse_float_is_equal(vec->y, 2.0f) ||
+ !parse_float_is_equal(vec->z, -3.2f)) {
+ printf("Position is incorrect\n");
+ return -1;
+ }
+ }
+ /*
+ * NOTE: copy_from_pe and friends are provided in the builder
+ * interface, not the read only interface, but for advanced uses
+ * these may also be used for read operations.
+ * Also note that if we want the target struct fully null padded
+ * the struct must be zeroed first. The _clear operation is one way
+ * to achieve this - but it is not required for normal read access.
+ * See common_builder for more details. These operations can
+ * actually be very useful in their own right, disregarding any
+ * other flatbuffer logic when dealing with struct endian
+ * conversions in other protocols.
+ */
+ ns(Vec3_clear(&v)); /* Not strictly needed here. */
+ ns(Vec3_copy_from_pe(&v, vec));
+ if (!parse_float_is_equal(v.x, 1.0f) ||
+ !parse_float_is_equal(v.y, 2.0f) ||
+ !parse_float_is_equal(v.z, -3.2f)) {
+ printf("Position is incorrect after copy\n");
+ return -1;
+ }
+ if (vec->test1 != 0 || vec->test2 != 0 ||
+ memcmp(&vec->test3, zero_pad, sizeof(vec->test3)) != 0) {
+ printf("Zero default not correct for struct\n");
+ return -1;
+ }
+ name = ns(Monster_name(monster));
+ if (!name || strcmp(name, "MyMonster")) {
+ printf("Name is not correct\n");
+ return -1;
+ }
+ name2 = ns(Monster_name(monster));
+ if (nsc(string_len(name)) != 9 || nsc(string_len(name2)) != 9) {
+ printf("Name length is not correct\n");
+ return -1;
+ }
+ if (ns(Monster_color(monster)) != ns(Color_Green)) {
+ printf("Monster isn't a green monster\n");
+ return -1;
+ }
+ if (strcmp(ns(Color_name)(ns(Color_Green)), "Green")) {
+ printf("Enum name map does not have a green solution\n");
+ return -1;
+ }
+ /*
+ * This is a bit tricky because Color is a bit flag, so we can have
+ * combinations that are expected but that we do not know. The
+ * known value logic does not accommodate that.
+ */
+ if (!ns(Color_is_known_value(ns(Color_Green)))) {
+ printf("Color enum does not recognize the value of the Green flag\n");
+ return -1;
+ }
+ if (!ns(Color_is_known_value(1))) {
+ printf("Color enum does not recognize the value of the Red flag\n");
+ return -1;
+ }
+ if (ns(Color_is_known_value(4))) {
+ printf("Color enum recognizes a value it shouldn't\n");
+ return -1;
+ }
+ if (!ns(Color_is_known_value(8))) {
+ printf("Color enum does not recognize the value of the Blue flag\n");
+ return -1;
+ }
+ if (ns(Color_is_known_value(9))) {
+ printf("Color enum recognizes a value it shouldn't\n");
+ return -1;
+ }
+ if (!ns(Any_is_known_type(ns(Any_Monster)))) {
+ printf("Any type does not accept Monster\n");
+ return -1;
+ }
+ if (ns(Any_is_known_type(42))) {
+ printf("Any type recognizes unexpected type\n");
+ return -1;
+ }
+ inv = ns(Monster_inventory(monster));
+ if ((nsc(uint8_vec_len(inv))) != 10) {
+ printf("Inventory length unexpected\n");
+ return -1;
+ }
+ for (i = 0; i < nsc(uint8_vec_len(inv)); ++i) {
+ if (nsc(uint8_vec_at(inv, i)) != i) {
+ printf("inventory item #%d is wrong\n", (int)i);
+ return -1;
+ }
+ }
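+ /* Note: a comparison can sit inside ns(...) because the namespace macro
+ * only pastes onto the leading identifier token. */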
+ if (ns(Monster_mana(monster) != 150)) {
+ printf("Mana not default\n");
+ return -1;
+ }
+ if (ns(Monster_mana_is_present(monster))) {
+ printf("Mana should default\n");
+ return -1;
+ }
+ if (!ns(Monster_hp_is_present(monster))) {
+ printf("Health points should be present\n");
+ return -1;
+ }
+ if (!ns(Monster_pos_is_present(monster))) {
+ printf("Position should be present\n");
+ return -1;
+ }
+ testvec = ns(Monster_test4(monster));
+ if (ns(Test_vec_len(testvec)) != 5) {
+ printf("Test4 vector is not the right length.\n");
+ return -1;
+ }
+ /*
+ * This particular test requires that the in-memory
+ * array layout matches the array layout in the buffer.
+ */
+ if (flatbuffers_is_native_pe()) {
+ for (i = 0; i < 5; ++i) {
+ test = ns(Test_vec_at(testvec, i));
+ if (testvec_data[i].a != ns(Test_a(test))) {
+ printf("Test4 vec failed at index %d, member a\n", (int)i);
+ return -1;
+ }
+ if (testvec_data[i].b != ns(Test_b(test))) {
+ printf("Test4 vec failed at index %d, member a\n", (int)i);
+ return -1;
+ }
+ }
+ } else {
+ printf("SKIPPING DIRECT VECTOR ACCESS WITH NON-NATIVE ENDIAN PROTOCOL\n");
+ }
+ monsters = ns(Monster_testarrayoftables(monster));
+ if (ns(Monster_vec_len(monsters)) != 8) {
+ printf("unexpected monster vector length\n");
+ return -1;
+ }
+ mon = ns(Monster_vec_at(monsters, 5));
+ assert(mon);
+ name = ns(Monster_name(mon));
+ if (strcmp(name, "TwoFace")) {
+ printf("monster 5 isn't TwoFace");
+ return -1;
+ }
+ mon2 = ns(Monster_vec_at(monsters, 1));
+ if (mon2 != mon) {
+ printf("DAG test failed, monster[5] != monster[1] as pointer\n");
+ return -1;
+ }
+ name = ns(Monster_name(mon2));
+ if (strcmp(name, "TwoFace")) {
+ printf("monster 1 isn't Joker, it is: %s\n", name);
+ return -1;
+ }
+ mon = ns(Monster_vec_at(monsters, 2));
+ name = ns(Monster_name(mon));
+ if (strcmp(name, "Joker")) {
+ printf("monster 2 isn't Joker, it is: %s\n", name);
+ return -1;
+ }
+ mon = ns(Monster_vec_at(monsters, 0));
+ name = ns(Monster_name(mon));
+ if (strcmp(name, "Gulliver")) {
+ printf("monster 0 isn't Gulliver, it is: %s\n", name);
+ return -1;
+ }
+ mon = ns(Monster_vec_at(monsters, 3));
+ name = ns(Monster_name(mon));
+ if (strcmp(name, "TwoFace")) {
+ printf("monster 3 isn't TwoFace, it is: %s\n", name);
+ return -1;
+ }
+ mon = ns(Monster_vec_at(monsters, 4));
+ name = ns(Monster_name(mon));
+ if (strcmp(name, "Joker")) {
+ printf("monster 4 isn't Joker, it is: %s\n", name);
+ return -1;
+ }
+ mon = ns(Monster_vec_at(monsters, 6));
+ name = ns(Monster_name(mon));
+ if (strcmp(name, "Gulliver")) {
+ printf("monster 6 isn't Gulliver, it is: %s\n", name);
+ return -1;
+ }
+ mon = ns(Monster_vec_at(monsters, 7));
+ name = ns(Monster_name(mon));
+ if (strcmp(name, "Joker")) {
+ printf("monster 7 isn't Gulliver, it is: %s\n", name);
+ return -1;
+ }
+ strings = ns(Monster_testarrayofstring(monster));
+ if (nsc(string_vec_len(strings) != 3)) {
+ printf("Monster array of strings has wrong length\n");
+ return -1;
+ }
+ if (strcmp(nsc(string_vec_at(strings, 0)), "Hello")) {
+ printf("string elem 0 is wrong\n");
+ return -1;
+ }
+ s = nsc(string_vec_at(strings, 1));
+ if (nsc(string_len(s)) != 2) {
+ printf("string 1 has wrong length");
+ return -1;
+ }
+ if (memcmp(s, ",\0", 2)) {
+ printf("string elem 1 has wrong content\n");
+ return -1;
+ }
+ if (strcmp(nsc(string_vec_at(strings, 2)), "world!")) {
+ printf("string elem 2 is wrong\n");
+ return -1;
+ }
+ if (!ns(Monster_testarrayofbools_is_present(monster))) {
+ printf("array of bools is missing\n");
+ return -1;
+ }
+ bools = ns(Monster_testarrayofbools(monster));
+ if (nsc(bool_vec_len(bools) != 4)) {
+ printf("bools have wrong vector length\n");
+ return -1;
+ }
+ if (sizeof(bools[0]) != 1) {
+ printf("bools have wrong element size\n");
+ return -1;
+ }
+ for (i = 0; i < 4; ++i) {
+ if (nsc(bool_vec_at(bools, i) != booldata[i])) {
+ printf("bools vector elem %d is wrong\n", (int)i);
+ return -1;
+ }
+ }
+ test_type = ns(Monster_test_type(monster));
+ if (test_type != ns(Any_Monster)) {
+ printf("the monster test type is not Any_Monster\n");
+ return -1;
+ }
+ mon = ns(Monster_test(monster));
+ if (strcmp(ns(Monster_name(mon)), "TwoFace")) {
+ printf("the test monster is not TwoFace\n");
+ return -1;
+ }
+ mon = ns(Monster_enemy(monster));
+ if (strcmp(ns(Monster_name(mon)), "the enemy")) {
+ printf("the monster is not the enemy\n");
+ return -1;
+ }
+ if (ns(Monster_test_type(mon)) != ns(Any_NONE)) {
+ printf("the enemy test type is not Any_NONE\n");
+ return -1;
+ }
+ test_union = ns(Monster_test_union(monster));
+ if (test_union.type != test_type) {
+ printf("the monster test union type is not Any_Monster\n");
+ return -1;
+ }
+ if (test_union.value != ns(Monster_test(monster))) {
+ printf("the union monster has gone awol\n");
+ return -1;
+ }
+ monsters = ns(Monster_testarrayoftables(mon));
+ i = ns(Monster_vec_len(monsters));
+ mon = ns(Monster_vec_at(monsters, i - 1));
+ if (ns(Monster_test_type)(mon) != ns(Any_Monster)) {
+ printf("The monster variant added with value, type methods is not working\n");
+ return -1;
+ }
+ mon = ns(Monster_test(mon));
+ if (strcmp(ns(Monster_name(mon)), "TwoFace")) {
+ printf("The monster variant added with value method is incorrect\n");
+ return -1;
+ }
+ if (ns(Monster_testbool(monster))) {
+ printf("testbool should not\n");
+ return -1;
+ }
+ if (!ns(Monster_testempty_is_present(monster))) {
+ printf("The empty table isn't present\n");
+ return -1;
+ }
+ stat = ns(Monster_testempty(monster));
+ if (ns(Stat_id_is_present(stat))
+ || ns(Stat_val_is_present(stat))
+ || ns(Stat_count_is_present(stat))) {
+ printf("empty table isn't empty\n");
+ return -1;
+ }
+ return 0;
+}
+
+int gen_monster(flatcc_builder_t *B, int with_size)
+{
+ uint8_t inv[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ ns(Vec3_t) *vec;
+ ns(Test_t) *test, x;
+ ns(Monster_ref_t) mon, mon2, monsters[2];
+ ns(Monster_ref_t) *aoft;
+ nsc(string_ref_t) name;
+ nsc(string_ref_t) strings[3];
+ nsc(bool_t)bools[] = { 0, 1, 1, 0 };
+ flatcc_builder_reset(B);
+
+ /*
+ * Some FlatBuffer language interfaces require a string and other
+ * non-embeddable objects to be created before the table storing them
+ * is created. This is not necessary (but possible) here
+ * because the flatcc_builder maintains an internal stack.
+ */
+ if (with_size) {
+ ns(Monster_start_as_root_with_size(B));
+ } else {
+ ns(Monster_start_as_root(B));
+ }
+
+ ns(Monster_hp_add(B, 80));
+ vec = ns(Monster_pos_start(B));
+ vec->x = 1, vec->y = 2, vec->z = -3.2f;
+ /* _end call converts to protocol endian format. */
+ ns(Monster_pos_end(B));
+ /*
+ * NOTE: Monster_name_add requires a reference to an
+ * already created string - adding a string directly
+ * will compile with a warning but fail badly. Instead
+ * create the string first, or do it in-place with
+ * the helper function `Monster_name_create_str`, or
+ * with one of several other options.
+ *
+ * Wrong: ns(Monster_name_add(B, "MyMonster"));
+ */
+ ns(Monster_name_create_str(B, "MyMonster"));
+
+ ns(Monster_color_add)(B, ns(Color_Green));
+
+ ns(Monster_inventory_create(B, inv, c_vec_len(inv)));
+
+ /* The vector is built in native endian format. */
+ ns(Monster_test4_start(B));
+ test = ns(Monster_test4_extend(B, 1));
+ test->a = 0x10;
+ test->b = 0x20;
+ test = ns(Monster_test4_extend(B, 2));
+ test->a = 0x30;
+ test->b = 0x40;
+ test[1].a = 0x50;
+ test[1].b = 0x60;
+ ns(Monster_test4_push_create(B, 0x70, (int8_t)0x80));
+ /*
+ * Zero padding within struct
+ * - not needed when receiving a pointer like `test` in the above.
+ */
+ ns(Test_clear(&x));
+ x.a = 0x190; /* This is a short. */
+ x.b = (int8_t)0x91; /* This is a byte. */
+ /* And x also has a hidden trailing padding byte. */
+ ns(Monster_test4_push(B, &x));
+ ns(Monster_test4_push(B, &x));
+ /* We can use either field mapped push or push on the type. */
+ ns(Test_vec_push(B, &x));
+ /*
+ * `_reserved_len` is similar to the `_vec_len` function in the
+ * reader interface but `_vec_len` would not work here.
+ */
+ assert(ns(Monster_test4_reserved_len(B)) == 7);
+ ns(Monster_test4_truncate(B, 2));
+ assert(ns(Monster_test4_reserved_len(B)) == 5);
+
+ /* It is not valid to dereference old pointers unless we call edit first. */
+ test = ns(Monster_test4_edit(B));
+ test[4].a += 1; /* 0x191 */
+
+ /* Each vector element is converted to protocol endian format at end. */
+ ns(Monster_test4_end(B));
+
+ /* Test creating object before containing vector. */
+ ns(Monster_start(B));
+ name = nsc(string_create(B, "TwoFace", 7));
+ ns(Monster_name_add(B, name));
+ mon = ns(Monster_end(B));
+ /*
+ * Here we create several monsters with only a name - this also
+ * tests reuse of vtables.
+ */
+ ns(Monster_testarrayoftables_start(B));
+ aoft = ns(Monster_testarrayoftables_extend(B, 2));
+ /*
+ * It is usually not ideal to update reference vectors directly and
+ * there must not be any unassigned elements (null) when the array
+ * ends. Normally a push_start ... push_end, or a push_create
+ * operation is preferable.
+ */
+ aoft[0] = mon;
+ /*
+ * But we can do things not otherwise possible - like constructing a
+ * DAG. Note that reference values (unlike pointers) are stable as
+ * long as the buffer is open for write, also past this vector.
+ */
+ aoft[1] = mon;
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_strn(B, "Joker", 30));
+ mon2 = *ns(Monster_testarrayoftables_push_end(B));
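+ /* Note: push_end above returns a pointer to the reference just pushed
+ * onto the open vector; dereferencing it keeps a copy. */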
+ aoft = ns(Monster_testarrayoftables_extend(B, 3));
+ aoft[0] = mon;
+ aoft[1] = mon2;
+ ns(Monster_testarrayoftables_truncate(B, 1));
+ assert(ns(Monster_testarrayoftables_reserved_len(B)) == 5);
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_strn(B, "Gulliver at the Big Endians", 8));
+ /* We cannot call reserved_len while a monster is still open, */
+ monsters[0] = *ns(Monster_testarrayoftables_push_end(B));
+ /* but here the vector is on top of the stack again. */
+ assert(ns(Monster_testarrayoftables_reserved_len(B)) == 6);
+ /* Swap elements 0 and 5 of the reference vector. */
+ aoft = ns(Monster_testarrayoftables_edit(B));
+ mon2 = aoft[5];
+ monsters[1] = aoft[2];
+ aoft[5] = mon;
+ aoft[0] = mon2;
+ ns(Monster_testarrayoftables_append(B, monsters, 2));
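+ /* The two appended references bring the vector to 8 elements,
+ * which is what verify_monster expects. */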
+ /*
+ * The end call converts the reference array into an endian encoded
+ * offset vector.
+ */
+ ns(Monster_testarrayoftables_end(B));
+
+ strings[0] = nsc(string_create_str(B, "Hello"));
+ /* Test embedded null character.
+ * Note _strn is at most n, or up to 0 termination:
+ * wrong: strings[1] = nsc(string_create_strn(B, ",\0", 2));
+ */
+ strings[1] = nsc(string_create(B, ",\0", 2));
+ strings[2] = nsc(string_create_str(B, "world!"));
+ ns(Monster_testarrayofstring_create(B, strings, 3));
+
+ assert(c_vec_len(bools) == 4);
+ ns(Monster_testarrayofbools_start(B));
+ ns(Monster_testarrayofbools_append(B, bools, 1));
+ ns(Monster_testarrayofbools_append(B, bools + 1, 3));
+ ns(Monster_testarrayofbools_end(B));
+
+ /*
+ * This is using a constructor argument list where a union
+ * is a single argument, unlike the C++ interface.
+ * A union is given a type and a table reference.
+ *
+ * We are not verifying the result as this is only to stress
+ * the type system of the builder - except: the last array
+ * element is tested to ensure add_value is getting through.
+ */
+ ns(Monster_test_add)(B, ns(Any_as_Monster(mon)));
+
+ ns(Monster_enemy_start(B));
+ ns(Monster_name_create_str(B, "the enemy"));
+
+ /* Create array of monsters to test various union constructors. */
+ ns(Monster_testarrayoftables_start(B));
+
+ ns(Monster_vec_push_start(B));
+ ns(Monster_test_add)(B, ns(Any_as_Monster(mon)));
+ /* Name is required. */
+ ns(Monster_name_create_str(B, "any name"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_test_Monster_add(B, mon));
+ ns(Monster_name_create_str(B, "any name"));
+ ns(Monster_vec_push_end(B));
+ /*
+ * `push_start`: We can use the field specific method, or the type specific method
+ * that the field maps to.
+ */
+ ns(Monster_testarrayoftables_push_start(B));
+ /*
+ * This is mostly for internal use in create methods so the type
+ * can be added last and pack better in the table.
+ * `add_value` still takes union_ref because it is a NOP if
+ * the union type is NONE.
+ */
+ ns(Monster_test_add_value(B, ns(Any_as_Monster(mon))));
+ ns(Monster_name_create_str(B, "any name"));
+ ns(Monster_test_add_type(B, ns(Any_Monster)));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_end(B));
+
+ ns(Monster_enemy_end(B));
+
+ ns(Monster_testbool_add(B, 0));
+
+ ns(Monster_testempty_start(B));
+ ns(Monster_testempty_end(B));
+
+ ns(Monster_end_as_root(B));
+ return 0;
+}
+
+int test_monster(flatcc_builder_t *B)
+{
+ void *buffer;
+ size_t size;
+ int ret;
+
+ gen_monster(B, 0);
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ hexdump("monster table", buffer, size, stderr);
+ if ((ret = ns(Monster_verify_as_root(buffer, size)))) {
+ printf("Monster buffer failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ return -1;
+ }
+ ret = verify_monster(buffer);
+
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int test_monster_with_size(flatcc_builder_t *B)
+{
+ void *buffer, *frame;
+ size_t size, size2, esize;
+ int ret;
+
+ gen_monster(B, 1);
+
+ frame = flatcc_builder_finalize_aligned_buffer(B, &size);
+ hexdump("monster table with size", frame, size, stderr);
+ if (((size_t)frame & 15)) {
+ printf("Platform did not provide 16 byte aligned allocation and needs special attention.");
+ printf("buffer address: %x\n", (flatbuffers_uoffset_t)(size_t)frame);
+ return -1;
+ }
+
+ buffer = flatbuffers_read_size_prefix(frame, &size2);
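+ /* read_size_prefix returns a pointer just past the size field and stores
+ * the prefixed size in size2. */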
+ esize = size - sizeof(flatbuffers_uoffset_t);
+ if (size2 != esize) {
+ printf("Size prefix has unexpected size, got %i, expected %i\n", (int)size2, (int)esize);
+ return -1;
+ }
+ if ((ret = ns(Monster_verify_as_root(buffer, size2)))) {
+ printf("Monster buffer with size prefix failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ return -1;
+ }
+ ret = verify_monster(buffer);
+
+ flatcc_builder_aligned_free(frame);
+ return ret;
+}
+
+int test_cloned_monster(flatcc_builder_t *B)
+{
+ void *buffer;
+ void *cloned_buffer;
+ size_t size;
+ int ret;
+ flatcc_refmap_t refmap, *refmap_old;
+
+ flatcc_refmap_init(&refmap);
+ gen_monster(B, 0);
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ hexdump("monster table", buffer, size, stderr);
+ if ((ret = ns(Monster_verify_as_root(buffer, size)))) {
+ printf("Monster buffer failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ return -1;
+ }
+ if (verify_monster(buffer)) {
+ return -1;
+ }
+ flatcc_builder_reset(B);
+
+ /*
+ * Clone works without setting a refmap - but then shared references
+ * get expanded - and then the verify monster check fails on a DAG
+ * test.
+ */
+ refmap_old = flatcc_builder_set_refmap(B, &refmap);
+ if (!ns(Monster_clone_as_root(B, ns(Monster_as_root(buffer))))) {
+ printf("Cloned Monster didn't actually clone.");
+ return -1;
+ };
+ /*
+ * Restoring old refmap (or zeroing) is optional if we cleared the
+ * buffer in this scope, but we don't, so we must detach and clean up
+ * the refmap manually. refmap_old is likely just null, but this
+ * way we do not interfere with caller.
+ */
+ flatcc_builder_set_refmap(B, refmap_old);
+ cloned_buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ hexdump("cloned monster table", cloned_buffer, size, stderr);
+ if ((ret = ns(Monster_verify_as_root(cloned_buffer, size)))) {
+ printf("Cloned Monster buffer failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ return -1;
+ }
+ if (verify_monster(cloned_buffer)) {
+ printf("Cloned Monster did not have the expected content.");
+ return -1;
+ }
+
+ flatcc_refmap_clear(&refmap);
+ flatcc_builder_aligned_free(buffer);
+ flatcc_builder_aligned_free(cloned_buffer);
+ return ret;
+}
+
+int test_string(flatcc_builder_t *B)
+{
+ ns(Monster_table_t) mon;
+ void *buffer;
+ char *s;
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_start(B));
+ s = ns(Monster_name_extend(B, 3));
+ s[0] = '1';
+ s[1] = '2';
+ s[2] = '3';
+ ns(Monster_name_append_str(B, "4"));
+ assert(ns(Monster_name_reserved_len(B)) == 4);
+ ns(Monster_name_append_strn(B, "5678", 30));
+ assert(ns(Monster_name_reserved_len(B)) == 8);
+ ns(Monster_name_append(B, "90", 2));
+ assert(ns(Monster_name_reserved_len(B)) == 10);
+ ns(Monster_name_truncate(B, 3));
+ assert(ns(Monster_name_reserved_len(B)) == 7);
+ s = ns(Monster_name_edit(B));
+ s[4] = '.';
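+ /* The name is now "1234567" with index 4 changed, i.e. "1234.67". */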
+ ns(Monster_name_end(B));
+ ns(Monster_end_as_root(B));
+ /* get_direct_buffer only works with small buffers and the default emitter. */
+ buffer = flatcc_builder_get_direct_buffer(B, 0);
+ assert(buffer);
+ mon = ns(Monster_as_root(buffer));
+ if (strcmp(ns(Monster_name(mon)), "1234.67")) {
+ printf("string test failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+int test_sort_find(flatcc_builder_t *B)
+{
+ size_t pos;
+ ns(Monster_table_t) mon;
+ ns(Monster_vec_t) monsters;
+ ns(Monster_mutable_vec_t) mutable_monsters;
+ void *buffer;
+ size_t size;
+ int ret = -1;
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+
+ ns(Monster_testarrayoftables_start(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "TwoFace"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Joker"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Gulliver"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Alice"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Gulliver"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_end(B));
+
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+
+ hexdump("unsorted monster buffer", buffer, size, stderr);
+ mon = ns(Monster_as_root(buffer));
+ monsters = ns(Monster_testarrayoftables(mon));
+ assert(monsters);
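+ /* Sorting rearranges the offset vector in place, so we need a cast to the
+ * mutable vector type. */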
+ mutable_monsters = (ns(Monster_mutable_vec_t))monsters;
+ ns(Monster_vec_sort_by_name(mutable_monsters));
+
+ hexdump("sorted monster buffer", buffer, size, stderr);
+
+ if (ns(Monster_vec_len(monsters)) != 5) {
+ printf("Sorted monster vector has wrong length\n");
+ goto done;
+ }
+ if (strcmp(ns(Monster_name(ns(Monster_vec_at(monsters, 0)))), "Alice")) {
+ printf("sort isn't working at elem 0\n");
+ goto done;
+ }
+ if (strcmp(ns(Monster_name(ns(Monster_vec_at(monsters, 1)))), "Gulliver")) {
+ printf("sort isn't working at elem 1\n");
+ goto done;
+ }
+ if (strcmp(ns(Monster_name(ns(Monster_vec_at(monsters, 2)))), "Gulliver")) {
+ printf("sort isn't working at elem 2\n");
+ goto done;
+ }
+ if (strcmp(ns(Monster_name(ns(Monster_vec_at(monsters, 3)))), "Joker")) {
+ printf("sort isn't working at elem 3\n");
+ goto done;
+ }
+ if (strcmp(ns(Monster_name(ns(Monster_vec_at(monsters, 4)))), "TwoFace")) {
+ printf("sort isn't working at elem 4\n");
+ goto done;
+ }
+ /*
+ * The heap sort isn't stable, but it should keep all elements
+ * unique. Note that we could still have identical objects if we
+ * actually stored the same object twice in a DAG structure.
+ */
+ if (ns(Monster_vec_at(monsters, 1)) == ns(Monster_vec_at(monsters, 2))) {
+ printf("Two identical sort keys should not be identical objects (in this case)\n");
+ goto done;
+ }
+
+ if (3 != ns(Monster_vec_find(monsters, "Joker"))) {
+ printf("find by default key did not find the Joker\n");
+ goto done;
+ }
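+ /* The _n variants use at most the first n characters of the key,
+ * so "Joker2" with n = 5 matches "Joker". */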
+ if (3 != ns(Monster_vec_find_n(monsters, "Joker2", 5))) {
+ printf("find by default key did not find the Joker with n\n");
+ goto done;
+ }
+ /*
+ * We can have multiple keys on a table or struct by naming the sort
+ * and find operations.
+ */
+ if (3 != ns(Monster_vec_find_by_name(monsters, "Joker"))) {
+ printf("find did not find the Joker\n");
+ goto done;
+ }
+ if (3 != ns(Monster_vec_find_n_by_name(monsters, "Joker3", 5))) {
+ printf("find did not find the Joker with n\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_find_by_name(monsters, "Jingle"))) {
+ printf("not found not working\n");
+ goto done;
+ }
+ if (0 != ns(Monster_vec_find_by_name(monsters, "Alice"))) {
+ printf("Alice not found\n");
+ goto done;
+ }
+ /*
+ * The search, unlike sort, is stable and should return the first
+ * index of repeated keys.
+ */
+ if (1 != (pos = ns(Monster_vec_find_by_name(monsters, "Gulliver")))) {
+ printf("Gulliver not found\n");
+ printf("got %d\n", (int)pos);
+ goto done;
+ }
+ if (4 != (pos = ns(Monster_vec_find_by_name(monsters, "TwoFace")))) {
+ printf("TwoFace not found\n");
+ printf("got %d\n", (int)pos);
+ goto done;
+ }
+
+ /*
+ * Just make sure the default key has a sort method - it is the same
+ * as sort_by_name for the monster schema.
+ */
+ ns(Monster_vec_sort(mutable_monsters));
+ ret = 0;
+
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+static size_t count_monsters(ns(Monster_vec_t) monsters, const char *name)
+{
+ size_t i;
+ size_t count = 0;
+
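+ /* Walk the vector with scan/scan_ex, resuming just past the previous
+ * match, and count occurrences of name. */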
+ for (i = ns(Monster_vec_scan)(monsters, name);
+ i != nsc(not_found);
+ i = ns(Monster_vec_scan_ex)(monsters, i + 1, nsc(end), name)) {
+ ++count;
+ }
+
+ return count;
+}
+
+int test_scan(flatcc_builder_t *B)
+{
+ size_t pos;
+ ns(Monster_table_t) mon;
+ ns(Monster_vec_t) monsters;
+ nsc(uint8_vec_t) inv;
+ nsc(string_vec_t) strings;
+ void *buffer;
+ size_t size;
+ uint8_t invdata[] = { 6, 7, 1, 3, 4, 3, 2 };
+ int ret = -1;
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_inventory_create(B, invdata, c_vec_len(invdata)));
+
+ ns(Monster_testarrayofstring_start(B));
+ ns(Monster_testarrayofstring_end(B));
+
+ ns(Monster_testarrayoftables_start(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "TwoFace"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Joker"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Gulliver"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Alice"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_push_start(B));
+ ns(Monster_name_create_str(B, "Gulliver"));
+ ns(Monster_testarrayoftables_push_end(B));
+
+ ns(Monster_testarrayoftables_end(B));
+
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ mon = ns(Monster_as_root(buffer));
+ monsters = ns(Monster_testarrayoftables(mon));
+ assert(monsters);
+ inv = ns(Monster_inventory(mon));
+ assert(inv);
+ strings = ns(Monster_testarrayofstring(mon));
+ assert(strings);
+
+ if (1 != ns(Monster_vec_scan(monsters, "Joker"))) {
+ printf("scan_by did not find the Joker\n");
+ goto done;
+ }
+ if (1 != ns(Monster_vec_rscan(monsters, "Joker"))) {
+ printf("rscan_by did not find the Joker\n");
+ goto done;
+ }
+ if (1 != ns(Monster_vec_scan_n(monsters, "Joker3", 5))) {
+ printf("scan_by did not find the Joker with n\n");
+ goto done;
+ }
+ if (1 != ns(Monster_vec_rscan_n(monsters, "Joker3", 5))) {
+ printf("scan_by did not find the Joker with n\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_scan_ex(monsters, 2, nsc(end), "Joker"))) {
+ printf("scan_from found Joker past first occurence\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_scan(monsters, "Jingle"))) {
+ printf("not found not working\n");
+ goto done;
+ }
+ if (0 != ns(Monster_vec_scan(monsters, "TwoFace"))) {
+ printf("TwoFace not found\n");
+ goto done;
+ }
+ if (2 != ns(Monster_vec_scan_by_name(monsters, "Gulliver"))) {
+ printf("Gulliver not found\n");
+ goto done;
+ }
+ if (4 != ns(Monster_vec_rscan_by_name(monsters, "Gulliver"))) {
+ printf("Gulliver not found\n");
+ goto done;
+ }
+ if (4 != ns(Monster_vec_rscan_n_by_name(monsters, "Gulliver42", 8))) {
+ printf("Gulliver not found with n\n");
+ goto done;
+ }
+ if (2 != ns(Monster_vec_rscan_ex_n_by_name(monsters, 1, 3, "Gulliver42", 8))) {
+ printf("Gulliver not found with n\n");
+ goto done;
+ }
+ if (2 != ns(Monster_vec_scan_ex_by_name(monsters, 2, nsc(end), "Gulliver"))) {
+ printf("Gulliver not found starting from Gulliver\n");
+ goto done;
+ }
+ if (2 != ns(Monster_vec_scan_ex_n_by_name(monsters, 2, nsc(end), "Gulliver42", 8))) {
+ printf("Gulliver not found starting from Gulliver\n");
+ goto done;
+ }
+ if (4 != ns(Monster_vec_scan_ex_by_name(monsters, 3, nsc(end), "Gulliver"))) {
+ printf("Another Gulliver not found\n");
+ goto done;
+ }
+
+ if (nsc(not_found) != ns(Monster_vec_scan_ex(monsters, 1, 3, "Jingle"))) {
+ printf("not found in subrange not working\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_scan_ex(monsters, 1, 3, "TwoFace"))) {
+ printf("subrange doesn't limit low bound\n");
+ goto done;
+ }
+ if (1 != ns(Monster_vec_scan_ex(monsters, 1, 3, "Joker"))) {
+ printf("scan in subrange did not find Joker\n");
+ goto done;
+ }
+ if (2 != ns(Monster_vec_scan_ex_by_name(monsters, 1, 3, "Gulliver"))) {
+ printf("scan in subrange did not find Gulliver\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_scan_ex_by_name(monsters, 1, 3, "Alice"))) {
+ printf("subrange doesn't limit upper bound in scan\n");
+ goto done;
+ }
+
+ if (nsc(not_found) != ns(Monster_vec_rscan_ex(monsters, 1, 3, "Jingle"))) {
+ printf("not found in subrange not working with rscan\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_rscan_ex(monsters, 1, 3, "TwoFace"))) {
+ printf("subrange doesn't limit lower bound in rscan\n");
+ goto done;
+ }
+ if (1 != ns(Monster_vec_rscan_ex(monsters, 1, 3, "Joker"))) {
+ printf("rscan in subrange did not find Joker\n");
+ goto done;
+ }
+ if (2 != ns(Monster_vec_rscan_ex_by_name(monsters, 1, 3, "Gulliver"))) {
+ printf("rscan in subrange did not find Gulliver\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_rscan_ex_by_name(monsters, 1, 3, "Alice"))) {
+ printf("subrange doesn't limit upper bound in rscan\n");
+ goto done;
+ }
+
+ if (nsc(not_found) != ns(Monster_vec_scan_ex(monsters, 0, 0, "TwoFace"))) {
+ printf("TwoFace is found in empty range\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_scan_ex(monsters, 0, 0, "Joker"))) {
+ printf("Joker is found in empty range\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_scan_ex(monsters, 1, 1, "Joker"))) {
+ printf("Joker is found in another empty range\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_scan_ex(monsters, ns(Monster_vec_len(monsters)), nsc(end), "TwoFace"))) {
+ printf("TwoFace is found in empty range in the end\n");
+ goto done;
+ }
+
+ if (nsc(not_found) != ns(Monster_vec_rscan_ex(monsters, 0, 0, "TwoFace"))) {
+ printf("TwoFace is found in empty range\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_rscan_ex(monsters, 0, 0, "Joker"))) {
+ printf("Joker is found in empty range\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_rscan_ex(monsters, 1, 1, "Joker"))) {
+ printf("Joker is found in another empty range\n");
+ goto done;
+ }
+ if (nsc(not_found) != ns(Monster_vec_rscan_ex(monsters, ns(Monster_vec_len(monsters)), nsc(end), "TwoFace"))) {
+ printf("TwoFace is found in empty range in the end\n");
+ goto done;
+ }
+
+ if (1 != count_monsters(monsters, "Joker")) {
+ printf("number of Jokers is not 1\n");
+ goto done;
+ }
+ if (0 != count_monsters(monsters, "Jingle")) {
+ printf("number of Jingles is not 0\n");
+ goto done;
+ }
+ if (1 != count_monsters(monsters, "TwoFace")) {
+ printf("number of TwoFace is not 1\n");
+ goto done;
+ }
+ if (2 != count_monsters(monsters, "Gulliver")) {
+ printf("number of Gullivers is not 2\n");
+ goto done;
+ }
+
+
+ if (0 != (pos = nsc(uint8_vec_scan(inv, 6)))) {
+ printf("scan not working on first item of inventory\n");
+ goto done;
+ }
+ if (2 != (pos = nsc(uint8_vec_scan(inv, 1)))) {
+ printf("scan not working on middle item of inventory\n");
+ goto done;
+ }
+ if (nsc(not_found) != (pos = nsc(uint8_vec_scan_ex(inv, 3, nsc(end), 1)))) {
+ printf("scan_ex(item+1) not working on middle item of inventory\n");
+ goto done;
+ }
+ if (nsc(not_found) != (pos = nsc(uint8_vec_scan(inv, 5)))) {
+ printf("scan not working for repeating item of inventory\n");
+ goto done;
+ }
+ if (6 != (pos = nsc(uint8_vec_scan(inv, 2)))) {
+ printf("scan not working on last item of inventory\n");
+ goto done;
+ }
+ if (3 != (pos = nsc(uint8_vec_scan(inv, 3)))) {
+ printf("scan not working for repeating item of inventory\n");
+ goto done;
+ }
+ if (3 != (pos = nsc(uint8_vec_scan_ex(inv, 3, nsc(end), 3)))) {
+ printf("scan_ex(item) not working for repeating item of inventory\n");
+ goto done;
+ }
+ if (5 != (pos = nsc(uint8_vec_scan_ex(inv, 4, nsc(end), 3)))) {
+ printf("scan_ex(item+1) not working for repeating item of inventory\n");
+ goto done;
+ }
+ if (5 != (pos = nsc(uint8_vec_rscan(inv, 3)))) {
+ printf("rscan not working for repeating item of inventory\n");
+ goto done;
+ }
+ if (3 != (pos = nsc(uint8_vec_rscan_ex(inv, 1, 4, 3)))) {
+ printf("rscan_ex not working for repeating item of inventory\n");
+ goto done;
+ }
+
+ /* Test that all scan functions are generated for string arrays */
+ nsc(string_vec_scan(strings, "Hello"));
+ nsc(string_vec_scan_ex(strings, 0, nsc(end), "Hello"));
+ nsc(string_vec_scan_n(strings, "Hello", 4));
+ nsc(string_vec_scan_ex_n(strings, 0, nsc(end), "Hello", 4));
+ nsc(string_vec_rscan(strings, "Hello"));
+ nsc(string_vec_rscan_ex(strings, 0, nsc(end), "Hello"));
+ nsc(string_vec_rscan_n(strings, "Hello", 4));
+ nsc(string_vec_rscan_ex_n(strings, 0, nsc(end), "Hello", 4));
+
+#if FLATCC_ALLOW_SCAN_FOR_ALL_FIELDS
+ /* Check for presence of scan for non-key fields */
+ ns(Monster_vec_scan_by_hp(monsters, 13));
+ ns(Monster_vec_scan_ex_by_hp(monsters, 1, nsc(end), 42));
+ ns(Monster_vec_rscan_by_hp(monsters, 1));
+ ns(Monster_vec_rscan_ex_by_hp(monsters, 0, 2, 42));
+#endif
+
+ ret = 0;
+
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int test_basic_sort(flatcc_builder_t *B)
+{
+ ns(Monster_table_t) mon;
+ nsc(uint8_vec_t) inv;
+ nsc(uint8_mutable_vec_t) minv;
+
+ void *buffer;
+ size_t size;
+ uint8_t invdata[] = { 6, 7, 1, 3, 4, 3, 2 };
+ uint8_t sortedinvdata[] = { 1, 2, 3, 3, 4, 6, 7 };
+ uint8_t v, i;
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_inventory_create(B, invdata, c_vec_len(invdata)));
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_get_direct_buffer(B, &size);
+
+ mon = ns(Monster_as_root(buffer));
+ inv = ns(Monster_inventory(mon));
+ minv = (nsc(uint8_mutable_vec_t))inv;
+ nsc(uint8_vec_sort(minv));
+ assert(nsc(uint8_vec_len(inv) == c_vec_len(invdata)));
+ for (i = 0; i < nsc(uint8_vec_len(inv)); ++i) {
+ v = nsc(uint8_vec_at(inv, i));
+ if (v != sortedinvdata[i]) {
+ printf("inventory not sorted\n");
+ return -1;
+ }
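+ /* find returns the lowest index among equal keys, so the duplicate 3
+ * at index 3 is found at index 2. */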
+ if (nsc(uint8_vec_find(inv, v) != (i == 3 ? 2 : i))) {
+ printf("find not working on inventory\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int test_clone_slice(flatcc_builder_t *B)
+{
+ ns(Monster_table_t) mon, mon2;
+ nsc(string_vec_t) strings;
+ nsc(bool_vec_t) bools;
+ nsc(string_t) name;
+ ns(Monster_ref_t) monster_ref;
+ ns(Test_t) *t;
+ ns(Test_struct_t) test4;
+ ns(Test_struct_t) elem4;
+ void *buffer, *buf2;
+ size_t size;
+ int ret = -1;
+ uint8_t booldata[] = { 0, 1, 0, 0, 1, 0, 0 };
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "The Source"));
+ ns(Monster_testarrayofbools_create(B, booldata, c_vec_len(booldata)));
+
+ ns(Monster_test4_start(B));
+ t = ns(Monster_test4_extend(B, 2));
+ t[0].a = 22;
+ t[1].a = 44;
+ ns(Monster_test4_end(B));
+ ns(Monster_pos_start(B))->x = -42.3f;
+
+ ns(Monster_end_as_root(B));
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ hexdump("clone slice source buffer", buffer, size, stderr);
+
+ mon = ns(Monster_as_root(buffer));
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+
+ name = ns(Monster_name(mon));
+ assert(name);
+ bools = ns(Monster_testarrayofbools(mon));
+ assert(bools);
+ test4 = ns(Monster_test4(mon));
+ assert(test4);
+
+ ns(Monster_name_clone(B, name));
+ ns(Monster_testarrayofstring_start(B));
+ ns(Monster_testarrayofstring_push_clone(B, name));
+ ns(Monster_testarrayofstring_push_slice(B, name, 4, 20));
+ ns(Monster_testarrayofstring_push_slice(B, name, 0, 3));
+ ns(Monster_testarrayofstring_end(B));
+ ns(Monster_start(B));
+ ns(Monster_name_slice(B, name, 2, 20));
+ ns(Monster_testarrayofbools_clone(B, bools));
+
+ ns(Monster_test4_slice(B, test4, 1, 2));
+
+
+ monster_ref = ns(Monster_end(B));
+
+ ns(Monster_test_add(B, ns(Any_as_Monster(monster_ref))));
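+ /* A slice length of (size_t)-1 exceeds the vector and is clamped to the
+ * end, leaving 4 of the 7 elements. */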
+ ns(Monster_testarrayofbools_slice(B, bools, 3, (size_t)-1));
+
+ ns(Monster_pos_clone(B, ns(Monster_pos(mon))));
+ ns(Monster_test4_clone(B, test4));
+
+ ns(Monster_end_as_root(B));
+
+ buf2 = flatcc_builder_get_direct_buffer(B, &size);
+ hexdump("target buffer of clone", buf2, size, stderr);
+ mon2 = ns(Monster_as_root(buf2));
+
+ if (strcmp(ns(Monster_name(mon2)), "The Source")) {
+ printf("The Source was not cloned\n");
+ goto done;
+ }
+
+ strings = ns(Monster_testarrayofstring(mon2));
+ if (strcmp(nsc(string_vec_at(strings, 0)), "The Source")) {
+ printf("Push clone failed The Source\n");
+ goto done;
+ }
+ if (nsc(string_len(nsc(string_vec_at(strings, 1)))) != 6) {
+ printf("Push slice failed Sourcee on length\n");
+ goto done;
+ }
+ if (strcmp(nsc(string_vec_at(strings, 1)), "Source")) {
+ printf("Push slice failed Source\n");
+ goto done;
+ }
+ if (nsc(string_len(nsc(string_vec_at(strings, 2)))) != 3) {
+ printf("Push slice failed The on length\n");
+ goto done;
+ }
+ if (strcmp(nsc(string_vec_at(strings, 2)), "The")) {
+ printf("Push slice failed The\n");
+ goto done;
+ }
+ mon = ns(Monster_test(mon2));
+ assert(mon);
+ if (strcmp(ns(Monster_name(mon)), "e Source")) {
+ printf("name_slice did not shorten The Source correctly");
+ goto done;
+ }
+ bools = ns(Monster_testarrayofbools(mon));
+ if (nsc(bool_vec_len(bools)) != 7) {
+ printf("clone bool has wrong length\n");
+ goto done;
+ }
+ if (memcmp(bools, booldata, 7)) {
+ printf("cloned bool has wrong content\n");
+ goto done;
+ }
+
+ bools = ns(Monster_testarrayofbools(mon2));
+ if (nsc(bool_vec_len(bools)) != 4) {
+ printf("slice bool has wrong length\n");
+ goto done;
+ }
+ if (memcmp(bools, booldata + 3, 4)) {
+ printf("sliced bool has wrong content\n");
+ goto done;
+ }
+ if (!parse_float_is_equal(ns(Monster_pos(mon2))->x, -42.3f)) {
+ printf("cloned pos struct failed\n");
+ goto done;
+ };
+ test4 = ns(Monster_test4(mon2));
+ if (ns(Test_vec_len(test4)) != 2) {
+ printf("struct vector test4 not cloned with correct length\n");
+ goto done;
+ }
+ elem4 = ns(Test_vec_at(test4, 0));
+ if (ns(Test_a(elem4)) != 22) {
+ printf("elem 0 of test4 not cloned\n");
+ goto done;
+ }
+ if (flatbuffers_is_native_pe() && ns(Test_vec_at(test4, 0))->a != 22) {
+ printf("elem 0 of test4 not cloned, direct access\n");
+ goto done;
+ }
+ elem4 = ns(Test_vec_at(test4, 1));
+ if (ns(Test_a(elem4)) != 44) {
+ printf("elem 1 of test4 not cloned\n");
+ goto done;
+ }
+ test4 = ns(Monster_test4(mon));
+ if (ns(Test_vec_len(test4)) != 1) {
+ printf("sliced struct vec not sliced\n");
+ goto done;
+ }
+ elem4 = ns(Test_vec_at(test4, 0));
+ if (ns(Test_a(elem4)) != 44) {
+ printf("sliced struct vec has wrong element\n");
+ goto done;
+ }
+
+ /*
+ * There is no push clone for structs because it becomes messy when
+ * the vector has to be ended using end_pe, or alternatively do a double
+ * conversion with unclear semantics.
+ */
+
+ ret = 0;
+
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int test_create_add_field(flatcc_builder_t *B)
+{
+ void *buffer;
+ size_t size;
+ int ret = -1;
+ ns(Monster_table_t) mon;
+ ns(Stat_table_t) stat;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_testempty_create(B, nsc(string_create_str(B, "hello")), -100, 2));
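+ /* Adding a null (0) reference is a no-op, so the enemy field should end up absent. */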
+ ns(Monster_enemy_add(B, 0));
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ mon = ns(Monster_as_root(buffer));
+ if (ns(Monster_enemy_is_present(mon))) {
+ printf("enemy should not be present when adding null\n");
+ goto done;
+ }
+ stat = ns(Monster_testempty(mon));
+ if (!(ns(Stat_val(stat)) == -100)) {
+ printf("Stat didn't happen\n");
+ goto done;
+ }
+ ret = 0;
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int verify_union_vector(void *buffer, size_t size)
+{
+ int ret = -1;
+ size_t n;
+ int color;
+
+ ns(Monster_table_t) mon;
+ ns(TestSimpleTableWithEnum_table_t) kermit;
+ flatbuffers_generic_vec_t anyvec;
+ ns(Any_vec_t) anyvec_type;
+ ns(Any_union_vec_t) anyvec_union;
+ ns(Any_union_t) anyelem;
+ ns(Alt_table_t) alt;
+
+ if ((ret = ns(Monster_verify_as_root(buffer, size)))) {
+ printf("Monster buffer with union vector failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ goto failed;
+ }
+
+ mon = ns(Monster_as_root(buffer));
+ if (ns(Monster_test_type(mon)) != ns(Any_Alt)) {
+ printf("test field does not have Alt type");
+ goto failed;
+ }
+ alt = ns(Monster_test(mon));
+ if (!alt || !ns(Alt_manyany_is_present(alt))) {
+ printf("manyany union vector should be present.\n");
+ goto failed;
+ }
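+ /* A union vector is stored as two parallel vectors: a type vector and a
+ * generic value vector. */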
+ anyvec_type = ns(Alt_manyany_type(alt));
+ anyvec = ns(Alt_manyany(alt));
+ n = ns(Any_vec_len(anyvec_type));
+ if (n != 1) {
+ printf("manyany union vector has wrong length.\n");
+ goto failed;
+ }
+ if (nsc(union_type_vec_at(anyvec_type, 0)) != ns(Any_TestSimpleTableWithEnum)) {
+ printf("manyany union vector has wrong element type.\n");
+ goto failed;
+ }
+ kermit = flatbuffers_generic_vec_at(anyvec, 0);
+ if (!kermit) {
+ printf("Kermit is lost.\n");
+ goto failed;
+ }
+ color = ns(TestSimpleTableWithEnum_color(kermit));
+ if (color != ns(Color_Green)) {
+ printf("Kermit has wrong color: %i.\n", (int)color);
+ goto failed;
+ }
+ anyvec_union = ns(Alt_manyany_union(alt));
+ if (ns(Any_union_vec_len(anyvec_union)) != 1) {
+ printf("manyany union vector has wrong length from a different perspective.\n");
+ goto failed;
+ }
+ anyelem = ns(Any_union_vec_at(anyvec_union, 0));
+ if (anyelem.type != nsc(union_type_vec_at(anyvec_type, 0))) {
+ printf("Kermit is now different.\n");
+ goto failed;
+ }
+ if (anyelem.value != kermit) {
+ printf("Kermit is incoherent.\n");
+ goto failed;
+ }
+ ret = 0;
+
+done:
+ return ret;
+
+failed:
+ ret = -1;
+ goto done;
+}
+
+int test_union_vector(flatcc_builder_t *B)
+{
+ void *buffer = 0, *cloned_buffer = 0;
+ size_t size;
+ int ret = -1;
+ flatcc_refmap_t refmap, *refmap_old;
+
+ ns(TestSimpleTableWithEnum_ref_t) kermit_ref;
+ ns(Any_union_vec_ref_t) anyvec_ref;
+
+
+ flatcc_refmap_init(&refmap);
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "Kermit"));
+
+ kermit_ref = ns(TestSimpleTableWithEnum_create(B,
+ ns(Color_Green), ns(Color_Green),
+ ns(Color_Green), ns(Color_Green)));
+ ns(Any_vec_start(B));
+ ns(Any_vec_push(B, ns(Any_as_TestSimpleTableWithEnum(kermit_ref))));
+ anyvec_ref = ns(Any_vec_end(B));
+ ns(Monster_test_Alt_start(B));
+ ns(Alt_manyany_add(B, anyvec_ref));
+ ns(Monster_test_Alt_end(B));
+
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+
+ if (verify_union_vector(buffer, size)) {
+ printf("Union vector Monster didn't verify.\n");
+ goto failed;
+ }
+ flatcc_builder_reset(B);
+ refmap_old = flatcc_builder_set_refmap(B, &refmap);
+ if (!ns(Monster_clone_as_root(B, ns(Monster_as_root(buffer))))) {
+ printf("Cloned union vector Monster didn't actually clone.\n");
+ goto failed;
+ };
+ flatcc_builder_set_refmap(B, refmap_old);
+ cloned_buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+
+ if (verify_union_vector(cloned_buffer, size)) {
+ printf("Cloned union vector Monster didn't verify.\n");
+ goto failed;
+ }
+
+ ret = 0;
+
+done:
+ flatcc_refmap_clear(&refmap);
+ flatcc_builder_aligned_free(buffer);
+ flatcc_builder_aligned_free(cloned_buffer);
+ return ret;
+
+failed:
+ ret = -1;
+ goto done;
+}
+
+int verify_fixed_length_array(const void *buffer, size_t size)
+{
+ const char *text;
+ ns(Monster_table_t) mon;
+ ns(Alt_table_t) alt;
+ ns(FooBar_struct_t) fa;
+ ns(FooBar_t) fa2;
+ ns(Test_struct_t) t0, t1;
+ int ret;
+
+ if ((ret = ns(Monster_verify_as_root(buffer, size)))) {
+ printf("Monster buffer with fixed length arrays failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ return -1;
+ }
+
+ mon = ns(Monster_as_root(buffer));
+ if (ns(Monster_test_type(mon)) != ns(Any_Alt)) {
+ printf("test field does not have Alt type");
+ return -1;
+ }
+
+ alt = ns(Monster_test(mon));
+ if (!alt || !ns(Alt_fixed_array_is_present(alt))) {
+ printf("fixed array should be present.\n");
+ return -1;
+ }
+
+ fa = ns(Alt_fixed_array(alt));
+
+ if (ns(FooBar_foo(fa, 0)) != 1.0f || ns(FooBar_bar(fa, 9) != 1000)) {
+ printf("Monster buffer with fixed length arrays has wrong content\n");
+ return -1;
+ }
+
+ if (ns(FooBar_foo_get(fa, 0)) != 1.0f || ns(FooBar_bar_get(fa, 9) != 1000)) {
+ printf("Monster buffer with fixed length arrays has wrong content\n");
+ return -1;
+ }
+ if (ns(FooBar_foo_get(fa, 16)) != 0.0f || ns(FooBar_bar_get(fa, 10) != 0)) {
+ printf("Monster buffer with fixed length arrays has bad bounds check\n");
+ return -1;
+ }
+ if (ns(FooBar_col_get(fa, 2)) != ns(Color_Red)) {
+ printf("Fixed length enum array content not correct\n");
+ return -1;
+ }
+ t0 = ns(FooBar_tests_get(fa, 0));
+ t1 = ns(FooBar_tests_get(fa, 1));
+ if (!t0 || !t1) {
+ printf("Monster buffer with fixed length struct arrays has missing element\n");
+ return -1;
+ }
+ if (ns(Test_a_get(t0)) != 0 || ns(Test_b_get(t0)) != 4) {
+ printf("Monster buffer with fixed length struct arrays has wrong first element member content\n");
+ return -1;
+ }
+ if (ns(Test_a_get(t1)) != 1 || ns(Test_b_get(t1)) != 2) {
+ printf("Monster buffer with fixed length struct arrays has wrong second element member content\n");
+ return -1;
+ }
+
+ /* Endian safe because char arrays are endian neutral. */
+ text = ns(FooBar_text_get_ptr(fa));
+ if (strncmp(text, "hello", ns(FooBar_text_get_len())) != 0) {
+ printf("Monster buffer with fixed length array field has wrong text\n");
+ return -1;
+ }
+
+ /*
+ * Note: using ns(FooBar_foo_get_ptr(fa)) to get a raw pointer to the
+ * array is not endian safe. Since this is a struct array field,
+ * fa->foo would also provide the raw pointer.
+ */
+ if (flatbuffers_is_native_pe()) {
+ if (ns(FooBar_foo_get_ptr(fa))[1] != 2.0f) {
+ printf("Monster buffer with fixed length arrays get_ptr has wrong content\n");
+ return -1;
+ }
+ }
+
+ ns(FooBar_copy_from_pe(&fa2, fa));
+ if (fa2.foo[0] != 1.0f || fa2.foo[1] != 2.0f || fa2.foo[15] != 16.0f ||
+ fa2.bar[0] != 100 || fa2.bar[9] != 1000) {
+ printf("Monster buffer with copied fixed length arrays has wrong content\n");
+ return -1;
+ }
+ if (fa2.foo[2] != 0.0f || fa2.foo[14] != 0.0f || fa2.bar[1] != 0 || fa2.bar[8] != 0) {
+ printf("Monster buffer with copied fixed length arrays has not been zero padded\n");
+ return -1;
+ }
+
+ /*
+ * In-place conversion - a nop on little endian platforms.
+ * Cast needed to remove const
+ */
+ ns(FooBar_from_pe)((ns(FooBar_t) *)fa);
+ if (fa->foo[0] != 1.0f || fa->foo[1] != 2.0f || fa->foo[15] != 16.0f ||
+ fa->bar[0] != 100 || fa->bar[9] != 1000) {
+ printf("Monster buffer with in-place converted fixed length arrays has wrong content\n");
+ return -1;
+ }
+ if (fa->foo[2] != 0.0f || fa->foo[14] != 0.0f || fa->bar[1] != 0 || fa->bar[8] != 0) {
+ printf("Monster buffer with in-place converted fixed length arrays has not been zero padded\n");
+ return -1;
+ }
+ return 0;
+}
+
+int test_fixed_length_array(flatcc_builder_t *B)
+{
+ void *buffer = 0;
+ size_t size;
+ int ret = -1;
+ float foo_input[16] = { 1.0f, 2.0f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16.0f };
+ int bar_input[10] = { 100, 0, 0, 0, 0, 0, 0, 0, 0, 1000 };
+ ns(Color_enum_t) col_input[3] = { 0, 0, ns(Color_Red) };
+ ns(Test_t) tests_input[2] = {{ 0, 4 }, { 1, 2 }};
+
+ ns(FooBar_t) *foobar;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "Monolith"));
+ ns(Monster_test_Alt_start(B));
+ foobar = ns(Alt_fixed_array_start(B));
+ foobar->foo[0] = 1.0f;
+ foobar->foo[1] = 2.0f;
+ foobar->foo[15] = 16.0f;
+ foobar->bar[0] = 100;
+ foobar->bar[9] = 1000;
+ foobar->col[2] = ns(Color_Red);
+ foobar->tests[0].b = 4;
+ foobar->tests[1].a = 1;
+ foobar->tests[1].b = 2;
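+ /* Members not assigned above are expected to remain zero - the verifier
+ * checks the zero padding. */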
+ strncpy(foobar->text, "hello, world", ns(FooBar_text_get_len()));
+ /* or strncpy(foobar->text, "hello, world", sizeof(foobar->text)); */
+ ns(Alt_fixed_array_end(B));
+ ns(Monster_test_Alt_end(B));
+
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ ret = verify_fixed_length_array(buffer, size);
+ flatcc_builder_aligned_free(buffer);
+ if (ret) return -1;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "Monolith"));
+ ns(Monster_test_Alt_start(B));
+ foobar = ns(Alt_fixed_array_start(B));
+ ns(FooBar_assign)(foobar, foo_input, bar_input, col_input, tests_input, "hello");
+ ns(Alt_fixed_array_end(B));
+ ns(Monster_test_Alt_end(B));
+
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ ret = verify_fixed_length_array(buffer, size);
+ flatcc_builder_aligned_free(buffer);
+ if (ret) return -1;
+
+ return 0;
+}
+
+#define STR(s) nsc(string_create_str(B, s))
+
+int test_recursive_sort(flatcc_builder_t *B)
+{
+ nsc(string_ref_t) name;
+
+ void *buffer = 0;
+ size_t size = 0;
+ int ret = -1;
+ ns(Alt_table_t) alt;
+ ns(Any_union_t) any;
+ ns(Monster_table_t) monster;
+ ns(MultipleKeys_vec_t) mkvec;
+ ns(MultipleKeys_table_t) mk;
+ size_t index;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+
+ name = STR("Keyed Monster");
+ ns(Alt_start(B));
+ ns(Alt_multik_start(B));
+ ns(Alt_multik_push_create(B, STR("hi"), STR("there"), 42));
+ ns(Alt_multik_push_create(B, STR("hello"), STR("anyone"), 10));
+ ns(Alt_multik_push_create(B, STR("hello"), STR("anyone"), 4));
+ ns(Alt_multik_push_create(B, STR("good day"), STR("sir"), 1004));
+ ns(Alt_multik_end(B));
+ ns(Monster_test_add)(B, ns(Any_as_Alt(ns(Alt_end(B)))));
+ ns(Monster_name_add)(B, name);
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+ monster = ns(Monster_as_root)(buffer);
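+ /* Monster_sort recursively sorts keyed vectors reachable from the table,
+ * including the multik vector nested in the test union. */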
+ ns(Monster_sort)((ns(Monster_mutable_table_t))monster);
+ any = ns(Monster_test_union(monster));
+ if (any.type != ns(Any_Alt)) {
+ printf("Any type no Alt as expected\n");
+ goto done;
+ }
+ alt = any.value;
+ mkvec = ns(Alt_multik(alt));
+ index = ns(MultipleKeys_vec_len(mkvec));
+ if (index != 4) {
+ printf("unexpected multik vec len, got %d\n", (int)index);
+ goto done;
+ }
+ mk = ns(MultipleKeys_vec_at(mkvec, 0));
+ if (ns(MultipleKeys_foobar(mk) != 4)) {
+ printf("multik elem 0 not sorted, but it really should be\n");
+ }
+ mk = ns(MultipleKeys_vec_at(mkvec, 1));
+ if (ns(MultipleKeys_foobar(mk) != 10)) {
+ printf("multik elem 1 not sorted, but it really should be\n");
+ }
+ mk = ns(MultipleKeys_vec_at(mkvec, 2));
+ if (ns(MultipleKeys_foobar(mk) != 42)) {
+ printf("multik elem 2 not sorted, but it really should be\n");
+ }
+ mk = ns(MultipleKeys_vec_at(mkvec, 3));
+ if (ns(MultipleKeys_foobar(mk) != 1004)) {
+ printf("multik elem 3 not sorted, but it really should be\n");
+ }
+
+ hexdump("MultiKeyed buffer", buffer, size, stderr);
+ if ((ret = ns(Monster_verify_as_root(buffer, size)))) {
+ printf("Multikeyed Monster buffer failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ goto done;
+ }
+
+ ret = 0;
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int test_mixed_type_union(flatcc_builder_t *B)
+{
+ void *buffer;
+ size_t size;
+ int ret = -1;
+ /* Builder */
+ nsf(Character_union_ref_t) ut;
+ nsf(Rapunzel_ref_t) cameo_ref;
+ nsf(Attacker_ref_t) attacker_ref;
+ nsf(BookReader_ref_t) br_ref;
+ nsf(BookReader_t *) pbr;
+ nsf(Movie_table_t) mov;
+
+ /* Reader */
+ nsf(Character_union_vec_t) characters;
+ nsf(Character_union_t) character;
+ nsf(Rapunzel_struct_t) rapunzel;
+ nsf(Attacker_table_t) attacker;
+ nsc(string_t) text;
+
+ flatcc_builder_reset(B);
+
+ nsf(Movie_start_as_root(B));
+ br_ref = nsf(BookReader_create(B, 10));
+ cameo_ref = nsf(Rapunzel_create(B, 22));
+ ut = nsf(Character_as_Rapunzel(cameo_ref));
+ nsf(Movie_main_character_Rapunzel_create(B, 19));
+ nsf(Movie_cameo_Rapunzel_add(B, cameo_ref));
+ attacker_ref = nsf(Attacker_create(B, 42));
+ nsf(Movie_antagonist_MuLan_add(B, attacker_ref));
+ nsf(Movie_side_kick_Other_create_str(B, "Nemo"));
+ nsf(Movie_characters_start(B));
+ nsf(Movie_characters_push(B, ut));
+ nsf(Movie_characters_MuLan_push(B, attacker_ref));
+ nsf(Movie_characters_MuLan_push_create(B, 1));
+ nsf(Character_vec_push(B, nsf(Character_as_Other(nsc(string_create_str(B, "other"))))));
+ nsf(Movie_characters_Belle_push(B, br_ref));
+ pbr = nsf(Movie_characters_Belle_push_start(B));
+ pbr->books_read = 3;
+ nsf(Movie_characters_Belle_push_end(B));
+ nsf(Movie_characters_Belle_push(B, nsf(BookReader_create(B, 1))));
+ nsf(Movie_characters_Belle_push_create(B, 2));
+ nsf(Movie_characters_Other_push(B, nsc(string_create_str(B, "another"))));
+ nsf(Movie_characters_Other_push_create_str(B, "yet another"));
+ nsf(Movie_characters_end(B));
+ nsf(Movie_end_as_root(B));
+
+ buffer = flatcc_builder_finalize_aligned_buffer(B, &size);
+
+ hexdump("Movie buffer", buffer, size, stderr);
+ if ((ret = nsf(Movie_verify_as_root(buffer, size)))) {
+ printf("Movie buffer with mixed type union and union vector failed to verify, got: %s\n", flatcc_verify_error_string(ret));
+ return -1;
+ }
+ ret = -1;
+
+ mov = nsf(Movie_as_root(buffer));
+ if (!nsf(Movie_main_character_is_present(mov))) {
+ printf("Main_charactery union should be present.\n");
+ goto done;
+ }
+ if (!nsf(Movie_characters_is_present(mov))) {
+ printf("Characters union vector should be present.\n");
+ goto done;
+ }
+ character = nsf(Movie_main_character_union(mov));
+ if (character.type != nsf(Character_Rapunzel)) {
+ printf("Unexpected main character.\n");
+ goto done;
+ };
+ /*
+ * Tables and structs can cast by void pointer assignment while
+ * strings require an explicit cast.
+ */
+ rapunzel = character.value;
+ if (!rapunzel) {
+ printf("Rapunzel has gone AWOL\n");
+ }
+ if (nsf(Rapunzel_hair_length(rapunzel)) > 19) {
+ printf("Rapunzel's hair has grown unexpectedly\n");
+ goto done;
+ }
+ if (nsf(Rapunzel_hair_length(rapunzel)) < 19) {
+ printf("Rapunzel's hair has been trimmed unexpectedly\n");
+ goto done;
+ }
+ if (nsf(Movie_cameo_type(mov)) != nsf(Character_Rapunzel)) {
+ printf("Rapunzel did was not selected for cameo appearance.\n");
+ goto done;
+ }
+ rapunzel = nsf(Movie_cameo(mov));
+ if (!rapunzel) {
+ printf("Rapunzel did not show up for cameo appearance.\n");
+ goto done;
+ }
+ if (nsf(Rapunzel_hair_length(rapunzel)) != 22) {
+ printf("Rapunzel didn't style her hair for cameo role.\n");
+ goto done;
+ }
+ if (nsf(Movie_antagonist_type(mov)) != nsf(Character_MuLan)) {
+ printf("Unexpected antagonist.\n");
+ goto done;
+ }
+ attacker = nsf(Movie_antagonist(mov));
+ if (!attacker || nsf(Attacker_sword_attack_damage(attacker)) != 42) {
+ printf("Unexpected sword attack damamage.\n");
+ goto done;
+ }
+ if (nsf(Movie_side_kick_type(mov)) != nsf(Character_Other)) {
+ printf("Unexpected side kick.\n");
+ goto done;
+ }
+ /*
+ * We need to cast because generic pointers refer to the start
+ * of the memory block which is the string length, not the first
+ * character in the string.
+ */
+ text = nsc(string_cast_from_generic(nsf(Movie_side_kick(mov))));
+ if (!text) {
+ printf("missing side kick string.\n");
+ goto done;
+ }
+ if (strcmp(text, "Nemo")) {
+ printf("unexpected side kick string: '%s'.\n", text);
+ goto done;
+ }
+ text = nsf(Movie_side_kick_as_string(mov));
+ if (!text) {
+ printf("missing side kick string.\n");
+ goto done;
+ }
+ if (strcmp(text, "Nemo")) {
+ printf("unexpected side kick string (take 2): '%s'.\n", text);
+ goto done;
+ }
+ character = nsf(Movie_side_kick_union(mov));
+ text = nsc(string_cast_from_union(character));
+ if (strcmp(text, "Nemo")) {
+ printf("unexpected side kick string (take 3): '%s'.\n", text);
+ goto done;
+ }
+ characters = nsf(Movie_characters_union(mov));
+ character = nsf(Character_union_vec_at(characters, 0));
+ if (character.type != nsf(Character_Rapunzel)) {
+ printf("The first character is not Rapunzel.");
+ goto done;
+ };
+ character = nsf(Character_union_vec_at(characters, 1));
+ if (character.type != nsf(Character_MuLan)) {
+ printf("The second character is not MuLan.");
+ goto done;
+ };
+ attacker = character.value;
+ if (nsf(Attacker_sword_attack_damage(attacker)) != 42) {
+ printf("The second character has unexpected sword damage.");
+ goto done;
+ }
+ character = nsf(Character_union_vec_at(characters, 2));
+ if (character.type != nsf(Character_MuLan)) {
+ printf("The third character is not MuLan.");
+ goto done;
+ };
+ attacker = character.value;
+ if (nsf(Attacker_sword_attack_damage(attacker)) != 1) {
+ printf("The third character has unexpected sword damage.");
+ goto done;
+ }
+ if (nsc(union_type_vec_at(nsf(Movie_characters_type(mov)), 3)) != nsf(Character_Other)) {
+ printf("The fourth character was not of type 'Other'!\n");
+ goto done;
+ }
+ text = nsf(Character_union_vec_at_as_string(characters, 3));
+ if (!text || strcmp(text, "other")) {
+ printf("The fourth character was not described as 'other'.\n");
+ goto done;
+ }
+ character = nsf(Character_union_vec_at(characters, 3));
+ if (character.type != nsf(Character_Other)) {
+ printf("The fourth character is not of type 'Other' (take two).");
+ goto done;
+ };
+ text = nsc(string_cast_from_union(character));
+ if (!text || strcmp(text, "other")) {
+ printf("The fourth character was not described as 'other' (take two).\n");
+ goto done;
+ }
+ character = nsf(Character_union_vec_at(characters, 4));
+ if (character.type != nsf(Character_Belle)) {
+ printf("The fifth character is not Belle.");
+ goto done;
+ };
+ character = nsf(Character_union_vec_at(characters, 5));
+ if (character.type != nsf(Character_Belle)) {
+ printf("The sixth character is not Belle.");
+ goto done;
+ };
+ character = nsf(Character_union_vec_at(characters, 6));
+ if (character.type != nsf(Character_Belle)) {
+ printf("The seventh character is not Belle.");
+ goto done;
+ };
+ character = nsf(Character_union_vec_at(characters, 7));
+ if (character.type != nsf(Character_Belle)) {
+ printf("The eighth character is not Belle.");
+ goto done;
+ };
+ character = nsf(Character_union_vec_at(characters, 8));
+ if (character.type != nsf(Character_Other)) {
+ printf("The ninth character is not of type 'Other'.");
+ goto done;
+ };
+ character = nsf(Character_union_vec_at(characters, 9));
+ if (character.type != nsf(Character_Other)) {
+ printf("The ninth character is not of type 'Other'.");
+ goto done;
+ };
+ if (nsf(Character_union_vec_len(characters)) != 10) {
+ printf("Expected exactly 10 characters.");
+ goto done;
+ };
+
+ ret = 0;
+done:
+ flatcc_builder_aligned_free(buffer);
+ return ret;
+}
+
+int test_add_set_defaults(flatcc_builder_t *B)
+{
+ void *buffer;
+ size_t size;
+ ns(Monster_table_t) mon;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_hp_add(B, 100));
+ ns(Monster_mana_add(B, 100));
+ ns(Monster_color_add(B, ns(Color_Blue)));
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_get_direct_buffer(B, &size);
+ mon = ns(Monster_as_root(buffer));
+ if (ns(Monster_hp_is_present(mon))) {
+ printf("default should not be present for hp field\n");
+ return -1;
+ }
+ if (!ns(Monster_mana_is_present(mon))) {
+ printf("non-default should be present for mana field\n");
+ return -1;
+ }
+ if (ns(Monster_color_is_present(mon))) {
+ printf("default should not be present for color field\n");
+ return -1;
+ }
+
+ flatcc_builder_reset(B);
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_hp_force_add(B, 100));
+ ns(Monster_mana_force_add(B, 100));
+ ns(Monster_color_force_add(B, ns(Color_Blue)));
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_get_direct_buffer(B, &size);
+ mon = ns(Monster_as_root(buffer));
+ if (!ns(Monster_hp_is_present(mon))) {
+ printf("default should be present for hp field when forced\n");
+ return -1;
+ }
+ if (!ns(Monster_mana_is_present(mon))) {
+ printf("non-default should be present for mana field, also when forced\n");
+ return -1;
+ }
+ if (!ns(Monster_color_is_present(mon))) {
+ printf("default should be present for color field when forced\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int test_nested_buffer(flatcc_builder_t *B)
+{
+ void *buffer;
+ size_t size;
+ ns(Monster_table_t) mon, nested;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ /*
+ * Note:
+ * ns(Monster_testnestedflatbuffer_start(B));
+ * would start a raw ubyte array so we use start_as_root.
+ */
+ ns(Monster_testnestedflatbuffer_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyNestedMonster"));
+ ns(Monster_testnestedflatbuffer_end_as_root(B));
+ ns(Monster_hp_add(B, 10));
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_get_direct_buffer(B, &size);
+ hexdump("nested flatbuffer", buffer, size, stderr);
+
+ mon = ns(Monster_as_root(buffer));
+ if (strcmp(ns(Monster_name(mon)), "MyMonster")) {
+ printf("got the wrong root monster\n");
+ return -1;
+ }
+ /*
+ * Note:
+ * nested = ns(Monster_testnestedflatbuffer(mon));
+ * would return a raw ubyte vector not a monster.
+ */
+ nested = ns(Monster_testnestedflatbuffer_as_root(mon));
+
+ if (ns(Monster_hp(mon)) != 10) {
+ printf("health points wrong at root monster\n");
+ return -1;
+ }
+
+ assert(ns(Monster_name(nested)));
+ if (strcmp(ns(Monster_name(nested)), "MyNestedMonster")) {
+ printf("got the wrong nested monster\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int test_nested_buffer_first(flatcc_builder_t *B)
+{
+ void *buffer;
+ size_t size;
+ ns(Monster_table_t) mon, nested;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ /*
+ * Note:
+ * ns(Monster_testnestedflatbuffer_start(B));
+ * would start a raw ubyte array so we use start_as_root.
+ *
+ * Here we create the nested buffer first, and the parent
+ * string after so the emitter sees the nested buffer first.
+ */
+ ns(Monster_testnestedflatbuffer_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyNestedMonster"));
+ ns(Monster_testnestedflatbuffer_end_as_root(B));
+ ns(Monster_hp_add(B, 10));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_get_direct_buffer(B, &size);
+ hexdump("nested flatbuffer", buffer, size, stderr);
+
+ mon = ns(Monster_as_root(buffer));
+ if (strcmp(ns(Monster_name(mon)), "MyMonster")) {
+ printf("got the wrong root monster\n");
+ return -1;
+ }
+ /*
+ * Note:
+ * nested = ns(Monster_testnestedflatbuffer(mon));
+ * would return a raw ubyte vector not a monster.
+ */
+ nested = ns(Monster_testnestedflatbuffer_as_root(mon));
+
+ if (ns(Monster_hp(mon)) != 10) {
+ printf("health points wrong at root monster\n");
+ return -1;
+ }
+
+ assert(ns(Monster_name(nested)));
+ if (strcmp(ns(Monster_name(nested)), "MyNestedMonster")) {
+ printf("got the wrong nested monster\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int test_nested_buffer_using_nest(flatcc_builder_t *B)
+{
+ void *buffer;
+ uint8_t nested_buffer[1024];
+ size_t size, nested_size;
+ ns(Monster_table_t) mon, nested;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_name_create_str(B, "MyNestedMonster"));
+ ns(Monster_mana_add(B, 42));
+ ns(Monster_end_as_root(B));
+
+ nested_size = flatcc_builder_get_buffer_size(B);
+ if (!flatcc_builder_copy_buffer(B, nested_buffer, sizeof(nested_buffer))) {
+ printf("nested buffer copy failed\n");
+ return -1;
+ }
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_testnestedflatbuffer_nest(B, nested_buffer, nested_size, 0));
+ ns(Monster_hp_add(B, 10));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_end_as_root(B));
+
+ buffer = flatcc_builder_get_direct_buffer(B, &size);
+ hexdump("nested flatbuffer [using _nest()]", buffer, size, stderr);
+
+ mon = ns(Monster_as_root(buffer));
+ if (strcmp(ns(Monster_name(mon)), "MyMonster")) {
+ printf("got the wrong root monster\n");
+ return -1;
+ }
+ /*
+ * Note:
+ * nested = ns(Monster_testnestedflatbuffer(mon));
+ * would return a raw ubyte vector not a monster.
+ */
+ nested = ns(Monster_testnestedflatbuffer_as_root(mon));
+
+ if (ns(Monster_hp(mon)) != 10) {
+ printf("health points wrong at root monster\n");
+ return -1;
+ }
+
+ assert(ns(Monster_name(nested)));
+ if (strcmp(ns(Monster_name(nested)), "MyNestedMonster")) {
+ printf("got the wrong nested monster\n");
+ return -1;
+ }
+
+ if (ns(Monster_mana(nested)) != 42) {
+ printf("mana points wrong in nested monster\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int verify_include(void *buffer)
+{
+ (void)buffer;
+
+ if (MyGame_OtherNameSpace_FromInclude_Foo != 17) {
+ printf("Unexpected enum value `Foo` from included schema\n");
+ return -1;
+ }
+
+ if (MyGame_OtherNameSpace_FromInclude_IncludeVal != 0) {
+ printf("Unexpected enum value `IncludeVal` from included schema\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int test_struct_buffer(flatcc_builder_t *B)
+{
+ uint8_t buffer[100];
+
+ size_t size;
+ ns(Vec3_t) *v;
+ ns(Vec3_struct_t) vec3;
+
+ flatcc_builder_reset(B);
+ ns(Vec3_create_as_root(B, 1, 2, 3, 4.2, ns(Color_Blue), 2730, -17));
+ size = flatcc_builder_get_buffer_size(B);
+ assert(size == 48);
+ printf("dbg: struct buffer size: %d\n", (int)size);
+ assert(flatcc_emitter_get_buffer_size(flatcc_builder_get_emit_context(B)) == size);
+ if (!flatcc_builder_copy_buffer(B, buffer, 100)) {
+ printf("Copy failed\n");
+ return -1;
+ }
+ hexdump("Vec3 struct buffer", buffer, size, stderr);
+ if (!nsc(has_identifier(buffer, "MONS"))) {
+ printf("wrong Vec3 identifier (explicit)\n");
+ return -1;
+ }
+ if (nsc(has_identifier(buffer, "mons"))) {
+ printf("accepted wrong Vec3 identifier (explicit)\n");
+ return -1;
+ }
+ if (!nsc(has_identifier(buffer, ns(Vec3_identifier)))) {
+ printf("wrong Vec3 identifier (via define)\n");
+ return -1;
+ }
+ vec3 = ns(Vec3_as_root(buffer));
+ /* Convert buffer to native in place - a nop on native platform. */
+ v = (ns(Vec3_t) *)vec3;
+ ns(Vec3_from_pe(v));
+ if (!parse_float_is_equal(v->x, 1.0f) || !parse_float_is_equal(v->y, 2.0f) || !parse_float_is_equal(v->z, 3.0f)
+ || !parse_double_is_equal(v->test1, 4.2) || v->test2 != ns(Color_Blue)
+ || v->test3.a != 2730 || v->test3.b != -17
+ ) {
+ printf("struct buffer not valid\n");
+ return -1;
+ }
+ assert(ns(Color_Red) == 1 << 0);
+ assert(ns(Color_Green) == 1 << 1);
+ assert(ns(Color_Blue) == 1 << 3);
+ assert(sizeof(ns(Color_Blue)) == 1);
+ return 0;
+}
+
+int test_typed_struct_buffer(flatcc_builder_t *B)
+{
+ uint8_t buffer[100];
+
+ size_t size;
+ ns(Vec3_t) *v;
+ ns(Vec3_struct_t) vec3;
+
+ flatcc_builder_reset(B);
+ ns(Vec3_create_as_typed_root(B, 1, 2, 3, 4.2, ns(Color_Blue), 2730, -17));
+ size = flatcc_builder_get_buffer_size(B);
+ assert(size == 48);
+ printf("dbg: struct buffer size: %d\n", (int)size);
+ assert(flatcc_emitter_get_buffer_size(flatcc_builder_get_emit_context(B)) == size);
+ if (!flatcc_builder_copy_buffer(B, buffer, 100)) {
+ printf("Copy failed\n");
+ return -1;
+ }
+ hexdump("typed Vec3 struct buffer", buffer, size, stderr);
+ if (!nsc(has_identifier(buffer, "\xd2\x3e\xf5\xa8"))) {
+ printf("wrong Vec3 identifier (explicit)\n");
+ return -1;
+ }
+ if (nsc(has_identifier(buffer, "mons"))) {
+ printf("accepted wrong Vec3 identifier (explicit)\n");
+ return -1;
+ }
+ if (!nsc(has_identifier(buffer, ns(Vec3_type_identifier)))) {
+ printf("wrong Vec3 identifier (via define)\n");
+ return -1;
+ }
+ if (!ns(Vec3_as_root_with_type_hash(buffer, ns(Vec3_type_hash)))) {
+ printf("wrong Vec3 type identifier (via define)\n");
+ return -1;
+ }
+ if (flatcc_verify_ok != ns(Vec3_verify_as_root_with_type_hash(buffer, size, ns(Vec3_type_hash)))) {
+ printf("verify failed with Vec3 type hash\n");
+ return -1;
+ }
+ vec3 = ns(Vec3_as_typed_root(buffer));
+ if (!vec3) {
+ printf("typed Vec3 could not be read\n");
+ return -1;
+ }
+ if (flatcc_verify_ok != ns(Vec3_verify_as_typed_root(buffer, size))) {
+ printf("verify failed with Vec3 as typed root\n");
+ return -1;
+ }
+ /* Convert buffer to native in place - a nop on native platform. */
+ v = (ns(Vec3_t) *)vec3;
+ ns(Vec3_from_pe(v));
+ if (!parse_float_is_equal(v->x, 1.0f) || !parse_float_is_equal(v->y, 2.0f) || !parse_float_is_equal(v->z, 3.0f)
+ || !parse_double_is_equal(v->test1, 4.2) || v->test2 != ns(Color_Blue)
+ || v->test3.a != 2730 || v->test3.b != -17
+ ) {
+ printf("struct buffer not valid\n");
+ return -1;
+ }
+ assert(ns(Color_Red) == 1 << 0);
+ assert(ns(Color_Green) == 1 << 1);
+ assert(ns(Color_Blue) == 1 << 3);
+ assert(sizeof(ns(Color_Blue)) == 1);
+ return 0;
+}
+
+
+/* A stable test snapshot for reference. */
+int gen_monster_benchmark(flatcc_builder_t *B)
+{
+ uint8_t inv[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ ns(Vec3_t) *vec;
+ ns(Test_t) *test, x;
+
+ flatcc_builder_reset(B);
+
+ ns(Monster_start_as_root(B));
+ ns(Monster_hp_add(B, 80));
+ vec = ns(Monster_pos_start(B));
+ vec->x = 1, vec->y = 2, vec->z = -3.2f;
+ ns(Monster_pos_end(B));
+ ns(Monster_name_create_str(B, "MyMonster"));
+ ns(Monster_inventory_create(B, inv, c_vec_len(inv)));
+ ns(Monster_test4_start(B));
+ test = ns(Monster_test4_extend(B, 1));
+ test->a = 0x10;
+ test->b = 0x20;
+ test = ns(Monster_test4_extend(B, 2));
+ test->a = 0x30;
+ test->b = 0x40;
+ test[1].a = 0x50;
+ test[1].b = 0x60;
+ ns(Monster_test4_push_create(B, 0x70, (int8_t)0x80));
+ x.a = 0x191; /* This is a short. */
+ x.b = (int8_t)0x91; /* This is a byte. */
+ ns(Monster_test4_push(B, &x));
+ ns(Monster_test4_end(B));
+ ns(Monster_end_as_root(B));
+
+ return 0;
+}
+
+int time_monster(flatcc_builder_t *B)
+{
+ double t1, t2;
+ const int rep = 1000000;
+ size_t size;
+ int i;
+
+ printf("start timing ...\n");
+ t1 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ gen_monster_benchmark(B);
+ }
+ size = flatcc_builder_get_buffer_size(B);
+ t2 = elapsed_realtime();
+ show_benchmark("encode monster buffer", t1, t2, size, rep, "million");
+ return 0;
+}
+
+int gen_struct_buffer_benchmark(flatcc_builder_t *B)
+{
+ void *buffer;
+ ns(Vec3_t) *v;
+ ns(Vec3_struct_t) vec3;
+
+ flatcc_builder_reset(B);
+
+ ns(Vec3_create_as_root(B, 1, 2, 3, 4.2, ns(Color_Blue), 2730, -17));
+
+ buffer = flatcc_builder_get_direct_buffer(B, 0);
+ if (!buffer) {
+ return -1;
+ }
+ vec3 = ns(Vec3_as_root_with_identifier(buffer, 0));
+ /* Convert buffer to native in place - a nop on native platform. */
+ v = (ns(Vec3_t) *)vec3;
+ ns(Vec3_from_pe(v));
+ if (v->x != 1.0f || v->y != 2.0f || v->z != 3.0f
+ || v->test1 != 4.2 || v->test2 != ns(Color_Blue)
+ || v->test3.a != 2730 || v->test3.b != -17
+ ) {
+ return -1;
+ }
+ return 0;
+}
+
+int time_struct_buffer(flatcc_builder_t *B)
+{
+ double t1, t2;
+ const int rep = 1000000;
+ size_t size;
+ int i;
+ int ret = 0;
+
+ printf("start timing ...\n");
+ t1 = elapsed_realtime();
+ for (i = 0; i < rep; ++i) {
+ ret |= gen_struct_buffer_benchmark(B);
+ }
+ t2 = elapsed_realtime();
+ size = flatcc_builder_get_buffer_size(B);
+ if (ret) {
+ printf("struct not valid\n");
+ }
+ show_benchmark("encode, decode and access Vec struct buffers", t1, t2, size, rep, "million");
+ return ret;
+}
+
+int main(int argc, char *argv[])
+{
+ flatcc_builder_t builder, *B;
+
+ (void)argc;
+ (void)argv;
+
+ B = &builder;
+ flatcc_builder_init(B);
+
+#ifdef NDEBUG
+ printf("running optimized monster test\n");
+#else
+ printf("running debug monster test\n");
+#endif
+#if 1
+ if (test_enums(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_empty_monster(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_monster(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_monster_with_size(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_string(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_struct_buffer(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_typed_empty_monster(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_typed_struct_buffer(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_clone_slice(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_add_set_defaults(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_create_add_field(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_union_vector(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_basic_sort(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_sort_find(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_scan(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_nested_buffer(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_nested_buffer_first(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_nested_buffer_using_nest(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_cloned_monster(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (verify_include(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_type_aliases(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_mixed_type_union(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_recursive_sort(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+#if 1
+ if (test_fixed_length_array(B)) {
+ printf("TEST FAILED\n");
+ return -1;
+ }
+#endif
+
+#ifdef FLATBUFFERS_BENCHMARK
+ time_monster(B);
+ time_struct_buffer(B);
+#endif
+ flatcc_builder_clear(B);
+ return 0;
+}
diff --git a/test/monster_test/monster_test.fbs b/test/monster_test/monster_test.fbs
new file mode 100755
index 0000000..f0f78aa
--- /dev/null
+++ b/test/monster_test/monster_test.fbs
@@ -0,0 +1,365 @@
+// test schema file
+
+include "attributes.fbs";
+include "include_test1.fbs";
+
+struct InGlobalNamespace { unused: byte; }
+
+namespace MyGame;
+
+table InParentNamespace {}
+
+namespace MyGame.Example2;
+
+table Monster {} // Test having same name as below, but in different namespace.
+
+table Strange {}
+
+// Test schema reserved keywords as identifier
+// Can be disabled in config/config.h
+// It is still a good idea to use a namespace
+// to avoid conflicts with host language names
+// especially for enums and structs.
+// Reserved names can be important in some JSON
+// related use cases.
+table S2 {
+ namespace : int;
+ table : int;
+ struct : int;
+ union : int;
+ int : int;
+}
+
+// Enum fields can also be reserved, and these are also visible in
+// JSON, but they cannot be used as table field defaults because
+// type expressions do interpret keywords as keywords. For the same
+// reason it is of no use to allow reserved names as type names
+// such as table names.
+enum foo:int { x, y, table }
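+
+// For illustration only (a hypothetical declaration, expected to be rejected
+// by the parser because 'table' is read as a keyword in the type expression,
+// not as the enum member foo.table):
+//
+//   table Bad { f: foo = table; }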
+
+namespace MyGame.Example;
+
+// Note: parent namespace resolution only works without namespace prefix
+//
+// Won't work:
+// union Foo { Example2.InParentNamespace }
+union Foo { InParentNamespace }
+
+
+namespace MyGame.Example2.SubSystem;
+
+// Note:
+//
+// parent namespace resolution only works without namespace prefix
+// or with a global namespace prefix (a fully qualified name), not
+// something in between.
+// (There is no particularly good reason for this; it just isn't implemented,
+// and flatc JSON enum parsing works the same way.)
+// The more local name wins any conflict.
+//
+union SubSystemA { Strange }
+union SubSystemB { MyGame.Example2.Strange }
+
+// Works in C++ flatc 1.8 but won't work with flatcc:
+// union SubSystemC { Example2.Strange }
+
+
+namespace MyGame.Example;
+
+// test case for negative enum values
+enum neg_enum:int {
+ neg1 = -12,
+ neg2 = -11,
+ neg3 = -10,
+}
+
+enum int_enum:int {
+ intneg = -02,
+ intneg2 = -1,
+ int1 = 02,
+ int2 = 42,
+}
+
+// test for hex constants
+enum hex_enum:int {
+ hexneg = -0x02,
+ hex1 = 0x3,
+ hex2 = 0x7eafbeaf,
+}
+
+attribute "priority";
+
+enum Color:byte (bit_flags) { Red = 0, Green, Blue = 3, }
+
+// Note:
+// For historic reasons C++ flatc 1.8 does permit conflicting base names
+// from different namespaces without explicitly resolving the conflict.
+//
+// flatcc does not, and cannot, support this because it needs a unique
+// base name to assign to the enumeration of union members - otherwise
+// these enumerations could not have a namespace prefix - which is used
+// in JSON and in default value assignment in the schema.
+//
+// Won't work in flatcc:
+// union Any { Monster, TestSimpleTableWithEnum, MyGame.Example2.Monster }
+
+union Any {
+ Monster,
+ TestSimpleTableWithEnum,
+ Monster2: MyGame.Example2.Monster,
+ Alt
+}
+
+struct Test { a:short; b:byte; }
+
+enum notemptyenum:int { x}
+
+table TestSimpleTableWithEnum (csharp_partial) {
+ color: Color = Green;
+ color2: Color = Color.Green;
+ uc : ubyte = MyGame.Example.Color.Green;
+ uc2 : ubyte = Color.Green;
+
+ // C++ flatc 1.8 dislikes enum values on non-enums
+ // color2: Color = Green;
+ // C++ flatc 1.8 dislikes enum values on non-enums
+ // uc : ubyte = 1;
+ // C++ flatc 1.8 dislikes enum values on non-enums, and namespace prefix
+ // uc2 : ubyte = 1;
+}
+
+table TestInclude {
+ global:InGlobalNamespace;
+ incval:MyGame.OtherNameSpace.FromInclude;
+ incval2:MyGame.OtherNameSpace.FromInclude = IncludeVal;
+ incval3 : int (included_attribute);
+ incval4:MyGame.OtherNameSpace.FromInclude = MyGame.OtherNameSpace.FromInclude.IncludeVal;
+ incval5: long = MyGame.OtherNameSpace.FromInclude.IncludeVal;
+
+ // C++ flatc 1.8 dislikes namespace prefix
+ // incval4:MyGame.OtherNameSpace.FromInclude = IncludeVal;
+ // C++ flatc 1.8 dislikes enum values on non-enums, and namespace prefix
+ // incval5: long = 0;
+}
+
+struct Vec3 (force_align: 16) {
+ x:float;
+ y:float;
+ z:float;
+ test1:double;
+ test2:Color;
+ test3:Test;
+}
+
+struct Ability {
+ id:uint(key);
+ distance:uint;
+}
+
+table Stat {
+ id:string;
+ val:long;
+ count:ushort;
+}
+
+// fixed length arrays new to flatcc 0.6.0
+struct FooBar {
+ foo:[float:0x10];
+ bar:[int:10];
+ col:[Color:3];
+ tests:[Test:2];
+ text:[char:5];
+}
+
+// `sorted` attribute new to flatcc 0.6.0, not supported by flatc 1.8.
+// tables with direct or indirect vector content marked as sorted
+// will get a mutable sort operation that recursively sorts all
+// such vectors. `sorted` is only valid for non-union vectors.
+//
+// attribute "sorted";
+//
+table Alt {
+ prefix: TestJSONPrefix;
+ movie:Fantasy.Movie;
+ manyany: [Any];
+ multik: [MultipleKeys] (sorted);
+ rapunzels:[Fantasy.Rapunzel] (sorted);
+ names:[string] (sorted);
+ samples:[float32] (sorted);
+ fixed_array: FooBar;
+}
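+
+// A hypothetical usage sketch of the generated sort operation in C (the
+// exact generated names are assumptions, not verified against the generated
+// header): given a mutable pointer 'alt' to an Alt table, a single call like
+//
+//   ns(Alt_sort(alt));
+//
+// would recursively sort the multik, rapunzels, names and samples vectors
+// in place.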
+
+table TestJSONPrefix {
+ testjsonprefixparsing:TestJSONPrefixParsing;
+ testjsonprefixparsing2:TestJSONPrefixParsing2;
+ testjsonprefixparsing3:TestJSONPrefixParsing3;
+}
+
+table TestJSONPrefixParsing
+{
+ aaaa: string;
+ aaaa12345: uint;
+
+ bbbb: string;
+ bbbb1234: long;
+
+ cccc: string;
+ cccc1234: long;
+ cccc12345: uint;
+
+ dddd1234: long;
+ dddd12345: uint;
+}
+
+// when there are two keys ending in same 8 character group
+table TestJSONPrefixParsing2
+{
+ aaaa_bbbb_steps: long;
+ aaaa_bbbb_start_: uint;
+}
+
+// when there are two keys ending in different 8 character group
+table TestJSONPrefixParsing3
+{
+ aaaa_bbbb_steps: long;
+ aaaa_bbbb_start_steps: uint;
+}
+
+// C++ flatc 1.8 does not yet support base64 as a built-in
+// attribute "base64";
+// attribute "base64url";
+
+table TestBase64
+{
+ data:[ubyte] (base64);
+ urldata:[ubyte] (base64url);
+ nested:[ubyte] (nested_flatbuffer: "Monster", base64);
+}
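+
+// As used here, the (base64)/(base64url) attributes affect the JSON
+// representation of the [ubyte] fields; a purely illustrative JSON fragment
+// might look like {"data":"AQID"} rather than {"data":[1,2,3]}.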
+
+// 'primary_key' attribute new to flatcc 0.6.0, not supported by flatc 1.8.
+// Allow multiple keys and allow one to be the default find and sort key
+// even if not listed first. A table with a single key field behaves the
+// same as a table with a single primary_key field, so use key for
+// compatibility in that case.
+//
+// attribute "primary_key";
+//
+table MultipleKeys
+{
+ hello: string;
+ world: string (key);
+ foobar: int64 (primary_key);
+}
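+
+// A hypothetical lookup sketch (generated names are assumptions): with
+// foobar as the primary key, a default find such as
+//
+//   ns(MultipleKeys_vec_find(v, 42));
+//
+// would search by foobar, while a field-specific variant like
+// ns(MultipleKeys_vec_find_by_world(v, "earth")) would search by the
+// secondary string key.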
+
+table Monster {
+ pos:Vec3 (id: 0);
+ hp:short = 100 (id: 2);
+ mana:short = 150 (id: 1);
+ name:string (id: 3, required, key);
+ color:Color = Blue (id: 6);
+ inventory:[ubyte] (id: 5);
+ friendly:bool = false (deprecated, priority: 1, id: 4);
+ /// an example documentation comment: this will end up in the generated code
+ /// multiline too
+ testarrayoftables:[Monster] (id: 11);
+ testarrayofstring:[string] (id: 10);
+ testarrayofstring2:[string] (id: 28);
+ testarrayofbools:[bool] (id: 24);
+ testarrayofsortedstruct:[Ability] (id: 29);
+ enemy:MyGame.Example.Monster (id:12); // Test referring by full namespace.
+ // id 7 reserved for Any_type
+ test:Any (id: 8);
+ test4:[Test] (id: 9);
+ test5:[Test] (id: 31);
+ testnestedflatbuffer:[ubyte] (id:13, nested_flatbuffer: "Monster");
+ testempty:Stat (id:14);
+ testbool:bool = 1 (id:15);
+ testhashs32_fnv1:int (id:16, hash:"fnv1_32");
+ testhashu32_fnv1:uint (id:17, hash:"fnv1_32");
+ testhashs64_fnv1:long (id:18, hash:"fnv1_64");
+ testhashu64_fnv1:ulong (id:19, hash:"fnv1_64");
+ testhashs32_fnv1a:int (id:20, hash:"fnv1a_32");
+ testhashu32_fnv1a:uint (id:21, hash:"fnv1a_32", cpp_type:"Stat");
+ testhashs64_fnv1a:long (id:22, hash:"fnv1a_64");
+ testhashu64_fnv1a:ulong (id:23, hash:"fnv1a_64");
+
+ // Google's flatc uses Pi as the default but we don't because it
+ // messes up JSON tests: the numeric print format is
+ // configuration dependent.
+ //testf:float = 3.14159 (id:25);
+ testf:float = 3.14159e5 (id:25);
+ testf2:float = 3 (id:26);
+ testf3:float (id:27);
+ flex:[ubyte] (id:30, flexbuffer);
+ vector_of_longs:[long] (id:32);
+ vector_of_doubles:[double] (id:33);
+ parent_namespace_test:InParentNamespace (id:34);
+ testbase64:TestBase64 (id:35);
+}
+
+table TypeAliases {
+ i8:int8;
+ u8:uint8;
+ i16:int16;
+ u16:uint16;
+ i32:int32;
+ u32:uint32;
+ i64:int64;
+ u64:uint64;
+ f32:float32;
+ f64:float64;
+ v8:[int8];
+ vf64:[float64];
+}
+
+rpc_service MonsterStorage {
+ Store(Monster):Stat (streaming: "none");
+ Retrieve(Stat):Monster (streaming: "server", idempotent);
+}
+
+// Demonstrates the ability to have vectors of unions, and also to
+// store structs and strings in unions.
+
+namespace Fantasy;
+
+table Attacker {
+ sword_attack_damage: int;
+}
+
+struct Rapunzel {
+ hair_length: uint16 (key);
+ travel_points: int (deprecated);
+}
+
+struct BookReader {
+ books_read: int;
+}
+
+union Character {
+ MuLan: Attacker = 2, // The member name can differ from the type.
+ Rapunzel = 8, // Or just both the same, as before.
+ Belle: Fantasy.BookReader,
+ BookFan: BookReader,
+ Other: string,
+ Unused: string = 255
+}
+
+table Movie {
+ main_character: Character;
+ antagonist: Character;
+ side_kick: Character;
+ cameo: Character;
+ characters: [Character];
+}
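+
+// The monster_test.c tests above exercise this by pushing table, struct and
+// string members into the characters vector, e.g. with
+// Movie_characters_MuLan_push(), Movie_characters_Belle_push_create() and
+// Movie_characters_Other_push_create_str().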
+
+root_type MyGame.Example.Monster;
+
+file_identifier "MONS";
+file_extension "mon";
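+
+// The file_identifier above is what the C tests check with
+// nsc(has_identifier(buffer, "MONS")) and what the generated
+// <Name>_file_identifier define (used e.g. by the prefix test as
+// zzz_MyGame_Example_Monster_file_identifier) expands to.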
+
+// Out of order enums
+
+enum ReorderedEnum: int { rx = 10, ry = 1, rz = 9 }
+
+enum ReorderedColor:byte (bit_flags) { RBlue = 3, RRed = 0, RGreen }
+
diff --git a/test/monster_test_concat/CMakeLists.txt b/test/monster_test_concat/CMakeLists.txt
new file mode 100644
index 0000000..dab13f5
--- /dev/null
+++ b/test/monster_test_concat/CMakeLists.txt
@@ -0,0 +1,21 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_test_concat ALL)
+add_custom_command (
+ TARGET gen_monster_test_concat
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ # We could also use the recursive -r option, but this tests adding files manually to the output file.
+ COMMAND flatcc_cli -cwv --reader -o "${GEN_DIR}" "--outfile=monster_test.h" "${FBS_DIR}/attributes.fbs" "${FBS_DIR}/include_test2.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/monster_test.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs" "${FBS_DIR}/attributes.fbs"
+)
+include_directories("${GEN_DIR}" "${INC_DIR}")
+add_executable(monster_test_concat monster_test_concat.c)
+add_dependencies(monster_test_concat gen_monster_test_concat)
+target_link_libraries(monster_test_concat flatccrt)
+
+add_test(monster_test_concat monster_test_concat${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/monster_test_concat/README.txt b/test/monster_test_concat/README.txt
new file mode 100644
index 0000000..4924660
--- /dev/null
+++ b/test/monster_test_concat/README.txt
@@ -0,0 +1,2 @@
+This test is identical to monster_test_solo, except that the generated header
+is written directly to a file with --outfile instead of redirecting --stdout.
diff --git a/test/monster_test_concat/monster_test_concat.c b/test/monster_test_concat/monster_test_concat.c
new file mode 100644
index 0000000..3005580
--- /dev/null
+++ b/test/monster_test_concat/monster_test_concat.c
@@ -0,0 +1,24 @@
+/* Minimal test with all headers generated into a single file. */
+#include "monster_test.h"
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ void *buf;
+ size_t size;
+ flatcc_builder_t builder, *B;
+
+ (void)argc;
+ (void)argv;
+
+ B = &builder;
+ flatcc_builder_init(B);
+
+ MyGame_Example_Monster_start_as_root(B);
+ MyGame_Example_Monster_name_create_str(B, "MyMonster");
+ MyGame_Example_Monster_end_as_root(B);
+ buf = flatcc_builder_get_direct_buffer(B, &size);
+ ret = MyGame_Example_Monster_verify_as_root(buf, size);
+ flatcc_builder_clear(B);
+ return ret;
+}
diff --git a/test/monster_test_cpp/CMakeLists.txt b/test/monster_test_cpp/CMakeLists.txt
new file mode 100644
index 0000000..dd576ca
--- /dev/null
+++ b/test/monster_test_cpp/CMakeLists.txt
@@ -0,0 +1,24 @@
+include(CTest)
+
+# Note: This re-uses the samples/monster fbs and .c file.
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+# We use our own separate gen dir so we don't clash with the real monster sample.
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated/monster_test_cpp")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/samples/monster")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_test_cpp ALL)
+add_custom_command (
+ TARGET gen_monster_test_cpp
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -a -o "${GEN_DIR}" "${FBS_DIR}/monster.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster.fbs"
+)
+
+add_executable(monster_test_cpp monster_test.cpp)
+add_dependencies(monster_test_cpp gen_monster_test_cpp)
+target_link_libraries(monster_test_cpp flatccrt)
+
+add_test(monster_test_cpp monster_test_cpp${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/monster_test_cpp/monster_test.cpp b/test/monster_test_cpp/monster_test.cpp
new file mode 100644
index 0000000..9a7477a
--- /dev/null
+++ b/test/monster_test_cpp/monster_test.cpp
@@ -0,0 +1,3 @@
+extern "C" {
+#include "../../samples/monster/monster.c"
+}
diff --git a/test/monster_test_prefix/CMakeLists.txt b/test/monster_test_prefix/CMakeLists.txt
new file mode 100644
index 0000000..1e26761
--- /dev/null
+++ b/test/monster_test_prefix/CMakeLists.txt
@@ -0,0 +1,20 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_test_prefix ALL)
+add_custom_command (
+ TARGET gen_monster_test_prefix
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -a --prefix=zzz_ --stdout "${FBS_DIR}/monster_test.fbs" > "${GEN_DIR}/zzz_monster_test.h"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs"
+)
+add_executable(monster_test_prefix monster_test_prefix.c)
+add_dependencies(monster_test_prefix gen_monster_test_prefix)
+target_link_libraries(monster_test_prefix flatccrt)
+
+add_test(monster_test_prefix monster_test_prefix${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/monster_test_prefix/monster_test_prefix.c b/test/monster_test_prefix/monster_test_prefix.c
new file mode 100644
index 0000000..bb070b2
--- /dev/null
+++ b/test/monster_test_prefix/monster_test_prefix.c
@@ -0,0 +1,24 @@
+/* Minimal test with all headers generated into a single file. */
+#include "zzz_monster_test.h"
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ void *buf;
+ size_t size;
+ flatcc_builder_t builder, *B;
+
+ (void)argc;
+ (void)argv;
+
+ B = &builder;
+ flatcc_builder_init(B);
+
+ zzz_MyGame_Example_Monster_start_as_root(B);
+ zzz_MyGame_Example_Monster_name_create_str(B, "MyMonster");
+ zzz_MyGame_Example_Monster_end_as_root(B);
+ buf = flatcc_builder_get_direct_buffer(B, &size);
+ ret = zzz_MyGame_Example_Monster_verify_as_root_with_identifier(buf, size, zzz_MyGame_Example_Monster_file_identifier);
+ flatcc_builder_clear(B);
+ return ret;
+}
diff --git a/test/monster_test_solo/CMakeLists.txt b/test/monster_test_solo/CMakeLists.txt
new file mode 100644
index 0000000..a974434
--- /dev/null
+++ b/test/monster_test_solo/CMakeLists.txt
@@ -0,0 +1,21 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_monster_test_solo ALL)
+add_custom_command (
+ TARGET gen_monster_test_solo
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -cwv --reader --stdout "${FBS_DIR}/attributes.fbs" "${FBS_DIR}/include_test2.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/monster_test.fbs" > "${GEN_DIR}/monster_test.h"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs" "${FBS_DIR}/attributes.fbs"
+)
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+add_executable(monster_test_solo monster_test_solo.c)
+add_dependencies(monster_test_solo gen_monster_test_solo)
+target_link_libraries(monster_test_solo flatccrt)
+
+add_test(monster_test_solo monster_test_solo${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/monster_test_solo/monster_test_solo.c b/test/monster_test_solo/monster_test_solo.c
new file mode 100644
index 0000000..3005580
--- /dev/null
+++ b/test/monster_test_solo/monster_test_solo.c
@@ -0,0 +1,24 @@
+/* Minimal test with all headers generated into a single file. */
+#include "monster_test.h"
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ void *buf;
+ size_t size;
+ flatcc_builder_t builder, *B;
+
+ (void)argc;
+ (void)argv;
+
+ B = &builder;
+ flatcc_builder_init(B);
+
+ MyGame_Example_Monster_start_as_root(B);
+ MyGame_Example_Monster_name_create_str(B, "MyMonster");
+ MyGame_Example_Monster_end_as_root(B);
+ buf = flatcc_builder_get_direct_buffer(B, &size);
+ ret = MyGame_Example_Monster_verify_as_root(buf, size);
+ flatcc_builder_clear(B);
+ return ret;
+}
diff --git a/test/optional_scalars_test/CMakeLists.txt b/test/optional_scalars_test/CMakeLists.txt
new file mode 100644
index 0000000..b13c1b9
--- /dev/null
+++ b/test/optional_scalars_test/CMakeLists.txt
@@ -0,0 +1,19 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/optional_scalars_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_optional_scalars_test ALL)
+add_custom_command (
+ TARGET gen_optional_scalars_test
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli -a --json -o "${GEN_DIR}" "${FBS_DIR}/optional_scalars_test.fbs"
+)
+add_executable(optional_scalars_test optional_scalars_test.c)
+add_dependencies(optional_scalars_test gen_optional_scalars_test)
+target_link_libraries(optional_scalars_test flatccrt)
+
+add_test(optional_scalars_test optional_scalars_test${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/optional_scalars_test/optional_scalars_test.c b/test/optional_scalars_test/optional_scalars_test.c
new file mode 100644
index 0000000..7566c05
--- /dev/null
+++ b/test/optional_scalars_test/optional_scalars_test.c
@@ -0,0 +1,280 @@
+#include <assert.h>
+#include <stdio.h>
+#include <string.h> /* strlen, memcmp */
+
+#include "optional_scalars_test_builder.h"
+#include "optional_scalars_test_json_printer.h"
+#include "optional_scalars_test_json_parser.h"
+
+
+#undef ns
+#define ns(x) FLATBUFFERS_WRAP_NAMESPACE(optional_scalars, x)
+
+// #define TEST_ASSERT
+
+#ifdef TEST_ASSERT
+#define test_assert(x) do { if (!(x)) { assert(0); return -1; }} while(0)
+#else
+#define test_assert(x) do { if (!(x)) { return -1; }} while(0)
+#endif
+
+int create_scalar_stuff(flatcc_builder_t *builder)
+{
+ ns(ScalarStuff_start_as_root(builder));
+
+ ns(ScalarStuff_just_i8_add(builder, 10));
+ ns(ScalarStuff_maybe_i8_add(builder, 11));
+ ns(ScalarStuff_default_i8_add(builder, 12));
+
+ ns(ScalarStuff_just_i16_add(builder, 42));
+ ns(ScalarStuff_maybe_i16_add(builder, 42));
+ ns(ScalarStuff_default_i16_add(builder, 42));
+
+ ns(ScalarStuff_just_u32_add(builder, 0));
+ ns(ScalarStuff_maybe_u32_add(builder, 0));
+ ns(ScalarStuff_default_u32_add(builder, 0));
+
+ ns(ScalarStuff_just_f32_add(builder, 42));
+ ns(ScalarStuff_maybe_f32_add(builder, 42));
+ ns(ScalarStuff_default_f32_add(builder, 42));
+
+ ns(ScalarStuff_just_bool_add(builder, 1));
+ ns(ScalarStuff_maybe_bool_add(builder, 1));
+ ns(ScalarStuff_default_bool_add(builder, 1));
+
+ ns(ScalarStuff_just_enum_add)(builder, ns(OptionalByte_One));
+ ns(ScalarStuff_maybe_enum_add)(builder, ns(OptionalByte_One));
+ ns(ScalarStuff_default_enum_add)(builder, ns(OptionalByte_One));
+
+ ns(ScalarStuff_just_xfactor_add)(builder, ns(OptionalFactor_Twice));
+ ns(ScalarStuff_maybe_xfactor_add)(builder, ns(OptionalFactor_Twice));
+ ns(ScalarStuff_default_xfactor_add)(builder, ns(OptionalFactor_Twice));
+
+ ns(ScalarStuff_end_as_root(builder));
+
+ return 0;
+}
+
+int access_scalar_stuff(const void *buf)
+{
+ ns(ScalarStuff_table_t) t = ns(ScalarStuff_as_root(buf));
+ flatbuffers_int8_option_t maybe_i8;
+ flatbuffers_int16_option_t maybe_i16;
+ flatbuffers_uint32_option_t maybe_u32;
+ flatbuffers_uint8_option_t maybe_u8;
+ flatbuffers_float_option_t maybe_f32;
+ flatbuffers_bool_option_t maybe_bool;
+ ns(OptionalByte_option_t) maybe_enum;
+ ns(OptionalFactor_option_t) maybe_xfactor;
+ ns(OptionalFactor_option_t) maybe_yfactor;
+
+ test_assert(10 == ns(ScalarStuff_just_i8_get(t)));
+ test_assert(11 == ns(ScalarStuff_maybe_i8_get(t)));
+ test_assert(12 == ns(ScalarStuff_default_i8_get(t)));
+ maybe_i8 = ns(ScalarStuff_maybe_i8_option(t));
+ test_assert(!maybe_i8.is_null);
+ test_assert(11 == maybe_i8.value);
+ test_assert(ns(ScalarStuff_just_i8_is_present(t)));
+ test_assert(ns(ScalarStuff_maybe_i8_is_present(t)));
+ test_assert(ns(ScalarStuff_default_i8_is_present(t)));
+
+ test_assert(0 == ns(ScalarStuff_just_u8_get(t)));
+ test_assert(0 == ns(ScalarStuff_maybe_u8_get(t)));
+ test_assert(42 == ns(ScalarStuff_default_u8_get(t)));
+ maybe_u8 = ns(ScalarStuff_maybe_u8_option(t));
+ test_assert(maybe_u8.is_null);
+ test_assert(0 == maybe_u8.value);
+ test_assert(!ns(ScalarStuff_just_u8_is_present(t)));
+ test_assert(!ns(ScalarStuff_maybe_u8_is_present(t)));
+ test_assert(!ns(ScalarStuff_default_u8_is_present(t)));
+
+ test_assert(42 == ns(ScalarStuff_just_i16_get(t)));
+ test_assert(42 == ns(ScalarStuff_maybe_i16_get(t)));
+ test_assert(42 == ns(ScalarStuff_default_i16_get(t)));
+ maybe_i16 = ns(ScalarStuff_maybe_i16_option(t));
+ test_assert(!maybe_i16.is_null);
+ test_assert(42 == maybe_i16.value);
+ test_assert(ns(ScalarStuff_just_i16_is_present(t)));
+ test_assert(ns(ScalarStuff_maybe_i16_is_present(t)));
+ test_assert(!ns(ScalarStuff_default_i16_is_present(t)));
+
+ test_assert(0 == ns(ScalarStuff_just_u32_get(t)));
+ test_assert(0 == ns(ScalarStuff_maybe_u32_get(t)));
+ test_assert(0 == ns(ScalarStuff_default_u32_get(t)));
+ maybe_u32 = ns(ScalarStuff_maybe_u32_option(t));
+ test_assert(!maybe_u32.is_null);
+ test_assert(0 == maybe_u32.value);
+ test_assert(!ns(ScalarStuff_just_u32_is_present(t)));
+ test_assert(ns(ScalarStuff_maybe_u32_is_present(t)));
+ test_assert(ns(ScalarStuff_default_u32_is_present(t)));
+
+ test_assert(42 == ns(ScalarStuff_just_f32_get(t)));
+ test_assert(42 == ns(ScalarStuff_maybe_f32_get(t)));
+ test_assert(42 == ns(ScalarStuff_default_f32_get(t)));
+ maybe_f32 = ns(ScalarStuff_maybe_f32_option(t));
+ test_assert(!maybe_f32.is_null);
+ test_assert(42 == maybe_f32.value);
+ test_assert(ns(ScalarStuff_just_f32_is_present(t)));
+ test_assert(ns(ScalarStuff_maybe_f32_is_present(t)));
+ test_assert(!ns(ScalarStuff_default_f32_is_present(t)));
+
+ test_assert(1 == ns(ScalarStuff_just_bool_get(t)));
+ test_assert(1 == ns(ScalarStuff_maybe_bool_get(t)));
+ test_assert(1 == ns(ScalarStuff_default_bool_get(t)));
+ maybe_bool = ns(ScalarStuff_maybe_bool_option(t));
+ test_assert(!maybe_bool.is_null);
+ test_assert(1 == maybe_bool.value);
+ test_assert(ns(ScalarStuff_just_bool_is_present(t)));
+ test_assert(ns(ScalarStuff_maybe_bool_is_present(t)));
+ test_assert(!ns(ScalarStuff_default_bool_is_present(t)));
+
+ test_assert(1 == ns(ScalarStuff_just_enum_get(t)));
+ test_assert(1 == ns(ScalarStuff_maybe_enum_get(t)));
+ test_assert(1 == ns(ScalarStuff_default_enum_get(t)));
+ maybe_enum = ns(ScalarStuff_maybe_enum_option(t));
+ test_assert(!maybe_enum.is_null);
+ test_assert(maybe_enum.value == 1);
+ test_assert(ns(ScalarStuff_just_enum_is_present(t)));
+ test_assert(ns(ScalarStuff_maybe_enum_is_present(t)));
+ test_assert(!ns(ScalarStuff_default_enum_is_present(t)));
+
+ test_assert(2 == ns(ScalarStuff_just_xfactor_get(t)));
+ test_assert(2 == ns(ScalarStuff_maybe_xfactor_get(t)));
+ test_assert(2 == ns(ScalarStuff_default_xfactor_get(t)));
+ maybe_xfactor = ns(ScalarStuff_maybe_xfactor_option(t));
+ test_assert(!maybe_xfactor.is_null);
+ test_assert(maybe_xfactor.value == 2);
+ test_assert(ns(ScalarStuff_just_xfactor_is_present(t)));
+ test_assert(ns(ScalarStuff_maybe_xfactor_is_present(t)));
+ test_assert(!ns(ScalarStuff_default_xfactor_is_present(t)));
+
+ test_assert(1 == ns(ScalarStuff_just_yfactor_get(t)));
+ test_assert(0 == ns(ScalarStuff_maybe_yfactor_get(t)));
+ test_assert(2 == ns(ScalarStuff_default_yfactor_get(t)));
+ maybe_yfactor = ns(ScalarStuff_maybe_yfactor_option(t));
+ test_assert(maybe_yfactor.is_null);
+ test_assert(maybe_yfactor.value == 0);
+ test_assert(!ns(ScalarStuff_just_yfactor_is_present(t)));
+ test_assert(!ns(ScalarStuff_maybe_yfactor_is_present(t)));
+ test_assert(!ns(ScalarStuff_default_yfactor_is_present(t)));
+ return 0;
+}
+
+int test(void)
+{
+ flatcc_builder_t builder;
+ void *buf;
+ size_t size;
+
+ flatcc_builder_init(&builder);
+ test_assert(0 == create_scalar_stuff(&builder));
+ buf = flatcc_builder_finalize_aligned_buffer(&builder, &size);
+ test_assert(0 == access_scalar_stuff(buf));
+ flatcc_builder_aligned_free(buf);
+ flatcc_builder_clear(&builder);
+
+ return 0;
+}
+
+const char *expected_json =
+"{\"just_i8\":10,\"maybe_i8\":11,\"default_i8\":12,\"just_i16\":42,\"maybe_i16\":42,\"maybe_u32\":0,\"default_u32\":0,\"just_f32\":42,\"maybe_f32\":42,\"just_bool\":true,\"maybe_bool\":true,\"just_enum\":\"One\",\"maybe_enum\":\"One\",\"just_xfactor\":\"Twice\",\"maybe_xfactor\":\"Twice\"}";
+
+#if 0
+int print_buffer(const void *buf, size_t size)
+{
+ flatcc_json_printer_t printer;
+ flatcc_json_printer_init(&printer, 0);
+ ns(ScalarStuff_print_json_as_root)(&printer, buf, size, NULL);
+ if (flatcc_json_printer_get_error(&printer)) {
+ printf("could not print buffer\n");
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+int test_json_printer(void)
+{
+ flatcc_builder_t builder;
+ void *buf;
+ size_t size;
+ flatcc_json_printer_t printer;
+ char *json_buf;
+ size_t json_size;
+
+ flatcc_builder_init(&builder);
+ test_assert(0 == create_scalar_stuff(&builder));
+ buf = flatcc_builder_finalize_aligned_buffer(&builder, &size);
+ test_assert(0 == access_scalar_stuff(buf));
+ flatcc_builder_clear(&builder);
+ flatcc_json_printer_init_dynamic_buffer(&printer, 0);
+ test_assert(ns(ScalarStuff_print_json_as_root)(&printer, buf, size, NULL));
+ flatcc_builder_aligned_free(buf);
+ json_buf = flatcc_json_printer_get_buffer(&printer, &json_size);
+ printf("%.*s\n", (int)json_size, json_buf);
+ test_assert(strlen(expected_json) == json_size);
+ test_assert(0 == memcmp(expected_json, json_buf, json_size));
+
+
+ flatcc_json_printer_clear(&printer);
+ return 0;
+}
+
+int test_json_parser(void)
+{
+ flatcc_builder_t builder;
+ void *buf;
+ size_t size;
+ flatcc_json_parser_t parser;
+ flatcc_json_printer_t printer;
+ char *json_buf;
+ size_t json_size;
+ int ret;
+
+ flatcc_builder_init(&builder);
+ ret = optional_scalars_ScalarStuff_parse_json_as_root(&builder,
+ &parser, expected_json, strlen(expected_json), 0, 0);
+ test_assert(ret == 0);
+
+ buf = flatcc_builder_finalize_aligned_buffer(&builder, &size);
+
+ flatcc_json_printer_init_dynamic_buffer(&printer, 0);
+ ns(ScalarStuff_print_json_as_root)(&printer, buf, size, NULL);
+ if (flatcc_json_printer_get_error(&printer)) {
+ printf("could not print buffer\n");
+ return -1;
+ }
+ test_assert(0 == access_scalar_stuff(buf));
+
+ json_buf = flatcc_json_printer_get_buffer(&printer, &json_size);
+ printf("%.*s\n", (int)json_size, json_buf);
+ test_assert(strlen(expected_json) == json_size);
+ test_assert(0 == memcmp(expected_json, json_buf, json_size));
+ flatcc_json_printer_clear(&printer);
+
+ flatcc_builder_aligned_free(buf);
+ flatcc_builder_clear(&builder);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ /* Silence warnings. */
+ (void)argc;
+ (void)argv;
+
+ if (test()) {
+ printf("optional scalars test failed");
+ return 1;
+ }
+ if (test_json_printer()) {
+ printf("optional scalars json printer test failed");
+ return 1;
+ }
+ if (test_json_parser()) {
+ printf("optional scalars json parser test failed");
+ return 1;
+ }
+ printf("optional scalars test passed");
+ return 0;
+}
+
diff --git a/test/optional_scalars_test/optional_scalars_test.fbs b/test/optional_scalars_test/optional_scalars_test.fbs
new file mode 100644
index 0000000..ba4c9d4
--- /dev/null
+++ b/test/optional_scalars_test/optional_scalars_test.fbs
@@ -0,0 +1,71 @@
+namespace optional_scalars;
+
+enum OptionalByte: byte {
+ None = 0,
+ One = 1,
+}
+
+// Enums without a 0 element normally requires an initializer
+// which is a problem when = null is the default. In this case
+// the default value is forced to 0 when a reader insists on
+// getting a numerical value instead of null.
+enum OptionalFactor: byte {
+ Once = 1,
+ Twice = 2,
+}
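+
+// For example, in the C test above the plain getter for maybe_yfactor
+// (declared '= null' below) returns 0 even though 0 is not a declared
+// member, while the corresponding _option() accessor reports is_null
+// instead of a value.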
+
+// This table tests optional scalars in tables. It should be integrated with
+// the main monster test once most languages support optional scalars.
+table ScalarStuff {
+ just_i8: int8;
+ maybe_i8: int8 = null;
+ default_i8: int8 = 42;
+ just_u8: uint8;
+ maybe_u8: uint8 = null;
+ default_u8: uint8 = 42;
+
+ just_i16: int16;
+ maybe_i16: int16 = null;
+ default_i16: int16 = 42;
+ just_u16: uint16;
+ maybe_u16: uint16 = null;
+ default_u16: uint16 = 42;
+
+ just_i32: int32;
+ maybe_i32: int32 = null;
+ default_i32: int32 = 42;
+ just_u32: uint32;
+ maybe_u32: uint32 = null;
+ default_u32: uint32 = 42;
+
+ just_i64: int64;
+ maybe_i64: int64 = null;
+ default_i64: int64 = 42;
+ just_u64: uint64;
+ maybe_u64: uint64 = null;
+ default_u64: uint64 = 42;
+
+ just_f32: float32;
+ maybe_f32: float32 = null;
+ default_f32: float32 = 42;
+ just_f64: float64;
+ maybe_f64: float64 = null;
+ default_f64: float64 = 42;
+
+ just_bool: bool;
+ maybe_bool: bool = null;
+ default_bool: bool = true;
+
+ just_enum: OptionalByte;
+ maybe_enum: OptionalByte = null;
+ default_enum: OptionalByte = One;
+
+ just_xfactor: OptionalFactor = Once;
+ maybe_xfactor: OptionalFactor = null;
+ default_xfactor: OptionalFactor = Twice;
+
+ just_yfactor: OptionalFactor = Once;
+ maybe_yfactor: OptionalFactor = null;
+ default_yfactor: OptionalFactor = Twice;
+
+}
diff --git a/test/reflection_test/CMakeLists.txt b/test/reflection_test/CMakeLists.txt
new file mode 100644
index 0000000..f82d1f5
--- /dev/null
+++ b/test/reflection_test/CMakeLists.txt
@@ -0,0 +1,20 @@
+include(CTest)
+
+set(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+set(FBS_DIR "${PROJECT_SOURCE_DIR}/test/monster_test")
+
+include_directories("${GEN_DIR}" "${INC_DIR}")
+
+add_custom_target(gen_reflection_test ALL)
+add_custom_command (
+ TARGET gen_reflection_test
+ COMMAND ${CMAKE_COMMAND} -E make_directory "${GEN_DIR}"
+ COMMAND flatcc_cli --schema -o "${GEN_DIR}" "${FBS_DIR}/monster_test.fbs"
+ DEPENDS flatcc_cli "${FBS_DIR}/monster_test.fbs" "${FBS_DIR}/include_test1.fbs" "${FBS_DIR}/include_test2.fbs"
+)
+add_executable(reflection_test reflection_test.c)
+add_dependencies(reflection_test gen_reflection_test)
+target_link_libraries(reflection_test flatccrt)
+
+add_test(reflection_test reflection_test${CMAKE_EXECUTABLE_SUFFIX})
diff --git a/test/reflection_test/reflection_test.c b/test/reflection_test/reflection_test.c
new file mode 100644
index 0000000..eef0bd1
--- /dev/null
+++ b/test/reflection_test/reflection_test.c
@@ -0,0 +1,196 @@
+#include "flatcc/support/readfile.h"
+#include "flatcc/reflection/reflection_reader.h"
+#include "flatcc/portable/pcrt.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+
+/* This is not an exhaustive test. */
+int test_schema(const char *monster_bfbs)
+{
+ void *buffer;
+ size_t size;
+ int ret = -1;
+ reflection_Schema_table_t S;
+ reflection_Object_vec_t Objs;
+ reflection_Object_table_t Obj;
+ reflection_Field_vec_t Flds;
+ reflection_Field_table_t F;
+ reflection_Type_table_t T;
+ size_t k, monster_index;
+ reflection_Service_vec_t Svcs;
+ reflection_Service_table_t Svc;
+ reflection_RPCCall_vec_t Calls;
+ reflection_RPCCall_table_t Call;
+ size_t call_index;
+ const char *strval;
+
+ buffer = readfile(monster_bfbs, 100000, &size);
+ if (!buffer) {
+ printf("failed to load binary schema\n");
+ goto done;
+ }
+ S = reflection_Schema_as_root(buffer);
+ Objs = reflection_Schema_objects(S);
+ for (k = 0; k < reflection_Object_vec_len(Objs); ++k) {
+ printf("dbg: obj #%d : %s\n", (int)k,
+ reflection_Object_name(reflection_Object_vec_at(Objs, k)));
+ }
+ k = reflection_Object_vec_find(Objs, "MyGame.Example.Monster");
+ if (k == flatbuffers_not_found) {
+ printf("Could not find monster in schema\n");
+ goto done;
+ }
+ monster_index = k;
+ Obj = reflection_Object_vec_at(Objs, k);
+ if (strcmp(reflection_Object_name(Obj), "MyGame.Example.Monster")) {
+ printf("Found wrong object in schema\n");
+ goto done;
+ }
+ Flds = reflection_Object_fields(Obj);
+ k = reflection_Field_vec_find(Flds, "mana");
+ if (k == flatbuffers_not_found) {
+ printf("Did not find mana field in Monster schema\n");
+ goto done;
+ }
+ F = reflection_Field_vec_at(Flds, k);
+ if (reflection_Field_default_integer(F) != 150) {
+ printf("mana field has wrong default value\n");
+ printf("field name: %s\n", reflection_Field_name(F));
+ printf("%"PRId64"\n", (int64_t)reflection_Field_default_integer(F));
+ goto done;
+ }
+ T = reflection_Field_type(F);
+ if (reflection_Type_base_type(T) != reflection_BaseType_Short) {
+ printf("mana field has wrong type\n");
+ goto done;
+ }
+ if (reflection_Field_optional(F)) {
+ printf("mana field is not optional\n");
+ goto done;
+ }
+ k = reflection_Field_vec_find(Flds, "enemy");
+ if (k == flatbuffers_not_found) {
+ printf("enemy field not found\n");
+ goto done;
+ }
+ T = reflection_Field_type(reflection_Field_vec_at(Flds, k));
+ if (reflection_Type_base_type(T) != reflection_BaseType_Obj) {
+ printf("enemy is not an object\n");
+ goto done;
+ }
+ if (reflection_Type_index(T) != (int32_t)monster_index) {
+ printf("enemy is not a monster\n");
+ goto done;
+ }
+ k = reflection_Field_vec_find(Flds, "testarrayoftables");
+ if (k == flatbuffers_not_found) {
+ printf("array of tables not found\n");
+ goto done;
+ }
+ T = reflection_Field_type(reflection_Field_vec_at(Flds, k));
+ if (reflection_Type_base_type(T) != reflection_BaseType_Vector) {
+ printf("array of tables is not of vector type\n");
+ goto done;
+ }
+ if (reflection_Type_element(T) != reflection_BaseType_Obj) {
+ printf("array of tables is not a vector of table type\n");
+ goto done;
+ }
+ if (reflection_Type_index(T) != (int32_t)monster_index) {
+ printf("array of tables is not a monster vector\n");
+ goto done;
+ }
+ /* list services and calls */
+ Svcs = reflection_Schema_services(S);
+ for (k = 0; k < reflection_Service_vec_len(Svcs); ++k) {
+ Svc = reflection_Service_vec_at(Svcs, k);
+ printf("dbg: svc #%d : %s\n", (int)k,
+ reflection_Service_name(Svc));
+ Calls = reflection_Service_calls(Svc);
+ for (call_index = 0 ;
+ call_index < reflection_RPCCall_vec_len(Calls) ;
+ call_index++) {
+ Call = reflection_RPCCall_vec_at(Calls, call_index);
+ printf("dbg: call %d : %s\n", (int)call_index,
+ reflection_RPCCall_name(Call));
+ }
+ }
+ /* Within service MyGame.Example.MonsterStorage ... */
+ k = reflection_Service_vec_find(Svcs, "MyGame.Example.MonsterStorage");
+ if (k == flatbuffers_not_found) {
+ printf("Could not find MonsterStorage service in schema\n");
+ goto done;
+ }
+ Svc = reflection_Service_vec_at(Svcs, k);
+ /* ... search the RPC call Store */
+ Calls = reflection_Service_calls(Svc);
+ k = reflection_RPCCall_vec_find(Calls, "Store");
+ if (k == flatbuffers_not_found) {
+ printf("Could not find call Store in service\n");
+ goto done;
+ }
+ Call = reflection_RPCCall_vec_at(Calls, k);
+ /* Ensure request type is MyGame.Example.Monster */
+ Obj = reflection_Object_vec_at(Objs, monster_index);
+ if (Obj != reflection_RPCCall_request(Call)) {
+ printf("Wrong request type of rpc call\n");
+ goto done;
+ }
+ /* Ensure response type is MyGame.Example.Stat */
+ k = reflection_Object_vec_find(Objs, "MyGame.Example.Stat");
+ if (k == flatbuffers_not_found) {
+ printf("Could not find Stat in schema\n");
+ goto done;
+ }
+ Obj = reflection_Object_vec_at(Objs, k);
+ if (Obj != reflection_RPCCall_response(Call)) {
+ printf("Wrong response type of rpc call\n");
+ goto done;
+ }
+ /* check the call has an attribute "streaming" */
+ k = reflection_KeyValue_vec_scan(reflection_RPCCall_attributes(Call), "streaming");
+ if (k == flatbuffers_not_found) {
+ printf("Could not find attribute in call\n");
+ goto done;
+ }
+ /* check the attribute value is "none" */
+ strval = reflection_KeyValue_value(
+ reflection_KeyValue_vec_at(reflection_RPCCall_attributes(Call), k));
+ if (!strval || 0 != strcmp("none", strval)) {
+ printf("Wrong attribute value in call\n");
+ goto done;
+ }
+ ret = 0;
+done:
+ if (buffer) {
+ free(buffer);
+ }
+ return ret;
+}
+
+/* We take arguments so the test can run without copying sources. */
+#define usage \
+"wrong number of arguments:\n" \
+"usage: <program> [<output-filename>]\n"
+
+const char *filename = "generated/monster_test.bfbs";
+
+int main(int argc, char *argv[])
+{
+ /* Avoid assert dialogs on Windows. */
+ init_headless_crt();
+
+ if (argc != 1 && argc != 2) {
+ fprintf(stderr, usage);
+ exit(1);
+ }
+ if (argc == 2) {
+ filename = argv[1];
+ }
+
+ return test_schema(filename);
+}
diff --git a/test/reflection_test/reflection_test.sh b/test/reflection_test/reflection_test.sh
new file mode 100755
index 0000000..f1ad69c
--- /dev/null
+++ b/test/reflection_test/reflection_test.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+cd `dirname $0`/../..
+ROOT=`pwd`
+TMP=${ROOT}/build/tmp/test/reflection_test
+
+CC=${CC:-cc}
+${ROOT}/scripts/build.sh
+mkdir -p ${TMP}/generated
+rm -rf ${TMP}/generated/*
+bin/flatcc --schema -o ${TMP}/generated test/monster_test/monster_test.fbs
+
+cp test/reflection_test/*.c ${TMP}
+cd ${TMP}
+
+$CC -g -I ${ROOT}/include reflection_test.c \
+ ${ROOT}/lib/libflatccrt.a -o reflection_test_d
+$CC -O3 -DNDEBUG -I ${ROOT}/include reflection_test.c \
+ ${ROOT}/lib/libflatccrt.a -o reflection_test
+echo "running reflection test debug"
+./reflection_test_d
+echo "running reflection test optimized"
+./reflection_test
diff --git a/test/test.sh b/test/test.sh
new file mode 100755
index 0000000..d4fae6f
--- /dev/null
+++ b/test/test.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+
+echo "This is the old test script replaced by CMake's ctest"
+echo "driven by scritps/test.sh"
+echo "pausing 5 seconds - press ctrl+C to quit"
+
+sleep 5
+
+set -e
+cd `dirname $0`/..
+ROOT=`pwd`
+
+CC=${CC:-cc}
+${ROOT}/scripts/build.sh
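+# build.sh is expected to produce bin/flatcc and the runtime libraries in lib/.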
+
+TMP=${ROOT}/build/tmp/test
+INC=${ROOT}/include
+
+echo "" 1>&2
+
+mkdir -p ${TMP}
+rm -rf ${TMP}/*
+
+echo "running generation of complex schema (cgen_test)"
+${ROOT}/test/cgen_test/cgen_test.sh
+
+mkdir -p ${TMP}/monster_test
+
+mkdir -p ${TMP}/monster_test_smoke
+mkdir -p ${TMP}/monster_test_solo
+mkdir -p ${TMP}/monster_test_hello
+mkdir -p ${TMP}/monster_test_main
+
+#
+# These first tests only ensure that the generated code compiles;
+# they don't actually run tests against the API.
+#
+echo "generating smoke test generated monster source" 1>&2
+${ROOT}/bin/flatcc -I ${ROOT}/test/monster_test -a \
+ -o ${TMP}/monster_test_smoke ${ROOT}/test/monster_test/monster_test.fbs
+echo "#include \"monster_test_builder.h\"" > ${TMP}/monster_test_smoke/smoke_monster.c
+cd ${TMP}/monster_test_smoke && $CC -c -Wall -O3 -I ${INC} smoke_monster.c
+
+echo "generating smoke test generated monster source to single file" 1>&2
+${ROOT}/bin/flatcc -I ${ROOT}/test/monster_test -a --stdout \
+ ${ROOT}/test/monster_test/monster_test.fbs > ${TMP}/monster_test_solo/solo_monster.c
+cd ${TMP}/monster_test_solo && $CC -c -Wall -O3 -I ${INC} solo_monster.c
+
+echo "generating smoke test generated monster source with --prefix zzz --common-prefix hello" 1>&2
+${ROOT}/bin/flatcc -I ${ROOT}/test/monster_test -a \
+ --common-prefix hello --prefix zzz \
+ -o ${TMP}/monster_test_hello ${ROOT}/test/monster_test/monster_test.fbs
+echo "#include \"monster_test_builder.h\"" > ${TMP}/monster_test_hello/hello_monster.c
+cd ${TMP}/monster_test_hello && $CC -c -Wall -O3 -I ${INC} hello_monster.c
+
+#
+# This test ensures the reader API understands a monster buffer generated
+# by the external `flatc` tool by Google FPL.
+#
+echo "starting compat test"
+${ROOT}/test/flatc_compat/flatc_compat.sh
+
+echo "starting emit_test for altenative emitter backend smoke test"
+${ROOT}/test/emit_test/emit_test.sh
+
+#
+# This is the main `monster_test.c` test that covers nearly all
+# functionality of the reader and builder API for C.
+#
+echo "running main monster test"
+cd ${ROOT}/test/monster_test
+${ROOT}/bin/flatcc -I ${ROOT}/test/monster_test -a \
+ -o ${TMP}/monster_test_main ${ROOT}/test/monster_test/monster_test.fbs
+cd ${TMP}/monster_test_main
+cp ${ROOT}/test/monster_test/monster_test.c .
+$CC -g -I ${ROOT}/include monster_test.c \
+ ${ROOT}/lib/libflatccrt_d.a -o monster_test_d
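+# NDEBUG drops asserts in the optimized build; FLATBUFFERS_BENCHMARK is assumed
+# to enable the benchmark code in monster_test.c.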
+$CC -O3 -DNDEBUG -DFLATBUFFERS_BENCHMARK -I ${ROOT}/include monster_test.c \
+ ${ROOT}/lib/libflatccrt.a -o monster_test
+./monster_test_d
+
+echo "running optimized version of main monster test"
+./monster_test
+
+# This may fail if the reflection feature is disabled
+echo "running reflection test"
+${ROOT}/test/reflection_test/reflection_test.sh
+
+# This may fail if the reflection feature is disabled
+echo "running reflection sample"
+${ROOT}/samples/reflection/build.sh
+
+echo "running monster sample"
+${ROOT}/samples/monster/build.sh
+
+echo "running json test"
+${ROOT}/test/json_test/json_test.sh
+
+echo "running load test with large buffer"
+${ROOT}/test/load_test/load_test.sh
+
+echo "TEST PASSED"
diff --git a/test/union_vector_test/union_vector.fbs b/test/union_vector_test/union_vector.fbs
new file mode 100644
index 0000000..73b8800
--- /dev/null
+++ b/test/union_vector_test/union_vector.fbs
@@ -0,0 +1,26 @@
+table MuLan {
+ sword_attack_damage: int;
+}
+
+table Rapunzel {
+ hair_length: int;
+}
+
+table Belle {
+ books_read: int;
+}
+
+union Character {
+ MuLan,
+ Rapunzel,
+ Belle,
+}
+
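+// Movie exercises union vectors: a [Character] field is stored as a hidden
+// characters_type vector of union tags plus a parallel vector of members.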
+table Movie {
+ characters: [Character];
+ belles: [Belle];
+ character: Character;
+}
+
+root_type Movie;
+file_identifier "MOVI";
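+// The 4-character file_identifier is stored right after the root offset so
+// readers can check the buffer type.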