aboutsummaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/flatcc/flatcc.h268
-rw-r--r--include/flatcc/flatcc_accessors.h101
-rw-r--r--include/flatcc/flatcc_alloc.h127
-rw-r--r--include/flatcc/flatcc_assert.h45
-rw-r--r--include/flatcc/flatcc_builder.h1911
-rw-r--r--include/flatcc/flatcc_emitter.h215
-rw-r--r--include/flatcc/flatcc_endian.h125
-rw-r--r--include/flatcc/flatcc_epilogue.h8
-rw-r--r--include/flatcc/flatcc_flatbuffers.h55
-rw-r--r--include/flatcc/flatcc_identifier.h148
-rw-r--r--include/flatcc/flatcc_iov.h31
-rw-r--r--include/flatcc/flatcc_json_parser.h908
-rw-r--r--include/flatcc/flatcc_json_printer.h788
-rw-r--r--include/flatcc/flatcc_portable.h14
-rw-r--r--include/flatcc/flatcc_prologue.h8
-rw-r--r--include/flatcc/flatcc_refmap.h144
-rw-r--r--include/flatcc/flatcc_rtconfig.h162
-rw-r--r--include/flatcc/flatcc_types.h97
-rw-r--r--include/flatcc/flatcc_unaligned.h16
-rw-r--r--include/flatcc/flatcc_verifier.h239
-rw-r--r--include/flatcc/flatcc_version.h14
-rw-r--r--include/flatcc/portable/LICENSE14
-rw-r--r--include/flatcc/portable/README.md57
-rw-r--r--include/flatcc/portable/grisu3_math.h329
-rw-r--r--include/flatcc/portable/grisu3_parse.h582
-rw-r--r--include/flatcc/portable/grisu3_print.h265
-rw-r--r--include/flatcc/portable/include/README4
-rw-r--r--include/flatcc/portable/include/linux/endian.h1
-rw-r--r--include/flatcc/portable/include/std/inttypes.h1
-rw-r--r--include/flatcc/portable/include/std/stdalign.h1
-rw-r--r--include/flatcc/portable/include/std/stdbool.h1
-rw-r--r--include/flatcc/portable/include/std/stdint.h1
-rw-r--r--include/flatcc/portable/paligned_alloc.h212
-rw-r--r--include/flatcc/portable/pattributes.h84
-rw-r--r--include/flatcc/portable/pbase64.h448
-rw-r--r--include/flatcc/portable/pcrt.h48
-rw-r--r--include/flatcc/portable/pdiagnostic.h85
-rw-r--r--include/flatcc/portable/pdiagnostic_pop.h20
-rw-r--r--include/flatcc/portable/pdiagnostic_push.h51
-rw-r--r--include/flatcc/portable/pendian.h206
-rw-r--r--include/flatcc/portable/pendian_detect.h118
-rw-r--r--include/flatcc/portable/pinline.h19
-rw-r--r--include/flatcc/portable/pinttypes.h52
-rw-r--r--include/flatcc/portable/portable.h2
-rw-r--r--include/flatcc/portable/portable_basic.h25
-rw-r--r--include/flatcc/portable/pparsefp.h226
-rw-r--r--include/flatcc/portable/pparseint.h374
-rw-r--r--include/flatcc/portable/pprintfp.h39
-rw-r--r--include/flatcc/portable/pprintint.h628
-rw-r--r--include/flatcc/portable/pstatic_assert.h67
-rw-r--r--include/flatcc/portable/pstatic_assert_scope.h280
-rw-r--r--include/flatcc/portable/pstdalign.h162
-rw-r--r--include/flatcc/portable/pstdbool.h37
-rw-r--r--include/flatcc/portable/pstdint.h898
-rw-r--r--include/flatcc/portable/punaligned.h190
-rw-r--r--include/flatcc/portable/pversion.h6
-rw-r--r--include/flatcc/portable/pwarnings.h52
-rw-r--r--include/flatcc/reflection/README19
-rw-r--r--include/flatcc/reflection/flatbuffers_common_builder.h685
-rw-r--r--include/flatcc/reflection/flatbuffers_common_reader.h578
-rw-r--r--include/flatcc/reflection/reflection_builder.h457
-rw-r--r--include/flatcc/reflection/reflection_reader.h411
-rw-r--r--include/flatcc/reflection/reflection_verifier.h308
-rw-r--r--include/flatcc/support/README1
-rw-r--r--include/flatcc/support/cdump.h38
-rw-r--r--include/flatcc/support/elapsed.h73
-rw-r--r--include/flatcc/support/hexdump.h47
-rw-r--r--include/flatcc/support/readfile.h66
68 files changed, 13692 insertions, 0 deletions
diff --git a/include/flatcc/flatcc.h b/include/flatcc/flatcc.h
new file mode 100644
index 0000000..04eb187
--- /dev/null
+++ b/include/flatcc/flatcc.h
@@ -0,0 +1,268 @@
+#ifndef FLATCC_H
+#define FLATCC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This is the primary `flatcc` interface when compiling `flatcc` as a
+ * library. Functions and types in this interface will be kept
+ * stable to the extent possible or reasonable, but do not rely on other
+ * interfaces except "config.h" used to set default options for this
+ * interface.
+ *
+ * This interface is unrelated to the standalone flatbuilder library
+ * which has a life of its own.
+ */
+
+#include <stddef.h>
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4820) /* x bytes padding added in struct */
+#endif
+
+typedef struct flatcc_options flatcc_options_t;
+typedef void (*flatcc_error_fun) (void *err_ctx, const char *buf, size_t len);
+
+struct flatcc_options {
+ size_t max_schema_size;
+ int max_include_depth;
+ int max_include_count;
+ int disable_includes;
+ int allow_boolean_conversion;
+ int allow_enum_key;
+ int allow_enum_struct_field;
+ int allow_multiple_key_fields;
+ int allow_primary_key;
+ int allow_scan_for_all_fields;
+ int allow_string_key;
+ int allow_struct_field_deprecate;
+ int allow_struct_field_key;
+ int allow_struct_root;
+ int ascending_enum;
+ int hide_later_enum;
+ int hide_later_struct;
+ int offset_size;
+ int voffset_size;
+ int utype_size;
+ int bool_size;
+ int require_root_type;
+ int strict_enum_init;
+ uint64_t vt_max_count;
+
+ const char *default_schema_ext;
+ const char *default_bin_schema_ext;
+ const char *default_bin_ext;
+
+ /* Code Generator specific options. */
+ int gen_stdout;
+ int gen_dep;
+
+ const char *gen_depfile;
+ const char *gen_deptarget;
+ const char *gen_outfile;
+
+ int gen_append;
+
+ int cgen_pad;
+ int cgen_sort;
+ int cgen_pragmas;
+
+ int cgen_common_reader;
+ int cgen_common_builder;
+ int cgen_reader;
+ int cgen_builder;
+ int cgen_verifier;
+ int cgen_json_parser;
+ int cgen_json_printer;
+ int cgen_recursive;
+ int cgen_spacing;
+ int cgen_no_conflicts;
+
+
+ int bgen_bfbs;
+ int bgen_qualify_names;
+ int bgen_length_prefix;
+
+ /* Namespace args - these can override defaults so are null by default. */
+ const char *ns;
+ const char *nsc;
+
+ const char **inpaths;
+ const char **srcpaths;
+ int inpath_count;
+ int srcpath_count;
+ const char *outpath;
+};
+
+/* Runtime configurable options. */
+void flatcc_init_options(flatcc_options_t *opts);
+
+typedef void *flatcc_context_t;
+
+/*
+ * Call functions below in order listed one at a time.
+ * Each parse requires a new context.
+ *
+ * A reader file is named after the source base name, e.g.
+ * `monster.fbs` becomes `monster.h`. Builders are optional and created
+ * as `monster_builder.h`. A reader requires a common header
+ * `flatbuffers_common_reader.h` and a builder requires
+ * `flatbuffers_common_builder.h` in addition to the reader files. A
+ * reader needs no other source, but builders must link with the
+ * `flatbuilder` library and include files in `include/flatbuffers`.
+ *
+ * All the files may also be concatenated into one single file and then
+ * files will not be attempted included externally. This can be used
+ * with stdout output. The common builder can follow the common
+ * reader immediately, or at any later point before the first builder.
+ * The common files should only be included once, but no harm is done
+ * if duplication occurs.
+ *
+ * The outpath is prefixed to every output filename. The containing
+ * directory must exist, but the prefix may have text following
+ * the directory, for example the namespace. If outpath = "stdout",
+ * files are generated to stdout.
+ *
+ * Note that const char * options must remain valid for the lifetime
+ * of the context since they are not copied. The options object itself
+ * is not used after initialization and may be reused.
+*/
+
+/*
+ * `name` is the name of the schema file or buffer. If it is path, the
+ * basename is extracted (leading path stripped), and the default schema
+ * extension is stripped if present. The resulting name is used
+ * internally when generating output files. Typically the `name`
+ * argument will be the same as a schema file path given to
+ * `flatcc_parse_file`, but it does not have to be.
+ *
+ * `name` may be null if only common files are generated.
+ *
+ * `error_out` is an optional error handler. If null, output is truncated
+ * to a reasonable size and sent to stderr. `error_ctx` is provided as
+ * first argument to `error_out` if `error_out` is non-zero, otherwise
+ * it is ignored.
+ *
+ * Returns context or null on error.
+ */
+flatcc_context_t flatcc_create_context(flatcc_options_t *options, const char *name,
+ flatcc_error_fun error_out, void *error_ctx);
+
+/* Like `flatcc_create_context`, but with length argument for name. */
+/*
+ * Parse is optional - not needed for common files. If the input buffer version
+ * is called, the buffer must be zero terminated, otherwise an input
+ * path can be specified. The output path can be null.
+ *
+ * Only one parse can be called per context.
+ *
+ * The buffer size is limited to the max_schema_size option unless it is
+ * 0. The default is reasonable size like 64K depending on config flags.
+ *
+ * The buffer must remain valid for the duration of the context.
+ *
+ * The schema cannot contain include statements when parsed as a buffer.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_parse_buffer(flatcc_context_t ctx, const char *buf, size_t buflen);
+
+/*
+ * If options contain a non-zero `inpath` option, the resulting filename is
+ * prefixed with that path unless the filename is an absolute path.
+ *
+ * Errors are sent to the error handler given during initialization,
+ * or to stderr.
+ *
+ * The file size is limited to the max_schema_size option unless it is
+ * 0. The default is reasonable size like 64K depending on config flags.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_parse_file(flatcc_context_t ctx, const char *filename);
+
+/*
+ * Generate output files. The basename derived when the context was
+ * created is used to name the output files with respective
+ * extensions. If the outpath option is not null it is prefixed to the
+ * output files. The `cgen_common_reader, cgen_common_builder,
+ * cgen_reader, and cgen_builder` must be set or reset depending on what
+ * is to be generated. The common files do not require a parse, and the
+ * non-common files require a successful parse or the result is
+ * undefined.
+ *
+ * Unlike the parser, the code generator produces errors to stderr
+ * always. These errors are rare, such as using too long namespace
+ * names.
+ *
+ * If the `gen_stdout` option is set, all files are generated to stdout.
+ * In this case it is unwise to mix C and binary schema output options.
+ *
+ * If `bgen_bfbs` is set, a binary schema is generated to a file with
+ * the `.bfbs` extension. See also `flatcc_generate_binary_schema` for
+ * further details. Only `flatcc_generate_files` is called via the
+ * `flatcc` cli command.
+ *
+ * The `bgen_length_prefix` option will cause a length prefix to be
+ * written to each output binary schema. This option is only
+ * understood when writing to files.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_generate_files(flatcc_context_t ctx);
+
+/*
+ * Returns a buffer with a binary schema for a previous parse.
+ * The user is responsible for calling `free` on the returned buffer
+ * unless it returns 0 on error.
+ *
+ * Can be called instead of generate files, before, or after, but a
+ * schema must be parsed first.
+ *
+ * Returns a binary schema in `reflection.fbs` format. Any included
+ * files will be contained in the schema and there are no separate
+ * schema files for included schema.
+ *
+ * All type names are scoped, meaning that they are prefixed with their
+ * namespace using `.` as the namespace separator, for example:
+ * "MyGame.Example.Monster". Note that this differs from the current
+ * `flatc` compiler which does not prefix names. Enum names are not
+ * scoped, but the scope is implied by the containing enum type.
+ * The option `bgen_qualify_names=0` changes this behavior.
+ *
+ * If the default option `ascending_enum` is disabled, the `flatcc` will
+ * accept duplicate values and overlapping ranges like the C programming
+ * language. In this case enum values in the binary schema will not be
+ * searchable. At any rate enum names are not searchable in the current
+ * schema format.
+ *
+ */
+void *flatcc_generate_binary_schema(flatcc_context_t ctx, size_t *size);
+
+/*
+ * Similar to `flatcc_generate_binary_schema` but copies the binary
+ * schema into a user supplied buffer. If the buffer is too small
+ * the return value will be negative and the buffer content undefined.
+ */
+int flatcc_generate_binary_schema_to_buffer(flatcc_context_t ctx, void *buf, size_t bufsiz);
+
+/* Must be called to deallocate resources eventually - it is valid but
+ * without effect to call with a null context. */
+void flatcc_destroy_context(flatcc_context_t ctx);
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_H */
diff --git a/include/flatcc/flatcc_accessors.h b/include/flatcc/flatcc_accessors.h
new file mode 100644
index 0000000..084ecb1
--- /dev/null
+++ b/include/flatcc/flatcc_accessors.h
@@ -0,0 +1,101 @@
+#ifndef FLATCC_ACCESSORS
+#define FLATCC_ACCESSORS
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define __flatcc_basic_scalar_accessors_impl(N, T, W, E) \
+static inline size_t N ## __size(void) \
+{ return sizeof(T); } \
+static inline T *N ## __ptr_add(T *p, size_t i) \
+{ return p + i; } \
+static inline const T *N ## __const_ptr_add(const T *p, size_t i) \
+{ return p + i; } \
+static inline T N ## _read_from_pe(const void *p) \
+{ return N ## _cast_from_pe(*(T *)p); } \
+static inline T N ## _read_to_pe(const void *p) \
+{ return N ## _cast_to_pe(*(T *)p); } \
+static inline T N ## _read(const void *p) \
+{ return *(T *)p; } \
+static inline void N ## _write_from_pe(void *p, T v) \
+{ *(T *)p = N ## _cast_from_pe(v); } \
+static inline void N ## _write_to_pe(void *p, T v) \
+{ *(T *)p = N ## _cast_to_pe(v); } \
+static inline void N ## _write(void *p, T v) \
+{ *(T *)p = v; } \
+static inline T N ## _read_from_le(const void *p) \
+{ return N ## _cast_from_le(*(T *)p); } \
+typedef struct { int is_null; T value; } N ## _option_t;
+
+#define __flatcc_define_integer_accessors_impl(N, T, W, E) \
+static inline T N ## _cast_from_pe(T v) \
+{ return (T) E ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_pe(T v) \
+{ return (T) hto ## E ## W((uint ## W ## _t)v); } \
+static inline T N ## _cast_from_le(T v) \
+{ return (T) le ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_le(T v) \
+{ return (T) htole ## W((uint ## W ## _t)v); } \
+static inline T N ## _cast_from_be(T v) \
+{ return (T) be ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_be(T v) \
+{ return (T) htobe ## W((uint ## W ## _t)v); } \
+__flatcc_basic_scalar_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_real_accessors_impl(N, T, W, E) \
+union __ ## N ## _cast { T v; uint ## W ## _t u; }; \
+static inline T N ## _cast_from_pe(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = E ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_pe(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = hto ## E ## W(x.u); return x.v; } \
+static inline T N ## _cast_from_le(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = le ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_le(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = htole ## W(x.u); return x.v; } \
+static inline T N ## _cast_from_be(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = be ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_be(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = htobe ## W(x.u); return x.v; } \
+__flatcc_basic_scalar_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_integer_accessors(N, T, W, E) \
+__flatcc_define_integer_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_real_accessors(N, T, W, E) \
+__flatcc_define_real_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_basic_integer_accessors(NS, TN, T, W, E) \
+__flatcc_define_integer_accessors(NS ## TN, T, W, E)
+
+#define __flatcc_define_basic_real_accessors(NS, TN, T, W, E) \
+__flatcc_define_real_accessors(NS ## TN, T, W, E)
+
+#define __flatcc_define_basic_scalar_accessors(NS, E) \
+__flatcc_define_basic_integer_accessors(NS, char, char, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, uint8, uint8_t, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, uint16, uint16_t, 16, E) \
+__flatcc_define_basic_integer_accessors(NS, uint32, uint32_t, 32, E) \
+__flatcc_define_basic_integer_accessors(NS, uint64, uint64_t, 64, E) \
+__flatcc_define_basic_integer_accessors(NS, int8, int8_t, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, int16, int16_t, 16, E) \
+__flatcc_define_basic_integer_accessors(NS, int32, int32_t, 32, E) \
+__flatcc_define_basic_integer_accessors(NS, int64, int64_t, 64, E) \
+__flatcc_define_basic_real_accessors(NS, float, float, 32, E) \
+__flatcc_define_basic_real_accessors(NS, double, double, 64, E)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ACCESSORS */
diff --git a/include/flatcc/flatcc_alloc.h b/include/flatcc/flatcc_alloc.h
new file mode 100644
index 0000000..155364c
--- /dev/null
+++ b/include/flatcc/flatcc_alloc.h
@@ -0,0 +1,127 @@
+#ifndef FLATCC_ALLOC_H
+#define FLATCC_ALLOC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * These allocation abstractions are __only__ for runtime libraries.
+ *
+ * The flatcc compiler uses Posix allocation routines regardless
+ * of how this file is configured.
+ *
+ * This header makes it possible to use systems where malloc is not
+ * valid to use. In this case the portable library will not help
+ * because it implements Posix / C11 abstractions.
+ *
+ * Systems like FreeRTOS do not work with Posix memory calls and here it
+ * can be helpful to override runtime allocation primitives.
+ *
+ * In general, it is better to customize the allocator and emitter via
+ * flatcc_builder_custom_init and to avoid using the default emitter
+ * specific high level calls that copy out a buffer that must later be
+ * deallocated. This provides full control of allocation without the need
+ * for this file.
+ *
+ *
+ * IMPORTANT
+ *
+ * If you override malloc, free, etc., make sure your applications
+ * use the same allocation methods. For example, samples/monster.c
+ * and several test cases are no longer guaranteed to work out of the
+ * box.
+ *
+ * The changes must only affect target runtime compilation including the
+ * runtime library libflatccrt.
+ *
+ * The host system flatcc compiler and the compiler library libflatcc
+ * should NOT be compiled with non-Posix allocation since the compiler
+ * has a dependency on the runtime library and the wrong free operation
+ * might be callled. The safest way to avoid this problem this is to
+ * compile flatcc with the CMake script and the runtime files with a
+ * dedicated build system for the target system.
+ */
+
+#include <stdlib.h>
+
+#ifndef FLATCC_ALLOC
+#define FLATCC_ALLOC(n) malloc(n)
+#endif
+
+#ifndef FLATCC_FREE
+#define FLATCC_FREE(p) free(p)
+#endif
+
+#ifndef FLATCC_REALLOC
+#define FLATCC_REALLOC(p, n) realloc(p, n)
+#endif
+
+#ifndef FLATCC_CALLOC
+#define FLATCC_CALLOC(nm, n) calloc(nm, n)
+#endif
+
+/*
+ * Implements `aligned_alloc` and `aligned_free`.
+ * Even with C11, this implements non-standard aligned_free needed for portable
+ * aligned_alloc implementations.
+ */
+#ifndef FLATCC_USE_GENERIC_ALIGNED_ALLOC
+
+#ifndef FLATCC_NO_PALIGNED_ALLOC
+#include "flatcc/portable/paligned_alloc.h"
+#else
+#if !defined(__aligned_free_is_defined) || !__aligned_free_is_defined
+#define aligned_free free
+#endif
+#endif
+
+#else /* FLATCC_USE_GENERIC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_ALLOC
+static inline void *__flatcc_aligned_alloc(size_t alignment, size_t size)
+{
+ char *raw;
+ void *buf;
+ size_t total_size = (size + alignment - 1 + sizeof(void *));
+
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+ raw = (char *)(size_t)FLATCC_ALLOC(total_size);
+ buf = raw + alignment - 1 + sizeof(void *);
+ buf = (void *)(((size_t)buf) & ~(alignment - 1));
+ ((void **)buf)[-1] = raw;
+ return buf;
+}
+#define FLATCC_ALIGNED_ALLOC(alignment, size) __flatcc_aligned_alloc(alignment, size)
+#endif /* FLATCC_USE_GENERIC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_FREE
+static inline void __flatcc_aligned_free(void *p)
+{
+ char *raw;
+
+ if (!p) return;
+ raw = ((void **)p)[-1];
+
+ FLATCC_FREE(raw);
+}
+#define FLATCC_ALIGNED_FREE(p) __flatcc_aligned_free(p)
+#endif
+
+#endif /* FLATCC_USE_GENERIC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_ALLOC
+#define FLATCC_ALIGNED_ALLOC(a, n) aligned_alloc(a, n)
+#endif
+
+#ifndef FLATCC_ALIGNED_FREE
+#define FLATCC_ALIGNED_FREE(p) aligned_free(p)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ALLOC_H */
diff --git a/include/flatcc/flatcc_assert.h b/include/flatcc/flatcc_assert.h
new file mode 100644
index 0000000..3db3e7b
--- /dev/null
+++ b/include/flatcc/flatcc_assert.h
@@ -0,0 +1,45 @@
+#ifndef FLATCC_ASSERT_H
+#define FLATCC_ASSERT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+* This assert abstraction is only used for the flatcc runtime library.
+* The flatcc compiler uses Posix assert routines regardless of how this
+* file is configured.
+*
+* This header makes it possible to use systems where assert is not
+* valid to use. Note that `<assert.h>` may remain a dependency for static
+* assertions.
+*
+* `FLATCC_ASSERT` is designed to handle errors which cannot be ignored
+* and could lead to crash. The portable library may use assertions that
+* are not affected by this macro.
+*
+* `FLATCC_ASSERT` defaults to POSIX assert but can be overridden by a
+* preprocessor definition.
+*
+* Runtime assertions can be entirely disabled by defining
+* `FLATCC_NO_ASSERT`.
+*/
+
+#ifdef FLATCC_NO_ASSERT
+/* NOTE: This will not affect inclusion of <assert.h> for static assertions. */
+#undef FLATCC_ASSERT
+#define FLATCC_ASSERT(x) ((void)0)
+/* Grisu3 is used for floating point conversion in JSON processing. */
+#define GRISU3_NO_ASSERT
+#endif
+
+#ifndef FLATCC_ASSERT
+#include <assert.h>
+#define FLATCC_ASSERT assert
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ASSERT_H */
diff --git a/include/flatcc/flatcc_builder.h b/include/flatcc/flatcc_builder.h
new file mode 100644
index 0000000..2e84d29
--- /dev/null
+++ b/include/flatcc/flatcc_builder.h
@@ -0,0 +1,1911 @@
+#ifndef FLATCC_BUILDER_H
+#define FLATCC_BUILDER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Library for building untyped FlatBuffers. Intended as a support
+ * library for generated C code to produce typed builders, but might
+ * also be useful in runtime environments and as support for scripting
+ * languages.
+ *
+ * The builder has two API layers: a stack based `start/end` approach,
+ * and a direct `create`, and they may be mixed freely. The direct
+ * approach may be used as part of more specialized optimizations such
+ * as rewriting buffers while the stack approach is convenient for state
+ * machine driven parsers without a stack, or with a very simple stack
+ * without extra allocations.
+ *
+ * The builder emits partial buffer sequences to a user provided emitter
+ * function and does not require a full buffer representation in memory.
+ * For this reason it also does not support sorting or other operations
+ * that require representing the buffer, but post-processors can easily
+ * do this, and the generated schema specific code can provide functions
+ * to handle this.
+ *
+ * A custom allocator with a default realloc implementation can place
+ * restraints on resource consumption and provide initial allocation
+ * sizes for various buffers and stacks in use.
+ *
+ * A buffer under construction uses a virtual address space for the
+ * completed part of the buffer, starting at 0 and growing in both
+ * directions, or just down depending on whether vtables should be
+ * clustered at the end or not. Clustering may help caching and
+ * preshipping that part of the buffer.
+ *
+ * Because an offset cannot be known before its reference location is
+ * defined, every completed table, vector, etc. returns a reference into
+ * the virtual address range. If the final buffer keeps the 0 offset,
+ * these references remain stable and may be used for external references
+ * into the buffer.
+ *
+ * The maximum buffer that can be constructed is in practice limited to
+ * half the UOFFSET_MAX size, typically 2^31 bytes, not counting
+ * clustered vtables that may consume an additional 2^31 bytes
+ * (positive address range), but in practice cannot because vtable
+ * references are signed and thus limited to 2^31 bytes (or equivalent
+ * depending on the flatbuffer types chosen).
+ *
+ * CORRECTION: in various places rules are mentioned about nesting and using
+ * a reference at most once. In fact, DAG's are also valid flatbuffers.
+ * This means a reference may be reused as long as each individual use
+ * obeys the rules and, for example, circular references are not
+ * constructed (circular types are ok, but objects graphs with cycles
+ * are not permitted). Be especially aware of the offset vector create
+ * call which translates the references into offsets - this can be
+ * reverted by noting the reference in vector and calculate the base
+ * used for the offset to restore the original references after the
+ * vector has been emitted.
+ */
+
+#include <stdlib.h>
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "flatcc_flatbuffers.h"
+#include "flatcc_emitter.h"
+#include "flatcc_refmap.h"
+
+/* It is possible to enable logging here. */
+#ifndef FLATCC_BUILDER_ASSERT
+#define FLATCC_BUILDER_ASSERT(cond, reason) FLATCC_ASSERT(cond)
+#endif
+
+/*
+ * Error handling is not convenient and correct use should not cause
+ * errors beyond possibly memory allocation, but assertions are a
+ * good way to trace problems.
+ *
+ * Note: some internal assertion will remain if disabled.
+ */
+#ifndef FLATCC_BUILDER_ASSERT_ON_ERROR
+#define FLATCC_BUILDER_ASSERT_ON_ERROR 1
+#endif
+
+/*
+ * If set, checks user input against state and returns error,
+ * otherwise errors are ignored (assuming they won't happen).
+ * Errors will be asserted if enabled and checks are not skipped.
+ */
+#ifndef FLATCC_BUILDER_SKIP_CHECKS
+#define FLATCC_BUILDER_SKIP_CHECKS 0
+#endif
+
+
+/*
+ * When adding the same field to a table twice this is either an error
+ * or the existing field is returned, potentially introducing garbage
+ * if the type is a vector, table, or string. When implementing parsers
+ * it may be convenient to not treat this as an error.
+ */
+#ifndef FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+#define FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD 0
+#endif
+
+/**
+ * This type must have same size as `flatbuffers_uoffset_t`
+ * and must be a signed type.
+ */
+typedef flatbuffers_soffset_t flatcc_builder_ref_t;
+typedef flatbuffers_utype_t flatcc_builder_utype_t;
+
+/**
+ * This type must be compatible with code generation that
+ * creates union specific ref types.
+ */
+typedef struct flatcc_builder_union_ref {
+ flatcc_builder_utype_t type;
+ flatcc_builder_ref_t value;
+} flatcc_builder_union_ref_t;
+
+typedef struct flatcc_builder_union_vec_ref {
+ flatcc_builder_ref_t type;
+ flatcc_builder_ref_t value;
+} flatcc_builder_union_vec_ref_t;
+
+/**
+ * Virtual tables are off by one to avoid being mistaken for error at
+ * position 0, and it makes them detectable as such because no other
+ * reference is uneven. Vtables are emitted at their actual location
+ * which is one less than the reference value.
+ */
+typedef flatbuffers_soffset_t flatcc_builder_vt_ref_t;
+
+typedef flatbuffers_uoffset_t flatcc_builder_identifier_t;
+
+/**
+ * Hints to custom allocators so they can provide initial alloc sizes
+ * etc. There will be at most one buffer for each allocation type per
+ * flatcc_builder instance. Buffers containing only structs may avoid
+ * allocation altogether using a `create` call. The vs stack must hold
+ * vtable entries for all open tables up to their requested max id, but
+ * unused max id overlap on the stack. The final vtables only store the
+ * largest id actually added. The fs stack must hold stack frames for
+ * the nesting levels expected in the buffer, each about 50-100 bytes.
+ * The ds stack holds open vectors, table data, and nested buffer state.
+ * `create` calls bypass the `ds` and `fs` stack and are thus faster.
+ * The vb buffer holds a copy of all vtables seen and emitted since last
+ * vtable flush. The patch log holds a uoffset for every table field
+ * added to currently open tables. The hash table holds a uoffset entry
+ * for each hash slot where the allocator decides how many to provide
+ * above a certain minimum. The vd buffer allocates vtable descriptors
+ * which is a reference to an emitted vtable, an offset to a cached
+ * vtable, and a link to next descriptor with same hash. Calling `reset`
+ * after build can either keep the allocation levels for the next
+ * buffer, or reduce the buffers already allocated by requesting 1 byte
+ * allocations (meaning provide a default).
+ *
+ * The user stack is not automatically allocated, but when entered
+ * explicitly, the boundary is remembered in the current live
+ * frame.
+ */
+enum flatcc_builder_alloc_type {
+ /* The stack where vtables are built. */
+ flatcc_builder_alloc_vs,
+ /* The stack where data structures are built. */
+ flatcc_builder_alloc_ds,
+ /* The virtual table buffer cache, holds a copy of each vt seen. */
+ flatcc_builder_alloc_vb,
+ /* The patch log, remembers table fields with outstanding offset refs. */
+ flatcc_builder_alloc_pl,
+ /* The stack of frames for nested types. */
+ flatcc_builder_alloc_fs,
+ /* The hash table part of the virtual table cache. */
+ flatcc_builder_alloc_ht,
+ /* The vtable descriptor buffer, i.e. list elements for emitted vtables. */
+ flatcc_builder_alloc_vd,
+ /* User stack frame for custom data. */
+ flatcc_builder_alloc_us,
+
+ /* Number of allocation buffers. */
+ flatcc_builder_alloc_buffer_count
+};
+
+/** Must reflect the `flatcc_builder_alloc_type` enum. */
+#define FLATCC_BUILDER_ALLOC_BUFFER_COUNT flatcc_builder_alloc_buffer_count
+
+#ifndef FLATCC_BUILDER_ALLOC
+#define FLATCC_BUILDER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+
+#ifndef FLATCC_BUILDER_FREE
+#define FLATCC_BUILDER_FREE(p) FLATCC_FREE(p)
+#endif
+
+#ifndef FLATCC_BUILDER_REALLOC
+#define FLATCC_BUILDER_REALLOC(p, n) FLATCC_REALLOC(p, n)
+#endif
+
+#ifndef FLATCC_BUILDER_ALIGNED_ALLOC
+#define FLATCC_BUILDER_ALIGNED_ALLOC(a, n) FLATCC_ALIGNED_ALLOC(a, n)
+#endif
+
+#ifndef FLATCC_BUILDER_ALIGNED_FREE
+#define FLATCC_BUILDER_ALIGNED_FREE(p) FLATCC_ALIGNED_FREE(p)
+#endif
+
+/**
+ * Emits data to a conceptual deque by appending to either front or
+ * back, starting from offset 0.
+ *
+ * Each emit call appends a strictly later or earlier sequence than the
+ * last emit with same offset sign. Thus a buffer is gradually grown at
+ * both ends. `len` is the combined length of all iov entries such that
+ * `offset + len` yields the former offset for negative offsets and
+ * `offset + len` yields the next offset for non-negative offsets.
+ * The bulk of the data will be in the negative range, possibly all of
+ * it. The first emitted range will either start or end at
+ * offset 0. If offset 0 is emitted, it indicates the start of clustered
+ * vtables. The last positive (non-zero) offset may be zero padding to
+ * place the buffer in a full multiple of `block_align`, if set.
+ *
+ * No iov entry is empty, 0 < iov_count <= FLATCC_IOV_COUNT_MAX.
+ *
+ * The source data are in general ephemeral and should be consumed
+ * immediately, as opposed to caching iov.
+ *
+ * For high performance applications:
+ *
+ * The `create` calls may reference longer living data, but header
+ * fields etc. will still be short lived. If an emitter wants to
+ * reference data in another buffer rather than copying, it should
+ * inspect the memory range. The length of an iov entry may also be used
+ * since headers are never very long (anything starting at 16 bytes can
+ * safely be assumed to be user provided, or static zero padding). It is
+ * guaranteed that data pointers in `create` calls receive a unique slot
+ * separate from temporary headers, in the iov table which may be used
+ * for range checking or hashing (`create_table` is the only call that
+ * mutates the data buffer). It is also guaranteed (with the exception
+ * of `create_table` and `create_cached_vtable`) that data provided to
+ * create calls are not referenced at all by the builder, and these data
+ * may therefore de-facto be handles rather than direct pointers when
+ * the emitter and data provider can agree on such a protocol. This does
+ * NOT apply to any start/end/add/etc. calls which do copy to stack.
+ * `flatcc_builder_padding_base` may be used to test if an iov entry is
+ * zero padding which always begins at that address.
+ *
 * Future: the emit interface could be extended with a type code
 * and return an existing object instead of the emitted one if, for
 * example, they are identical. Outside this api level, generated
+ * code could provide a table comparison function to help such
+ * deduplication. It would be optional because two equal objects
+ * are not necessarily identical. The emitter already receives
+ * one object at time.
+ *
+ * Returns 0 on success and otherwise causes the flatcc_builder
+ * to fail.
+ */
/* Emitter callback type; see the contract described in the comment above. */
typedef int flatcc_builder_emit_fun(void *emit_context,
        const flatcc_iovec_t *iov, int iov_count, flatbuffers_soffset_t offset, size_t len);

/*
 * Returns a pointer to static padding used in emitter calls. May
 * sometimes also be used for empty defaults such as identifier.
 */
extern const uint8_t flatcc_builder_padding_base[];
+
+/**
+ * `request` is a minimum size to be returned, but allocation is
+ * expected to grow exponentially or in reasonable chunks. Notably,
+ * `alloc_type = flatcc_builder_alloc_ht` will only use highest available
+ * power of 2. The allocator may shrink if `request` is well below
+ * current size but should avoid repeated resizing on small changes in
+ * request sizes. If `zero_fill` is non-zero, allocated data beyond
+ * the current size must be zeroed. The buffer `b` may be null with 0
+ * length initially. `alloc_context` is completely implementation
 * dependent, and not needed when just relying on realloc. The
+ * resulting buffer may be the same or different with moved data, like
+ * realloc. Returns -1 with unmodified buffer on failure or 0 on
+ * success. The `alloc_type` identifies the buffer type. This may be
+ * used to cache buffers between instances of builders, or to decide a
 * default allocation size larger than requested. If `request` is zero
 * the buffer should be deallocated if non-empty, and success (0)
 * returned regardless.
+ */
/* Allocator callback type; see the contract described in the comment above. */
typedef int flatcc_builder_alloc_fun(void *alloc_context,
        flatcc_iovec_t *b, size_t request, int zero_fill, int alloc_type);

/*
 * The number of hash slots there will be allocated space for. The
 * allocator may provide more. The size returned should be
 * `sizeof(flatbuffers_uoffset_t) * count`, where the size is a power of
 * 2 (or the rest is wasted). The hash table can store many more entries
 * than slots using linear search. The table does not resize.
 */
#ifndef FLATCC_BUILDER_MIN_HASH_COUNT
#define FLATCC_BUILDER_MIN_HASH_COUNT 64
#endif
+
typedef struct __flatcc_builder_buffer_frame __flatcc_builder_buffer_frame_t;
/* Saved per-buffer state while a (possibly nested) buffer is open. */
struct __flatcc_builder_buffer_frame {
    /* Identifier given to the buffer at start, or zero if none. */
    flatcc_builder_identifier_t identifier;
    /* Saved buffer reference mark — presumably restores `buffer_mark` on exit; TODO confirm in builder source. */
    flatcc_builder_ref_t mark;
    /* Saved vtable-related end offset of the enclosing buffer — verify against builder implementation. */
    flatbuffers_uoffset_t vs_end;
    /* Unique id preventing vtable sharing across buffer nestings (see `nest_id` in flatcc_builder). */
    flatbuffers_uoffset_t nest_id;
    /* Buffer flags (`flatcc_builder_buffer_flags_t`, e.g. nested / with_size). */
    uint16_t flags;
    /* Block alignment in effect for this buffer, restored on exit. */
    uint16_t block_align;
};
+
typedef struct __flatcc_builder_vector_frame __flatcc_builder_vector_frame_t;
/* State for a vector under construction. */
struct __flatcc_builder_vector_frame {
    /* Size of each vector element in bytes. */
    flatbuffers_uoffset_t elem_size;
    /* Number of elements currently in the vector. */
    flatbuffers_uoffset_t count;
    /* Upper bound on `count` — presumably derived from the uoffset range and `elem_size`; TODO confirm. */
    flatbuffers_uoffset_t max_count;
};
+
typedef struct __flatcc_builder_table_frame __flatcc_builder_table_frame_t;
/* Saved table construction state while a nested frame is open. */
struct __flatcc_builder_table_frame {
    /* Saved end of the vtable entry stack (`vs` in flatcc_builder). */
    flatbuffers_uoffset_t vs_end;
    /* Saved end of the offset field location stack (`pl` in flatcc_builder). */
    flatbuffers_uoffset_t pl_end;
    /* Saved vtable hash accumulated for the enclosing table. */
    uint32_t vt_hash;
    /* Saved `id_end` — one above the highest vtable entry, used to track vt_size. */
    flatbuffers_voffset_t id_end;
};
+
+/*
+ * Store state for nested structures such as buffers, tables and vectors.
+ *
+ * For less busy data and data where access to a previous state is
+ * irrelevant, the frame may store the current state directly. Otherwise
+ * the current state is maintained in the flatcc_builder_t structure in a
+ * possibly derived form (e.g. ds pointer instead of ds_end offset) and
+ * the frame is used to store the previous state when the frame is
+ * entered.
+ *
 * Most operations have a start/update/end cycle that decides the
 * lifetime of a frame, but these generally also have a direct form
+ * (create) that does not use a frame at all. These still do some
+ * state updates notably passing min_align to parent which may also be
+ * an operation without a frame following the child level operation
+ * (e.g. create struct, create buffer). Ending a frame results in the
+ * same kind of updates.
+ */
typedef struct __flatcc_builder_frame __flatcc_builder_frame_t;
struct __flatcc_builder_frame {
    /* Saved start of the data stack (ds) range of the enclosing frame. */
    flatbuffers_uoffset_t ds_first;
    /* Saved ds size limit — presumably the enclosing frame's `ds_limit`; TODO confirm. */
    flatbuffers_uoffset_t type_limit;
    /* Saved ds offset of the enclosing frame. */
    flatbuffers_uoffset_t ds_offset;
    /* Saved alignment of the enclosing object. */
    uint16_t align;
    /* Frame type tag — see `enum flatcc_builder_type`. */
    uint16_t type;
    /* Type-specific state; the active member is selected by `type`. */
    union {
        __flatcc_builder_table_frame_t table;
        __flatcc_builder_vector_frame_t vector;
        __flatcc_builder_buffer_frame_t buffer;
    } container;
};
+
/**
 * The main flatcc_builder structure. Can be stack allocated and must
 * be initialized with `flatcc_builder_init` and cleared with
 * `flatcc_builder_clear` to reclaim memory. Between buffer builds,
 * `flatcc_builder_reset` may be used.
 */
typedef struct flatcc_builder flatcc_builder_t;

struct flatcc_builder {
    /* Next entry on reserved stack in `alloc_pl` buffer. */
    flatbuffers_voffset_t *pl;
    /* Next entry on reserved stack in `alloc_vs` buffer. */
    flatbuffers_voffset_t *vs;
    /* One above the highest entry in vs, used to track vt_size. */
    flatbuffers_voffset_t id_end;
    /* The evolving vtable hash updated with every new field. */
    uint32_t vt_hash;

    /* Pointer to ds_first. */
    uint8_t *ds;
    /* Offset from `ds` on current frame. */
    flatbuffers_uoffset_t ds_offset;
    /* ds buffer size relative to ds_first, clamped to max size of current type. */
    flatbuffers_uoffset_t ds_limit;

    /* ds_first, ds_first + ds_offset is current ds stack range. */
    flatbuffers_uoffset_t ds_first;
    /* Points to currently open frame in `alloc_fs` buffer. */
    __flatcc_builder_frame_t *frame;

    /* Only significant to emitter function, if at all. */
    void *emit_context;
    /* Only significant to allocator function, if at all. */
    void *alloc_context;
    /* Customizable write function that both appends and prepends data. */
    flatcc_builder_emit_fun *emit;
    /* Customizable allocator that also deallocates. */
    flatcc_builder_alloc_fun *alloc;
    /* Buffers indexed by `alloc_type`. */
    flatcc_iovec_t buffers[FLATCC_BUILDER_ALLOC_BUFFER_COUNT];
    /* Number of slots in ht given as 1 << ht_width. */
    size_t ht_width;

    /* The location in vb to add next cached vtable. */
    flatbuffers_uoffset_t vb_end;
    /* Where to allocate next vtable descriptor for hash table. */
    flatbuffers_uoffset_t vd_end;
    /* Ensure final buffer is aligned to at least this. Nested buffers get their own `min_align`. */
    uint16_t min_align;
    /* The currently active object's alignment, isolated from nested activity. */
    uint16_t align;
    /* The current buffer's block alignment, used when emitting the buffer. */
    uint16_t block_align;
    /* Signed virtual address range used for `flatcc_builder_ref_t` and emitter. */
    flatcc_builder_ref_t emit_start;
    flatcc_builder_ref_t emit_end;
    /* 0 for top level, and end of buffer ref for nested buffers (can also be 0). */
    flatcc_builder_ref_t buffer_mark;
    /* Next nest_id. */
    flatbuffers_uoffset_t nest_count;
    /* Unique id to prevent sharing of vtables across buffers. */
    flatbuffers_uoffset_t nest_id;
    /* Current nesting level. Helpful to state-machines with explicit stack and to check `max_level`. */
    int level;
    /* Aggregate check for allocated frame and max_level. */
    int limit_level;
    /* Tracks size-prefixed buffers; must match `flatcc_builder_buffer_flags_t` in size. */
    uint16_t buffer_flags;

    /* Settings that may be applied with no frame allocated. */

    flatcc_builder_identifier_t identifier;

    /* Settings that survive reset (emitter, alloc, and contexts also survive): */

    /* If non-zero, vtable cache gets flushed periodically. */
    size_t vb_flush_limit;
    /* If non-zero, fails on deep nesting to help drivers with a stack, such as recursive parsers etc. */
    int max_level;
    /* If non-zero, do not cluster vtables at end, only emit negative offsets (0 by default). */
    int disable_vt_clustering;

    /* Set if the default emitter is being used. */
    int is_default_emitter;
    /* Only used with default emitter. */
    flatcc_emitter_t default_emit_context;

    /* Offset to the last entered user frame on the user frame stack, after frame header, or 0. */
    size_t user_frame_offset;

    /* The offset to the end of the most recent user frame. */
    size_t user_frame_end;

    /* The optional user supplied refmap for cloning DAG's - not shared with nested buffers. */
    flatcc_refmap_t *refmap;
};
+
+/**
+ * Call this before any other API call.
+ *
+ * The emitter handles the completed chunks of the buffer that will no
+ * longer be required by the builder. It is largely a `write` function
+ * that can append to both positive and negative offsets.
+ *
+ * No memory is allocated during init. Buffers will be allocated as
+ * needed. The `emit_context` is only used by the emitter, if at all.
+ *
 * `flatcc_builder_reset/clear` calls are automatically forwarded to the
+ * default emitter.
+ *
+ * Returns -1 on failure, 0 on success.
+ */
+int flatcc_builder_init(flatcc_builder_t *B);
+
+/**
+ * Use instead of `flatcc_builder_init` when providing a custom allocator
+ * or emitter. Leave emitter or allocator null to use default.
+ * Cleanup of emit and alloc context must be handled manually after
+ * the builder is cleared or reset, except if emitter is null the
+ * default will be automatically cleared and reset.
+ *
+ * Returns -1 on failure, 0 on success.
+ */
+int flatcc_builder_custom_init(flatcc_builder_t *B,
+ flatcc_builder_emit_fun *emit, void *emit_context,
+ flatcc_builder_alloc_fun *alloc, void *alloc_context);
+
+/*
+ * Returns (flatcc_emitter_t *) if the default context is used.
+ * Other emitter might have null contexts.
+ */
+void *flatcc_builder_get_emit_context(flatcc_builder_t *B);
+
+/**
+ * Prepares builder for a new build. The emitter is not told when a
+ * buffer is finished or when a new begins, and must be told so
+ * separately. Allocated buffers will be zeroed, but may optionally be
+ * reduced to their defaults (signalled by reallocating each non-empty
+ * buffer to a single byte). General settings are cleared optionally,
+ * such as cache flushing. Buffer specific settings such as buffer
+ * identifier are always cleared.
+ *
+ * Returns -1 if allocator complains during buffer reduction, 0 on
+ * success.
+ */
+int flatcc_builder_custom_reset(flatcc_builder_t *B,
+ int reduce_buffers, int set_defaults);
+
+/*
+ * Same as `flatcc_builder_custom_reset` with default arguments
+ * where buffers are not reduced and default settings are not reset.
+ */
+int flatcc_builder_reset(flatcc_builder_t *B);
+
+/**
+ * Deallocates all memory by calling allocate with a zero size request
+ * on each buffer, then zeroing the builder structure itself.
+ */
+void flatcc_builder_clear(flatcc_builder_t *B);
+
+/**
+ * Allocates to next higher power of 2 using system realloc and ignores
+ * `alloc_context`. Only reduces size if a small subsequent increase in
+ * size would not trigger a reallocation. `alloc_type` is used to
+ * set minimum sizes. Hash tables are allocated to the exact requested
+ * size. See also `alloc_fun`.
+ */
+int flatcc_builder_default_alloc(void *alloc_context,
+ flatcc_iovec_t *b, size_t request, int zero_fill, int alloc_type);
+
+/**
+ * If non-zero, the vtable cache will get flushed whenever it reaches
+ * the given limit at a point in time where more space is needed. The
+ * limit is not exact as it is only tested when reallocation is
+ * required.
+ */
+void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size);
+
+/**
+ * Manual flushing of vtable for long running tasks. Mostly used
+ * internally to deal with nested buffers.
+ */
+void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B);
+
+/**
+ * Low-level support function to aid in constructing nested buffers without
+ * allocation. Not for regular use.
+ *
+ * Call where `start_buffer` would have been placed when using
+ * `create_buffer` in a nested context. Save the return value on a stack
+ * as argument to `pop_buffer_alignment`.
+ *
+ * The call resets the current derived buffer alignment so the nested
+ * buffer will not be aligned to more than required.
+ *
+ * Often it will not be necessary to be so careful with alignment since
+ * the alignment cannot be invalid by failing to use push and pop, but
+ * for code generation it will ensure the correct result every time.
+ */
+uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B);
+
+/**
+ * Low-level call.
+ *
+ * Call with the return value from push_buffer_alignment after a nested
 * `create_buffer` call. The alignments merge back up in the buffer
 * hierarchy so the top level buffer gets the largest of all alignments.
+ */
+void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t buffer_align);
+
+/**
+ * This value may be of interest when the buffer has been ended, for
+ * example when subsequently allocating memory for the buffer to ensure
+ * that memory is properly aligned.
+ */
+uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B);
+
+/**
+ * Level 0 means no buffer is started, otherwise it increments with
+ * start calls and decrements with end calls (approximately for
+ * optimized operations such as table vectors).
+ *
+ * If `max_level` has been set, `get_level` always returns a value <=
+ * `max_level` provided no start call has failed.
+ *
+ * Level continues to increment inside nested buffers.
+ */
+int flatcc_builder_get_level(flatcc_builder_t *B);
+
+/**
+ * Setting the max level triggers a failure on start of new nestings
+ * when the level is reached. May be used to protect recursive descend
+ * parsers etc. or later buffer readers.
+ *
+ * The builder itself is not sensitive to depth, and the allocator is a
+ * better way to protect resource abuse.
+ *
+ * `max_level` is not reset inside nested buffers.
+ */
+void flatcc_builder_set_max_level(flatcc_builder_t *B, int level);
+
+/**
+ * By default ordinary data such as tables are placed in front of
+ * earlier produced content and vtables are placed at the very end thus
+ * clustering vtables together. This can be disabled so all content is
+ * placed in front. Nested buffers ignores this setting because they can
+ * only place content in front because they cannot blend with the
+ * containing buffers content. Clustering could be more cache friendly
+ * and also enables pre-shipping of the vtables during transmission.
+ */
+void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable);
+
+/**
+ * Sets a new user supplied refmap which maps source pointers to
+ * references and returns the old refmap, or null. It is also
+ * possible to disable an existing refmap by setting a null
+ * refmap.
+ *
+ * A clone or pick operation may use this map when present,
+ * depending on the data type. If a hit is found, the stored
+ * reference will be used instead of performing a new clone or
+ * pick operation. It is also possible to manually populate the
+ * refmap. Note that the builder does not have a concept of
+ * clone or pick - these are higher level recursive operations
+ * to add data from one buffer to another - but such code may
+ * rely on the builder to provide the current refmap during
+ * recursive operations. For this reason, the builder makes no
+ * calls to the refmap interface on its own - it just stores the
+ * current refmap such that recursive operations can find it.
+ *
+ * Refmaps MUST be reset, replaced or disabled if a source
+ * pointer may be reused for different purposes - for example if
+ * repeatedly reading FlatBuffers into the same memory buffer
+ * and performing a clone into a buffer under construction.
+ * Refmaps may also be replaced if the same object is to be
+ * cloned several times keeping the internal DAG structure
+ * intact with every new clone being an independent object.
+ *
+ * Refmaps must also be replaced or disabled prior to starting a
+ * nested buffer and after stopping it, or when cloning a object
+ * as a nested root. THIS IS VERY EASY TO GET WRONG! The
+ * builder does a lot of bookkeeping for nested buffers but not
+ * in this case. Shared references may happen and they WILL fail
+ * verification and they WILL break when copying out a nested
+ * buffer to somewhere else. The user_frame stack may be used
+ * for pushing refmaps, but often user codes recursive stack
+ * will work just as well.
+ *
+ * It is entirely optional to use refmaps when cloning - they
+ * preserve DAG structure and may speed up operations or slow
+ * them down, depending on the source material.
+ *
+ * Refmaps may consume a lot of space when large offset vectors
+ * are cloned when these do not have significant shared
+ * references. They may also be very cheap to use without any
+ * dynamic allocation when objects are small and have at most a
+ * few references.
+ *
+ * Refmaps only support init, insert, find, reset, clear but not
+ * delete. There is a standard implementation in the runtime
+ * source tree but it can easily be replaced compile time and it
+ * may also be left out if unused. The builder wraps reset, insert,
+ * and find so the user does not have to check if a refmap is
 * present but other operations must be done directly on the
+ * refmap.
+ *
+ * The builder wrapped refmap operations are valid on a null
+ * refmap which will find nothing and insert nothing.
+ *
+ * The builder will reset the refmap during a builder reset and
+ * clear the refmap during a builder clear operation. If the
+ * refmap goes out of scope before that happens it is important
+ * to call set_refmap with null and manually clear the refmap.
+ */
+static inline flatcc_refmap_t *flatcc_builder_set_refmap(flatcc_builder_t *B, flatcc_refmap_t *refmap)
+{
+ flatcc_refmap_t *refmap_old;
+
+ refmap_old = B->refmap;
+ B->refmap = refmap;
+ return refmap_old;
+}
+
+/* Retrieves the current refmap, or null. */
+static inline flatcc_refmap_t *flatcc_builder_get_refmap(flatcc_builder_t *B)
+{
+ return B->refmap;
+}
+
+/* Finds a reference, or a null reference if no refmap is active. * */
+static inline flatcc_builder_ref_t flatcc_builder_refmap_find(flatcc_builder_t *B, const void *src)
+{
+ return B->refmap ? flatcc_refmap_find(B->refmap, src) : flatcc_refmap_not_found;
+}
+
/*
 * Inserts into the current refmap and returns the inserted ref
 * upon success, or not_found on failure (default 0), or just
 * returns ref if refmap is absent.
 *
 * Note that if an existing item exists, the ref is replaced
 * and the new, not the old, ref is returned.
 */
+static inline flatcc_builder_ref_t flatcc_builder_refmap_insert(flatcc_builder_t *B, const void *src, flatcc_builder_ref_t ref)
+{
+ return B->refmap ? flatcc_refmap_insert(B->refmap, src, ref) : ref;
+}
+
+static inline void flatcc_builder_refmap_reset(flatcc_builder_t *B)
+{
+ if (B->refmap) flatcc_refmap_reset(B->refmap);
+}
+
+
/* Flags controlling buffer layout; accepted by `create_buffer`, `start_buffer` and `embed_buffer`. */
typedef uint16_t flatcc_builder_buffer_flags_t;
/* The buffer is nested inside a parent buffer (wrapped in a ubyte vector). */
static const flatcc_builder_buffer_flags_t flatcc_builder_is_nested = 1;
/* The buffer carries a size prefix as part of the aligned buffer. */
static const flatcc_builder_buffer_flags_t flatcc_builder_with_size = 2;

/* The flag size in the API needs to match the internal size. */
static_assert(sizeof(flatcc_builder_buffer_flags_t) ==
        sizeof(((flatcc_builder_t *)0)->buffer_flags), "flag size mismatch");
+
+/**
+ * An alternative to start buffer, start struct/table ... end buffer.
+ *
 * This call is mostly of interest as a means to quickly create a zero
+ * allocation top-level buffer header following a call to create_struct,
+ * or to create_vtable/create_table. For that, it is quite simple to
+ * use. For general buffer construction without allocation, more care is
+ * needed, as discussed below.
+ *
+ * If the content is created with `start/end_table` calls, or similar,
+ * it is better to use `start/end_buffer` since stack allocation is used
+ * anyway.
+ *
+ * The buffer alignment must be provided manually as it is not derived
+ * from constructed content, unlike `start/end_buffer`. Typically
+ * `align` would be same argument as provided to `create_struct`.
 * `get_buffer_alignment` may also be used (note: `get_buffer_alignment`
+ * may return different after the call because it will be updated with
+ * the `block_align` argument to `create_buffer` but that is ok).
+ *
+ * The buffer may be constructed as a nested buffer with the `is_nested
+ * = 1` flag. As a nested buffer a ubyte vector header is placed before
+ * the aligned buffer header. A top-level buffer will normally have
+ * flags set to 0.
+ *
+ * A top-level buffer may also be constructed with the `with_size = 2`
+ * flag for top level buffers. It adds a size prefix similar to
+ * `is_nested` but the size is part of the aligned buffer. A size
+ * prefixed top level buffer must be accessed with a size prefix aware
+ * reader, or the buffer given to a standard reader must point to after
+ * the size field while keeping the buffer aligned to the size field
+ * (this will depend on the readers API which may be an arbitrary other
+ * language).
+ *
+ * If the `with_size` is used with the `is_nested` flag, the size is
+ * added as usual and all fields remain aligned as before, but padding
+ * is adjusted to ensure the buffer is aligned to the size field so
+ * that, for example, the nested buffer with size can safely be copied
+ * to a new memory buffer for consumption.
+ *
+ * Generally, references may only be used within the same buffer
+ * context. With `create_buffer` this becomes less precise. The rule
+ * here is that anything that would be valid with start/end_buffer
+ * nestings is also valid when removing the `start_buffer` call and
+ * replacing `end_buffer` with `create_buffer`.
+ *
+ * Note the additional burden of tracking buffer alignment manually -
 * To help with this use `push_buffer_alignment` where `start_buffer`
 * would have been placed, and `pop_buffer_alignment` after the
 * `create_buffer` call, and use `get_buffer_alignment` as described
+ * above.
+ *
+ * `create_buffer` is not suitable as a container for buffers created
+ * with `start/end_buffer` as these make assumptions about context that
+ * create buffer does not provide. Also, there is no point in doing so,
+ * since the idea of `create_buffer` is to avoid allocation in the first
+ * place.
+ */
+flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE],
+ uint16_t block_align,
+ flatcc_builder_ref_t ref, uint16_t align, flatcc_builder_buffer_flags_t flags);
+
+/**
+ * Creates a struct within the current buffer without using any
+ * allocation.
+ *
+ * The struct should be used as a root in the `end_buffer` call or as a
+ * union value as there are no other ways to use struct while conforming
+ * to the FlatBuffer format - noting that tables embed structs in their
+ * own data area except in union fields.
+ *
+ * The struct should be in little endian format and follow the usual
+ * FlatBuffers alignment rules, although this API won't care about what
+ * is being stored.
+ *
+ * May also be used to simply emit a struct through the emitter
+ * interface without being in a buffer and without being a valid
+ * FlatBuffer.
+ */
+flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B,
+ const void *data, size_t size, uint16_t align);
+
+/**
+ * Starts a struct and returns a pointer that should be used immediately
+ * to fill in the struct in protocol endian format, and when done,
+ * `end_struct` should be called. The returned reference should be used
+ * as argument to `end_buffer` or as a union value. See also
+ * `create_struct`.
+ */
+void *flatcc_builder_start_struct(flatcc_builder_t *B,
+ size_t size, uint16_t align);
+
+/**
+ * Return a pointer also returned at start struct, e.g. for endian
+ * conversion.
+ */
+void *flatcc_builder_struct_edit(flatcc_builder_t *B);
+
+/**
+ * Emits the struct started by `start_struct` and returns a reference to
+ * be used as root in an enclosing `end_buffer` call or as a union
+ * value. As mentioned in `create_struct`, these can also be used more
+ * freely, but not while being conformant FlatBuffers.
+ */
+flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B);
+
+/**
+ * The buffer always aligns to at least the offset size (typically 4)
+ * and the internal alignment requirements of the buffer content which
+ * is derived as content is added.
+ *
+ * In addition, block_align can be specified. This ensures the resulting
+ * buffer is at least aligned to the block size and that the total size
+ * is zero padded to fill a block multiple if necessary. Because the
+ * emitter operates on a virtual address range before the full buffer is
+ * aligned, it may have to make assumptions based on that: For example,
 * it may be processing encryption blocks on the fly, and the resulting
+ * buffer should be aligned to the encryption block size, even if the
+ * content is just a byte aligned struct. Block align helps ensure this.
 * If the block align is 1 there will be no attempt to zero pad at the
+ * end, but the content may still warrant padding after the header. End
+ * padding is only needed with clustered vtables (which is the default).
+ *
+ * `block_align` is allowed to be 0 meaning it will inherit from parent if
+ * present, and otherwise it defaults to 1.
+ *
+ * The identifier may be null, and it may optionally be set later with
+ * `set_identifier` before the `end_buffer` call.
+ *
+ * General note:
+ *
+ * Only references returned with this buffer as current (i.e. last
+ * unended buffer) can be stored in other objects (tables, offset
+ * vectors) also belonging to this buffer, or used as the root argument
+ * to `end_buffer`. A reference may be stored at most once, and unused
+ * references will result in buffer garbage. All calls must be balanced
+ * around the respective start / end operations, but may otherwise nest
+ * freely, including nested buffers. Nested buffers are supposed to be
+ * stored in a table offset field to comply with FlatBuffers, but the
+ * API does not place any restrictions on where references are stored,
+ * as long as they are indicated as offset fields.
+ *
+ * All alignment in all API calls must be between 1 and 256 and must be a
+ * power of 2. This is not checked. Only if explicitly documented can it
+ * also be 0 for a default value.
+ *
+ * `flags` can be `with_size` but `is_nested` is derived from context
+ * see also `create_buffer`.
+ */
+int flatcc_builder_start_buffer(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE],
+ uint16_t block_align, flatcc_builder_buffer_flags_t flags);
+
+/**
+ * The root object should be a struct or a table to conform to the
+ * FlatBuffers format, but technically it can also be a vector or a
+ * string, or even a child buffer (which is also vector as seen by the
+ * buffer). The object must be created within the current buffer
+ * context, that is, while the current buffer is the deepest nested
+ * buffer on the stack.
+ */
+flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root);
+
+/**
+ * The embed buffer is mostly intended to add an existing buffer as a
+ * nested buffer. The buffer will be wrapped in a ubyte vector such that
+ * the buffer is aligned at vector start, after the size field.
+ *
+ * If `align` is 0 it will default to 8 so that all FlatBuffer numeric
+ * types will be readable. NOTE: generally do not count on align 0 being
+ * valid or even checked by the API, but in this case it may be
+ * difficult to know the internal buffer alignment, and 1 would be the wrong
+ * choice.
+ *
+ * If `block_align` is set (non-zero), the buffer is placed in an isolated
+ * block multiple. This may cost up to almost 2 block sizes in padding.
+ * If the `block_align` argument is 0, it inherits from the parent
+ * buffer block_size, or defaults to 1.
+ *
+ * The `align` argument must be set to respect the buffers internal
+ * alignment requirements, but if the buffer is smaller it will not be
 * padded to isolate the buffer. For example a buffer with
+ * `align = 64` and `size = 65` may share its last 64 byte block with
+ * other content, but not if `block_align = 64`.
+ *
+ * Because the ubyte size field is not, by default, part of the aligned
+ * buffer, significant space can be wasted if multiple blocks are added
+ * in sequence with a large block size.
+ *
+ * In most cases the distinction between the two alignments is not
+ * important, but it allows separate configuration of block internal
+ * alignment and block size, which can be important for auto-generated
+ * code that may know the alignment of the buffer, but not the users
+ * operational requirements.
+ *
+ * If the buffer is embedded without a parent buffer, it will simply
+ * emit the buffer through the emit interface, but may also add padding
+ * up to block alignment. At top-level there will be no size field
+ * header.
+ *
+ * If `with_size` flag is set, the buffer is aligned to size field and
 * the above note about padding space no longer applies. The size field
 * is added regardless. The `is_nested` flag has no effect since it is
 * implied.
+ */
+flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
+ uint16_t block_align,
+ const void *data, size_t size, uint16_t align, flatcc_builder_buffer_flags_t flags);
+
+/**
+ * Applies to the innermost open buffer. The identifier may be null or
+ * contain all zero. Overrides any identifier given to the start buffer
+ * call.
+ */
+void flatcc_builder_set_identifier(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE]);
+
/* Object type currently under construction on the builder's frame stack; see `get_type` / `get_type_at`. */
enum flatcc_builder_type {
    flatcc_builder_empty = 0,
    flatcc_builder_buffer,
    flatcc_builder_struct,
    flatcc_builder_table,
    flatcc_builder_vector,
    flatcc_builder_offset_vector,
    flatcc_builder_string,
    flatcc_builder_union_vector
};
+
+/**
+ * Returns the object type currently on the stack, for example if
+ * needing to decide how to close a buffer. Because a table is
+ * automatically added when starting a table buffer,
+ * `flatcc_builder_table_buffer` should not normally be seen and the level
+ * should be 2 before when closing a top-level table buffer, and 0
+ * after. A `flatcc_builder_struct_buffer` will be visible at level 1.
+ *
+ */
+enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B);
+
+/**
+ * Similar to `get_type` but for a specific level. `get_type_at(B, 1)`
+ * will return `flatcc_builder_table_buffer` if this is the root buffer
+ * type. get_type_at(B, 0) is always `flatcc_builder_empty` and so are any
+ * level above `get_level`.
+ */
+enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level);
+
+/**
+ * The user stack is available for custom data. It may be used as
+ * a simple stack by extending or reducing the inner-most frame.
+ *
+ * A frame has a size and a location on the user stack. Entering
+ * a frame ensures the start is aligned to sizeof(size_t) and
+ * ensures the requested space is available without reallocation.
+ * When exiting a frame, the previous frame is restored.
+ *
+ * A user frame works completely independently of the builder's
+ * frame stack for tracking tables, vectors etc. and does not have
+ * to be completely exited at buffer close, but obviously it is not
+ * valid to exit more often than entered.
+ *
+ * The frame is zeroed when entered.
+ *
+ * Returns a non-zero handle to the user frame upon success or
+ * 0 on allocation failure.
+ */
+size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size);
+
+/**
+ * Makes the parent user frame current, if any. It is not valid to call
+ * if there isn't any current frame. Returns handle to parent frame if
+ * any, or 0.
+ */
+size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B);
+
+/**
+ * Exits the frame represented by the given handle. All more
+ * recently entered frames will also be exited. Returns the parent
+ * frame handle if any, or 0.
+ */
+size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle);
+
+/**
+ * Returns a non-zero handle to the current inner-most user frame if
+ * any, or 0.
+ */
+size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B);
+
+/*
+ * Returns a pointer to the user frame at the given handle. Any active
+ * frame can be accessed in this manner but the pointer is invalidated
+ * by user frame enter and exit operations.
+ */
+void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle);
+
+/**
+ * Returns the size of the buffer and the logical start and end address
+ * with respect to the emitter's address range. `end` - `start` also
+ * yields the size. During construction `size` is the emitted number of
+ * bytes and after buffer close it is the actual buffer size - by then
+ * the start is also the return value of close buffer. End marks the end
+ * of the virtual table cluster block.
+ *
+ * NOTE: there is no guarantee that all vtables end up in the cluster
+ * block if there is placed a limit on the vtable size, or if nested
+ * buffers are being used. On the other hand, if these conditions are
+ * met, it is guaranteed that all vtables are present if the vtable
+ * block is available (this depends on external transmission - the
+ * vtables are always emitted before tables using them). In all cases
+ * the vtables will behave as valid vtables in a flatbuffer.
+ */
+size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B);
+
+/**
+ * Returns the reference to the start of the emitter buffer so far, or
+ * in total after buffer end, in the virtual address range used
+ * by the emitter. Start is also returned by buffer end.
+ */
+flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B);
+
+/**
+ * Returns the reference to the end of buffer emitted so far. When
+ * clustering vtables, this is the end of tables, or after buffer end,
+ * also zero padding if block aligned. If clustering is disabled, this
+ * method will return 0 as the buffer only grows down then.
+ */
+flatcc_builder_ref_t flatcc_builder_get_buffer_mark(flatcc_builder_t *B);
+
+/**
+ * Creates the vtable in the current buffer context, somewhat similar to
+ * how create_vector operates. Each call results in a new table even if
+ * an identical has already been emitted.
+ *
+ * Also consider `create_cached_vtable` which will reuse existing
+ * vtables.
+ *
+ * This is a low-low-level function intended to support
+ * `create_cached_vtable` or equivalent, and `create_table`, both of
+ * which are normally used indirectly via `start_table`, `table_add`,
+ * `table_add_offset`..., `table_end`.
+ *
+ * Creates a vtable as a verbatim copy. This means the vtable must
+ * include the header fields containing the vtable size and the table
+ * size in little endian voffset_t encoding followed by the vtable
+ * entries in same encoding.
+ *
+ * The function may be used to copy vtables from other buffers
+ * since they are directly transferable.
+ *
+ * The returned reference is actually the emitted location + 1. This
+ * ensures the vtable is not mistaken for error because 0 is a valid
+ * vtable reference. `create_table` is aware of this and subtracts one
+ * before computing the final offset relative to the table. This also
+ * means vtable references are uniquely identifiable by having the
+ * lowest bit set.
+ *
+ * vtable references may be reused within the same buffer, not any
+ * parent or other related buffer (technically this is possible though,
+ * as long as it is within same builder context, but it will not construct
+ * valid FlatBuffers because the buffer cannot be extracted in isolation).
+ */
+flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
+ const flatbuffers_voffset_t *vt,
+ flatbuffers_voffset_t vt_size);
+
+/**
+ * Support function to `create_vtable`. See also the uncached version
+ * `create_vtable`.
+ *
+ * Looks up the constructed vtable on the vs stack to see if it matches
+ * a cached entry. If not, it emits a new vtable either at the end if
+ * top-level and clustering is enabled, or at the front (always for
+ * nested buffers).
+ *
+ * If the same vtable was already emitted in a different buffer, but not
+ * in the current buffer, the cache entry will be reused, but a new
+ * vtable will be emitted the first time it happens in the same buffer.
+ *
+ * The returned reference is + 1 relative to the emitted address range
+ * to identify it as a vtable and to avoid mistaking the valid 0
+ * reference for an error (clustered vtables tend to start at the end at
+ * the virtual address 0, and up).
+ *
+ * The hash function can be chosen arbitrarily but may result in
+ * duplicate emitted vtables if different hash functions are being used
+ * concurrently, such as mixing the default used by `start/end table`
+ * with a custom function (this is not incorrect, it only increases the
+ * buffer size and cache pressure).
+ *
+ * If a vtable has a unique ID by other means than hashing the content,
+ * such as an integer id, an offset into another buffer, or a pointer,
+ * a good hash may be multiplication by a 32-bit prime number. The hash
+ * table is not very sensitive to collisions as it uses externally
+ * chained hashing with move to front semantics.
+ */
+flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
+ const flatbuffers_voffset_t *vt,
+ flatbuffers_voffset_t vt_size, uint32_t vt_hash);
+
+/*
+ * Based on Knuth's prime multiplier.
+ *
+ * This is an incremental hash that is called with id and size of each
+ * non-empty field, and finally with the two vtable header fields
+ * when vtables are constructed via `table_add/table_add_offset`.
+ *
+ */
+#ifndef FLATCC_SLOW_MUL
+#ifndef FLATCC_BUILDER_INIT_VT_HASH
+#define FLATCC_BUILDER_INIT_VT_HASH(hash) { (hash) = (uint32_t)0x2f693b52UL; }
+#endif
+#ifndef FLATCC_BUILDER_UPDATE_VT_HASH
+#define FLATCC_BUILDER_UPDATE_VT_HASH(hash, id, offset) \
+ { (hash) = (((((uint32_t)id ^ (hash)) * (uint32_t)2654435761UL)\
+ ^ (uint32_t)(offset)) * (uint32_t)2654435761UL); }
+#endif
+#ifndef FLATCC_BUILDER_BUCKET_VT_HASH
+#define FLATCC_BUILDER_BUCKET_VT_HASH(hash, width) (((uint32_t)(hash)) >> (32 - (width)))
+#endif
+#endif
+
+/*
+ * By default we use Bernsteins hash as fallback if multiplication is slow.
+ *
+ * This just has to be simple, fast, and work on devices without fast
+ * multiplication. We are not too sensitive to collisions. Feel free to
+ * experiment and replace.
+ */
+#ifndef FLATCC_BUILDER_INIT_VT_HASH
+#define FLATCC_BUILDER_INIT_VT_HASH(hash) { (hash) = 5381; }
+#endif
+#ifndef FLATCC_BUILDER_UPDATE_VT_HASH
+#define FLATCC_BUILDER_UPDATE_VT_HASH(hash, id, offset) \
+ { (hash) = ((((hash) << 5) ^ (id)) << 5) ^ (offset); }
+#endif
+#ifndef FLATCC_BUILDER_BUCKET_VT_HASH
+#define FLATCC_BUILDER_BUCKET_VT_HASH(hash, width) (((1 << (width)) - 1) & (hash))
+#endif
+
+
+
+/**
+ * Normally use `start_table` instead of this call.
+ *
+ * This is a low-level call only intended for high-performance
+ * applications that repeatedly churn about similar tables of known
+ * layout, or as a support layer for other builders that maintain their
+ * own allocation rather than using the stack of this builder.
+ *
+ * Creates a table from an already emitted vtable, actual data that is
+ * properly aligned relative to data start and in little endian
+ * encoding. Unlike structs, tables can have offset fields. These must
+ * be stored as flatcc_builder_ref_t types (which have uoffset_t size) as
+ * returned by the api in native encoding. The `offsets` table contain
+ * voffsets relative to `data` start (this is different from how vtables
+ * store offsets because they are relative to a table header). The
+ * `offsets` table is only used temporarily to translate the stored
+ * references and is not part of final buffer content. `offsets` may be
+ * null if `offset_count` is 0. `align` should be the highest aligned
+ * field in the table, but `size` need not be a multiple of `align`.
+ * Aside from endian encoding, the vtable must record a table size equal
+ * to `size + sizeof(flatbuffers_uoffset_t)` because it includes the
+ * table header field size. The vtable is not accessed by this call (nor
+ * is it available). Unlike other references, the vtable reference may
+ * be shared between tables in the same buffer (not with any related
+ * buffer such as a parent buffer).
+ *
+ * The operation will not use any allocation, but will update the
+ * alignment of the containing buffer if any.
+ *
+ * Note: unlike other create calls, except `create_offset_vector`,
+ * the source data is modified in order to translate references into
+ * offsets before emitting the table.
+ */
+flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B,
+ const void *data, size_t size, uint16_t align,
+ flatbuffers_voffset_t *offsets, int offset_count,
+ flatcc_builder_vt_ref_t vt_ref);
+
+/**
+ * Starts a table, typically following a start_buffer call as an
+ * alternative to starting a struct, or to create table fields to be
+ * stored in a parent table, or in an offset vector.
+ * A number of `table_add` and `table_add_offset` calls may be placed
+ * before the `end_table` call. Struct fields should NOT use `struct`
+ * related call (because table structs are in-place), rather they should
+ * use the `table_add` call with the appropriate size and alignment.
+ *
+ * A table, like other reference returning calls, may also be started
+ * outside a buffer if the buffer header and alignment is of no
+ * interest to the application, for example as part of an externally
+ * built buffer.
+ *
+ * `count` must be larger than the largest id used for this table
+ * instance. Normally it is set to the number of fields defined in the
+ * schema, but it may be less if memory is constrained and only few
+ * fields with low valued id's are in use. The count can be extended
+ * later with `reserve_table` if necessary. `count` may also be set to a
+ * large enough value such as FLATBUFFERS_ID_MAX + 1 if memory is not a
+ * concern (reserves about twice the maximum vtable size to track the
+ * current vtable and voffsets where references must be translated to
+ * offsets at table end). `count` may be zero if for example
+ * `reserve_table` is being used.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int flatcc_builder_start_table(flatcc_builder_t *B, int count);
+
+/**
+ * Call before adding a field with an id that is not below the count set
+ * at table start. Not needed in most cases. For performance reasons
+ * the builder does not check all bounds all the time, but the user
+ * can do so if memory constraints prevent start_table from using a
+ * conservative value. See also `table_start`.
+ *
+ * Note: this call has absolutely no effect on the table layout, it just
+ * prevents internal buffer overruns.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int flatcc_builder_reserve_table(flatcc_builder_t *B, int count);
+
+/**
+ * Completes the table constructed on the internal stack including
+ * emitting a vtable, or finding a matching vtable that has already been
+ * emitted to the same buffer. (Vtables cannot be shared between
+ * buffers, but they can between tables of the same buffer).
+ *
+ * Note: there is a considerable, but necessary, amount of bookkeeping
+ * involved in constructing tables. The `create_table` call is much
+ * faster, but it also expects a lot of work to be done already.
+ *
+ * Tables can be created with no fields added. This will result in an
+ * empty vtable and a table with just a vtable reference. If a table is
+ * used as a sub-table, such a table might also not be stored at all,
+ * but we do not return a special reference for that, nor do we provide
+ * an option to not create the table in this case. This may be
+ * interpreted as the difference between a null table (not stored in
+ * parent), and an empty table with a unique offset (and thus identity)
+ * different from other empty tables.
+ */
+flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B);
+
+/**
+ * Optionally this method can be called just before `flatcc_builder_end_table`
+ * to verify that all required fields have been set.
+ * Each entry is a table field id.
+ *
+ * Union fields should use the type field when checking for presence and
+ * may also want to check the soundness of the union field overall using
+ * `check_union_field` with the id one higher than the type field id.
+ *
+ * This function is typically called by an assertion in generated builder
+ * interfaces while release builds may want to avoid this performance
+ * overhead.
+ *
+ * Returns 1 if all fields are matched, 0 otherwise.
+ */
+int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count);
+
+/**
+ * Same as `check_required` when called with a single element.
+ *
+ * Typically used when direct calls are more convenient than building an
+ * array first. Useful when dealing with untrusted input such as parsed
+ * text from an external source.
+ */
+int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id);
+
+/**
+ * Checks that a union field is valid.
+ *
+ * The criteria is:
+ *
+ * If the type field is not present (at id - 1), or it holds a zero value,
+ * then the table field (at id) must be present.
+ *
+ * Generated builder code may be able to enforce valid unions without
+ * this check by setting both type and table together, but e.g. parsers
+ * may receive the type and the table independently and then it makes
+ * sense to validate the union fields before table completion.
+ *
+ * Note that an absent union field is perfectly valid. If a union is
+ * required, the type field (id - 1), should be checked separately
+ * while the table field should only be checked here because it can
+ * (and must) be absent when the type is NONE (= 0).
+ */
+int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id);
+
+/**
+ * A struct, enum or scalar added should be stored in little endian in
+ * the return pointer location. The pointer is short lived and will
+ * not necessarily survive other builder calls.
+ *
+ * A union type field can also be set using this call. In fact, this is
+ * the only way to deal with unions via this API. Consequently, it is
+ * the user's responsibility to ensure the appropriate type is added
+ * at the next higher id.
+ *
+ * Null and default values:
+ *
+ * FlatBuffers does not officially provide an option for null values
+ * because it does not distinguish between default values and values
+ * that are not present. At this api level, we do not deal with defaults
+ * at all. Callee should test the stored value against the default value
+ * and only add the field if it does not match the default. This only
+ * applies to scalar and enum values. Structs cannot have defaults so
+ * their absence means null, and strings, vectors and subtables do have
+ * natural null values different from the empty object and empty objects
+ * with different identity is also possible.
+ *
+ * To handle Null for scalars, the following approach is recommended:
+ *
+ * Provide a schema-specific `add` operation that only calls this
+ * low-level add method if the default does not match, and also provide
+ * another `set` operation that always stores the value, regardless of
+ * default. For most readers this will be transparent, except for extra
+ * space used, but for Null aware readers, these can support operations
+ * to test for Null/default/other value while still supporting the
+ * normal read operation that returns default when a value is absent
+ * (i.e. Null).
+ *
+ * It is valid to call with a size of 0 - the effect being adding the
+ * vtable entry. The call may also be dropped in this case to reduce
+ * the vtable size - the difference will be in null detection.
+ */
+void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align);
+
+/**
+ * Returns a pointer to the buffer holding the last field added. The
+ * size argument must match the field size added. May, for example, be
+ * used to perform endian conversion after initially updating field
+ * as a native struct. Must be called before the table is ended.
+ */
+void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size);
+
+/**
+ * Similar to `table_add` but copies source data into the buffer before
+ * it is returned. Useful when adding a larger struct already encoded in
+ * little endian.
+ */
+void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align);
+
+/**
+ * Add a string, vector, or sub-table depending on the type of the
+ * field identifier. The offset ref obtained when the field object was
+ * closed should be stored as is in the given pointer. The pointer
+ * is only valid short term, so create the object before calling
+ * add to table, but the owner table can be started earlier. Never mix
+ * refs from nested buffers with parent buffers.
+ *
+ * Also use this method to add nested buffers. A nested buffer is
+ * simply a buffer created while another buffer is open. The buffer
+ * close operation provides the necessary reference.
+ *
+ * When the table closes, all references get converted into offsets.
+ * Before that point, it is not required that the offset is written
+ * to.
+ */
+flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id);
+
+/*
+ * Adds a union type and reference in a single operation and returns 0
+ * on success. Stores the type field at `id - 1` and the value at
+ * `id`. The `value` is a reference to a table, to a string, or to a
+ * standalone `struct` outside the table.
+ *
+ * If the type is 0, the value field must also be 0.
+ *
+ * Unions can also be added as separate calls to the type and the offset
+ * separately which can lead to better packing when the type is placed
+ * together will other small fields.
+ */
+int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
+ flatcc_builder_union_ref_t uref);
+
+/*
+ * Adds a union type vector and value vector in a single operation
+ * and returns 0 on success.
+ *
+ * If both the type and value vector is null, nothing is added.
+ * Otherwise both must be present and have the same length.
+ *
+ * Any 0 entry in the type vector must also have a 0 entry in
+ * the value vector.
+ */
+int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
+ flatcc_builder_union_vec_ref_t uvref);
+/**
+ * Creates a vector in a single operation using an externally supplied
+ * buffer. This completely bypasses the stack, but the size must be
+ * known and the content must be little endian. Do not use for strings
+ * and offset vectors. Other flatbuffer vectors could be used as a
+ * source, but the length prefix is not required.
+ *
+ * Set `max_count` to `FLATBUFFERS_COUNT_MAX(elem_size)` before a call
+ * to any string or vector operation to the get maximum safe vector
+ * size, or use (size_t)-1 if overflow is not a concern.
+ *
+ * The max count property is a global property that remains until
+ * explicitly changed.
+ *
+ * `max_count` is to prevent malicious or accidental overflow which is
+ * difficult to detect by multiplication alone, depending on the type
+ * sizes being used and having `max_count` thus avoids a division for
+ * every vector created. `max_count` does not guarantee a vector will
+ * fit in an empty buffer, it just ensures the internal size checks do
+ * not overflow. A safe, sane limit would be max_count / 4 because that
+ * is half the maximum buffer size that can realistically be
+ * constructed, corresponding to a vector size of `UOFFSET_MAX / 4`
+ * which can always hold the vector in 1GB excluding the size field when
+ * sizeof(uoffset_t) = 4.
+ */
+flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
+ const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count);
+
+/**
+ * Starts a vector on the stack.
+ *
+ * Do not use these calls for string or offset vectors, but do store
+ * scalars, enums and structs, always in little endian encoding.
+ *
+ * Use `extend_vector` subsequently to add zero, one or more elements
+ * at time.
+ *
+ * See `create_vector` for `max_count` argument (strings and offset
+ * vectors have a fixed element size and do not need this argument).
+ *
+ * Returns 0 on success.
+ */
+int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size,
+ uint16_t align, size_t max_count);
+
+/**
+ * Emits the vector constructed on the stack by start_vector.
+ *
+ * The vector may be accessed in the emitted stream using the returned
+ * reference, even if the containing buffer is still under construction.
+ * This may be useful for sorting. This api does not support sorting
+ * because offset vectors cannot read their references after emission,
+ * and while plain vectors could be sorted, it has been chosen that this
+ * task is better left as a separate processing step. Generated code can
+ * provide sorting functions that work on final in-memory buffers.
+ */
+flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on stack,
+ * accessible up to the number of elements currently on stack.
+ */
+void *flatcc_builder_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Returns a zero initialized buffer to a new region of the vector which
+ * is extended at the end. The buffer must be consumed before other api
+ * calls that may affect the stack, including `extend_vector`.
+ *
+ * Do not use for strings, offset or union vectors. May be used for nested
+ * buffers, but these have dedicated calls to provide better alignment.
+ */
+void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized `vector_extend` that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error. Note: for structs, care must be taken to ensure
+ * the source has been zero padded. For this reason it may be better to
+ * use extend(B, 1) and assign specific fields instead.
+ */
+void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data);
+
+/**
+ * Pushes multiple elements at a time.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count);
+
+/**
+ * Removes elements already added to vector that has not been ended.
+ * For example, a vector of parsed list may remove the trailing comma,
+ * or the vector may simply overallocate to get some temporary working
+ * space. The total vector size must never become negative.
+ *
+ * Returns -1 if the count is larger than the current count, or 0 on success.
+ */
+int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count);
+
+/*
+ * Similar to `create_vector` but with references that get translated
+ * into offsets. The references must, as usual, belong to the current
+ * buffer. Strings, scalar and struct vectors can emit directly without
+ * stack allocation, but offset vectors must translate the offsets
+ * and therefore need the temporary space. Thus, this function is
+ * roughly equivalent to start, append, end offset vector.
+ *
+ * See also `flatcc_builder_create_offset_vector_direct`.
+ */
+flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *data, size_t count);
+
+/*
+ * NOTE: this call takes non-const source array of references
+ * and destroys the content.
+ *
+ * This is a faster version of `create_offset_vector` where the
+ * source references are destroyed. In return the vector can be
+ * emitted directly without passing over the stack.
+ */
+flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *data, size_t count);
+
+
+/**
+ * Starts a vector holding offsets to tables or strings. Before
+ * completion it will hold `flatcc_builder_ref_t` references because the
+ * offset is not known until the vector start location is known, which
+ * depends on the final size, which for parsers is generally unknown.
+ */
+int flatcc_builder_start_offset_vector(flatcc_builder_t *B);
+
+/**
+ * Similar to `end_vector` but updates all stored references so they
+ * become offsets to the vector start.
+ */
+flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B);
+
+/**
+ * Same as `flatcc_builder_end_offset_vector` except null references are
+ * permitted when the corresponding `type` entry is 0 (the 'NONE' type).
+ * This makes it possible to build union vectors with less overhead when
+ * the `type` vector is already known. Use standard offset vector calls
+ * prior to this call.
+ */
+flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *type);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on stack,
+ * accessible up to the number of elements currently on stack.
+ */
+void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_vector` but returns a buffer indexable as
+ * `flatcc_builder_ref_t` array. All elements must be set to a valid
+ * unique non-null reference, but truncate and extend may be used to
+ * perform edits. Unused references will leave garbage in the buffer.
+ * References should not originate from any other buffer than the
+ * current, including parents and nested buffers. It is valid to reuse
+ * references in DAG form when contained in the same buffer, excluding any
+ * nested, sibling or parent buffers.
+ */
+flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count);
+
+/** Similar to truncate_vector. */
+int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized extend that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B,
+ flatcc_builder_ref_t ref);
+
+/**
+ * Takes an array of refs as argument to do a multi push operation.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *refs, size_t count);
+
+/**
+ * All union vector operations are like offset vector operations,
+ * except they take a struct with a type and a reference rather than
+ * just a reference. The finished union vector is returned as a struct
+ * of two references, one for the type vector and one for the table offset
+ * vector. Each reference goes to a separate table field where the type
+ * offset vector id must be one larger than the type vector.
+ */
+
+/**
+ * Creates a union vector which is in reality two vectors, a type vector
+ * and an offset vector. Both vectors references are returned.
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count);
+
+/*
+ * NOTE: this call takes non-const source array of references
+ * and destroys the content. The type array remains intact.
+ *
+ * This is a faster version of `create_union_vector` where the source
+ * references are destroyed and where the types are given in a separate
+ * array. In return the vector can be emitted directly without passing
+ * over the stack.
+ *
+ * Unlike `create_offset_vector` we do allow null references but only if
+ * the union type is NONE (0).
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count);
+
+/*
+ * Creates just the type vector part of a union vector. This is
+ * similar to a normal `create_vector` call except that the size
+ * and alignment are given implicitly. Can be used during
+ * cloning or similar operations where the types are all given
+ * but the values must be handled one by one as prescribed by
+ * the type. The values can be added separately as an offset vector.
+ */
+flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, size_t count);
+
+/**
+ * Starts a vector holding types and offsets to tables or strings. Before
+ * completion it will hold `flatcc_builder_union_ref_t` references because the
+ * offset is not known until the vector start location is known, which
+ * depends on the final size, which for parsers is generally unknown,
+ * and also because the union type must be separated out into a separate
+ * vector. It would not be practical to push on two different vectors
+ * during construction.
+ */
+int flatcc_builder_start_union_vector(flatcc_builder_t *B);
+
+/**
+ * Similar to `end_vector` but updates all stored references so they
+ * become offsets to the vector start and splits the union references
+ * into a type vector and an offset vector.
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_union_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on stack,
+ * accessible up to the number of elements currently on stack.
+ */
+void *flatcc_builder_union_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_offset_vector` but returns a buffer indexable as a
+ * `flatcc_builder_union_ref_t` array. All elements must be set to a valid
+ * unique non-null reference with a valid union type to match, or it
+ * must be null with a zero union type.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count);
+
+/** Similar to truncate_vector. */
+int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized extend that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
+ flatcc_builder_union_ref_t uref);
+
+/**
+ * Takes an array of union_refs as argument to do a multi push operation.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count);
+
+/**
+ * Faster string operation that avoids temporary stack storage. The
+ * string is not required to be zero-terminated, but is expected
+ * (unchecked) to be utf-8. Embedded zeroes would be allowed but
+ * ubyte vectors should be used for that. The resulting string will
+ * have a zero termination added, not included in length.
+ */
+flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B,
+ const char *s, size_t len);
+
+/** `create_string` up to zero termination of source. */
+flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B,
+ const char *s);
+
+/**
+ * `create_string` up to zero termination or at most max_len of source.
+ *
+ * Note that like `strncpy` it will include `max_len` characters if
+ * the source is longer than `max_len`, but unlike `strncpy` it will
+ * always add zero termination.
+ */
+flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len);
+
+/**
+ * Starts an empty string that can be extended subsequently.
+ *
+ * While the string is being created, it is guaranteed that there is
+ * always a null character after the end of the current string length.
+ * This also holds after `extend` and `append` operations. It is not
+ * allowed to modify the null character.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_builder_start_string(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_vector` except for the buffer return type and a
+ * slight speed advantage. Strings are expected to contain utf-8 content
+ * but this isn't verified, and null characters would be accepted. The
+ * length is given in bytes.
+ *
+ * Appending too much, then truncating can be used to trim string
+ * escapes during parsing, or convert between unicode formats etc.
+ */
+char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len);
+
+/**
+ * Concatenates a length of string. If the string contains zeroes (which
+ * it formally shouldn't), they will be copied in.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len);
+
+/** `append_string` up to zero termination of source. */
+char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s);
+
+/** `append_string` up to zero termination or at most max_len of source. */
+char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len);
+
+/**
+ * Similar to `truncate_vector` available for consistency and a slight
+ * speed advantage. Reduces string by `len` bytes - it does not set
+ * the length. The resulting length must not become negative. Zero
+ * termination is not counted.
+ *
+ * Returns -1 if the length becomes negative, 0 on success.
+ */
+int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len);
+
+/**
+ * Similar to `end_vector` but adds a trailing zero not included
+ * in the length. The trailing zero is added regardless of whatever
+ * zero content may exist in the provided string (although it
+ * formally should not contain any).
+ */
+flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B);
+
+/** Returns the length of string currently on the stack. */
+size_t flatcc_builder_string_len(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the start of the string
+ * accessible up to the length of string currently on the stack.
+ */
+char *flatcc_builder_string_edit(flatcc_builder_t *B);
+
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Fast access to small buffers from default emitter.
+ *
+ * Only valid for default emitters before `flatcc_builder_clear`. The
+ * return buffer is not valid after a call to `flatcc_builder_reset` or
+ * `flatcc_builder_clear`.
+ *
+ * Returns null if the buffer size is too large to have a linear
+ * memory representation or if the emitter is not the default. A valid
+ * size is between half and a full emitter page size depending on vtable
+ * content.
+ *
+ * Non-default emitters must be accessed by means specific to the
+ * particular emitter.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The returned buffer should NOT be deallocated explicitly.
+ *
+ * The buffer size is the size reported by `flatcc_builder_get_buffer_size`.
+ */
+void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out);
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Default finalizer that allocates a buffer from the default emitter.
+ *
+ * Returns null if memory could not be allocated or if the emitter is
+ * not the default. This is just a convenience method - there are many
+ * other possible ways to extract the result of the emitter depending on
+ * use case.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The allocated buffer is aligned according to malloc which may not be
+ * sufficient in advanced cases - for that purpose
+ * `flatcc_builder_finalize_aligned_buffer` may be used.
+ *
+ * It may be worth calling `flatcc_builder_get_direct_buffer` first to see
+ * if the buffer is small enough to avoid copying.
+ *
+ * The returned buffer must be deallocated using `free`.
+ */
+void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out);
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Similar to `flatcc_builder_finalize_buffer` but ensures the returned
+ * memory is aligned to the overall alignment required for the buffer.
+ * Often it is not necessary unless special operations rely on larger
+ * alignments than the stored scalars.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The returned buffer must be deallocated using `aligned_free` which is
+ * implemented via `flatcc_flatbuffers.h`. `free` will usually work but
+ * is not portable to platforms without posix_memalign or C11
+ * aligned_alloc support.
+ *
+ * NOTE: if a library might be compiled with a version of aligned_free
+ * that differs from the application using it, use
+ * `flatcc_builder_aligned_free` to make sure the correct deallocation
+ * function is used.
+ */
+void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out);
+
+/*
+ * A stable implementation of `aligned_alloc` that is not sensitive
+ * to the applications compile time flags.
+ */
+void *flatcc_builder_aligned_alloc(size_t alignment, size_t size);
+
+/*
+ * A stable implementation of `aligned_free` that is not sensitive
+ * to the applications compile time flags.
+ */
+void flatcc_builder_aligned_free(void *p);
+
+/*
+ * Same allocation as `flatcc_builder_finalize_buffer` returns. Usually
+ * same as `malloc` but can be redefined via macros.
+ */
+void *flatcc_builder_alloc(size_t size);
+
+/*
+ * A stable implementation of `free` when the default allocation
+ * methods have been redefined.
+ *
+ * Deallocates memory returned from `flatcc_builder_finalize_buffer`.
+ */
+void flatcc_builder_free(void *p);
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Convenience method to copy buffer from default emitter. Forwards
+ * call to default emitter and returns input pointer, or null if
+ * the emitter is not default or if the given size is smaller than
+ * the buffer size.
+ *
+ * Note: the `size` argument is the target buffers capacity, not the
+ * flatcc_builders buffer size.
+ *
+ * Other emitters have custom interfaces for reaching their content.
+ */
+void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_BUILDER_H */
diff --git a/include/flatcc/flatcc_emitter.h b/include/flatcc/flatcc_emitter.h
new file mode 100644
index 0000000..b8c83b9
--- /dev/null
+++ b/include/flatcc/flatcc_emitter.h
@@ -0,0 +1,215 @@
+#ifndef FLATCC_EMITTER_H
+#define FLATCC_EMITTER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Default implementation of a flatbuilder emitter.
+ *
+ * This may be used as a starting point for more advanced emitters,
+ * for example writing completed pages to disk or network and
+ * then recycling those pages.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_types.h"
+#include "flatcc/flatcc_iov.h"
+#include "flatcc/flatcc_alloc.h"
+
+/*
+ * The buffer steadily grows during emission but the design allows for
+ * an extension where individual pages can be recycled before the buffer
+ * is complete, for example because they have been transmitted.
+ *
+ * When done, the buffer can be cleared to free all memory, or reset to
+ * maintain an adaptive page pool for next buffer construction.
+ *
+ * Unlike an exponentially growing buffer, each buffer page remains
+ * stable in memory until reset, clear or recycle is called.
+ *
+ * Design notes for possible extensions:
+ *
+ * The buffer is a ring buffer marked by a front and a back page. The
+ * front and back may be the same page and may initially be absent.
+ * Anything outside these pages are unallocated pages for recycling.
+ * Any page between (but excluding) the front and back pages may be
+ * recycled by unlinking and relinking outside the front and back pages
+ * but then copy operations no longer make sense. Each page stores the
+ * logical offset within the buffer but isn't otherwise used by the
+ * implementation - it might be used for network transmission. The buffer
+ * is not explicitly designed for multithreaded access but any page
+ * strictly between front and back is not touched unless recycled and in
+ * this case aligned allocation is useful to prevent cache line sharing.
+ */
+
+/*
+ * Memory is allocated in fixed length page units - the first page is
+ * split between front and back so each get half the page size. If the
+ * size is a multiple of 128 then each page offset will be a multiple of
+ * 64, which may be useful for sequencing etc.
+ */
+#ifndef FLATCC_EMITTER_PAGE_SIZE
+#define FLATCC_EMITTER_MAX_PAGE_SIZE 3000
+#define FLATCC_EMITTER_PAGE_MULTIPLE 64
+#define FLATCC_EMITTER_PAGE_SIZE ((FLATCC_EMITTER_MAX_PAGE_SIZE) &\
+ ~(2 * (FLATCC_EMITTER_PAGE_MULTIPLE) - 1))
+#endif
+
+#ifndef FLATCC_EMITTER_ALLOC
+#ifdef FLATCC_EMITTER_USE_ALIGNED_ALLOC
+/*
+ * <stdlib.h> does not always provide aligned_alloc, so include whatever
+ * is required when enabling this feature.
+ */
+#define FLATCC_EMITTER_ALLOC(n) aligned_alloc(FLATCC_EMITTER_PAGE_MULTIPLE,\
+ (((n) + FLATCC_EMITTER_PAGE_MULTIPLE - 1) & ~(FLATCC_EMITTER_PAGE_MULTIPLE - 1)))
+#ifndef FLATCC_EMITTER_FREE
+#define FLATCC_EMITTER_FREE(p) aligned_free(p)
+#endif
+#endif
+#endif
+
+#ifndef FLATCC_EMITTER_ALLOC
+#define FLATCC_EMITTER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+#ifndef FLATCC_EMITTER_FREE
+#define FLATCC_EMITTER_FREE(p) FLATCC_FREE(p)
+#endif
+
+typedef struct flatcc_emitter_page flatcc_emitter_page_t;
+typedef struct flatcc_emitter flatcc_emitter_t;
+
+struct flatcc_emitter_page {
+ uint8_t page[FLATCC_EMITTER_PAGE_SIZE];
+ flatcc_emitter_page_t *next;
+ flatcc_emitter_page_t *prev;
+ /*
+ * The offset is relative to page start, but not necessarily
+ * to any present content if part of front or back page,
+ * and undefined for unused pages.
+ */
+ flatbuffers_soffset_t page_offset;
+};
+
+/*
+ * Must be allocated and zeroed externally, e.g. on the stack
+ * then provided as emit_context to the flatbuilder along
+ * with the `flatcc_emitter` function.
+ */
+struct flatcc_emitter {
+ flatcc_emitter_page_t *front, *back;
+ uint8_t *front_cursor;
+ size_t front_left;
+ uint8_t *back_cursor;
+ size_t back_left;
+ size_t used;
+ size_t capacity;
+ size_t used_average;
+};
+
+/* Optional helper to ensure emitter is zeroed initially. */
+static inline void flatcc_emitter_init(flatcc_emitter_t *E)
+{
+ memset(E, 0, sizeof(*E));
+}
+
+/* Deallocates all buffer memory making the emitter ready for next use. */
+void flatcc_emitter_clear(flatcc_emitter_t *E);
+
+/*
+ * Similar to `flatcc_emitter_clear` but heuristically keeps some allocated
+ * memory between uses while gradually reducing peak allocations.
+ * For small buffers, a single page will remain available with no
+ * additional allocations or deallocations after first use.
+ */
+void flatcc_emitter_reset(flatcc_emitter_t *E);
+
+/*
+ * Helper function that allows a page between front and back to be
+ * recycled while the buffer is still being constructed - most likely as part
+ * of partial copy or transmission. Attempting to recycle front or back
+ * pages will result in an error. Recycling pages outside the
+ * front and back will be valid but pointless. After recycling, copy
+ * operations are no longer well-defined and should be replaced with
+ * whatever logic is recycling the pages. The reset operation
+ * automatically recycles all (remaining) pages when emission is
+ * complete. After recycling, the `flatcc_emitter_get_buffer_size` function will
+ * return as if recycle was not called, but will only represent the
+ * logical size, not the size of the active buffer. Because a recycled
+ * page is fully utilized, it is fairly easy to compensate for this if
+ * required.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_emitter_recycle_page(flatcc_emitter_t *E, flatcc_emitter_page_t *p);
+
+/*
+ * The amount of data copied with `flatcc_emitter_copy_buffer` and related
+ * functions. Normally called at end of buffer construction but is
+ * always valid, as is the copy functions. The size is a direct
+ * function of the amount emitted data so the flatbuilder itself can
+ * also provide this information.
+ */
+static inline size_t flatcc_emitter_get_buffer_size(flatcc_emitter_t *E)
+{
+ return E->used;
+}
+
+/*
+ * Returns buffer start iff the buffer fits on a single internal page.
+ * Only useful for fairly small buffers - about half the page size since
+ * one half of first page goes to vtables that likely use little space.
+ * Returns null if request could not be served.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ */
+static inline void *flatcc_emitter_get_direct_buffer(flatcc_emitter_t *E, size_t *size_out)
+{
+ if (E->front == E->back) {
+ if (size_out) {
+ *size_out = E->used;
+ }
+ return E->front_cursor;
+ }
+ if (size_out) {
+ *size_out = 0;
+ }
+ return 0;
+}
+
+/*
+ * Copies the internal flatcc_emitter representation to an externally
+ * provided linear buffer that must have size `flatcc_emitter_get_buffer_size`.
+ *
+ * If pages have been recycled, only the remaining pages will be copied
+ * and thus less data than what `flatcc_emitter_get_buffer_size` would suggest. It
+ * makes more sense to provide a customized copy operation when
+ * recycling pages.
+ *
+ * If the buffer is too small, nothing is copied, otherwise the
+ * full buffer is copied and the input buffer is returned.
+ */
+void *flatcc_emitter_copy_buffer(flatcc_emitter_t *E, void *buf, size_t size);
+
+/*
+ * The emitter interface function to the flatbuilder API.
+ * `emit_context` should be of type `flatcc_emitter_t` for this
+ * particular implementation.
+ *
+ * This function is compatible with the `flatbuilder_emit_fun`
+ * type defined in "flatbuilder.h".
+ */
+int flatcc_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_EMITTER_H */
diff --git a/include/flatcc/flatcc_endian.h b/include/flatcc/flatcc_endian.h
new file mode 100644
index 0000000..0592f31
--- /dev/null
+++ b/include/flatcc/flatcc_endian.h
@@ -0,0 +1,125 @@
+#ifndef FLATCC_ENDIAN_H
+#define FLATCC_ENDIAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This file provides helper macros to define type-specific macros and
+ * inline functions that convert between stored data and native data
+ * independently of both native (host) endianness and protocol endianness
+ * (i.e. the serialized endian format).
+ *
+ * To detect endianness correctly ensure one of the following is defined.
+ *
+ * __LITTLE_ENDIAN__
+ * __BIG_ENDIAN__
+ * FLATBUFFERS_LITTLEENDIAN=1
+ * FLATBUFFERS_LITTLEENDIAN=0
+ *
+ * Note: the Clang compiler likely already does this, but other
+ * compilers may have their own way, if at all.
+ *
+ * It is also necessary to include <endian.h> or a compatible
+ * implementation in order to provide:
+ *
+ *     le16toh, le32toh, le64toh, be16toh, be32toh, be64toh,
+ * htole16, htole32, htole64, htobe16, htobe32, htobe64.
+ *
+ * A simple way to ensure all of the above for most platforms is
+ * to include the portable endian support file:
+ *
+ * #include "flatcc/portable/pendian.h"
+ *
+ * It is also necessary to include
+ *
+ * #include "flatcc/flatcc_types.h"
+ *
+ * or an equivalent file. This makes it possible to change the
+ * endianness of the serialized data and the sizes of flatbuffer
+ * specific types such as `uoffset_t`.
+ *
+ * Note: the mentioned include files are likely already included
+ * by the file including this file, at least for the default
+ * configuration.
+ */
+
+#ifndef UINT8_t
+#include <stdint.h>
+#endif
+
+/* These are needed to simplify accessor macros and are not found in <endian.h>. */
+#ifndef le8toh
+#define le8toh(n) (n)
+#endif
+
+#ifndef be8toh
+#define be8toh(n) (n)
+#endif
+
+#ifndef htole8
+#define htole8(n) (n)
+#endif
+
+#ifndef htobe8
+#define htobe8(n) (n)
+#endif
+
+#include "flatcc/flatcc_accessors.h"
+
+/* This is the binary encoding endianness, usually LE for flatbuffers. */
+#if FLATBUFFERS_PROTOCOL_IS_LE
+#define flatbuffers_endian le
+#elif FLATBUFFERS_PROTOCOL_IS_BE
+#define flatbuffers_endian be
+#else
+#error "flatbuffers has no defined endianness"
+#endif
+
+ __flatcc_define_basic_scalar_accessors(flatbuffers_, flatbuffers_endian)
+
+ __flatcc_define_integer_accessors(flatbuffers_bool, flatbuffers_bool_t,
+ FLATBUFFERS_BOOL_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(flatbuffers_union_type, flatbuffers_union_type_t,
+ FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)
+
+ __flatcc_define_integer_accessors(__flatbuffers_uoffset, flatbuffers_uoffset_t,
+ FLATBUFFERS_UOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_soffset, flatbuffers_soffset_t,
+ FLATBUFFERS_SOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_voffset, flatbuffers_voffset_t,
+ FLATBUFFERS_VOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_utype, flatbuffers_utype_t,
+ FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_thash, flatbuffers_thash_t,
+ FLATBUFFERS_THASH_WIDTH, flatbuffers_endian)
+
+/* flatcc/portable/pendian.h sets LITTLE/BIG flags if possible, and always defines le16toh. */
+#ifndef flatbuffers_is_native_pe
+#if defined(__LITTLE_ENDIAN__) || FLATBUFFERS_LITTLEENDIAN
+#undef FLATBUFFERS_LITTLEENDIAN
+#define FLATBUFFERS_LITTLEENDIAN 1
+#define flatbuffers_is_native_pe() (FLATBUFFERS_PROTOCOL_IS_LE)
+#elif defined(__BIG_ENDIAN__) || (defined(FLATBUFFERS_LITTLEENDIAN) && !FLATBUFFERS_LITTLEENDIAN)
+#undef FLATBUFFERS_LITTLEENDIAN
+#define FLATBUFFERS_LITTLEENDIAN 0
+#define flatbuffers_is_native_pe() (FLATBUFFERS_PROTOCOL_IS_BE)
+#else
+#define flatbuffers_is_native_pe() (__FLATBUFFERS_CONCAT(flatbuffers_endian, 16toh)(1) == 1)
+#endif
+#endif
+
+#ifndef flatbuffers_is_native_le
+#define flatbuffers_is_native_le() flatbuffers_is_native_pe()
+#endif
+
+#ifndef flatbuffers_is_native_be
+#define flatbuffers_is_native_be() (!flatbuffers_is_native_pe())
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ENDIAN_H */
diff --git a/include/flatcc/flatcc_epilogue.h b/include/flatcc/flatcc_epilogue.h
new file mode 100644
index 0000000..496857b
--- /dev/null
+++ b/include/flatcc/flatcc_epilogue.h
@@ -0,0 +1,8 @@
+/* Include guard intentionally left out. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "flatcc/portable/pdiagnostic_pop.h"
+
diff --git a/include/flatcc/flatcc_flatbuffers.h b/include/flatcc/flatcc_flatbuffers.h
new file mode 100644
index 0000000..4bfc743
--- /dev/null
+++ b/include/flatcc/flatcc_flatbuffers.h
@@ -0,0 +1,55 @@
+/*
+ * Even C11 compilers depend on clib support for `static_assert` which
+ * isn't always present, so we deal with this here for all compilers.
+ *
+ * Outside include guard to handle scope counter.
+ */
+#include "flatcc/portable/pstatic_assert.h"
+
+#ifndef FLATCC_FLATBUFFERS_H
+#define FLATCC_FLATBUFFERS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef flatcc_flatbuffers_defined
+#define flatcc_flatbuffers_defined
+
+#ifdef FLATCC_PORTABLE
+#include "flatcc/flatcc_portable.h"
+#endif
+#include "flatcc/portable/pwarnings.h"
+/* Needed by C99 compilers without FLATCC_PORTABLE. */
+#include "flatcc/portable/pstdalign.h"
+
+/* Handle fallthrough attribute in switch statements. */
+#include "flatcc/portable/pattributes.h"
+
+#include "flatcc/flatcc_alloc.h"
+#include "flatcc/flatcc_assert.h"
+
+#define __FLATBUFFERS_PASTE2(a, b) a ## b
+#define __FLATBUFFERS_PASTE3(a, b, c) a ## b ## c
+#define __FLATBUFFERS_CONCAT(a, b) __FLATBUFFERS_PASTE2(a, b)
+
+/*
+ * "flatcc_endian.h" requires the preceding include files,
+ * or compatible definitions.
+ */
+#include "flatcc/portable/pendian.h"
+#include "flatcc/flatcc_types.h"
+#include "flatcc/flatcc_endian.h"
+#include "flatcc/flatcc_identifier.h"
+
+#ifndef FLATBUFFERS_WRAP_NAMESPACE
+#define FLATBUFFERS_WRAP_NAMESPACE(ns, x) ns ## _ ## x
+#endif
+
+#endif /* flatcc_flatbuffers_defined */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_FLATBUFFERS_H */
diff --git a/include/flatcc/flatcc_identifier.h b/include/flatcc/flatcc_identifier.h
new file mode 100644
index 0000000..825f0fd
--- /dev/null
+++ b/include/flatcc/flatcc_identifier.h
@@ -0,0 +1,148 @@
+#ifndef FLATCC_IDENTIFIER_H
+#define FLATCC_IDENTIFIER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef FLATCC_FLATBUFFERS_H
+#error "include via flatcc/flatcc_flatbuffers.h"
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/*
+ * FlatBuffers identifiers are normally specified by "file_identifier" in
+ * the schema, but a standard hash of the fully qualified type name can
+ * also be used. This file implements such a mapping, but the generated
+ * headers also contain the necessary information for known types.
+ */
+
+
+/*
+ * Returns the type hash of a given name in native endian format.
+ * Generated code already provides these, but if a name was changed
+ * in the schema it may be relevant to recompute the hash manually.
+ *
+ * The wire-format of this value should always be little endian.
+ *
+ * Note: this must be the fully qualified name, e.g. in the namespace
+ * "MyGame.Example":
+ *
+ * flatbuffers_type_hash_from_name("MyGame.Example.Monster");
+ *
+ * or, in the global namespace just:
+ *
+ * flatbuffers_type_hash_from_name("MyTable");
+ *
+ * This assumes 32 bit hash type. For other sizes, other FNV-1a
+ * constants would be required.
+ *
+ * Note that we reserve hash value 0 for missing or ignored value.
+ */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_name(const char *name)
+{
+ uint32_t hash = UINT32_C(2166136261);
+ while (*name) {
+ hash ^= (unsigned char)*name;
+ hash = hash * UINT32_C(16777619);
+ ++name;
+ }
+ if (hash == 0) {
+ hash = UINT32_C(2166136261);
+ }
+ return hash;
+}
+
+/*
+ * Type hash encoded as little endian file identifier string.
+ * Note: if type hash is 0, the identifier should be null which
+ * we cannot return in this interface.
+ */
+static inline void flatbuffers_identifier_from_type_hash(flatbuffers_thash_t type_hash, flatbuffers_fid_t out_identifier)
+{
+ out_identifier[0] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[1] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[2] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[3] = (char)(type_hash & 0xff);
+}
+
+/* Native integer encoding of file identifier. */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_identifier(const flatbuffers_fid_t identifier)
+{
+ uint8_t *p = (uint8_t *)identifier;
+
+ return identifier ?
+ (uint32_t)p[0] + (((uint32_t)p[1]) << 8) + (((uint32_t)p[2]) << 16) + (((uint32_t)p[3]) << 24) : 0;
+}
+
+/*
+ * Convert a null terminated string identifier like "MONS" or "X" into a
+ * native type hash identifier, usually for comparison. This will not
+ * work with type hash strings because they can contain null bytes.
+ */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_string(const char *identifier)
+{
+ flatbuffers_thash_t h = 0;
+ const uint8_t *p = (const uint8_t *)identifier;
+
+ if (!p[0]) return h;
+ h += ((flatbuffers_thash_t)p[0]);
+ if (!p[1]) return h;
+ h += ((flatbuffers_thash_t)p[1]) << 8;
+ if (!p[2]) return h;
+ h += ((flatbuffers_thash_t)p[2]) << 16;
+ /* No need to test for termination here. */
+ h += ((flatbuffers_thash_t)p[3]) << 24;
+ return h;
+}
+
+/*
+ * Computes the little endian wire format of the type hash. It can be
+ * used as a file identifier argument to various flatcc buffer calls.
+ *
+ * `flatbuffers_fid_t` is just `char [4]` for the default flatbuffers
+ * type system defined in `flatcc/flatcc_types.h`.
+ */
+static inline void flatbuffers_identifier_from_name(const char *name, flatbuffers_fid_t out_identifier)
+{
+ flatbuffers_identifier_from_type_hash(flatbuffers_type_hash_from_name(name), out_identifier);
+}
+
+/*
+ * This is a collision free hash (a permutation) of the type hash to
+ * provide better distribution for use in hash tables. It is likely not
+ * necessary in practice, and for uniqueness of identifiers it provides no
+ * advantage over just using the FNV-1a type hash, except when truncating
+ * the identifier to less than 32-bits.
+ *
+ * Note: the output should not be used in transmission. It provides no
+ * additional information and just complicates matters. Furthermore, the
+ * unmodified type hash has the benefit that it can seed a child namespace.
+ */
+static inline uint32_t flatbuffers_disperse_type_hash(flatbuffers_thash_t type_hash)
+{
+ /* http://stackoverflow.com/a/12996028 */
+ uint32_t x = type_hash;
+
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x);
+ return x;
+}
+
+
+/* We have hardcoded assumptions about identifier size. */
+static_assert(sizeof(flatbuffers_fid_t) == 4, "unexpected file identifier size");
+static_assert(sizeof(flatbuffers_thash_t) == 4, "unexpected type hash size");
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_IDENTIFIER_H */
diff --git a/include/flatcc/flatcc_iov.h b/include/flatcc/flatcc_iov.h
new file mode 100644
index 0000000..a6d27f8
--- /dev/null
+++ b/include/flatcc/flatcc_iov.h
@@ -0,0 +1,31 @@
+#ifndef FLATCC_IOV_H
+#define FLATCC_IOV_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+/*
+ * The emitter receives one, or a few buffers at a time via
+ * this type. <sys/iov.h> compatible iovec structure used for
+ * allocation and emitter interface.
+ */
+typedef struct flatcc_iovec flatcc_iovec_t;
+struct flatcc_iovec {
+ void *iov_base;
+ size_t iov_len;
+};
+
+/*
+ * The largest iovec vector the builder will issue. It will
+ * always be a relatively small number.
+ */
+#define FLATCC_IOV_COUNT_MAX 8
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_IOV_H */
diff --git a/include/flatcc/flatcc_json_parser.h b/include/flatcc/flatcc_json_parser.h
new file mode 100644
index 0000000..f828129
--- /dev/null
+++ b/include/flatcc/flatcc_json_parser.h
@@ -0,0 +1,908 @@
+#ifndef FLATCC_JSON_PARSE_H
+#define FLATCC_JSON_PARSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * JSON RFC:
+ * http://www.ietf.org/rfc/rfc4627.txt?number=4627
+ *
+ * With several flatbuffers specific extensions.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_builder.h"
+#include "flatcc/flatcc_unaligned.h"
+
+#define PDIAGNOSTIC_IGNORE_UNUSED
+#include "flatcc/portable/pdiagnostic_push.h"
+
+typedef uint32_t flatcc_json_parser_flags_t;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_skip_unknown = 1;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_force_add = 2;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_with_size = 4;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_skip_array_overflow = 8;
+static const flatcc_json_parser_flags_t flatcc_json_parser_f_reject_array_underflow = 16;
+
+#define FLATCC_JSON_PARSE_ERROR_MAP(XX) \
+ XX(ok, "ok") \
+ XX(eof, "eof") \
+ XX(deep_nesting, "deep nesting") \
+ XX(trailing_comma, "trailing comma") \
+ XX(expected_colon, "expected colon") \
+ XX(unexpected_character, "unexpected character") \
+ XX(invalid_numeric, "invalid numeric") \
+ XX(overflow, "overflow") \
+ XX(underflow, "underflow") \
+ XX(unbalanced_array, "unbalanced array") \
+ XX(unbalanced_object, "unbalanced object") \
+ XX(precision_loss, "precision loss") \
+ XX(float_unexpected, "float unexpected") \
+ XX(unknown_symbol, "unknown symbol") \
+ XX(unquoted_symbolic_list, "unquoted list of symbols") \
+ XX(unknown_union, "unknown union type") \
+ XX(expected_string, "expected string") \
+ XX(invalid_character, "invalid character") \
+ XX(invalid_escape, "invalid escape") \
+ XX(invalid_type, "invalid type") \
+ XX(unterminated_string, "unterminated string") \
+ XX(expected_object, "expected object") \
+ XX(expected_array, "expected array") \
+ XX(expected_scalar, "expected literal or symbolic scalar") \
+ XX(expected_union_type, "expected union type") \
+ XX(union_none_present, "union present with type NONE") \
+ XX(union_none_not_null, "union of type NONE is not null") \
+ XX(union_incomplete, "table has incomplete union") \
+ XX(duplicate, "table has duplicate field") \
+ XX(required, "required field missing") \
+ XX(union_vector_length, "union vector length mismatch") \
+ XX(base64, "invalid base64 content") \
+ XX(base64url, "invalid base64url content") \
+ XX(array_underflow, "fixed length array underflow") \
+ XX(array_overflow, "fixed length array overflow") \
+ XX(runtime, "runtime error") \
+ XX(not_supported, "not supported")
+
+enum flatcc_json_parser_error_no {
+#define XX(no, str) flatcc_json_parser_error_##no,
+ FLATCC_JSON_PARSE_ERROR_MAP(XX)
+#undef XX
+};
+
+const char *flatcc_json_parser_error_string(int err);
+
+#define flatcc_json_parser_ok flatcc_json_parser_error_ok
+#define flatcc_json_parser_eof flatcc_json_parser_error_eof
+
+/*
+ * The struct may be zero initialized in which case the line count will
+ * start at line zero, or the line may be set to 1 initially. The ctx
+ * is only used for error reporting and tracking non-standard unquoted
+ * ctx.
+ *
+ * `ctx` may for example hold a flatcc_builder_t pointer.
+ */
+typedef struct flatcc_json_parser_ctx flatcc_json_parser_t;
+struct flatcc_json_parser_ctx {
    /* Builder receiving parsed output; named `ctx` for historic reasons. */
+    flatcc_builder_t *ctx;
    /* Start of the current source line, used for column reporting. */
+    const char *line_start;
+    flatcc_json_parser_flags_t flags;
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
    /* Non-zero while the current symbol was given without quotes (extension). */
+    int unquoted;
+#endif
+
    /* 1-based line and 0-based position of the last reported error. */
+    int line, pos;
    /* One of the flatcc_json_parser_error_* codes, 0 when ok. */
+    int error;
+    const char *start;
+    const char *end;
+    const char *error_loc;
+    /* Set at end of successful parse. */
+    const char *end_loc;
+};
+
/* Returns the parser's recorded error code (flatcc_json_parser_error_ok when none). */
+static inline int flatcc_json_parser_get_error(flatcc_json_parser_t *ctx)
+{
+    return ctx->error;
+}
+
/*
 * Zeroes the parser state and records the buffer range [buf, end).
 * Line counting starts at 1. `B` is the builder that receives the
 * parsed output.
 */
+static inline void flatcc_json_parser_init(flatcc_json_parser_t *ctx, flatcc_builder_t *B, const char *buf, const char *end, flatcc_json_parser_flags_t flags)
+{
+    memset(ctx, 0, sizeof(*ctx));
+    ctx->ctx = B;
+    ctx->line_start = buf;
+    ctx->line = 1;
+    ctx->flags = flags;
+    /* These are not needed for parsing, but may be helpful in reporting etc. */
+    ctx->start = buf;
+    ctx->end = end;
+    ctx->error_loc = buf;
+}
+
+const char *flatcc_json_parser_set_error(flatcc_json_parser_t *ctx, const char *loc, const char *end, int reason);
+
+/*
+ * Wide space is not necessarily beneficial in the typical case, but it
+ * also isn't expensive so it may be added when there are applications
+ * that can benefit.
+ */
+const char *flatcc_json_parser_space_ext(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
/*
 * Skips whitespace. The inline fast path handles the common cases of
 * no space or exactly one 0x20; it needs two readable characters,
 * hence `end - buf > 1`. Longer runs, control characters, and bytes
 * that compare <= 0x20 (including high-bit bytes when char is signed)
 * fall through to the extended function, which also tracks line counts.
 */
+static inline const char *flatcc_json_parser_space(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+    if (end - buf > 1) {
+        if (buf[0] > 0x20) {
+            return buf;
+        }
+        if (buf[0] == 0x20 && buf[1] > 0x20) {
+            return buf + 1;
+        }
+    }
+    return flatcc_json_parser_space_ext(ctx, buf, end);
+}
+
+
/* Expects an opening '"' at `buf`; returns the position just after it, or sets an error. */
+static inline const char *flatcc_json_parser_string_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+    if (buf == end || *buf != '\"') {
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_string);
+    }
+    return ++buf;
+}
+
/* Expects the closing '"' at `buf`; returns the position just after it, or sets an error. */
+static inline const char *flatcc_json_parser_string_end(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+    if (buf == end || *buf != '\"') {
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+    }
+    return ++buf;
+}
+
+/*
+ * Parse a string as a fixed length char array as `s` with length `n`.
+ * and raise errors according to overflow/underflow runtime flags. Zero
+ * and truncate as needed. A trailing zero is not inserted if the input
+ * is at least the same length as the char array.
+ *
+ * Runtime flags: `skip_array_overflow`, `pad_array_underflow`.
+ */
+const char *flatcc_json_parser_char_array(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, char *s, size_t n);
+
+/*
+ * Creates a string. Returns *ref == 0 on unrecoverable error or
+ * sets *ref to a valid new string reference.
+ */
+const char *flatcc_json_parser_build_string(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref);
+
+typedef char flatcc_json_parser_escape_buffer_t[5];
+/*
+ * If the buffer does not hold a valid escape sequence, an error is
+ * returned with code[0] = 0.
+ *
+ * Otherwise code[0] holds the length (1-4) of the remaining
+ * characters in the code, transcoded from the escape sequence
+ * where a length of 4 only happens with escaped surrogate pairs.
+ *
+ * The JSON extension `\xXX` is supported and may produce invalid UTF-8
+ * characters such as 0xff. The standard JSON escape `\uXXXX` is not
+ * checked for invalid code points and may produce invalid UTF-8.
+ *
+ * Regular characters are expected to be valid UTF-8 but they are not
+ * checked and may therefore produce invalid UTF-8.
+ *
+ * Control characters within a string are rejected except in the
+ * standard JSON escaped form for `\n \r \t \b \f`.
+ *
+ * Additional escape codes as per standard JSON: `\\ \/ \"`.
+ */
+const char *flatcc_json_parser_string_escape(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_json_parser_escape_buffer_t code);
+
+/*
+ * Parses the longest unescaped run of string content followed by either
+ * an escape encoding, string termination, or error.
+ */
+const char *flatcc_json_parser_string_part(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
/*
 * Begins a symbol (field name or symbolic constant). With the unquoted
 * extension enabled, a missing leading quote marks the symbol as
 * unquoted instead of failing, but a leading '.' is still rejected.
 * Returns `buf` unchanged at end of input.
 */
+static inline const char *flatcc_json_parser_symbol_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+    if (buf == end) {
+        return buf;
+    }
+    if (*buf == '\"') {
+        ++buf;
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+        ctx->unquoted = 0;
+#endif
+    } else {
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+        if (*buf == '.') {
+            return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+        }
+        ctx->unquoted = 1;
+#else
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+#endif
+    }
+    return buf;
+}
+
/*
 * Byte-wise fallback for flatcc_json_parser_symbol_part: packs up to 8
 * leading symbol bytes big-endian into a 64-bit word for trie
 * comparison. Bytes beyond the first 8 are ignored; missing trailing
 * bytes are zero.
 *
 * Each byte is cast through uint8_t before widening: `char` may be
 * signed, and sign extension of bytes >= 0x80 would otherwise OR
 * garbage into higher byte lanes, disagreeing with the unaligned
 * be64toh fast path for non-ASCII input.
 */
static inline uint64_t flatcc_json_parser_symbol_part_ext(const char *buf, const char *end)
{
    uint64_t w = 0;
    size_t n = (size_t)(end - buf);
    size_t i;

    if (n > 8) {
        n = 8;
    }
    for (i = 0; i < n; ++i) {
        /* First input byte lands in the most significant lane. */
        w |= ((uint64_t)(uint8_t)buf[i]) << ((7 - i) * 8);
    }
    return w;
}
+
+/*
+ * Read out string as a big endian word. This allows for trie lookup,
+ * also when trailing characters are beyond keyword. This assumes the
+ * external words tested against are valid and therefore there need be
+ * no checks here. If a match is not made, the symbol_end function will
+ * consume and check any unmatched content - from _before_ this function
+ * was called - i.e. the returned buffer is tentative for use only if we
+ * accept the part returned here.
+ *
+ * Used for both symbols and symbolic constants.
+ */
/*
 * Big-endian load of up to 8 symbol bytes. Uses a direct unaligned
 * word read where the build permits it, otherwise the byte-wise
 * fallback. NOTE(review): the raw pointer cast relies on
 * FLATCC_ALLOW_UNALIGNED_ACCESS only being set on platforms where
 * unaligned access is safe.
 */
+static inline uint64_t flatcc_json_parser_symbol_part(const char *buf, const char *end)
+{
+    size_t n = (size_t)(end - buf);
+
+#if FLATCC_ALLOW_UNALIGNED_ACCESS
+    if (n >= 8) {
+        return be64toh(*(uint64_t *)buf);
+    }
+#endif
+    return flatcc_json_parser_symbol_part_ext(buf, end);
+}
+
+/* Don't allow space in dot notation neither inside nor outside strings. */
/*
 * Matches a namespace prefix: if the character at offset `pos` is '.',
 * the scope matched and parsing continues just after the dot;
 * otherwise the original position is returned to signal no match.
 */
+static inline const char *flatcc_json_parser_match_scope(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos)
+{
+    const char *mark = buf;
+
+    (void)ctx;
+
+    if (end - buf <= pos) {
+        return mark;
+    }
+    if (buf[pos] != '.') {
+        return mark;
+    }
+    return buf + pos + 1;
+}
+
+const char *flatcc_json_parser_match_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos, int *more);
+
+/* We allow '.' in unquoted symbols, but not at the start or end. */
/*
 * Consumes the remainder of a symbol (after an unmatched or partially
 * matched trie lookup). Unquoted symbols accept '_', '.', digits,
 * letters, and high-bit (UTF-8) bytes, stop at space/punctuation, and
 * may not end with '.'. Quoted symbols are scanned to the closing
 * quote, skipping backslash escape pairs.
 */
+static inline const char *flatcc_json_parser_symbol_end(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+    char c, clast = 0;
+
+
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+    if (ctx->unquoted) {
+        while (buf != end && *buf > 0x20) {
+            clast = c = *buf;
+            if (c == '_' || c == '.' || (c & 0x80) || (c >= '0' && c <= '9')) {
+                ++buf;
+                continue;
+            }
+            /* Lower case. */
+            c |= 0x20;
+            if (c >= 'a' && c <= 'z') {
+                ++buf;
+                continue;
+            }
+            break;
+        }
+        if (clast == '.') {
+            return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+        }
+    } else {
+#else
+    {
+#endif
+        while (buf != end && *buf != '\"') {
+            if (*buf == '\\') {
+                if (end - buf < 2) {
+                    break;
+                }
+                ++buf;
+            }
+            ++buf;
+        }
+        if (buf == end || *buf != '\"') {
+            return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+        }
+        ++buf;
+    }
+    return buf;
+}
+
/*
 * Starts a symbolic constant. For quoted constants, space after the
 * opening quote is skipped; unquoted symbols are terminated by space,
 * so no space is skipped for them.
 */
+static inline const char *flatcc_json_parser_constant_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+    buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+    if (!ctx->unquoted) {
+#else
+    {
+#endif
+        buf = flatcc_json_parser_space(ctx, buf, end);
+    }
+    return buf;
+}
+
/*
 * Consumes '{' and following space. Sets *more = 1 when the object has
 * members; an empty object `{}` is consumed entirely with *more = 0.
 */
+static inline const char *flatcc_json_parser_object_start(flatcc_json_parser_t *ctx, const char *buf, const char *end, int *more)
+{
+    if (buf == end || *buf != '{') {
+        *more = 0;
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_object);
+    }
+    buf = flatcc_json_parser_space(ctx, buf + 1, end);
+    if (buf != end && *buf == '}') {
+        *more = 0;
+        return flatcc_json_parser_space(ctx, buf + 1, end);
+    }
+    *more = 1;
+    return buf;
+}
+
/*
 * Called after a member: consumes ',' (sets *more = 1) or '}' (consumed
 * with trailing space, *more = 0). A comma followed by end of input is
 * an unbalanced-object error; `,}` is only accepted when the
 * trailing-comma extension is enabled.
 */
+static inline const char *flatcc_json_parser_object_end(flatcc_json_parser_t *ctx, const char *buf,
+        const char *end, int *more)
+{
+    buf = flatcc_json_parser_space(ctx, buf, end);
+    if (buf == end) {
+        *more = 0;
+        return buf;
+    }
+    if (*buf != ',') {
+        *more = 0;
+        if (*buf != '}') {
+            return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+        } else {
+            return flatcc_json_parser_space(ctx, buf + 1, end);
+        }
+    }
+    buf = flatcc_json_parser_space(ctx, buf + 1, end);
+    if (buf == end) {
+        *more = 0;
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+    }
+#if FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+    if (*buf == '}') {
+        *more = 0;
+        return flatcc_json_parser_space(ctx, buf + 1, end);
+    }
+#endif
+    *more = 1;
+    return buf;
+}
+
/*
 * Consumes '[' and following space. Sets *more = 1 when the array has
 * elements; an empty array `[]` is consumed entirely with *more = 0.
 */
+static inline const char *flatcc_json_parser_array_start(flatcc_json_parser_t *ctx, const char *buf, const char *end, int *more)
+{
+    if (buf == end || *buf != '[') {
+        *more = 0;
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_array);
+    }
+    buf = flatcc_json_parser_space(ctx, buf + 1, end);
+    if (buf != end && *buf == ']') {
+        *more = 0;
+        return flatcc_json_parser_space(ctx, buf + 1, end);
+    }
+    *more = 1;
+    return buf;
+}
+
/*
 * Called after an element: consumes ',' (sets *more = 1) or ']'
 * (consumed with trailing space, *more = 0). A comma followed by end of
 * input is an unbalanced-array error; `,]` is only accepted when the
 * trailing-comma extension is enabled.
 */
+static inline const char *flatcc_json_parser_array_end(flatcc_json_parser_t *ctx, const char *buf,
+        const char *end, int *more)
+{
+    buf = flatcc_json_parser_space(ctx, buf, end);
+    if (buf == end) {
+        *more = 0;
+        return buf;
+    }
+    if (*buf != ',') {
+        *more = 0;
+        if (*buf != ']') {
+            return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_array);
+        } else {
+            return flatcc_json_parser_space(ctx, buf + 1, end);
+        }
+    }
+    buf = flatcc_json_parser_space(ctx, buf + 1, end);
+    if (buf == end) {
+        *more = 0;
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_array);
+    }
+#if FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+    if (*buf == ']') {
+        *more = 0;
+        return flatcc_json_parser_space(ctx, buf + 1, end);
+    }
+#endif
+    *more = 1;
+    return buf;
+}
+
+/*
+ * Detects if a symbol terminates at a given `pos` relative to the
+ * buffer pointer, or return fast.
+ *
+ * Failure to match is not an error but a recommendation to try
+ * alternative longer suffixes - only if such do not exist will
+ * there be an error. If a match was not eventually found,
+ * the `flatcc_json_parser_unmatched_symbol` should be called to consume
+ * the symbol and generate error messages.
+ *
+ * If a match was detected, ':' and surrounding space is consumed,
+ * or an error is generated.
+ */
/*
 * Tests whether the symbol terminates exactly at offset `pos` (closing
 * quote, or space/':' for unquoted symbols). On no match the original
 * `buf` is returned so longer suffixes may be tried; on match the
 * terminator plus ':' and surrounding space is consumed, or an
 * expected-colon error is raised.
 */
+static inline const char *flatcc_json_parser_match_symbol(flatcc_json_parser_t *ctx, const char *buf,
+        const char *end, int pos)
+{
+    const char *mark = buf;
+
+    if (end - buf <= pos) {
+        return mark;
+    }
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+    if (ctx->unquoted) {
+        if (buf[pos] > 0x20 && buf[pos] != ':') {
+            return mark;
+        }
+        buf += pos;
+        ctx->unquoted = 0;
+    } else {
+#else
+    {
+#endif
+        if (buf[pos] != '\"') {
+            return mark;
+        }
+        buf += pos + 1;
+    }
+    buf = flatcc_json_parser_space(ctx, buf, end);
+    if (buf != end && *buf == ':') {
+        ++buf;
+        return flatcc_json_parser_space(ctx, buf, end);
+    }
+    return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+}
+
/*
 * Matches the `_type` suffix of a union type field at offset `pos`,
 * then requires the symbol to terminate there. Returns `buf` unchanged
 * when the suffix does not match.
 */
+static inline const char *flatcc_json_parser_match_type_suffix(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos)
+{
+    if (end - buf <= pos + 5) {
+        return buf;
+    }
+    if (memcmp(buf + pos, "_type", 5)) {
+        return buf;
+    }
+    return flatcc_json_parser_match_symbol(ctx, buf, end, pos + 5);
+}
+
+const char *flatcc_json_parser_unmatched_symbol(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
/* Coerces a parsed magnitude/sign pair to uint64; any negative value is an underflow error. */
+static inline const char *flatcc_json_parser_coerce_uint64(
+        flatcc_json_parser_t *ctx, const char *buf,
+        const char *end, int value_sign, uint64_t value, uint64_t *v)
+{
+    if (value_sign) {
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_underflow);
+    }
+    *v = value;
+    return buf;
+}
+
/* Coerces a parsed magnitude/sign pair to a 0/1 bool; negative values are an underflow error. */
+static inline const char *flatcc_json_parser_coerce_bool(flatcc_json_parser_t *ctx, const char *buf,
+        const char *end, int value_sign, uint64_t value, uint8_t *v)
+{
+    if (value_sign) {
+        return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_underflow);
+    }
+    *v = (uint8_t)!!value;
+    return buf;
+}
+
/*
 * Defines flatcc_json_parser_coerce_<type> for an unsigned target:
 * rejects negative input (underflow) and values above the type's
 * maximum (overflow), otherwise stores the narrowed value.
 */
+#define __flatcc_json_parser_define_coerce_unsigned(type, basetype, uctype) \
+static inline const char *flatcc_json_parser_coerce_ ## type( \
+        flatcc_json_parser_t *ctx, const char *buf, \
+        const char *end, int value_sign, uint64_t value, basetype *v) \
+{ \
+    if (value_sign) { \
+        return flatcc_json_parser_set_error(ctx, buf, end, \
+                flatcc_json_parser_error_underflow); \
+    } \
+    if (value > uctype ## _MAX) { \
+        return flatcc_json_parser_set_error(ctx, buf, end, \
+                flatcc_json_parser_error_overflow); \
+    } \
+    *v = (basetype)value; \
+    return buf; \
+}
+
+__flatcc_json_parser_define_coerce_unsigned(uint32, uint32_t, UINT32)
+__flatcc_json_parser_define_coerce_unsigned(uint16, uint16_t, UINT16)
+__flatcc_json_parser_define_coerce_unsigned(uint8, uint8_t, UINT8)
+
/*
 * Defines flatcc_json_parser_coerce_<type> for a signed target: the
 * magnitude is range-checked per sign (note the +1 slack allowing the
 * most negative value) before negating and/or narrowing.
 */
+#define __flatcc_json_parser_define_coerce_signed(type, basetype, uctype) \
+static inline const char *flatcc_json_parser_coerce_ ## type( \
+        flatcc_json_parser_t *ctx, const char *buf, \
+        const char *end, int value_sign, uint64_t value, basetype *v) \
+{ \
+    if (value_sign) { \
+        if (value > (uint64_t)(uctype ## _MAX) + 1) { \
+            return flatcc_json_parser_set_error(ctx, buf, end, \
+                    flatcc_json_parser_error_underflow); \
+        } \
+        *v = (basetype)-(int64_t)value; \
+    } else { \
+        if (value > uctype ## _MAX) { \
+            return flatcc_json_parser_set_error(ctx, buf, end, \
+                    flatcc_json_parser_error_overflow); \
+        } \
+        *v = (basetype)value; \
+    } \
+    return buf; \
+}
+
+__flatcc_json_parser_define_coerce_signed(int64, int64_t, INT64)
+__flatcc_json_parser_define_coerce_signed(int32, int32_t, INT32)
+__flatcc_json_parser_define_coerce_signed(int16, int16_t, INT16)
+__flatcc_json_parser_define_coerce_signed(int8, int8_t, INT8)
+
/*
 * Converts a parsed integer magnitude and sign to float; never fails.
 * Large magnitudes may round due to float precision.
 */
+static inline const char *flatcc_json_parser_coerce_float(
+        flatcc_json_parser_t *ctx, const char *buf,
+        const char *end, int value_sign, uint64_t value, float *v)
+{
+    (void)ctx;
+    (void)end;
+
+    *v = value_sign ? -(float)value : (float)value;
+    return buf;
+}
+
/*
 * Converts a parsed integer magnitude and sign to double; never fails.
 * Magnitudes above 2^53 may round due to double precision.
 */
+static inline const char *flatcc_json_parser_coerce_double(
+        flatcc_json_parser_t *ctx, const char *buf,
+        const char *end, int value_sign, uint64_t value, double *v)
+{
+    (void)ctx;
+    (void)end;
+
+    *v = value_sign ? -(double)value : (double)value;
+    return buf;
+}
+
+const char *flatcc_json_parser_double(flatcc_json_parser_t *ctx, const char *buf, const char *end, double *v);
+
+const char *flatcc_json_parser_float(flatcc_json_parser_t *ctx, const char *buf, const char *end, float *v);
+
+/*
+ * If the buffer does not contain a valid start character for a numeric
+ * value, the function will return the input buffer without failure.
+ * This makes it possible to try a symbolic parse.
+ */
+const char *flatcc_json_parser_integer(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_sign, uint64_t *value);
+
+/* Returns unchanged buffer without error if `null` is not matched. */
+static inline const char *flatcc_json_parser_null(const char *buf, const char *end)
+{
+ if (end - buf >= 4 && memcmp(buf, "null", 4) == 0) {
+ return buf + 4;
+ }
+ return buf;
+}
+
/*
 * A union member whose type is NONE must have the value `null`;
 * anything else raises union_none_not_null. Returns the position after
 * `null` on success.
 */
+static inline const char *flatcc_json_parser_none(flatcc_json_parser_t *ctx,
+        const char *buf, const char *end)
+{
+    if (end - buf >= 4 && memcmp(buf, "null", 4) == 0) {
+        return buf + 4;
+    }
+    return flatcc_json_parser_set_error(ctx, buf, end,
+            flatcc_json_parser_error_union_none_not_null);
+}
+
+/*
+ * `parsers` is a null terminated array of parsers with at least one
+ * valid parser. A numeric literal parser may also be included.
+ */
/*
 * Defines flatcc_json_parser_<type>: parses an optional integer
 * literal and coerces it to the target type. When no numeric literal
 * starts at `buf`, the buffer is returned unchanged with *v = 0 so the
 * caller can attempt a symbolic parse instead.
 */
+#define __flatcc_json_parser_define_integral_parser(type, basetype) \
+static inline const char *flatcc_json_parser_ ## type( \
+        flatcc_json_parser_t *ctx, \
+        const char *buf, const char *end, basetype *v) \
+{ \
+    uint64_t value = 0; \
+    int value_sign = 0; \
+    const char *mark = buf; \
+ \
+    *v = 0; \
+    if (buf == end) { \
+        return buf; \
+    } \
+    buf = flatcc_json_parser_integer(ctx, buf, end, &value_sign, &value); \
+    if (buf != mark) { \
+        return flatcc_json_parser_coerce_ ## type(ctx, \
+                buf, end, value_sign, value, v); \
+    } \
+    return buf; \
+}
+
+__flatcc_json_parser_define_integral_parser(uint64, uint64_t)
+__flatcc_json_parser_define_integral_parser(uint32, uint32_t)
+__flatcc_json_parser_define_integral_parser(uint16, uint16_t)
+__flatcc_json_parser_define_integral_parser(uint8, uint8_t)
+__flatcc_json_parser_define_integral_parser(int64, int64_t)
+__flatcc_json_parser_define_integral_parser(int32, int32_t)
+__flatcc_json_parser_define_integral_parser(int16, int16_t)
+__flatcc_json_parser_define_integral_parser(int8, int8_t)
+
/*
 * Parses the literals `true`/`false`, or falls back to a numeric
 * literal which is normalized to 0/1.
 */
+static inline const char *flatcc_json_parser_bool(flatcc_json_parser_t *ctx, const char *buf, const char *end, uint8_t *v)
+{
+    const char *k;
+    uint8_t tmp;
+
+    k = buf;
+    if (end - buf >= 4 && memcmp(buf, "true", 4) == 0) {
+        *v = 1;
+        return k + 4;
+    } else if (end - buf >= 5 && memcmp(buf, "false", 5) == 0) {
+        *v = 0;
+        return k + 5;
+    }
+    buf = flatcc_json_parser_uint8(ctx, buf, end, &tmp);
+    *v = !!tmp;
+    return buf;
+}
+
+/*
+ * The `parsers` argument is a zero terminated array of parser
+ * functions with increasingly general scopes.
+ *
+ * Symbols can be or'ed together by listing multiple space separated
+ * flags in source being parsed, like `{ x : "Red Blue" }`.
+ * Intended for flags, but generally available.
+ *
+ * `aggregate` means there are more symbols to follow.
+ *
+ * This function does not return input `buf` value if match was
+ * unsuccessful. It will either match or error.
+ */
+typedef const char *flatcc_json_parser_integral_symbol_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, int *value_sign, uint64_t *value, int *aggregate);
+
+/*
+ * Raise an error if a syntax like `color: Red Green` is seen unless
+ * explicitly permitted. `color: "Red Green"` or `"color": "Red Green"
+ * or `color: Red` is permitted if unquoted is permitted but not
+ * unquoted list. Google's flatc JSON parser does not allow multiple
+ * symbolic values unless quoted, so this is the default.
+ */
+#if !FLATCC_JSON_PARSE_ALLOW_UNQUOTED || FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST
+#define __flatcc_json_parser_init_check_unquoted_list()
+#define __flatcc_json_parser_check_unquoted_list()
+#else
+#define __flatcc_json_parser_init_check_unquoted_list() int list_count = 0;
+#define __flatcc_json_parser_check_unquoted_list() \
+ if (list_count++ && ctx->unquoted) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_unquoted_symbolic_list); \
+ }
+#endif
+
/*
 * Defines flatcc_json_parser_symbolic_<type>: parses one or more
 * space-separated symbolic constants using the null-terminated
 * `parsers` array (tried in order, most specific scope first), coerces
 * each value to the target type and accumulates it into *v. `+=` is
 * used rather than `|=` so signed and floating targets also work; as a
 * consequence duplicate flags add instead of or.
 */
+#define __flatcc_json_parser_define_symbolic_integral_parser(type, basetype)\
+static const char *flatcc_json_parser_symbolic_ ## type( \
+        flatcc_json_parser_t *ctx, \
+        const char *buf, const char *end, \
+        flatcc_json_parser_integral_symbol_f *parsers[], \
+        basetype *v) \
+{ \
+    flatcc_json_parser_integral_symbol_f **p; \
+    const char *mark; \
+    basetype tmp = 0; \
+    uint64_t value; \
+    int value_sign, aggregate; \
+    __flatcc_json_parser_init_check_unquoted_list() \
+ \
+    *v = 0; \
+    buf = flatcc_json_parser_constant_start(ctx, buf, end); \
+    if (buf == end) { \
+        return buf; \
+    } \
+    do { \
+        p = parsers; \
+        do { \
+            /* call parser function */ \
+            buf = (*p)(ctx, (mark = buf), end, \
+                    &value_sign, &value, &aggregate); \
+            if (buf == end) { \
+                return buf; \
+            } \
+        } while (buf == mark && *++p); \
+        if (mark == buf) { \
+            return flatcc_json_parser_set_error(ctx, buf, end, \
+                    flatcc_json_parser_error_expected_scalar); \
+        } \
+        __flatcc_json_parser_check_unquoted_list() \
+        if (end == flatcc_json_parser_coerce_ ## type(ctx, \
+                    buf, end, value_sign, value, &tmp)) { \
+            return end; \
+        } \
+        /* \
+         * `+=`, not `|=` because we also coerce to float and double, \
+         * and because we need to handle signed values. This may give \
+         * unexpected results with duplicate flags. \
+         */ \
+        *v += tmp; \
+    } while (aggregate); \
+    return buf; \
+}
+
+__flatcc_json_parser_define_symbolic_integral_parser(uint64, uint64_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint32, uint32_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint16, uint16_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint8, uint8_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int64, int64_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int32, int32_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int16, int16_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int8, int8_t)
+
+__flatcc_json_parser_define_symbolic_integral_parser(bool, uint8_t)
+
+/* We still parse integral values, but coerce to float or double. */
+__flatcc_json_parser_define_symbolic_integral_parser(float, float)
+__flatcc_json_parser_define_symbolic_integral_parser(double, double)
+
+/* Parse vector as a base64 or base64url encoded string with no spaces permitted. */
+const char *flatcc_json_parser_build_uint8_vector_base64(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref, int urlsafe);
+
+/*
+ * This doesn't do anything other than validate and advance past
+ * a JSON value which may use unquoted symbols.
+ *
+ * Upon call it is assumed that leading space has been stripped and that
+ * a JSON value is expected (i.e. root, or just after ':' in a
+ * container object, or less likely as an array member). Any trailing
+ * comma is assumed to belong to the parent context. Returns a parse
+ * location stripped from space so container should post call expect
+ * ',', '}', or ']', or EOF if the JSON is valid.
+ */
+const char *flatcc_json_parser_generic_json(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+/* Parse a JSON table. */
+typedef const char *flatcc_json_parser_table_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *pref);
+
+/* Parses a JSON struct. */
+typedef const char *flatcc_json_parser_struct_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *pref);
+
+/* Constructs a table, struct, or string object unless the type is 0 or unknown. */
+typedef const char *flatcc_json_parser_union_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, uint8_t type, flatcc_builder_ref_t *pref);
+
+typedef int flatcc_json_parser_is_known_type_f(uint8_t type);
+
+/* Called at start by table parsers with at least 1 union. */
+const char *flatcc_json_parser_prepare_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_total, size_t *handle);
+
+const char *flatcc_json_parser_finalize_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t handle);
+
+const char *flatcc_json_parser_union(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_type(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_type_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser,
+ flatcc_json_parser_is_known_type_f accept_type);
+
+/*
+ * Parses a table as root.
+ *
+ * Use the flag `flatcc_json_parser_f_with_size` to create a buffer with
+ * size prefix.
+ *
+ * `ctx` may be null or an uninitialized json parser to receive parse results.
+ * `builder` must a newly initialized or reset builder object.
+ * `buf`, `bufsiz` may be larger than the parsed json if trailing
+ * space or zeroes are expected, but they must represent a valid memory buffer.
+ * `fid` must be null, or a valid file identifier.
+ * `flags` default to 0. See also `flatcc_json_parser_f_` constants.
+ */
+int flatcc_json_parser_table_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser);
+
+/*
+ * Similar to `flatcc_json_parser_table_as_root` but parses a struct as
+ * root.
+ */
+int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_struct_f *parser);
+
+#include "flatcc/portable/pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_JSON_PARSE_H */
diff --git a/include/flatcc/flatcc_json_printer.h b/include/flatcc/flatcc_json_printer.h
new file mode 100644
index 0000000..cab49a1
--- /dev/null
+++ b/include/flatcc/flatcc_json_printer.h
@@ -0,0 +1,788 @@
+#ifndef FLATCC_JSON_PRINTER_H
+#define FLATCC_JSON_PRINTER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Definitions for default implementation, do not assume these are
+ * always valid.
+ */
+#define FLATCC_JSON_PRINT_FLUSH_SIZE (1024 * 16)
+#define FLATCC_JSON_PRINT_RESERVE 64
+#define FLATCC_JSON_PRINT_BUFFER_SIZE (FLATCC_JSON_PRINT_FLUSH_SIZE + FLATCC_JSON_PRINT_RESERVE)
+
+#ifndef FLATCC_JSON_PRINTER_ALLOC
+#define FLATCC_JSON_PRINTER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+
+#ifndef FLATCC_JSON_PRINTER_FREE
+#define FLATCC_JSON_PRINTER_FREE(p) FLATCC_FREE(p)
+#endif
+
+#ifndef FLATCC_JSON_PRINTER_REALLOC
+#define FLATCC_JSON_PRINTER_REALLOC(p, n) FLATCC_REALLOC(p, n)
+#endif
+
+/* Initial size that grows exponentially. */
+#define FLATCC_JSON_PRINT_DYN_BUFFER_SIZE 4096
+
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_flatbuffers.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+#define FLATCC_JSON_PRINT_ERROR_MAP(XX) \
+ XX(ok, "ok") \
+ /* \
+ * When the flatbuffer is null, has too small a header, or has \
+ * mismatching identifier when a match was requested. \
+ */ \
+ XX(bad_input, "bad input") \
+ XX(deep_recursion, "deep recursion") \
+ /* \
+ * When the output was larger than the available fixed length buffer, \
+ * or dynamic allocation could not grow the buffer sufficiently. \
+ */ \
+ XX(overflow, "overflow")
+
+/* Error codes expanded from FLATCC_JSON_PRINT_ERROR_MAP via the XX macro. */
+enum flatcc_json_printer_error_no {
+#define XX(no, str) flatcc_json_printer_error_##no,
+    FLATCC_JSON_PRINT_ERROR_MAP(XX)
+#undef XX
+};
+
+#define flatcc_json_printer_ok flatcc_json_printer_error_ok
+
+typedef struct flatcc_json_printer_ctx flatcc_json_printer_t;
+
+typedef void flatcc_json_printer_flush_f(flatcc_json_printer_t *ctx, int all);
+
+struct flatcc_json_printer_ctx {
+    char *buf;             /* start of the print buffer */
+    size_t size;           /* total buffer capacity in bytes */
+    size_t flush_size;     /* amount accumulated before a partial flush triggers */
+    size_t total;          /* bytes already flushed out of the buffer */
+    const char *pflush;    /* watermark: partial flush fires when `p` reaches this */
+    char *p;               /* current write position within `buf` */
+    uint8_t own_buffer;    /* presumably non-zero when the printer allocated `buf` itself -- confirm in impl */
+    uint8_t indent;        /* spaces per indentation level; 0 = compact output */
+    uint8_t unquote;       /* non-zero: names and enum symbols printed unquoted */
+    uint8_t noenum;        /* non-zero: enums always printed numerically */
+    uint8_t skip_default;  /* non-zero: omit scalar fields equal to their default */
+    uint8_t force_default; /* non-zero: print defaults for absent scalar fields */
+    int level;             /* current nesting (indentation) level; 0 is top level */
+    int error;             /* first error recorded (sticky), 0 when none */
+
+    void *fp;              /* opaque output handle (typically FILE *), may be null */
+    flatcc_json_printer_flush_f *flush; /* flush callback moving data out of `buf` */
+};
+
+/* Records `err` on the context; sticky - only the first error is retained. */
+static inline void flatcc_json_printer_set_error(flatcc_json_printer_t *ctx, int err)
+{
+    if (!ctx->error) {
+        ctx->error = err;
+    }
+}
+
+const char *flatcc_json_printer_error_string(int err);
+
+/* Returns the first error recorded on the context, or 0 if none occurred. */
+static inline int flatcc_json_printer_get_error(flatcc_json_printer_t *ctx)
+{
+    return ctx->error;
+}
+
+/*
+ * Call to reuse context between parses without
+ * returning buffer. If a file pointer is being used,
+ * it will remain open.
+ *
+ * Reset does not affect the formatting settings (indentation and
+ * operational flags), but does zero the indentation level.
+ */
+static inline void flatcc_json_printer_reset(flatcc_json_printer_t *ctx)
+{
+    ctx->p = ctx->buf;  /* rewind write position to start of buffer */
+    ctx->level = 0;     /* back to top-level indentation */
+    ctx->total = 0;     /* forget previously flushed byte count */
+    ctx->error = 0;     /* clear sticky error */
+}
+
+/*
+ * A custom init function can be implemented with a custom flush
+ * function. A few have been provided:
+ * init with external fixed length buffer, and init with dynamically
+ * growing buffer.
+ *
+ * Because there are a lot of small print functions, it is essentially
+ * always faster to print to local buffer than moving to io directly
+ * such as using fprintf or fwrite. The flush callback is used to
+ * move data when enough has been collected.
+ *
+ * `fp` should be of type `FILE *` but we do not enforce it here
+ * because it allows the header to be independent of <stdio.h>
+ * when not required. If `fp` is null, it defaults to stdout.
+ *
+ * Returns -1 on alloc error (no cleanup needed), or 0 on success.
+ * Eventually the clear method must be called to return memory.
+ *
+ * The file pointer may be stdout or a custom file. The file pointer
+ * is not affected by reset or clear and should be closed manually.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init(flatcc_json_printer_t *ctx, void *fp);
+
+/*
+ * Prints to external buffer and sets overflow error if buffer is too
+ * small. Earlier content is then overwritten. A custom version of this
+ * function could flush the content to elsewhere before allowing the
+ * buffer content to be overwritten. The `buffers_size` must be large
+ * enough to hold `FLATCC_JSON_PRINT_RESERVED_SIZE` which is small but
+ * large enough value to hold entire numbers and the like.
+ *
+ * It is not strictly necessary to call clear because the buffer is
+ * external, but still good form in case the context type is changed
+ * later.
+ *
+ * Returns -1 on buffer size error (no cleanup needed), or 0 on success.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init_buffer(flatcc_json_printer_t *ctx, char *buffer, size_t buffer_size);
+
+/*
+ * Returns the current buffer pointer and also the content size in
+ * `buffer_size` if it is null. The operation is not very useful for
+ * file oriented printers (created with `init`) and will then only
+ * return the unflushed buffer content. For fixed length buffers
+ * (`init_buffer`), only the last content is available if the buffer
+ * overflowed. Works well with `init_dynamic_buffer` when the dynamic
+ * buffer is reused, otherwise `finalize_dynamic_buffer` could be more
+ * appropriate.
+ *
+ * The returned buffer is zero terminated.
+ *
+ * The returned pointer is only valid until next operation and should
+ * not be deallocated manually.
+ */
+void *flatcc_json_printer_get_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size);
+
+/*
+ * Set to non-zero if names and enum symbols can be unquoted thus
+ * diverging from standard JSON while remaining compatible with `flatc`
+ * JSON flavor.
+ */
+static inline void flatcc_json_printer_set_unquoted(flatcc_json_printer_t *ctx, int x)
+{
+    ctx->unquote = !!x; /* normalize any non-zero value to 1 */
+}
+
+/*
+ * Set to non-zero if enums should always be printed as numbers.
+ * Otherwise enums are printed as a symbol for member values, and as
+ * numbers for other values.
+ *
+ * NOTE: this setting will not affect code generated with enum mapping
+ * disabled - statically disabling enum mapping is significantly faster
+ * for enums, less so for union types.
+ */
+static inline void flatcc_json_printer_set_noenum(flatcc_json_printer_t *ctx, int x)
+{
+    ctx->noenum = !!x; /* normalize any non-zero value to 1 */
+}
+
+/*
+ * Override printing an existing scalar field if it equals the default value.
+ * Note that this setting is not mutually exclusive to `set_force_default`.
+ */
+static inline void flatcc_json_printer_set_skip_default(flatcc_json_printer_t *ctx, int x)
+{
+    ctx->skip_default = !!x; /* normalize any non-zero value to 1 */
+}
+
+/*
+ * Override skipping absent scalar fields and print the default value.
+ * Note that this setting is not mutually exclusive to `set_skip_default`.
+ */
+static inline void flatcc_json_printer_set_force_default(flatcc_json_printer_t *ctx, int x)
+{
+    ctx->force_default = !!x; /* normalize any non-zero value to 1 */
+}
+
+
+/*
+ * Set pretty-print indentation in number of spaces. 0 (default) is
+ * compact with no spaces or linebreaks (default), anything above
+ * triggers pretty print.
+ */
+static inline void flatcc_json_printer_set_indent(flatcc_json_printer_t *ctx, uint8_t x)
+{
+    ctx->indent = x; /* spaces per level; 0 disables pretty printing */
+}
+
+/*
+ * Override the default compact valid JSON format with a
+ * pretty printed non-strict version. Enums are translated
+ * to names, which is also the default.
+ */
+static inline void flatcc_json_printer_set_nonstrict(flatcc_json_printer_t *ctx)
+{
+    flatcc_json_printer_set_indent(ctx, 2);   /* pretty print with 2-space indent */
+    flatcc_json_printer_set_unquoted(ctx, 1); /* unquoted names and enum symbols */
+    flatcc_json_printer_set_noenum(ctx, 0);   /* keep symbolic enum printing (default) */
+}
+
+typedef uint32_t flatcc_json_printer_flags_t;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_unquote = 1;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_noenum = 2;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_skip_default = 4;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_force_default = 8;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_pretty = 16;
+static const flatcc_json_printer_flags_t flatcc_json_printer_f_nonstrict = 32;
+
+/*
+ * May be called instead of setting operational modes individually.
+ * Formatting is strict quoted json without pretty printing by default.
+ *
+ * flags are:
+ *
+ * `unquote`,
+ * `noenum`,
+ * `skip_default`,
+ * `force_default`,
+ * `pretty`,
+ * `nonstrict`
+ *
+ * `pretty` flag sets indentation to 2.
+ * `nonstrict` implies: `noenum`, `unquote`, `pretty`.
+ */
+static inline void flatcc_json_printer_set_flags(flatcc_json_printer_t *ctx, flatcc_json_printer_flags_t flags)
+{
+    /* Each boolean flag maps directly onto an operational mode field. */
+    ctx->unquote = !!(flags & flatcc_json_printer_f_unquote);
+    ctx->noenum = !!(flags & flatcc_json_printer_f_noenum);
+    ctx->skip_default = !!(flags & flatcc_json_printer_f_skip_default);
+    ctx->force_default = !!(flags & flatcc_json_printer_f_force_default);
+    if (flags & flatcc_json_printer_f_pretty) {
+        flatcc_json_printer_set_indent(ctx, 2);
+    }
+    /* nonstrict is applied last so it can override the settings above. */
+    if (flags & flatcc_json_printer_f_nonstrict) {
+        flatcc_json_printer_set_nonstrict(ctx);
+    }
+}
+
+
+/*
+ * Detects if the context type uses dynamically allocated memory
+ * using malloc and realloc and frees any such memory.
+ *
+ * Not all context types need to be cleared.
+ */
+void flatcc_json_printer_clear(flatcc_json_printer_t *ctx);
+
+/*
+ * Ensures that there is always buffer capacity for printing the next
+ * primitive with delimiters.
+ *
+ * Only flushes complete flush units and is inexpensive to call.
+ * The content buffer has an extra reserve which ensures basic
+ * data types and delimiters can always be printed after a partial
+ * flush. At the end, a `flush` is required to flush the
+ * remaining incomplete buffer data.
+ *
+ * Numbers do not call partial flush but will always fit into the reserve
+ * capacity after a partial flush, also surrounded by delimiters.
+ *
+ * Variable length operations generally submit a partial flush so it is
+ * safe to print a number after a name without flushing, but vectors of
+ * numbers must (and do) issue a partial flush between elements. This is
+ * handled automatically but must be considered if using the primitives
+ * for special purposes. Because repeated partial flushes are very cheap
+ * this is only a concern for high performance applications.
+ *
+ * When indentation is enabled, partial flush is also automatically
+ * issued.
+ */
+static inline void flatcc_json_printer_flush_partial(flatcc_json_printer_t *ctx)
+{
+    /* Only flush once the watermark is reached; cheap to call repeatedly. */
+    if (ctx->p >= ctx->pflush) {
+        ctx->flush(ctx, 0); /* all = 0 requests a partial flush */
+    }
+}
+
+/* Returns the total printed size, both flushed and in buffer. */
+static inline size_t flatcc_json_printer_total(flatcc_json_printer_t *ctx)
+{
+    /* Bytes already flushed plus bytes still pending in the buffer. */
+    return ctx->total + (size_t)(ctx->p - ctx->buf);
+}
+
+/*
+ * Flush the remaining data not flushed by partial flush. It is valid to
+ * call at any point if it is acceptable to have unaligned flush units,
+ * but this is not desirable if, for example, compression or encryption
+ * is added to the flush pipeline.
+ *
+ * Not called automatically at the end of printing a flatbuffer object
+ * in case more data needs to be appended without submitting incomplete
+ * flush units prematurely - for example adding a newline at the end.
+ *
+ * The flush behavior depends on the underlying `ctx` object, for
+ * example dynamic buffers have no distinction between partial and full
+ * flushes - here it is merely ensured that the buffer always has a
+ * reserve capacity left.
+ *
+ * Returns the total printed size.
+ */
+static inline size_t flatcc_json_printer_flush(flatcc_json_printer_t *ctx)
+{
+    ctx->flush(ctx, 1); /* all = 1 requests a full (non-partial) flush */
+    return flatcc_json_printer_total(ctx);
+}
+
+/*
+ * Helper functions to print anything into the json buffer.
+ * Strings are escaped.
+ *
+ * When pretty printing (indent > 0), level 0 has special significance -
+ * so if wrapping printed json in a manually printed container json
+ * object, these functions can help manage this.
+ */
+
+/* Escaped and quoted string. */
+void flatcc_json_printer_string(flatcc_json_printer_t *ctx, const char *s, size_t n);
+/* Unescaped and unquoted string. */
+void flatcc_json_printer_write(flatcc_json_printer_t *ctx, const char *s, size_t n);
+/* Print a newline and issues a partial flush. */
+void flatcc_json_printer_nl(flatcc_json_printer_t *ctx);
+/* Like numbers, a partial flush is not issued. */
+void flatcc_json_printer_char(flatcc_json_printer_t *ctx, char c);
+/* Indents and issues a partial flush. */
+void flatcc_json_printer_indent(flatcc_json_printer_t *ctx);
+/* Adjust indentation level, usually +/-1. */
+void flatcc_json_printer_add_level(flatcc_json_printer_t *ctx, int n);
+/* Returns current indentation level (0 is top level). */
+int flatcc_json_printer_get_level(flatcc_json_printer_t *ctx);
+
+/*
+ * If called explicitly be aware that repeated calls to numeric
+ * printers may cause buffer overflow without flush in-between.
+ */
+void flatcc_json_printer_uint8(flatcc_json_printer_t *ctx, uint8_t v);
+void flatcc_json_printer_uint16(flatcc_json_printer_t *ctx, uint16_t v);
+void flatcc_json_printer_uint32(flatcc_json_printer_t *ctx, uint32_t v);
+void flatcc_json_printer_uint64(flatcc_json_printer_t *ctx, uint64_t v);
+void flatcc_json_printer_int8(flatcc_json_printer_t *ctx, int8_t v);
+void flatcc_json_printer_int16(flatcc_json_printer_t *ctx, int16_t v);
+void flatcc_json_printer_int32(flatcc_json_printer_t *ctx, int32_t v);
+void flatcc_json_printer_int64(flatcc_json_printer_t *ctx, int64_t v);
+void flatcc_json_printer_bool(flatcc_json_printer_t *ctx, int v);
+void flatcc_json_printer_float(flatcc_json_printer_t *ctx, float v);
+void flatcc_json_printer_double(flatcc_json_printer_t *ctx, double v);
+
+void flatcc_json_printer_enum(flatcc_json_printer_t *ctx,
+ const char *symbol, size_t len);
+
+/*
+ * Convenience function to add a trailing newline, flush the buffer,
+ * test for error and reset the context for reuse.
+ *
+ * Returns total size printed or < 0 on error.
+ *
+ * This function makes most sense for file oriented output.
+ * See also `finalize_dynamic_buffer`.
+ */
+static inline int flatcc_json_printer_finalize(flatcc_json_printer_t *ctx)
+{
+    int ret;
+    flatcc_json_printer_nl(ctx);               /* trailing newline + partial flush */
+    ret = (int)flatcc_json_printer_flush(ctx); /* full flush; yields total printed size */
+    if (ctx->error) {
+        ret = -1; /* any sticky error overrides the size result */
+    }
+    flatcc_json_printer_reset(ctx); /* leave context ready for reuse */
+    return ret;
+}
+
+/*
+ * Allocates a small buffer and grows it dynamically.
+ * Buffer survives past reset. To reduce size between uses, call clear
+ * followed by init call. To reuse buffer just call reset between uses.
+ * If `buffer_size` is 0 a sensible default is being used. The size is
+ * automatically rounded up to reserved size if too small.
+ *
+ * Returns -1 on alloc error (no cleanup needed), or 0 on success.
+ * Eventually the clear method must be called to return memory.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init_dynamic_buffer(flatcc_json_printer_t *ctx, size_t buffer_size);
+
+/*
+ * Similar to calling `finalize` but returns the buffer and does NOT
+ * reset, but rather clears printer object and the returned buffer must
+ * be deallocated with `free`.
+ *
+ * The returned buffer is zero terminated.
+ *
+ * NOTE: it is entirely optional to use this method. For repeated used
+ * of dynamic buffers, `newline` (or not) followed by `get_buffer`
+ * and `reset` will be an alternative.
+ *
+ * Stores the printed buffer size in `buffer_size` if it is not null.
+ *
+ * See also `get_dynamic_buffer`.
+ */
+void *flatcc_json_printer_finalize_dynamic_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size);
+
+
+/*************************************************************
+ * The following is normally only used by generated code.
+ *************************************************************/
+
+typedef struct flatcc_json_printer_table_descriptor flatcc_json_printer_table_descriptor_t;
+
+struct flatcc_json_printer_table_descriptor {
+    const void *table;  /* pointer to the table data inside the buffer */
+    const void *vtable; /* pointer to the table's vtable */
+    int vsize;          /* vtable size in bytes -- TODO confirm against implementation */
+    int ttl;            /* remaining recursion budget; presumably guards deep_recursion error */
+    int count;          /* NOTE(review): appears to track printed members (delimiter logic); confirm */
+};
+
+typedef struct flatcc_json_printer_union_descriptor flatcc_json_printer_union_descriptor_t;
+
+struct flatcc_json_printer_union_descriptor {
+    const void *member; /* pointer to the union member data */
+    int ttl;            /* remaining recursion budget; presumably guards deep_recursion error */
+    uint8_t type;       /* union type discriminator value */
+};
+
+typedef void flatcc_json_printer_table_f(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td);
+
+typedef void flatcc_json_printer_struct_f(flatcc_json_printer_t *ctx,
+ const void *p);
+
+typedef void flatcc_json_printer_union_f(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud);
+
+/* Generated value to name map callbacks. */
+typedef void flatcc_json_printer_union_type_f(flatcc_json_printer_t *ctx, flatbuffers_utype_t type);
+typedef void flatcc_json_printer_uint8_enum_f(flatcc_json_printer_t *ctx, uint8_t v);
+typedef void flatcc_json_printer_uint16_enum_f(flatcc_json_printer_t *ctx, uint16_t v);
+typedef void flatcc_json_printer_uint32_enum_f(flatcc_json_printer_t *ctx, uint32_t v);
+typedef void flatcc_json_printer_uint64_enum_f(flatcc_json_printer_t *ctx, uint64_t v);
+typedef void flatcc_json_printer_int8_enum_f(flatcc_json_printer_t *ctx, int8_t v);
+typedef void flatcc_json_printer_int16_enum_f(flatcc_json_printer_t *ctx, int16_t v);
+typedef void flatcc_json_printer_int32_enum_f(flatcc_json_printer_t *ctx, int32_t v);
+typedef void flatcc_json_printer_int64_enum_f(flatcc_json_printer_t *ctx, int64_t v);
+typedef void flatcc_json_printer_bool_enum_f(flatcc_json_printer_t *ctx, flatbuffers_bool_t v);
+
+#define __define_print_scalar_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v);
+
+#define __define_print_scalar_optional_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len);
+
+#define __define_print_scalar_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _struct_field(flatcc_json_printer_t *ctx,\
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len);
+
+#define __define_print_scalar_array_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count);
+
+#define __define_print_enum_array_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_optional_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_scalar_vector_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _vector_field(flatcc_json_printer_t *ctx,\
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len);
+
+#define __define_print_enum_vector_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+__define_print_scalar_field_proto(uint8, uint8_t)
+__define_print_scalar_field_proto(uint16, uint16_t)
+__define_print_scalar_field_proto(uint32, uint32_t)
+__define_print_scalar_field_proto(uint64, uint64_t)
+__define_print_scalar_field_proto(int8, int8_t)
+__define_print_scalar_field_proto(int16, int16_t)
+__define_print_scalar_field_proto(int32, int32_t)
+__define_print_scalar_field_proto(int64, int64_t)
+__define_print_scalar_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_field_proto(float, float)
+__define_print_scalar_field_proto(double, double)
+
+__define_print_enum_field_proto(uint8, uint8_t)
+__define_print_enum_field_proto(uint16, uint16_t)
+__define_print_enum_field_proto(uint32, uint32_t)
+__define_print_enum_field_proto(uint64, uint64_t)
+__define_print_enum_field_proto(int8, int8_t)
+__define_print_enum_field_proto(int16, int16_t)
+__define_print_enum_field_proto(int32, int32_t)
+__define_print_enum_field_proto(int64, int64_t)
+__define_print_enum_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_optional_field_proto(uint8, uint8_t)
+__define_print_scalar_optional_field_proto(uint16, uint16_t)
+__define_print_scalar_optional_field_proto(uint32, uint32_t)
+__define_print_scalar_optional_field_proto(uint64, uint64_t)
+__define_print_scalar_optional_field_proto(int8, int8_t)
+__define_print_scalar_optional_field_proto(int16, int16_t)
+__define_print_scalar_optional_field_proto(int32, int32_t)
+__define_print_scalar_optional_field_proto(int64, int64_t)
+__define_print_scalar_optional_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_optional_field_proto(float, float)
+__define_print_scalar_optional_field_proto(double, double)
+
+__define_print_enum_optional_field_proto(uint8, uint8_t)
+__define_print_enum_optional_field_proto(uint16, uint16_t)
+__define_print_enum_optional_field_proto(uint32, uint32_t)
+__define_print_enum_optional_field_proto(uint64, uint64_t)
+__define_print_enum_optional_field_proto(int8, int8_t)
+__define_print_enum_optional_field_proto(int16, int16_t)
+__define_print_enum_optional_field_proto(int32, int32_t)
+__define_print_enum_optional_field_proto(int64, int64_t)
+__define_print_enum_optional_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_struct_field_proto(uint8, uint8_t)
+__define_print_scalar_struct_field_proto(uint16, uint16_t)
+__define_print_scalar_struct_field_proto(uint32, uint32_t)
+__define_print_scalar_struct_field_proto(uint64, uint64_t)
+__define_print_scalar_struct_field_proto(int8, int8_t)
+__define_print_scalar_struct_field_proto(int16, int16_t)
+__define_print_scalar_struct_field_proto(int32, int32_t)
+__define_print_scalar_struct_field_proto(int64, int64_t)
+__define_print_scalar_struct_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_struct_field_proto(float, float)
+__define_print_scalar_struct_field_proto(double, double)
+
+/*
+ * char arrays are special as there are no char fields
+ * without arrays and because they are printed as strings.
+ */
+__define_print_scalar_array_struct_field_proto(char, char)
+
+__define_print_scalar_array_struct_field_proto(uint8, uint8_t)
+__define_print_scalar_array_struct_field_proto(uint16, uint16_t)
+__define_print_scalar_array_struct_field_proto(uint32, uint32_t)
+__define_print_scalar_array_struct_field_proto(uint64, uint64_t)
+__define_print_scalar_array_struct_field_proto(int8, int8_t)
+__define_print_scalar_array_struct_field_proto(int16, int16_t)
+__define_print_scalar_array_struct_field_proto(int32, int32_t)
+__define_print_scalar_array_struct_field_proto(int64, int64_t)
+__define_print_scalar_array_struct_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_array_struct_field_proto(float, float)
+__define_print_scalar_array_struct_field_proto(double, double)
+
+__define_print_enum_array_struct_field_proto(uint8, uint8_t)
+__define_print_enum_array_struct_field_proto(uint16, uint16_t)
+__define_print_enum_array_struct_field_proto(uint32, uint32_t)
+__define_print_enum_array_struct_field_proto(uint64, uint64_t)
+__define_print_enum_array_struct_field_proto(int8, int8_t)
+__define_print_enum_array_struct_field_proto(int16, int16_t)
+__define_print_enum_array_struct_field_proto(int32, int32_t)
+__define_print_enum_array_struct_field_proto(int64, int64_t)
+__define_print_enum_array_struct_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_enum_struct_field_proto(uint8, uint8_t)
+__define_print_enum_struct_field_proto(uint16, uint16_t)
+__define_print_enum_struct_field_proto(uint32, uint32_t)
+__define_print_enum_struct_field_proto(uint64, uint64_t)
+__define_print_enum_struct_field_proto(int8, int8_t)
+__define_print_enum_struct_field_proto(int16, int16_t)
+__define_print_enum_struct_field_proto(int32, int32_t)
+__define_print_enum_struct_field_proto(int64, int64_t)
+__define_print_enum_struct_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_vector_field_proto(uint8, uint8_t)
+__define_print_scalar_vector_field_proto(uint16, uint16_t)
+__define_print_scalar_vector_field_proto(uint32, uint32_t)
+__define_print_scalar_vector_field_proto(uint64, uint64_t)
+__define_print_scalar_vector_field_proto(int8, int8_t)
+__define_print_scalar_vector_field_proto(int16, int16_t)
+__define_print_scalar_vector_field_proto(int32, int32_t)
+__define_print_scalar_vector_field_proto(int64, int64_t)
+__define_print_scalar_vector_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_vector_field_proto(float, float)
+__define_print_scalar_vector_field_proto(double, double)
+
+__define_print_enum_vector_field_proto(uint8, uint8_t)
+__define_print_enum_vector_field_proto(uint16, uint16_t)
+__define_print_enum_vector_field_proto(uint32, uint32_t)
+__define_print_enum_vector_field_proto(uint64, uint64_t)
+__define_print_enum_vector_field_proto(int8, int8_t)
+__define_print_enum_vector_field_proto(int16, int16_t)
+__define_print_enum_vector_field_proto(int32, int32_t)
+__define_print_enum_vector_field_proto(int64, int64_t)
+__define_print_enum_vector_field_proto(bool, flatbuffers_bool_t)
+
+void flatcc_json_printer_uint8_vector_base64_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len, int urlsafe);
+
+/*
+ * If `fid` is null, the identifier is not checked and is allowed to be
+ * entirely absent.
+ *
+ * The buffer must at least be aligned to uoffset_t on systems that
+ * require aligned memory addresses (as always for flatbuffers).
+ */
+int flatcc_json_printer_table_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_table_f *pf);
+
+int flatcc_json_printer_struct_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_struct_f *pf);
+
+/*
+ * Call before and after enum flags to ensure proper quotation. Enum
+ * quotes may be configured runtime, but regardless of this, multiple
+ * flags may be forced to be quoted depending on compile time flag since
+ * not all parsers may be able to handle unquoted space separated values
+ * even if they handle non-strict unquoted json otherwise.
+ *
+ * Flags should only be called when not empty (0) and when there are no
+ * unknown flags in the value. Otherwise print the numeric value. The
+ * auto generated code deals with this.
+ *
+ * This bit twiddling hack may be useful:
+ *
+ * `multiple = 0 != (v & (v - 1));`
+ */
+void flatcc_json_printer_delimit_enum_flags(flatcc_json_printer_t *ctx, int multiple);
+
+/* The index increments from 0 to handle space. It is not the flag bit position. */
+void flatcc_json_printer_enum_flag(flatcc_json_printer_t *ctx, int index, const char *symbol, size_t len);
+
+/* A struct inside another struct, as opposed to inside a table or a root. */
+void flatcc_json_printer_embedded_struct_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_embedded_struct_array_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ size_t size, size_t count,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_struct_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_struct_f *pf);
+
+void flatcc_json_printer_string_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len);
+
+void flatcc_json_printer_string_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len);
+
+void flatcc_json_printer_table_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_struct_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ size_t size,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_table_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf);
+
+void flatcc_json_printer_struct_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_struct_f *pf);
+
+void flatcc_json_printer_table_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf);
+
+void flatcc_json_printer_union_table(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_struct(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_union_string(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_JSON_PRINTER_H */
diff --git a/include/flatcc/flatcc_portable.h b/include/flatcc/flatcc_portable.h
new file mode 100644
index 0000000..9b0eb0c
--- /dev/null
+++ b/include/flatcc/flatcc_portable.h
@@ -0,0 +1,14 @@
+#ifndef FLATCC_PORTABLE_H
+#define FLATCC_PORTABLE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc/portable/portable_basic.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_PORTABLE_H */
diff --git a/include/flatcc/flatcc_prologue.h b/include/flatcc/flatcc_prologue.h
new file mode 100644
index 0000000..3a74ed6
--- /dev/null
+++ b/include/flatcc/flatcc_prologue.h
@@ -0,0 +1,8 @@
+/* Include guard intentionally left out. */
+
+#define PDIAGNOSTIC_IGNORE_UNUSED
+#include "flatcc/portable/pdiagnostic_push.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
diff --git a/include/flatcc/flatcc_refmap.h b/include/flatcc/flatcc_refmap.h
new file mode 100644
index 0000000..062d94f
--- /dev/null
+++ b/include/flatcc/flatcc_refmap.h
@@ -0,0 +1,144 @@
+/*
+ * The flatcc builder supports storing a pointer to a refmap
+ * and wraps some operations to make them work as a dummy
+ * even if no refmap has been set. This enables optional
+ * DAG preservation possible during clone operations.
+ *
+ * A refmap maps a source address to a builder reference.
+ *
+ * This is just a map, but the semantics are important:
+ *
+ * The map thus preserves identity of the source. It is not a
+ * cache because cache eviction would fail to properly track
+ * identity.
+ *
+ * The map is used for memoization during object cloning and
+ * may also be used by user logic doing similar operations.
+ * This ensures that identity is preserved so a source object is
+ * not duplicated which could lead to either loss of semantic
+ * information, or an explosion in size, or both. In some, or
+ * even most, cases this concern may not be important, but when
+ * it is important, it is important.
+ *
+ * The source address must not be reused for different content
+ * for the lifetime of the map, although the content does not
+ * have to be valid or even exist at that location since the source
+ * address is just used as a key.
+ *
+ * The lifetime may be a single clone operation which then
+ * tracks child object references as well, or it may be the
+ * lifetime of the buffer builder.
+ *
+ * The map may be flushed explicitly when the source addresses
+ * are no longer unique, such as when reusing a memory buffer,
+ * and when identity preservation is no longer important.
+ * Flushing a map is essentially the same as ending a lifetime.
+ *
+ * Multiple maps may exist concurrently for example if cloning
+ * an object twice into two new objects that should have
+ * separate identities. This is especially true and necessary
+ * when creating a new nested buffer because the nested buffer
+ * cannot share references with the parent. Cloning an object
+ * that contains a nested buffer does not require multiple maps
+ * because the nested buffer is then opaque.
+ */
+
+#ifndef FLATCC_REFMAP_H
+#define FLATCC_REFMAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc/flatcc_types.h"
+
+#ifndef FLATCC_REFMAP_MIN_BUCKETS
+/* 8 buckets gives us 5 useful initial entries with a load factor of 0.7 */
+#define FLATCC_REFMAP_MIN_BUCKETS 8
+#endif
+
+#define FLATCC_REFMAP_LOAD_FACTOR 0.7f
+
+typedef struct flatcc_refmap flatcc_refmap_t;
+typedef flatbuffers_soffset_t flatcc_refmap_ref_t;
+
+static const flatcc_refmap_ref_t flatcc_refmap_not_found = 0;
+
+struct flatcc_refmap_item {
+ const void *src;
+ flatcc_refmap_ref_t ref;
+};
+
+struct flatcc_refmap {
+ size_t count;
+ size_t buckets;
+ struct flatcc_refmap_item *table;
+ /* Use stack allocation for small maps. */
+ struct flatcc_refmap_item min_table[FLATCC_REFMAP_MIN_BUCKETS];
+};
+
+/*
+ * Fast zero initialization - does not allocate any memory.
+ * May be replaced by memset 0, but `init` avoids clearing the
+ * stack allocated initial hash table until it is needed.
+ */
+static inline int flatcc_refmap_init(flatcc_refmap_t *refmap)
+{
+ refmap->count = 0;
+ refmap->buckets = 0;
+ refmap->table = 0;
+ return 0;
+}
+
+/*
+ * Removes all items and deallocates memory.
+ * Not required unless `insert` or `resize` took place. The map can be
+ * reused subsequently without calling `init`.
+ */
+void flatcc_refmap_clear(flatcc_refmap_t *refmap);
+
+/*
+ * Keeps allocated memory as is, but removes all items. The map
+ * must intialized first.
+ */
+void flatcc_refmap_reset(flatcc_refmap_t *refmap);
+
+/*
+ * Returns the inserted reference if the `src` pointer was found,
+ * without inspecting the content of the `src` pointer.
+ *
+ * Returns flatcc_refmap_not_found (default 0) if the `src` pointer was
+ * not found.
+ */
+flatcc_refmap_ref_t flatcc_refmap_find(flatcc_refmap_t *refmap, const void *src);
+
+/*
+ * Inserts a `src` source pointer and its associated `ref` reference
+ * into the refmap without inspecting the `src` pointer content. The
+ * `ref` value will be replaced if the `src` pointer already exists.
+ *
+ * Inserting null will just return the ref without updating the map.
+ *
+ * There is no delete operation which simplifies an open
+ * addressing hash table, and it isn't needed for this use case.
+ *
+ * Returns the input ref or not_found on allocation error.
+ */
+flatcc_refmap_ref_t flatcc_refmap_insert(flatcc_refmap_t *refmap, const void *src, flatcc_refmap_ref_t ref);
+
+/*
+ * Set the hash table to accommodate at least `count` items while staying
+ * within the predefined load factor.
+ *
+ * Resize is primarily an internal operation, but the user may resize
+ * ahead of a large anticipated load, or after a large load to shrink
+ * the table using 0 as the `count` argument. The table never shrinks
+ * on its own account.
+ */
+int flatcc_refmap_resize(flatcc_refmap_t *refmap, size_t count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_REFMAP_H */
diff --git a/include/flatcc/flatcc_rtconfig.h b/include/flatcc/flatcc_rtconfig.h
new file mode 100644
index 0000000..59727b6
--- /dev/null
+++ b/include/flatcc/flatcc_rtconfig.h
@@ -0,0 +1,162 @@
+#ifndef FLATCC_RTCONFIG_H
+#define FLATCC_RTCONFIG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Include portability layer here since all other files depend on it. */
+#ifdef FLATCC_PORTABLE
+#include "flatcc/portable/portable.h"
+#endif
+
+/*
+ * Fast printing and parsing of double.
+ *
+ * This requires the grisu3/grisu3_* files to be in the include path,
+ * otherwise strtod and sprintf will be used (these are needed anyway
+ * as fallback for cases not supported by grisu3).
+ */
+#ifndef FLATCC_USE_GRISU3
+#define FLATCC_USE_GRISU3 1
+#endif
+
+/*
+ * This requires a compiler that has enabled march=native or similar so
+ * __SSE4_2__ flag is defined. Otherwise it will have no effect.
+ *
+ * While SSE may be used for different purposes, it has (as of this
+ * writing) only been used to test the effect on JSON whitespace handling
+ * which improved, but not by a lot, assuming 64-bit unaligned access is
+ * otherwise available:
+ *
+ * With 8 space indentation, the JSON benchmark handles 308K parse ops/sec
+ * while SSE ups that to 333K parse ops/sec or 336K if \r\n is also
+ * consumed by SSE. Disabling indentation leaves SSE spacing handling
+ * ineffective, and performance reaches 450K parse ops/sec and can
+ * improve further to 500+K parse ops/sec if inexact GRISU3 numbers are
+ * allowed (they are pretty accurate anyway, just not exact). This
+ * feature requires hacking a flag direct in the grisu3 double parsing
+ * lib directly and only mentioned for comparison.
+ *
+ * In conclusion SSE doesn't add a lot to JSON space handling at least.
+ *
+ * Disabled by default, but can be overridden by build system.
+ */
+#ifndef FLATCC_USE_SSE4_2
+#define FLATCC_USE_SSE4_2 0
+#endif
+
+/*
+ * The verifier only reports yes and no. The following setting
+ * enables assertions in debug builds. It must be compiled into
+ * the runtime library and is not normally the desired behavior.
+ *
+ * NOTE: enabling this can break test cases so use with build, not test.
+ */
+#if !defined(FLATCC_DEBUG_VERIFY) && !defined(NDEBUG)
+#define FLATCC_DEBUG_VERIFY 0
+#endif
+
+#if !defined(FLATCC_TRACE_VERIFY)
+#define FLATCC_TRACE_VERIFY 0
+#endif
+
+
+/*
+ * Limit recursion level for tables. Actual level may be deeper
+ * when structs are deeply nested - but these are limited by the
+ * schema compiler.
+ */
+#ifndef FLATCC_JSON_PRINT_MAX_LEVELS
+#define FLATCC_JSON_PRINT_MAX_LEVELS 100
+#endif
+
+/* Maximum length of names printed excluding _type suffix. */
+#ifndef FLATCC_JSON_PRINT_NAME_LEN_MAX
+#define FLATCC_JSON_PRINT_NAME_LEN_MAX 100
+#endif
+
+/*
+ * Print float and double values with C99 hexadecimal floating point
+ * notation. This option is not valid JSON but it avoids precision
+ * loss, correctly handles NaN, +/-Infinity and is significantly faster
+ * to parse and print. Some JSON parsers rely on strtod which does
+ * support hexadecimal floating points when C99 compliant.
+ */
+#ifndef FLATCC_JSON_PRINT_HEX_FLOAT
+#define FLATCC_JSON_PRINT_HEX_FLOAT 0
+#endif
+
+/*
+ * Always print multiple enum flags like `color: "Red Green"`
+ * even when unquote is selected as an option for single
+ * value like `color: Green`. Otherwise multiple values
+ * are printed as `color: Red Green`, but this could break
+ * some flatbuffer json parser.
+ */
+#ifndef FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS
+#define FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS 1
+#endif
+
+/*
+ * The general nesting limit may be lower, but for skipping
+ * JSON we do not need to - we can set this high as it only
+ * costs a single char per level in a stack array.
+ */
+#ifndef FLATCC_JSON_PARSE_GENERIC_MAX_NEST
+#define FLATCC_JSON_PARSE_GENERIC_MAX_NEST 512
+#endif
+
+/* Store value even if it is default. */
+#ifndef FLATCC_JSON_PARSE_FORCE_DEFAULTS
+#define FLATCC_JSON_PARSE_FORCE_DEFAULTS 0
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+#define FLATCC_JSON_PARSE_ALLOW_UNQUOTED 1
+#endif
+
+/*
+ * Multiple enum values are by default not permitted unless
+ * quoted like `color: "Red Green"` as per Google's flatc JSON
+ * parser while a single value like `color: Red` can be
+ * unquoted. Enabling this setting will allow `color: Red
+ * Green`, but only if FLATCC_JSON_PARSE_ALLOW_UNQUOTED is
+ * also enabled.
+ */
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST
+#define FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST 0
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNKNOWN_FIELD
+#define FLATCC_JSON_PARSE_ALLOW_UNKNOWN_FIELD 1
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+#define FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA 1
+#endif
+
+/*
+ * Just parse to the closing bracket '}' if set.
+ * Otherwise parse to end by consuming space and
+ * fail if anything but space follows.
+ */
+#ifndef FLATCC_PARSE_IGNORE_TRAILING_DATA
+#define FLATCC_PARSE_IGNORE_TRAILING_DATA 0
+#endif
+
+/*
+ * Optimize to parse a lot of white space, but
+ * in most cases it probably slows parsing down.
+ */
+#ifndef FLATCC_JSON_PARSE_WIDE_SPACE
+#define FLATCC_JSON_PARSE_WIDE_SPACE 0
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_RTCONFIG_H */
diff --git a/include/flatcc/flatcc_types.h b/include/flatcc/flatcc_types.h
new file mode 100644
index 0000000..69605d2
--- /dev/null
+++ b/include/flatcc/flatcc_types.h
@@ -0,0 +1,97 @@
+#ifndef FLATCC_TYPES_H
+#define FLATCC_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/*
+ * This should match generated type declarations in
+ * `flatbuffers_common_reader.h` (might have different name prefix).
+ * Read only generated code does not depend on library code,
+ * hence the duplication.
+ */
+#ifndef flatbuffers_types_defined
+#define flatbuffers_types_defined
+
+/*
+ * uoffset_t and soffset_t must be same integer type, except for sign.
+ * They can be (u)int16_t, (u)int32_t, or (u)int64_t.
+ * The default is (u)int32_t.
+ *
+ * voffset_t is expected to be uint16_t, but can experimentally be
+ * compiled from uint8_t up to uint32_t.
+ *
+ * ID_MAX is the largest value that can index a vtable. The table size
+ * is given as voffset value. Each id represents a voffset value index
+ * from 0 to max inclusive. Space is required for two header voffset
+ * fields and the unaddressable highest index (due to the table size
+ * representation). For 16-bit voffsets this yields a max of 2^15 - 4,
+ * or (2^16 - 1) / 2 - 3.
+ */
+
+#define flatbuffers_uoffset_t_defined
+#define flatbuffers_soffset_t_defined
+#define flatbuffers_voffset_t_defined
+#define flatbuffers_utype_t_defined
+#define flatbuffers_bool_t_defined
+#define flatbuffers_thash_t_defined
+#define flatbuffers_fid_t_defined
+
+/* uoffset_t is also used for vector and string headers. */
+#define FLATBUFFERS_UOFFSET_MAX UINT32_MAX
+#define FLATBUFFERS_SOFFSET_MAX INT32_MAX
+#define FLATBUFFERS_SOFFSET_MIN INT32_MIN
+#define FLATBUFFERS_VOFFSET_MAX UINT16_MAX
+#define FLATBUFFERS_UTYPE_MAX UINT8_MAX
+/* Well - the max of the underlying type. */
+#define FLATBUFFERS_BOOL_MAX UINT8_MAX
+#define FLATBUFFERS_THASH_MAX UINT32_MAX
+
+#define FLATBUFFERS_ID_MAX (FLATBUFFERS_VOFFSET_MAX / sizeof(flatbuffers_voffset_t) - 3)
+/* Vectors of empty structs can yield div by zero, so we must guard against this. */
+#define FLATBUFFERS_COUNT_MAX(elem_size) (FLATBUFFERS_UOFFSET_MAX/((elem_size) == 0 ? 1 : (elem_size)))
+
+#define FLATBUFFERS_UOFFSET_WIDTH 32
+#define FLATBUFFERS_COUNT_WIDTH 32
+#define FLATBUFFERS_SOFFSET_WIDTH 32
+#define FLATBUFFERS_VOFFSET_WIDTH 16
+#define FLATBUFFERS_UTYPE_WIDTH 8
+#define FLATBUFFERS_BOOL_WIDTH 8
+#define FLATBUFFERS_THASH_WIDTH 32
+
+#define FLATBUFFERS_TRUE 1
+#define FLATBUFFERS_FALSE 0
+
+#define FLATBUFFERS_PROTOCOL_IS_LE 1
+#define FLATBUFFERS_PROTOCOL_IS_BE 0
+
+typedef uint32_t flatbuffers_uoffset_t;
+typedef int32_t flatbuffers_soffset_t;
+typedef uint16_t flatbuffers_voffset_t;
+typedef uint8_t flatbuffers_utype_t;
+typedef uint8_t flatbuffers_bool_t;
+typedef uint32_t flatbuffers_thash_t;
+/* Public facing type operations. */
+typedef flatbuffers_utype_t flatbuffers_union_type_t;
+
+static const flatbuffers_bool_t flatbuffers_true = FLATBUFFERS_TRUE;
+static const flatbuffers_bool_t flatbuffers_false = FLATBUFFERS_FALSE;
+
+#define FLATBUFFERS_IDENTIFIER_SIZE (FLATBUFFERS_THASH_WIDTH / 8)
+
+typedef char flatbuffers_fid_t[FLATBUFFERS_IDENTIFIER_SIZE];
+
+#endif /* flatbuffers_types_defined */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_TYPES_H */
diff --git a/include/flatcc/flatcc_unaligned.h b/include/flatcc/flatcc_unaligned.h
new file mode 100644
index 0000000..a7dc546
--- /dev/null
+++ b/include/flatcc/flatcc_unaligned.h
@@ -0,0 +1,16 @@
+#ifndef FLATCC_UNLIGNED_H
+#define FLATCC_UNLIGNED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc/portable/punaligned.h"
+
+#define FLATCC_ALLOW_UNALIGNED_ACCESS PORTABLE_UNALIGNED_ACCESS
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_UNLIGNED_H */
diff --git a/include/flatcc/flatcc_verifier.h b/include/flatcc/flatcc_verifier.h
new file mode 100644
index 0000000..7e0d296
--- /dev/null
+++ b/include/flatcc/flatcc_verifier.h
@@ -0,0 +1,239 @@
+#ifndef FLATCC_VERIFIER_H
+#define FLATCC_VERIFIER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Runtime support for verifying flatbuffers.
+ *
+ * Link with the verifier implementation file.
+ *
+ * Note:
+ *
+ * 1) nested buffers will NOT have their identifier verified.
+ * The user may do so subsequently. The reason is in part because
+ * the information is not readily available without generated reader code,
+ * in part because the buffer might use a different, but valid,
+ * identifier and the user has no chance of specifying this in the
+ * verifier code. The root verifier also doesn't assume a specific id
+ * but accepts a user supplied input which may be null.
+ *
+ * 2) All offsets in a buffer are verified for alignment relative to the
+ * buffer start, but the buffer itself is only assumed to be aligned to
+ * uoffset_t. A reader should therefore ensure buffer alignment separately
+ * before reading the buffer. Nested buffers are in fact checked for
+ * alignment, but still only relative to the root buffer.
+ *
+ * 3) The max nesting level includes nested buffer nestings, so the
+ * verifier might fail even if the individual buffers are otherwise ok.
+ * This is to prevent abuse with lots of nested buffers.
+ *
+ *
+ * IMPORTANT:
+ *
+ * Even if verifier passes, the buffer may be invalid to access due to
+ * lack of alignment in memory, but the verifier is safe to call.
+ *
+ * NOTE: The buffer is not safe to modify after verification because an
+ * attacker may craft overlapping data structures such that modification
+ * of one field updates another in a way that violates the buffer
+ * constraints. This may also be caused by a clever compression scheme.
+ *
+ * It is likely faster to rewrite the table although this is also
+ * dangerous because an attacker (or even normal user) can draft a DAG
+ * that explodes when expanded carelessly. A safer approach is to
+ * hash all object references written and reuse those that match. This
+ * will expand references into other objects while bounding expansion
+ * and it will be safe to update assuming shared objects are ok to
+ * update.
+ *
+ */
+
+#include "flatcc/flatcc_types.h"
+
+#define FLATCC_VERIFY_ERROR_MAP(XX)\
+ XX(ok, "ok")\
+ XX(buffer_header_too_small, "buffer header too small")\
+ XX(identifier_mismatch, "identifier mismatch")\
+ XX(max_nesting_level_reached, "max nesting level reached")\
+ XX(required_field_missing, "required field missing")\
+ XX(runtime_buffer_header_not_aligned, "runtime: buffer header not aligned")\
+ XX(runtime_buffer_size_too_large, "runtime: buffer size too large")\
+ XX(string_not_zero_terminated, "string not zero terminated")\
+ XX(string_out_of_range, "string out of range")\
+ XX(struct_out_of_range, "struct out of range")\
+ XX(struct_size_overflow, "struct size overflow")\
+ XX(struct_unaligned, "struct unaligned")\
+ XX(table_field_not_aligned, "table field not aligned")\
+ XX(table_field_out_of_range, "table field out of range")\
+ XX(table_field_size_overflow, "table field size overflow")\
+ XX(table_header_out_of_range_or_unaligned, "table header out of range or unaligned")\
+ XX(vector_header_out_of_range_or_unaligned, "vector header out of range or unaligned")\
+ XX(string_header_out_of_range_or_unaligned, "string header out of range or unaligned")\
+ XX(offset_out_of_range, "offset out of range")\
+ XX(table_offset_out_of_range_or_unaligned, "table offset out of range or unaligned")\
+ XX(table_size_out_of_range, "table size out of range")\
+ XX(type_field_absent_from_required_union_field, "type field absent from required union field")\
+ XX(type_field_absent_from_required_union_vector_field, "type field absent from required union vector field")\
+ XX(union_cannot_have_a_table_without_a_type, "union cannot have a table without a type")\
+ XX(union_type_NONE_cannot_have_a_value, "union value field present with type NONE")\
+ XX(vector_count_exceeds_representable_vector_size, "vector count exceeds representable vector size")\
+ XX(vector_out_of_range, "vector out of range")\
+ XX(vtable_header_out_of_range, "vtable header out of range")\
+ XX(vtable_header_too_small, "vtable header too small")\
+ XX(vtable_offset_out_of_range_or_unaligned, "vtable offset out of range or unaligned")\
+ XX(vtable_size_out_of_range_or_unaligned, "vtable size out of range or unaligned")\
+ XX(vtable_size_overflow, "vtable size overflow")\
+ XX(union_element_absent_without_type_NONE, "union element absent without type NONE")\
+ XX(union_element_present_with_type_NONE, "union element present with type NONE")\
+ XX(union_vector_length_mismatch, "union type and table vectors have different lengths")\
+ XX(union_vector_verification_not_supported, "union vector verification not supported")\
+ XX(not_supported, "not supported")
+
+
+enum flatcc_verify_error_no {
+#define XX(no, str) flatcc_verify_error_##no,
+ FLATCC_VERIFY_ERROR_MAP(XX)
+#undef XX
+};
+
+#define flatcc_verify_ok flatcc_verify_error_ok
+
+const char *flatcc_verify_error_string(int err);
+
+/*
+ * Type specific table verifier function that checks each known field
+ * for existence in the vtable and then calls the appropriate verifier
+ * function in this library.
+ *
+ * The table descriptor values have been verified for bounds, overflow,
+ * and alignment, but vtable entries after header must be verified
+ * for all fields the table verifier function understands.
+ *
+ * Calls other typespecific verifier functions recursively whenever a
+ * table field, union or table vector is encountered.
+ */
+typedef struct flatcc_table_verifier_descriptor flatcc_table_verifier_descriptor_t;
+struct flatcc_table_verifier_descriptor {
+ /* Pointer to buffer. Not assumed to be aligned beyond uoffset_t. */
+ const void *buf;
+ /* Buffer size. */
+ flatbuffers_uoffset_t end;
+ /* Time to live: number of nesting levels left before failure. */
+ int ttl;
+ /* Vtable of current table. */
+ const void *vtable;
+ /* Table offset relative to buffer start */
+ flatbuffers_uoffset_t table;
+ /* Table end relative to buffer start as per vtable[1] field. */
+ flatbuffers_voffset_t tsize;
+ /* Size of vtable in bytes. */
+ flatbuffers_voffset_t vsize;
+};
+
+typedef int flatcc_table_verifier_f(flatcc_table_verifier_descriptor_t *td);
+
+typedef struct flatcc_union_verifier_descriptor flatcc_union_verifier_descriptor_t;
+
+struct flatcc_union_verifier_descriptor {
+ /* Pointer to buffer. Not assumed to be aligned beyond uoffset_t. */
+ const void *buf;
+ /* Buffer size. */
+ flatbuffers_uoffset_t end;
+ /* Time to live: number of nesting levels left before failure. */
+ int ttl;
+ /* Type of union value to be verified */
+ flatbuffers_utype_t type;
+ /* Offset relative to buffer start to where union value offset is stored. */
+ flatbuffers_uoffset_t base;
+ /* Offset of union value relative to base. */
+ flatbuffers_uoffset_t offset;
+};
+
+typedef int flatcc_union_verifier_f(flatcc_union_verifier_descriptor_t *ud);
+
+/*
+ * The `as_root` functions are normally the only functions called
+ * explicitly in this interface.
+ *
+ * If `fid` is null, the identifier is not checked and is allowed to be entirely absent.
+ *
+ * The buffer must at least be aligned to uoffset_t on systems that
+ * require aligned memory addresses. The buffer pointers alignment is
+ * not significant to internal verification of the buffer.
+ */
+int flatcc_verify_struct_as_root(const void *buf, size_t bufsiz, const char *fid,
+ size_t size, uint16_t align);
+
+int flatcc_verify_struct_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash,
+ size_t size, uint16_t align);
+
+int flatcc_verify_table_as_root(const void *buf, size_t bufsiz, const char *fid,
+ flatcc_table_verifier_f *root_tvf);
+
+int flatcc_verify_table_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash,
+ flatcc_table_verifier_f *root_tvf);
+/*
+ * The buffer header is verified by any of the `_as_root` verifiers, but
+ * this function may be used as a quick sanity check.
+ */
+int flatcc_verify_buffer_header(const void *buf, size_t bufsiz, const char *fid);
+
+int flatcc_verify_typed_buffer_header(const void *buf, size_t bufsiz, flatbuffers_thash_t type_hash);
+
+/*
+ * The following functions are typically called by a generated table
+ * verifier function.
+ */
+
+/* Scalar, enum or struct field. */
+int flatcc_verify_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, size_t size, uint16_t align);
+/* Vector of scalars, enums or structs. */
+int flatcc_verify_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, size_t elem_size, uint16_t align, size_t max_count);
+int flatcc_verify_string_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required);
+int flatcc_verify_string_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required);
+int flatcc_verify_table_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_table_verifier_f tvf);
+int flatcc_verify_table_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_table_verifier_f tvf);
+/* Table verifiers pass 0 as fid. */
+int flatcc_verify_struct_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, const char *fid,
+ size_t size, uint16_t align);
+int flatcc_verify_table_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, const char *fid,
+ uint16_t align, flatcc_table_verifier_f tvf);
+
+/*
+ * A NONE type will not accept a table being present, and a required
+ * union will not accept a type field being absent, and an absent type
+ * field will not accept a table field being present.
+ *
+ * If the above checks out and the type is not NONE, the uvf callback
+ * is executed. It must test each known table type and silently accept
+ * any unknown table type for forward compatibility. A union table
+ * value is verified without the required flag because an absent table
+ * encodes a typed NULL value while an absent type field encodes a
+ * missing union which fails if required.
+ */
+int flatcc_verify_union_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf);
+
+int flatcc_verify_union_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf);
+
+int flatcc_verify_union_table(flatcc_union_verifier_descriptor_t *ud, flatcc_table_verifier_f *tvf);
+int flatcc_verify_union_struct(flatcc_union_verifier_descriptor_t *ud, size_t size, uint16_t align);
+int flatcc_verify_union_string(flatcc_union_verifier_descriptor_t *ud);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_VERIFIER_H */
diff --git a/include/flatcc/flatcc_version.h b/include/flatcc/flatcc_version.h
new file mode 100644
index 0000000..78bc9c8
--- /dev/null
+++ b/include/flatcc/flatcc_version.h
@@ -0,0 +1,14 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FLATCC_VERSION_TEXT "0.6.2"
+#define FLATCC_VERSION_MAJOR 0
+#define FLATCC_VERSION_MINOR 6
+#define FLATCC_VERSION_PATCH 2
+/* 1 or 0 */
+#define FLATCC_VERSION_RELEASED 0
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/include/flatcc/portable/LICENSE b/include/flatcc/portable/LICENSE
new file mode 100644
index 0000000..bb7ca57
--- /dev/null
+++ b/include/flatcc/portable/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+Some files also Copyright author of MathGeoLib (https://github.com/juj)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
diff --git a/include/flatcc/portable/README.md b/include/flatcc/portable/README.md
new file mode 100644
index 0000000..512b1a8
--- /dev/null
+++ b/include/flatcc/portable/README.md
@@ -0,0 +1,57 @@
+A small library for adding C11 compatibility to older C compilers, but
+only a small highly useful subset such as static assertions, inline
+functions and alignment.
+
+C++ is not a primary target, but the library has been updated to be more
+C++ friendly based on user feedback.
+
+Many compilers already have the required functionality but with slightly
+different names and arguments.
+
+In addition, compatibility with the Linux `<endian.h>` system file is
+provided, and "punaligned.h" is provided for unaligned memory reads
+which in part depends on endian support.
+
+The library also provides fast integer printing and floating point
+printing and parsing optionally using the grisu3 algorithm, but can fall
+back to strtod and related. The `pgrisu3` folder is header only and
+excludes test cases found in the main grisu3 project the files were
+extracted from. Base64 conversion is also provided.
+
+Integer conversion is not just an optimization. It is more difficult
+than it would appear to portably parse an integer of known size such as
+`uint64_t` up to at most n bytes which is needed for safe parsing. At
+the same time, the sometimes significant performance gains warrants
+custom implementations that might as well be done once and for all.
+
+Files can be included individually, or portable.h may be included to get
+all functionality. If the compiler is C11 compliant, portable.h will not
+include anything, except: it will provide a patch for static assertions
+which clang does not fully support in all versions even with C11 flagged.
+
+The grisu3 header files are the runtime files for the Grisu3 floating
+point conversion to/from text C port. Test coverage is provided separately.
+This library can be used indirectly via pparsefp.h and pprintfp.h.
+
+The `pstatic_assert.h` file is often needed on C11 systems because the
+compiler and standard library may support `_Static_assert` without
+`static_assert`. For compilers without `_Static_assert`, a unique
+identifier is needed for each assertion. This is done non-standard with
+the `__COUNTER__` macro, but has a fallback to `pstatic_assert_scope.h`
+for systems without the `__COUNTER__` macro. Because of this fallback,
+`pstatic_assert.h` needs to be included in every file using
+`static_assert` in order to increment a scope counter, otherwise there
+is a risk of assert identifier conflicts when `static_assert` happen on
+the same line in different files.
+
+The `paligned_alloc.h` file implements the non-standard `aligned_free`
+to match the C11 standard `aligned_alloc` call. `aligned_free` is
+normally equivalent to `free`, but not on systems where `aligned_free`
+cannot be implemented using a system provided `free` call. Use of
+`aligned_free` is thus optional on some systems, but using it increases
+general portability at the cost of pure C11 compatibility.
+
+IMPORTANT NOTE: this library has been used on various platforms and
+updated with user feedback but it is impossible to systematically test
+all platforms so please test for specific use cases and report
+any issues upstream.
diff --git a/include/flatcc/portable/grisu3_math.h b/include/flatcc/portable/grisu3_math.h
new file mode 100644
index 0000000..cff6e8c
--- /dev/null
+++ b/include/flatcc/portable/grisu3_math.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/* 2016-02-02: Updated by mikkelfj
+ *
+ * Extracted from MatGeoLib grisu3.c, Apache 2.0 license, and extended.
+ *
+ * This file is usually included via grisu3_print.h or grisu3_parse.h.
+ *
+ * The original MatGeoLib dtoa_grisu3 implementation is largely
+ * unchanged except for the uint64 to double cast. The remaining changes
+ * are file structure, name changes, and new additions for parsing:
+ *
+ * - Split into header files only:
+ * grisu3_math.h, grisu3_print.h, (added grisu3_parse.h)
+ *
+ * - names prefixed with grisu3_, grisu3_diy_fp_, GRISU3_.
+ * - added static to all functions.
+ * - disabled clang unused function warnings.
+ * - guarded <stdint.h> to allow for alternative impl.
+ * - added extra numeric constants needed for parsing.
+ * - added dec_pow, cast_double_from_diy_fp.
+ * - changed some function names for consistency.
+ * - moved printing specific grisu3 functions to grisu3_print.h.
+ * - changed double to uint64 cast to avoid aliasing.
+ * - added new grisu3_parse.h for parsing doubles.
+ * - grisu3_print_double (dtoa_grisu3) format .1 as 0.1 needed for valid JSON output
+ * and grisu3_parse_double wouldn't consume it.
+ * - grisu3_print_double changed formatting to prefer 0.012 over 1.2e-2.
+ *
+ * These changes make it possible to include the files as headers only
+ * in other software libraries without risking name conflicts, and to
+ * extend the implementation with a port of Google's Double Conversion
+ * strtod functionality for parsing doubles.
+ *
+ * Extracted from: rev. 915501a / Dec 22, 2015
+ * <https://github.com/juj/MathGeoLib/blob/master/src/Math/grisu3.c>
+ * MathGeoLib License: http://www.apache.org/licenses/LICENSE-2.0.html
+ */
+
+#ifndef GRISU3_MATH_H
+#define GRISU3_MATH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Guarded to allow inclusion of pstdint.h first, if stdint.h is not supported. */
+#ifndef UINT8_MAX
+#include <stdint.h> /* uint64_t etc. */
+#endif
+
+#ifdef GRISU3_NO_ASSERT
+#undef GRISU3_ASSERT
+#define GRISU3_ASSERT(x) ((void)0)
+#endif
+
+#ifndef GRISU3_ASSERT
+#include <assert.h> /* assert */
+#define GRISU3_ASSERT(x) assert(x)
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer */
+#endif
+
+#define GRISU3_D64_SIGN 0x8000000000000000ULL
+#define GRISU3_D64_EXP_MASK 0x7FF0000000000000ULL
+#define GRISU3_D64_FRACT_MASK 0x000FFFFFFFFFFFFFULL
+#define GRISU3_D64_IMPLICIT_ONE 0x0010000000000000ULL
+#define GRISU3_D64_EXP_POS 52
+#define GRISU3_D64_EXP_BIAS 1075
+#define GRISU3_D64_DENORM_EXP (-GRISU3_D64_EXP_BIAS + 1)
+#define GRISU3_DIY_FP_FRACT_SIZE 64
+#define GRISU3_D_1_LOG2_10 0.30102999566398114 /* 1 / lg(10) */
+#define GRISU3_MIN_TARGET_EXP -60
+#define GRISU3_MASK32 0xFFFFFFFFULL
+#define GRISU3_MIN_CACHED_EXP -348
+#define GRISU3_MAX_CACHED_EXP 340
+#define GRISU3_CACHED_EXP_STEP 8
+#define GRISU3_D64_MAX_DEC_EXP 309
+#define GRISU3_D64_MIN_DEC_EXP -324
+#define GRISU3_D64_INF GRISU3_D64_EXP_MASK
+
+#define GRISU3_MIN(x,y) ((x) <= (y) ? (x) : (y))
+#define GRISU3_MAX(x,y) ((x) >= (y) ? (x) : (y))
+
+
+typedef struct grisu3_diy_fp
+{
+ uint64_t f;
+ int e;
+} grisu3_diy_fp_t;
+
+typedef struct grisu3_diy_fp_power
+{
+ uint64_t fract;
+ int16_t b_exp, d_exp;
+} grisu3_diy_fp_power_t;
+
+typedef union {
+ uint64_t u64;
+ double d64;
+} grisu3_cast_double_t;
+
+static uint64_t grisu3_cast_uint64_from_double(double d)
+{
+ grisu3_cast_double_t cd;
+ cd.d64 = d;
+ return cd.u64;
+}
+
+static double grisu3_cast_double_from_uint64(uint64_t u)
+{
+ grisu3_cast_double_t cd;
+ cd.u64 = u;
+ return cd.d64;
+}
+
+#define grisu3_double_infinity grisu3_cast_double_from_uint64(GRISU3_D64_INF)
+#define grisu3_double_nan grisu3_cast_double_from_uint64(GRISU3_D64_INF + 1)
+
+static const grisu3_diy_fp_power_t grisu3_diy_fp_pow_cache[] =
+{
+ { 0xfa8fd5a0081c0288ULL, -1220, -348 },
+ { 0xbaaee17fa23ebf76ULL, -1193, -340 },
+ { 0x8b16fb203055ac76ULL, -1166, -332 },
+ { 0xcf42894a5dce35eaULL, -1140, -324 },
+ { 0x9a6bb0aa55653b2dULL, -1113, -316 },
+ { 0xe61acf033d1a45dfULL, -1087, -308 },
+ { 0xab70fe17c79ac6caULL, -1060, -300 },
+ { 0xff77b1fcbebcdc4fULL, -1034, -292 },
+ { 0xbe5691ef416bd60cULL, -1007, -284 },
+ { 0x8dd01fad907ffc3cULL, -980, -276 },
+ { 0xd3515c2831559a83ULL, -954, -268 },
+ { 0x9d71ac8fada6c9b5ULL, -927, -260 },
+ { 0xea9c227723ee8bcbULL, -901, -252 },
+ { 0xaecc49914078536dULL, -874, -244 },
+ { 0x823c12795db6ce57ULL, -847, -236 },
+ { 0xc21094364dfb5637ULL, -821, -228 },
+ { 0x9096ea6f3848984fULL, -794, -220 },
+ { 0xd77485cb25823ac7ULL, -768, -212 },
+ { 0xa086cfcd97bf97f4ULL, -741, -204 },
+ { 0xef340a98172aace5ULL, -715, -196 },
+ { 0xb23867fb2a35b28eULL, -688, -188 },
+ { 0x84c8d4dfd2c63f3bULL, -661, -180 },
+ { 0xc5dd44271ad3cdbaULL, -635, -172 },
+ { 0x936b9fcebb25c996ULL, -608, -164 },
+ { 0xdbac6c247d62a584ULL, -582, -156 },
+ { 0xa3ab66580d5fdaf6ULL, -555, -148 },
+ { 0xf3e2f893dec3f126ULL, -529, -140 },
+ { 0xb5b5ada8aaff80b8ULL, -502, -132 },
+ { 0x87625f056c7c4a8bULL, -475, -124 },
+ { 0xc9bcff6034c13053ULL, -449, -116 },
+ { 0x964e858c91ba2655ULL, -422, -108 },
+ { 0xdff9772470297ebdULL, -396, -100 },
+ { 0xa6dfbd9fb8e5b88fULL, -369, -92 },
+ { 0xf8a95fcf88747d94ULL, -343, -84 },
+ { 0xb94470938fa89bcfULL, -316, -76 },
+ { 0x8a08f0f8bf0f156bULL, -289, -68 },
+ { 0xcdb02555653131b6ULL, -263, -60 },
+ { 0x993fe2c6d07b7facULL, -236, -52 },
+ { 0xe45c10c42a2b3b06ULL, -210, -44 },
+ { 0xaa242499697392d3ULL, -183, -36 },
+ { 0xfd87b5f28300ca0eULL, -157, -28 },
+ { 0xbce5086492111aebULL, -130, -20 },
+ { 0x8cbccc096f5088ccULL, -103, -12 },
+ { 0xd1b71758e219652cULL, -77, -4 },
+ { 0x9c40000000000000ULL, -50, 4 },
+ { 0xe8d4a51000000000ULL, -24, 12 },
+ { 0xad78ebc5ac620000ULL, 3, 20 },
+ { 0x813f3978f8940984ULL, 30, 28 },
+ { 0xc097ce7bc90715b3ULL, 56, 36 },
+ { 0x8f7e32ce7bea5c70ULL, 83, 44 },
+ { 0xd5d238a4abe98068ULL, 109, 52 },
+ { 0x9f4f2726179a2245ULL, 136, 60 },
+ { 0xed63a231d4c4fb27ULL, 162, 68 },
+ { 0xb0de65388cc8ada8ULL, 189, 76 },
+ { 0x83c7088e1aab65dbULL, 216, 84 },
+ { 0xc45d1df942711d9aULL, 242, 92 },
+ { 0x924d692ca61be758ULL, 269, 100 },
+ { 0xda01ee641a708deaULL, 295, 108 },
+ { 0xa26da3999aef774aULL, 322, 116 },
+ { 0xf209787bb47d6b85ULL, 348, 124 },
+ { 0xb454e4a179dd1877ULL, 375, 132 },
+ { 0x865b86925b9bc5c2ULL, 402, 140 },
+ { 0xc83553c5c8965d3dULL, 428, 148 },
+ { 0x952ab45cfa97a0b3ULL, 455, 156 },
+ { 0xde469fbd99a05fe3ULL, 481, 164 },
+ { 0xa59bc234db398c25ULL, 508, 172 },
+ { 0xf6c69a72a3989f5cULL, 534, 180 },
+ { 0xb7dcbf5354e9beceULL, 561, 188 },
+ { 0x88fcf317f22241e2ULL, 588, 196 },
+ { 0xcc20ce9bd35c78a5ULL, 614, 204 },
+ { 0x98165af37b2153dfULL, 641, 212 },
+ { 0xe2a0b5dc971f303aULL, 667, 220 },
+ { 0xa8d9d1535ce3b396ULL, 694, 228 },
+ { 0xfb9b7cd9a4a7443cULL, 720, 236 },
+ { 0xbb764c4ca7a44410ULL, 747, 244 },
+ { 0x8bab8eefb6409c1aULL, 774, 252 },
+ { 0xd01fef10a657842cULL, 800, 260 },
+ { 0x9b10a4e5e9913129ULL, 827, 268 },
+ { 0xe7109bfba19c0c9dULL, 853, 276 },
+ { 0xac2820d9623bf429ULL, 880, 284 },
+ { 0x80444b5e7aa7cf85ULL, 907, 292 },
+ { 0xbf21e44003acdd2dULL, 933, 300 },
+ { 0x8e679c2f5e44ff8fULL, 960, 308 },
+ { 0xd433179d9c8cb841ULL, 986, 316 },
+ { 0x9e19db92b4e31ba9ULL, 1013, 324 },
+ { 0xeb96bf6ebadf77d9ULL, 1039, 332 },
+ { 0xaf87023b9bf0ee6bULL, 1066, 340 }
+};
+
+/* Avoid dependence on lib math to get (int)ceil(v) */
/* Integer ceiling of v without depending on libm's ceil().
 * Truncation toward zero already equals the ceiling for negative values;
 * for positive non-integers we bump the truncated result by one. */
static int grisu3_iceil(double v)
{
    int truncated = (int)v;
    if (v <= (double)truncated) {
        return truncated;
    }
    return truncated + 1;
}
+
+static int grisu3_diy_fp_cached_pow(int exp, grisu3_diy_fp_t *p)
+{
+ int k = grisu3_iceil((exp+GRISU3_DIY_FP_FRACT_SIZE-1) * GRISU3_D_1_LOG2_10);
+ int i = (k-GRISU3_MIN_CACHED_EXP-1) / GRISU3_CACHED_EXP_STEP + 1;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+ return grisu3_diy_fp_pow_cache[i].d_exp;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_minus(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
+ grisu3_diy_fp_t d; d.f = x.f - y.f; d.e = x.e;
+ GRISU3_ASSERT(x.e == y.e && x.f >= y.f);
+ return d;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_multiply(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
+ uint64_t a, b, c, d, ac, bc, ad, bd, tmp;
+ grisu3_diy_fp_t r;
+ a = x.f >> 32; b = x.f & GRISU3_MASK32;
+ c = y.f >> 32; d = y.f & GRISU3_MASK32;
+ ac = a*c; bc = b*c;
+ ad = a*d; bd = b*d;
+ tmp = (bd >> 32) + (ad & GRISU3_MASK32) + (bc & GRISU3_MASK32);
+ tmp += 1U << 31; /* round */
+ r.f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ r.e = x.e + y.e + 64;
+ return r;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_normalize(grisu3_diy_fp_t n)
+{
+ GRISU3_ASSERT(n.f != 0);
+ while(!(n.f & 0xFFC0000000000000ULL)) { n.f <<= 10; n.e -= 10; }
+ while(!(n.f & GRISU3_D64_SIGN)) { n.f <<= 1; --n.e; }
+ return n;
+}
+
+static grisu3_diy_fp_t grisu3_cast_diy_fp_from_double(double d)
+{
+ grisu3_diy_fp_t fp;
+ uint64_t u64 = grisu3_cast_uint64_from_double(d);
+ if (!(u64 & GRISU3_D64_EXP_MASK)) { fp.f = u64 & GRISU3_D64_FRACT_MASK; fp.e = 1 - GRISU3_D64_EXP_BIAS; }
+ else { fp.f = (u64 & GRISU3_D64_FRACT_MASK) + GRISU3_D64_IMPLICIT_ONE; fp.e = (int)((u64 & GRISU3_D64_EXP_MASK) >> GRISU3_D64_EXP_POS) - GRISU3_D64_EXP_BIAS; }
+ return fp;
+}
+
/* Convert a diy_fp back into an IEEE-754 double, fitting the fraction
 * into the 52-bit significand (plus hidden bit) and handling denormals.
 * Exponents below the denormal range underflow to 0.0; overflow to
 * infinity is not handled here — callers bound the exponent first. */
static double grisu3_cast_double_from_diy_fp(grisu3_diy_fp_t n)
{
    const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
    const uint64_t frac_mask = GRISU3_D64_FRACT_MASK;
    const int denorm_exp = GRISU3_D64_DENORM_EXP;
    const int exp_bias = GRISU3_D64_EXP_BIAS;
    const int exp_pos = GRISU3_D64_EXP_POS;

    grisu3_diy_fp_t v = n;
    uint64_t e_biased;

    /* Shift right until the fraction fits in 53 bits (hidden bit + 52). */
    while (v.f > hidden_bit + frac_mask) {
        v.f >>= 1;
        ++v.e;
    }
    if (v.e < denorm_exp) {
        /* Too small even for a denormal: underflow to zero. */
        return 0.0;
    }
    /* Shift left toward the hidden bit, but never push the exponent
     * below the denormal limit. */
    while (v.e > denorm_exp && (v.f & hidden_bit) == 0) {
        v.f <<= 1;
        --v.e;
    }
    if (v.e == denorm_exp && (v.f & hidden_bit) == 0) {
        /* Denormal: biased exponent field is zero. */
        e_biased = 0;
    } else {
        e_biased = (uint64_t)(v.e + exp_bias);
    }
    /* Mask off the hidden bit and pack fraction and biased exponent. */
    return grisu3_cast_double_from_uint64((v.f & frac_mask) | (e_biased << exp_pos));
}
+
+/* pow10_cache[i] = 10^(i-1) */
+static const unsigned int grisu3_pow10_cache[] = { 0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
+
+static int grisu3_largest_pow10(uint32_t n, int n_bits, uint32_t *power)
+{
+ int guess = ((n_bits + 1) * 1233 >> 12) + 1/*skip first entry*/;
+ if (n < grisu3_pow10_cache[guess]) --guess; /* We don't have any guarantees that 2^n_bits <= n. */
+ *power = grisu3_pow10_cache[guess];
+ return guess;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_MATH_H */
diff --git a/include/flatcc/portable/grisu3_parse.h b/include/flatcc/portable/grisu3_parse.h
new file mode 100644
index 0000000..3d67c9a
--- /dev/null
+++ b/include/flatcc/portable/grisu3_parse.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Port of parts of Google Double Conversion strtod functionality
+ * but with fallback to strtod instead of a bignum implementation.
+ *
+ * Based on grisu3 math from MathGeoLib.
+ *
+ * See also grisu3_math.h comments.
+ */
+
+#ifndef GRISU3_PARSE_H
+#define GRISU3_PARSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include <stdlib.h>
+#include <limits.h>
+
+#include "grisu3_math.h"
+
+
+/*
+ * The maximum number characters a valid number may contain. The parse
+ * fails if the input length is longer but the character after max len
+ * was part of the number.
+ *
+ * The length should not be set too high because it protects against
+ * overflow in the exponent part derived from the input length.
+ */
+#define GRISU3_NUM_MAX_LEN 1000
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_parse_double_is_defined 1
+
+/*
+ * Disable to compare performance and to test diy_fp algorithm in
+ * broader range.
+ */
+#define GRISU3_PARSE_FAST_CASE
+
+/* May result in a one off error, otherwise when uncertain, fall back to strtod. */
+//#define GRISU3_PARSE_ALLOW_ERROR
+
+
+/*
+ * The dec output exponent jumps in 8, so the result is offset at most
+ * by 7 when the input is within range.
+ */
+static int grisu3_diy_fp_cached_dec_pow(int d_exp, grisu3_diy_fp_t *p)
+{
+ const int cached_offset = -GRISU3_MIN_CACHED_EXP;
+ const int d_exp_dist = GRISU3_CACHED_EXP_STEP;
+ int i, a_exp;
+
+ GRISU3_ASSERT(GRISU3_MIN_CACHED_EXP <= d_exp);
+ GRISU3_ASSERT(d_exp < GRISU3_MAX_CACHED_EXP + d_exp_dist);
+
+ i = (d_exp + cached_offset) / d_exp_dist;
+ a_exp = grisu3_diy_fp_pow_cache[i].d_exp;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+
+ GRISU3_ASSERT(a_exp <= d_exp);
+ GRISU3_ASSERT(d_exp < a_exp + d_exp_dist);
+
+ return a_exp;
+}
+
+/*
+ * Ported from google double conversion strtod using
+ * MathGeoLibs diy_fp functions for grisu3 in C.
+ *
+ * ulp_half_error is set if it was necessary to truncate non-zero
+ * trailing characters.
+ *
+ * The actual value we need to encode is:
+ *
+ * (sign ? -1 : 1) * fraction * 2 ^ (exponent - fraction_exp)
+ * where exponent is the base 10 exponent assuming the decimal point is
+ * after the first digit. fraction_exp is the base 10 magnitude of the
+ * fraction or number of significant digits - 1.
+ *
+ * If the exponent is between 0 and 22 and the fraction is encoded in
+ * the lower 53 bits (the largest bit is implicit in a double, but not
+ * in this fraction), then the value can be trivially converted to
+ * double without loss of precision. If the fraction was in fact
+ * multiplied by trailing zeroes that we didn't convert to exponent,
+ * then there are larger values than 53 bits that can also be encoded
+ * trivially - but then it is better to handle this during parsing
+ * if it is worthwhile. We do not optimize for this here, because it
+ * can be done in a simple check before calling, and because it might
+ * not be worthwhile to do at all since it very likely will fail for
+ * numbers printed to be convertible back to double without loss.
+ *
+ * Returns 0 if conversion was not exact. In that case the value is
+ * either one smaller than the correct one, or the correct one.
+ *
+ * Exponents must be range protected before calling otherwise cached
+ * powers will blow up.
+ *
+ * Google Double Conversion seems to prefer the following notion:
+ *
+ * x >= 10^309 => +Inf
+ * x <= 10^-324 => 0,
+ *
+ * max double: HUGE_VAL = 1.7976931348623157 * 10^308
+ * min double: 4.9406564584124654 * 10^-324
+ *
+ * Values just below or above min/max representable number
+ * may round towards large/small non-Inf/non-neg values.
+ *
+ * but `strtod` seems to return +/-HUGE_VAL on overflow?
+ */
+static int grisu3_diy_fp_encode_double(uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
+{
+ /*
+ * Error is measures in fractions of integers, so we scale up to get
+ * some resolution to represent error expressions.
+ */
+ const int log2_error_one = 3;
+ const int error_one = 1 << log2_error_one;
+ const int denorm_exp = GRISU3_D64_DENORM_EXP;
+ const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
+ const int diy_size = GRISU3_DIY_FP_FRACT_SIZE;
+ const int max_digits = 19;
+
+ int error = ulp_half_error ? error_one / 2 : 0;
+ int d_exp = (exponent - fraction_exp);
+ int a_exp;
+ int o_exp;
+ grisu3_diy_fp_t v = { fraction, 0 };
+ grisu3_diy_fp_t cp;
+ grisu3_diy_fp_t rounded;
+ int mag;
+ int prec;
+ int prec_bits;
+ int half_way;
+
+ /* When fractions in a double aren't stored with implicit msb fraction bit. */
+
+ /* Shift fraction to msb. */
+ v = grisu3_diy_fp_normalize(v);
+ /* The half point error moves up while the exponent moves down. */
+ error <<= -v.e;
+
+ a_exp = grisu3_diy_fp_cached_dec_pow(d_exp, &cp);
+
+ /* Interpolate between cached powers at distance 8. */
+ if (a_exp != d_exp) {
+ int adj_exp = d_exp - a_exp - 1;
+ static grisu3_diy_fp_t cp_10_lut[] = {
+ { 0xa000000000000000ULL, -60 },
+ { 0xc800000000000000ULL, -57 },
+ { 0xfa00000000000000ULL, -54 },
+ { 0x9c40000000000000ULL, -50 },
+ { 0xc350000000000000ULL, -47 },
+ { 0xf424000000000000ULL, -44 },
+ { 0x9896800000000000ULL, -40 },
+ };
+ GRISU3_ASSERT(adj_exp >= 0 && adj_exp < 7);
+ v = grisu3_diy_fp_multiply(v, cp_10_lut[adj_exp]);
+
+ /* 20 decimal digits won't always fit in 64 bit.
+ * (`fraction_exp` is one less than significant decimal
+ * digits in fraction, e.g. 1 * 10e0).
+ * If we cannot fit, introduce 1/2 ulp error
+ * (says double conversion reference impl.) */
+ if (1 + fraction_exp + adj_exp > max_digits) {
+ error += error_one / 2;
+ }
+ }
+
+ v = grisu3_diy_fp_multiply(v, cp);
+ /*
+ * Google double conversion claims that:
+ *
+ * The error introduced by a multiplication of a*b equals
+ * error_a + error_b + error_a*error_b/2^64 + 0.5
+ * Substituting a with 'input' and b with 'cached_power' we have
+ * error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
+ * error_ab = 0 or 1 / error_oner > error_a*error_b/ 2^64
+ *
+ * which in our encoding becomes:
+ * error_a = error_one/2
+ * error_ab = 1 / error_one (rounds up to 1 if error != 0, or 0 * otherwise)
+ * fixed_error = error_one/2
+ *
+ * error += error_a + fixed_error + (error ? 1 : 0)
+ *
+ * (this isn't entirely clear, but that is as close as we get).
+ */
+ error += error_one + (error ? 1 : 0);
+
+ o_exp = v.e;
+ v = grisu3_diy_fp_normalize(v);
+ /* Again, if we shift the significant bits, the error moves along. */
+ error <<= o_exp - v.e;
+
+ /*
+ * The value `v` is bounded by 2^mag which is 64 + v.e. because we
+ * just normalized it by shifting towards msb.
+ */
+ mag = diy_size + v.e;
+
+ /* The effective magnitude of the IEEE double representation. */
+ mag = mag >= diy_size + denorm_exp ? diy_size : mag <= denorm_exp ? 0 : mag - denorm_exp;
+ prec = diy_size - mag;
+ if (prec + log2_error_one >= diy_size) {
+ int e_scale = prec + log2_error_one - diy_size - 1;
+ v.f >>= e_scale;
+ v.e += e_scale;
+ error = (error >> e_scale) + 1 + error_one;
+ prec -= e_scale;
+ }
+ rounded.f = v.f >> prec;
+ rounded.e = v.e + prec;
+ prec_bits = (int)(v.f & ((uint64_t)1 << (prec - 1))) * error_one;
+ half_way = (int)((uint64_t)1 << (prec - 1)) * error_one;
+ if (prec >= half_way + error) {
+ rounded.f++;
+ /* Prevent overflow. */
+ if (rounded.f & (hidden_bit << 1)) {
+ rounded.f >>= 1;
+ rounded.e += 1;
+ }
+ }
+ *result = grisu3_cast_double_from_diy_fp(rounded);
+ return half_way - error >= prec_bits || prec_bits >= half_way + error;
+}
+
+/*
+ * `end` is unchanged if number is handled natively, or it is the result
+ * of strtod parsing in case of fallback.
+ */
/* Convert the parsed decimal components into *result, applying the sign.
 * Tries a fast exact path, then the diy_fp path, then falls back to
 * strtod on `buf` when the diy_fp result is not provably exact.
 * Returns `end` normally; on strtod fallback may return the (earlier)
 * position where strtod stopped parsing. */
static const char *grisu3_encode_double(const char *buf, const char *end, int sign, uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
{
    const int max_d_exp = GRISU3_D64_MAX_DEC_EXP;
    const int min_d_exp = GRISU3_D64_MIN_DEC_EXP;

    char *v_end;

    /* Both for user experience, and to protect internal power table lookups. */
    if (fraction == 0 || exponent < min_d_exp) {
        *result = 0.0;
        goto done;
    }
    if (exponent - 1 > max_d_exp) {
        *result = grisu3_double_infinity;
        goto done;
    }

    /*
     * `exponent` is the normalized value, fraction_exp is the size of
     * the representation in the `fraction value`, or one less than
     * number of significant digits.
     *
     * If the final value can be kept in 53 bits and we can avoid
     * division, then we can convert to double quite fast.
     *
     * ulp_half_error only happens when fraction is maxed out, so
     * fraction_exp > 22 by definition.
     *
     * fraction_exp >= 0 always.
     *
     * http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
     */


#ifdef GRISU3_PARSE_FAST_CASE
    /* Fast exact path: both the fraction and the power of ten are exactly
     * representable, so one double multiply/divide is correctly rounded. */
    if (fraction < (1ULL << 53) && exponent >= 0 && exponent <= 22) {
        double v = (double)fraction;
        /* Multiplying by 1e-k instead of dividing by 1e+k results in rounding error. */
        switch (exponent - fraction_exp) {
        case -22: v /= 1e22; break;
        case -21: v /= 1e21; break;
        case -20: v /= 1e20; break;
        case -19: v /= 1e19; break;
        case -18: v /= 1e18; break;
        case -17: v /= 1e17; break;
        case -16: v /= 1e16; break;
        case -15: v /= 1e15; break;
        case -14: v /= 1e14; break;
        case -13: v /= 1e13; break;
        case -12: v /= 1e12; break;
        case -11: v /= 1e11; break;
        case -10: v /= 1e10; break;
        case -9: v /= 1e9; break;
        case -8: v /= 1e8; break;
        case -7: v /= 1e7; break;
        case -6: v /= 1e6; break;
        case -5: v /= 1e5; break;
        case -4: v /= 1e4; break;
        case -3: v /= 1e3; break;
        case -2: v /= 1e2; break;
        case -1: v /= 1e1; break;
        case 0: break;
        case 1: v *= 1e1; break;
        case 2: v *= 1e2; break;
        case 3: v *= 1e3; break;
        case 4: v *= 1e4; break;
        case 5: v *= 1e5; break;
        case 6: v *= 1e6; break;
        case 7: v *= 1e7; break;
        case 8: v *= 1e8; break;
        case 9: v *= 1e9; break;
        case 10: v *= 1e10; break;
        case 11: v *= 1e11; break;
        case 12: v *= 1e12; break;
        case 13: v *= 1e13; break;
        case 14: v *= 1e14; break;
        case 15: v *= 1e15; break;
        case 16: v *= 1e16; break;
        case 17: v *= 1e17; break;
        case 18: v *= 1e18; break;
        case 19: v *= 1e19; break;
        case 20: v *= 1e20; break;
        case 21: v *= 1e21; break;
        case 22: v *= 1e22; break;
        }
        *result = v;
        goto done;
    }
#endif

    /* diy_fp path: non-zero return means provably correct rounding. */
    if (grisu3_diy_fp_encode_double(fraction, exponent, fraction_exp, ulp_half_error, result)) {
        goto done;
    }
#ifdef GRISU3_PARSE_ALLOW_ERROR
    /* Accept the possibly one-off diy_fp result instead of falling back. */
    goto done;
#endif
    /* Fallback: re-parse with the C library for an exact result.
     * Note strtod applies the sign itself, so skip the `done` negation. */
    *result = strtod(buf, &v_end);
    if (v_end < end) {
        return v_end;
    }
    return end;
done:
    if (sign) {
        *result = -*result;
    }
    return end;
}
+
+/*
+ * Returns buf if number wasn't matched, or null if number starts ok
+ * but contains invalid content.
+ */
/*
 * Placeholder for hexadecimal floating point parsing.
 * Returns buf if number wasn't matched, or null if number starts ok
 * but contains invalid content. Currently always reports "not matched"
 * and zeroes the result, since hex floats are not supported.
 */
static const char *grisu3_parse_hex_fp(const char *buf, const char *end, int sign, double *result)
{
    (void)end;
    (void)sign;
    *result = 0.0;
    return buf;
}
+
+/*
+ * Returns end pointer on success, or null, or buf if start is not a number.
+ * Sets result to 0.0 on error.
+ * Reads up to len + 1 bytes from buffer where len + 1 must not be a
+ * valid part of a number, but all of buf, buf + len need not be a
+ * number. Leading whitespace is NOT valid.
+ * Very small numbers are truncated to +/-0.0 and numerically very large
+ * numbers are returns as +/-infinity.
+ *
+ * A value must not end or begin with '.' (like JSON), but can have
+ * leading zeroes (unlike JSON). A single leading zero followed by
+ * an encoding symbol may or may not be interpreted as a non-decimal
+ * encoding prefix, e.g. 0x, but a leading zero followed by a digit is
+ * NOT interpreted as octal.
+ * A single leading negative sign may appear before digits, but positive
+ * sign is not allowed and space after the sign is not allowed.
+ * At most the first 1000 characters of the input is considered.
+ */
/* Parse a decimal (optionally signed, optionally exponent-suffixed)
 * number from buf[0..len], accumulating up to 19 significant digits in
 * `fraction` and the normalized base-10 exponent in `exponent`, then
 * delegating final conversion to grisu3_encode_double.
 * See the contract in the comment block above this function. */
static const char *grisu3_parse_double(const char *buf, size_t len, double *result)
{
    const char *mark, *k, *end;
    int sign = 0, esign = 0;
    uint64_t fraction = 0;
    int exponent = 0;        /* normalized base-10 exponent */
    int ee = 0;              /* explicit exponent after 'e'/'E' */
    int fraction_exp = 0;    /* significant digits captured - 1 (see below) */
    int ulp_half_error = 0;  /* set when digits were truncated */

    *result = 0.0;

    /* One past the last byte that may legally belong to the number. */
    end = buf + len + 1;

    /* Failsafe for exponent overflow. */
    if (len > GRISU3_NUM_MAX_LEN) {
        end = buf + GRISU3_NUM_MAX_LEN + 1;
    }

    if (buf == end) {
        return buf;
    }
    mark = buf;
    if (*buf == '-') {
        ++buf;
        sign = 1;
        if (buf == end) {
            /* Sign with nothing after it: malformed, not merely unmatched. */
            return 0;
        }
    }
    if (*buf == '0') {
        ++buf;
        /* | 0x20 is lower case ASCII. */
        if (buf != end && (*buf | 0x20) == 'x') {
            k = grisu3_parse_hex_fp(buf, end, sign, result);
            if (k == buf) {
                return mark;
            }
            return k;
        }
        /* Not worthwhile, except for getting the scale of integer part. */
        while (buf != end && *buf == '0') {
            ++buf;
        }
    } else {
        if (*buf < '1' || *buf > '9') {
            /*
             * If we didn't see a sign, just don't recognize it as
             * number, otherwise make it an error.
             */
            return sign ? 0 : mark;
        }
        fraction = (uint64_t)(*buf++ - '0');
    }
    k = buf;
    /*
     * We do not catch trailing zeroes when there is no decimal point.
     * This misses an opportunity for moving the exponent down into the
     * fast case. But it is unlikely to be worthwhile as it complicates
     * parsing.
     */
    while (buf != end && *buf >= '0' && *buf <= '9') {
        if (fraction >= UINT64_MAX / 10) {
            /* Fraction is full: round on the first dropped digit. */
            fraction += *buf >= '5';
            ulp_half_error = 1;
            break;
        }
        fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
    }
    fraction_exp = (int)(buf - k);
    /* Skip surplus digits. Trailing zero does not introduce error. */
    while (buf != end && *buf == '0') {
        ++exponent;
        ++buf;
    }
    if (buf != end && *buf >= '1' && *buf <= '9') {
        /* Dropped non-zero digits before the decimal point. */
        ulp_half_error = 1;
        ++exponent;
        ++buf;
        while (buf != end && *buf >= '0' && *buf <= '9') {
            ++exponent;
            ++buf;
        }
    }
    if (buf != end && *buf == '.') {
        ++buf;
        k = buf;
        /* NOTE(review): if '.' is the last byte before `end`, this
         * dereference reads one byte past the documented len + 1 limit —
         * confirm the caller's buffer contract covers it. */
        if (*buf < '0' || *buf > '9') {
            /* We don't accept numbers without leading or trailing digit. */
            return 0;
        }
        while (buf != end && *buf >= '0' && *buf <= '9') {
            if (fraction >= UINT64_MAX / 10) {
                if (!ulp_half_error) {
                    /* Round once on the first dropped digit only. */
                    fraction += *buf >= '5';
                    ulp_half_error = 1;
                }
                break;
            }
            fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
            --exponent;
        }
        fraction_exp += (int)(buf - k);
        /* Skip surplus fractional digits; zeroes are error-free. */
        while (buf != end && *buf == '0') {
            ++exponent;
            ++buf;
        }
        if (buf != end && *buf >= '1' && *buf <= '9') {
            ulp_half_error = 1;
            ++buf;
            while (buf != end && *buf >= '0' && *buf <= '9') {
                ++buf;
            }
        }
    }
    /*
     * Normalized exponent e.g: 1.23434e3 with fraction = 123434,
     * fraction_exp = 5, exponent = 3.
     * So value = fraction * 10^(exponent - fraction_exp)
     */
    exponent += fraction_exp;
    if (buf != end && (*buf | 0x20) == 'e') {
        if (end - buf < 2) {
            /* 'e' must be followed by at least one character. */
            return 0;
        }
        ++buf;
        if (*buf == '+') {
            ++buf;
            if (buf == end) {
                return 0;
            }
        } else if (*buf == '-') {
            esign = 1;
            ++buf;
            if (buf == end) {
                return 0;
            }
        }
        if (*buf < '0' || *buf > '9') {
            return 0;
        }
        ee = *buf++ - '0';
        while (buf != end && *buf >= '0' && *buf <= '9') {
            /*
             * This test impacts performance and we do not need an
             * exact value just one large enough to dominate the fraction_exp.
             * Subsequent handling maps large absolute ee to 0 or infinity.
             */
            if (ee <= 0x7fff) {
                ee = ee * 10 + *buf - '0';
            }
            ++buf;
        }
    }
    exponent = exponent + (esign ? -ee : ee);

    /*
     * Exponent is now a base 10 normalized exponent so the absolute value
     * is less than 10^(exponent + 1) for positive exponents. For
     * denormalized doubles (using 11 bit exponent 0 with a fraction
     * shifted down, extra small numbers can be achieved.
     *
     * https://en.wikipedia.org/wiki/Double-precision_floating-point_format
     *
     * 10^-324 holds the smallest normalized exponent (but not value) and
     * 10^308 holds the largest exponent. Internally our lookup table is
     * only safe to use within a range slightly larger than this.
     * Externally, a slightly larger/smaller value represents NaNs which
     * are technically also possible to store as a number.
     *
     */

    /* This also protects strtod fallback parsing. */
    if (buf == end) {
        return 0;
    }
    return grisu3_encode_double(mark, buf, sign, fraction, exponent, fraction_exp, ulp_half_error, result);
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PARSE_H */
diff --git a/include/flatcc/portable/grisu3_print.h b/include/flatcc/portable/grisu3_print.h
new file mode 100644
index 0000000..d748408
--- /dev/null
+++ b/include/flatcc/portable/grisu3_print.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Extracted from MathGeoLib.
+ *
+ * mikkelfj:
+ * - Fixed final output when printing single digit negative exponent to
+ * have leading zero (important for JSON).
+ * - Changed formatting to prefer 0.012 over 1.2-e-2.
+ *
+ * Large portions of the original grisu3.c file has been moved to
+ * grisu3_math.h, the rest is placed here.
+ *
+ * See also comments in grisu3_math.h.
+ *
+ * MatGeoLib grisu3.c comment:
+ *
+ * This file is part of an implementation of the "grisu3" double to string
+ * conversion algorithm described in the research paper
+ *
+ * "Printing Floating-Point Numbers Quickly And Accurately with Integers"
+ * by Florian Loitsch, available at
+ * http://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf
+ */
+
+#ifndef GRISU3_PRINT_H
+#define GRISU3_PRINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h> /* sprintf, only needed for fallback printing */
+#include <assert.h> /* assert */
+
+#include "grisu3_math.h"
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_print_double_is_defined 1
+
+/*
+ * Not sure we have an exact definition, but we get up to 23
+ * empirically. There is some math ensuring it does not go awry though,
+ * like 18 digits + exponent or so.
+ * This max should be safe size buffer for printing, including zero term.
+ */
+#define GRISU3_PRINT_MAX 30
+
/*
 * Grisu3 "round and weed" step: given the digits emitted so far in
 * buffer[0..len-1], decrement the last digit while the candidate stays
 * closer to the target, then report whether the result is provably the
 * correctly rounded shortest representation.
 *
 * wp_W is the distance from the (scaled) upper boundary to the scaled
 * value w (see grisu3_digit_gen's calls), delta the width of the unsafe
 * interval, rest the remainder not yet covered by emitted digits,
 * ten_kappa the weight of the last emitted digit and ulp the
 * accumulated unit-in-the-last-place error.
 *
 * Returns nonzero on success; 0 means the rounding is ambiguous and the
 * caller must fall back to a slower exact method.
 */
static int grisu3_round_weed(char *buffer, int len, uint64_t wp_W, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t ulp)
{
    uint64_t wp_Wup = wp_W - ulp;
    uint64_t wp_Wdown = wp_W + ulp;
    /* Decrement the last digit as long as that moves us closer to the target. */
    while(rest < wp_Wup && delta - rest >= ten_kappa
        && (rest + ten_kappa < wp_Wup || wp_Wup - rest >= rest + ten_kappa - wp_Wup))
    {
        --buffer[len-1];
        rest += ten_kappa;
    }
    /* If the symmetric test against the lower bound also holds, the digit
     * choice is ambiguous - signal failure so the caller can fall back. */
    if (rest < wp_Wdown && delta - rest >= ten_kappa
        && (rest + ten_kappa < wp_Wdown || wp_Wdown - rest > rest + ten_kappa - wp_Wdown))
        return 0;

    /* Success only when safely inside the interval by a margin of ulps. */
    return 2*ulp <= rest && rest <= delta - 4*ulp;
}
+
/*
 * Digit generation step of grisu3.
 *
 * low/high are the scaled lower/upper boundaries around the scaled
 * value w (all three share the same binary exponent after the caller's
 * normalization). Decimal digits are emitted into buffer until the
 * candidate lies safely within the boundary interval; *length receives
 * the digit count and *kappa the remaining decimal exponent offset.
 *
 * Returns the result of grisu3_round_weed: nonzero if the digits are
 * provably correct and shortest, 0 if the caller must fall back.
 */
static int grisu3_digit_gen(grisu3_diy_fp_t low, grisu3_diy_fp_t w, grisu3_diy_fp_t high, char *buffer, int *length, int *kappa)
{
    uint64_t unit = 1;
    /* Widen the interval by one ulp on each side to cover the rounding
     * error introduced by the cached power-of-ten multiplication. */
    grisu3_diy_fp_t too_low = { low.f - unit, low.e };
    grisu3_diy_fp_t too_high = { high.f + unit, high.e };
    grisu3_diy_fp_t unsafe_interval = grisu3_diy_fp_minus(too_high, too_low);
    /* 'one' is the value 1.0 in the current fixed-point scale. */
    grisu3_diy_fp_t one = { 1ULL << -w.e, w.e };
    /* Split too_high into integral part p1 and fractional part p2. */
    uint32_t p1 = (uint32_t)(too_high.f >> -one.e);
    uint64_t p2 = too_high.f & (one.f - 1);
    uint32_t div;
    *kappa = grisu3_largest_pow10(p1, GRISU3_DIY_FP_FRACT_SIZE + one.e, &div);
    *length = 0;

    /* Emit digits from the integral part, most significant first. */
    while(*kappa > 0)
    {
        uint64_t rest;
        char digit = (char)(p1 / div);
        buffer[*length] = '0' + digit;
        ++*length;
        p1 %= div;
        --*kappa;
        rest = ((uint64_t)p1 << -one.e) + p2;
        if (rest < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f, unsafe_interval.f, rest, (uint64_t)div << -one.e, unit);
        div /= 10;
    }

    /* Emit digits from the fractional part until inside the interval. */
    for(;;)
    {
        char digit;
        p2 *= 10;
        unit *= 10;
        unsafe_interval.f *= 10;
        /* Integer division by one. */
        digit = (char)(p2 >> -one.e);
        buffer[*length] = '0' + digit;
        ++*length;
        p2 &= one.f - 1; /* Modulo by one. */
        --*kappa;
        if (p2 < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f * unit, unsafe_interval.f, p2, one.f, unit);
    }
}
+
/*
 * Core grisu3 conversion of a strictly positive, finite double to its
 * shortest correct decimal digit string.
 *
 * On return, buffer holds *length decimal digits (not zero terminated)
 * and *d_exp the base-10 exponent to apply to them. Returns nonzero on
 * success; 0 when the round/weed step cannot prove correctness, in
 * which case the caller falls back to sprintf formatting (see
 * grisu3_print_double).
 */
static int grisu3(double v, char *buffer, int *length, int *d_exp)
{
    int mk, kappa, success;
    grisu3_diy_fp_t dfp = grisu3_cast_diy_fp_from_double(v);
    grisu3_diy_fp_t w = grisu3_diy_fp_normalize(dfp);

    /* normalize boundaries */
    grisu3_diy_fp_t t = { (dfp.f << 1) + 1, dfp.e - 1 };
    grisu3_diy_fp_t b_plus = grisu3_diy_fp_normalize(t);
    grisu3_diy_fp_t b_minus;
    grisu3_diy_fp_t c_mk; /* Cached power of ten: 10^-k */
    uint64_t u64 = grisu3_cast_uint64_from_double(v);
    assert(v > 0 && v <= 1.7976931348623157e308); /* Grisu only handles strictly positive finite numbers. */
    if (!(u64 & GRISU3_D64_FRACT_MASK) && (u64 & GRISU3_D64_EXP_MASK) != 0) { b_minus.f = (dfp.f << 2) - 1; b_minus.e = dfp.e - 2;} /* lower boundary is closer? */
    else { b_minus.f = (dfp.f << 1) - 1; b_minus.e = dfp.e - 1; }
    b_minus.f = b_minus.f << (b_minus.e - b_plus.e);
    b_minus.e = b_plus.e;

    /* Pick a cached power of ten that scales w into the target exponent range. */
    mk = grisu3_diy_fp_cached_pow(GRISU3_MIN_TARGET_EXP - GRISU3_DIY_FP_FRACT_SIZE - w.e, &c_mk);

    /* Scale the value and both boundaries by the same power of ten. */
    w = grisu3_diy_fp_multiply(w, c_mk);
    b_minus = grisu3_diy_fp_multiply(b_minus, c_mk);
    b_plus = grisu3_diy_fp_multiply(b_plus, c_mk);

    success = grisu3_digit_gen(b_minus, w, b_plus, buffer, length, &kappa);
    *d_exp = kappa - mk;
    return success;
}
+
/*
 * Writes val to str as decimal text with an optional leading minus,
 * zero terminates, and returns the number of characters written
 * (sign included, terminator excluded).
 */
static int grisu3_i_to_str(int val, char *str)
{
    char digits[12];
    int ndigits = 0;
    int written = 0;

    if (val < 0) {
        str[written++] = '-';
        val = -val;
    }
    /* Collect digits least significant first. */
    do {
        digits[ndigits++] = (char)('0' + val % 10);
        val /= 10;
    } while (val != 0);
    /* Copy them out in the correct order. */
    while (ndigits > 0) {
        str[written++] = digits[--ndigits];
    }
    str[written] = '\0';
    return written;
}
+
/*
 * Formats v (the raw IEEE-754 bits of a NaN) as "NaN(<16 hex digits>)".
 * Writes exactly 21 characters plus a zero terminator and returns 21.
 */
static int grisu3_print_nan(uint64_t v, char *dst)
{
    static const char xdigits[] = "0123456789ABCDEF";
    int pos;

    dst[0] = 'N';
    dst[1] = 'a';
    dst[2] = 'N';
    dst[3] = '(';
    /* Emit the 64-bit payload as uppercase hex, most significant nibble first. */
    for (pos = 19; pos >= 4; --pos) {
        dst[pos] = xdigits[v & 0x0F];
        v >>= 4;
    }
    dst[20] = ')';
    dst[21] = '\0';
    return 21;
}
+
/*
 * Prints double v to dst as the shortest round-trippable decimal
 * string, using grisu3 with a sprintf("%.17g") fallback.
 *
 * dst must hold at least GRISU3_PRINT_MAX bytes. The output is zero
 * terminated and its length (excluding the terminator) is returned.
 * NaNs print as "NaN(<16 hex digits>)", infinities as [-]inf.
 *
 * NOTE(review): the body uses memmove; <string.h> is not included by
 * this header directly - presumably provided via grisu3_math.h, confirm.
 */
static int grisu3_print_double(double v, char *dst)
{
    int d_exp, len, success, decimals, i;
    uint64_t u64 = grisu3_cast_uint64_from_double(v);
    char *s2 = dst;
    assert(dst);

    /* Prehandle NaNs */
    if ((u64 << 1) > 0xFFE0000000000000ULL) return grisu3_print_nan(u64, dst);
    /* Prehandle negative values. */
    if ((u64 & GRISU3_D64_SIGN) != 0) { *s2++ = '-'; v = -v; u64 ^= GRISU3_D64_SIGN; }
    /* Prehandle zero. */
    if (!u64) { *s2++ = '0'; *s2 = '\0'; return (int)(s2 - dst); }
    /* Prehandle infinity. */
    if (u64 == GRISU3_D64_EXP_MASK) { *s2++ = 'i'; *s2++ = 'n'; *s2++ = 'f'; *s2 = '\0'; return (int)(s2 - dst); }

    success = grisu3(v, s2, &len, &d_exp);
    /* If grisu3 was not able to convert the number to a string, then use old sprintf (suboptimal). */
    if (!success) return sprintf(s2, "%.17g", v) + (int)(s2 - dst);

    /* We now have an integer string of form "151324135" and a base-10 exponent for that number. */
    /* Next, decide the best presentation for that string by whether to use a decimal point, or the scientific exponent notation 'e'. */
    /* We don't pick the absolute shortest representation, but pick a balance between readability and shortness, e.g. */
    /* 1.545056189557677e-308 could be represented in a shorter form */
    /* 1545056189557677e-323 but that would be somewhat unreadable. */
    decimals = GRISU3_MIN(-d_exp, GRISU3_MAX(1, len-1));

    /* mikkelfj:
     * fix zero prefix .1 => 0.1, important for JSON export.
     * prefer unscientific notation at same length:
     * -1.2345e-4 over -1.00012345,
     * -1.0012345 over -1.2345e-3
     */
    if (d_exp < 0 && (len + d_exp) > -3 && len <= -d_exp)
    {
        /* mikkelfj: fix zero prefix .1 => 0.1, and short exponents 1.3e-2 => 0.013. */
        memmove(s2 + 2 - d_exp - len, s2, (size_t)len);
        s2[0] = '0';
        s2[1] = '.';
        for (i = 2; i < 2-d_exp-len; ++i) s2[i] = '0';
        /* i ended at max(2, 2 - d_exp - len), so len becomes the full
         * "0.<zeros><digits>" length. */
        len += i;
    }
    else if (d_exp < 0 && len > 1) /* Add decimal point? */
    {
        for(i = 0; i < decimals; ++i) s2[len-i] = s2[len-i-1];
        s2[len++ - decimals] = '.';
        d_exp += decimals;
        /* Need scientific notation as well? */
        if (d_exp != 0) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
    }
    /* Add scientific notation? */
    else if (d_exp < 0 || d_exp > 2) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
    /* Add zeroes instead of scientific notation? */
    else if (d_exp > 0) { while(d_exp-- > 0) s2[len++] = '0'; }
    s2[len] = '\0'; /* grisu3 doesn't null terminate, so ensure termination. */
    return (int)(s2+len-dst);
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PRINT_H */
diff --git a/include/flatcc/portable/include/README b/include/flatcc/portable/include/README
new file mode 100644
index 0000000..9f991fc
--- /dev/null
+++ b/include/flatcc/portable/include/README
@@ -0,0 +1,4 @@
+This directory holds subdirectories. It can be added to the include path
+such that standard and OS specific header includes like <stdint.h>,
+<stdbool.h> and <endian.h> can succeed without explicitly including
+special headers.
diff --git a/include/flatcc/portable/include/linux/endian.h b/include/flatcc/portable/include/linux/endian.h
new file mode 100644
index 0000000..38fd1fb
--- /dev/null
+++ b/include/flatcc/portable/include/linux/endian.h
@@ -0,0 +1 @@
+#include "portable/pendian.h"
diff --git a/include/flatcc/portable/include/std/inttypes.h b/include/flatcc/portable/include/std/inttypes.h
new file mode 100644
index 0000000..99b699d
--- /dev/null
+++ b/include/flatcc/portable/include/std/inttypes.h
@@ -0,0 +1 @@
+#include "portable/inttypes.h"
diff --git a/include/flatcc/portable/include/std/stdalign.h b/include/flatcc/portable/include/std/stdalign.h
new file mode 100644
index 0000000..6d51281
--- /dev/null
+++ b/include/flatcc/portable/include/std/stdalign.h
@@ -0,0 +1 @@
+#include "portable/pstdalign.h"
diff --git a/include/flatcc/portable/include/std/stdbool.h b/include/flatcc/portable/include/std/stdbool.h
new file mode 100644
index 0000000..12eb4c7
--- /dev/null
+++ b/include/flatcc/portable/include/std/stdbool.h
@@ -0,0 +1 @@
+#include "portable/pstdbool.h"
diff --git a/include/flatcc/portable/include/std/stdint.h b/include/flatcc/portable/include/std/stdint.h
new file mode 100644
index 0000000..0364471
--- /dev/null
+++ b/include/flatcc/portable/include/std/stdint.h
@@ -0,0 +1 @@
+#include "portable/pstdint.h"
diff --git a/include/flatcc/portable/paligned_alloc.h b/include/flatcc/portable/paligned_alloc.h
new file mode 100644
index 0000000..70b00b9
--- /dev/null
+++ b/include/flatcc/portable/paligned_alloc.h
@@ -0,0 +1,212 @@
+#ifndef PALIGNED_ALLOC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * NOTE: MSVC in general has no aligned alloc function that is
+ * compatible with free and it is not trivial to implement a version
+ * which is. Therefore, to remain portable, end user code needs to
+ * use `aligned_free` which is not part of C11 but defined in this header.
+ *
+ * glibc only provides aligned_alloc when _ISOC11_SOURCE is defined, but
+ * MingW does not support aligned_alloc despite this; it uses
+ * _aligned_malloc as MSVC does.
+ *
+ * The same issue is present on some Unix systems not providing
+ * posix_memalign.
+ *
+ * Note that clang and gcc with -std=c11 or -std=c99 will not define
+ * _POSIX_C_SOURCE and thus posix_memalign cannot be detected but
+ * aligned_alloc is not necessarily available either. We assume
+ * that clang always has posix_memalign although it is not strictly
+ * correct. For gcc, use -std=gnu99 or -std=gnu11 or don't use -std in
+ * order to enable posix_memalign, or live with the fallback until using
+ * a system where glibc has a version that supports aligned_alloc.
+ *
+ * For C11 compliant compilers and compilers with posix_memalign,
+ * it is valid to use free instead of aligned_free with the above
+ * caveats.
+ */
+
+#include <stdlib.h>
+
+/*
+ * Define this to see which version is used so the fallback is not
+ * enganged unnecessarily:
+ *
+ * #define PORTABLE_DEBUG_ALIGNED_ALLOC
+ */
+
+#if 0
+#define PORTABLE_DEBUG_ALIGNED_ALLOC
+#endif
+
+#if !defined(PORTABLE_C11_ALIGNED_ALLOC)
+
+/*
+ * PORTABLE_C11_ALIGNED_ALLOC = 1
+ * indicates that the system has builtin aligned_alloc
+ * If it doesn't, the section after detection provides an implementation.
+ */
+#if defined (__MINGW32__)
+/* MingW does not provide aligned_alloc despite defining _ISOC11_SOURCE */
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (_ISOC11_SOURCE)
+/* glibc aligned_alloc detection, but MingW is not truthful */
+#define PORTABLE_C11_ALIGNED_ALLOC 1
+#elif defined (__GLIBC__)
+/* aligned_alloc is not available in glibc just because __STDC_VERSION__ >= 201112L. */
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (__clang__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (__APPLE__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined(__IBMC__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L)
+#define PORTABLE_C11_ALIGNED_ALLOC 1
+#else
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#endif
+
+#endif /* PORTABLE_C11_ALIGNED_ALLOC */
+
+/* https://linux.die.net/man/3/posix_memalign */
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_GNU_SOURCE)
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+
+/* https://forum.kde.org/viewtopic.php?p=66274 */
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_XOPEN_SOURCE)
+#if _XOPEN_SOURCE >= 600
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_POSIX_C_SOURCE)
+#if _POSIX_C_SOURCE >= 200112L
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(__clang__)
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN)
+#define PORTABLE_POSIX_MEMALIGN 0
+#endif
+
+/* https://forum.kde.org/viewtopic.php?p=66274 */
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L)
+/* C11 or newer */
+#include <stdalign.h>
+#endif
+
+/* C11 or newer */
+#if !defined(aligned_alloc) && !defined(__aligned_alloc_is_defined)
+
+#if PORTABLE_C11_ALIGNED_ALLOC
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: C11_ALIGNED_ALLOC configured"
+#endif
+#elif defined(_MSC_VER) || defined(__MINGW32__)
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: Windows _aligned_malloc configured"
+#endif
+
+/* Aligned _aligned_malloc is not compatible with free. */
+#define aligned_alloc(alignment, size) _aligned_malloc(size, alignment)
+#define aligned_free(p) _aligned_free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#elif PORTABLE_POSIX_MEMALIGN
+
+#if defined(__GNUC__)
+#if !defined(__GNUCC__)
+extern int posix_memalign (void **, size_t, size_t);
+#elif __GNUCC__ < 5
+extern int posix_memalign (void **, size_t, size_t);
+#endif
+#endif
+
/*
 * aligned_alloc replacement built on posix_memalign.
 * posix_memalign requires the alignment to be at least (and a multiple
 * of) sizeof(void *), so smaller alignments are rounded up here.
 * Returns null on failure; the result may be released with free().
 */
static inline void *__portable_aligned_alloc(size_t alignment, size_t size)
{
    int err;
    void *p = 0;

    if (alignment < sizeof(void *)) {
        alignment = sizeof(void *);
    }
    err = posix_memalign(&p, alignment, size);
    /* Defensive: posix_memalign leaves p untouched on error per POSIX,
     * but free any spurious result rather than leak it. */
    if (err && p) {
        free(p);
        p = 0;
    }
    return p;
}
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: POSIX_MEMALIGN configured"
+#endif
+
+#define aligned_alloc(alignment, size) __portable_aligned_alloc(alignment, size)
+#define aligned_free(p) free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#else
+
/*
 * Fallback aligned allocator for platforms with neither C11
 * aligned_alloc nor posix_memalign.
 *
 * Over-allocates so the returned pointer can be rounded up to the
 * requested alignment (assumed to be a power of two), and stashes the
 * raw malloc pointer in the word immediately before the returned block
 * so __portable_aligned_free can recover it. Memory from this function
 * MUST be released with aligned_free / __portable_aligned_free, never
 * plain free.
 *
 * Fix: the malloc result is now checked - the previous version adjusted
 * and wrote through a null-derived pointer on allocation failure, which
 * is undefined behavior. Returns null on failure.
 */
static inline void *__portable_aligned_alloc(size_t alignment, size_t size)
{
    char *raw;
    void *buf;
    size_t total_size;

    /* The back-pointer slot requires at least pointer alignment. */
    if (alignment < sizeof(void *)) {
        alignment = sizeof(void *);
    }
    total_size = size + alignment - 1 + sizeof(void *);
    raw = (char *)malloc(total_size);
    if (!raw) {
        return 0;
    }
    /* Round up past the back-pointer slot to the requested alignment. */
    buf = raw + alignment - 1 + sizeof(void *);
    buf = (void *)(((size_t)buf) & ~(alignment - 1));
    /* Remember the raw pointer just below the aligned block. */
    ((void **)buf)[-1] = raw;
    return buf;
}

/*
 * Releases memory obtained from __portable_aligned_alloc by recovering
 * the raw malloc pointer stored just before the aligned block.
 * Accepts null as a no-op, like free().
 */
static inline void __portable_aligned_free(void *p)
{
    char *raw;

    if (p) {
        raw = (char*)((void **)p)[-1];
        free(raw);
    }
}
+
+#define aligned_alloc(alignment, size) __portable_aligned_alloc(alignment, size)
+#define aligned_free(p) __portable_aligned_free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: aligned_alloc malloc fallback configured"
+#endif
+
+#endif
+
+#endif /* aligned_alloc */
+
+#if !defined(aligned_free) && !defined(__aligned_free_is_defined)
+#define aligned_free(p) free(p)
+#define __aligned_free_is_defined 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PALIGNED_ALLOC_H */
diff --git a/include/flatcc/portable/pattributes.h b/include/flatcc/portable/pattributes.h
new file mode 100644
index 0000000..9240fa3
--- /dev/null
+++ b/include/flatcc/portable/pattributes.h
@@ -0,0 +1,84 @@
+
+/*
+ * C23 introduces an attribute syntax `[[<attribute>]]`. Prior to that
+ * other non-standard syntaxes such as `__attribute__((<attribute>))`
+ * and `__declspec(<attribute>)` have been supported by some compiler
+ * versions.
+ *
+ * See also:
+ * https://en.cppreference.com/w/c/language/attributes
+ *
+ * There is no portable way to use C23 attributes in older C standards
+ * so in order to use these portably, some macro name needs to be
+ * defined for each attribute that either maps to the older supported
+ * syntax, or ignores the attribute as appropriate.
+ *
+ * The Linux kernel defines certain attributes as macros, such as
+ * `fallthrough`. When adding attributes it seems reasonable to follow
+ * the Linux conventions in lack of any official standard. However, it
+ * is not the intention that this file should mirror the Linux
+ * attributes 1 to 1.
+ *
+ * See also:
+ * https://github.com/torvalds/linux/blob/master/include/linux/compiler_attributes.h
+ *
+ * There is a risk that exposed attribute names may lead to name
+ * conflicts. A conflicting name can be undefined and if necessary used
+ * using `pattribute(<attribute>)`. All attributes can be hidden by
+ * defining `PORTABLE_EXPOSE_ATTRIBUTES=0` in which case
+ * `pattribute(<attribute>)` can still be used and then if a specific
+ * attribute name still needs to be exposed, it can be defined manually
+ * like `#define fallthrough pattribute(fallthrough)`.
+ */
+
+
+#ifndef PATTRIBUTES_H
+#define PATTRIBUTES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef PORTABLE_EXPOSE_ATTRIBUTES
+#define PORTABLE_EXPOSE_ATTRIBUTES 0
+#endif
+
+#ifdef __has_c_attribute
+# define PORTABLE_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define PORTABLE_HAS_C_ATTRIBUTE(x) 0
+#endif
+
+#ifdef __has_attribute
+# define PORTABLE_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define PORTABLE_HAS_ATTRIBUTE(x) 0
+#endif
+
+
+/* https://en.cppreference.com/w/c/language/attributes/fallthrough */
+#if PORTABLE_HAS_C_ATTRIBUTE(__fallthrough__)
+# define pattribute_fallthrough [[__fallthrough__]]
+#elif PORTABLE_HAS_ATTRIBUTE(__fallthrough__)
+# define pattribute_fallthrough __attribute__((__fallthrough__))
+#else
+# define pattribute_fallthrough ((void)0)
+#endif
+
+
+#define pattribute(x) pattribute_##x
+
+#if PORTABLE_EXPOSE_ATTRIBUTES
+
+#ifndef fallthrough
+# define fallthrough pattribute(fallthrough)
+#endif
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PATTRIBUTES_H */
diff --git a/include/flatcc/portable/pbase64.h b/include/flatcc/portable/pbase64.h
new file mode 100644
index 0000000..a6812c4
--- /dev/null
+++ b/include/flatcc/portable/pbase64.h
@@ -0,0 +1,448 @@
+#ifndef PBASE64_H
+#define PBASE64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+/* Guarded to allow inclusion of pstdint.h first, if stdint.h is not supported. */
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define BASE64_EOK 0
+/* 0 or more full blocks decoded, remaining content may be parsed with fresh buffer. */
+#define BASE64_EMORE 1
+/* The `src_len` argument is required when encoding. */
+#define BASE64_EARGS 2
+/* Unsupported mode, or modifier not supported by mode when encoding. */
+#define BASE64_EMODE 3
+/* Decoding ends at invalid tail length - either by source length or by non-alphabet symbol. */
+#define BASE64_ETAIL 4
+/* Decoding ends at valid tail length but last byte has non-zero bits where it shouldn't have. */
+#define BASE64_EDIRTY 5
+
+static inline const char *base64_strerror(int err);
+
+/* All codecs are URL safe. Only Crockford allows for non-canonical decoding. */
+enum {
+ /* Most common base64 codec, but not url friendly. */
+ base64_mode_rfc4648 = 0,
+
+ /* URL safe version, '+' -> '-', '/' -> '_'. */
+ base64_mode_url = 1,
+
+ /*
+ * Skip ' ', '\r', and '\n' - we do not allow tab because common
+ * uses of base64 such as PEM do not allow tab.
+ */
+ base64_dec_modifier_skipspace = 32,
+
+ /* Padding is excluded by default. Not allowed for zbase64. */
+ base64_enc_modifier_padding = 128,
+
+ /* For internal use or to decide codec of mode. */
+ base64_modifier_mask = 32 + 64 + 128,
+};
+
+/* Encoded size with or without padding. */
+static inline size_t base64_encoded_size(size_t len, int mode);
+
+/*
+ * Decoded size assuming no padding.
+ * If `len` does include padding, the actual size may be less
+ * when decoding, but never more.
+ */
+static inline size_t base64_decoded_size(size_t len);
+
+/*
+ * `dst` must hold ceil(len * 4 / 3) bytes.
+ * `src_len` points to length of source and is updated with length of
+ * parse on both success and failure. If `dst_len` is not null
+ * it is used to store the length of the decoded output on both
+ * success and failure.
+ * Note: unlike related base32 codecs there is no `hyphen` parameter.
+ * `mode` selects the encoding alphabet.
+ * Returns 0 on success.
+ *
+ * A terminal space can be added with `dst[dst_len++] = ' '` after the
+ * encode call. All non-alphabet can be used as terminators except the
+ * padding character '='. The following characters will work as
+ * terminator for all modes: { '\0', '\n', ' ', '\t' }. A terminator is
+ * optional when the source length is given to the decoder. Note that
+ * crockford also reserves a few extra characters for checksum but the
+ * checksum must be separate from the main buffer and is not supported
+ * by this library.
+ */
+static inline int base64_encode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode);
+
+/*
+ * Decodes according to mode while ignoring encoding modifiers.
+ * `src_len` and `dst_len` are optional pointers. If `src_len` is set it
+ * must contain the length of the input, otherwise the input must be
+ * terminated with a non-alphabet character or valid padding (a single
+ * padding character is accepted) - if the src_len output is needed but
+ * not the input due to guaranteed termination, then set it to
+ * (size_t)-1. `dst_len` must contain length of output buffer if present
+ * and parse will fail with BASE64_EMORE after decoding a block multiple
+ * if dst_len is exhausted - the parse can thus be resumed after
+ * draining destination. `src_len` and `dst_len` are updated with parsed
+ * and decoded length, when present, on both success and failure.
+ * Returns 0 on success. Invalid characters are not considered errors -
+ * they simply terminate the parse, however, if the termination is not
+ * at a block multiple or a valid partial block length then BASE64_ETAIL
+ * without output holding the last full block, if any. BASE64_ETAIL is also
+ * returned if the a valid length holds non-zero unused tail bits.
+ */
+static inline int base64_decode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode);
+
+static inline const char *base64_strerror(int err)
+{
+ switch (err) {
+ case BASE64_EOK: return "ok";
+ case BASE64_EARGS: return "invalid argument";
+ case BASE64_EMODE: return "invalid mode";
+ case BASE64_EMORE: return "destination full";
+ case BASE64_ETAIL: return "invalid tail length";
+ case BASE64_EDIRTY: return "invalid tail content";
+ default: return "unknown error";
+ }
+}
+
+static inline size_t base64_encoded_size(size_t len, int mode)
+{
+ size_t k = len % 3;
+ size_t n = (len * 4 / 3 + 3) & ~(size_t)3;
+ int pad = mode & base64_enc_modifier_padding;
+
+ if (!pad) {
+ switch (k) {
+ case 2:
+ n -= 1;
+ break;
+ case 1:
+ n -= 2;
+ break;
+ default:
+ break;
+ }
+ }
+ return n;
+}
+
/*
 * Returns the decoded size of `len` unpadded base64 characters.
 * A tail of 1 is not valid and contributes nothing.
 */
static inline size_t base64_decoded_size(size_t len)
{
    size_t n = (len / 4) * 3;
    size_t rem = len % 4;

    if (rem == 3) {
        return n + 2;
    }
    if (rem == 2) {
        return n + 1;
    }
    /* rem == 1 is not valid without padding; rem == 0 is a full block. */
    return n;
}
+
+static inline int base64_encode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode)
+{
+ const uint8_t *rfc4648_alphabet = (const uint8_t *)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ const uint8_t *url_alphabet = (const uint8_t *)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+ const uint8_t *T;
+ uint8_t *dst_base = dst;
+ int pad = mode & base64_enc_modifier_padding;
+ size_t len = 0;
+ int ret = BASE64_EMODE;
+
+ if (!src_len) {
+ ret = BASE64_EARGS;
+ goto done;
+ }
+ len = *src_len;
+ mode = mode & ~base64_modifier_mask;
+ switch (mode) {
+ case base64_mode_rfc4648:
+ T = rfc4648_alphabet;
+ break;
+ case base64_mode_url:
+ T = url_alphabet;
+ break;
+ default:
+ /* Invalid mode. */
+ goto done;
+ }
+
+ ret = BASE64_EOK;
+
+ /* Encodes 4 destination bytes from 3 source bytes. */
+ while (len >= 3) {
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30) | (src[1] >> 4)];
+ dst[2] = T[((src[1] << 2) & 0x3c) | (src[2] >> 6)];
+ dst[3] = T[((src[2] & 0x3f))];
+ len -= 3;
+ dst += 4;
+ src += 3;
+ }
+ /* Encodes 8 destination bytes from 1 to 4 source bytes, if any. */
+ switch(len) {
+ case 2:
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30) | (src[1] >> 4)];
+ dst[2] = T[((src[1] << 2) & 0x3c)];
+ dst += 3;
+ if (pad) {
+ *dst++ = '=';
+ }
+ break;
+ case 1:
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30)];
+ dst += 2;
+ if (pad) {
+ *dst++ = '=';
+ *dst++ = '=';
+ }
+ break;
+ default:
+ pad = 0;
+ break;
+ }
+ len = 0;
+done:
+ if (dst_len) {
+ *dst_len = (size_t)(dst - dst_base);
+ }
+ if (src_len) {
+ *src_len -= len;
+ }
+ return ret;
+}
+
/*
 * Decodes base64 from src into dst according to mode (see the API
 * comment above base64_decode's prototype for the full contract).
 * Decoding proceeds in 4-character blocks; an invalid character, the
 * end of input, or padding terminates the parse and routes through the
 * `tail` handler which validates the partial-block length and checks
 * that unused tail bits are zero.
 */
static inline int base64_decode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode)
{
    /* Sentinel codes stored in the lookup tables below. */
    static const uint8_t cinvalid = 64;
    static const uint8_t cignore = 65;
    static const uint8_t cpadding = 66;

    /*
     * 0..63: 6-bit encoded value.
     * 64: flags non-alphabet symbols.
     * 65: codes for ignored symbols.
     * 66: codes for pad symbol '='.
     * All codecs consider padding an optional terminator and if present
     * consumes as many pad bytes as possible up to block termination,
     * but does not fail if a block is not full.
     *
     * We do not currently have any ignored characters but we might
     * add spaces as per MIME spec, but assuming spaces only happen
     * at block boundaries this is probably better handled by repeated
     * parsing.
     */
    static const uint8_t base64rfc4648_decode[256] = {
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
        52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
        64,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
        15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,
        64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
    };

    static const uint8_t base64url_decode[256] = {
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64,
        52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
        64,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
        15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,
        64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
    };

    static const uint8_t base64rfc4648_decode_skipspace[256] = {
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 64, 64, 65, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        65, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
        52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
        64,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
        15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,
        64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
    };

    static const uint8_t base64url_decode_skipspace[256] = {
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 64, 64, 65, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        65, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64,
        52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
        64,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
        15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,
        64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
        41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
    };

    int ret = BASE64_EOK;
    size_t i, k;
    uint8_t hold[4];
    uint8_t *dst_base = dst;
    size_t limit = (size_t)-1;
    /* len: unconsumed input; mark: len at the last completed block, so
     * *src_len -= mark yields the parsed length on exit. */
    size_t len = (size_t)-1, mark;
    const uint8_t *T = base64rfc4648_decode;
    int skipspace = mode & base64_dec_modifier_skipspace;

    if (src_len) {
        len = *src_len;
    }
    mark = len;
    mode = mode & ~base64_modifier_mask;
    switch (mode) {
    case base64_mode_rfc4648:
        T = skipspace ? base64rfc4648_decode_skipspace : base64rfc4648_decode;
        break;
    case base64_mode_url:
        T = skipspace ? base64url_decode_skipspace : base64url_decode;
        break;
    default:
        ret = BASE64_EMODE;
        goto done;
    }

    if (dst_len && *dst_len > 0) {
        limit = *dst_len;
    }
    while(limit > 0) {
        /* Gather 4 symbol values into hold, skipping ignored symbols. */
        for (i = 0; i < 4; ++i) {
            if (len == i) {
                k = i;
                len -= i;
                goto tail;
            }
            if ((hold[i] = T[src[i]]) >= cinvalid) {
                if (hold[i] == cignore) {
                    ++src;
                    --len;
                    --i;
                    continue;
                }
                k = i;
                /* Strip padding and ignore hyphen in padding, if present. */
                if (hold[i] == cpadding) {
                    ++i;
                    while (i < len && i < 8) {
                        if (T[src[i]] != cpadding && T[src[i]] != cignore) {
                            break;
                        }
                        ++i;
                    }
                }
                len -= i;
                goto tail;
            }
        }
        if (limit < 3) {
            goto more;
        }
        /* Unpack one full block: 4 x 6 bits into 3 bytes. */
        dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
        dst[1] = (uint8_t)((hold[1] << 4) | (hold[2] >> 2));
        dst[2] = (uint8_t)((hold[2] << 6) | (hold[3]));
        dst += 3;
        src += 4;
        limit -= 3;
        len -= 4;
        mark = len;
    }
done:
    if (dst_len) {
        *dst_len = (size_t)(dst - dst_base);
    }
    if (src_len) {
        *src_len -= mark;
    }
    return ret;

tail:
    /* Validate the partial block of k symbols and flush its bytes. */
    switch (k) {
    case 0:
        break;
    case 2:
        /* 12 bits carry 1 byte; the low 4 bits must be zero. */
        if ((hold[1] << 4) & 0xff) {
            goto dirty;
        }
        if (limit < 1) {
            goto more;
        }
        dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
        dst += 1;
        break;
    case 3:
        /* 18 bits carry 2 bytes; the low 2 bits must be zero. */
        if ((hold[2] << 6) & 0xff) {
            goto dirty;
        }
        if (limit < 2) {
            goto more;
        }
        dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
        dst[1] = (uint8_t)((hold[1] << 4) | (hold[2] >> 2));
        dst += 2;
        break;
    default:
        /* A single leftover symbol (k == 1) is never a valid tail. */
        ret = BASE64_ETAIL;
        goto done;
    }
    mark = len;
    goto done;
dirty:
    ret = BASE64_EDIRTY;
    goto done;
more:
    ret = BASE64_EMORE;
    goto done;
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PBASE64_H */
diff --git a/include/flatcc/portable/pcrt.h b/include/flatcc/portable/pcrt.h
new file mode 100644
index 0000000..0226be6
--- /dev/null
+++ b/include/flatcc/portable/pcrt.h
@@ -0,0 +1,48 @@
+#ifndef PCRT_H
+#define PCRT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Assertions and pointer violations in debug mode may trigger a dialog
+ * on Windows. When running headless this is not helpful, but
+ * unfortunately it cannot be disabled with a compiler option so code
+ * must be injected into the runtime early in the main function.
+ * A call to the provided `init_headless_crt()` macro does this in
+ * a portable manner.
+ *
+ * See also:
+ * https://stackoverflow.com/questions/13943665/how-can-i-disable-the-debug-assertion-dialog-on-windows
+ */
+
+#if defined(_WIN32)
+
+#include <crtdbg.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static int _portable_msvc_headless_report_hook(int reportType, char *message, int *returnValue)
+{
+ fprintf(stderr, "CRT[%d]: %s\n", reportType, message);
+ *returnValue = 1;
+ exit(1);
+ return 1;
+}
+
+#define init_headless_crt() _CrtSetReportHook(_portable_msvc_headless_report_hook)
+
+#else
+
+#define init_headless_crt() ((void)0)
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PCRT_H */
diff --git a/include/flatcc/portable/pdiagnostic.h b/include/flatcc/portable/pdiagnostic.h
new file mode 100644
index 0000000..b5294f3
--- /dev/null
+++ b/include/flatcc/portable/pdiagnostic.h
@@ -0,0 +1,85 @@
+ /* There is intentionally no include guard in this file. */
+
+
+/*
+ * Usage: optionally disable any of these before including.
+ *
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+ * #define PDIAGNOSTIC_IGNORE_UNUSED // all of the above
+ *
+ * #include "pdiagnostic.h"
+ *
+ * Alternatively use #include "pdiagnostic_push/pop.h"
+ */
+
+#ifdef _MSC_VER
+#pragma warning(disable: 4668) /* preprocessor name not defined */
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_MSVC) && defined(_MSC_VER)
+#define PDIAGNOSTIC_AWARE_MSVC 1
+#elif !defined(PDIAGNOSTIC_AWARE_MSVC)
+#define PDIAGNOSTIC_AWARE_MSVC 0
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_CLANG) && defined(__clang__)
+#define PDIAGNOSTIC_AWARE_CLANG 1
+#elif !defined(PDIAGNOSTIC_AWARE_CLANG)
+#define PDIAGNOSTIC_AWARE_CLANG 0
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_GCC) && defined(__GNUC__) && !defined(__clang__)
+/* Can disable some warnings even if push is not available (gcc-4.2 vs gcc-4.7) */
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
+#define PDIAGNOSTIC_AWARE_GCC 1
+#endif
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_GCC)
+#define PDIAGNOSTIC_AWARE_GCC 0
+#endif
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-function"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_MSVC
+#pragma warning(disable: 4101) /* unused local variable */
+#elif PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-variable"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-parameter"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+
+#undef PDIAGNOSTIC_IGNORE_UNUSED
+
+#if defined (__cplusplus) && __cplusplus < 201103L
+#if PDIAGNOSTIC_AWARE_CLANG
+/* Needed for < C++11 clang C++ static_assert */
+#pragma clang diagnostic ignored "-Wc11-extensions"
+/* Needed for empty macro arguments. */
+#pragma clang diagnostic ignored "-Wc99-extensions"
+/* Needed for trailing commas. */
+#pragma clang diagnostic ignored "-Wc++11-extensions"
+#endif
+#endif
+
diff --git a/include/flatcc/portable/pdiagnostic_pop.h b/include/flatcc/portable/pdiagnostic_pop.h
new file mode 100644
index 0000000..f5e16b3
--- /dev/null
+++ b/include/flatcc/portable/pdiagnostic_pop.h
@@ -0,0 +1,20 @@
+#if defined(PDIAGNOSTIC_PUSHED_MSVC)
+#if PDIAGNOSTIC_PUSHED_MSVC
+#pragma warning( pop )
+#endif // PDIAGNOSTIC_PUSHED_MSVC
+#undef PDIAGNOSTIC_PUSHED_MSVC
+#endif // defined(PDIAGNOSTIC_PUSHED_MSVC)
+
+#if defined(PDIAGNOSTIC_PUSHED_CLANG)
+#if PDIAGNOSTIC_PUSHED_CLANG
+#pragma clang diagnostic pop
+#endif // PDIAGNOSTIC_PUSHED_CLANG
+#undef PDIAGNOSTIC_PUSHED_CLANG
+#endif // defined(PDIAGNOSTIC_PUSHED_CLANG)
+
+#if defined(PDIAGNOSTIC_PUSHED_GCC)
+#if PDIAGNOSTIC_PUSHED_GCC
+#pragma GCC diagnostic pop
+#endif // PDIAGNOSTIC_PUSHED_GCC
+#undef PDIAGNOSTIC_PUSHED_GCC
+#endif // defined(PDIAGNOSTIC_PUSHED_GCC)
diff --git a/include/flatcc/portable/pdiagnostic_push.h b/include/flatcc/portable/pdiagnostic_push.h
new file mode 100644
index 0000000..66586d7
--- /dev/null
+++ b/include/flatcc/portable/pdiagnostic_push.h
@@ -0,0 +1,51 @@
+/*
+ * See also comment in "pdiagnostic.h"
+ *
+ * e.g.
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #include "pdiagnostic_push"
+ * ...
+ * #include "pdiagnostic_pop.h"
+ * <eof>
+ *
+ * or if push pop isn't desired:
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #include "pdiagnostic.h"
+ * ...
+ * <eof>
+ *
+ *
+ * Some of these warnings cannot be ignored
+ * at the #pragma level, but might in the future.
+ * Use compiler switches like -Wno-unused-function
+ * to work around this.
+ */
+
+#if defined(_MSC_VER)
+#pragma warning( push )
+#define PDIAGNOSTIC_PUSHED_MSVC 1
+#else
+#define PDIAGNOSTIC_PUSHED_MSVC 0
+#endif
+
+#if defined(__clang__)
+#pragma clang diagnostic push
+#define PDIAGNOSTIC_PUSHED_CLANG 1
+#else
+#define PDIAGNOSTIC_PUSHED_CLANG 0
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#define PDIAGNOSTIC_PUSHED_GCC 1
+#else
+#define PDIAGNOSTIC_PUSHED_GCC 0
+#endif // GNUC >= 4.6
+#else
+#define PDIAGNOSTIC_PUSHED_GCC 0
+#endif // defined(__GNUC__) && !defined(__clang__)
+
+#include "pdiagnostic.h"
diff --git a/include/flatcc/portable/pendian.h b/include/flatcc/portable/pendian.h
new file mode 100644
index 0000000..122ba8e
--- /dev/null
+++ b/include/flatcc/portable/pendian.h
@@ -0,0 +1,206 @@
+#ifndef PENDIAN_H
+#define PENDIAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Defines platform optimized (as per linux <endian.h>):
+ *
+ * le16toh, le32toh, le64toh, be16toh, be32toh, be64toh
+ * htole16, htole32, htole64, htobe16, htobe32, htobe64
+ *
+ * Falls back to auto-detect endian conversion which is also fast
+ * if fast byteswap operation was detected.
+ *
+ * Also defines platform optimized:
+ *
+ * bswap16, bswap32, bswap64,
+ *
+ * with fall-back to shift-or implementation.
+ *
+ * For convenience also defines:
+ *
+ * le8toh, be8toh, htole8, htobe8
+ * bswap8
+ *
+ * The convenience functions make it simpler to define conversion macros
+ * based on type size.
+ *
+ * NOTE: this implementation expects arguments with no side-effects and
+ * with appropriately sized unsigned arguments. These are expected to be
+ * used with typesafe wrappers.
+ */
+
+#ifndef UINT8_MAX
+#include "pstdint.h"
+#endif
+
+#if defined(__linux__)
+#include <endian.h>
+#elif defined(__OpenBSD__) || defined(__FreeBSD__)
+#include <sys/endian.h>
+#endif
+
+#include "pendian_detect.h"
+
+#if defined(_MSC_VER)
+#if _MSC_VER >= 1300
+#include <stdlib.h>
+#define bswap16 _byteswap_ushort
+#define bswap32 _byteswap_ulong
+#define bswap64 _byteswap_uint64
+#endif
+#elif defined(__clang__)
+#if __has_builtin(__builtin_bswap16)
+#ifndef bswap16
+#define bswap16 __builtin_bswap16
+#endif
+#endif
+#if __has_builtin(__builtin_bswap32)
+#ifndef bswap32
+#define bswap32 __builtin_bswap32
+#endif
+#endif
+#if __has_builtin(__builtin_bswap64)
+#ifndef bswap64
+#define bswap64 __builtin_bswap64
+#endif
+#endif
+#elif defined(__OpenBSD__) || defined(__FreeBSD__)
+#ifndef bswap16
+#define bswap16 swap16
+#endif
+#ifndef bswap32
+#define bswap32 swap32
+#endif
+#ifndef bswap64
+#define bswap64 swap64
+#endif
+#elif defined(__GNUC__) /* Supported since at least GCC 4.4 */
+#ifndef bswap32
+#define bswap32 __builtin_bswap32
+#endif
+#ifndef bswap64
+#define bswap64 __builtin_bswap64
+#endif
+#endif
+
+#ifndef bswap16
+#define bswap16(v) \
+ (((uint16_t)(v) << 8) | ((uint16_t)(v) >> 8))
+#endif
+
+#ifndef bswap32
+#define bswap32(v) \
+ ((((uint32_t)(v) << 24)) \
+ | (((uint32_t)(v) << 8) & UINT32_C(0x00FF0000)) \
+ | (((uint32_t)(v) >> 8) & UINT32_C(0x0000FF00)) \
+ | (((uint32_t)(v) >> 24)))
+#endif
+
+#ifndef bswap64
+#define bswap64(v) \
+ ((((uint64_t)(v) << 56)) \
+ | (((uint64_t)(v) << 40) & UINT64_C(0x00FF000000000000)) \
+ | (((uint64_t)(v) << 24) & UINT64_C(0x0000FF0000000000)) \
+ | (((uint64_t)(v) << 8) & UINT64_C(0x000000FF00000000)) \
+ | (((uint64_t)(v) >> 8) & UINT64_C(0x00000000FF000000)) \
+ | (((uint64_t)(v) >> 24) & UINT64_C(0x0000000000FF0000)) \
+ | (((uint64_t)(v) >> 40) & UINT64_C(0x000000000000FF00)) \
+ | (((uint64_t)(v) >> 56)))
+#endif
+
+#ifndef bswap8
+#define bswap8(v) ((uint8_t)(v))
+#endif
+
+#if !defined(le16toh) && defined(letoh16)
+#define le16toh letoh16
+#define le32toh letoh32
+#define le64toh letoh64
+#endif
+
+#if !defined(be16toh) && defined(betoh16)
+#define be16toh betoh16
+#define be32toh betoh32
+#define be64toh betoh64
+#endif
+
+/* Assume it goes for all. */
+#if !defined(le16toh)
+
+#if defined(__LITTLE_ENDIAN__)
+
+#define le16toh(v) (v)
+#define le32toh(v) (v)
+#define le64toh(v) (v)
+
+#define htole16(v) (v)
+#define htole32(v) (v)
+#define htole64(v) (v)
+
+#define be16toh(v) bswap16(v)
+#define be32toh(v) bswap32(v)
+#define be64toh(v) bswap64(v)
+
+#define htobe16(v) bswap16(v)
+#define htobe32(v) bswap32(v)
+#define htobe64(v) bswap64(v)
+
+#elif defined(__BIG_ENDIAN__)
+
+#define le16toh(v) bswap16(v)
+#define le32toh(v) bswap32(v)
+#define le64toh(v) bswap64(v)
+
+#define htole16(v) bswap16(v)
+#define htole32(v) bswap32(v)
+#define htole64(v) bswap64(v)
+
+#define be16toh(v) (v)
+#define be32toh(v) (v)
+#define be64toh(v) (v)
+
+#define htobe16(v) (v)
+#define htobe32(v) (v)
+#define htobe64(v) (v)
+
+#else
+
+static const int __pendian_test = 1;
+
+#define le16toh(v) (*(char *)&__pendian_test ? (v) : bswap16(v))
+#define le32toh(v) (*(char *)&__pendian_test ? (v) : bswap32(v))
+#define le64toh(v) (*(char *)&__pendian_test ? (v) : bswap64(v))
+
+#define htole16(v) (*(char *)&__pendian_test ? (v) : bswap16(v))
+#define htole32(v) (*(char *)&__pendian_test ? (v) : bswap32(v))
+#define htole64(v) (*(char *)&__pendian_test ? (v) : bswap64(v))
+
+#define be16toh(v) (*(char *)&__pendian_test ? bswap16(v) : (v))
+#define be32toh(v) (*(char *)&__pendian_test ? bswap32(v) : (v))
+#define be64toh(v) (*(char *)&__pendian_test ? bswap64(v) : (v))
+
+#define htobe16(v) (*(char *)&__pendian_test ? bswap16(v) : (v))
+#define htobe32(v) (*(char *)&__pendian_test ? bswap32(v) : (v))
+#define htobe64(v) (*(char *)&__pendian_test ? bswap64(v) : (v))
+
+#endif
+
+#endif /* le16toh */
+
+/* Helpers not part of Linux <endian.h> */
+#if !defined(le8toh)
+#define le8toh(n) (n)
+#define htole8(n) (n)
+#define be8toh(n) (n)
+#define htobe8(n) (n)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PENDIAN_H */
diff --git a/include/flatcc/portable/pendian_detect.h b/include/flatcc/portable/pendian_detect.h
new file mode 100644
index 0000000..1dd62c0
--- /dev/null
+++ b/include/flatcc/portable/pendian_detect.h
@@ -0,0 +1,118 @@
+/*
+ * Uses various known flags to decide endianness and defines:
+ *
+ * __LITTLE_ENDIAN__ or __BIG_ENDIAN__ if not already defined
+ *
+ * and also defines
+ *
+ * __BYTE_ORDER__ to either __ORDER_LITTLE_ENDIAN__ or
+ * __ORDER_BIG_ENDIAN__ if not already defined
+ *
+ * If none of these could be set, __UNKNOWN_ENDIAN__ is defined,
+ * which is not a known flag. If __BYTE_ORDER__ is defined but
+ * not big or little endian, __UNKNOWN_ENDIAN__ is also defined.
+ *
+ * Note: Some systems define __BYTE_ORDER without __ at the end
+ * - this will be mapped to __BYTE_ORDER__.
+ */
+
+#ifndef PENDIAN_DETECT
+#define PENDIAN_DETECT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __ORDER_LITTLE_ENDIAN__
+#define __ORDER_LITTLE_ENDIAN__ 1234
+#endif
+
+#ifndef __ORDER_BIG_ENDIAN__
+#define __ORDER_BIG_ENDIAN__ 4321
+#endif
+
+#ifdef __BYTE_ORDER__
+
+#if defined(__LITTLE_ENDIAN__) && __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+#error __LITTLE_ENDIAN__ inconsistent with __BYTE_ORDER__
+#endif
+
+#if defined(__BIG_ENDIAN__) && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+#error __BIG_ENDIAN__ inconsistent with __BYTE_ORDER__
+#endif
+
+#else /* __BYTE_ORDER__ */
+
+
+#if \
+ defined(__LITTLE_ENDIAN__) || \
+ (defined(__BYTE_ORDER) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN) || \
+ defined(__ARMEL__) || defined(__THUMBEL__) || \
+ defined(__AARCH64EL__) || \
+ (defined(_MSC_VER) && defined(_M_ARM)) || \
+ defined(_MIPSEL) || defined(__MIPSEL) || defined(__MIPSEL__) || \
+ defined(_M_X64) || defined(_M_IX86) || defined(_M_I86) || \
+ defined(__i386__) || defined(__alpha__) || \
+ defined(__ia64) || defined(__ia64__) || \
+ defined(_M_IA64) || defined(_M_ALPHA) || \
+ defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
+ defined(__bfin__)
+
+#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
+
+#endif
+
+#if \
+ defined (__BIG_ENDIAN__) || \
+ (defined(__BYTE_ORDER) && __BYTE_ORDER == __ORDER_BIG_ENDIAN) || \
+ defined(__ARMEB__) || defined(THUMBEB__) || defined (__AARCH64EB__) || \
+ defined(_MIPSEB) || defined(__MIPSEB) || defined(__MIPSEB__) || \
+ defined(__sparc) || defined(__sparc__) || \
+ defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || \
+ defined(__hpux) || defined(__hppa) || defined(__s390__)
+
+#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__
+
+#endif
+
+#endif /* __BYTE_ORDER__ */
+
+#ifdef __BYTE_ORDER__
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+#ifndef __LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN__ 1
+#endif
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+#ifndef __BIG_ENDIAN__
+#define __BIG_ENDIAN__ 1
+#endif
+
+#else
+
+/*
+ * Custom extension - we only define __BYTE_ORDER__ if known big or little.
+ * User code that understands __BYTE_ORDER__ may also assume unknown if
+ * it is not defined by now - this will allow other endian formats than
+ * big or little when supported by compiler.
+ */
+#ifndef __UNKNOWN_ENDIAN__
+#define __UNKNOWN_ENDIAN__ 1
+#endif
+
+#endif
+#endif /* __BYTE_ORDER__ */
+
+#if defined(__LITTLE_ENDIAN__) && defined(__BIG_ENDIAN__)
+#error conflicting definitions of __LITTLE_ENDIAN__ and __BIG_ENDIAN__
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PENDIAN_DETECT */
diff --git a/include/flatcc/portable/pinline.h b/include/flatcc/portable/pinline.h
new file mode 100644
index 0000000..f4f8f27
--- /dev/null
+++ b/include/flatcc/portable/pinline.h
@@ -0,0 +1,19 @@
+#ifndef PINLINE_H
+#define PINLINE_H
+
+#ifndef __cplusplus
+
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+#elif _MSC_VER >= 1500 /* MSVC 9 or newer */
+#undef inline
+#define inline __inline
+#elif __GNUC__ >= 3 /* GCC 3 or newer */
+#define inline __inline
+#else /* Unknown or ancient */
+#define inline
+#endif
+
+#endif /* __cplusplus */
+
+#endif /* PINLINE_H */
diff --git a/include/flatcc/portable/pinttypes.h b/include/flatcc/portable/pinttypes.h
new file mode 100644
index 0000000..a1be9df
--- /dev/null
+++ b/include/flatcc/portable/pinttypes.h
@@ -0,0 +1,52 @@
+#ifndef PINTTYPES_H
+#define PINTTYPES_H
+
+#ifndef PRId16
+
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+#include <inttypes.h>
+#else
+
+/*
+ * This is not a complete implementation of <inttypes.h>, just the most
+ * useful printf modifiers.
+ */
+
+#include "pstdint.h"
+
+#ifndef PRINTF_INT64_MODIFIER
+#error "please define PRINTF_INT64_MODIFIER"
+#endif
+
+#ifndef PRId64
+#define PRId64 PRINTF_INT64_MODIFIER "d"
+#define PRIu64 PRINTF_INT64_MODIFIER "u"
+#define PRIx64 PRINTF_INT64_MODIFIER "x"
+#endif
+
+#ifndef PRINTF_INT32_MODIFIER
+#define PRINTF_INT32_MODIFIER "l"
+#endif
+
+#ifndef PRId32
+#define PRId32 PRINTF_INT32_MODIFIER "d"
+#define PRIu32 PRINTF_INT32_MODIFIER "u"
+#define PRIx32 PRINTF_INT32_MODIFIER "x"
+#endif
+
+#ifndef PRINTF_INT16_MODIFIER
+#define PRINTF_INT16_MODIFIER "h"
+#endif
+
+#ifndef PRId16
+#define PRId16 PRINTF_INT16_MODIFIER "d"
+#define PRIu16 PRINTF_INT16_MODIFIER "u"
+#define PRIx16 PRINTF_INT16_MODIFIER "x"
+#endif
+
+# endif /* __STDC__ */
+
+#endif /* PRId16 */
+
+#endif /* PINTTYPES_H */
diff --git a/include/flatcc/portable/portable.h b/include/flatcc/portable/portable.h
new file mode 100644
index 0000000..7a6a484
--- /dev/null
+++ b/include/flatcc/portable/portable.h
@@ -0,0 +1,2 @@
+/* portable.h is widely used, so we redirect to a less conflicting name. */
+#include "portable_basic.h"
diff --git a/include/flatcc/portable/portable_basic.h b/include/flatcc/portable/portable_basic.h
new file mode 100644
index 0000000..0396f3d
--- /dev/null
+++ b/include/flatcc/portable/portable_basic.h
@@ -0,0 +1,25 @@
+#ifndef PORTABLE_BASIC_H
+#define PORTABLE_BASIC_H
+
+/*
+ * Basic features needed to make compilers support the most common modern C
+ * features, and endian / unaligned read support as well.
+ *
+ * It is not assumed that this file is always included.
+ * Other include files are independent or include what they need.
+ */
+
+#include "pversion.h"
+#include "pwarnings.h"
+
+/* Features that ought to be supported by C11, but some aren't. */
+#include "pinttypes.h"
+#include "pstdalign.h"
+#include "pinline.h"
+#include "pstatic_assert.h"
+
+/* These are not supported by C11 and are general platform abstractions. */
+#include "pendian.h"
+#include "punaligned.h"
+
+#endif /* PORTABLE_BASIC_H */
diff --git a/include/flatcc/portable/pparsefp.h b/include/flatcc/portable/pparsefp.h
new file mode 100644
index 0000000..7fa1c24
--- /dev/null
+++ b/include/flatcc/portable/pparsefp.h
@@ -0,0 +1,226 @@
+#ifndef PPARSEFP_H
+#define PPARSEFP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h> /* memcpy */
+
+/*
+ * Parses a float or double number and returns the length parsed if
+ * successful. The length argument is of limited value due to dependency
+ * on `strtod` - buf[len] must be accessible and must not be part of
+ * a valid number, including hex float numbers.
+ *
+ * Unlike strtod, whitespace is not parsed.
+ *
+ * May return:
+ * - null on error,
+ * - buffer start if first character does not start a number,
+ * - or end of parse on success.
+ *
+ */
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+/*
+ * isinf is needed in order to stay compatible with strtod's
+ * over/underflow handling but isinf has some portability issues.
+ *
+ * Use the parse_double/float_is_range_error instead of isinf directly.
+ * This ensures optimizations can be added when not using strtod.
+ *
+ * On gcc, clang and msvc we can use isinf or equivalent directly.
+ * Other compilers such as xlc may require linking with -lm which may not
+ * be convenient so a default isinf is provided. If isinf is available
+ * and there is a noticeable performance issue, define
+ * `PORTABLE_USE_ISINF`. This flag also affects isnan.
+ */
+#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || defined(PORTABLE_USE_ISINF)
+#include <math.h>
+#if defined(_MSC_VER) && !defined(isinf)
+#include <float.h>
+#define isnan _isnan
+#define isinf(x) (!_finite(x))
+#endif
+/*
+ * clang-3 through clang-8 but not clang-9 issues incorrect precision
+ * loss warning with -Wconversion flag when cast is absent.
+ */
+#if defined(__clang__)
+#if __clang_major__ >= 3 && __clang_major__ <= 8
+#define parse_double_isinf(x) isinf((float)x)
+#define parse_double_isnan(x) isnan((float)x)
+#endif
+#endif
+#if !defined(parse_double_isinf)
+#define parse_double_isinf isinf
+#endif
+#define parse_float_isinf isinf
+
+#else
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/* Avoid linking with libmath but depends on float/double being IEEE754 */
+static inline int parse_double_isinf(const double x)
+{
+ uint64_t u64x;
+
+ memcpy(&u64x, &x, sizeof(u64x));
+ return (u64x & 0x7fffffff00000000ULL) == 0x7ff0000000000000ULL;
+}
+
+static inline int parse_float_isinf(float x)
+{
+ uint32_t u32x;
+
+ memcpy(&u32x, &x, sizeof(u32x));
+ return (u32x & 0x7fffffff) == 0x7f800000;
+}
+
+#endif
+
+#if !defined(parse_double_isnan)
+#define parse_double_isnan isnan
+#endif
+#if !defined(parse_float_isnan)
+#define parse_float_isnan isnan
+#endif
+
+/* Returns 0 when in range, 1 on overflow, and -1 on underflow. */
+static inline int parse_double_is_range_error(double x)
+{
+ return parse_double_isinf(x) ? (x < 0.0 ? -1 : 1) : 0;
+}
+
+static inline int parse_float_is_range_error(float x)
+{
+ return parse_float_isinf(x) ? (x < 0.0f ? -1 : 1) : 0;
+}
+
+#ifndef PORTABLE_USE_GRISU3
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+#if PORTABLE_USE_GRISU3
+#include "grisu3_parse.h"
+#endif
+
+#ifdef grisu3_parse_double_is_defined
+static inline const char *parse_double(const char *buf, size_t len, double *result)
+{
+ return grisu3_parse_double(buf, len, result);
+}
+#else
+#include <stdio.h>
+static inline const char *parse_double(const char *buf, size_t len, double *result)
+{
+ char *end;
+
+ (void)len;
+ *result = strtod(buf, &end);
+ return end;
+}
+#endif
+
+static inline const char *parse_float(const char *buf, size_t len, float *result)
+{
+ const char *end;
+ double v;
+ union { uint32_t u32; float f32; } inf;
+ inf.u32 = 0x7f800000;
+
+ end = parse_double(buf, len, &v);
+ *result = (float)v;
+ if (parse_float_isinf(*result)) {
+ *result = v < 0 ? -inf.f32 : inf.f32;
+ return buf;
+ }
+ return end;
+}
+
+/* Inspired by https://bitbashing.io/comparing-floats.html */
+
+/* Return signed ULP distance or INT64_MAX if any value is nan. */
+static inline int64_t parse_double_compare(const double x, const double y)
+{
+ int64_t i64x, i64y;
+
+ if (x == y) return 0;
+ if (parse_double_isnan(x)) return INT64_MAX;
+ if (parse_double_isnan(y)) return INT64_MAX;
+ memcpy(&i64x, &x, sizeof(i64x));
+ memcpy(&i64y, &y, sizeof(i64y));
+ if ((i64x < 0) != (i64y < 0)) return INT64_MAX;
+ return i64x - i64y;
+}
+
+/* Same as double, but INT32_MAX if nan. */
+static inline int32_t parse_float_compare(const float x, const float y)
+{
+ int32_t i32x, i32y;
+
+ if (x == y) return 0;
+ if (parse_float_isnan(x)) return INT32_MAX;
+ if (parse_float_isnan(y)) return INT32_MAX;
+ memcpy(&i32x, &x, sizeof(i32x));
+ memcpy(&i32y, &y, sizeof(i32y));
+ if ((i32x < 0) != (i32y < 0)) return INT32_MAX;
+ return i32x - i32y;
+}
+
+/*
+ * Returns the absolute distance in floating point ULP (representational bit difference).
+ * Uses signed return value so that INT64_MAX and INT32_MAX indicates NaN similar to
+ * the compare function.
+ */
+static inline int64_t parse_double_dist(const double x, const double y)
+{
+ uint64_t m64;
+ int64_t i64;
+
+ i64 = parse_double_compare(x, y);
+ /* Absolute integer value of compare. */
+ m64 = (uint64_t)-(i64 < 0);
+ return (int64_t)(((uint64_t)i64 + m64) ^ m64);
+}
+
+/* Same as double, but INT32_MAX if NaN. */
+static inline int32_t parse_float_dist(const float x, const float y)
+{
+ uint32_t m32;
+ int32_t i32;
+
+ i32 = parse_float_compare(x, y);
+ /* Absolute integer value of compare. */
+ m32 = (uint32_t)-(i32 < 0);
+ return (int32_t)(((uint32_t)i32 + m32) ^ m32);
+}
+
+/*
+ * Returns 1 if no value is NaN, and the difference is at most one ULP (1 bit), and the
+ * sign is the same, and 0 otherwise.
+ */
+static inline int parse_double_is_equal(const double x, const double y)
+{
+ return parse_double_dist(x, y) >> 1 == 0;
+}
+
+/* Same as double, but at lower precision. */
+static inline int parse_float_is_equal(const float x, const float y)
+{
+ return parse_float_dist(x, y) >> 1 == 0;
+}
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPARSEFP_H */
diff --git a/include/flatcc/portable/pparseint.h b/include/flatcc/portable/pparseint.h
new file mode 100644
index 0000000..96cc99f
--- /dev/null
+++ b/include/flatcc/portable/pparseint.h
@@ -0,0 +1,374 @@
+#ifndef PPARSEINT_H
+#define PPARSEINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Type specific integer parsers:
+ *
+ * const char *
+ * parse_<type-name>(const char *buf, size_t len, <type> *value, int *status);
+ *
+ * parse_uint64, parse_int64
+ * parse_uint32, parse_int32
+ * parse_uint16, parse_int16
+ * parse_uint8, parse_int8
+ * parse_ushort, parse_short
+ * parse_uint, parse_int
+ * parse_ulong, parse_long
+ *
+ * Leading space must be stripped in advance. Status argument can be
+ * null.
+ *
+ * Returns pointer to end of match and a non-negative status code
+ * on success (0 for unsigned, 1 for signed):
+ *
+ * PARSE_INTEGER_UNSIGNED
+ * PARSE_INTEGER_SIGNED
+ *
+ * Returns null with a negative status code and unmodified value on
+ * invalid integer formats:
+ *
+ * PARSE_INTEGER_OVERFLOW
+ * PARSE_INTEGER_UNDERFLOW
+ * PARSE_INTEGER_INVALID
+ *
+ * Returns input buffer with negative status code and unmodified value
+ * if first character does not start an integer (not a sign or a digit).
+ *
+ * PARSE_INTEGER_UNMATCHED
+ * PARSE_INTEGER_END
+ *
+ * The signed parsers only works with two's complement architectures.
+ *
+ * Note: the corresponding parse_float and parse_double parsers do not
+ * have a status argument because +/-Inf and NaN are conventionally used
+ * for this.
+ */
+
+#include "limits.h"
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define PARSE_INTEGER_UNSIGNED 0
+#define PARSE_INTEGER_SIGNED 1
+#define PARSE_INTEGER_OVERFLOW -1
+#define PARSE_INTEGER_UNDERFLOW -2
+#define PARSE_INTEGER_INVALID -3
+#define PARSE_INTEGER_UNMATCHED -4
+#define PARSE_INTEGER_END -5
+
+/*
+ * Generic integer parser that holds 64-bit unsigned values and stores
+ * sign separately. Leading space is not valid.
+ *
+ * Note: this function differs from the type specific parsers like
+ * parse_int64 by not negating the value when there is a sign. It
+ * differs from parse_uint64 by being able to return a negative
+ * UINT64_MAX successfully.
+ *
+ * This parser is used by all type specific integer parsers.
+ *
+ * Status argument can be null.
+ */
+static const char *parse_integer(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ uint64_t x0, x = 0;
+ const char *k, *end = buf + len;
+ int sign, status_;
+
+ if (!status) {
+ status = &status_;
+ }
+ if (buf == end) {
+ *status = PARSE_INTEGER_END;
+ return buf;
+ }
+ k = buf;
+ sign = *buf == '-';
+ buf += sign;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ x0 = x;
+ x = x * 10 + (uint64_t)(*buf - '0');
+ if (x0 > x) {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ /* No number was matched, but it isn't an invalid number either. */
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf;
+ }
+ if (buf == k + sign) {
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+ if (buf != end)
+ switch (*buf) {
+ case 'e': case 'E': case '.': case 'p': case 'P':
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+ *value = x;
+ *status = sign;
+ return buf;
+}
+
+/*
+ * Parse hex values like 0xff, -0xff, 0XdeAdBeaf42, cannot be trailed by '.', 'p', or 'P'.
+ * Overflows if string is more than 16 valid hex digits. Otherwise similar to parse_integer.
+ */
+static const char *parse_hex_integer(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ uint64_t x = 0;
+ const char *k, *k2, *end = buf + len;
+ int sign, status_;
+ unsigned char c;
+
+ if (!status) {
+ status = &status_;
+ }
+ if (buf == end) {
+ *status = PARSE_INTEGER_END;
+ return buf;
+ }
+ sign = *buf == '-';
+ buf += sign;
+ if (end - buf < 2 || buf[0] != '0' || (buf[1] | 0x20) != 'x') {
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf - sign;
+ }
+ buf += 2;
+ k = buf;
+ k2 = end;
+ if (end - buf > 16) {
+ k2 = buf + 16;
+ }
+ while (buf != k2) {
+ c = (unsigned char)*buf;
+ if (c >= '0' && c <= '9') {
+ x = x * 16 + c - '0';
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = x * 16 + c - 'a' + 10;
+ } else {
+ break;
+ }
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ if (sign) {
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ } else {
+ /* No number was matched, but it isn't an invalid number either. */
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf;
+ }
+ }
+ if (buf == end) {
+ goto done;
+ }
+ c = (unsigned char)*buf;
+ if (buf == k2) {
+ if (c >= '0' && c <= '9') {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ }
+ switch (c) {
+ case '.': case 'p': case 'P':
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+done:
+ *value = x;
+ *status = sign;
+ return buf;
+}
+
+
+#define __portable_define_parse_unsigned(NAME, TYPE, LIMIT) \
+static inline const char *parse_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+#define __portable_define_parse_hex_unsigned(NAME, TYPE, LIMIT) \
+static inline const char *parse_hex_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_hex_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+/* This assumes two's complement. */
+#define __portable_define_parse_signed(NAME, TYPE, LIMIT) \
+static inline const char *parse_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ if (x <= (uint64_t)(LIMIT) + 1) { \
+ *value = (TYPE)-(int64_t)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+/* This assumes two's complement. */
+#define __portable_define_parse_hex_signed(NAME, TYPE, LIMIT) \
+static inline const char *parse_hex_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_hex_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ if (x <= (uint64_t)(LIMIT) + 1) { \
+ *value = (TYPE)-(int64_t)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+static inline const char *parse_uint64(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ buf = parse_integer(buf, len, value, status);
+ if (*status == PARSE_INTEGER_SIGNED) {
+ *status = PARSE_INTEGER_UNDERFLOW;
+ return 0;
+ }
+ return buf;
+}
+
+static inline const char *parse_hex_uint64(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ buf = parse_hex_integer(buf, len, value, status);
+ if (*status == PARSE_INTEGER_SIGNED) {
+ *status = PARSE_INTEGER_UNDERFLOW;
+ return 0;
+ }
+ return buf;
+}
+
+__portable_define_parse_signed(int64, int64_t, INT64_MAX)
+__portable_define_parse_signed(int32, int32_t, INT32_MAX)
+__portable_define_parse_unsigned(uint16, uint16_t, UINT16_MAX)
+__portable_define_parse_signed(int16, int16_t, INT16_MAX)
+__portable_define_parse_unsigned(uint8, uint8_t, UINT8_MAX)
+__portable_define_parse_signed(int8, int8_t, INT8_MAX)
+
+__portable_define_parse_hex_signed(int64, int64_t, INT64_MAX)
+__portable_define_parse_hex_signed(int32, int32_t, INT32_MAX)
+__portable_define_parse_hex_unsigned(uint16, uint16_t, UINT16_MAX)
+__portable_define_parse_hex_signed(int16, int16_t, INT16_MAX)
+__portable_define_parse_hex_unsigned(uint8, uint8_t, UINT8_MAX)
+__portable_define_parse_hex_signed(int8, int8_t, INT8_MAX)
+
+__portable_define_parse_unsigned(ushort, unsigned short, USHRT_MAX)
+__portable_define_parse_signed(short, short, SHRT_MAX)
+__portable_define_parse_unsigned(uint, unsigned int, UINT_MAX)
+__portable_define_parse_signed(int, int, INT_MAX)
+__portable_define_parse_unsigned(ulong, unsigned long, ULONG_MAX)
+__portable_define_parse_signed(long, long, LONG_MAX)
+
+__portable_define_parse_hex_unsigned(ushort, unsigned short, USHRT_MAX)
+__portable_define_parse_hex_signed(short, short, SHRT_MAX)
+__portable_define_parse_hex_unsigned(uint, unsigned int, UINT_MAX)
+__portable_define_parse_hex_signed(int, int, INT_MAX)
+__portable_define_parse_hex_unsigned(ulong, unsigned long, ULONG_MAX)
+__portable_define_parse_hex_signed(long, long, LONG_MAX)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPARSEINT_H */
diff --git a/include/flatcc/portable/pprintfp.h b/include/flatcc/portable/pprintfp.h
new file mode 100644
index 0000000..c2e5c07
--- /dev/null
+++ b/include/flatcc/portable/pprintfp.h
@@ -0,0 +1,39 @@
+#ifndef PPRINTFP_H
+#define PPRINTFP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+#ifndef PORTABLE_USE_GRISU3
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+
+#if PORTABLE_USE_GRISU3
+#include "grisu3_print.h"
+#endif
+
+#ifdef grisu3_print_double_is_defined
+/* Currently there is not special support for floats. */
+#define print_float(n, p) grisu3_print_double((float)(n), (p))
+#define print_double(n, p) grisu3_print_double((double)(n), (p))
+#else
+#include <stdio.h>
+#define print_float(n, p) sprintf(p, "%.9g", (float)(n))
+#define print_double(n, p) sprintf(p, "%.17g", (double)(n))
+#endif
+
+#define print_hex_float(n, p) sprintf(p, "%a", (float)(n))
+#define print_hex_double(n, p) sprintf(p, "%a", (double)(n))
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPRINTFP_H */
diff --git a/include/flatcc/portable/pprintint.h b/include/flatcc/portable/pprintint.h
new file mode 100644
index 0000000..d05f376
--- /dev/null
+++ b/include/flatcc/portable/pprintint.h
@@ -0,0 +1,628 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *
+ * Fast printing of (u)int8/16/32/64_t, (u)int, (u)long.
+ *
+ * Functions take the form
+ *
+ * int print_<type>(type value, char *buf);
+ *
+ * and returns number of characters printed, excluding trailing '\0'
+ * which is also printed. Prints at most 21 characters including zero-
+ * termination.
+ *
+ * The function `print_bool` is a bit different - it simply prints "true\0" for
+ * non-zero integers, and "false\0" otherwise.
+ *
+ * The general algorithm is in-place formatting using binary search log10
+ * followed by duff device loop unrolling div / 100 stages.
+ *
+ * The simpler post copy algorithm also provided for fmt_(u)int uses a
+ * temp buffer and loops over div/100 and post copy to target buffer.
+ *
+ *
+ * Benchmarks on core-i7, 2.2GHz, 64-bit clang/OS-X -O2:
+ *
+ * print_int64: avg 15ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int64: avg 11ns for values between 10^9 + (0..10,000,000).
+ * print_int32: avg 7ns for values cast from INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int32: avg 7ns for values between 10^9 + (0..10,000,000).
+ * print_int64: avg 13ns for values between 10^16 + (0..10,000,000).
+ * print_int64: avg 5ns for values between 0 and 10,000,000.
+ * print_int32: avg 5ns for values between 0 and 10,000,000.
+ * print_int16: avg 10ns for values cast from 0 and 10,000,000.
+ * print_int8: avg 4ns for values cast from 0 and 10,000,000.
+ *
+ * Post copy algorithm:
+ * print_int: avg 12ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int: avg 14ns for values between 10^9 + (0..10,000,000).
+ * print_long: avg 29ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ *
+ * The post copy algorithm is nearly half as fast as the in-place
+ * algorithm, but can also be faster occasionally - possibly because the
+ * optimizer being able to skip the copy step.
+ */
+
+#ifndef PPRINTINT_H
+#define PPRINTINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "pattributes.h" /* fallthrough */
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+static int print_bool(int n, char *p);
+
+static int print_uint8(uint8_t n, char *p);
+static int print_uint16(uint16_t n, char *p);
+static int print_uint32(uint32_t n, char *p);
+static int print_uint64(uint64_t n, char *p);
+static int print_int8(int8_t n, char *p);
+static int print_int16(int16_t n, char *p);
+static int print_int32(int32_t n, char *p);
+static int print_int64(int64_t n, char *p);
+
+/*
+ * Uses slightly slower, but more compact algorithm
+ * that is not hardcoded to implementation size.
+ * Other types may be defined using macros below.
+ */
+static int print_ulong(unsigned long n, char *p);
+static int print_uint(unsigned int n, char *p);
+static int print_int(int n, char *p);
+static int print_long(long n, char *p);
+
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+#define __print_unaligned_copy_16(p, q) (*(uint16_t*)(p) = *(uint16_t*)(q))
+#else
+#define __print_unaligned_copy_16(p, q) \
+ ((((uint8_t*)(p))[0] = ((uint8_t*)(q))[0]), \
+ (((uint8_t*)(p))[1] = ((uint8_t*)(q))[1]))
+#endif
+
+static const char __print_digit_pairs[] =
+ "0001020304050607080910111213141516171819"
+ "2021222324252627282930313233343536373839"
+ "4041424344454647484950515253545556575859"
+ "6061626364656667686970717273747576777879"
+ "8081828384858687888990919293949596979899";
+
+#define __print_stage() \
+ p -= 2; \
+ dp = __print_digit_pairs + (n % 100) * 2; \
+ n /= 100; \
+ __print_unaligned_copy_16(p, dp);
+
+#define __print_long_stage() \
+ __print_stage() \
+ __print_stage()
+
+#define __print_short_stage() \
+ *--p = (n % 10) + '0'; \
+ n /= 10;
+
+static int print_bool(int n, char *buf)
+{
+ if (n) {
+ memcpy(buf, "true\0", 5);
+ return 4;
+ } else {
+ memcpy(buf, "false\0", 6);
+ return 5;
+ }
+}
+
+static int print_uint8(uint8_t n, char *p)
+{
+ const char *dp;
+
+ if (n >= 100) {
+ p += 3;
+ *p = '\0';
+ __print_stage();
+ p[-1] = (char)n + '0';
+ return 3;
+ }
+ if (n >= 10) {
+ p += 2;
+ *p = '\0';
+ __print_stage();
+ return 2;
+ }
+ p[1] = '\0';
+ p[0] = (char)n + '0';
+ return 1;
+}
+
+static int print_uint16(uint16_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+
+ if (n >= 1000) {
+ if(n >= 10000) {
+ k = 5;
+ } else {
+ k = 4;
+ }
+ } else {
+ if(n >= 100) {
+ k = 3;
+ } else if(n >= 10) {
+ k = 2;
+ } else {
+ k = 1;
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 5:
+ __print_stage();
+ pattribute(fallthrough);
+ case 3:
+ __print_stage();
+ pattribute(fallthrough);
+ case 1:
+ p[-1] = (char)n + '0';
+ }
+ } else {
+ switch (k) {
+ case 4:
+ __print_stage();
+ pattribute(fallthrough);
+ case 2:
+ __print_stage();
+ }
+ }
+ return k;
+}
+
+static int print_uint32(uint32_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+
+ if(n >= 10000UL) {
+ if(n >= 10000000UL) {
+ if(n >= 1000000000UL) {
+ k = 10;
+ } else if(n >= 100000000UL) {
+ k = 9;
+ } else {
+ k = 8;
+ }
+ } else {
+ if(n >= 1000000UL) {
+ k = 7;
+ } else if(n >= 100000UL) {
+ k = 6;
+ } else {
+ k = 5;
+ }
+ }
+ } else {
+ if(n >= 100UL) {
+ if(n >= 1000UL) {
+ k = 4;
+ } else {
+ k = 3;
+ }
+ } else {
+ if(n >= 10UL) {
+ k = 2;
+ } else {
+ k = 1;
+ }
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 9:
+ __print_stage();
+ pattribute(fallthrough);
+ case 7:
+ __print_stage();
+ pattribute(fallthrough);
+ case 5:
+ __print_stage();
+ pattribute(fallthrough);
+ case 3:
+ __print_stage();
+ pattribute(fallthrough);
+ case 1:
+ p[-1] = (char)n + '0';
+ }
+ } else {
+ switch (k) {
+ case 10:
+ __print_stage();
+ pattribute(fallthrough);
+ case 8:
+ __print_stage();
+ pattribute(fallthrough);
+ case 6:
+ __print_stage();
+ pattribute(fallthrough);
+ case 4:
+ __print_stage();
+ pattribute(fallthrough);
+ case 2:
+ __print_stage();
+ }
+ }
+ return k;
+}
+
+static int print_uint64(uint64_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+ const uint64_t x = 1000000000ULL;
+
+ if (n < x) {
+ return print_uint32((uint32_t)n, p);
+ }
+ if(n >= 10000ULL * x) {
+ if(n >= 10000000ULL * x) {
+ if(n >= 1000000000ULL * x) {
+ if (n >= 10000000000ULL * x) {
+ k = 11 + 9;
+ } else {
+ k = 10 + 9;
+ }
+ } else if(n >= 100000000ULL * x) {
+ k = 9 + 9;
+ } else {
+ k = 8 + 9;
+ }
+ } else {
+ if(n >= 1000000ULL * x) {
+ k = 7 + 9;
+ } else if(n >= 100000ULL * x) {
+ k = 6 + 9;
+ } else {
+ k = 5 + 9;
+ }
+ }
+ } else {
+ if(n >= 100ULL * x) {
+ if(n >= 1000ULL * x) {
+ k = 4 + 9;
+ } else {
+ k = 3 + 9;
+ }
+ } else {
+ if(n >= 10ULL * x) {
+ k = 2 + 9;
+ } else {
+ k = 1 + 9;
+ }
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 19:
+ __print_stage();
+ pattribute(fallthrough);
+ case 17:
+ __print_stage();
+ pattribute(fallthrough);
+ case 15:
+ __print_stage();
+ pattribute(fallthrough);
+ case 13:
+ __print_stage();
+ pattribute(fallthrough);
+ case 11:
+ __print_stage()
+ __print_short_stage();
+ }
+ } else {
+ switch (k) {
+ case 20:
+ __print_stage();
+ pattribute(fallthrough);
+ case 18:
+ __print_stage();
+ pattribute(fallthrough);
+ case 16:
+ __print_stage();
+ pattribute(fallthrough);
+ case 14:
+ __print_stage();
+ pattribute(fallthrough);
+ case 12:
+ __print_stage();
+ pattribute(fallthrough);
+ case 10:
+ __print_stage();
+ }
+ }
+ __print_long_stage()
+ __print_long_stage()
+ return k;
+}
+
+static int print_int8(int8_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint8((uint8_t)n, p) + sign;
+}
+
+static int print_int16(int16_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint16((uint16_t)n, p) + sign;
+}
+
+static int print_int32(int32_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint32((uint32_t)n, p) + sign;
+}
+
+static int print_int64(int64_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint64((uint64_t)n, p) + sign;
+}
+
+#define __define_print_int_simple(NAME, UNAME, T, UT) \
+static int UNAME(UT n, char *buf) \
+{ \
+ char tmp[20]; \
+ char* p = tmp + 20; \
+ char* q = p; \
+ unsigned int k, m; \
+ \
+ while (n >= 100) { \
+ p -= 2; \
+ m = (unsigned int)(n % 100) * 2; \
+ n /= 100; \
+ __print_unaligned_copy_16(p, __print_digit_pairs + m); \
+ } \
+ p -= 2; \
+ m = (unsigned int)n * 2; \
+ __print_unaligned_copy_16(p, __print_digit_pairs + m); \
+ if (n < 10) { \
+ ++p; \
+ } \
+ k = (unsigned int)(q - p); \
+ while (p != q) { \
+ *buf++ = *p++; \
+ } \
+ *buf = '\0'; \
+ return (int)k; \
+} \
+ \
+static int NAME(T n, char *buf) \
+{ \
+ int sign = n < 0; \
+ \
+ if (sign) { \
+ *buf++ = '-'; \
+ n = -n; \
+ } \
+ return UNAME((UT)n, buf) + sign; \
+}
+
+__define_print_int_simple(print_int, print_uint, int, unsigned int)
+__define_print_int_simple(print_long, print_ulong, long, unsigned long)
+
+#ifdef PPRINTINT_BENCH
+int main() {
+ int64_t count = 10000000; /* 10^7 */
+#if 0
+ int64_t base = 0;
+ int64_t base = 10000000000000000; /* 10^16 */
+ int64_t base = 1000000000; /* 10^9 */
+#endif
+ int64_t base = INT64_MIN - count/2;
+ char buf[100];
+ int i, k = 0, n = 0;
+ for (i = 0; i < count; i++) {
+ k = print_int64(i + base, buf);
+ n += buf[0] + buf[k - 1];
+ }
+ return n;
+}
+/* Call with time on executable, multiply time in seconds by 100 to get time unit in ns/number. */
+#endif /* PPRINTINT_BENCH */
+
+#ifdef PPRINTINT_TEST
+
+#include <stdio.h>
+#include <string.h>
+
+int main()
+{
+ char buf[21];
+ int failed = 0;
+ int k;
+
+ k = print_uint64(UINT64_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("18446744073709551615", buf)) {
+ printf("UINT64_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int64(INT64_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("9223372036854775807", buf)) {
+ printf("INT64_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int64(INT64_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-9223372036854775808", buf)) {
+ printf("INT64_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint32(UINT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("4294967295", buf)) {
+ printf("UINT32_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int32(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int32(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint16(UINT16_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("65535", buf)) {
+ printf("UINT16_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int16(INT16_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("32767", buf)) {
+ printf("INT16_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int16(INT16_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-32768", buf)) {
+ printf("INT16_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint8(UINT8_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("255", buf)) {
+ printf("INT8_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int8(INT8_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("127", buf)) {
+ printf("INT8_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int8(INT8_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-128", buf)) {
+ printf("INT8_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly with k = print_int, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly k = print_int, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_long(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly with fmt_long, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_long(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly fmt_long, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(1, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("true", buf)) {
+ printf("1 didn't print 'true' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(-1, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("true", buf)) {
+ printf("-1 didn't print 'true' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(0, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("false", buf)) {
+ printf("0 didn't print 'false' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ if (failed) {
+ printf("FAILED\n");
+ return -1;
+ }
+ printf("SUCCESS\n");
+ return 0;
+}
+#endif /* PPRINTINT_TEST */
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPRINTINT_H */
diff --git a/include/flatcc/portable/pstatic_assert.h b/include/flatcc/portable/pstatic_assert.h
new file mode 100644
index 0000000..24d5634
--- /dev/null
+++ b/include/flatcc/portable/pstatic_assert.h
@@ -0,0 +1,67 @@
+#ifndef PSTATIC_ASSERT_H
+#define PSTATIC_ASSERT_H
+
+#include <assert.h>
+
+/* Handle clang */
+#ifndef __has_feature
+ #define __has_feature(x) 0
+#endif
+
+#if defined(static_assert)
+#ifndef __static_assert_is_defined
+#define __static_assert_is_defined 1
+#endif
+#endif
+
+/* Handle static_assert as a keyword in C++ and compiler specifics. */
+#if !defined(__static_assert_is_defined)
+
+#if defined(__cplusplus)
+
+#if __cplusplus >= 201103L
+#define __static_assert_is_defined 1
+#elif __has_feature(cxx_static_assert)
+#define __static_assert_is_defined 1
+#elif defined(_MSC_VER) && (_MSC_VER >= 1600)
+#define __static_assert_is_defined 1
+#endif
+
+#else
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+#define __static_assert_is_defined 1
+#elif __has_feature(c_static_assert)
+#define static_assert(pred, msg) _Static_assert(pred, msg)
+#define __static_assert_is_defined 1
+#elif defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+/* In case the clib headers are not compliant. */
+#define static_assert(pred, msg) _Static_assert(pred, msg)
+#define __static_assert_is_defined 1
+#endif
+
+#endif /* __cplusplus */
+#endif /* __static_assert_is_defined */
+
+
+#if !defined(__static_assert_is_defined)
+
+#define __PSTATIC_ASSERT_CONCAT_(a, b) static_assert_scope_##a##_line_##b
+#define __PSTATIC_ASSERT_CONCAT(a, b) __PSTATIC_ASSERT_CONCAT_(a, b)
+#ifdef __COUNTER__
+#define static_assert(e, msg) enum { __PSTATIC_ASSERT_CONCAT(__COUNTER__, __LINE__) = 1/(!!(e)) }
+#else
+#include "pstatic_assert_scope.h"
+#define static_assert(e, msg) enum { __PSTATIC_ASSERT_CONCAT(__PSTATIC_ASSERT_COUNTER, __LINE__) = 1/(int)(!!(e)) }
+#endif
+
+#define __static_assert_is_defined 1
+
+#endif /* __static_assert_is_defined */
+
+#endif /* PSTATIC_ASSERT_H */
+
+/* Update scope counter outside of include guard. */
+#ifdef __PSTATIC_ASSERT_COUNTER
+#include "pstatic_assert_scope.h"
+#endif
diff --git a/include/flatcc/portable/pstatic_assert_scope.h b/include/flatcc/portable/pstatic_assert_scope.h
new file mode 100644
index 0000000..71a0c29
--- /dev/null
+++ b/include/flatcc/portable/pstatic_assert_scope.h
@@ -0,0 +1,280 @@
+/*
+ * january, 2017, ported to portable library by mikkelfj.
+ * Based on dbgtools static assert counter, but with renamed macros.
+ */
+
+/*
+ dbgtools - platform independent wrapping of "nice to have" debug functions.
+
+ version 0.1, october, 2013
+
+ https://github.com/wc-duck/dbgtools
+
+ Copyright (C) 2013- Fredrik Kihlander
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Fredrik Kihlander
+*/
+
+/**
+ * Auto-generated header implementing a counter that increases by each include of the file.
+ *
+ * This header will define the macro __PSTATIC_ASSERT_COUNTER to be increased for each inclusion of the file.
+ *
+ * It has been generated with 3 amount of digits resulting in the counter wrapping around after
+ * 10000 inclusions.
+ *
+ * Usage:
+ *
+ * #include "this_header.h"
+ * int a = __PSTATIC_ASSERT_COUNTER; // 0
+ * #include "this_header.h"
+ * int b = __PSTATIC_ASSERT_COUNTER; // 1
+ * #include "this_header.h"
+ * int c = __PSTATIC_ASSERT_COUNTER; // 2
+ * #include "this_header.h"
+ * int d = __PSTATIC_ASSERT_COUNTER; // 3
+ */
+
+#ifndef __PSTATIC_ASSERT_COUNTER
+# define __PSTATIC_ASSERT_COUNTER_0 0
+# define __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_D1_0
+# define __PSTATIC_ASSERT_COUNTER_D2_0
+# define __PSTATIC_ASSERT_COUNTER_D3_0
+#endif /* __PSTATIC_ASSERT_COUNTER */
+
+#if !defined( __PSTATIC_ASSERT_COUNTER_D0_0 )
+# define __PSTATIC_ASSERT_COUNTER_D0_0
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 0
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_1 )
+# define __PSTATIC_ASSERT_COUNTER_D0_1
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 1
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_2 )
+# define __PSTATIC_ASSERT_COUNTER_D0_2
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 2
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_3 )
+# define __PSTATIC_ASSERT_COUNTER_D0_3
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 3
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_4 )
+# define __PSTATIC_ASSERT_COUNTER_D0_4
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 4
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_5 )
+# define __PSTATIC_ASSERT_COUNTER_D0_5
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 5
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_6 )
+# define __PSTATIC_ASSERT_COUNTER_D0_6
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 6
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_7 )
+# define __PSTATIC_ASSERT_COUNTER_D0_7
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 7
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_8 )
+# define __PSTATIC_ASSERT_COUNTER_D0_8
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 8
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_9 )
+# define __PSTATIC_ASSERT_COUNTER_D0_9
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 9
+#else
+# undef __PSTATIC_ASSERT_COUNTER_D0_1
+# undef __PSTATIC_ASSERT_COUNTER_D0_2
+# undef __PSTATIC_ASSERT_COUNTER_D0_3
+# undef __PSTATIC_ASSERT_COUNTER_D0_4
+# undef __PSTATIC_ASSERT_COUNTER_D0_5
+# undef __PSTATIC_ASSERT_COUNTER_D0_6
+# undef __PSTATIC_ASSERT_COUNTER_D0_7
+# undef __PSTATIC_ASSERT_COUNTER_D0_8
+# undef __PSTATIC_ASSERT_COUNTER_D0_9
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D1_0 )
+# define __PSTATIC_ASSERT_COUNTER_D1_0
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_1 )
+# define __PSTATIC_ASSERT_COUNTER_D1_1
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_2 )
+# define __PSTATIC_ASSERT_COUNTER_D1_2
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_3 )
+# define __PSTATIC_ASSERT_COUNTER_D1_3
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_4 )
+# define __PSTATIC_ASSERT_COUNTER_D1_4
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_5 )
+# define __PSTATIC_ASSERT_COUNTER_D1_5
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_6 )
+# define __PSTATIC_ASSERT_COUNTER_D1_6
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_7 )
+# define __PSTATIC_ASSERT_COUNTER_D1_7
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_8 )
+# define __PSTATIC_ASSERT_COUNTER_D1_8
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_9 )
+# define __PSTATIC_ASSERT_COUNTER_D1_9
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D1_1
+# undef __PSTATIC_ASSERT_COUNTER_D1_2
+# undef __PSTATIC_ASSERT_COUNTER_D1_3
+# undef __PSTATIC_ASSERT_COUNTER_D1_4
+# undef __PSTATIC_ASSERT_COUNTER_D1_5
+# undef __PSTATIC_ASSERT_COUNTER_D1_6
+# undef __PSTATIC_ASSERT_COUNTER_D1_7
+# undef __PSTATIC_ASSERT_COUNTER_D1_8
+# undef __PSTATIC_ASSERT_COUNTER_D1_9
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D2_0 )
+# define __PSTATIC_ASSERT_COUNTER_D2_0
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_1 )
+# define __PSTATIC_ASSERT_COUNTER_D2_1
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_2 )
+# define __PSTATIC_ASSERT_COUNTER_D2_2
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_3 )
+# define __PSTATIC_ASSERT_COUNTER_D2_3
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_4 )
+# define __PSTATIC_ASSERT_COUNTER_D2_4
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_5 )
+# define __PSTATIC_ASSERT_COUNTER_D2_5
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_6 )
+# define __PSTATIC_ASSERT_COUNTER_D2_6
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_7 )
+# define __PSTATIC_ASSERT_COUNTER_D2_7
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_8 )
+# define __PSTATIC_ASSERT_COUNTER_D2_8
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_9 )
+# define __PSTATIC_ASSERT_COUNTER_D2_9
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D2_1
+# undef __PSTATIC_ASSERT_COUNTER_D2_2
+# undef __PSTATIC_ASSERT_COUNTER_D2_3
+# undef __PSTATIC_ASSERT_COUNTER_D2_4
+# undef __PSTATIC_ASSERT_COUNTER_D2_5
+# undef __PSTATIC_ASSERT_COUNTER_D2_6
+# undef __PSTATIC_ASSERT_COUNTER_D2_7
+# undef __PSTATIC_ASSERT_COUNTER_D2_8
+# undef __PSTATIC_ASSERT_COUNTER_D2_9
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D3_0 )
+# define __PSTATIC_ASSERT_COUNTER_D3_0
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_1 )
+# define __PSTATIC_ASSERT_COUNTER_D3_1
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_2 )
+# define __PSTATIC_ASSERT_COUNTER_D3_2
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_3 )
+# define __PSTATIC_ASSERT_COUNTER_D3_3
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_4 )
+# define __PSTATIC_ASSERT_COUNTER_D3_4
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_5 )
+# define __PSTATIC_ASSERT_COUNTER_D3_5
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_6 )
+# define __PSTATIC_ASSERT_COUNTER_D3_6
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_7 )
+# define __PSTATIC_ASSERT_COUNTER_D3_7
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_8 )
+# define __PSTATIC_ASSERT_COUNTER_D3_8
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_9 )
+# define __PSTATIC_ASSERT_COUNTER_D3_9
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D3_1
+# undef __PSTATIC_ASSERT_COUNTER_D3_2
+# undef __PSTATIC_ASSERT_COUNTER_D3_3
+# undef __PSTATIC_ASSERT_COUNTER_D3_4
+# undef __PSTATIC_ASSERT_COUNTER_D3_5
+# undef __PSTATIC_ASSERT_COUNTER_D3_6
+# undef __PSTATIC_ASSERT_COUNTER_D3_7
+# undef __PSTATIC_ASSERT_COUNTER_D3_8
+# undef __PSTATIC_ASSERT_COUNTER_D3_9
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 0
+# endif
+# endif
+# endif
+#endif
+
+#define __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO_(digit0,digit1,digit2,digit3) digit0##digit1##digit2##digit3
+#define __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO(digit0,digit1,digit2,digit3) __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO_(digit0,digit1,digit2,digit3)
+#undef __PSTATIC_ASSERT_COUNTER
+#define __PSTATIC_ASSERT_COUNTER __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO(__PSTATIC_ASSERT_COUNTER_3,__PSTATIC_ASSERT_COUNTER_2,__PSTATIC_ASSERT_COUNTER_1,__PSTATIC_ASSERT_COUNTER_0)
diff --git a/include/flatcc/portable/pstdalign.h b/include/flatcc/portable/pstdalign.h
new file mode 100644
index 0000000..169fe27
--- /dev/null
+++ b/include/flatcc/portable/pstdalign.h
@@ -0,0 +1,162 @@
+#ifndef PSTDALIGN_H
+#define PSTDALIGN_H
+
+/*
+ * NOTE: aligned_alloc is defined via paligned_alloc.h
+ * and requires aligned_free to be fully portable although
+ * free also works on C11 and platforms with posix_memalign.
+ *
+ * NOTE: C++11 defines alignas as a keyword but then also defines
+ * __alignas_is_defined.
+ *
+ * C++14 does not define __alignas_is_defined, at least sometimes.
+ *
+ * GCC 8.3 reverts on this and makes C++11 behave the same as C++14
+ * preventing a simple __cplusplus version check from working.
+ *
+ * Clang C++ without std=c++11 or std=c++14 does define alignas
+ * but does so incorrectly wrt. C11 and C++11 semantics because
+ * `alignas(4) float x;` is not recognized.
+ * To fix such issues, either move to a std version, or
+ * include a working stdalign.h for the given compiler before
+ * this file.
+ *
+ * newlib defines _Alignas and _Alignof in sys/cdefs but relies on
+ * the gcc version for <stdalign.h> which can lead to conflicts if
+ * stdalign is not included.
+ *
+ * newlib's need for <stdalign.h> conflicts with broken C++ stdalign
+ * but this can be fixed by using std=C++11 or newer.
+ *
+ * MSVC does not support <stdalign.h> at least up to MSVC 2015,
+ * but does appear to support alignas and alignof keywords in
+ * recent standard C++.
+ *
+ * TCC only supports alignas with a numeric argument like
+ * `alignas(4)`, but not `alignas(float)`.
+ *
+ * If stdalign.h is supported but heuristics in this file are
+ * insufficient to detect this, try including <stdalign.h> manually
+ * or define HAVE_STDALIGN_H.
+ */
+
+/* https://github.com/dvidelabs/flatcc/issues/130 */
+#ifndef __alignas_is_defined
+#if defined(__cplusplus)
+#if __cplusplus == 201103 && !defined(__clang__) && ((__GNUC__ > 8) || (__GNUC__ == 8 && __GNUC_MINOR__ >= 3))
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+#include <stdalign.h>
+#endif
+#endif
+#endif
+
+/* Allow for alternative solution to be included first. */
+#ifndef __alignas_is_defined
+
+#ifdef __cplusplus
+#if defined(PORTABLE_PATCH_CPLUSPLUS_STDALIGN)
+#include <stdalign.h>
+#undef alignas
+#define alignas(t) __attribute__((__aligned__(t)))
+#endif
+#endif
+
+#if !defined(PORTABLE_HAS_INCLUDE_STDALIGN)
+#if defined(__has_include)
+#if __has_include(<stdalign.h>)
+#define PORTABLE_HAS_INCLUDE_STDALIGN 1
+#else
+#define PORTABLE_HAS_INCLUDE_STDALIGN 0
+#endif
+#endif
+#endif
+
+ /* https://lists.gnu.org/archive/html/bug-gnulib/2015-08/msg00003.html */
+#if defined(__cplusplus)
+#if !defined(_MSC_VER)
+#include <stdalign.h>
+#endif
+#if __cplusplus > 201103
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+#endif
+#elif PORTABLE_HAS_INCLUDE_STDALIGN
+#include <stdalign.h>
+#elif !defined(__clang__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
+#include <stdalign.h>
+#elif defined(HAVE_STDALIGN_H)
+#include <stdalign.h>
+#endif
+
+#endif /* __alignas_is_defined */
+
+#ifndef __alignas_is_defined
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (!defined(__clang__) && defined(__GNUC__) && \
+ ((__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)))
+#undef PORTABLE_C11_STDALIGN_MISSING
+#define PORTABLE_C11_STDALIGN_MISSING
+#endif
+
+#if defined(__IBMC__)
+#undef PORTABLE_C11_STDALIGN_MISSING
+#define PORTABLE_C11_STDALIGN_MISSING
+#endif
+
+#if ((defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && \
+ !defined(PORTABLE_C11_STDALIGN_MISSING))
+/* C11 or newer */
+#include <stdalign.h>
+#else
+#if defined(__GNUC__) || defined(__IBM_ALIGNOF__) || defined(__clang__)
+
+#ifndef _Alignas
+#define _Alignas(t) __attribute__((__aligned__(t)))
+#endif
+
+#ifndef _Alignof
+#define _Alignof(t) __alignof__(t)
+#endif
+
+#elif defined(_MSC_VER)
+
+#define _Alignas(t) __declspec (align(t))
+#define _Alignof(t) __alignof(t)
+
+#elif defined(__TINYC__)
+
+/* Supports `_Alignas(integer-expression)`, but not `_Alignas(type)`. */
+#define _Alignas(t) __attribute__(aligned(t))
+#define _Alignof(t) __alignof__(t)
+
+#else
+#error please update pstdalign.h with support for current compiler and library
+#endif
+
+#endif /* __STDC__ */
+
+#ifndef alignas
+#define alignas _Alignas
+#endif
+
+#ifndef alignof
+#define alignof _Alignof
+#endif
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __alignas_is_defined */
+
+#include "paligned_alloc.h"
+
+#endif /* PSTDALIGN_H */
diff --git a/include/flatcc/portable/pstdbool.h b/include/flatcc/portable/pstdbool.h
new file mode 100644
index 0000000..28fc89c
--- /dev/null
+++ b/include/flatcc/portable/pstdbool.h
@@ -0,0 +1,37 @@
+#ifndef PSTDBOOL_H
+#define PSTDBOOL_H
+
+#if !defined(__cplusplus) && !__bool_true_false_are_defined && !defined(bool) && !defined(__STDBOOL_H)
+
+#ifdef HAVE_STDBOOL_H
+
+#include <stdbool.h>
+
+#elif (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+
+#define bool _Bool
+#define true 1
+#define false 0
+#define __bool_true_false_are_defined 1
+
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+
+#define bool bool
+#define true true
+#define false false
+#define __bool_true_false_are_defined 1
+
+#else
+
+typedef unsigned char _Portable_bool;
+#define bool _Portable_bool
+#define true 1
+#define false 0
+#define __bool_true_false_are_defined 1
+
+#endif
+
+#endif
+
+#endif /* PSTDBOOL_H */
diff --git a/include/flatcc/portable/pstdint.h b/include/flatcc/portable/pstdint.h
new file mode 100644
index 0000000..d522fed
--- /dev/null
+++ b/include/flatcc/portable/pstdint.h
@@ -0,0 +1,898 @@
+/* A portable stdint.h
+ ****************************************************************************
+ * BSD License:
+ ****************************************************************************
+ *
+ * Copyright (c) 2005-2016 Paul Hsieh
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************
+ *
+ * Version 0.1.15.2
+ *
+ * The ANSI C standard committee, for the C99 standard, specified the
+ * inclusion of a new standard include file called stdint.h. This is
+ * a very useful and long desired include file which contains several
+ * very precise definitions for integer scalar types that is
+ * critically important for making portable several classes of
+ * applications including cryptography, hashing, variable length
+ * integer libraries and so on. But for most developers its likely
+ * useful just for programming sanity.
+ *
+ * The problem is that some compiler vendors chose to ignore the C99
+ * standard and some older compilers have no opportunity to be updated.
+ * Because of this situation, simply including stdint.h in your code
+ * makes it unportable.
+ *
+ * So that's what this file is all about. Its an attempt to build a
+ * single universal include file that works on as many platforms as
+ * possible to deliver what stdint.h is supposed to. Even compilers
+ * that already come with stdint.h can use this file instead without
+ * any loss of functionality. A few things that should be noted about
+ * this file:
+ *
+ * 1) It is not guaranteed to be portable and/or present an identical
+ * interface on all platforms. The extreme variability of the
+ * ANSI C standard makes this an impossibility right from the
+ * very get go. Its really only meant to be useful for the vast
+ * majority of platforms that possess the capability of
+ * implementing usefully and precisely defined, standard sized
+ * integer scalars. Systems which are not intrinsically 2s
+ * complement may produce invalid constants.
+ *
+ * 2) There is an unavoidable use of non-reserved symbols.
+ *
+ * 3) Other standard include files are invoked.
+ *
+ * 4) This file may come in conflict with future platforms that do
+ * include stdint.h. The hope is that one or the other can be
+ * used with no real difference.
+ *
+ * 5) In the current version, if your platform can't represent
+ * int32_t, int16_t and int8_t, it just dumps out with a compiler
+ * error.
+ *
+ * 6) 64 bit integers may or may not be defined. Test for their
+ * presence with the test: #ifdef INT64_MAX or #ifdef UINT64_MAX.
+ * Note that this is different from the C99 specification which
+ * requires the existence of 64 bit support in the compiler. If
+ * this is not defined for your platform, yet it is capable of
+ * dealing with 64 bits then it is because this file has not yet
+ * been extended to cover all of your system's capabilities.
+ *
+ * 7) (u)intptr_t may or may not be defined. Test for its presence
+ * with the test: #ifdef PTRDIFF_MAX. If this is not defined
+ * for your platform, then it is because this file has not yet
+ * been extended to cover all of your system's capabilities, not
+ * because its optional.
+ *
+ * 8) The following might not been defined even if your platform is
+ * capable of defining it:
+ *
+ * WCHAR_MIN
+ * WCHAR_MAX
+ * (u)int64_t
+ * PTRDIFF_MIN
+ * PTRDIFF_MAX
+ * (u)intptr_t
+ *
+ * 9) The following have not been defined:
+ *
+ * WINT_MIN
+ * WINT_MAX
+ *
+ * 10) The criteria for defining (u)int_least(*)_t isn't clear,
+ * except for systems which don't have a type that precisely
+ * defined 8, 16, or 32 bit types (which this include file does
+ * not support anyways). Default definitions have been given.
+ *
+ * 11) The criteria for defining (u)int_fast(*)_t isn't something I
+ * would trust to any particular compiler vendor or the ANSI C
+ * committee. It is well known that "compatible systems" are
+ * commonly created that have very different performance
+ * characteristics from the systems they are compatible with,
+ * especially those whose vendors make both the compiler and the
+ * system. Default definitions have been given, but its strongly
+ * recommended that users never use these definitions for any
+ * reason (they do *NOT* deliver any serious guarantee of
+ * improved performance -- not in this file, nor any vendor's
+ * stdint.h).
+ *
+ * 12) The following macros:
+ *
+ * PRINTF_INTMAX_MODIFIER
+ * PRINTF_INT64_MODIFIER
+ * PRINTF_INT32_MODIFIER
+ * PRINTF_INT16_MODIFIER
+ * PRINTF_LEAST64_MODIFIER
+ * PRINTF_LEAST32_MODIFIER
+ * PRINTF_LEAST16_MODIFIER
+ * PRINTF_INTPTR_MODIFIER
+ *
+ * are strings which have been defined as the modifiers required
+ * for the "d", "u" and "x" printf formats to correctly output
+ * (u)intmax_t, (u)int64_t, (u)int32_t, (u)int16_t, (u)least64_t,
+ * (u)least32_t, (u)least16_t and (u)intptr_t types respectively.
+ * PRINTF_INTPTR_MODIFIER is not defined for some systems which
+ * provide their own stdint.h. PRINTF_INT64_MODIFIER is not
+ * defined if INT64_MAX is not defined. These are an extension
+ * beyond what C99 specifies must be in stdint.h.
+ *
+ * In addition, the following macros are defined:
+ *
+ * PRINTF_INTMAX_HEX_WIDTH
+ * PRINTF_INT64_HEX_WIDTH
+ * PRINTF_INT32_HEX_WIDTH
+ * PRINTF_INT16_HEX_WIDTH
+ * PRINTF_INT8_HEX_WIDTH
+ * PRINTF_INTMAX_DEC_WIDTH
+ * PRINTF_INT64_DEC_WIDTH
+ * PRINTF_INT32_DEC_WIDTH
+ * PRINTF_INT16_DEC_WIDTH
+ * PRINTF_UINT8_DEC_WIDTH
+ * PRINTF_UINTMAX_DEC_WIDTH
+ * PRINTF_UINT64_DEC_WIDTH
+ * PRINTF_UINT32_DEC_WIDTH
+ * PRINTF_UINT16_DEC_WIDTH
+ * PRINTF_UINT8_DEC_WIDTH
+ *
+ * Which specifies the maximum number of characters required to
+ * print the number of that type in either hexadecimal or decimal.
+ * These are an extension beyond what C99 specifies must be in
+ * stdint.h.
+ *
+ * Compilers tested (all with 0 warnings at their highest respective
+ * settings): Borland Turbo C 2.0, WATCOM C/C++ 11.0 (16 bits and 32
+ * bits), Microsoft Visual C++ 6.0 (32 bit), Microsoft Visual Studio
+ * .net (VC7), Intel C++ 4.0, GNU gcc v3.3.3
+ *
+ * This file should be considered a work in progress. Suggestions for
+ * improvements, especially those which increase coverage are strongly
+ * encouraged.
+ *
+ * Acknowledgements
+ *
+ * The following people have made significant contributions to the
+ * development and testing of this file:
+ *
+ * Chris Howie
+ * John Steele Scott
+ * Dave Thorup
+ * John Dill
+ * Florian Wobbe
+ * Christopher Sean Morrison
+ * Mikkel Fahnoe Jorgensen
+ *
+ */
+
+#include <stddef.h>
+#include <limits.h>
+#include <signal.h>
+
+/*
+ * For gcc with _STDINT_H, fill in the PRINTF_INT*_MODIFIER macros, and
+ * do nothing else. On the Mac OS X version of gcc this is _STDINT_H_.
+ */
+
+#if ((defined(_MSC_VER) && _MSC_VER >= 1600) || (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (defined (__WATCOMC__) && (defined (_STDINT_H_INCLUDED) || __WATCOMC__ >= 1250)) || (defined(__GNUC__) && (__GNUC__ > 3 || defined(_STDINT_H) || defined(_STDINT_H_) || defined (__UINT_FAST64_TYPE__)) )) && !defined (_PSTDINT_H_INCLUDED)
+#include <stdint.h>
+#define _PSTDINT_H_INCLUDED
+# if defined(__GNUC__) && (defined(__x86_64__) || defined(__ppc64__)) && !(defined(__APPLE__) && defined(__MACH__))
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "l"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# else
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# if (UINT_MAX == UINT32_MAX)
+# define PRINTF_INT32_MODIFIER ""
+# else
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+# endif
+# endif
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_UINT64_HEX_WIDTH
+# define PRINTF_UINT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_UINT32_HEX_WIDTH
+# define PRINTF_UINT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_UINT16_HEX_WIDTH
+# define PRINTF_UINT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_UINT8_HEX_WIDTH
+# define PRINTF_UINT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+# endif
+# ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+# endif
+# ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_HEX_WIDTH
+# define PRINTF_UINTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_DEC_WIDTH
+# define PRINTF_UINTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+
+/*
+ * Something really weird is going on with Open Watcom. Just pull some of
+ * these duplicated definitions from Open Watcom's stdint.h file for now.
+ */
+
+# if defined (__WATCOMC__) && __WATCOMC__ >= 1250
+# if !defined (INT64_C)
+# define INT64_C(x) (x + (INT64_MAX - INT64_MAX))
+# endif
+# if !defined (UINT64_C)
+# define UINT64_C(x) (x + (UINT64_MAX - UINT64_MAX))
+# endif
+# if !defined (INT32_C)
+# define INT32_C(x) (x + (INT32_MAX - INT32_MAX))
+# endif
+# if !defined (UINT32_C)
+# define UINT32_C(x) (x + (UINT32_MAX - UINT32_MAX))
+# endif
+# if !defined (INT16_C)
+# define INT16_C(x) (x)
+# endif
+# if !defined (UINT16_C)
+# define UINT16_C(x) (x)
+# endif
+# if !defined (INT8_C)
+# define INT8_C(x) (x)
+# endif
+# if !defined (UINT8_C)
+# define UINT8_C(x) (x)
+# endif
+# if !defined (UINT64_MAX)
+# define UINT64_MAX 18446744073709551615ULL
+# endif
+# if !defined (INT64_MAX)
+# define INT64_MAX 9223372036854775807LL
+# endif
+# if !defined (UINT32_MAX)
+# define UINT32_MAX 4294967295UL
+# endif
+# if !defined (INT32_MAX)
+# define INT32_MAX 2147483647L
+# endif
+# if !defined (INTMAX_MAX)
+# define INTMAX_MAX INT64_MAX
+# endif
+# if !defined (INTMAX_MIN)
+# define INTMAX_MIN INT64_MIN
+# endif
+# endif
+#endif
+
+#ifndef _PSTDINT_H_INCLUDED
+#define _PSTDINT_H_INCLUDED
+
+#ifndef SIZE_MAX
+# define SIZE_MAX (~(size_t)0)
+#endif
+
+/*
+ * Deduce the type assignments from limits.h under the assumption that
+ * integer sizes in bits are powers of 2, and follow the ANSI
+ * definitions.
+ */
+
+#ifndef UINT8_MAX
+# define UINT8_MAX 0xff
+#endif
+#if !defined(uint8_t) && !defined(_UINT8_T)
+# if (UCHAR_MAX == UINT8_MAX) || defined (S_SPLINT_S)
+ typedef unsigned char uint8_t;
+# define UINT8_C(v) ((uint8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef INT8_MAX
+# define INT8_MAX 0x7f
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN INT8_C(0x80)
+#endif
+#if !defined(int8_t) && !defined(_INT8_T)
+# if (SCHAR_MAX == INT8_MAX) || defined (S_SPLINT_S)
+ typedef signed char int8_t;
+# define INT8_C(v) ((int8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef UINT16_MAX
+# define UINT16_MAX 0xffff
+#endif
+#if !defined(uint16_t) && !defined(_UINT16_T)
+#if (UINT_MAX == UINT16_MAX) || defined (S_SPLINT_S)
+ typedef unsigned int uint16_t;
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+# define UINT16_C(v) ((uint16_t) (v))
+#elif (USHRT_MAX == UINT16_MAX)
+ typedef unsigned short uint16_t;
+# define UINT16_C(v) ((uint16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT16_MAX
+# define INT16_MAX 0x7fff
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN INT16_C(0x8000)
+#endif
+#if !defined(int16_t) && !defined(_INT16_T)
+#if (INT_MAX == INT16_MAX) || defined (S_SPLINT_S)
+ typedef signed int int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT16_MAX)
+ typedef signed short int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef UINT32_MAX
+# define UINT32_MAX (0xffffffffUL)
+#endif
+#if !defined(uint32_t) && !defined(_UINT32_T)
+#if (ULONG_MAX == UINT32_MAX) || defined (S_SPLINT_S)
+ typedef unsigned long uint32_t;
+# define UINT32_C(v) v ## UL
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (UINT_MAX == UINT32_MAX)
+ typedef unsigned int uint32_t;
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# define UINT32_C(v) v ## U
+#elif (USHRT_MAX == UINT32_MAX)
+ typedef unsigned short uint32_t;
+# define UINT32_C(v) ((unsigned short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT32_MAX
+# define INT32_MAX (0x7fffffffL)
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN INT32_C(0x80000000)
+#endif
+#if !defined(int32_t) && !defined(_INT32_T)
+#if (LONG_MAX == INT32_MAX) || defined (S_SPLINT_S)
+ typedef signed long int32_t;
+# define INT32_C(v) v ## L
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (INT_MAX == INT32_MAX)
+ typedef signed int int32_t;
+# define INT32_C(v) v
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT32_MAX)
+ typedef signed short int32_t;
+# define INT32_C(v) ((short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+/*
+ * The macro stdint_int64_defined is temporarily used to record
+ * whether or not 64 integer support is available. It must be
+ * defined for any 64 integer extensions for new platforms that are
+ * added.
+ */
+
+#undef stdint_int64_defined
+#if (defined(__STDC__) && defined(__STDC_VERSION__)) || defined (S_SPLINT_S)
+# if (__STDC__ && __STDC_VERSION__ >= 199901L) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# endif
+#endif
+
+#if !defined (stdint_int64_defined)
+# if defined(__GNUC__)
+# define stdint_int64_defined
+ __extension__ typedef long long int64_t;
+ __extension__ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif defined(__MWERKS__) || defined (__SUNPRO_C) || defined (__SUNPRO_CC) || defined (__APPLE_CC__) || defined (_LONG_LONG) || defined (_CRAYC) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif (defined(__WATCOMC__) && defined(__WATCOM_INT64__)) || (defined(_MSC_VER) && _INTEGRAL_MAX_BITS >= 64) || (defined (__BORLANDC__) && __BORLANDC__ > 0x460) || defined (__alpha) || defined (__DECC)
+# define stdint_int64_defined
+ typedef __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+# define UINT64_C(v) v ## UI64
+# define INT64_C(v) v ## I64
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "I64"
+# endif
+# endif
+#endif
+
+#if !defined (LONG_LONG_MAX) && defined (INT64_C)
+# define LONG_LONG_MAX INT64_C (9223372036854775807)
+#endif
+#ifndef ULONG_LONG_MAX
+# define ULONG_LONG_MAX UINT64_C (18446744073709551615)
+#endif
+
+#if !defined (INT64_MAX) && defined (INT64_C)
+# define INT64_MAX INT64_C (9223372036854775807)
+#endif
+#if !defined (INT64_MIN) && defined (INT64_C)
+# define INT64_MIN INT64_C (-9223372036854775808)
+#endif
+#if !defined (UINT64_MAX) && defined (INT64_C)
+# define UINT64_MAX UINT64_C (18446744073709551615)
+#endif
+
+/*
+ * Width of hexadecimal for number field.
+ */
+
+#ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+#endif
+#ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+#endif
+#ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+#endif
+#ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+#endif
+#ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+#endif
+#ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+#endif
+#ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+#endif
+#ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+#endif
+
+/*
+ * Ok, lets not worry about 128 bit integers for now. Moore's law says
+ * we don't need to worry about that until about 2040 at which point
+ * we'll have bigger things to worry about.
+ */
+
+#ifdef stdint_int64_defined
+ typedef int64_t intmax_t;
+ typedef uint64_t uintmax_t;
+# define INTMAX_MAX INT64_MAX
+# define INTMAX_MIN INT64_MIN
+# define UINTMAX_MAX UINT64_MAX
+# define UINTMAX_C(v) UINT64_C(v)
+# define INTMAX_C(v) INT64_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT64_DEC_WIDTH
+# endif
+#else
+ typedef int32_t intmax_t;
+ typedef uint32_t uintmax_t;
+# define INTMAX_MAX INT32_MAX
+# define UINTMAX_MAX UINT32_MAX
+# define UINTMAX_C(v) UINT32_C(v)
+# define INTMAX_C(v) INT32_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT32_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT32_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT32_DEC_WIDTH
+# endif
+#endif
+
+/*
+ * Because this file currently only supports platforms which have
+ * precise powers of 2 as bit sizes for the default integers, the
+ * least definitions are all trivial. Its possible that a future
+ * version of this file could have different definitions.
+ */
+
+#ifndef stdint_least_defined
+ typedef int8_t int_least8_t;
+ typedef uint8_t uint_least8_t;
+ typedef int16_t int_least16_t;
+ typedef uint16_t uint_least16_t;
+ typedef int32_t int_least32_t;
+ typedef uint32_t uint_least32_t;
+# define PRINTF_LEAST32_MODIFIER PRINTF_INT32_MODIFIER
+# define PRINTF_LEAST16_MODIFIER PRINTF_INT16_MODIFIER
+# define UINT_LEAST8_MAX UINT8_MAX
+# define INT_LEAST8_MAX INT8_MAX
+# define UINT_LEAST16_MAX UINT16_MAX
+# define INT_LEAST16_MAX INT16_MAX
+# define UINT_LEAST32_MAX UINT32_MAX
+# define INT_LEAST32_MAX INT32_MAX
+# define INT_LEAST8_MIN INT8_MIN
+# define INT_LEAST16_MIN INT16_MIN
+# define INT_LEAST32_MIN INT32_MIN
+# ifdef stdint_int64_defined
+ typedef int64_t int_least64_t;
+ typedef uint64_t uint_least64_t;
+# define PRINTF_LEAST64_MODIFIER PRINTF_INT64_MODIFIER
+# define UINT_LEAST64_MAX UINT64_MAX
+# define INT_LEAST64_MAX INT64_MAX
+# define INT_LEAST64_MIN INT64_MIN
+# endif
+#endif
+#undef stdint_least_defined
+
+/*
+ * The ANSI C committee pretending to know or specify anything about
+ * performance is the epitome of misguided arrogance. The mandate of
+ * this file is to *ONLY* ever support that absolute minimum
+ * definition of the fast integer types, for compatibility purposes.
+ * No extensions, and no attempt to suggest what may or may not be a
+ * faster integer type will ever be made in this file. Developers are
+ * warned to stay away from these types when using this or any other
+ * stdint.h.
+ */
+
+typedef int_least8_t int_fast8_t;
+typedef uint_least8_t uint_fast8_t;
+typedef int_least16_t int_fast16_t;
+typedef uint_least16_t uint_fast16_t;
+typedef int_least32_t int_fast32_t;
+typedef uint_least32_t uint_fast32_t;
+#define UINT_FAST8_MAX UINT_LEAST8_MAX
+#define INT_FAST8_MAX INT_LEAST8_MAX
+#define UINT_FAST16_MAX UINT_LEAST16_MAX
+#define INT_FAST16_MAX INT_LEAST16_MAX
+#define UINT_FAST32_MAX UINT_LEAST32_MAX
+#define INT_FAST32_MAX INT_LEAST32_MAX
+#define INT_FAST8_MIN INT_LEAST8_MIN
+#define INT_FAST16_MIN INT_LEAST16_MIN
+#define INT_FAST32_MIN INT_LEAST32_MIN
+#ifdef stdint_int64_defined
+ typedef int_least64_t int_fast64_t;
+ typedef uint_least64_t uint_fast64_t;
+# define UINT_FAST64_MAX UINT_LEAST64_MAX
+# define INT_FAST64_MAX INT_LEAST64_MAX
+# define INT_FAST64_MIN INT_LEAST64_MIN
+#endif
+
+#undef stdint_int64_defined
+
+/*
+ * Whatever piecemeal, per compiler thing we can do about the wchar_t
+ * type limits.
+ */
+
+#if defined(__WATCOMC__) || defined(_MSC_VER) || defined (__GNUC__)
+# include <wchar.h>
+# ifndef WCHAR_MIN
+# define WCHAR_MIN 0
+# endif
+# ifndef WCHAR_MAX
+# define WCHAR_MAX ((wchar_t)-1)
+# endif
+#endif
+
+/*
+ * Whatever piecemeal, per compiler/platform thing we can do about the
+ * (u)intptr_t types and limits.
+ */
+
+#if (defined (_MSC_VER) && defined (_UINTPTR_T_DEFINED)) || defined (_UINTPTR_T)
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+#ifndef STDINT_H_UINTPTR_T_DEFINED
+# if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) || defined (_WIN64) || defined (__ppc64__)
+# define stdint_intptr_bits 64
+# elif defined (__WATCOMC__) || defined (__TURBOC__)
+# if defined(__TINY__) || defined(__SMALL__) || defined(__MEDIUM__)
+# define stdint_intptr_bits 16
+# else
+# define stdint_intptr_bits 32
+# endif
+# elif defined (__i386__) || defined (_WIN32) || defined (WIN32) || defined (__ppc64__)
+# define stdint_intptr_bits 32
+# elif defined (__INTEL_COMPILER)
+/* TODO -- what did Intel do about x86-64? */
+# else
+/* #error "This platform might not be supported yet" */
+# endif
+
+# ifdef stdint_intptr_bits
+# define stdint_intptr_glue3_i(a,b,c) a##b##c
+# define stdint_intptr_glue3(a,b,c) stdint_intptr_glue3_i(a,b,c)
+# ifndef PRINTF_INTPTR_MODIFIER
+# define PRINTF_INTPTR_MODIFIER stdint_intptr_glue3(PRINTF_INT,stdint_intptr_bits,_MODIFIER)
+# endif
+# ifndef PTRDIFF_MAX
+# define PTRDIFF_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef PTRDIFF_MIN
+# define PTRDIFF_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef UINTPTR_MAX
+# define UINTPTR_MAX stdint_intptr_glue3(UINT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MAX
+# define INTPTR_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MIN
+# define INTPTR_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef INTPTR_C
+# define INTPTR_C(x) stdint_intptr_glue3(INT,stdint_intptr_bits,_C)(x)
+# endif
+# ifndef UINTPTR_C
+# define UINTPTR_C(x) stdint_intptr_glue3(UINT,stdint_intptr_bits,_C)(x)
+# endif
+ typedef stdint_intptr_glue3(uint,stdint_intptr_bits,_t) uintptr_t;
+ typedef stdint_intptr_glue3( int,stdint_intptr_bits,_t) intptr_t;
+# else
+/* TODO -- This following is likely wrong for some platforms, and does
+ nothing for the definition of uintptr_t. */
+ typedef ptrdiff_t intptr_t;
+# endif
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+/*
+ * Assumes sig_atomic_t is signed and we have a 2s complement machine.
+ */
+
+#ifndef SIG_ATOMIC_MAX
+# define SIG_ATOMIC_MAX ((((sig_atomic_t) 1) << (sizeof (sig_atomic_t)*CHAR_BIT-1)) - 1)
+#endif
+
+#endif
+
+#if defined (__TEST_PSTDINT_FOR_CORRECTNESS)
+
+/*
+ * Please compile with the maximum warning settings to make sure macros are
+ * not defined more than once.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#define glue3_aux(x,y,z) x ## y ## z
+#define glue3(x,y,z) glue3_aux(x,y,z)
+
+#define DECLU(bits) glue3(uint,bits,_t) glue3(u,bits,) = glue3(UINT,bits,_C) (0);
+#define DECLI(bits) glue3(int,bits,_t) glue3(i,bits,) = glue3(INT,bits,_C) (0);
+
+#define DECL(us,bits) glue3(DECL,us,) (bits)
+
+#define TESTUMAX(bits) glue3(u,bits,) = ~glue3(u,bits,); if (glue3(UINT,bits,_MAX) != glue3(u,bits,)) printf ("Something wrong with UINT%d_MAX\n", bits)
+
+#define REPORTERROR(msg) { err_n++; if (err_first <= 0) err_first = __LINE__; printf msg; }
+
+int main () {
+ int err_n = 0;
+ int err_first = 0;
+ DECL(I,8)
+ DECL(U,8)
+ DECL(I,16)
+ DECL(U,16)
+ DECL(I,32)
+ DECL(U,32)
+#ifdef INT64_MAX
+ DECL(I,64)
+ DECL(U,64)
+#endif
+ intmax_t imax = INTMAX_C(0);
+ uintmax_t umax = UINTMAX_C(0);
+ char str0[256], str1[256];
+
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "d", INT32_C(2147483647));
+ if (0 != strcmp (str0, "2147483647")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_INT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_INT32_DEC_WIDTH : %s\n", PRINTF_INT32_DEC_WIDTH));
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "u", UINT32_C(4294967295));
+ if (0 != strcmp (str0, "4294967295")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_UINT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_UINT32_DEC_WIDTH : %s\n", PRINTF_UINT32_DEC_WIDTH));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d", INT64_C(9223372036854775807));
+ if (0 != strcmp (str1, "9223372036854775807")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_INT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_INT64_DEC_WIDTH : %s, %d\n", PRINTF_INT64_DEC_WIDTH, (int) strlen(str1)));
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "u", UINT64_C(18446744073709550591));
+ if (0 != strcmp (str1, "18446744073709550591")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_UINT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_UINT64_DEC_WIDTH : %s, %d\n", PRINTF_UINT64_DEC_WIDTH, (int) strlen(str1)));
+#endif
+
+ sprintf (str0, "%d %x\n", 0, ~0);
+
+ sprintf (str1, "%d %x\n", i8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i8 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u8 : %s\n", str1));
+ sprintf (str1, "%d %x\n", i16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i16 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u16 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "d %x\n", i32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i32 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "u %x\n", u32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u32 : %s\n", str1));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d %x\n", i64, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i64 : %s\n", str1));
+#endif
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "d %x\n", imax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with imax : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "u %x\n", umax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with umax : %s\n", str1));
+
+ TESTUMAX(8);
+ TESTUMAX(16);
+ TESTUMAX(32);
+#ifdef INT64_MAX
+ TESTUMAX(64);
+#endif
+
+#define STR(v) #v
+#define Q(v) printf ("sizeof " STR(v) " = %u\n", (unsigned) sizeof (v));
+ if (err_n) {
+ printf ("pstdint.h is not correct. Please use sizes below to correct it:\n");
+ }
+
+ Q(int)
+ Q(unsigned)
+ Q(long int)
+ Q(short int)
+ Q(int8_t)
+ Q(int16_t)
+ Q(int32_t)
+#ifdef INT64_MAX
+ Q(int64_t)
+#endif
+
+ return EXIT_SUCCESS;
+}
+
+#endif
diff --git a/include/flatcc/portable/punaligned.h b/include/flatcc/portable/punaligned.h
new file mode 100644
index 0000000..a380edd
--- /dev/null
+++ b/include/flatcc/portable/punaligned.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 Mikkel Fahnøe Jørgensen, dvide.com
+ *
+ * (MIT License)
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * - The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * - The Software is provided "as is", without warranty of any kind, express or
+ * implied, including but not limited to the warranties of merchantability,
+ * fitness for a particular purpose and noninfringement. In no event shall the
+ * authors or copyright holders be liable for any claim, damages or other
+ * liability, whether in an action of contract, tort or otherwise, arising from,
+ * out of or in connection with the Software or the use or other dealings in the
+ * Software.
+ */
+
+/* Fix: guard was misspelled PUNLIGNED_H, inconsistent with the closing
+ * `#endif / * PUNALIGNED_H * /` comment at the end of this header. */
+#ifndef PUNALIGNED_H
+#define PUNALIGNED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Allow the build to override the detection below. */
+#ifndef PORTABLE_UNALIGNED_ACCESS
+
+/* x86/x64 tolerate misaligned loads and stores; default to the fast path there. */
+#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+#define PORTABLE_UNALIGNED_ACCESS 1
+#else
+#define PORTABLE_UNALIGNED_ACCESS 0
+#endif
+
+#endif
+
+/* `unaligned_read_16` might not be defined if endianness was not determined.
+ * `unaligned_read_le16toh` is therefore used as the witness macro: if an
+ * earlier include already provided the unaligned accessors, skip all of the
+ * definitions below. */
+#if !defined(unaligned_read_le16toh)
+
+#include "pendian.h"
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#if PORTABLE_UNALIGNED_ACCESS
+
+/* Fast path for targets detected above as tolerating misaligned access:
+ * plain casted loads/stores. NOTE(review): these casts sidestep strict
+ * aliasing and alignment rules by design on this path. */
+#define unaligned_read_16(p) (*(uint16_t*)(p))
+#define unaligned_read_32(p) (*(uint32_t*)(p))
+#define unaligned_read_64(p) (*(uint64_t*)(p))
+
+#define unaligned_read_le16toh(p) le16toh(*(uint16_t*)(p))
+#define unaligned_read_le32toh(p) le32toh(*(uint32_t*)(p))
+#define unaligned_read_le64toh(p) le64toh(*(uint64_t*)(p))
+
+#define unaligned_read_be16toh(p) be16toh(*(uint16_t*)(p))
+#define unaligned_read_be32toh(p) be32toh(*(uint32_t*)(p))
+#define unaligned_read_be64toh(p) be64toh(*(uint64_t*)(p))
+
+#define unaligned_write_16(p, v) (*(uint16_t*)(p) = (uint16_t)(v))
+#define unaligned_write_32(p, v) (*(uint32_t*)(p) = (uint32_t)(v))
+#define unaligned_write_64(p, v) (*(uint64_t*)(p) = (uint64_t)(v))
+
+#define unaligned_write_htole16(p, v) (*(uint16_t*)(p) = htole16(v))
+#define unaligned_write_htole32(p, v) (*(uint32_t*)(p) = htole32(v))
+#define unaligned_write_htole64(p, v) (*(uint64_t*)(p) = htole64(v))
+
+#define unaligned_write_htobe16(p, v) (*(uint16_t*)(p) = htobe16(v))
+#define unaligned_write_htobe32(p, v) (*(uint32_t*)(p) = htobe32(v))
+#define unaligned_write_htobe64(p, v) (*(uint64_t*)(p) = htobe64(v))
+#else
+
+/* Portable path: assemble values byte by byte through uint8_t reads so no
+ * misaligned load is ever issued; the shift amounts encode the byte order. */
+#define unaligned_read_le16toh(p) ( \
+    (((uint16_t)(((uint8_t *)(p))[0])) << 0) | \
+    (((uint16_t)(((uint8_t *)(p))[1])) << 8))
+
+#define unaligned_read_le32toh(p) ( \
+    (((uint32_t)(((uint8_t *)(p))[0])) << 0) | \
+    (((uint32_t)(((uint8_t *)(p))[1])) << 8) | \
+    (((uint32_t)(((uint8_t *)(p))[2])) << 16) | \
+    (((uint32_t)(((uint8_t *)(p))[3])) << 24))
+
+#define unaligned_read_le64toh(p) ( \
+    (((uint64_t)(((uint8_t *)(p))[0])) << 0) | \
+    (((uint64_t)(((uint8_t *)(p))[1])) << 8) | \
+    (((uint64_t)(((uint8_t *)(p))[2])) << 16) | \
+    (((uint64_t)(((uint8_t *)(p))[3])) << 24) | \
+    (((uint64_t)(((uint8_t *)(p))[4])) << 32) | \
+    (((uint64_t)(((uint8_t *)(p))[5])) << 40) | \
+    (((uint64_t)(((uint8_t *)(p))[6])) << 48) | \
+    (((uint64_t)(((uint8_t *)(p))[7])) << 56))
+
+#define unaligned_read_be16toh(p) ( \
+    (((uint16_t)(((uint8_t *)(p))[0])) << 8) | \
+    (((uint16_t)(((uint8_t *)(p))[1])) << 0))
+
+#define unaligned_read_be32toh(p) ( \
+    (((uint32_t)(((uint8_t *)(p))[0])) << 24) | \
+    (((uint32_t)(((uint8_t *)(p))[1])) << 16) | \
+    (((uint32_t)(((uint8_t *)(p))[2])) << 8) | \
+    (((uint32_t)(((uint8_t *)(p))[3])) << 0))
+
+#define unaligned_read_be64toh(p) ( \
+    (((uint64_t)(((uint8_t *)(p))[0])) << 56) | \
+    (((uint64_t)(((uint8_t *)(p))[1])) << 48) | \
+    (((uint64_t)(((uint8_t *)(p))[2])) << 40) | \
+    (((uint64_t)(((uint8_t *)(p))[3])) << 32) | \
+    (((uint64_t)(((uint8_t *)(p))[4])) << 24) | \
+    (((uint64_t)(((uint8_t *)(p))[5])) << 16) | \
+    (((uint64_t)(((uint8_t *)(p))[6])) << 8) | \
+    (((uint64_t)(((uint8_t *)(p))[7])) << 0))
+
+/* Byte-wise stores: never issue a misaligned write; shifts encode byte order. */
+#define unaligned_write_htole16(p, v) do { \
+    ((uint8_t *)(p))[0] = (uint8_t)(((uint16_t)(v)) >> 0); \
+    ((uint8_t *)(p))[1] = (uint8_t)(((uint16_t)(v)) >> 8); \
+    } while (0)
+
+#define unaligned_write_htole32(p, v) do { \
+    ((uint8_t *)(p))[0] = (uint8_t)(((uint32_t)(v)) >> 0); \
+    ((uint8_t *)(p))[1] = (uint8_t)(((uint32_t)(v)) >> 8); \
+    ((uint8_t *)(p))[2] = (uint8_t)(((uint32_t)(v)) >> 16); \
+    ((uint8_t *)(p))[3] = (uint8_t)(((uint32_t)(v)) >> 24); \
+    } while (0)
+
+/* Fix: was declared `unaligned_write_htole64(p)` but the body expands `v`. */
+#define unaligned_write_htole64(p, v) do { \
+    ((uint8_t *)(p))[0] = (uint8_t)(((uint64_t)(v)) >> 0); \
+    ((uint8_t *)(p))[1] = (uint8_t)(((uint64_t)(v)) >> 8); \
+    ((uint8_t *)(p))[2] = (uint8_t)(((uint64_t)(v)) >> 16); \
+    ((uint8_t *)(p))[3] = (uint8_t)(((uint64_t)(v)) >> 24); \
+    ((uint8_t *)(p))[4] = (uint8_t)(((uint64_t)(v)) >> 32); \
+    ((uint8_t *)(p))[5] = (uint8_t)(((uint64_t)(v)) >> 40); \
+    ((uint8_t *)(p))[6] = (uint8_t)(((uint64_t)(v)) >> 48); \
+    ((uint8_t *)(p))[7] = (uint8_t)(((uint64_t)(v)) >> 56); \
+    } while (0)
+
+#define unaligned_write_htobe16(p, v) do { \
+    ((uint8_t *)(p))[0] = (uint8_t)(((uint16_t)(v)) >> 8); \
+    ((uint8_t *)(p))[1] = (uint8_t)(((uint16_t)(v)) >> 0); \
+    } while (0)
+
+#define unaligned_write_htobe32(p, v) do { \
+    ((uint8_t *)(p))[0] = (uint8_t)(((uint32_t)(v)) >> 24); \
+    ((uint8_t *)(p))[1] = (uint8_t)(((uint32_t)(v)) >> 16); \
+    ((uint8_t *)(p))[2] = (uint8_t)(((uint32_t)(v)) >> 8); \
+    ((uint8_t *)(p))[3] = (uint8_t)(((uint32_t)(v)) >> 0); \
+    } while (0)
+
+/* Fix: was declared `unaligned_write_htobe64(p)` but the body expands `v`. */
+#define unaligned_write_htobe64(p, v) do { \
+    ((uint8_t *)(p))[0] = (uint8_t)(((uint64_t)(v)) >> 56); \
+    ((uint8_t *)(p))[1] = (uint8_t)(((uint64_t)(v)) >> 48); \
+    ((uint8_t *)(p))[2] = (uint8_t)(((uint64_t)(v)) >> 40); \
+    ((uint8_t *)(p))[3] = (uint8_t)(((uint64_t)(v)) >> 32); \
+    ((uint8_t *)(p))[4] = (uint8_t)(((uint64_t)(v)) >> 24); \
+    ((uint8_t *)(p))[5] = (uint8_t)(((uint64_t)(v)) >> 16); \
+    ((uint8_t *)(p))[6] = (uint8_t)(((uint64_t)(v)) >> 8); \
+    ((uint8_t *)(p))[7] = (uint8_t)(((uint64_t)(v)) >> 0); \
+    } while (0)
+
+#if __LITTLE_ENDIAN__
+#define unaligned_read_16(p) unaligned_read_le16toh(p)
+#define unaligned_read_32(p) unaligned_read_le32toh(p)
+#define unaligned_read_64(p) unaligned_read_le64toh(p)
+
+/* Fix: the write aliases took only `(p)` but the targets need `(p, v)`;
+ * forward both the destination pointer and the value. */
+#define unaligned_write_16(p, v) unaligned_write_htole16(p, v)
+#define unaligned_write_32(p, v) unaligned_write_htole32(p, v)
+#define unaligned_write_64(p, v) unaligned_write_htole64(p, v)
+#endif
+
+#if __BIG_ENDIAN__
+#define unaligned_read_16(p) unaligned_read_be16toh(p)
+#define unaligned_read_32(p) unaligned_read_be32toh(p)
+#define unaligned_read_64(p) unaligned_read_be64toh(p)
+
+#define unaligned_write_16(p, v) unaligned_write_htobe16(p, v)
+#define unaligned_write_32(p, v) unaligned_write_htobe32(p, v)
+#define unaligned_write_64(p, v) unaligned_write_htobe64(p, v)
+#endif
+
+#endif
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PUNALIGNED_H */
diff --git a/include/flatcc/portable/pversion.h b/include/flatcc/portable/pversion.h
new file mode 100644
index 0000000..d434104
--- /dev/null
+++ b/include/flatcc/portable/pversion.h
@@ -0,0 +1,6 @@
+#define PORTABLE_VERSION_TEXT "0.2.6-pre"
+#define PORTABLE_VERSION_MAJOR 0
+#define PORTABLE_VERSION_MINOR 2
+#define PORTABLE_VERSION_PATCH 6
+/* 1 or 0 */
+#define PORTABLE_VERSION_RELEASED 0
diff --git a/include/flatcc/portable/pwarnings.h b/include/flatcc/portable/pwarnings.h
new file mode 100644
index 0000000..f420861
--- /dev/null
+++ b/include/flatcc/portable/pwarnings.h
@@ -0,0 +1,52 @@
+#ifndef PWARNINGS_H
+#define PWARNINGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * See also the pdiagnostic.h headers for per file control of common
+ * warnings.
+ *
+ * This file is intended for global disabling of warnings that shouldn't
+ * be present in C11 or perhaps C99, or that are generally just noise
+ * where recent clang / gcc compile cleanly with high warning levels.
+ */
+
+#if defined(_MSC_VER)
+/* Needed when flagging code in or out and more. */
+#pragma warning(disable: 4127) /* conditional expression is constant */
+/* happens also in MS's own headers. */
+#pragma warning(disable: 4668) /* preprocessor name not defined */
+/* MSVC does not respect double parenthesis for intent */
+#pragma warning(disable: 4706) /* assignment within conditional expression */
+/* `inline` only advisory anyway. */
+#pragma warning(disable: 4710) /* function not inlined */
+/* Well, we don't intend to add the padding manually. */
+#pragma warning(disable: 4820) /* x bytes padding added in struct */
+
+/*
+ * Don't warn that fopen etc. are unsafe
+ *
+ * Define a compiler flag like `-D_CRT_SECURE_NO_WARNINGS` in the build.
+ * For some reason it doesn't work when defined here.
+ *
+ * #define _CRT_SECURE_NO_WARNINGS
+ */
+
+/*
+ * Anonymous union in struct is valid in C11 and has been supported in
+ * GCC and Clang for a while, but it is not C99. MSVC also handles it,
+ * but warns. Truly portable code should perhaps not use this feature,
+ * but this is not the place to complain about it.
+ */
+#pragma warning(disable: 4201) /* nonstandard extension used: nameless struct/union */
+
+#endif /* _MSC_VER */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PWARNINGS_H */
diff --git a/include/flatcc/reflection/README b/include/flatcc/reflection/README
new file mode 100644
index 0000000..3c7207a
--- /dev/null
+++ b/include/flatcc/reflection/README
@@ -0,0 +1,19 @@
+Generated by flatcc
+
+Keep checked in - needed by flatcc to generate binary schema.
+
+NOTE TO CONTRIBUTORS: DO NOT EDIT THESE FILES BY HAND
+
+If you need to change anything here, it is done in the code generator,
+possibly followed by running `reflection/generate_code.sh` from the
+project root. But please only do this for testing do not include the
+generated files in a pull request unless agreed otherwise, and if so,
+do it in a separate commit.
+
+Normally new reflection code is generated during a release which also
+updates the version number in comments and there is no reason to update
+reflection on every commit unless it breaks something fundamentally.
+
+There is a build option `FLATCC_REFLECTION` to disable reflection which
+is helpful while making changes that affect the content of these files
+in a way that would prevent the flatcc compiler from building.
diff --git a/include/flatcc/reflection/flatbuffers_common_builder.h b/include/flatcc/reflection/flatbuffers_common_builder.h
new file mode 100644
index 0000000..a4be1ce
--- /dev/null
+++ b/include/flatcc/reflection/flatbuffers_common_builder.h
@@ -0,0 +1,685 @@
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#define FLATBUFFERS_COMMON_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers build functionality for C. */
+
+#include "flatcc/flatcc_prologue.h"
+#ifndef FLATBUILDER_H
+#include "flatcc/flatcc_builder.h"
+#endif
+typedef flatcc_builder_t flatbuffers_builder_t;
+typedef flatcc_builder_ref_t flatbuffers_ref_t;
+typedef flatcc_builder_ref_t flatbuffers_vec_ref_t;
+typedef flatcc_builder_union_ref_t flatbuffers_union_ref_t;
+typedef flatcc_builder_union_vec_ref_t flatbuffers_union_vec_ref_t;
+/* integer return code (ref and ptr always fail on 0) */
+#define flatbuffers_failed(x) ((x) < 0)
+typedef flatbuffers_ref_t flatbuffers_root_t;
+#define flatbuffers_root(ref) ((flatbuffers_root_t)(ref))
+
+#define __flatbuffers_memoize_begin(B, src)\
+do { flatcc_builder_ref_t _ref; if ((_ref = flatcc_builder_refmap_find((B), (src)))) return _ref; } while (0)
+#define __flatbuffers_memoize_end(B, src, op) do { return flatcc_builder_refmap_insert((B), (src), (op)); } while (0)
+#define __flatbuffers_memoize(B, src, op) do { __flatbuffers_memoize_begin(B, src); __flatbuffers_memoize_end(B, src, op); } while (0)
+
+#define __flatbuffers_build_buffer(NS)\
+typedef NS ## ref_t NS ## buffer_ref_t;\
+static inline int NS ## buffer_start(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, 0); }\
+static inline int NS ## buffer_start_with_size(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, flatcc_builder_with_size); }\
+static inline int NS ## buffer_start_aligned(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, 0); }\
+static inline int NS ## buffer_start_aligned_with_size(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t NS ## buffer_end(NS ## builder_t *B, NS ## ref_t root)\
+{ return flatcc_builder_end_buffer(B, root); }
+
+#define __flatbuffers_build_table_root(NS, N, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? -1 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, TFID)) return 0;return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }
+
+#define __flatbuffers_build_table_prolog(NS, N, FID, TFID)\
+__flatbuffers_build_table_vector_ops(NS, N ## _vec, N)\
+__flatbuffers_build_table_root(NS, N, FID, TFID)
+
+#define __flatbuffers_build_struct_root(NS, N, A, FID, TFID)\
+static inline N ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? 0 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }
+
+#define __flatbuffers_build_nested_table_root(NS, N, TN, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : TN ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_root(B, t)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, t)); }
+
+/* NOTE(review): `_start_as_typed_root` previously passed FID instead of TFID
+ * (compare the table variant above, which correctly uses TFID). This file is
+ * generated by flatcc - mirror this fix in the code generator. */
+#define __flatbuffers_build_nested_struct_root(NS, N, TN, A, FID, TFID)\
+static inline TN ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\
+static inline TN ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? 0 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end_pe(B))); }\
+static inline int N ## _create_as_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, FID, 0,\
+    TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _create_as_typed_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, TFID, 0,\
+    TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+    align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+    align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_root(B, p)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, p)); }
+
+#define __flatbuffers_build_vector_ops(NS, V, N, TN, T)\
+static inline T *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return (T *)flatcc_builder_extend_vector(B, len); }\
+static inline T *V ## _append(NS ## builder_t *B, const T *data, size_t len)\
+{ return (T *)flatcc_builder_append_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_vector(B, len); }\
+static inline T *V ## _edit(NS ## builder_t *B)\
+{ return (T *)flatcc_builder_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_vector_count(B); }\
+static inline T *V ## _push(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? (memcpy(_p, p, TN ## __size()), _p) : 0; }\
+static inline T *V ## _push_copy(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_clone(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _assign(_p __ ## TN ## _call_args) : 0; }
+
+#define __flatbuffers_build_vector(NS, N, T, S, A)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { size_t i, n; T *p = (T *)flatcc_builder_vector_edit(B);\
+ for (i = 0, n = flatcc_builder_vector_count(B); i < n; ++i)\
+ { N ## _to_pe(N ## __ptr_add(p, i)); }} return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const T *data, size_t len)\
+{ if (!NS ## is_native_pe()) { size_t i; T *p; int ret = flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); if (ret) { return ret; }\
+ p = (T *)flatcc_builder_extend_vector(B, len); if (!p) return 0;\
+ for (i = 0; i < len; ++i) { N ## _copy_to_pe(N ## __ptr_add(p, i), N ## __const_ptr_add(data, i)); }\
+ return flatcc_builder_end_vector(B); } else return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ __flatbuffers_memoize(B, vec, flatcc_builder_create_vector(B, vec, N ## _vec_len(vec), S, A, FLATBUFFERS_COUNT_MAX(S))); }\
+static inline N ## _vec_ref_t N ## _vec_slice(NS ## builder_t *B, N ##_vec_t vec, size_t index, size_t len)\
+{ size_t n = N ## _vec_len(vec); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_vector(B, N ## __const_ptr_add(vec, index), len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+__flatbuffers_build_vector_ops(NS, N ## _vec, N, N, T)
+
+#define __flatbuffers_build_union_vector_ops(NS, V, N, TN)\
+static inline TN ## _union_ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _append(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_append_union_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _union_ref_t *) flatcc_builder_union_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_union_vector_count(B); }\
+static inline TN ## _union_ref_t *V ## _push(NS ## builder_t *B, const TN ## _union_ref_t ref)\
+{ return flatcc_builder_union_vector_push(B, ref); }\
+static inline TN ## _union_ref_t *V ## _push_clone(NS ## builder_t *B, TN ## _union_t u)\
+{ return TN ## _vec_push(B, TN ## _clone(B, u)); }
+
+#define __flatbuffers_build_union_vector(NS, N)\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_create_union_vector(B, data, len); }\
+__flatbuffers_build_union_vector_ops(NS, N ## _vec, N, N)\
+/* Preserves DAG structure separately for type and value vector, so a type vector could be shared for many value vectors. */\
+static inline N ## _union_vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_union_vec_t vec)\
+{ N ## _union_vec_ref_t _uvref, _ret = { 0, 0 }; NS ## union_ref_t _uref; size_t _i, _len;\
+ if (vec.type == 0) return _ret;\
+ _uvref.type = flatcc_builder_refmap_find(B, vec.type); _uvref.value = flatcc_builder_refmap_find(B, vec.value);\
+ _len = N ## _union_vec_len(vec); if (_uvref.type == 0) {\
+ _uvref.type = flatcc_builder_refmap_insert(B, vec.type, (flatcc_builder_create_type_vector(B, vec.type, _len))); }\
+ if (_uvref.type == 0) return _ret; if (_uvref.value == 0) {\
+ if (flatcc_builder_start_offset_vector(B)) return _ret;\
+ for (_i = 0; _i < _len; ++_i) { _uref = N ## _clone(B, N ## _union_vec_at(vec, _i));\
+ if (!_uref.value || !(flatcc_builder_offset_vector_push(B, _uref.value))) return _ret; }\
+ _uvref.value = flatcc_builder_refmap_insert(B, vec.value, flatcc_builder_end_offset_vector(B));\
+ if (_uvref.value == 0) return _ret; } return _uvref; }
+
+#define __flatbuffers_build_string_vector_ops(NS, N)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NS ## string_ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return NS ## string_vec_push(B, NS ## string_end(B)); }\
+static inline NS ## string_ref_t *N ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_create(B, s, len)); }\
+static inline NS ## string_ref_t *N ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NS ## string_vec_push(B, NS ## string_create_str(B, s)); }\
+static inline NS ## string_ref_t *N ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NS ## string_vec_push(B, NS ## string_create_strn(B, s, max_len)); }\
+static inline NS ## string_ref_t *N ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NS ## string_vec_push(B, NS ## string_clone(B, string)); }\
+static inline NS ## string_ref_t *N ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_slice(B, string, index, len)); }
+
+#define __flatbuffers_build_table_vector_ops(NS, N, TN)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline TN ## _ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return N ## _push(B, TN ## _end(B)); }\
+static inline TN ## _ref_t *N ## _push_create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _push(B, TN ## _create(B __ ## TN ## _call_args)); }
+
+#define __flatbuffers_build_offset_vector_ops(NS, V, N, TN)\
+static inline TN ## _ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _append(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return flatcc_builder_append_offset_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _ref_t *)flatcc_builder_offset_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_offset_vector_count(B); }\
+static inline TN ## _ref_t *V ## _push(NS ## builder_t *B, const TN ## _ref_t ref)\
+{ return ref ? flatcc_builder_offset_vector_push(B, ref) : 0; }
+
+#define __flatbuffers_build_offset_vector(NS, N)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _ref_t *data, size_t len)\
+{ return flatcc_builder_create_offset_vector(B, data, len); }\
+__flatbuffers_build_offset_vector_ops(NS, N ## _vec, N, N)\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ int _ret; N ## _ref_t _e; size_t _i, _len; __flatbuffers_memoize_begin(B, vec);\
+ _len = N ## _vec_len(vec); if (flatcc_builder_start_offset_vector(B)) return 0;\
+ for (_i = 0; _i < _len; ++_i) { if (!(_e = N ## _clone(B, N ## _vec_at(vec, _i)))) return 0;\
+ if (!flatcc_builder_offset_vector_push(B, _e)) return 0; }\
+ __flatbuffers_memoize_end(B, vec, flatcc_builder_end_offset_vector(B)); }\
+
+#define __flatbuffers_build_string_ops(NS, N)\
+static inline char *N ## _append(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string(B, s, len); }\
+static inline char *N ## _append_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_append_string_str(B, s); }\
+static inline char *N ## _append_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string_strn(B, s, len); }\
+static inline size_t N ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_string_len(B); }\
+static inline char *N ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_string(B, len); }\
+static inline char *N ## _edit(NS ## builder_t *B)\
+{ return flatcc_builder_string_edit(B); }\
+static inline int N ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_string(B, len); }
+
+#define __flatbuffers_build_string(NS)\
+typedef NS ## ref_t NS ## string_ref_t;\
+static inline int NS ## string_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline NS ## string_ref_t NS ## string_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_string(B); }\
+static inline NS ## ref_t NS ## string_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string(B, s, len); }\
+static inline NS ## ref_t NS ## string_create_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_create_string_str(B, s); }\
+static inline NS ## ref_t NS ## string_create_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string_strn(B, s, len); }\
+static inline NS ## string_ref_t NS ## string_clone(NS ## builder_t *B, NS ## string_t string)\
+{ __flatbuffers_memoize(B, string, flatcc_builder_create_string(B, string, NS ## string_len(string))); }\
+static inline NS ## string_ref_t NS ## string_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ size_t n = NS ## string_len(string); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_string(B, string + index, len); }\
+__flatbuffers_build_string_ops(NS, NS ## string)\
+__flatbuffers_build_offset_vector(NS, NS ## string)
+
+#define __flatbuffers_copy_from_pe(P, P2, N) (*(P) = N ## _read_from_pe(P2), (P))
+#define __flatbuffers_from_pe(P, N) (*(P) = N ## _read_from_pe(P), (P))
+#define __flatbuffers_copy_to_pe(P, P2, N) (N ## _write_to_pe((P), *(P2)), (P))
+#define __flatbuffers_to_pe(P, N) (N ## _write_to_pe((P), *(P)), (P))
+#define __flatbuffers_define_fixed_array_primitives(NS, N, T)\
+static inline T *N ## _array_copy(T *p, const T *p2, size_t n)\
+{ memcpy(p, p2, n * sizeof(T)); return p; }\
+static inline T *N ## _array_copy_from_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_from_pe(&p[i], &p2[i]); return p; }\
+static inline T *N ## _array_copy_to_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_to_pe(&p[i], &p2[i]); return p; }
+#define __flatbuffers_define_scalar_primitives(NS, N, T)\
+static inline T *N ## _from_pe(T *p) { return __ ## NS ## from_pe(p, N); }\
+static inline T *N ## _to_pe(T *p) { return __ ## NS ## to_pe(p, N); }\
+static inline T *N ## _copy(T *p, const T *p2) { *p = *p2; return p; }\
+static inline T *N ## _copy_from_pe(T *p, const T *p2)\
+{ return __ ## NS ## copy_from_pe(p, p2, N); }\
+static inline T *N ## _copy_to_pe(T *p, const T *p2) \
+{ return __ ## NS ## copy_to_pe(p, p2, N); }\
+static inline T *N ## _assign(T *p, const T v0) { *p = v0; return p; }\
+static inline T *N ## _assign_from_pe(T *p, T v0)\
+{ *p = N ## _read_from_pe(&v0); return p; }\
+static inline T *N ## _assign_to_pe(T *p, T v0)\
+{ N ## _write_to_pe(p, v0); return p; }
+#define __flatbuffers_build_scalar(NS, N, T)\
+__ ## NS ## define_scalar_primitives(NS, N, T)\
+__ ## NS ## define_fixed_array_primitives(NS, N, T)\
+__ ## NS ## build_vector(NS, N, T, sizeof(T), sizeof(T))
+/* Depends on generated copy_to/from_pe functions, and the type. */
+#define __flatbuffers_define_struct_primitives(NS, N)\
+static inline N ## _t *N ##_to_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_to_pe(p, p); }; return p; }\
+static inline N ## _t *N ##_from_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_from_pe(p, p); }; return p; }\
+static inline N ## _t *N ## _clear(N ## _t *p) { return (N ## _t *)memset(p, 0, N ## __size()); }
+
+/* Depends on generated copy/assign_to/from_pe functions, and the type. */
+#define __flatbuffers_build_struct(NS, N, S, A, FID, TFID)\
+__ ## NS ## define_struct_primitives(NS, N)\
+typedef NS ## ref_t N ## _ref_t;\
+static inline N ## _t *N ## _start(NS ## builder_t *B)\
+{ return (N ## _t *)flatcc_builder_start_struct(B, S, A); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { N ## _to_pe((N ## _t *)flatcc_builder_struct_edit(B)); }\
+ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _create(NS ## builder_t *B __ ## N ## _formal_args)\
+{ N ## _t *_p = N ## _start(B); if (!_p) return 0; N ##_assign_to_pe(_p __ ## N ## _call_args);\
+ return N ## _end_pe(B); }\
+static inline N ## _ref_t N ## _clone(NS ## builder_t *B, N ## _struct_t p)\
+{ N ## _t *_p; __flatbuffers_memoize_begin(B, p); _p = N ## _start(B); if (!_p) return 0;\
+ N ## _copy(_p, p); __flatbuffers_memoize_end(B, p, N ##_end_pe(B)); }\
+__flatbuffers_build_vector(NS, N, N ## _t, S, A)\
+__flatbuffers_build_struct_root(NS, N, A, FID, TFID)\
+
+#define __flatbuffers_struct_clear_field(p) memset((p), 0, sizeof(*(p)))
+#define __flatbuffers_build_table(NS, N, K)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_table(B, K); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ FLATCC_ASSERT(flatcc_builder_check_required(B, __ ## N ## _required,\
+ sizeof(__ ## N ## _required) / sizeof(__ ## N ## _required[0]) - 1));\
+ return flatcc_builder_end_table(B); }\
+__flatbuffers_build_offset_vector(NS, N)
+
+#define __flatbuffers_build_table_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _ref_t ref)\
+{ TN ## _ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ?\
+ ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _end(B)); }\
+static inline TN ## _ref_t N ## _create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _add(B, TN ## _create(B __ ## TN ## _call_args)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _table_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _table_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *_p; TN ## _union_type_t *_pt; if (uref.type == TN ## _NONE) return 0; if (uref.value == 0) return -1;\
+ if (!(_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1, sizeof(*_pt), sizeof(*_pt)))) return -1;\
+ *_pt = uref.type; if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uref.value; return 0; }\
+static inline int N ## _add_type(NS ## builder_t *B, TN ## _union_type_t type)\
+{ TN ## _union_type_t *_pt; if (type == TN ## _NONE) return 0; return (_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1,\
+ sizeof(*_pt), sizeof(*_pt))) ? ((*_pt = type), 0) : -1; }\
+static inline int N ## _add_value(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *p; if (uref.type == TN ## _NONE) return 0; return (p = flatcc_builder_table_add_offset(B, ID)) ?\
+ ((*p = uref.value), 0) : -1; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_table_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _table_t t)\
+{ T ## _ref_t ref = T ## _clone(B, t);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_struct_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline T ## _t *N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _end_pe(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end_pe(B);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _struct_t p)\
+{ T ## _ref_t ref = T ## _clone(B, p);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+#define __flatbuffers_build_union_string_value_field(NS, N, NU, M)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+__flatbuffers_build_string_field_ops(NS, N ## _ ## M)
+
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type
+ * S: sizeof of scalar type, A: alignment of type T, default value V of type T. */
+#define __flatbuffers_build_scalar_field(ID, NS, N, TN, T, S, A, V, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (v == V) return 0; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+static inline int N ## _force_add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects pe endian content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a nop success with 0 as result. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type
+ * S: sizeof of scalar type, A: alignment of type T. */
+#define __flatbuffers_build_scalar_optional_field(ID, NS, N, TN, T, S, A, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects pe endian content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a nop success with 0 as result. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_struct_field(ID, NS, N, TN, S, A, TT)\
+static inline TN ## _t *N ## _start(NS ## builder_t *B)\
+{ return (TN ## _t *)flatcc_builder_table_add(B, ID, S, A); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { TN ## _to_pe((TN ## _t *)flatcc_builder_table_edit(B, S)); } return 0; }\
+static inline int N ## _end_pe(NS ## builder_t *B) { return 0; }\
+static inline int N ## _create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_assign_to_pe(_p __ ## TN ## _call_args);\
+ return 0; }\
+static inline int N ## _add(NS ## builder_t *B, const TN ## _t *p)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_copy_to_pe(_p, p); return 0; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _struct_t p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _struct_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_vector_field(ID, NS, N, TN, T, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _vec_start(B); }\
+static inline int N ## _end_pe(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end_pe(B)); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end(B)); }\
+static inline int N ## _create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create_pe(B, data, len)); }\
+static inline int N ## _create(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create(B, data, len)); }\
+static inline int N ## _slice(NS ## builder_t *B, TN ## _vec_t vec, size_t index, size_t len)\
+{ return N ## _add(B, TN ## _vec_slice(B, vec, index, len)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\
+__flatbuffers_build_vector_ops(NS, N, N, TN, T)\
+
+#define __flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_offset_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_offset_vector(B, data, len)); }\
+__flatbuffers_build_offset_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+/* depends on N ## _add which differs for union member fields and ordinary fields */\
+#define __flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_string(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const char *s, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_string(B, s, len)); }\
+static inline int N ## _create_str(NS ## builder_t *B, const char *s)\
+{ return N ## _add(B, flatcc_builder_create_string_str(B, s)); }\
+static inline int N ## _create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return N ## _add(B, flatcc_builder_create_string_strn(B, s, max_len)); }\
+static inline int N ## _clone(NS ## builder_t *B, NS ## string_t string)\
+{ return N ## _add(B, NS ## string_clone(B, string)); }\
+static inline int N ## _slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return N ## _add(B, NS ## string_slice(B, string, index, len)); }\
+__flatbuffers_build_string_ops(NS, N)
+
+#define __flatbuffers_build_string_field(ID, NS, N, TT)\
+static inline int N ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ NS ## string_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+__flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ NS ## string_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_table_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_table_vector_ops(NS, N, TN)
+
+#define __flatbuffers_build_union_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_vec_ref_t uvref)\
+{ NS ## vec_ref_t *_p; if (!uvref.type || !uvref.value) return uvref.type == uvref.value ? 0 : -1;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID - 1))) return -1; *_p = uvref.type;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uvref.value; return 0; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_union_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_union_vector(B, data, len)); }\
+__flatbuffers_build_union_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_vec_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_table_vector_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _table_t t)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, t))); }
+
+#define __flatbuffers_build_union_struct_vector_value_field(NS, N, NU, M, T)\
+static inline T ## _t *N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _struct_t p)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, p))); }
+
+#define __flatbuffers_build_union_string_vector_value_field(NS, N, NU, M)\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create(B, s, len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_str(B, s))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_strn(B, s, max_len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_clone(B, string))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_slice(B, string, index, len))); }
+
+#define __flatbuffers_build_string_vector_field(ID, NS, N, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, NS ## string, TT)\
+__flatbuffers_build_string_vector_ops(NS, N)
+
+#define __flatbuffers_char_formal_args , char v0
+#define __flatbuffers_char_call_args , v0
+#define __flatbuffers_uint8_formal_args , uint8_t v0
+#define __flatbuffers_uint8_call_args , v0
+#define __flatbuffers_int8_formal_args , int8_t v0
+#define __flatbuffers_int8_call_args , v0
+#define __flatbuffers_bool_formal_args , flatbuffers_bool_t v0
+#define __flatbuffers_bool_call_args , v0
+#define __flatbuffers_uint16_formal_args , uint16_t v0
+#define __flatbuffers_uint16_call_args , v0
+#define __flatbuffers_uint32_formal_args , uint32_t v0
+#define __flatbuffers_uint32_call_args , v0
+#define __flatbuffers_uint64_formal_args , uint64_t v0
+#define __flatbuffers_uint64_call_args , v0
+#define __flatbuffers_int16_formal_args , int16_t v0
+#define __flatbuffers_int16_call_args , v0
+#define __flatbuffers_int32_formal_args , int32_t v0
+#define __flatbuffers_int32_call_args , v0
+#define __flatbuffers_int64_formal_args , int64_t v0
+#define __flatbuffers_int64_call_args , v0
+#define __flatbuffers_float_formal_args , float v0
+#define __flatbuffers_float_call_args , v0
+#define __flatbuffers_double_formal_args , double v0
+#define __flatbuffers_double_call_args , v0
+
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_char, char)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint8, uint8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int8, int8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint16, uint16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint32, uint32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint64, uint64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int16, int16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int32, int32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int64, int64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_float, float)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_double, double)
+
+__flatbuffers_build_string(flatbuffers_)
+
+__flatbuffers_build_buffer(flatbuffers_)
+#include "flatcc/flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_BUILDER_H */
diff --git a/include/flatcc/reflection/flatbuffers_common_reader.h b/include/flatcc/reflection/flatbuffers_common_reader.h
new file mode 100644
index 0000000..c575308
--- /dev/null
+++ b/include/flatcc/reflection/flatbuffers_common_reader.h
@@ -0,0 +1,578 @@
+#ifndef FLATBUFFERS_COMMON_READER_H
+#define FLATBUFFERS_COMMON_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers read functionality for C. */
+
+#include "flatcc/flatcc_prologue.h"
+#include "flatcc/flatcc_flatbuffers.h"
+
+
+#define __flatbuffers_read_scalar_at_byteoffset(N, p, o) N ## _read_from_pe((uint8_t *)(p) + (o))
+#define __flatbuffers_read_scalar(N, p) N ## _read_from_pe(p)
+#define __flatbuffers_read_vt(ID, offset, t)\
+flatbuffers_voffset_t offset = 0;\
+{ flatbuffers_voffset_t id__tmp, *vt__tmp;\
+ FLATCC_ASSERT(t != 0 && "null pointer table access");\
+ id__tmp = ID;\
+ vt__tmp = (flatbuffers_voffset_t *)((uint8_t *)(t) -\
+ __flatbuffers_soffset_read_from_pe(t));\
+ if (__flatbuffers_voffset_read_from_pe(vt__tmp) >= sizeof(vt__tmp[0]) * (id__tmp + 3u)) {\
+ offset = __flatbuffers_voffset_read_from_pe(vt__tmp + id__tmp + 2);\
+ }\
+}
+#define __flatbuffers_field_present(ID, t) { __flatbuffers_read_vt(ID, offset__tmp, t) return offset__tmp != 0; }
+#define __flatbuffers_scalar_field(T, ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (const T *)((uint8_t *)(t) + offset__tmp);\
+ }\
+ return 0;\
+}
+#define __flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _table_t t__tmp)\
+__flatbuffers_scalar_field(T, ID, t__tmp)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+#define __flatbuffers_define_scalar_optional_field(ID, N, NK, TK, T, V)\
+__flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline TK ## _option_t N ## _ ## NK ## _option(N ## _table_t t__tmp)\
+{ TK ## _option_t ret; __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ ret.is_null = offset__tmp == 0; ret.value = offset__tmp ?\
+ __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+ return ret; }
+#define __flatbuffers_struct_field(T, ID, t, r)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (T)((uint8_t *)(t) + offset__tmp);\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_offset_field(T, ID, t, r, adjust)\
+{\
+ flatbuffers_uoffset_t *elem__tmp;\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ elem__tmp = (flatbuffers_uoffset_t *)((uint8_t *)(t) + offset__tmp);\
+ /* Add sizeof so C api can have raw access past header field. */\
+ return (T)((uint8_t *)(elem__tmp) + adjust +\
+ __flatbuffers_uoffset_read_from_pe(elem__tmp));\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_vector_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, sizeof(flatbuffers_uoffset_t))
+#define __flatbuffers_table_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, 0)
+#define __flatbuffers_define_struct_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_vector_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_table_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_string_field(ID, N, NK, r)\
+static inline flatbuffers_string_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline flatbuffers_string_t N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_string_field(N, NK)
+#define __flatbuffers_vec_len(vec)\
+{ return (vec) ? (size_t)__flatbuffers_uoffset_read_from_pe((flatbuffers_uoffset_t *)vec - 1) : 0; }
+#define __flatbuffers_string_len(s) __flatbuffers_vec_len(s)
+static inline size_t flatbuffers_vec_len(const void *vec)
+__flatbuffers_vec_len(vec)
+#define __flatbuffers_scalar_vec_at(N, vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return __flatbuffers_read_scalar(N, &(vec)[i]); }
+#define __flatbuffers_struct_vec_at(vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range"); return (vec) + (i); }
+/* `adjust` skips past the header for string vectors. */
+#define __flatbuffers_offset_vec_at(T, vec, i, adjust)\
+{ const flatbuffers_uoffset_t *elem__tmp = (vec) + (i);\
+ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return (T)((uint8_t *)(elem__tmp) + (size_t)__flatbuffers_uoffset_read_from_pe(elem__tmp) + (adjust)); }
+#define __flatbuffers_define_scalar_vec_len(N)\
+static inline size_t N ## _vec_len(N ##_vec_t vec__tmp)\
+{ return flatbuffers_vec_len(vec__tmp); }
+#define __flatbuffers_define_scalar_vec_at(N, T) \
+static inline T N ## _vec_at(N ## _vec_t vec__tmp, size_t i__tmp)\
+__flatbuffers_scalar_vec_at(N, vec__tmp, i__tmp)
+typedef const char *flatbuffers_string_t;
+static inline size_t flatbuffers_string_len(flatbuffers_string_t s)
+__flatbuffers_string_len(s)
+typedef const flatbuffers_uoffset_t *flatbuffers_string_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_string_mutable_vec_t;
+static inline size_t flatbuffers_string_vec_len(flatbuffers_string_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_string_t flatbuffers_string_vec_at(flatbuffers_string_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_string_t, vec, i, sizeof(vec[0]))
+typedef const void *flatbuffers_generic_t;
+typedef void *flatbuffers_mutable_generic_t;
+static inline flatbuffers_string_t flatbuffers_string_cast_from_generic(const flatbuffers_generic_t p)
+{ return p ? ((const char *)p) + __flatbuffers_uoffset__size() : 0; }
+typedef const flatbuffers_uoffset_t *flatbuffers_generic_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_generic_table_mutable_vec_t;
+static inline size_t flatbuffers_generic_vec_len(flatbuffers_generic_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, 0)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at_as_string(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, sizeof(vec[0]))
+typedef struct flatbuffers_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_generic_t value;
+} flatbuffers_union_t;
+typedef struct flatbuffers_union_vec {
+ const flatbuffers_union_type_t *type;
+ const flatbuffers_uoffset_t *value;
+} flatbuffers_union_vec_t;
+typedef struct flatbuffers_mutable_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_mutable_generic_t value;
+} flatbuffers_mutable_union_t;
+typedef struct flatbuffers_mutable_union_vec {
+ flatbuffers_union_type_t *type;
+ flatbuffers_uoffset_t *value;
+} flatbuffers_mutable_union_vec_t;
+static inline flatbuffers_mutable_union_t flatbuffers_mutable_union_cast(flatbuffers_union_t u__tmp)\
+{ flatbuffers_mutable_union_t mu = { u__tmp.type, (flatbuffers_mutable_generic_t)u__tmp.value };\
+ return mu; }
+static inline flatbuffers_mutable_union_vec_t flatbuffers_mutable_union_vec_cast(flatbuffers_union_vec_t uv__tmp)\
+{ flatbuffers_mutable_union_vec_t muv =\
+ { (flatbuffers_union_type_t *)uv__tmp.type, (flatbuffers_uoffset_t *)uv__tmp.value }; return muv; }
+#define __flatbuffers_union_type_field(ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(__flatbuffers_utype, t, offset__tmp) : 0;\
+}
+static inline flatbuffers_string_t flatbuffers_string_cast_from_union(const flatbuffers_union_t u__tmp)\
+{ return flatbuffers_string_cast_from_generic(u__tmp.value); }
+#define __flatbuffers_define_union_field(NS, ID, N, NK, T, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type_get(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__## NS ## field_present(ID, t__tmp)\
+static inline T ## _union_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; u__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ if (u__tmp.type == 0) return u__tmp; u__tmp.value = N ## _ ## NK ## _get(t__tmp); return u__tmp; }\
+static inline NS ## string_t N ## _ ## NK ## _as_string(N ## _table_t t__tmp)\
+{ return NS ## string_cast_from_generic(N ## _ ## NK ## _get(t__tmp)); }\
+
+#define __flatbuffers_define_union_vector_ops(NS, T)\
+static inline size_t T ## _union_vec_len(T ## _union_vec_t uv__tmp)\
+{ return NS ## vec_len(uv__tmp.type); }\
+static inline T ## _union_t T ## _union_vec_at(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; size_t n__tmp = NS ## vec_len(uv__tmp.type);\
+ FLATCC_ASSERT(n__tmp > (i__tmp) && "index out of range"); u__tmp.type = uv__tmp.type[i__tmp];\
+ /* Unknown type is treated as NONE for schema evolution. */\
+ if (u__tmp.type == 0) return u__tmp;\
+ u__tmp.value = NS ## generic_vec_at(uv__tmp.value, i__tmp); return u__tmp; }\
+static inline NS ## string_t T ## _union_vec_at_as_string(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ return (NS ## string_t) NS ## generic_vec_at_as_string(uv__tmp.value, i__tmp); }\
+
+#define __flatbuffers_define_union_vector(NS, T)\
+typedef NS ## union_vec_t T ## _union_vec_t;\
+typedef NS ## mutable_union_vec_t T ## _mutable_union_vec_t;\
+static inline T ## _mutable_union_vec_t T ## _mutable_union_vec_cast(T ## _union_vec_t u__tmp)\
+{ return NS ## mutable_union_vec_cast(u__tmp); }\
+__## NS ## define_union_vector_ops(NS, T)
+#define __flatbuffers_define_union(NS, T)\
+typedef NS ## union_t T ## _union_t;\
+typedef NS ## mutable_union_t T ## _mutable_union_t;\
+static inline T ## _mutable_union_t T ## _mutable_union_cast(T ## _union_t u__tmp)\
+{ return NS ## mutable_union_cast(u__tmp); }\
+__## NS ## define_union_vector(NS, T)
+#define __flatbuffers_define_union_vector_field(NS, ID, N, NK, T, r)\
+__## NS ## define_vector_field(ID - 1, N, NK ## _type, T ## _vec_t, r)\
+__## NS ## define_vector_field(ID, N, NK, flatbuffers_generic_vec_t, r)\
+static inline T ## _union_vec_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_vec_t uv__tmp; uv__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ uv__tmp.value = N ## _ ## NK(t__tmp);\
+ FLATCC_ASSERT(NS ## vec_len(uv__tmp.type) == NS ## vec_len(uv__tmp.value)\
+ && "union vector type length mismatch"); return uv__tmp; }
+#include <string.h>
+static const size_t flatbuffers_not_found = (size_t)-1;
+static const size_t flatbuffers_end = (size_t)-1;
+#define __flatbuffers_identity(n) (n)
+#define __flatbuffers_min(a, b) ((a) < (b) ? (a) : (b))
+/* Subtraction doesn't work for unsigned types. */
+#define __flatbuffers_scalar_cmp(x, y, n) ((x) < (y) ? -1 : (x) > (y))
+static inline int __flatbuffers_string_n_cmp(flatbuffers_string_t v, const char *s, size_t n)
+{ size_t nv = flatbuffers_string_len(v); int x = strncmp(v, s, nv < n ? nv : n);
+ return x != 0 ? x : nv < n ? -1 : nv > n; }
+/* `n` arg unused, but needed by string find macro expansion. */
+static inline int __flatbuffers_string_cmp(flatbuffers_string_t v, const char *s, size_t n) { (void)n; return strcmp(v, s); }
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Returns lowest matching index or not_found. */
+#define __flatbuffers_find_by_field(A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t a__tmp = 0, b__tmp, m__tmp; if (!(b__tmp = L(V))) { return flatbuffers_not_found; }\
+ --b__tmp;\
+ while (a__tmp < b__tmp) {\
+ m__tmp = a__tmp + ((b__tmp - a__tmp) >> 1);\
+ v__tmp = A(E(V, m__tmp));\
+ if ((D(v__tmp, (K), (Kn))) < 0) {\
+ a__tmp = m__tmp + 1;\
+ } else {\
+ b__tmp = m__tmp;\
+ }\
+ }\
+ if (a__tmp == b__tmp) {\
+ v__tmp = A(E(V, a__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return a__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_find_by_scalar_field(A, V, E, L, K, T)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_find_by_string_field(A, V, E, L, K)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_find_by_string_n_field(A, V, E, L, K, Kn)\
+__flatbuffers_find_by_field(A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, TK key__tmp)\
+__flatbuffers_find_by_scalar_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, TK)
+#define __flatbuffers_define_scalar_find(N, T)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_find_by_scalar_field(__flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_find_by_string_field(N, NK) \
+/* Note: find only works on vectors sorted by this field. */\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_find_by_string_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_find_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_find_by_string_n_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, key__tmp); }
+#define __flatbuffers_define_default_find_by_string_field(N, NK) \
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_find_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_find_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Returns lowest matching index or not_found. */
+#define __flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp;\
+ for (i__tmp = b; i__tmp < e; ++i__tmp) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp = e;\
+ while (i__tmp-- > b) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_scan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_scan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_scan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_rscan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_rscan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_rscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_scan_by_scalar_field(N, NK, T)\
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scalar_scan(N, T)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(begin__tmp, __flatbuffers_min( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(begin__tmp, __flatbuffers_min( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_scan_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }
+#define __flatbuffers_define_default_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }
+#define __flatbuffers_heap_sort(N, X, A, E, L, TK, TE, D, S)\
+static inline void __ ## N ## X ## __heap_sift_down(\
+ N ## _mutable_vec_t vec__tmp, size_t start__tmp, size_t end__tmp)\
+{ size_t child__tmp, root__tmp; TK v1__tmp, v2__tmp, vroot__tmp;\
+ root__tmp = start__tmp;\
+ while ((root__tmp << 1) <= end__tmp) {\
+ child__tmp = root__tmp << 1;\
+ if (child__tmp < end__tmp) {\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ v2__tmp = A(E(vec__tmp, child__tmp + 1));\
+ if (D(v1__tmp, v2__tmp) < 0) {\
+ child__tmp++;\
+ }\
+ }\
+ vroot__tmp = A(E(vec__tmp, root__tmp));\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ if (D(vroot__tmp, v1__tmp) < 0) {\
+ S(vec__tmp, root__tmp, child__tmp, TE);\
+ root__tmp = child__tmp;\
+ } else {\
+ return;\
+ }\
+ }\
+}\
+static inline void __ ## N ## X ## __heap_sort(N ## _mutable_vec_t vec__tmp)\
+{ size_t start__tmp, end__tmp, size__tmp;\
+ size__tmp = L(vec__tmp); if (size__tmp == 0) return; end__tmp = size__tmp - 1; start__tmp = size__tmp >> 1;\
+ do { __ ## N ## X ## __heap_sift_down(vec__tmp, start__tmp, end__tmp); } while (start__tmp--);\
+ while (end__tmp > 0) { \
+ S(vec__tmp, 0, end__tmp, TE);\
+ __ ## N ## X ## __heap_sift_down(vec__tmp, 0, --end__tmp); } }
+#define __flatbuffers_define_sort_by_field(N, NK, TK, TE, D, S)\
+ __flatbuffers_heap_sort(N, _sort_by_ ## NK, N ## _ ## NK ## _get, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort_by_ ## NK(N ## _mutable_vec_t vec__tmp)\
+{ __ ## N ## _sort_by_ ## NK ## __heap_sort(vec__tmp); }
+#define __flatbuffers_define_sort(N, TK, TE, D, S)\
+__flatbuffers_heap_sort(N, , __flatbuffers_identity, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort(N ## _mutable_vec_t vec__tmp) { __ ## N ## __heap_sort(vec__tmp); }
+#define __flatbuffers_scalar_diff(x, y) ((x) < (y) ? -1 : (x) > (y))
+#define __flatbuffers_string_diff(x, y) __flatbuffers_string_n_cmp((x), (const char *)(y), flatbuffers_string_len(y))
+#define __flatbuffers_value_swap(vec, a, b, TE) { TE x__tmp = vec[b]; vec[b] = vec[a]; vec[a] = x__tmp; }
+#define __flatbuffers_uoffset_swap(vec, a, b, TE)\
+{ TE ta__tmp, tb__tmp, d__tmp;\
+ d__tmp = (TE)((a - b) * sizeof(vec[0]));\
+ ta__tmp = __flatbuffers_uoffset_read_from_pe(vec + b) - d__tmp;\
+ tb__tmp = __flatbuffers_uoffset_read_from_pe(vec + a) + d__tmp;\
+ __flatbuffers_uoffset_write_to_pe(vec + a, ta__tmp);\
+ __flatbuffers_uoffset_write_to_pe(vec + b, tb__tmp); }
+#define __flatbuffers_scalar_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_string_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
+#define __flatbuffers_struct_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_table_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
+#define __flatbuffers_define_struct_sort_by_scalar_field(N, NK, TK, TE)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, TE, __flatbuffers_scalar_diff, __flatbuffers_struct_swap)
+#define __flatbuffers_define_table_sort_by_scalar_field(N, NK, TK)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, flatbuffers_uoffset_t, __flatbuffers_scalar_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_table_sort_by_string_field(N, NK)\
+ __flatbuffers_define_sort_by_field(N, NK, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_scalar_sort(N, T) __flatbuffers_define_sort(N, T, T, __flatbuffers_scalar_diff, __flatbuffers_scalar_swap)
+#define __flatbuffers_define_string_sort() __flatbuffers_define_sort(flatbuffers_string, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_string_swap)
+#define __flatbuffers_sort_vector_field(N, NK, T, t)\
+{ T ## _mutable_vec_t v__tmp = (T ## _mutable_vec_t) N ## _ ## NK ## _get(t);\
+ if (v__tmp) T ## _vec_sort(v__tmp); }
+#define __flatbuffers_sort_table_field(N, NK, T, t)\
+{ T ## _sort((T ## _mutable_table_t)N ## _ ## NK ## _get(t)); }
+#define __flatbuffers_sort_union_field(N, NK, T, t)\
+{ T ## _sort(T ## _mutable_union_cast(N ## _ ## NK ## _union(t))); }
+#define __flatbuffers_sort_table_vector_field_elements(N, NK, T, t)\
+{ T ## _vec_t v__tmp = N ## _ ## NK ## _get(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort((T ## _mutable_table_t)T ## _vec_at(v__tmp, i__tmp)); }}
+#define __flatbuffers_sort_union_vector_field_elements(N, NK, T, t)\
+{ T ## _union_vec_t v__tmp = N ## _ ## NK ## _union(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _union_vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort(T ## _mutable_union_cast(T ## _union_vec_at(v__tmp, i__tmp))); }}
+#define __flatbuffers_define_scalar_vector(N, T)\
+typedef const T *N ## _vec_t;\
+typedef T *N ## _mutable_vec_t;\
+__flatbuffers_define_scalar_vec_len(N)\
+__flatbuffers_define_scalar_vec_at(N, T)\
+__flatbuffers_define_scalar_find(N, T)\
+__flatbuffers_define_scalar_scan(N, T)\
+__flatbuffers_define_scalar_sort(N, T)
+
+#define __flatbuffers_define_integer_type(N, T, W)\
+__flatcc_define_integer_accessors(N, T, W, flatbuffers_endian)\
+__flatbuffers_define_scalar_vector(N, T)
+__flatbuffers_define_scalar_vector(flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_define_scalar_vector(flatbuffers_char, char)
+__flatbuffers_define_scalar_vector(flatbuffers_uint8, uint8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int8, int8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint16, uint16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int16, int16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint32, uint32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int32, int32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint64, uint64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int64, int64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_float, float)
+__flatbuffers_define_scalar_vector(flatbuffers_double, double)
+__flatbuffers_define_scalar_vector(flatbuffers_union_type, flatbuffers_union_type_t)
+/* Lookup helpers over string vectors. scan_*/rscan_* do forward/reverse
+ * linear scans over [begin, end); the _ex variants clamp end to the vector
+ * length via __flatbuffers_min; the _n variants match only the first n bytes
+ * of s. find_* delegate to __flatbuffers_find_by_string_field -- presumably
+ * a sorted-vector search; confirm in flatbuffers_common_reader.h. */
+static inline size_t flatbuffers_string_vec_find(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_find_by_string_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_find_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_find_by_string_n_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_scan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_scan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_rscan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_rscan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+/* Generates the string vector sort helper. */
+__flatbuffers_define_string_sort()
+/* Accessors for a fixed-size scalar array field NK[L] inside struct N:
+ * _get(i) (and the short form N_NK(i)) bounds-checks i against L, returns 0
+ * out of range, and converts from protocol endianness via
+ * __flatbuffers_read_scalar; _get_ptr exposes the stored elements directly
+ * (no endian conversion); _get_len yields the compile-time length L. */
+#define __flatbuffers_define_struct_scalar_fixed_array_field(N, NK, TK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0;\
+ return __flatbuffers_read_scalar(TK, &(t__tmp->NK[i__tmp])); }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp, size_t i__tmp)\
+{ return N ## _ ## NK ## _get(t__tmp, i__tmp); }
+/* Accessors for a fixed-size struct array field NK[L] inside struct N:
+ * _get(i) (and the short form N_NK(i)) return a pointer to element i, or 0
+ * when t__tmp is null or i is out of range; _get_ptr returns the first
+ * element; _get_len yields the compile-time length L.
+ * Fix: the _get body and the _get_ptr declaration were fused onto one
+ * source line ("}static inline") with a missing line continuation; split
+ * with "\" for consistency with the sibling scalar macro above. The macro
+ * expansion is token-identical. */
+#define __flatbuffers_define_struct_struct_fixed_array_field(N, NK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }\
+static inline T N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }
+/* Accessors for a single scalar field NK of struct N: _get/N_NK return the
+ * endian-converted value (0 when t__tmp is null); _get_ptr exposes the
+ * stored value without conversion; also generates scan-by-field helpers. */
+#define __flatbuffers_define_struct_scalar_field(N, NK, TK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+/* Accessor for a nested struct field NK of struct N: returns a pointer to
+ * the embedded struct, or 0 when the parent pointer is null. */
+#define __flatbuffers_define_struct_struct_field(N, NK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }
+/* If fid is null, the function returns true without testing as buffer is not expected to have any id. */
+/* Compares the identifier stored immediately after the root uoffset against
+ * the type hash derived from fid; a fid whose hash is 0 also accepts any
+ * buffer. */
+static inline int flatbuffers_has_identifier(const void *buffer, const char *fid)
+{ flatbuffers_thash_t id, id2 = 0; if (fid == 0) { return 1; };
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe(((flatbuffers_uoffset_t *)buffer) + 1);
+ return id2 == 0 || id == id2; }
+/* As above but with a precomputed type hash; thash == 0 accepts anything. */
+static inline int flatbuffers_has_type_hash(const void *buffer, flatbuffers_thash_t thash)
+{ return thash == 0 || (__flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1) == thash); }
+
+/* Reads the identifier field (stored after the root offset) as a type hash. */
+static inline flatbuffers_thash_t flatbuffers_get_type_hash(const void *buffer)
+{ return __flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1); }
+
+/* Intended as a platform byte-order sanity check: passes only when an
+ * identifier written as raw bytes reads back as its own type hash. */
+#define flatbuffers_verify_endian() flatbuffers_has_identifier("\x00\x00\x00\x00" "1234", "1234")
+/* For size-prefixed buffers: stores the prefix in *size_out (if non-null)
+ * and returns the address just past the prefix, i.e. the buffer proper. */
+static inline void *flatbuffers_read_size_prefix(void *b, size_t *size_out)
+{ if (size_out) { *size_out = (size_t)__flatbuffers_uoffset_read_from_pe(b); }
+ return (uint8_t *)b + sizeof(flatbuffers_uoffset_t); }
+/* Null file identifier accepts anything, otherwise fid should be 4 characters. */
+/* Resolve the root object: verify the identifier (or type hash in the typed
+ * variant), then follow the leading uoffset to the root table/struct.
+ * Evaluates to 0 when buffer is null or the identifier check fails. */
+#define __flatbuffers_read_root(T, K, buffer, fid)\
+ ((!buffer || !flatbuffers_has_identifier(buffer, fid)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+#define __flatbuffers_read_typed_root(T, K, buffer, thash)\
+ ((!buffer || !flatbuffers_has_type_hash(buffer, thash)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+/* For a [ubyte] field N of container table C that holds a nested buffer:
+ * generates C_N_as_root* helpers that fetch the field bytes and resolve them
+ * as a root of type T (table_ or struct_ selected by K). */
+#define __flatbuffers_nested_buffer_as_root(C, N, T, K)\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root_with_identifier(C ## _ ## table_t t__tmp, const char *fid__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_typed_root(C ## _ ## table_t t__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, C ## _ ## type_identifier); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root(C ## _ ## table_t t__tmp)\
+{ const char *fid__tmp = T ## _file_identifier;\
+ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }
+/* Generates the top-level N_as_root family for type N: the plain form checks
+ * N's own file identifier, _with_identifier/_with_type_hash take explicit
+ * values, and _as_typed_root uses N's generated type hash. */
+#define __flatbuffers_buffer_as_root(N, K)\
+static inline N ## _ ## K ## t N ## _as_root_with_identifier(const void *buffer__tmp, const char *fid__tmp)\
+{ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root_with_type_hash(const void *buffer__tmp, flatbuffers_thash_t thash__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, thash__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root(const void *buffer__tmp)\
+{ const char *fid__tmp = N ## _file_identifier;\
+ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_typed_root(const void *buffer__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, N ## _type_hash) }
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_H */
diff --git a/include/flatcc/reflection/reflection_builder.h b/include/flatcc/reflection/reflection_builder.h
new file mode 100644
index 0000000..65aef73
--- /dev/null
+++ b/include/flatcc/reflection/reflection_builder.h
@@ -0,0 +1,457 @@
+#ifndef REFLECTION_BUILDER_H
+#define REFLECTION_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef REFLECTION_READER_H
+#include "reflection_reader.h"
+#endif
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#include "flatbuffers_common_builder.h"
+#endif
+#include "flatcc/flatcc_prologue.h"
+#undef flatbuffers_identifier
+#define flatbuffers_identifier "BFBS"
+#undef flatbuffers_extension
+#define flatbuffers_extension "bfbs"
+
+/* Builder metadata for each reflection type. For a table N:
+ * __N_required lists the ids of required fields (0-terminated list -- see
+ * __flatbuffers_build_table for the exact convention), N_ref_t is the
+ * builder reference handle, N_clone deep-copies an existing table into a
+ * builder, and __flatbuffers_build_table(ns, N, field_count) generates the
+ * start/end/add helpers. __flatbuffers_build_scalar does the same for the
+ * BaseType enum scalar. */
+#define __reflection_BaseType_formal_args , reflection_BaseType_enum_t v0
+#define __reflection_BaseType_call_args , v0
+__flatbuffers_build_scalar(flatbuffers_, reflection_BaseType, reflection_BaseType_enum_t)
+
+static const flatbuffers_voffset_t __reflection_Type_required[] = { 0 };
+typedef flatbuffers_ref_t reflection_Type_ref_t;
+static reflection_Type_ref_t reflection_Type_clone(flatbuffers_builder_t *B, reflection_Type_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Type, 4)
+
+static const flatbuffers_voffset_t __reflection_KeyValue_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_KeyValue_ref_t;
+static reflection_KeyValue_ref_t reflection_KeyValue_clone(flatbuffers_builder_t *B, reflection_KeyValue_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_KeyValue, 2)
+
+static const flatbuffers_voffset_t __reflection_EnumVal_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_EnumVal_ref_t;
+static reflection_EnumVal_ref_t reflection_EnumVal_clone(flatbuffers_builder_t *B, reflection_EnumVal_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_EnumVal, 5)
+
+static const flatbuffers_voffset_t __reflection_Enum_required[] = { 0, 1, 3, 0 };
+typedef flatbuffers_ref_t reflection_Enum_ref_t;
+static reflection_Enum_ref_t reflection_Enum_clone(flatbuffers_builder_t *B, reflection_Enum_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Enum, 6)
+
+static const flatbuffers_voffset_t __reflection_Field_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Field_ref_t;
+static reflection_Field_ref_t reflection_Field_clone(flatbuffers_builder_t *B, reflection_Field_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Field, 12)
+
+static const flatbuffers_voffset_t __reflection_Object_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Object_ref_t;
+static reflection_Object_ref_t reflection_Object_clone(flatbuffers_builder_t *B, reflection_Object_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Object, 7)
+
+static const flatbuffers_voffset_t __reflection_RPCCall_required[] = { 0, 1, 2, 0 };
+typedef flatbuffers_ref_t reflection_RPCCall_ref_t;
+static reflection_RPCCall_ref_t reflection_RPCCall_clone(flatbuffers_builder_t *B, reflection_RPCCall_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_RPCCall, 5)
+
+static const flatbuffers_voffset_t __reflection_Service_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_Service_ref_t;
+static reflection_Service_ref_t reflection_Service_clone(flatbuffers_builder_t *B, reflection_Service_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Service, 4)
+
+static const flatbuffers_voffset_t __reflection_Schema_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Schema_ref_t;
+static reflection_Schema_ref_t reflection_Schema_clone(flatbuffers_builder_t *B, reflection_Schema_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Schema, 6)
+
+/* create() parameter packs: v0..vN follow the schema's field declaration
+ * order; the *_formal_args / *_call_args macros are spliced into generated
+ * signatures by the builder macros. __flatbuffers_build_table_prolog emits
+ * the per-table as-root/finalize helpers. */
+#define __reflection_Type_formal_args , reflection_BaseType_enum_t v0, reflection_BaseType_enum_t v1, int32_t v2, uint16_t v3
+#define __reflection_Type_call_args , v0, v1, v2, v3
+static inline reflection_Type_ref_t reflection_Type_create(flatbuffers_builder_t *B __reflection_Type_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Type, reflection_Type_file_identifier, reflection_Type_type_identifier)
+
+#define __reflection_KeyValue_formal_args , flatbuffers_string_ref_t v0, flatbuffers_string_ref_t v1
+#define __reflection_KeyValue_call_args , v0, v1
+static inline reflection_KeyValue_ref_t reflection_KeyValue_create(flatbuffers_builder_t *B __reflection_KeyValue_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_KeyValue, reflection_KeyValue_file_identifier, reflection_KeyValue_type_identifier)
+
+#define __reflection_EnumVal_formal_args ,\
+ flatbuffers_string_ref_t v0, int64_t v1, reflection_Object_ref_t v2, reflection_Type_ref_t v3, flatbuffers_string_vec_ref_t v4
+#define __reflection_EnumVal_call_args ,\
+ v0, v1, v2, v3, v4
+static inline reflection_EnumVal_ref_t reflection_EnumVal_create(flatbuffers_builder_t *B __reflection_EnumVal_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_EnumVal, reflection_EnumVal_file_identifier, reflection_EnumVal_type_identifier)
+
+#define __reflection_Enum_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_EnumVal_vec_ref_t v1, flatbuffers_bool_t v2, reflection_Type_ref_t v3, reflection_KeyValue_vec_ref_t v4, flatbuffers_string_vec_ref_t v5
+#define __reflection_Enum_call_args ,\
+ v0, v1, v2, v3, v4, v5
+static inline reflection_Enum_ref_t reflection_Enum_create(flatbuffers_builder_t *B __reflection_Enum_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Enum, reflection_Enum_file_identifier, reflection_Enum_type_identifier)
+
+#define __reflection_Field_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Type_ref_t v1, uint16_t v2, uint16_t v3,\
+ int64_t v4, double v5, flatbuffers_bool_t v6, flatbuffers_bool_t v7,\
+ flatbuffers_bool_t v8, reflection_KeyValue_vec_ref_t v9, flatbuffers_string_vec_ref_t v10, flatbuffers_bool_t v11
+#define __reflection_Field_call_args ,\
+ v0, v1, v2, v3,\
+ v4, v5, v6, v7,\
+ v8, v9, v10, v11
+static inline reflection_Field_ref_t reflection_Field_create(flatbuffers_builder_t *B __reflection_Field_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Field, reflection_Field_file_identifier, reflection_Field_type_identifier)
+
+#define __reflection_Object_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Field_vec_ref_t v1, flatbuffers_bool_t v2, int32_t v3,\
+ int32_t v4, reflection_KeyValue_vec_ref_t v5, flatbuffers_string_vec_ref_t v6
+#define __reflection_Object_call_args ,\
+ v0, v1, v2, v3,\
+ v4, v5, v6
+static inline reflection_Object_ref_t reflection_Object_create(flatbuffers_builder_t *B __reflection_Object_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Object, reflection_Object_file_identifier, reflection_Object_type_identifier)
+
+#define __reflection_RPCCall_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Object_ref_t v1, reflection_Object_ref_t v2, reflection_KeyValue_vec_ref_t v3, flatbuffers_string_vec_ref_t v4
+#define __reflection_RPCCall_call_args ,\
+ v0, v1, v2, v3, v4
+static inline reflection_RPCCall_ref_t reflection_RPCCall_create(flatbuffers_builder_t *B __reflection_RPCCall_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_RPCCall, reflection_RPCCall_file_identifier, reflection_RPCCall_type_identifier)
+
+#define __reflection_Service_formal_args , flatbuffers_string_ref_t v0, reflection_RPCCall_vec_ref_t v1, reflection_KeyValue_vec_ref_t v2, flatbuffers_string_vec_ref_t v3
+#define __reflection_Service_call_args , v0, v1, v2, v3
+static inline reflection_Service_ref_t reflection_Service_create(flatbuffers_builder_t *B __reflection_Service_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Service, reflection_Service_file_identifier, reflection_Service_type_identifier)
+
+#define __reflection_Schema_formal_args ,\
+ reflection_Object_vec_ref_t v0, reflection_Enum_vec_ref_t v1, flatbuffers_string_ref_t v2, flatbuffers_string_ref_t v3, reflection_Object_ref_t v4, reflection_Service_vec_ref_t v5
+#define __reflection_Schema_call_args ,\
+ v0, v1, v2, v3, v4, v5
+static inline reflection_Schema_ref_t reflection_Schema_create(flatbuffers_builder_t *B __reflection_Schema_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Schema, reflection_Schema_file_identifier, reflection_Schema_type_identifier)
+
+/* reflection_Type scalar field builders: (field id, namespace, field, scalar
+ * kind, C type, size, align, default). index defaults to -1 -- presumably
+ * "no type index"; confirm against reflection.fbs. */
+__flatbuffers_build_scalar_field(0, flatbuffers_, reflection_Type_base_type, reflection_BaseType, reflection_BaseType_enum_t, 1, 1, INT8_C(0), reflection_Type)
+__flatbuffers_build_scalar_field(1, flatbuffers_, reflection_Type_element, reflection_BaseType, reflection_BaseType_enum_t, 1, 1, INT8_C(0), reflection_Type)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Type_index, flatbuffers_int32, int32_t, 4, 4, INT32_C(-1), reflection_Type)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Type_fixed_length, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Type)
+
+static inline reflection_Type_ref_t reflection_Type_create(flatbuffers_builder_t *B __reflection_Type_formal_args)
+{
+    /* Convenience constructor: start the table, add each field in the
+     * generator's fixed order (scalars by descending size: int32, uint16,
+     * then the two byte-wide enums), and finalize. Returns 0 on failure. */
+    if (reflection_Type_start(B)) return 0;
+    if (reflection_Type_index_add(B, v2)) return 0;
+    if (reflection_Type_fixed_length_add(B, v3)) return 0;
+    if (reflection_Type_base_type_add(B, v0)) return 0;
+    if (reflection_Type_element_add(B, v1)) return 0;
+    return reflection_Type_end(B);
+}
+
+/* Memoized deep copy of table t into builder B. __flatbuffers_memoize_begin
+ * presumably short-circuits with an existing reference when t was already
+ * cloned into B, and __flatbuffers_memoize_end records the new reference and
+ * expands to the function's return -- see flatbuffers_common_builder.h. */
+static reflection_Type_ref_t reflection_Type_clone(flatbuffers_builder_t *B, reflection_Type_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_Type_start(B)
+    || reflection_Type_index_pick(B, t)
+    || reflection_Type_fixed_length_pick(B, t)
+    || reflection_Type_base_type_pick(B, t)
+    || reflection_Type_element_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_Type_end(B));
+}
+
+/* Field builders for reflection_KeyValue: both fields are strings. */
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_KeyValue_key, reflection_KeyValue)
+__flatbuffers_build_string_field(1, flatbuffers_, reflection_KeyValue_value, reflection_KeyValue)
+
+static inline reflection_KeyValue_ref_t reflection_KeyValue_create(flatbuffers_builder_t *B __reflection_KeyValue_formal_args)
+{
+    /* Convenience constructor: start the table, add key then value, and
+     * finalize; each step returns nonzero on failure, which yields 0. */
+    if (reflection_KeyValue_start(B)) return 0;
+    if (reflection_KeyValue_key_add(B, v0)) return 0;
+    if (reflection_KeyValue_value_add(B, v1)) return 0;
+    return reflection_KeyValue_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_KeyValue_ref_t reflection_KeyValue_clone(flatbuffers_builder_t *B, reflection_KeyValue_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_KeyValue_start(B)
+    || reflection_KeyValue_key_pick(B, t)
+    || reflection_KeyValue_value_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_KeyValue_end(B));
+}
+
+/* Field builders for reflection_EnumVal (field id, kind, type, table). */
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_EnumVal_name, reflection_EnumVal)
+__flatbuffers_build_scalar_field(1, flatbuffers_, reflection_EnumVal_value, flatbuffers_int64, int64_t, 8, 8, INT64_C(0), reflection_EnumVal)
+__flatbuffers_build_table_field(2, flatbuffers_, reflection_EnumVal_object, reflection_Object, reflection_EnumVal)
+__flatbuffers_build_table_field(3, flatbuffers_, reflection_EnumVal_union_type, reflection_Type, reflection_EnumVal)
+__flatbuffers_build_string_vector_field(4, flatbuffers_, reflection_EnumVal_documentation, reflection_EnumVal)
+
+static inline reflection_EnumVal_ref_t reflection_EnumVal_create(flatbuffers_builder_t *B __reflection_EnumVal_formal_args)
+{
+    /* Convenience constructor; fields are added in the generator's fixed
+     * order (the 8-byte value scalar first), returning 0 on any failure. */
+    if (reflection_EnumVal_start(B)) return 0;
+    if (reflection_EnumVal_value_add(B, v1)) return 0;
+    if (reflection_EnumVal_name_add(B, v0)) return 0;
+    if (reflection_EnumVal_object_add(B, v2)) return 0;
+    if (reflection_EnumVal_union_type_add(B, v3)) return 0;
+    if (reflection_EnumVal_documentation_add(B, v4)) return 0;
+    return reflection_EnumVal_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_EnumVal_ref_t reflection_EnumVal_clone(flatbuffers_builder_t *B, reflection_EnumVal_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_EnumVal_start(B)
+    || reflection_EnumVal_value_pick(B, t)
+    || reflection_EnumVal_name_pick(B, t)
+    || reflection_EnumVal_object_pick(B, t)
+    || reflection_EnumVal_union_type_pick(B, t)
+    || reflection_EnumVal_documentation_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_EnumVal_end(B));
+}
+
+/* Field builders for reflection_Enum (field id, kind, type, table). */
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Enum_name, reflection_Enum)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Enum_values, reflection_EnumVal, reflection_Enum)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Enum_is_union, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Enum)
+__flatbuffers_build_table_field(3, flatbuffers_, reflection_Enum_underlying_type, reflection_Type, reflection_Enum)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(4, flatbuffers_, reflection_Enum_attributes, reflection_KeyValue, reflection_Enum)
+__flatbuffers_build_string_vector_field(5, flatbuffers_, reflection_Enum_documentation, reflection_Enum)
+
+static inline reflection_Enum_ref_t reflection_Enum_create(flatbuffers_builder_t *B __reflection_Enum_formal_args)
+{
+    /* Convenience constructor; fields are added in the generator's fixed
+     * order (references first, the 1-byte is_union flag last), returning 0
+     * on any failure. */
+    if (reflection_Enum_start(B)) return 0;
+    if (reflection_Enum_name_add(B, v0)) return 0;
+    if (reflection_Enum_values_add(B, v1)) return 0;
+    if (reflection_Enum_underlying_type_add(B, v3)) return 0;
+    if (reflection_Enum_attributes_add(B, v4)) return 0;
+    if (reflection_Enum_documentation_add(B, v5)) return 0;
+    if (reflection_Enum_is_union_add(B, v2)) return 0;
+    return reflection_Enum_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_Enum_ref_t reflection_Enum_clone(flatbuffers_builder_t *B, reflection_Enum_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_Enum_start(B)
+    || reflection_Enum_name_pick(B, t)
+    || reflection_Enum_values_pick(B, t)
+    || reflection_Enum_underlying_type_pick(B, t)
+    || reflection_Enum_attributes_pick(B, t)
+    || reflection_Enum_documentation_pick(B, t)
+    || reflection_Enum_is_union_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_Enum_end(B));
+}
+
+/* Field builders for reflection_Field (field id, kind, type, table). */
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Field_name, reflection_Field)
+__flatbuffers_build_table_field(1, flatbuffers_, reflection_Field_type, reflection_Type, reflection_Field)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Field_id, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Field_offset, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(4, flatbuffers_, reflection_Field_default_integer, flatbuffers_int64, int64_t, 8, 8, INT64_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(5, flatbuffers_, reflection_Field_default_real, flatbuffers_double, double, 8, 8, 0.0000000000000000, reflection_Field)
+__flatbuffers_build_scalar_field(6, flatbuffers_, reflection_Field_deprecated, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(7, flatbuffers_, reflection_Field_required, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(8, flatbuffers_, reflection_Field_key, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(9, flatbuffers_, reflection_Field_attributes, reflection_KeyValue, reflection_Field)
+__flatbuffers_build_string_vector_field(10, flatbuffers_, reflection_Field_documentation, reflection_Field)
+__flatbuffers_build_scalar_field(11, flatbuffers_, reflection_Field_optional, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+
+static inline reflection_Field_ref_t reflection_Field_create(flatbuffers_builder_t *B __reflection_Field_formal_args)
+{
+    /* Convenience constructor; fields are added in the generator's fixed
+     * order (8-byte scalars first, then references, then 2-byte and 1-byte
+     * scalars), returning 0 on any failure. */
+    if (reflection_Field_start(B)) return 0;
+    if (reflection_Field_default_integer_add(B, v4)) return 0;
+    if (reflection_Field_default_real_add(B, v5)) return 0;
+    if (reflection_Field_name_add(B, v0)) return 0;
+    if (reflection_Field_type_add(B, v1)) return 0;
+    if (reflection_Field_attributes_add(B, v9)) return 0;
+    if (reflection_Field_documentation_add(B, v10)) return 0;
+    if (reflection_Field_id_add(B, v2)) return 0;
+    if (reflection_Field_offset_add(B, v3)) return 0;
+    if (reflection_Field_deprecated_add(B, v6)) return 0;
+    if (reflection_Field_required_add(B, v7)) return 0;
+    if (reflection_Field_key_add(B, v8)) return 0;
+    if (reflection_Field_optional_add(B, v11)) return 0;
+    return reflection_Field_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_Field_ref_t reflection_Field_clone(flatbuffers_builder_t *B, reflection_Field_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_Field_start(B)
+    || reflection_Field_default_integer_pick(B, t)
+    || reflection_Field_default_real_pick(B, t)
+    || reflection_Field_name_pick(B, t)
+    || reflection_Field_type_pick(B, t)
+    || reflection_Field_attributes_pick(B, t)
+    || reflection_Field_documentation_pick(B, t)
+    || reflection_Field_id_pick(B, t)
+    || reflection_Field_offset_pick(B, t)
+    || reflection_Field_deprecated_pick(B, t)
+    || reflection_Field_required_pick(B, t)
+    || reflection_Field_key_pick(B, t)
+    || reflection_Field_optional_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_Field_end(B));
+}
+
+/* Field builders for reflection_Object (field id, kind, type, table). */
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Object_name, reflection_Object)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Object_fields, reflection_Field, reflection_Object)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Object_is_struct, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Object)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Object_minalign, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), reflection_Object)
+__flatbuffers_build_scalar_field(4, flatbuffers_, reflection_Object_bytesize, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), reflection_Object)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(5, flatbuffers_, reflection_Object_attributes, reflection_KeyValue, reflection_Object)
+__flatbuffers_build_string_vector_field(6, flatbuffers_, reflection_Object_documentation, reflection_Object)
+
+static inline reflection_Object_ref_t reflection_Object_create(flatbuffers_builder_t *B __reflection_Object_formal_args)
+{
+    /* Convenience constructor; fields are added in the generator's fixed
+     * order (the 1-byte is_struct flag last), returning 0 on any failure. */
+    if (reflection_Object_start(B)) return 0;
+    if (reflection_Object_name_add(B, v0)) return 0;
+    if (reflection_Object_fields_add(B, v1)) return 0;
+    if (reflection_Object_minalign_add(B, v3)) return 0;
+    if (reflection_Object_bytesize_add(B, v4)) return 0;
+    if (reflection_Object_attributes_add(B, v5)) return 0;
+    if (reflection_Object_documentation_add(B, v6)) return 0;
+    if (reflection_Object_is_struct_add(B, v2)) return 0;
+    return reflection_Object_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_Object_ref_t reflection_Object_clone(flatbuffers_builder_t *B, reflection_Object_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_Object_start(B)
+    || reflection_Object_name_pick(B, t)
+    || reflection_Object_fields_pick(B, t)
+    || reflection_Object_minalign_pick(B, t)
+    || reflection_Object_bytesize_pick(B, t)
+    || reflection_Object_attributes_pick(B, t)
+    || reflection_Object_documentation_pick(B, t)
+    || reflection_Object_is_struct_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_Object_end(B));
+}
+
+/* Field builders for reflection_RPCCall (field id, kind, type, table). */
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_RPCCall_name, reflection_RPCCall)
+__flatbuffers_build_table_field(1, flatbuffers_, reflection_RPCCall_request, reflection_Object, reflection_RPCCall)
+__flatbuffers_build_table_field(2, flatbuffers_, reflection_RPCCall_response, reflection_Object, reflection_RPCCall)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(3, flatbuffers_, reflection_RPCCall_attributes, reflection_KeyValue, reflection_RPCCall)
+__flatbuffers_build_string_vector_field(4, flatbuffers_, reflection_RPCCall_documentation, reflection_RPCCall)
+
+static inline reflection_RPCCall_ref_t reflection_RPCCall_create(flatbuffers_builder_t *B __reflection_RPCCall_formal_args)
+{
+    /* Convenience constructor; fields are added in schema order, returning
+     * 0 on any failure. */
+    if (reflection_RPCCall_start(B)) return 0;
+    if (reflection_RPCCall_name_add(B, v0)) return 0;
+    if (reflection_RPCCall_request_add(B, v1)) return 0;
+    if (reflection_RPCCall_response_add(B, v2)) return 0;
+    if (reflection_RPCCall_attributes_add(B, v3)) return 0;
+    if (reflection_RPCCall_documentation_add(B, v4)) return 0;
+    return reflection_RPCCall_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_RPCCall_ref_t reflection_RPCCall_clone(flatbuffers_builder_t *B, reflection_RPCCall_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_RPCCall_start(B)
+    || reflection_RPCCall_name_pick(B, t)
+    || reflection_RPCCall_request_pick(B, t)
+    || reflection_RPCCall_response_pick(B, t)
+    || reflection_RPCCall_attributes_pick(B, t)
+    || reflection_RPCCall_documentation_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_RPCCall_end(B));
+}
+
+/* Field builders for reflection_Service (field id, kind, type, table). */
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Service_name, reflection_Service)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Service_calls, reflection_RPCCall, reflection_Service)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(2, flatbuffers_, reflection_Service_attributes, reflection_KeyValue, reflection_Service)
+__flatbuffers_build_string_vector_field(3, flatbuffers_, reflection_Service_documentation, reflection_Service)
+
+static inline reflection_Service_ref_t reflection_Service_create(flatbuffers_builder_t *B __reflection_Service_formal_args)
+{
+    /* Convenience constructor; fields are added in schema order, returning
+     * 0 on any failure. */
+    if (reflection_Service_start(B)) return 0;
+    if (reflection_Service_name_add(B, v0)) return 0;
+    if (reflection_Service_calls_add(B, v1)) return 0;
+    if (reflection_Service_attributes_add(B, v2)) return 0;
+    if (reflection_Service_documentation_add(B, v3)) return 0;
+    return reflection_Service_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_Service_ref_t reflection_Service_clone(flatbuffers_builder_t *B, reflection_Service_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_Service_start(B)
+    || reflection_Service_name_pick(B, t)
+    || reflection_Service_calls_pick(B, t)
+    || reflection_Service_attributes_pick(B, t)
+    || reflection_Service_documentation_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_Service_end(B));
+}
+
+/* Field builders for reflection_Schema (field id, kind, type, table). */
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(0, flatbuffers_, reflection_Schema_objects, reflection_Object, reflection_Schema)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Schema_enums, reflection_Enum, reflection_Schema)
+__flatbuffers_build_string_field(2, flatbuffers_, reflection_Schema_file_ident, reflection_Schema)
+__flatbuffers_build_string_field(3, flatbuffers_, reflection_Schema_file_ext, reflection_Schema)
+__flatbuffers_build_table_field(4, flatbuffers_, reflection_Schema_root_table, reflection_Object, reflection_Schema)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(5, flatbuffers_, reflection_Schema_services, reflection_Service, reflection_Schema)
+
+static inline reflection_Schema_ref_t reflection_Schema_create(flatbuffers_builder_t *B __reflection_Schema_formal_args)
+{
+    /* Convenience constructor; fields are added in schema order, returning
+     * 0 on any failure. */
+    if (reflection_Schema_start(B)) return 0;
+    if (reflection_Schema_objects_add(B, v0)) return 0;
+    if (reflection_Schema_enums_add(B, v1)) return 0;
+    if (reflection_Schema_file_ident_add(B, v2)) return 0;
+    if (reflection_Schema_file_ext_add(B, v3)) return 0;
+    if (reflection_Schema_root_table_add(B, v4)) return 0;
+    if (reflection_Schema_services_add(B, v5)) return 0;
+    return reflection_Schema_end(B);
+}
+
+/* Memoized deep copy of t into B; __flatbuffers_memoize_end expands to the
+ * function's return (see flatbuffers_common_builder.h). */
+static reflection_Schema_ref_t reflection_Schema_clone(flatbuffers_builder_t *B, reflection_Schema_table_t t)
+{
+    __flatbuffers_memoize_begin(B, t);
+    if (reflection_Schema_start(B)
+    || reflection_Schema_objects_pick(B, t)
+    || reflection_Schema_enums_pick(B, t)
+    || reflection_Schema_file_ident_pick(B, t)
+    || reflection_Schema_file_ext_pick(B, t)
+    || reflection_Schema_root_table_pick(B, t)
+    || reflection_Schema_services_pick(B, t)) {
+        return 0;
+    }
+    __flatbuffers_memoize_end(B, t, reflection_Schema_end(B));
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_BUILDER_H */
diff --git a/include/flatcc/reflection/reflection_reader.h b/include/flatcc/reflection/reflection_reader.h
new file mode 100644
index 0000000..bf6a0e9
--- /dev/null
+++ b/include/flatcc/reflection/reflection_reader.h
@@ -0,0 +1,411 @@
+#ifndef REFLECTION_READER_H
+#define REFLECTION_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef FLATBUFFERS_COMMON_READER_H
+#include "flatbuffers_common_reader.h"
+#endif
+#include "flatcc/flatcc_flatbuffers.h"
+#ifndef __alignas_is_defined
+#include <stdalign.h>
+#endif
+#include "flatcc/flatcc_prologue.h"
+#undef flatbuffers_identifier
+#define flatbuffers_identifier "BFBS"
+#undef flatbuffers_extension
+#define flatbuffers_extension "bfbs"
+
+
+typedef const struct reflection_Type_table *reflection_Type_table_t;
+typedef struct reflection_Type_table *reflection_Type_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Type_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Type_mutable_vec_t;
+typedef const struct reflection_KeyValue_table *reflection_KeyValue_table_t;
+typedef struct reflection_KeyValue_table *reflection_KeyValue_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_KeyValue_vec_t;
+typedef flatbuffers_uoffset_t *reflection_KeyValue_mutable_vec_t;
+typedef const struct reflection_EnumVal_table *reflection_EnumVal_table_t;
+typedef struct reflection_EnumVal_table *reflection_EnumVal_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_EnumVal_vec_t;
+typedef flatbuffers_uoffset_t *reflection_EnumVal_mutable_vec_t;
+typedef const struct reflection_Enum_table *reflection_Enum_table_t;
+typedef struct reflection_Enum_table *reflection_Enum_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Enum_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Enum_mutable_vec_t;
+typedef const struct reflection_Field_table *reflection_Field_table_t;
+typedef struct reflection_Field_table *reflection_Field_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Field_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Field_mutable_vec_t;
+typedef const struct reflection_Object_table *reflection_Object_table_t;
+typedef struct reflection_Object_table *reflection_Object_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Object_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Object_mutable_vec_t;
+typedef const struct reflection_RPCCall_table *reflection_RPCCall_table_t;
+typedef struct reflection_RPCCall_table *reflection_RPCCall_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_RPCCall_vec_t;
+typedef flatbuffers_uoffset_t *reflection_RPCCall_mutable_vec_t;
+typedef const struct reflection_Service_table *reflection_Service_table_t;
+typedef struct reflection_Service_table *reflection_Service_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Service_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Service_mutable_vec_t;
+typedef const struct reflection_Schema_table *reflection_Schema_table_t;
+typedef struct reflection_Schema_table *reflection_Schema_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Schema_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Schema_mutable_vec_t;
+#ifndef reflection_Type_file_identifier
+#define reflection_Type_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Type_file_identifier */
+#ifndef reflection_Type_identifier
+#define reflection_Type_identifier "BFBS"
+#endif
+#define reflection_Type_type_hash ((flatbuffers_thash_t)0x44c8fe5e)
+#define reflection_Type_type_identifier "\x5e\xfe\xc8\x44"
+#ifndef reflection_Type_file_extension
+#define reflection_Type_file_extension "bfbs"
+#endif
+#ifndef reflection_KeyValue_file_identifier
+#define reflection_KeyValue_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_KeyValue_file_identifier */
+#ifndef reflection_KeyValue_identifier
+#define reflection_KeyValue_identifier "BFBS"
+#endif
+#define reflection_KeyValue_type_hash ((flatbuffers_thash_t)0x8c761eaa)
+#define reflection_KeyValue_type_identifier "\xaa\x1e\x76\x8c"
+#ifndef reflection_KeyValue_file_extension
+#define reflection_KeyValue_file_extension "bfbs"
+#endif
+#ifndef reflection_EnumVal_file_identifier
+#define reflection_EnumVal_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_EnumVal_file_identifier */
+#ifndef reflection_EnumVal_identifier
+#define reflection_EnumVal_identifier "BFBS"
+#endif
+#define reflection_EnumVal_type_hash ((flatbuffers_thash_t)0x9531c946)
+#define reflection_EnumVal_type_identifier "\x46\xc9\x31\x95"
+#ifndef reflection_EnumVal_file_extension
+#define reflection_EnumVal_file_extension "bfbs"
+#endif
+#ifndef reflection_Enum_file_identifier
+#define reflection_Enum_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Enum_file_identifier */
+#ifndef reflection_Enum_identifier
+#define reflection_Enum_identifier "BFBS"
+#endif
+#define reflection_Enum_type_hash ((flatbuffers_thash_t)0xacffa90f)
+#define reflection_Enum_type_identifier "\x0f\xa9\xff\xac"
+#ifndef reflection_Enum_file_extension
+#define reflection_Enum_file_extension "bfbs"
+#endif
+#ifndef reflection_Field_file_identifier
+#define reflection_Field_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Field_file_identifier */
+#ifndef reflection_Field_identifier
+#define reflection_Field_identifier "BFBS"
+#endif
+#define reflection_Field_type_hash ((flatbuffers_thash_t)0x9f7e408a)
+#define reflection_Field_type_identifier "\x8a\x40\x7e\x9f"
+#ifndef reflection_Field_file_extension
+#define reflection_Field_file_extension "bfbs"
+#endif
+#ifndef reflection_Object_file_identifier
+#define reflection_Object_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Object_file_identifier */
+#ifndef reflection_Object_identifier
+#define reflection_Object_identifier "BFBS"
+#endif
+#define reflection_Object_type_hash ((flatbuffers_thash_t)0xb09729bd)
+#define reflection_Object_type_identifier "\xbd\x29\x97\xb0"
+#ifndef reflection_Object_file_extension
+#define reflection_Object_file_extension "bfbs"
+#endif
+#ifndef reflection_RPCCall_file_identifier
+#define reflection_RPCCall_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_RPCCall_file_identifier */
+#ifndef reflection_RPCCall_identifier
+#define reflection_RPCCall_identifier "BFBS"
+#endif
+#define reflection_RPCCall_type_hash ((flatbuffers_thash_t)0xe2d586f1)
+#define reflection_RPCCall_type_identifier "\xf1\x86\xd5\xe2"
+#ifndef reflection_RPCCall_file_extension
+#define reflection_RPCCall_file_extension "bfbs"
+#endif
+#ifndef reflection_Service_file_identifier
+#define reflection_Service_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Service_file_identifier */
+#ifndef reflection_Service_identifier
+#define reflection_Service_identifier "BFBS"
+#endif
+#define reflection_Service_type_hash ((flatbuffers_thash_t)0xf31a13b5)
+#define reflection_Service_type_identifier "\xb5\x13\x1a\xf3"
+#ifndef reflection_Service_file_extension
+#define reflection_Service_file_extension "bfbs"
+#endif
+#ifndef reflection_Schema_file_identifier
+#define reflection_Schema_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Schema_file_identifier */
+#ifndef reflection_Schema_identifier
+#define reflection_Schema_identifier "BFBS"
+#endif
+#define reflection_Schema_type_hash ((flatbuffers_thash_t)0xfaf93779)
+#define reflection_Schema_type_identifier "\x79\x37\xf9\xfa"
+#ifndef reflection_Schema_file_extension
+#define reflection_Schema_file_extension "bfbs"
+#endif
+
+typedef int8_t reflection_BaseType_enum_t;
+__flatbuffers_define_integer_type(reflection_BaseType, reflection_BaseType_enum_t, 8)
+#define reflection_BaseType_None ((reflection_BaseType_enum_t)INT8_C(0))
+#define reflection_BaseType_UType ((reflection_BaseType_enum_t)INT8_C(1))
+#define reflection_BaseType_Bool ((reflection_BaseType_enum_t)INT8_C(2))
+#define reflection_BaseType_Byte ((reflection_BaseType_enum_t)INT8_C(3))
+#define reflection_BaseType_UByte ((reflection_BaseType_enum_t)INT8_C(4))
+#define reflection_BaseType_Short ((reflection_BaseType_enum_t)INT8_C(5))
+#define reflection_BaseType_UShort ((reflection_BaseType_enum_t)INT8_C(6))
+#define reflection_BaseType_Int ((reflection_BaseType_enum_t)INT8_C(7))
+#define reflection_BaseType_UInt ((reflection_BaseType_enum_t)INT8_C(8))
+#define reflection_BaseType_Long ((reflection_BaseType_enum_t)INT8_C(9))
+#define reflection_BaseType_ULong ((reflection_BaseType_enum_t)INT8_C(10))
+#define reflection_BaseType_Float ((reflection_BaseType_enum_t)INT8_C(11))
+#define reflection_BaseType_Double ((reflection_BaseType_enum_t)INT8_C(12))
+#define reflection_BaseType_String ((reflection_BaseType_enum_t)INT8_C(13))
+#define reflection_BaseType_Vector ((reflection_BaseType_enum_t)INT8_C(14))
+#define reflection_BaseType_Obj ((reflection_BaseType_enum_t)INT8_C(15))
+#define reflection_BaseType_Union ((reflection_BaseType_enum_t)INT8_C(16))
+#define reflection_BaseType_Array ((reflection_BaseType_enum_t)INT8_C(17))
+#define reflection_BaseType_MaxBaseType ((reflection_BaseType_enum_t)INT8_C(18))
+
+static inline const char *reflection_BaseType_name(reflection_BaseType_enum_t value)
+{
+ switch (value) {
+ case reflection_BaseType_None: return "None";
+ case reflection_BaseType_UType: return "UType";
+ case reflection_BaseType_Bool: return "Bool";
+ case reflection_BaseType_Byte: return "Byte";
+ case reflection_BaseType_UByte: return "UByte";
+ case reflection_BaseType_Short: return "Short";
+ case reflection_BaseType_UShort: return "UShort";
+ case reflection_BaseType_Int: return "Int";
+ case reflection_BaseType_UInt: return "UInt";
+ case reflection_BaseType_Long: return "Long";
+ case reflection_BaseType_ULong: return "ULong";
+ case reflection_BaseType_Float: return "Float";
+ case reflection_BaseType_Double: return "Double";
+ case reflection_BaseType_String: return "String";
+ case reflection_BaseType_Vector: return "Vector";
+ case reflection_BaseType_Obj: return "Obj";
+ case reflection_BaseType_Union: return "Union";
+ case reflection_BaseType_Array: return "Array";
+ case reflection_BaseType_MaxBaseType: return "MaxBaseType";
+ default: return "";
+ }
+}
+
+static inline int reflection_BaseType_is_known_value(reflection_BaseType_enum_t value)
+{
+ switch (value) {
+ case reflection_BaseType_None: return 1;
+ case reflection_BaseType_UType: return 1;
+ case reflection_BaseType_Bool: return 1;
+ case reflection_BaseType_Byte: return 1;
+ case reflection_BaseType_UByte: return 1;
+ case reflection_BaseType_Short: return 1;
+ case reflection_BaseType_UShort: return 1;
+ case reflection_BaseType_Int: return 1;
+ case reflection_BaseType_UInt: return 1;
+ case reflection_BaseType_Long: return 1;
+ case reflection_BaseType_ULong: return 1;
+ case reflection_BaseType_Float: return 1;
+ case reflection_BaseType_Double: return 1;
+ case reflection_BaseType_String: return 1;
+ case reflection_BaseType_Vector: return 1;
+ case reflection_BaseType_Obj: return 1;
+ case reflection_BaseType_Union: return 1;
+ case reflection_BaseType_Array: return 1;
+ case reflection_BaseType_MaxBaseType: return 1;
+ default: return 0;
+ }
+}
+
+
+
+struct reflection_Type_table { uint8_t unused__; };
+
+static inline size_t reflection_Type_vec_len(reflection_Type_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Type_table_t reflection_Type_vec_at(reflection_Type_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Type_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Type)
+
+__flatbuffers_define_scalar_field(0, reflection_Type, base_type, reflection_BaseType, reflection_BaseType_enum_t, INT8_C(0))
+__flatbuffers_define_scalar_field(1, reflection_Type, element, reflection_BaseType, reflection_BaseType_enum_t, INT8_C(0))
+__flatbuffers_define_scalar_field(2, reflection_Type, index, flatbuffers_int32, int32_t, INT32_C(-1))
+__flatbuffers_define_scalar_field(3, reflection_Type, fixed_length, flatbuffers_uint16, uint16_t, UINT16_C(0))
+
+struct reflection_KeyValue_table { uint8_t unused__; };
+
+static inline size_t reflection_KeyValue_vec_len(reflection_KeyValue_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_KeyValue_table_t reflection_KeyValue_vec_at(reflection_KeyValue_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_KeyValue_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_KeyValue)
+
+__flatbuffers_define_string_field(0, reflection_KeyValue, key, 1)
+__flatbuffers_define_find_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_table_sort_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_default_find_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_default_scan_by_string_field(reflection_KeyValue, key)
+#define reflection_KeyValue_vec_sort reflection_KeyValue_vec_sort_by_key
+__flatbuffers_define_string_field(1, reflection_KeyValue, value, 0)
+
+struct reflection_EnumVal_table { uint8_t unused__; };
+
+static inline size_t reflection_EnumVal_vec_len(reflection_EnumVal_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_EnumVal_table_t reflection_EnumVal_vec_at(reflection_EnumVal_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_EnumVal_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_EnumVal)
+
+__flatbuffers_define_string_field(0, reflection_EnumVal, name, 1)
+__flatbuffers_define_scalar_field(1, reflection_EnumVal, value, flatbuffers_int64, int64_t, INT64_C(0))
+/* Note: find only works on vectors sorted by this field. */
+__flatbuffers_define_find_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_table_sort_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_default_find_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_default_scan_by_scalar_field(reflection_EnumVal, value, int64_t)
+#define reflection_EnumVal_vec_sort reflection_EnumVal_vec_sort_by_value
+__flatbuffers_define_table_field(2, reflection_EnumVal, object, reflection_Object_table_t, 0)
+__flatbuffers_define_table_field(3, reflection_EnumVal, union_type, reflection_Type_table_t, 0)
+__flatbuffers_define_vector_field(4, reflection_EnumVal, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Enum_table { uint8_t unused__; };
+
+static inline size_t reflection_Enum_vec_len(reflection_Enum_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Enum_table_t reflection_Enum_vec_at(reflection_Enum_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Enum_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Enum)
+
+__flatbuffers_define_string_field(0, reflection_Enum, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Enum, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Enum, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Enum, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Enum, name)
+#define reflection_Enum_vec_sort reflection_Enum_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Enum, values, reflection_EnumVal_vec_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Enum, is_union, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_table_field(3, reflection_Enum, underlying_type, reflection_Type_table_t, 1)
+__flatbuffers_define_vector_field(4, reflection_Enum, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(5, reflection_Enum, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Field_table { uint8_t unused__; };
+
+static inline size_t reflection_Field_vec_len(reflection_Field_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Field_table_t reflection_Field_vec_at(reflection_Field_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Field_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Field)
+
+__flatbuffers_define_string_field(0, reflection_Field, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Field, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Field, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Field, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Field, name)
+#define reflection_Field_vec_sort reflection_Field_vec_sort_by_name
+__flatbuffers_define_table_field(1, reflection_Field, type, reflection_Type_table_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Field, id, flatbuffers_uint16, uint16_t, UINT16_C(0))
+__flatbuffers_define_scalar_field(3, reflection_Field, offset, flatbuffers_uint16, uint16_t, UINT16_C(0))
+__flatbuffers_define_scalar_field(4, reflection_Field, default_integer, flatbuffers_int64, int64_t, INT64_C(0))
+__flatbuffers_define_scalar_field(5, reflection_Field, default_real, flatbuffers_double, double, 0.0000000000000000)
+__flatbuffers_define_scalar_field(6, reflection_Field, deprecated, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(7, reflection_Field, required, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(8, reflection_Field, key, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_vector_field(9, reflection_Field, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(10, reflection_Field, documentation, flatbuffers_string_vec_t, 0)
+__flatbuffers_define_scalar_field(11, reflection_Field, optional, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+
+struct reflection_Object_table { uint8_t unused__; };
+
+static inline size_t reflection_Object_vec_len(reflection_Object_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Object_table_t reflection_Object_vec_at(reflection_Object_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Object_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Object)
+
+__flatbuffers_define_string_field(0, reflection_Object, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Object, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Object, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Object, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Object, name)
+#define reflection_Object_vec_sort reflection_Object_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Object, fields, reflection_Field_vec_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Object, is_struct, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(3, reflection_Object, minalign, flatbuffers_int32, int32_t, INT32_C(0))
+__flatbuffers_define_scalar_field(4, reflection_Object, bytesize, flatbuffers_int32, int32_t, INT32_C(0))
+__flatbuffers_define_vector_field(5, reflection_Object, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(6, reflection_Object, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_RPCCall_table { uint8_t unused__; };
+
+static inline size_t reflection_RPCCall_vec_len(reflection_RPCCall_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_RPCCall_table_t reflection_RPCCall_vec_at(reflection_RPCCall_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_RPCCall_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_RPCCall)
+
+__flatbuffers_define_string_field(0, reflection_RPCCall, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_default_find_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_RPCCall, name)
+#define reflection_RPCCall_vec_sort reflection_RPCCall_vec_sort_by_name
+__flatbuffers_define_table_field(1, reflection_RPCCall, request, reflection_Object_table_t, 1)
+__flatbuffers_define_table_field(2, reflection_RPCCall, response, reflection_Object_table_t, 1)
+__flatbuffers_define_vector_field(3, reflection_RPCCall, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(4, reflection_RPCCall, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Service_table { uint8_t unused__; };
+
+static inline size_t reflection_Service_vec_len(reflection_Service_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Service_table_t reflection_Service_vec_at(reflection_Service_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Service_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Service)
+
+__flatbuffers_define_string_field(0, reflection_Service, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Service, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Service, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Service, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Service, name)
+#define reflection_Service_vec_sort reflection_Service_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Service, calls, reflection_RPCCall_vec_t, 0)
+__flatbuffers_define_vector_field(2, reflection_Service, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(3, reflection_Service, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Schema_table { uint8_t unused__; };
+
+static inline size_t reflection_Schema_vec_len(reflection_Schema_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Schema_table_t reflection_Schema_vec_at(reflection_Schema_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Schema_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Schema)
+
+__flatbuffers_define_vector_field(0, reflection_Schema, objects, reflection_Object_vec_t, 1)
+__flatbuffers_define_vector_field(1, reflection_Schema, enums, reflection_Enum_vec_t, 1)
+__flatbuffers_define_string_field(2, reflection_Schema, file_ident, 0)
+__flatbuffers_define_string_field(3, reflection_Schema, file_ext, 0)
+__flatbuffers_define_table_field(4, reflection_Schema, root_table, reflection_Object_table_t, 0)
+__flatbuffers_define_vector_field(5, reflection_Schema, services, reflection_Service_vec_t, 0)
+
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_READER_H */
diff --git a/include/flatcc/reflection/reflection_verifier.h b/include/flatcc/reflection/reflection_verifier.h
new file mode 100644
index 0000000..5b5bd37
--- /dev/null
+++ b/include/flatcc/reflection/reflection_verifier.h
@@ -0,0 +1,308 @@
+#ifndef REFLECTION_VERIFIER_H
+#define REFLECTION_VERIFIER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef REFLECTION_READER_H
+#include "reflection_reader.h"
+#endif
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_prologue.h"
+
+static int reflection_Type_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_KeyValue_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_EnumVal_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Enum_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Field_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Object_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_RPCCall_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Service_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Schema_verify_table(flatcc_table_verifier_descriptor_t *td);
+
+static int reflection_Type_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_field(td, 0, 1, 1) /* base_type */)) return ret;
+ if ((ret = flatcc_verify_field(td, 1, 1, 1) /* element */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 4, 4) /* index */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 2, 2) /* fixed_length */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Type_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Type_identifier, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Type_type_identifier, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Type_verify_table);
+}
+
+static int reflection_KeyValue_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* key */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 1, 0) /* value */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_KeyValue_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_KeyValue_identifier, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_KeyValue_type_identifier, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_KeyValue_verify_table);
+}
+
+static int reflection_EnumVal_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_field(td, 1, 8, 8) /* value */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 2, 0, &reflection_Object_verify_table) /* object */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 3, 0, &reflection_Type_verify_table) /* union_type */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 4, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_EnumVal_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_EnumVal_identifier, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_EnumVal_type_identifier, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_EnumVal_verify_table);
+}
+
+static int reflection_Enum_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_EnumVal_verify_table) /* values */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 1, 1) /* is_union */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 3, 1, &reflection_Type_verify_table) /* underlying_type */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 4, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 5, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Enum_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Enum_identifier, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Enum_type_identifier, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Enum_verify_table);
+}
+
+static int reflection_Field_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 1, 1, &reflection_Type_verify_table) /* type */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 2, 2) /* id */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 2, 2) /* offset */)) return ret;
+ if ((ret = flatcc_verify_field(td, 4, 8, 8) /* default_integer */)) return ret;
+ if ((ret = flatcc_verify_field(td, 5, 8, 8) /* default_real */)) return ret;
+ if ((ret = flatcc_verify_field(td, 6, 1, 1) /* deprecated */)) return ret;
+ if ((ret = flatcc_verify_field(td, 7, 1, 1) /* required */)) return ret;
+ if ((ret = flatcc_verify_field(td, 8, 1, 1) /* key */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 9, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 10, 0) /* documentation */)) return ret;
+ if ((ret = flatcc_verify_field(td, 11, 1, 1) /* optional */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Field_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Field_identifier, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Field_type_identifier, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Field_verify_table);
+}
+
+static int reflection_Object_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_Field_verify_table) /* fields */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 1, 1) /* is_struct */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 4, 4) /* minalign */)) return ret;
+ if ((ret = flatcc_verify_field(td, 4, 4, 4) /* bytesize */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 5, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 6, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Object_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Object_identifier, &reflection_Object_verify_table);
+}
+
+/* Same as verify_as_root, but matches the schema's type identifier instead of the file identifier. */
+static inline int reflection_Object_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Object_type_identifier, &reflection_Object_verify_table);
+}
+
+/* Verifies buf as a reflection.Object root against a caller-supplied file identifier fid. */
+static inline int reflection_Object_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Object_verify_table);
+}
+
+/* Verifies buf as a reflection.Object root against a caller-supplied type hash. */
+static inline int reflection_Object_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Object_verify_table);
+}
+
+/* Generated verifier for reflection.RPCCall tables; returns flatcc_verify_ok
+ * (0) or the first field-check error. name/request/response are required. */
+static int reflection_RPCCall_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 1, 1, &reflection_Object_verify_table) /* request */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 2, 1, &reflection_Object_verify_table) /* response */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 3, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 4, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+/* Verifies buf as a reflection.RPCCall root table using the schema's default file identifier. */
+static inline int reflection_RPCCall_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_RPCCall_identifier, &reflection_RPCCall_verify_table);
+}
+
+/* Same as verify_as_root, but matches the schema's type identifier instead of the file identifier. */
+static inline int reflection_RPCCall_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_RPCCall_type_identifier, &reflection_RPCCall_verify_table);
+}
+
+/* Verifies buf as a reflection.RPCCall root against a caller-supplied file identifier fid. */
+static inline int reflection_RPCCall_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_RPCCall_verify_table);
+}
+
+/* Verifies buf as a reflection.RPCCall root against a caller-supplied type hash. */
+static inline int reflection_RPCCall_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_RPCCall_verify_table);
+}
+
+/* Generated verifier for reflection.Service tables; returns flatcc_verify_ok
+ * (0) or the first field-check error. Only the name field is required. */
+static int reflection_Service_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 0, &reflection_RPCCall_verify_table) /* calls */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 2, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 3, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+/* Verifies buf as a reflection.Service root table using the schema's default file identifier. */
+static inline int reflection_Service_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Service_identifier, &reflection_Service_verify_table);
+}
+
+/* Same as verify_as_root, but matches the schema's type identifier instead of the file identifier. */
+static inline int reflection_Service_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Service_type_identifier, &reflection_Service_verify_table);
+}
+
+/* Verifies buf as a reflection.Service root against a caller-supplied file identifier fid. */
+static inline int reflection_Service_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Service_verify_table);
+}
+
+/* Verifies buf as a reflection.Service root against a caller-supplied type hash. */
+static inline int reflection_Service_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Service_verify_table);
+}
+
+/* Generated verifier for reflection.Schema tables (the reflection root type);
+ * returns flatcc_verify_ok (0) or the first field-check error. The objects
+ * and enums vectors are required; reflection_Enum_verify_table is defined
+ * earlier in this generated header. */
+static int reflection_Schema_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 0, 1, &reflection_Object_verify_table) /* objects */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_Enum_verify_table) /* enums */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 2, 0) /* file_ident */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 3, 0) /* file_ext */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 4, 0, &reflection_Object_verify_table) /* root_table */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 5, 0, &reflection_Service_verify_table) /* services */)) return ret;
+ return flatcc_verify_ok;
+}
+
+/* Verifies buf as a reflection.Schema root table using the schema's default file identifier. */
+static inline int reflection_Schema_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Schema_identifier, &reflection_Schema_verify_table);
+}
+
+/* Same as verify_as_root, but matches the schema's type identifier instead of the file identifier. */
+static inline int reflection_Schema_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Schema_type_identifier, &reflection_Schema_verify_table);
+}
+
+/* Verifies buf as a reflection.Schema root against a caller-supplied file identifier fid. */
+static inline int reflection_Schema_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Schema_verify_table);
+}
+
+/* Verifies buf as a reflection.Schema root against a caller-supplied type hash. */
+static inline int reflection_Schema_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Schema_verify_table);
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_VERIFIER_H */
diff --git a/include/flatcc/support/README b/include/flatcc/support/README
new file mode 100644
index 0000000..d9f6ec0
--- /dev/null
+++ b/include/flatcc/support/README
@@ -0,0 +1 @@
+Support files, mainly used for testing.
diff --git a/include/flatcc/support/cdump.h b/include/flatcc/support/cdump.h
new file mode 100644
index 0000000..b589362
--- /dev/null
+++ b/include/flatcc/support/cdump.h
@@ -0,0 +1,38 @@
+#ifndef CDUMP_H
+#define CDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
/* Writes the len bytes at addr to fp as a named, constant C byte array
 * initializer (e.g. for embedding binary test data in source files).
 * A null name falls back to "dump". addr is read-only here, so it is
 * const-qualified, matching hexdump's signature. */
static void cdump(const char *name, const void *addr, size_t len, FILE *fp) {
    size_t i;  /* size_t, not unsigned int: avoids truncating large len */
    const unsigned char *pc = (const unsigned char *)addr;

    /* Fall back to a generic array name when none is given. */
    name = name ? name : "dump";
    fprintf(fp, "const unsigned char %s[] = {", name);

    /* Emit 16 bytes per line with an extra gap after the first 8. */
    for (i = 0; i < len; i++) {
        if ((i % 16) == 0) {
            fprintf(fp, "\n ");
        } else if ((i % 8) == 0) {
            fprintf(fp, " ");
        }
        fprintf(fp, " 0x%02x,", pc[i]);
    }
    fprintf(fp, "\n};\n");
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CDUMP_H */
diff --git a/include/flatcc/support/elapsed.h b/include/flatcc/support/elapsed.h
new file mode 100644
index 0000000..ba3bd73
--- /dev/null
+++ b/include/flatcc/support/elapsed.h
@@ -0,0 +1,73 @@
+#ifndef ELAPSED_H
+#define ELAPSED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+/* Based on http://stackoverflow.com/a/8583395 */
+#if !defined(_WIN32)
+#include <sys/time.h>
+/* Returns wall-clock seconds elapsed since the first call (which returns 0).
+ * NOTE(review): uses an unsynchronized static baseline -- not thread-safe --
+ * and treats t0.tv_sec == 0 as "uninitialized", which is safe any time after
+ * the 1970 epoch. */
+static double elapsed_realtime(void) { // returns 0 seconds first time called
+ static struct timeval t0;
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ if (!t0.tv_sec)
+ t0 = tv;
+ return (double)(tv.tv_sec - t0.tv_sec) + (double)(tv.tv_usec - t0.tv_usec) / 1e6;
+}
+#else
+#include <windows.h>
+#ifndef FatalError
+/* NOTE(review): uses exit(), but <stdlib.h> is not included by this header --
+ * confirm it is pulled in transitively on Windows builds. */
+#define FatalError(s) do { perror(s); exit(-1); } while(0)
+#endif
+/* Windows variant: seconds since the first call, computed from
+ * QueryPerformanceCounter ticks; freq/start are lazily initialized on the
+ * first call (also not thread-safe). */
+static double elapsed_realtime(void) { // granularity about 50 microsecs on my machine
+ static LARGE_INTEGER freq, start;
+ LARGE_INTEGER count;
+ if (!QueryPerformanceCounter(&count))
+ FatalError("QueryPerformanceCounter");
+ if (!freq.QuadPart) { // one time initialization
+ if (!QueryPerformanceFrequency(&freq))
+ FatalError("QueryPerformanceFrequency");
+ start = count;
+ }
+ return (double)(count.QuadPart - start.QuadPart) / freq.QuadPart;
+}
+#endif
+
+/* end Based on stackoverflow */
+
/* Prints a human-readable summary of a benchmark run to stdout.
 *
 * descr   - operation label
 * t1, t2  - start/end timestamps in seconds (e.g. from elapsed_realtime)
 * size    - bytes processed per iteration
 * rep     - number of iterations
 * reptext - optional label for aggregate throughput; that line is printed
 *           only when reptext is non-null and rep != 1
 *
 * Always returns 0.
 */
static int show_benchmark(const char *descr, double t1, double t2, size_t size, int rep, const char *reptext)
{
    /* Unit scale table for the per-operation time: the first entry whose
     * limit exceeds ns_per_op is used; past the table we fall back to
     * whole seconds. */
    static const struct { double limit; double divisor; const char *unit; } scale[] = {
        { 1000, 1.0,  "ns" },
        { 1e6,  1000, "us" },
        { 1e9,  1e6,  "ms" },
    };
    const double dt = t2 - t1;
    const double ns_per_op = dt * 1e9 / rep;
    int k;

    printf("operation: %s\n", descr);
    printf("elapsed time: %.3f (s)\n", dt);
    printf("iterations: %d\n", rep);
    printf("size: %lu (bytes)\n", (unsigned long)size);
    printf("bandwidth: %.3f (MB/s)\n", (double)rep * (double)size / 1e6 / dt);
    printf("throughput in ops per sec: %.3f\n", rep / dt);
    if (reptext && rep != 1) {
        printf("throughput in %s ops per sec: %.3f\n", reptext, 1 / dt);
    }
    for (k = 0; k < 3 && ns_per_op >= scale[k].limit; k++)
        ;
    if (k < 3) {
        printf("time per op: %.3f (%s)\n", ns_per_op / scale[k].divisor, scale[k].unit);
    } else {
        printf("time per op: %.3f (s)\n", ns_per_op / 1e9);
    }
    return 0;
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ELAPSED_H */
diff --git a/include/flatcc/support/hexdump.h b/include/flatcc/support/hexdump.h
new file mode 100644
index 0000000..7b6f9b8
--- /dev/null
+++ b/include/flatcc/support/hexdump.h
@@ -0,0 +1,47 @@
+#ifndef HEXDUMP_H
+#define HEXDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
/* Writes a classic hex + ASCII dump of the len bytes at addr to fp.
 * If desc is non-null it is printed first as a heading. Offsets are
 * 8-digit hex; non-printable bytes show as '.' in the ASCII column.
 * Based on: http://stackoverflow.com/a/7776146 */
static void hexdump(const char *desc, const void *addr, size_t len, FILE *fp) {
    size_t i;  /* size_t, not unsigned int: avoids truncating large len */
    unsigned char buf[17];
    const unsigned char *pc = (const unsigned char*)addr;

    /* Output description if given. */
    if (desc != NULL) fprintf(fp, "%s:\n", desc);

    /* Keep the ASCII column a valid string even when len == 0; the
     * original printed an uninitialized buffer in that case (UB). */
    buf[0] = '\0';

    for (i = 0; i < len; i++) {
        if ((i % 16) == 0) {
            /* Flush the previous line's ASCII column, then the offset. */
            if (i != 0) fprintf(fp, " |%s|\n", buf);
            fprintf(fp, "%08x ", (unsigned int)i);
        } else if ((i % 8) == 0) {
            fprintf(fp, " ");
        }
        fprintf(fp, " %02x", pc[i]);
        buf[i % 16] = (pc[i] < 0x20 || pc[i] > 0x7e) ? '.' : pc[i];
        buf[(i % 16) + 1] = '\0';
    }
    /* Pad the final partial line so the ASCII column stays aligned. */
    if (i % 16 <= 8 && i % 16 != 0) fprintf(fp, " ");
    while ((i % 16) != 0) {
        fprintf(fp, " ");
        i++;
    }
    fprintf(fp, " |%s|\n", buf);
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HEXDUMP_H */
diff --git a/include/flatcc/support/readfile.h b/include/flatcc/support/readfile.h
new file mode 100644
index 0000000..209875f
--- /dev/null
+++ b/include/flatcc/support/readfile.h
@@ -0,0 +1,66 @@
+#ifndef READFILE_H
+#define READFILE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
/* Reads the entire file `filename` into a malloc'ed buffer.
 *
 * Returns the buffer and stores the file size in *size_out, or returns
 * NULL on failure (open/seek/read error, or when max_size > 0 and the
 * file exceeds max_size). *size_out still receives the size when it could
 * be determined, and 0 otherwise. size_out may be NULL. The caller owns
 * the returned buffer and must free() it.
 */
static char *readfile(const char *filename, size_t max_size, size_t *size_out)
{
    FILE *fp;
    long k;
    size_t size, pos, n, ignored;
    char *buf;

    /* Allow callers that don't care about the size. */
    size_out = size_out ? size_out : &ignored;

    fp = fopen(filename, "rb");
    size = 0;
    buf = 0;

    if (!fp) {
        goto fail;
    }
    /* Check fseek too -- it can fail on pipes and other non-seekable
     * streams (the original ignored its return value). */
    if (fseek(fp, 0L, SEEK_END) != 0) {
        goto fail;
    }
    k = ftell(fp);
    if (k < 0) goto fail;
    size = (size_t)k;
    if (max_size > 0 && size > max_size) {
        goto fail;
    }
    rewind(fp);
    /* malloc(1) for empty files so a non-null pointer signals success. */
    buf = (char *)malloc(size ? size : 1);
    if (!buf) {
        goto fail;
    }
    pos = 0;
    /* fread may return short counts; loop until done or EOF/error. */
    while ((n = fread(buf + pos, 1, size - pos, fp))) {
        pos += n;
    }
    if (pos != size) {
        goto fail;
    }
    fclose(fp);
    *size_out = size;
    return buf;

fail:
    if (fp) {
        fclose(fp);
    }
    if (buf) {
        free(buf);
    }
    *size_out = size;
    return 0;
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* READFILE_H */