path: root/flatcc/src
Diffstat (limited to 'flatcc/src')
-rw-r--r--  flatcc/src/cli/CMakeLists.txt  20
-rw-r--r--  flatcc/src/cli/flatcc_cli.c  505
-rw-r--r--  flatcc/src/compiler/CMakeLists.txt  43
-rw-r--r--  flatcc/src/compiler/catalog.h  217
-rw-r--r--  flatcc/src/compiler/codegen.h  46
-rw-r--r--  flatcc/src/compiler/codegen_c.c  285
-rw-r--r--  flatcc/src/compiler/codegen_c.h  397
-rw-r--r--  flatcc/src/compiler/codegen_c_builder.c  2159
-rw-r--r--  flatcc/src/compiler/codegen_c_json_parser.c  1850
-rw-r--r--  flatcc/src/compiler/codegen_c_json_printer.c  732
-rw-r--r--  flatcc/src/compiler/codegen_c_reader.c  1928
-rw-r--r--  flatcc/src/compiler/codegen_c_sort.c  171
-rw-r--r--  flatcc/src/compiler/codegen_c_sort.h  9
-rw-r--r--  flatcc/src/compiler/codegen_c_sorter.c  355
-rw-r--r--  flatcc/src/compiler/codegen_c_verifier.c  327
-rw-r--r--  flatcc/src/compiler/codegen_schema.c  581
-rw-r--r--  flatcc/src/compiler/coerce.c  266
-rw-r--r--  flatcc/src/compiler/coerce.h  13
-rw-r--r--  flatcc/src/compiler/fileio.c  225
-rw-r--r--  flatcc/src/compiler/fileio.h  86
-rw-r--r--  flatcc/src/compiler/flatcc.c  511
-rw-r--r--  flatcc/src/compiler/hash_tables/README.txt  2
-rw-r--r--  flatcc/src/compiler/hash_tables/name_table.c  21
-rw-r--r--  flatcc/src/compiler/hash_tables/schema_table.c  21
-rw-r--r--  flatcc/src/compiler/hash_tables/scope_table.c  177
-rw-r--r--  flatcc/src/compiler/hash_tables/symbol_table.c  22
-rw-r--r--  flatcc/src/compiler/hash_tables/value_set.c  60
-rw-r--r--  flatcc/src/compiler/keywords.h  56
-rw-r--r--  flatcc/src/compiler/parser.c  1550
-rw-r--r--  flatcc/src/compiler/parser.h  213
-rw-r--r--  flatcc/src/compiler/pstrutil.h  58
-rw-r--r--  flatcc/src/compiler/semantics.c  1962
-rw-r--r--  flatcc/src/compiler/semantics.h  12
-rw-r--r--  flatcc/src/compiler/symbols.h  457
-rw-r--r--  flatcc/src/runtime/CMakeLists.txt  16
-rw-r--r--  flatcc/src/runtime/builder.c  2035
-rw-r--r--  flatcc/src/runtime/emitter.c  269
-rw-r--r--  flatcc/src/runtime/json_parser.c  1297
-rw-r--r--  flatcc/src/runtime/json_printer.c  1486
-rw-r--r--  flatcc/src/runtime/refmap.c  248
-rw-r--r--  flatcc/src/runtime/verifier.c  617
41 files changed, 21305 insertions, 0 deletions
diff --git a/flatcc/src/cli/CMakeLists.txt b/flatcc/src/cli/CMakeLists.txt
new file mode 100644
index 0000000..40facac
--- /dev/null
+++ b/flatcc/src/cli/CMakeLists.txt
@@ -0,0 +1,20 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/include"
+ "${PROJECT_SOURCE_DIR}/config"
+)
+
+add_executable(flatcc_cli
+ flatcc_cli.c
+)
+
+target_link_libraries(flatcc_cli
+ flatcc
+)
+
+# Rename because the libflatcc library and the flatcc executable would
+# conflict if they had the same target name `flatcc`.
+set_target_properties(flatcc_cli PROPERTIES OUTPUT_NAME flatcc)
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatcc_cli DESTINATION bin)
+endif()
diff --git a/flatcc/src/cli/flatcc_cli.c b/flatcc/src/cli/flatcc_cli.c
new file mode 100644
index 0000000..9a03dec
--- /dev/null
+++ b/flatcc/src/cli/flatcc_cli.c
@@ -0,0 +1,505 @@
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "flatcc/flatcc.h"
+#include "config.h"
+
+#define VERSION FLATCC_VERSION_TEXT
+#define TITLE FLATCC_TITLE_TEXT
+
+void usage(FILE *fp)
+{
+ fprintf(fp, "%s\n", TITLE);
+ fprintf(fp, "version: %s\n", VERSION);
+ fprintf(fp, "usage: flatcc [options] file [...]\n");
+ fprintf(fp, "options:\n"
+ " --reader (default) Generate reader\n"
+ " -c, --common Generate common include header(s)\n"
+ " --common_reader Generate common reader include header(s)\n"
+ " --common_builder Generate common builder include header(s)\n"
+ " -w, --builder Generate builders (writable buffers)\n"
+ " -v, --verifier Generate verifier\n"
+ " -r, --recursive Recursively generate included schema files\n"
+ " -a Generate all (like -cwvr)\n"
+ " -g Use _get suffix only to avoid conflicts\n"
+ " -d Dependency file like gcc -MMD\n"
+ " -I<inpath> Search path for include files (multiple allowed)\n"
+ " -o<outpath> Write files relative to this path (dir must exist)\n"
+ " --stdout Concatenate all output to stdout\n"
+ " --outfile=<file> Like --stdout, but to a file.\n"
+ " --depfile=<file> Dependency file like gcc -MF.\n"
+ " --deptarget=<file> Override --depfile target like gcc -MT.\n"
+ " --prefix=<prefix> Add prefix to all generated names (no _ added)\n"
+ " --common-prefix=<prefix> Replace 'flatbuffers' prefix in common files\n"
+#if FLATCC_REFLECTION
+ " --schema Generate binary schema (.bfbs)\n"
+ " --schema-length=no Add length prefix to binary schema\n"
+#endif
+ " --verifier Generate verifier for schema\n"
+ " --json-parser Generate json parser for schema\n"
+ " --json-printer Generate json printer for schema\n"
+ " --json Generate both json parser and printer for schema\n"
+ " --version Show version\n"
+ " -h | --help Help message\n"
+ );
+}
+
+void help(FILE *fp)
+{
+ usage(fp);
+ fprintf(fp,
+ "\n"
+ "This is a flatbuffer compatible compiler implemented in C generating C\n"
+ "source. It is largely compatible with the flatc compiler provided by\n"
+ "Google Fun Propulsion Lab but does not support JSON objects or binary\n"
+ "schema.\n"
+ "\n"
+ "By example 'flatcc monster.fbs' generates a 'monster.h' file which\n"
+ "provides functions to read a flatbuffer. A common include header is also\n"
+ "required. The common file is generated with the -c option. The reader\n"
+ "has no external dependencies.\n"
+ "\n"
+ "The -w (--builder) option enables code generation to build buffers:\n"
+ "`flatbuffers -w monster.fbs` will generate `monster.h` and\n"
+ "`monster_builder.h`, and also a builder specific common file with the\n"
+ "-cw option. The builder must link with the extern `flatbuilder` library.\n"
+ "\n"
+ "-v (--verifier) generates a verifier file per schema. It depends on the\n"
+ "runtime library but not on other generated files, except other included\n"
+ "verifiers.\n"
+ "\n"
+ "-r (--recursive) generates all schema included recursively.\n"
+ "\n"
+ "--reader is the default option to generate reader output but can be used\n"
+ "explicitly together with other options that would otherwise disable it.\n"
+ "\n"
+ "All C output can be concated to a single file using --stdout or\n"
+ "--outfile with content produced in dependency order. The outfile is\n"
+ "relative to cwd.\n"
+ "\n"
+ "-g Only add '_get' suffix to read accessors such that, for example,\n"
+ "only 'Monster_name_get(monster)` will be generated and not also\n"
+ "'Monster_name(monster)'. This avoids potential conflicts with\n"
+ "other generated symbols when a schema change is impractical.\n"
+ "\n"
+ "-d generates a dependency file, e.g. 'monster.fbs.d' in the output dir.\n"
+ "\n"
+ "--depfile implies -d but accepts an explicit filename with a path\n"
+ "relative to cwd. The dependency files content is a gnu make rule with a\n"
+ "target followed by the included schema files The target must match how\n"
+ "it is seen by the rest of the build system and defaults to e.g.\n"
+ "'monster_reader.h' or 'monster.bfbs' paths relative to the working\n"
+ "directory.\n"
+ "\n"
+ "--deptarget overrides the default target for --depfile, simiar to gcc -MT.\n"
+ "\n"
+
+#if FLATCC_REFLECTION
+ "--schema will generate a binary .bfbs file for each top-level schema file.\n"
+ "Can be used with --stdout if no C output is specified. When used with multiple\n"
+ "files --schema-length=yes is recommend.\n"
+ "\n"
+ "--schema-length adds a length prefix of type uoffset_t to binary schema so\n"
+ "they can be concatenated - the aligned buffer starts after the prefix.\n"
+ "\n"
+#else
+ "Flatbuffers binary schema support (--schema) has been disabled."
+ "\n"
+#endif
+ "--json-parser generates a file that implements a fast typed json parser for\n"
+ "the schema. It depends on some flatcc headers and the runtime library but\n"
+ "not on other generated files except other parsers from included schema.\n"
+ "\n"
+ "--json-printer generates a file that implements json printers for the schema\n"
+ "and has dependencies similar to --json-parser.\n"
+ "\n"
+ "--json is generates both printer and parser.\n"
+ "\n"
+#if FLATCC_REFLECTION
+#if 0 /* Disable deprecated features. */
+ "DEPRECATED:\n"
+ " --schema-namespace controls if typenames in schema are prefixed a namespace.\n"
+ " namespaces should always be present.\n"
+ "\n"
+#endif
+#endif
+ "The generated source can redefine offset sizes by including a modified\n"
+ "`flatcc_types.h` file. The flatbuilder library must then be compiled with the\n"
+ "same `flatcc_types.h` file. In this case --prefix and --common-prefix options\n"
+ "may be helpful to avoid conflict with standard offset sizes.\n"
+ "\n"
+ "The output size may seem bulky, but most content is rarely used inline\n"
+ "functions and macros. The compiled binary need not be large.\n"
+ "\n"
+ "The generated source assumes C11 functionality for alignment, compile\n"
+ "time assertions and inline functions but an optional set of portability\n"
+ "headers can be included to work with most any compiler. The portability\n"
+ "layer is not throughly tested so a platform specific test is required\n"
+ "before production use. Upstream patches are welcome.\n");
+}
+
+enum { noarg, suffixarg, nextarg };
+
+int parse_bool_arg(const char *a)
+{
+ if (strcmp(a, "0") == 0 || strcmp(a, "no") == 0) {
+ return 0;
+ }
+ if (strcmp(a, "1") == 0 || strcmp(a, "yes") == 0) {
+ return 1;
+ }
+ fprintf(stderr, "invalid boolean argument: '%s', must be '0', '1', 'yes' or 'no'\n", a);
+ return -1;
+}
+
+int match_long_arg(const char *option, const char *s, size_t n)
+{
+ return strncmp(option, s, n) == 0 && strlen(option) == n;
+}
+
+int set_opt(flatcc_options_t *opts, const char *s, const char *a)
+{
+ int ret = noarg;
+ size_t n = strlen(s);
+ const char *v = strchr(s, '=');
+ if (v) {
+ a = v + 1;
+ n = (size_t)(v - s);
+ }
+ if (*s == 'h' || 0 == strcmp("-help", s)) {
+ /* stdout so less and more works. */
+ help(stdout);
+ exit(0);
+ }
+ if (0 == strcmp("-version", s)) {
+ fprintf(stdout, "%s\n", TITLE);
+ fprintf(stdout, "version: %s\n", VERSION);
+ exit(0);
+ }
+ if (0 == strcmp("-stdout", s)) {
+ opts->gen_stdout = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-common", s)) {
+ opts->cgen_common_reader = 1;
+ opts->cgen_common_builder = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-common_reader", s)) {
+ opts->cgen_common_reader = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-common_builder", s)) {
+ opts->cgen_common_builder = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-reader", s)) {
+ opts->cgen_reader = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-builder", s)) {
+ opts->cgen_builder = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-verifier", s)) {
+ opts->cgen_verifier = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-recursive", s)) {
+ opts->cgen_recursive = 1;
+ return noarg;
+ }
+#if FLATCC_REFLECTION
+ if (0 == strcmp("-schema", s)) {
+ opts->bgen_bfbs = 1;
+ return noarg;
+ }
+#endif
+ if (0 == strcmp("-json-parser", s)) {
+ opts->cgen_json_parser = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-json-printer", s)) {
+ opts->cgen_json_printer = 1;
+ return noarg;
+ }
+ if (0 == strcmp("-json", s)) {
+ opts->cgen_json_parser = 1;
+ opts->cgen_json_printer = 1;
+ return noarg;
+ }
+#if FLATCC_REFLECTION
+#if 0 /* Disable deprecated features. */
+ if (match_long_arg("-schema-namespace", s, n)) {
+ fprintf(stderr, "warning: --schema-namespace is deprecated\n"
+ " a namespace is added by default and should always be present\n");
+ if (!a) {
+ fprintf(stderr, "--schema-namespace option needs an argument\n");
+ exit(-1);
+ }
+ if(0 > (opts->bgen_qualify_names = parse_bool_arg(a))) {
+ exit(-1);
+ }
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-schema-length", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--schema-length option needs an argument\n");
+ exit(-1);
+ }
+ if(0 > (opts->bgen_length_prefix = parse_bool_arg(a))) {
+ exit(-1);
+ }
+ return v ? noarg : nextarg;
+ }
+#endif
+#endif
+ if (match_long_arg("-depfile", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--depfile option needs an argument\n");
+ exit(-1);
+ }
+ opts->gen_depfile = a;
+ opts->gen_dep = 1;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-deptarget", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--deptarget option needs an argument\n");
+ exit(-1);
+ }
+ opts->gen_deptarget = a;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-outfile", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--outfile option needs an argument\n");
+ exit(-1);
+ }
+ opts->gen_outfile= a;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-common-prefix", s, n)) {
+ if (!a) {
+ fprintf(stderr, "--common-prefix option needs an argument\n");
+ exit(-1);
+ }
+ opts->nsc = a;
+ return v ? noarg : nextarg;
+ }
+ if (match_long_arg("-prefix", s, n)) {
+ if (!a) {
+ fprintf(stderr, "-n option needs an argument\n");
+ exit(-1);
+ }
+ opts->ns = a;
+ return v ? noarg : nextarg;
+ }
+ switch (*s) {
+ case '-':
+ fprintf(stderr, "invalid option: -%s\n", s);
+ exit(-1);
+ case 'I':
+ if (s[1]) {
+ ret = suffixarg;
+ a = s + 1;
+ } else if (!a) {
+ fprintf(stderr, "-I option needs an argument\n");
+ exit(-1);
+ } else {
+ ret = nextarg;
+ }
+ opts->inpaths[opts->inpath_count++] = a;
+ return ret;
+ case 'o':
+ if (opts->outpath) {
+ fprintf(stderr, "-o option can only be specified once\n");
+ exit(-1);
+ }
+ if (s[1]) {
+ ret = suffixarg;
+ a = s + 1;
+ } else if (!a) {
+ fprintf(stderr, "-o option needs an argument\n");
+ exit(-1);
+ } else {
+ ret = nextarg;
+ }
+ opts->outpath = a;
+ return ret;
+ case 'w':
+ opts->cgen_builder = 1;
+ return noarg;
+ case 'v':
+ opts->cgen_verifier = 1;
+ return noarg;
+ case 'c':
+ opts->cgen_common_reader = 1;
+ opts->cgen_common_builder = 1;
+ return noarg;
+ case 'r':
+ opts->cgen_recursive = 1;
+ return noarg;
+ case 'g':
+ opts->cgen_no_conflicts = 1;
+ return noarg;
+ case 'd':
+ opts->gen_dep = 1;
+ return noarg;
+ case 'a':
+ opts->cgen_reader = 1;
+ opts->cgen_builder = 1;
+ opts->cgen_verifier = 1;
+ opts->cgen_common_reader = 1;
+ opts->cgen_common_builder = 1;
+ opts->cgen_recursive = 1;
+ return noarg;
+ default:
+ fprintf(stderr, "invalid option: -%c\n", *s);
+ exit(-1);
+ }
+ return noarg;
+}
+
+int get_opt(flatcc_options_t *opts, const char *s, const char *a)
+{
+ if (s[1] == '-') {
+ return nextarg == set_opt(opts, s + 1, a);
+ }
+ ++s;
+ if (*s == 0) {
+ fprintf(stderr, "- is not a valid option\n");
+ exit(-1);
+ }
+ while (*s) {
+ switch (set_opt(opts, s, a)) {
+ case noarg:
+ ++s;
+ continue;
+ case suffixarg:
+ return 0;
+ case nextarg:
+ return 1;
+ }
+ }
+ return noarg;
+}
+
+void parse_opts(int argc, const char *argv[], flatcc_options_t *opts)
+{
+ int i;
+ const char *s, *a;
+
+ for (i = 1; i < argc; ++i) {
+ if (argv[i][0] == '-') {
+ s = argv[i];
+ a = i + 1 < argc ? argv[i + 1] : 0;
+ i += get_opt(opts, s, a);
+ } else {
+ opts->srcpaths[opts->srcpath_count++] = argv[i];
+ }
+ }
+}
+
+int main(int argc, const char *argv[])
+{
+ flatcc_options_t opts;
+ flatcc_context_t ctx = 0;
+ int i, ret, cgen;
+ const char **src;
+
+ ctx = 0;
+ ret = 0;
+ if (argc < 2) {
+ usage(stderr);
+ exit(-1);
+ }
+ flatcc_init_options(&opts);
+ if (!(opts.inpaths = malloc((size_t)argc * sizeof(char *)))) {
+ fprintf(stderr, "memory allocation failure\n");
+ exit(-1);
+ }
+ if (!(opts.srcpaths = malloc((size_t)argc * sizeof(char *)))) {
+ fprintf(stderr, "memory allocation failure\n");
+ free((void *)opts.inpaths);
+ exit(-1);
+ }
+
+ parse_opts(argc, argv, &opts);
+ if (opts.cgen_builder && opts.cgen_common_reader) {
+ opts.cgen_common_builder = 1;
+ }
+ if (opts.srcpath_count == 0) {
+ /* No input files, so only generate header(s). */
+ if (!(opts.cgen_common_reader || opts.cgen_common_builder) || opts.bgen_bfbs) {
+ fprintf(stderr, "filename missing\n");
+ goto fail;
+ }
+ if (!(ctx = flatcc_create_context(&opts, 0, 0, 0))) {
+ fprintf(stderr, "internal error: failed to create parsing context\n");
+ goto fail;
+ }
+ if (flatcc_generate_files(ctx)) {
+ goto fail;
+ }
+ flatcc_destroy_context(ctx);
+ ctx = 0;
+ goto done;
+ }
+ cgen = opts.cgen_reader || opts.cgen_builder || opts.cgen_verifier
+ || opts.cgen_common_reader || opts.cgen_common_builder
+ || opts.cgen_json_parser || opts.cgen_json_printer;
+ if (!opts.bgen_bfbs && (!cgen || opts.cgen_builder || opts.cgen_verifier)) {
+ /* Default to reader output if no other C output is specified, or if builder/verifier output depends on it. */
+ opts.cgen_reader = 1;
+ }
+ if (opts.bgen_bfbs && cgen) {
+ if (opts.gen_stdout) {
+ fprintf(stderr, "--stdout cannot be used with mixed text and binary output");
+ goto fail;
+ }
+ if (opts.gen_outfile) {
+ fprintf(stderr, "--outfile cannot be used with mixed text and binary output");
+ goto fail;
+ }
+ }
+ if (opts.gen_deptarget && !opts.gen_depfile) {
+ fprintf(stderr, "--deptarget cannot be used without --depfile");
+ goto fail;
+ }
+ if (opts.gen_stdout && opts.gen_outfile) {
+ fprintf(stderr, "--outfile cannot be used with --stdout");
+ goto fail;
+ }
+ for (i = 0, src = opts.srcpaths; i < opts.srcpath_count; ++i, ++src) {
+ if (!(ctx = flatcc_create_context(&opts, *src, 0, 0))) {
+ fprintf(stderr, "internal error: failed to create parsing context\n");
+ goto fail;
+ }
+ if (flatcc_parse_file(ctx, *src)) {
+ goto fail;
+ }
+ if (flatcc_generate_files(ctx)) {
+ goto fail;
+ }
+ flatcc_destroy_context(ctx);
+ ctx = 0;
+ /* for --stdout and --outfile options: append to file and skip generating common headers. */
+ opts.gen_append = 1;
+ }
+ goto done;
+fail:
+ ret = -1;
+done:
+ if (ctx) {
+ flatcc_destroy_context(ctx);
+ ctx = 0;
+ }
+ if (ret) {
+ fprintf(stderr, "output failed\n");
+ }
+ free((void *)opts.inpaths);
+ free((void *)opts.srcpaths);
+ return ret;
+}
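
The driver above follows a fixed flow: initialize options, create one parsing context per schema file on the command line, parse, generate, destroy the context. Below is a minimal sketch of that flow using only the library calls that appear in main() above, reduced to a single schema with hard-coded options; "monster.fbs" is just a placeholder path, and the option plumbing and include search paths of the real CLI are omitted.

    #include <stdio.h>
    #include "flatcc/flatcc.h"

    /* Sketch only: generate reader/builder headers for one schema. */
    static int generate_one(const char *schema_path)
    {
        flatcc_options_t opts;
        flatcc_context_t ctx;
        int ret = -1;

        flatcc_init_options(&opts);
        opts.cgen_reader = 1;           /* like --reader (default) */
        opts.cgen_builder = 1;          /* like -w / --builder */
        opts.cgen_common_reader = 1;    /* like -c / --common */
        opts.cgen_common_builder = 1;

        if (!(ctx = flatcc_create_context(&opts, schema_path, 0, 0))) {
            fprintf(stderr, "failed to create parsing context\n");
            return -1;
        }
        if (!flatcc_parse_file(ctx, schema_path) && !flatcc_generate_files(ctx)) {
            ret = 0;
        }
        flatcc_destroy_context(ctx);
        return ret;
    }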
diff --git a/flatcc/src/compiler/CMakeLists.txt b/flatcc/src/compiler/CMakeLists.txt
new file mode 100644
index 0000000..ce31819
--- /dev/null
+++ b/flatcc/src/compiler/CMakeLists.txt
@@ -0,0 +1,43 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/external"
+ "${PROJECT_SOURCE_DIR}/include"
+ "${PROJECT_SOURCE_DIR}/config"
+)
+
+set (SOURCES
+ ${PROJECT_SOURCE_DIR}/external/hash/cmetrohash64.c
+ ${PROJECT_SOURCE_DIR}/external/hash/str_set.c
+ ${PROJECT_SOURCE_DIR}/external/hash/ptr_set.c
+ hash_tables/symbol_table.c
+ hash_tables/scope_table.c
+ hash_tables/name_table.c
+ hash_tables/schema_table.c
+ hash_tables/value_set.c
+ fileio.c
+ parser.c
+ semantics.c
+ coerce.c
+ flatcc.c
+ codegen_c.c
+ codegen_c_reader.c
+ codegen_c_sort.c
+ codegen_c_builder.c
+ codegen_c_verifier.c
+ codegen_c_sorter.c
+ codegen_c_json_parser.c
+ codegen_c_json_printer.c
+ # needed for building binary schema
+ ../runtime/builder.c
+ ../runtime/emitter.c
+ ../runtime/refmap.c
+)
+
+if (FLATCC_REFLECTION)
+ set (SOURCES ${SOURCES} codegen_schema.c)
+endif(FLATCC_REFLECTION)
+
+add_library(flatcc ${SOURCES})
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatcc DESTINATION ${lib_dir})
+endif()
diff --git a/flatcc/src/compiler/catalog.h b/flatcc/src/compiler/catalog.h
new file mode 100644
index 0000000..de2947f
--- /dev/null
+++ b/flatcc/src/compiler/catalog.h
@@ -0,0 +1,217 @@
+#ifndef CATALOG_H
+#define CATALOG_H
+
+#include <stdlib.h>
+#include "symbols.h"
+
+/* Helper to build more intuitive schema data with fully qualified names. */
+
+
+typedef struct entry entry_t;
+typedef entry_t object_entry_t;
+typedef entry_t enum_entry_t;
+typedef entry_t service_entry_t;
+typedef struct scope_entry scope_entry_t;
+
+struct entry {
+ fb_compound_type_t *ct;
+ char *name;
+};
+
+struct scope_entry {
+ fb_scope_t *scope;
+ char *name;
+};
+
+typedef struct catalog catalog_t;
+
+struct catalog {
+ int qualify_names;
+ int nobjects;
+ int nenums;
+ int nservices;
+ size_t name_table_size;
+ object_entry_t *objects;
+ enum_entry_t *enums;
+ service_entry_t *services;
+ char *name_table;
+ object_entry_t *next_object;
+ enum_entry_t *next_enum;
+ service_entry_t *next_service;
+ char *next_name;
+ fb_schema_t *schema;
+};
+
+#include <stdio.h>
+
+static void count_symbol(void *context, fb_symbol_t *sym)
+{
+ catalog_t *catalog = context;
+ fb_ref_t *scope_name;
+ size_t n = 0;
+ fb_compound_type_t *ct;
+
+ if (!(ct = get_compound_if_visible(catalog->schema, sym))) {
+ return;
+ }
+
+ /*
+ * Find out how much space the name requires. We must store each
+ * name in full for sorting because comparing a variable number of
+ * parent scope names is otherwise tricky.
+ */
+ if (catalog->qualify_names) {
+ scope_name = ct->scope->name;
+ while (scope_name) {
+ /* + 1 for '.'. */
+ n += (size_t)scope_name->ident->len + 1;
+ scope_name = scope_name->link;
+ }
+ }
+ /* + 1 for '\0'. */
+ n += (size_t)sym->ident->len + 1;
+ catalog->name_table_size += n;
+
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ ++catalog->nobjects;
+ break;
+ case fb_is_union:
+ case fb_is_enum:
+ ++catalog->nenums;
+ break;
+ case fb_is_rpc_service:
+ ++catalog->nservices;
+ break;
+ default: return;
+ }
+}
+
+static void install_symbol(void *context, fb_symbol_t *sym)
+{
+ catalog_t *catalog = context;
+ fb_ref_t *scope_name;
+ int n = 0;
+ char *s, *name;
+ fb_compound_type_t *ct;
+
+ if (!(ct = get_compound_if_visible(catalog->schema, sym))) {
+ return;
+ }
+
+ s = catalog->next_name;
+ name = s;
+ if (catalog->qualify_names) {
+ scope_name = ct->scope->name;
+ while (scope_name) {
+ n = (int)scope_name->ident->len;
+ memcpy(s, scope_name->ident->text, (size_t)n);
+ s += n;
+ *s++ = '.';
+ scope_name = scope_name->link;
+ }
+ }
+ n = (int)sym->ident->len;
+ memcpy(s, sym->ident->text, (size_t)n);
+ s += n;
+ *s++ = '\0';
+ catalog->next_name = s;
+
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ catalog->next_object->ct = (fb_compound_type_t *)sym;
+ catalog->next_object->name = name;
+ catalog->next_object++;
+ break;
+ case fb_is_union:
+ case fb_is_enum:
+ catalog->next_enum->ct = (fb_compound_type_t *)sym;
+ catalog->next_enum->name = name;
+ catalog->next_enum++;
+ break;
+ case fb_is_rpc_service:
+ catalog->next_service->ct = (fb_compound_type_t *)sym;
+ catalog->next_service->name = name;
+ catalog->next_service++;
+ break;
+ default: break;
+ }
+}
+
+static void count_symbols(void *context, fb_scope_t *scope)
+{
+ fb_symbol_table_visit(&scope->symbol_index, count_symbol, context);
+}
+
+static void install_symbols(void *context, fb_scope_t *scope)
+{
+ fb_symbol_table_visit(&scope->symbol_index, install_symbol, context);
+}
+
+static int compare_entries(const void *x, const void *y)
+{
+ return strcmp(((const entry_t *)x)->name, ((const entry_t *)y)->name);
+}
+
+static void sort_entries(entry_t *entries, int count)
+{
+ int i;
+
+ qsort(entries, (size_t)count, sizeof(entries[0]), compare_entries);
+
+ for (i = 0; i < count; ++i) {
+ entries[i].ct->export_index = (size_t)i;
+ }
+}
+
+static void clear_catalog(catalog_t *catalog)
+{
+ if (catalog->objects) {
+ free(catalog->objects);
+ }
+ if (catalog->enums) {
+ free(catalog->enums);
+ }
+ if (catalog->services) {
+ free(catalog->services);
+ }
+ if (catalog->name_table) {
+ free(catalog->name_table);
+ }
+ memset(catalog, 0, sizeof(*catalog));
+}
+
+static int build_catalog(catalog_t *catalog, fb_schema_t *schema, int qualify_names, fb_scope_table_t *index)
+{
+ memset(catalog, 0, sizeof(*catalog));
+ catalog->qualify_names = qualify_names;
+ catalog->schema = schema;
+
+ /* Build support datastructures before export. */
+ fb_scope_table_visit(index, count_symbols, catalog);
+ catalog->objects = calloc((size_t)catalog->nobjects, sizeof(catalog->objects[0]));
+ catalog->enums = calloc((size_t)catalog->nenums, sizeof(catalog->enums[0]));
+ catalog->services = calloc((size_t)catalog->nservices, sizeof(catalog->services[0]));
+ catalog->name_table = malloc(catalog->name_table_size);
+ catalog->next_object = catalog->objects;
+ catalog->next_enum = catalog->enums;
+ catalog->next_service = catalog->services;
+ catalog->next_name = catalog->name_table;
+ if ((!catalog->objects && catalog->nobjects > 0) ||
+ (!catalog->enums && catalog->nenums > 0) ||
+ (!catalog->services && catalog->nservices > 0) ||
+ (!catalog->name_table && catalog->name_table_size > 0)) {
+ clear_catalog(catalog);
+ return -1;
+ }
+ fb_scope_table_visit(index, install_symbols, catalog);
+ /* Presort objects and enums because the sorted index is required in Type tables. */
+ sort_entries(catalog->objects, catalog->nobjects);
+ sort_entries(catalog->enums, catalog->nenums);
+ sort_entries(catalog->services, catalog->nservices);
+ return 0;
+}
+
+#endif /* CATALOG_H */
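
catalog.h works in two passes over every scope: count_symbol sizes the flat name table and counts entries per kind, install_symbol then writes each fully qualified, dot-separated name into that table and fills the entry arrays, and sort_entries orders the entries and assigns export_index. A hedged sketch of how a backend might consume it, assuming the scope index is reached through schema->root_schema->scope_index as codegen_c.c does:

    /* Sketch: print all tables/structs in sorted, fully qualified order.
     * Assumes catalog.h above is included. */
    static int list_objects(fb_schema_t *schema)
    {
        catalog_t catalog;
        int i;

        if (build_catalog(&catalog, schema, 1 /* qualify_names */,
                          &schema->root_schema->scope_index)) {
            return -1;
        }
        for (i = 0; i < catalog.nobjects; ++i) {
            /* Entry names point into catalog.name_table; export_index
             * was assigned by sort_entries during build_catalog. */
            printf("%d: %s\n", i, catalog.objects[i].name);
        }
        clear_catalog(&catalog);
        return 0;
    }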
diff --git a/flatcc/src/compiler/codegen.h b/flatcc/src/compiler/codegen.h
new file mode 100644
index 0000000..2798767
--- /dev/null
+++ b/flatcc/src/compiler/codegen.h
@@ -0,0 +1,46 @@
+#ifndef CODEGEN_H
+#define CODEGEN_H
+
+#include "symbols.h"
+#include "parser.h"
+
+typedef struct fb_output fb_output_t;
+
+struct fb_output {
+ /*
+ * Common namespace across files. May differ from namespace
+ * for consistent use of type names.
+ */
+ char nsc[FLATCC_NAMESPACE_MAX + 2];
+ char nscup[FLATCC_NAMESPACE_MAX + 2];
+
+ FILE *fp;
+ fb_schema_t *S;
+ fb_options_t *opts;
+ fb_scope_t *current_scope;
+ int indent;
+ int spacing;
+ int tmp_indent;
+};
+
+int __flatcc_fb_init_output_c(fb_output_t *out, fb_options_t *opts);
+#define fb_init_output_c __flatcc_fb_init_output_c
+void __flatcc_fb_end_output_c(fb_output_t *out);
+#define fb_end_output_c __flatcc_fb_end_output_c
+
+int __flatcc_fb_codegen_common_c(fb_output_t *out);
+#define fb_codegen_common_c __flatcc_fb_codegen_common_c
+
+int __flatcc_fb_codegen_c(fb_output_t *out, fb_schema_t *S);
+#define fb_codegen_c __flatcc_fb_codegen_c
+
+void *__flatcc_fb_codegen_bfbs_to_buffer(fb_options_t *opts, fb_schema_t *S, void *buffer, size_t *size);
+#define fb_codegen_bfbs_to_buffer __flatcc_fb_codegen_bfbs_to_buffer
+
+void *__flatcc_fb_codegen_bfbs_alloc_buffer(fb_options_t *opts, fb_schema_t *S, size_t *size);
+#define fb_codegen_bfbs_alloc_buffer __flatcc_fb_codegen_bfbs_alloc_buffer
+
+int __flatcc_fb_codegen_bfbs_to_file(fb_options_t *opts, fb_schema_t *S);
+#define fb_codegen_bfbs_to_file __flatcc_fb_codegen_bfbs_to_file
+
+#endif /* CODEGEN_H */
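
codegen.h is the seam between the compiler driver and the C backends: fb_init_output_c prepares the shared fb_output_t (namespace strings and concatenated output file), fb_codegen_common_c emits the common headers, fb_codegen_c emits the per-schema files, and fb_end_output_c closes any concatenated output. A rough sketch of the expected call order, assuming opts and a parsed schema S are supplied by the caller:

    /* Sketch of how a driver sequences the C backend entry points above. */
    static int run_c_backend(fb_options_t *opts, fb_schema_t *S)
    {
        fb_output_t out;
        int ret;

        if (fb_init_output_c(&out, opts)) {
            return -1;
        }
        ret = fb_codegen_common_c(&out);   /* *_common_reader.h / *_common_builder.h */
        if (!ret) {
            ret = fb_codegen_c(&out, S);   /* *_reader.h, *_builder.h, ... */
        }
        fb_end_output_c(&out);
        return ret;
    }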
diff --git a/flatcc/src/compiler/codegen_c.c b/flatcc/src/compiler/codegen_c.c
new file mode 100644
index 0000000..5e5fe0e
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c.c
@@ -0,0 +1,285 @@
+#include "codegen_c.h"
+#include "fileio.h"
+#include "pstrutil.h"
+#include "../../external/hash/str_set.h"
+
+int fb_open_output_file(fb_output_t *out, const char *name, size_t len, const char *ext)
+{
+ char *path;
+ int ret;
+ const char *prefix = out->opts->outpath ? out->opts->outpath : "";
+ size_t prefix_len = strlen(prefix);
+
+ if (out->fp) {
+ return 0;
+ }
+ checkmem((path = fb_create_join_path_n(prefix, prefix_len, name, len, ext, 1)));
+ out->fp = fopen(path, "wb");
+ ret = 0;
+ if (!out->fp) {
+ fprintf(stderr, "error opening file for write: %s\n", path);
+ ret = -1;
+ }
+ free(path);
+ return ret;
+}
+
+void fb_close_output_file(fb_output_t *out)
+{
+ /* Concatenate covers either stdout or a file. */
+ if (!out->opts->gen_outfile && !out->opts->gen_stdout && out->fp) {
+ fclose(out->fp);
+ out->fp = 0;
+ }
+ /* Keep out->fp open for next file. */
+}
+
+void fb_end_output_c(fb_output_t *out)
+{
+ if (out->fp != stdout && out->fp) {
+ fclose(out->fp);
+ }
+ out->fp = 0;
+}
+
+/*
+ * If used with --stdout or --outfile=<file>, we assume there
+ * are no other language outputs at the same time.
+ */
+int fb_init_output_c(fb_output_t *out, fb_options_t *opts)
+{
+ const char *nsc;
+ char *path = 0;
+ size_t n;
+ const char *mode = opts->gen_append ? "ab" : "wb";
+ const char *prefix = opts->outpath ? opts->outpath : "";
+ int ret = -1;
+
+ memset(out, 0, sizeof(*out));
+ out->opts = opts;
+ nsc = opts->nsc;
+ if (nsc) {
+ n = strlen(opts->nsc);
+ if (n > FLATCC_NAMESPACE_MAX) {
+ fprintf(stderr, "common namespace argument is limited to %i characters\n", (int)FLATCC_NAMESPACE_MAX);
+ return -1;
+ }
+ } else {
+ nsc = FLATCC_DEFAULT_NAMESPACE_COMMON;
+ n = strlen(nsc);
+ }
+ strncpy(out->nsc, nsc, FLATCC_NAMESPACE_MAX);
+ out->nsc[FLATCC_NAMESPACE_MAX] = '\0';
+ if (n) {
+ out->nsc[n] = '_';
+ out->nsc[n + 1] = '\0';
+ }
+ pstrcpyupper(out->nscup, out->nsc);
+ out->nscup[n] = '\0'; /* No trailing _ */
+ out->spacing = opts->cgen_spacing;
+ if (opts->gen_stdout) {
+ out->fp = stdout;
+ return 0;
+ }
+ if (!out->opts->gen_outfile) {
+ /* Normal operation writing to multiple header files. */
+ return 0;
+ }
+ checkmem((path = fb_create_join_path(prefix, out->opts->gen_outfile, "", 1)));
+ out->fp = fopen(path, mode);
+ if (!out->fp) {
+ fprintf(stderr, "error opening file for write: %s\n", path);
+ ret = -1;
+ goto done;
+ }
+ ret = 0;
+done:
+ if (path) {
+ free(path);
+ }
+ return ret;
+}
+
+static void _str_set_destructor(void *context, char *item)
+{
+ (void)context;
+
+ free(item);
+}
+
+/*
+ * Removal of duplicate inclusions is only for a cleaner output - it is
+ * not strictly necessary because the preprocessor handles include
+ * guards. The guards are required to deal with concatenated files
+ * regardless unless we generate special code for concatenation.
+ */
+void fb_gen_c_includes(fb_output_t *out, const char *ext, const char *extup)
+{
+ fb_include_t *inc = out->S->includes;
+ char *basename, *basenameup, *s;
+ str_set_t set;
+
+ fb_clear(set);
+
+ /* Don't include our own file. */
+ str_set_insert_item(&set, fb_copy_path(out->S->basenameup), ht_keep);
+ while (inc) {
+ checkmem((basename = fb_create_basename(
+ inc->name.s.s, (size_t)inc->name.s.len, out->opts->default_schema_ext)));
+ inc = inc->link;
+ checkmem((basenameup = fb_copy_path(basename)));
+ s = basenameup;
+ while (*s) {
+ *s = (char)toupper(*s);
+ ++s;
+ }
+ if (str_set_insert_item(&set, basenameup, ht_keep)) {
+ free(basenameup);
+ free(basename);
+ continue;
+ }
+ /* The include guard is needed when concatenating output. */
+ fprintf(out->fp,
+ "#ifndef %s%s\n"
+ "#include \"%s%s\"\n"
+ "#endif\n",
+ basenameup, extup, basename, ext);
+ free(basename);
+ /* `basenameup` stored in str_set. */
+ }
+ str_set_destroy(&set, _str_set_destructor, 0);
+}
+
+int fb_copy_scope(fb_scope_t *scope, char *buf)
+{
+ size_t n, len;
+ fb_ref_t *name;
+
+ len = (size_t)scope->prefix.len;
+ for (name = scope->name; name; name = name->link) {
+ n = (size_t)name->ident->len;
+ len += n + 1;
+ }
+ if (len > FLATCC_NAMESPACE_MAX + 1) {
+ buf[0] = '\0';
+ return -1;
+ }
+ len = (size_t)scope->prefix.len;
+ memcpy(buf, scope->prefix.s, len);
+ for (name = scope->name; name; name = name->link) {
+ n = (size_t)name->ident->len;
+ memcpy(buf + len, name->ident->text, n);
+ len += n + 1;
+ buf[len - 1] = '_';
+ }
+ buf[len] = '\0';
+ return (int)len;
+}
+
+void fb_scoped_symbol_name(fb_scope_t *scope, fb_symbol_t *sym, fb_scoped_name_t *sn)
+{
+ fb_token_t *t = sym->ident;
+
+ if (sn->scope != scope) {
+ if (0 > (sn->scope_len = fb_copy_scope(scope, sn->text))) {
+ sn->scope_len = 0;
+ fprintf(stderr, "skipping too long namespace\n");
+ }
+ }
+ sn->len = (int)t->len;
+ sn->total_len = sn->scope_len + sn->len;
+ if (sn->total_len > FLATCC_NAME_BUFSIZ - 1) {
+ fprintf(stderr, "warning: truncating identifier: %.*s\n", sn->len, t->text);
+ sn->len = FLATCC_NAME_BUFSIZ - sn->scope_len - 1;
+ sn->total_len = sn->scope_len + sn->len;
+ }
+ memcpy(sn->text + sn->scope_len, t->text, (size_t)sn->len);
+ sn->text[sn->total_len] = '\0';
+}
+
+int fb_codegen_common_c(fb_output_t *out)
+{
+ size_t nsc_len;
+ int ret;
+
+ nsc_len = strlen(out->nsc) - 1;
+ ret = 0;
+ if (out->opts->cgen_common_reader) {
+ if (fb_open_output_file(out, out->nsc, nsc_len, "_common_reader.h")) {
+ return -1;
+ }
+ ret = fb_gen_common_c_header(out);
+ fb_close_output_file(out);
+ }
+ if (!ret && out->opts->cgen_common_builder) {
+ if (fb_open_output_file(out, out->nsc, nsc_len, "_common_builder.h")) {
+ return -1;
+ }
+ fb_gen_common_c_builder_header(out);
+ fb_close_output_file(out);
+ }
+ return ret;
+}
+
+int fb_codegen_c(fb_output_t *out, fb_schema_t *S)
+{
+ size_t basename_len;
+ /* OK if no files were processed. */
+ int ret = 0;
+
+ out->S = S;
+ out->current_scope = fb_scope_table_find(&S->root_schema->scope_index, 0, 0);
+ basename_len = strlen(out->S->basename);
+ if (out->opts->cgen_reader) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_reader.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_reader(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_builder) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_builder.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_builder(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_verifier) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_verifier.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_verifier(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_json_parser) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_json_parser.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_json_parser(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+ if (out->opts->cgen_json_printer) {
+ if (fb_open_output_file(out, out->S->basename, basename_len, "_json_printer.h")) {
+ ret = -1;
+ goto done;
+ }
+ if ((ret = fb_gen_c_json_printer(out))) {
+ goto done;
+ }
+ fb_close_output_file(out);
+ }
+done:
+ return ret;
+}
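
fb_gen_c_includes above deduplicates included schema names with a str_set and wraps each emitted #include in the include guard of the target header, so concatenated output (--stdout/--outfile) never pulls the same header in twice. For a hypothetical schema that includes "weapon.fbs", a reader header generated with ext "_reader.h" and extup "_READER_H" would start roughly like this (names are illustrative only):

    /* Illustrative output of fb_gen_c_includes for an included weapon.fbs. */
    #ifndef WEAPON_READER_H
    #include "weapon_reader.h"
    #endif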
diff --git a/flatcc/src/compiler/codegen_c.h b/flatcc/src/compiler/codegen_c.h
new file mode 100644
index 0000000..6eba54a
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c.h
@@ -0,0 +1,397 @@
+#ifndef CODEGEN_C_H
+#define CODEGEN_C_H
+
+#include <assert.h>
+#include <stdarg.h>
+
+#include "symbols.h"
+#include "parser.h"
+#include "codegen.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+#define __FLATCC_ERROR_TYPE "INTERNAL_ERROR_UNEXPECTED_TYPE"
+
+#ifndef gen_panic
+#define gen_panic(context, msg) fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg), assert(0), exit(-1)
+#endif
+
+
+static inline void token_name(fb_token_t *t, int *n, const char **s) {
+ *n = (int)t->len;
+ *s = t->text;
+}
+
+typedef char fb_symbol_text_t[FLATCC_NAME_BUFSIZ];
+typedef struct fb_scoped_name fb_scoped_name_t;
+
+/* Should be zeroed because scope is cached across updates. */
+struct fb_scoped_name {
+ fb_symbol_text_t text;
+ fb_scope_t *scope;
+ int scope_len, len, total_len;
+};
+
+#define fb_clear(x) (memset(&(x), 0, sizeof(x)))
+
+/* Returns length or -1 if length exceeds namespace max. */
+int __flatcc_fb_copy_scope(fb_scope_t *scope, char *buf);
+#define fb_copy_scope __flatcc_fb_copy_scope
+
+void __flatcc_fb_scoped_symbol_name(fb_scope_t *scope, fb_symbol_t *sym, fb_scoped_name_t *sn);
+#define fb_scoped_symbol_name __flatcc_fb_scoped_symbol_name
+
+static inline void fb_compound_name(fb_compound_type_t *ct, fb_scoped_name_t *sn)
+{
+ fb_scoped_symbol_name(ct->scope, &ct->symbol, sn);
+}
+
+static inline void symbol_name(fb_symbol_t *sym, int *n, const char **s) {
+ token_name(sym->ident, n, s);
+}
+
+static inline const char *scalar_type_ns(fb_scalar_type_t scalar_type, const char *ns)
+{
+ return scalar_type == fb_bool ? ns : "";
+}
+
+static inline const char *scalar_type_prefix(fb_scalar_type_t scalar_type)
+{
+ const char *tname;
+ switch (scalar_type) {
+ case fb_ulong:
+ tname = "uint64";
+ break;
+ case fb_uint:
+ tname = "uint32";
+ break;
+ case fb_ushort:
+ tname = "uint16";
+ break;
+ case fb_char:
+ tname = "char";
+ break;
+ case fb_ubyte:
+ tname = "uint8";
+ break;
+ case fb_bool:
+ tname = "bool";
+ break;
+ case fb_long:
+ tname = "int64";
+ break;
+ case fb_int:
+ tname = "int32";
+ break;
+ case fb_short:
+ tname = "int16";
+ break;
+ case fb_byte:
+ tname = "int8";
+ break;
+ case fb_float:
+ tname = "float";
+ break;
+ case fb_double:
+ tname = "double";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ tname = __FLATCC_ERROR_TYPE;
+ break;
+ }
+ return tname;
+}
+
+static inline const char *scalar_type_name(fb_scalar_type_t scalar_type)
+{
+ const char *tname;
+ switch (scalar_type) {
+ case fb_ulong:
+ tname = "uint64_t";
+ break;
+ case fb_uint:
+ tname = "uint32_t";
+ break;
+ case fb_ushort:
+ tname = "uint16_t";
+ break;
+ case fb_char:
+ tname = "char";
+ break;
+ case fb_ubyte:
+ tname = "uint8_t";
+ break;
+ case fb_bool:
+ tname = "bool_t";
+ break;
+ case fb_long:
+ tname = "int64_t";
+ break;
+ case fb_int:
+ tname = "int32_t";
+ break;
+ case fb_short:
+ tname = "int16_t";
+ break;
+ case fb_byte:
+ tname = "int8_t";
+ break;
+ case fb_float:
+ tname = "float";
+ break;
+ case fb_double:
+ tname = "double";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ tname = __FLATCC_ERROR_TYPE;
+ break;
+ }
+ return tname;
+}
+
+static inline const char *scalar_vector_type_name(fb_scalar_type_t scalar_type)
+{
+ const char *tname;
+ switch (scalar_type) {
+ case fb_ulong:
+ tname = "uint64_vec_t";
+ break;
+ case fb_uint:
+ tname = "uint32_vec_t";
+ break;
+ case fb_ushort:
+ tname = "uint16_vec_t";
+ break;
+ case fb_char:
+ tname = "char_vec_t";
+ break;
+ case fb_ubyte:
+ tname = "uint8_vec_t";
+ break;
+ case fb_bool:
+ tname = "uint8_vec_t";
+ break;
+ case fb_long:
+ tname = "int64_vec_t";
+ break;
+ case fb_int:
+ tname = "int32_vec_t";
+ break;
+ case fb_short:
+ tname = "int16_vec_t";
+ break;
+ case fb_byte:
+ tname = "int8_vec_t";
+ break;
+ case fb_float:
+ tname = "float_vec_t";
+ break;
+ case fb_double:
+ tname = "double_vec_t";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ tname = __FLATCC_ERROR_TYPE;
+ break;
+ }
+ return tname;
+}
+
+/* Only for integers. */
+static inline const char *scalar_cast(fb_scalar_type_t scalar_type)
+{
+ const char *cast;
+ switch (scalar_type) {
+ case fb_ulong:
+ cast = "UINT64_C";
+ break;
+ case fb_uint:
+ cast = "UINT32_C";
+ break;
+ case fb_ushort:
+ cast = "UINT16_C";
+ break;
+ case fb_char:
+ cast = "char";
+ break;
+ case fb_ubyte:
+ cast = "UINT8_C";
+ break;
+ case fb_bool:
+ cast = "UINT8_C";
+ break;
+ case fb_long:
+ cast = "INT64_C";
+ break;
+ case fb_int:
+ cast = "INT32_C";
+ break;
+ case fb_short:
+ cast = "INT16_C";
+ break;
+ case fb_byte:
+ cast = "INT8_C";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ cast = "";
+ break;
+ }
+ return cast;
+}
+
+typedef char fb_literal_t[100];
+
+static inline size_t print_literal(fb_scalar_type_t scalar_type, const fb_value_t *value, fb_literal_t literal)
+{
+ const char *cast;
+
+ switch (value->type) {
+ case vt_uint:
+ cast = scalar_cast(scalar_type);
+ return (size_t)sprintf(literal, "%s(%"PRIu64")", cast, (uint64_t)value->u);
+ break;
+ case vt_int:
+ cast = scalar_cast(scalar_type);
+ return (size_t)sprintf(literal, "%s(%"PRId64")", cast, (int64_t)value->i);
+ break;
+ case vt_bool:
+ cast = scalar_cast(scalar_type);
+ return (size_t)sprintf(literal, "%s(%u)", cast, (unsigned)value->b);
+ break;
+ case vt_float:
+ /*
+ * .9g ensures sufficient precision in 32-bit floats and
+ * .17g ensures sufficient precision for 64-bit floats (double).
+ * The '#' forces a decimal point that would not be printed
+ * for integers which would result in the wrong type in C
+ * source.
+ */
+ if (scalar_type == fb_float) {
+ return (size_t)sprintf(literal, "%#.9gf", (float)value->f);
+ } else {
+ return (size_t)sprintf(literal, "%#.17g", (double)value->f);
+ }
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ *literal = 0;
+ return 0;
+ }
+}
+
+static inline const char *scalar_suffix(fb_scalar_type_t scalar_type)
+{
+ const char *suffix;
+ switch (scalar_type) {
+ case fb_ulong:
+ suffix = "ULL";
+ break;
+ case fb_uint:
+ suffix = "UL";
+ break;
+ case fb_ushort:
+ suffix = "U";
+ break;
+ case fb_char:
+ suffix = "";
+ break;
+ case fb_ubyte:
+ suffix = "U";
+ break;
+ case fb_bool:
+ suffix = "U";
+ break;
+ case fb_long:
+ suffix = "LL";
+ break;
+ case fb_int:
+ suffix = "L";
+ break;
+ case fb_short:
+ suffix = "";
+ break;
+ case fb_byte:
+ suffix = "";
+ break;
+ case fb_double:
+ suffix = "";
+ break;
+ case fb_float:
+ suffix = "F";
+ break;
+ default:
+ gen_panic(0, "internal error: unexpected type during code generation");
+ suffix = "";
+ break;
+ }
+ return suffix;
+}
+
+/* See also: https://github.com/philsquared/Catch/issues/376 */
+static inline int gen_prologue(fb_output_t *out)
+{
+ if (out->opts->cgen_pragmas) {
+ fprintf(out->fp, "#include \"flatcc/flatcc_prologue.h\"\n");
+ }
+ return 0;
+}
+
+static inline int gen_epilogue(fb_output_t *out)
+{
+ if (out->opts->cgen_pragmas) {
+ fprintf(out->fp, "#include \"flatcc/flatcc_epilogue.h\"\n");
+ }
+ return 0;
+}
+
+/* This assumes the output context is named out which it is by convention. */
+#define indent() (out->indent++)
+#define unindent() { assert(out->indent); out->indent--; }
+#define margin() { out->tmp_indent = out->indent; out->indent = 0; }
+#define unmargin() { out->indent = out->tmp_indent; }
+
+/* Redefine names to avoid polluting library namespace. */
+
+int __flatcc_fb_init_output_c(fb_output_t *out, fb_options_t *opts);
+#define fb_init_output_c __flatcc_fb_init_output_c
+
+int __flatcc_fb_open_output_file(fb_output_t *out, const char *name, size_t len, const char *ext);
+#define fb_open_output_file __flatcc_fb_open_output_file
+
+void __flatcc_fb_close_output_file(fb_output_t *out);
+#define fb_close_output_file __flatcc_fb_close_output_file
+
+void __flatcc_fb_gen_c_includes(fb_output_t *out, const char *ext, const char *extup);
+#define fb_gen_c_includes __flatcc_fb_gen_c_includes
+
+int __flatcc_fb_gen_common_c_header(fb_output_t *out);
+#define fb_gen_common_c_header __flatcc_fb_gen_common_c_header
+
+int __flatcc_fb_gen_common_c_builder_header(fb_output_t *out);
+#define fb_gen_common_c_builder_header __flatcc_fb_gen_common_c_builder_header
+
+int __flatcc_fb_gen_c_reader(fb_output_t *out);
+#define fb_gen_c_reader __flatcc_fb_gen_c_reader
+
+int __flatcc_fb_gen_c_builder(fb_output_t *out);
+#define fb_gen_c_builder __flatcc_fb_gen_c_builder
+
+int __flatcc_fb_gen_c_verifier(fb_output_t *out);
+#define fb_gen_c_verifier __flatcc_fb_gen_c_verifier
+
+int __flatcc_fb_gen_c_sorter(fb_output_t *out);
+#define fb_gen_c_sorter __flatcc_fb_gen_c_sorter
+
+int __flatcc_fb_gen_c_json_parser(fb_output_t *out);
+#define fb_gen_c_json_parser __flatcc_fb_gen_c_json_parser
+
+int __flatcc_fb_gen_c_json_printer(fb_output_t *out);
+#define fb_gen_c_json_printer __flatcc_fb_gen_c_json_printer
+
+#endif /* CODEGEN_C_H */
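
print_literal above turns a schema default value into a C token: integer defaults are wrapped in the <stdint.h> constant macro chosen by scalar_cast, and float defaults are printed with '#' and enough significant digits (.9g for float, .17g for double) to round-trip, plus an 'f' suffix for 32-bit floats. A small illustrative sketch, assuming only the fb_value_t fields that print_literal itself accesses:

    /* Sketch: what print_literal writes for a few hypothetical defaults. */
    static void demo_print_literal(void)
    {
        fb_value_t v;
        fb_literal_t literal;

        v.type = vt_int;   v.i = 42;
        print_literal(fb_short, &v, literal);  /* literal == "INT16_C(42)" */

        v.type = vt_uint;  v.u = 7;
        print_literal(fb_ubyte, &v, literal);  /* literal == "UINT8_C(7)" */

        v.type = vt_float; v.f = 0.5;
        print_literal(fb_float, &v, literal);  /* literal == "0.500000000f" */
    }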
diff --git a/flatcc/src/compiler/codegen_c_builder.c b/flatcc/src/compiler/codegen_c_builder.c
new file mode 100644
index 0000000..ffa105d
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_builder.c
@@ -0,0 +1,2159 @@
+#include <string.h>
+
+#include "codegen_c.h"
+
+int fb_gen_common_c_builder_header(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ const char *nscup = out->nscup;
+
+ fprintf(out->fp, "#ifndef %s_COMMON_BUILDER_H\n", nscup);
+ fprintf(out->fp, "#define %s_COMMON_BUILDER_H\n", nscup);
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "/* Common FlatBuffers build functionality for C. */\n\n");
+ gen_prologue(out);
+
+ fprintf(out->fp, "#ifndef FLATBUILDER_H\n");
+ fprintf(out->fp, "#include \"flatcc/flatcc_builder.h\"\n");
+ fprintf(out->fp, "#endif\n");
+ if (strcmp(nsc, "flatcc_builder_")) {
+ fprintf(out->fp, "typedef flatcc_builder_t %sbuilder_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_ref_t %sref_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_ref_t %svec_ref_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_union_ref_t %sunion_ref_t;\n", nsc);
+ fprintf(out->fp, "typedef flatcc_builder_union_vec_ref_t %sunion_vec_ref_t;\n", nsc);
+ fprintf(out->fp, "/* integer return code (ref and ptr always fail on 0) */\n"
+ "#define %sfailed(x) ((x) < 0)\n", nsc);
+ }
+ fprintf(out->fp, "typedef %sref_t %sroot_t;\n", nsc, nsc);
+ fprintf(out->fp, "#define %sroot(ref) ((%sroot_t)(ref))\n", nsc, nsc);
+ if (strcmp(nsc, "flatbuffers_")) {
+ fprintf(out->fp, "#define %sis_native_pe flatbuffers_is_native_pe\n", nsc);
+ fprintf(out->fp, "typedef flatbuffers_fid_t %sfid_t;\n", nsc);
+ }
+ fprintf(out->fp, "\n");
+
+ fprintf(out->fp,
+ "#define __%smemoize_begin(B, src)\\\n"
+ "do { flatcc_builder_ref_t _ref; if ((_ref = flatcc_builder_refmap_find((B), (src)))) return _ref; } while (0)\n"
+ "#define __%smemoize_end(B, src, op) do { return flatcc_builder_refmap_insert((B), (src), (op)); } while (0)\n"
+ "#define __%smemoize(B, src, op) do { __%smemoize_begin(B, src); __%smemoize_end(B, src, op); } while (0)\n"
+ "\n",
+ nsc, nsc, nsc, nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_buffer(NS)\\\n"
+ "typedef NS ## ref_t NS ## buffer_ref_t;\\\n"
+ "static inline int NS ## buffer_start(NS ## builder_t *B, const NS ##fid_t fid)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, 0, 0); }\\\n"
+ "static inline int NS ## buffer_start_with_size(NS ## builder_t *B, const NS ##fid_t fid)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, 0, flatcc_builder_with_size); }\\\n"
+ "static inline int NS ## buffer_start_aligned(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, block_align, 0); }\\\n"
+ "static inline int NS ## buffer_start_aligned_with_size(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\\\n"
+ "{ return flatcc_builder_start_buffer(B, fid, block_align, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t NS ## buffer_end(NS ## builder_t *B, NS ## ref_t root)\\\n"
+ "{ return flatcc_builder_end_buffer(B, root); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_root(NS, N, FID, TFID)\\\n"
+ "static inline int N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? -1 : N ## _start(B); }\\\n"
+ "static inline int N ## _start_as_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, FID) ? -1 : N ## _start(B); }\\\n"
+ "static inline int N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, TFID) ? -1 : N ## _start(B); }\\\n"
+ "static inline int N ## _start_as_typed_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, TFID) ? -1 : N ## _start(B); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ /*
+ * Unlike structs, we do not use flatcc_builder_create_buffer
+ * because we would have to manage alignment, and we save very
+ * little because tables require stack allocations in any case.
+ */
+ "static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start(B, TFID)) return 0;return NS ## buffer_end(B, N ## _clone(B, t)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _table_t t)\\\n"
+ "{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_prolog(NS, N, FID, TFID)\\\n"
+ "__%sbuild_table_vector_ops(NS, N ## _vec, N)\\\n"
+ "__%sbuild_table_root(NS, N, FID, TFID)\n"
+ "\n",
+ nsc, nsc, nsc);
+
+
+ fprintf(out->fp,
+ "#define __%sbuild_struct_root(NS, N, A, FID, TFID)\\\n"
+ "static inline N ## _t *N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? 0 : N ## _start(B); }\\\n"
+ "static inline N ## _t *N ## _start_as_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, FID) ? 0 : N ## _start(B); }\\\n"
+ "static inline N ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, TFID) ? 0 : N ## _start(B); }\\\n"
+ "static inline N ## _t *N ## _start_as_typed_root_with_size(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start_with_size(B, TFID) ? 0 : N ## _start(B); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_pe_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end_pe(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _end_pe_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_end(B, N ## _end_pe(B)); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0,\\\n"
+ " N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, 0); }\\\n"
+ "static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_nested_table_root(NS, N, TN, FID, TFID)\\\n"
+ "static inline int N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? -1 : TN ## _start(B); }\\\n"
+ "static inline int N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, TFID) ? -1 : TN ## _start(B); }\\\n"
+ "static inline int N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _table_t t)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_root(B, t)); }\\\n"
+ "static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _table_t t)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_typed_root(B, t)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_nested_struct_root(NS, N, TN, A, FID, TFID)\\\n"
+ "static inline TN ## _t *N ## _start_as_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\\\n"
+ "static inline TN ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\\\n"
+ "static inline int N ## _end_as_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _end_as_typed_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\\\n"
+ "static inline int N ## _end_pe_as_root(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, NS ## buffer_end(B, TN ## _end_pe(B))); }\\\n"
+ "static inline int N ## _create_as_root(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_buffer(B, FID, 0,\\\n"
+ " TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\\\n"
+ "static inline int N ## _create_as_typed_root(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_buffer(B, TFID, 0,\\\n"
+ " TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\\\n"
+ "static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\\\n"
+ " align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\\\n"
+ "static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _struct_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_root(B, p)); }\\\n"
+ "static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _struct_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone_as_typed_root(B, p)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_vector_ops(NS, V, N, TN, T)\\\n"
+ "static inline T *V ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return (T *)flatcc_builder_extend_vector(B, len); }\\\n"
+ "static inline T *V ## _append(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return (T *)flatcc_builder_append_vector(B, data, len); }\\\n"
+ "static inline int V ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_vector(B, len); }\\\n"
+ "static inline T *V ## _edit(NS ## builder_t *B)\\\n"
+ "{ return (T *)flatcc_builder_vector_edit(B); }\\\n"
+ "static inline size_t V ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_vector_count(B); }\\\n"
+ "static inline T *V ## _push(NS ## builder_t *B, const T *p)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? (memcpy(_p, p, TN ## __size()), _p) : 0; }\\\n"
+ "static inline T *V ## _push_copy(NS ## builder_t *B, const T *p)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\\\n"
+ /* push_clone is the same as push_copy for scalar and struct vectors
+ * but copy has different semantics as a standalone operation so we can't use
+ * clone to implement push_clone - it would create a reference to a struct. */
+ "static inline T *V ## _push_clone(NS ## builder_t *B, const T *p)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\\\n"
+ "static inline T *V ## _push_create(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _assign(_p __ ## TN ## _call_args) : 0; }\n"
+ "\n",
+ nsc);
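+ /* For illustration (assuming the default `flatbuffers_` common namespace),
+ * once instantiated for uint8 further down, these ops read e.g.:
+ *
+ * uint8_t byte = 42;
+ * uint8_t *slot = flatbuffers_uint8_vec_push(B, &byte);
+ * uint8_t *more = flatbuffers_uint8_vec_extend(B, 16);
+ *
+ * _push copies TN ## __size() bytes verbatim, while _push_create constructs
+ * the element in place from the type's _assign arguments. */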
+
+ fprintf(out->fp,
+ /* NS: common namespace, N: typename, T: element type, S: elem size, A: alignment */
+ "#define __%sbuild_vector(NS, N, T, S, A)\\\n"
+ "typedef NS ## ref_t N ## _vec_ref_t;\\\n"
+ "static inline int N ## _vec_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_end_pe(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\\\n"
+ "{ if (!NS ## is_native_pe()) { size_t i, n; T *p = (T *)flatcc_builder_vector_edit(B);\\\n"
+ " for (i = 0, n = flatcc_builder_vector_count(B); i < n; ++i)\\\n"
+ " { N ## _to_pe(N ## __ptr_add(p, i)); }} return flatcc_builder_end_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_create_pe(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ if (!NS ## is_native_pe()) { size_t i; T *p; int ret = flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); if (ret) { return ret; }\\\n"
+ " p = (T *)flatcc_builder_extend_vector(B, len); if (!p) return 0;\\\n"
+ " for (i = 0; i < len; ++i) { N ## _copy_to_pe(N ## __ptr_add(p, i), N ## __const_ptr_add(data, i)); }\\\n"
+ " return flatcc_builder_end_vector(B); } else return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\\\n"
+ "{ __%smemoize(B, vec, flatcc_builder_create_vector(B, vec, N ## _vec_len(vec), S, A, FLATBUFFERS_COUNT_MAX(S))); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_slice(NS ## builder_t *B, N ##_vec_t vec, size_t index, size_t len)\\\n"
+ "{ size_t n = N ## _vec_len(vec); if (index >= n) index = n; n -= index; if (len > n) len = n;\\\n"
+ " return flatcc_builder_create_vector(B, N ## __const_ptr_add(vec, index), len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\\\n"
+ "__%sbuild_vector_ops(NS, N ## _vec, N, N, T)\n"
+ "\n",
+ nsc, nsc, nsc);
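+ /* For illustration (assuming the default `flatbuffers_` common namespace),
+ * the build_scalar instantiations emitted at the end of the generated common
+ * builder header make the above expand to, among others:
+ *
+ * flatbuffers_uint8_vec_start(B);
+ * flatbuffers_uint8_vec_append(B, data, len);
+ * flatbuffers_uint8_vec_ref_t v = flatbuffers_uint8_vec_end(B);
+ *
+ * where _vec_end converts elements to protocol endianness when the host
+ * endianness differs. */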
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_vector_ops(NS, V, N, TN)\\\n"
+ "static inline TN ## _union_ref_t *V ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_extend_union_vector(B, len); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _append(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_append_union_vector(B, data, len); }\\\n"
+ "static inline int V ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_union_vector(B, len); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _edit(NS ## builder_t *B)\\\n"
+ "{ return (TN ## _union_ref_t *) flatcc_builder_union_vector_edit(B); }\\\n"
+ "static inline size_t V ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_union_vector_count(B); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _push(NS ## builder_t *B, const TN ## _union_ref_t ref)\\\n"
+ "{ return flatcc_builder_union_vector_push(B, ref); }\\\n"
+ "static inline TN ## _union_ref_t *V ## _push_clone(NS ## builder_t *B, TN ## _union_t u)\\\n"
+ "{ return TN ## _vec_push(B, TN ## _clone(B, u)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_vector(NS, N)\\\n"
+ "static inline int N ## _vec_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_union_vector(B); }\\\n"
+ "static inline N ## _union_vec_ref_t N ## _vec_end(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_union_vector(B); }\\\n"
+ "static inline N ## _union_vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _union_ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_create_union_vector(B, data, len); }\\\n"
+ "__%sbuild_union_vector_ops(NS, N ## _vec, N, N)\\\n"
+ "/* Preserves DAG structure separately for type and value vector, so a type vector could be shared for many value vectors. */\\\n"
+ "static inline N ## _union_vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_union_vec_t vec)\\\n"
+ "{ N ## _union_vec_ref_t _uvref, _ret = { 0, 0 }; NS ## union_ref_t _uref; size_t _i, _len;\\\n"
+ " if (vec.type == 0) return _ret;\\\n"
+ " _uvref.type = flatcc_builder_refmap_find(B, vec.type); _uvref.value = flatcc_builder_refmap_find(B, vec.value);\\\n"
+ " _len = N ## _union_vec_len(vec); if (_uvref.type == 0) {\\\n"
+ " _uvref.type = flatcc_builder_refmap_insert(B, vec.type, (flatcc_builder_create_type_vector(B, vec.type, _len))); }\\\n"
+ " if (_uvref.type == 0) return _ret; if (_uvref.value == 0) {\\\n"
+ " if (flatcc_builder_start_offset_vector(B)) return _ret;\\\n"
+ " for (_i = 0; _i < _len; ++_i) { _uref = N ## _clone(B, N ## _union_vec_at(vec, _i));\\\n"
+ " if (!_uref.value || !(flatcc_builder_offset_vector_push(B, _uref.value))) return _ret; }\\\n"
+ " _uvref.value = flatcc_builder_refmap_insert(B, vec.value, flatcc_builder_end_offset_vector(B));\\\n"
+ " if (_uvref.value == 0) return _ret; } return _uvref; }\n"
+ "\n",
+ nsc, nsc);
+
+ /* In addition to offset_vector_ops... */
+ fprintf(out->fp,
+ "#define __%sbuild_string_vector_ops(NS, N)\\\n"
+ "static inline int N ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return NS ## string_start(B); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_end(B)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_create(B, s, len)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_create_str(B, s)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_create_strn(B, s, max_len)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_clone(B, string)); }\\\n"
+ "static inline NS ## string_ref_t *N ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ return NS ## string_vec_push(B, NS ## string_slice(B, string, index, len)); }\n"
+ "\n",
+ nsc);
+
+ /* In addition to offset_vector_ops... */
+ fprintf(out->fp,
+ "#define __%sbuild_table_vector_ops(NS, N, TN)\\\n"
+ "static inline int N ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return TN ## _start(B); }\\\n"
+ "static inline TN ## _ref_t *N ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return N ## _push(B, TN ## _end(B)); }\\\n"
+ "static inline TN ## _ref_t *N ## _push_create(NS ## builder_t *B __ ## TN ##_formal_args)\\\n"
+ "{ return N ## _push(B, TN ## _create(B __ ## TN ## _call_args)); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_offset_vector_ops(NS, V, N, TN)\\\n"
+ "static inline TN ## _ref_t *V ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_extend_offset_vector(B, len); }\\\n"
+ "static inline TN ## _ref_t *V ## _append(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_append_offset_vector(B, data, len); }\\\n"
+ "static inline int V ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_offset_vector(B, len); }\\\n"
+ "static inline TN ## _ref_t *V ## _edit(NS ## builder_t *B)\\\n"
+ "{ return (TN ## _ref_t *)flatcc_builder_offset_vector_edit(B); }\\\n"
+ "static inline size_t V ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_offset_vector_count(B); }\\\n"
+ "static inline TN ## _ref_t *V ## _push(NS ## builder_t *B, const TN ## _ref_t ref)\\\n"
+ "{ return ref ? flatcc_builder_offset_vector_push(B, ref) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_offset_vector(NS, N)\\\n"
+ "typedef NS ## ref_t N ## _vec_ref_t;\\\n"
+ "static inline int N ## _vec_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_offset_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_offset_vector(B); }\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _ref_t *data, size_t len)\\\n"
+ "{ return flatcc_builder_create_offset_vector(B, data, len); }\\\n"
+ "__%sbuild_offset_vector_ops(NS, N ## _vec, N, N)\\\n"
+ "static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\\\n"
+ "{ int _ret; N ## _ref_t _e; size_t _i, _len; __%smemoize_begin(B, vec);\\\n"
+ " _len = N ## _vec_len(vec); if (flatcc_builder_start_offset_vector(B)) return 0;\\\n"
+ " for (_i = 0; _i < _len; ++_i) { if (!(_e = N ## _clone(B, N ## _vec_at(vec, _i)))) return 0;\\\n"
+ " if (!flatcc_builder_offset_vector_push(B, _e)) return 0; }\\\n"
+ " __%smemoize_end(B, vec, flatcc_builder_end_offset_vector(B)); }\\\n"
+ "\n",
+ nsc, nsc, nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string_ops(NS, N)\\\n"
+ "static inline char *N ## _append(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_append_string(B, s, len); }\\\n"
+ "static inline char *N ## _append_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return flatcc_builder_append_string_str(B, s); }\\\n"
+ "static inline char *N ## _append_strn(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_append_string_strn(B, s, len); }\\\n"
+ "static inline size_t N ## _reserved_len(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_string_len(B); }\\\n"
+ "static inline char *N ## _extend(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_extend_string(B, len); }\\\n"
+ "static inline char *N ## _edit(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_string_edit(B); }\\\n"
+ "static inline int N ## _truncate(NS ## builder_t *B, size_t len)\\\n"
+ "{ return flatcc_builder_truncate_string(B, len); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string(NS)\\\n"
+ "typedef NS ## ref_t NS ## string_ref_t;\\\n"
+ "static inline int NS ## string_start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_string(B); }\\\n"
+ "static inline NS ## string_ref_t NS ## string_end(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_string(B); }\\\n"
+ "static inline NS ## ref_t NS ## string_create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_create_string(B, s, len); }\\\n"
+ "static inline NS ## ref_t NS ## string_create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return flatcc_builder_create_string_str(B, s); }\\\n"
+ "static inline NS ## ref_t NS ## string_create_strn(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return flatcc_builder_create_string_strn(B, s, len); }\\\n"
+ "static inline NS ## string_ref_t NS ## string_clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ __%smemoize(B, string, flatcc_builder_create_string(B, string, NS ## string_len(string))); }\\\n"
+ "static inline NS ## string_ref_t NS ## string_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ size_t n = NS ## string_len(string); if (index >= n) index = n; n -= index; if (len > n) len = n;\\\n"
+ " return flatcc_builder_create_string(B, string + index, len); }\\\n"
+ "__%sbuild_string_ops(NS, NS ## string)\\\n"
+ "__%sbuild_offset_vector(NS, NS ## string)\n"
+ "\n",
+ nsc, nsc, nsc, nsc);
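+ /* Usage sketch for the generated string API (default `flatbuffers_` namespace):
+ *
+ * flatbuffers_string_ref_t s = flatbuffers_string_create_str(B, "hello");
+ *
+ * or incrementally: flatbuffers_string_start(B);
+ * flatbuffers_string_append_str(B, "he"); flatbuffers_string_append_str(B, "llo");
+ * s = flatbuffers_string_end(B); */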
+ fprintf(out->fp,
+ "#define __%scopy_from_pe(P, P2, N) (*(P) = N ## _read_from_pe(P2), (P))\n"
+ "#define __%sfrom_pe(P, N) (*(P) = N ## _read_from_pe(P), (P))\n"
+ "#define __%scopy_to_pe(P, P2, N) (N ## _write_to_pe((P), *(P2)), (P))\n"
+ "#define __%sto_pe(P, N) (N ## _write_to_pe((P), *(P)), (P))\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_fixed_array_primitives(NS, N, T)\\\n"
+ "static inline T *N ## _array_copy(T *p, const T *p2, size_t n)\\\n"
+ "{ memcpy(p, p2, n * sizeof(T)); return p; }\\\n"
+ "static inline T *N ## _array_copy_from_pe(T *p, const T *p2, size_t n)\\\n"
+ "{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\\\n"
+ " for (i = 0; i < n; ++i) N ## _copy_from_pe(&p[i], &p2[i]); return p; }\\\n"
+ "static inline T *N ## _array_copy_to_pe(T *p, const T *p2, size_t n)\\\n"
+ "{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\\\n"
+ " for (i = 0; i < n; ++i) N ## _copy_to_pe(&p[i], &p2[i]); return p; }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_primitives(NS, N, T)\\\n"
+ "static inline T *N ## _from_pe(T *p) { return __ ## NS ## from_pe(p, N); }\\\n"
+ "static inline T *N ## _to_pe(T *p) { return __ ## NS ## to_pe(p, N); }\\\n"
+ "static inline T *N ## _copy(T *p, const T *p2) { *p = *p2; return p; }\\\n"
+ "static inline T *N ## _copy_from_pe(T *p, const T *p2)\\\n"
+ "{ return __ ## NS ## copy_from_pe(p, p2, N); }\\\n"
+ "static inline T *N ## _copy_to_pe(T *p, const T *p2) \\\n"
+ "{ return __ ## NS ## copy_to_pe(p, p2, N); }\\\n"
+ "static inline T *N ## _assign(T *p, const T v0) { *p = v0; return p; }\\\n"
+ "static inline T *N ## _assign_from_pe(T *p, T v0)\\\n"
+ "{ *p = N ## _read_from_pe(&v0); return p; }\\\n"
+ "static inline T *N ## _assign_to_pe(T *p, T v0)\\\n"
+ "{ N ## _write_to_pe(p, v0); return p; }\n"
+ "#define __%sbuild_scalar(NS, N, T)\\\n"
+ "__ ## NS ## define_scalar_primitives(NS, N, T)\\\n"
+ "__ ## NS ## define_fixed_array_primitives(NS, N, T)\\\n"
+ "__ ## NS ## build_vector(NS, N, T, sizeof(T), sizeof(T))\n",
+ nsc, nsc);
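+ /* The build_scalar invocations emitted at the end of the generated common
+ * builder header instantiate the above once per scalar type, e.g. (default
+ * namespace) __flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint8, uint8_t),
+ * which yields flatbuffers_uint8_assign/_copy/_to_pe and a uint8 vector API. */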
+
+ fprintf(out->fp,
+ "/* Depends on generated copy_to/from_pe functions, and the type. */\n"
+ "#define __%sdefine_struct_primitives(NS, N)\\\n"
+ "static inline N ## _t *N ##_to_pe(N ## _t *p)\\\n"
+ "{ if (!NS ## is_native_pe()) { N ## _copy_to_pe(p, p); }; return p; }\\\n"
+ "static inline N ## _t *N ##_from_pe(N ## _t *p)\\\n"
+ "{ if (!NS ## is_native_pe()) { N ## _copy_from_pe(p, p); }; return p; }\\\n"
+ "static inline N ## _t *N ## _clear(N ## _t *p) { return (N ## _t *)memset(p, 0, N ## __size()); }\n"
+ "\n"
+
+ /*
+ * NOTE: structs can be both inline and independent blocks. They
+ * are independent as buffer roots, and also as union members.
+ * _clone applied to a struct type name creates a reference to
+ * an independent block, but this is ambiguous. Structs also
+ * support _copy, which is the inline equivalent of _clone.
+ * There is also the distinction between _clone applied to a
+ * field name, _clone applied to a type name, and _clone applied
+ * to a _vec_push operation. For field names and push operations,
+ * _clone is unambiguously inline and similar to _copy. So the
+ * ambiguity arises when applying _clone to a type name, where
+ * _copy and _clone are different. Unions can safely implement
+ * clone of struct members via _clone because union members are
+ * independent blocks, whereas push_clone must be implemented
+ * with _copy because structs are inline in (non-union) vectors.
+ * Structs in union vectors are independent, but those simply use
+ * the union's clone operation (which is a generated function).
+ */
+ "/* Depends on generated copy/assign_to/from_pe functions, and the type. */\n"
+ "#define __%sbuild_struct(NS, N, S, A, FID, TFID)\\\n"
+ "__ ## NS ## define_struct_primitives(NS, N)\\\n"
+ "typedef NS ## ref_t N ## _ref_t;\\\n"
+ "static inline N ## _t *N ## _start(NS ## builder_t *B)\\\n"
+ "{ return (N ## _t *)flatcc_builder_start_struct(B, S, A); }\\\n"
+ "static inline N ## _ref_t N ## _end(NS ## builder_t *B)\\\n"
+ "{ if (!NS ## is_native_pe()) { N ## _to_pe((N ## _t *)flatcc_builder_struct_edit(B)); }\\\n"
+ " return flatcc_builder_end_struct(B); }\\\n"
+ "static inline N ## _ref_t N ## _end_pe(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_end_struct(B); }\\\n"
+ "static inline N ## _ref_t N ## _create(NS ## builder_t *B __ ## N ## _formal_args)\\\n"
+ "{ N ## _t *_p = N ## _start(B); if (!_p) return 0; N ##_assign_to_pe(_p __ ## N ## _call_args);\\\n"
+ " return N ## _end_pe(B); }\\\n"
+ "static inline N ## _ref_t N ## _clone(NS ## builder_t *B, N ## _struct_t p)\\\n"
+ "{ N ## _t *_p; __%smemoize_begin(B, p); _p = N ## _start(B); if (!_p) return 0;\\\n"
+ " N ## _copy(_p, p); __%smemoize_end(B, p, N ##_end_pe(B)); }\\\n"
+ "__%sbuild_vector(NS, N, N ## _t, S, A)\\\n"
+ "__%sbuild_struct_root(NS, N, A, FID, TFID)\\\n"
+ "\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
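+ /* Illustration with a hypothetical 12-byte, 4-aligned schema struct named
+ * MyGame_Vec3 (the per-struct invocation is emitted by gen_builder_struct):
+ *
+ * __flatbuffers_build_struct(flatbuffers_, MyGame_Vec3, 12, 4,
+ * MyGame_Vec3_file_identifier, MyGame_Vec3_type_identifier)
+ *
+ * provides MyGame_Vec3_start/_end/_create/_clone plus a struct vector and
+ * buffer root helpers. */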
+ fprintf(out->fp,
+ "#define __%sstruct_clear_field(p) memset((p), 0, sizeof(*(p)))\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table(NS, N, K)\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_table(B, K); }\\\n"
+ "static inline N ## _ref_t N ## _end(NS ## builder_t *B)\\\n"
+ "{ FLATCC_ASSERT(flatcc_builder_check_required(B, __ ## N ## _required,\\\n"
+ " sizeof(__ ## N ## _required) / sizeof(__ ## N ## _required[0]) - 1));\\\n"
+ " return flatcc_builder_end_table(B); }\\\n"
+ "__%sbuild_offset_vector(NS, N)\n"
+ "\n",
+ nsc, nsc);
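+ /* N ## _end asserts that all required fields were added, using the
+ * __<table>_required id array emitted per table by gen_required_table_fields
+ * below. */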
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _ref_t ref)\\\n"
+ "{ TN ## _ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ?\\\n"
+ " ((*_p = ref), 0) : -1; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return TN ## _start(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, TN ## _end(B)); }\\\n"
+ "static inline TN ## _ref_t N ## _create(NS ## builder_t *B __ ## TN ##_formal_args)\\\n"
+ "{ return N ## _add(B, TN ## _create(B __ ## TN ## _call_args)); }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _table_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone(B, p)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _table_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
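+ /* Illustration for a hypothetical field `enemy` of table type MyGame_Monster
+ * in table MyGame_Monster: this defines MyGame_Monster_enemy_add(B, ref),
+ * _start/_end, _create(B, ...), _clone(B, table) and _pick(B, src), where
+ * _pick clones the field from another table of the containing type if the
+ * field is present. */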
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _union_ref_t uref)\\\n"
+ "{ NS ## ref_t *_p; TN ## _union_type_t *_pt; if (uref.type == TN ## _NONE) return 0; if (uref.value == 0) return -1;\\\n"
+ " if (!(_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1, sizeof(*_pt), sizeof(*_pt)))) return -1;\\\n"
+ " *_pt = uref.type; if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uref.value; return 0; }\\\n"
+ "static inline int N ## _add_type(NS ## builder_t *B, TN ## _union_type_t type)\\\n"
+ "{ TN ## _union_type_t *_pt; if (type == TN ## _NONE) return 0; return (_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1,\\\n"
+ " sizeof(*_pt), sizeof(*_pt))) ? ((*_pt = type), 0) : -1; }\\\n"
+ "static inline int N ## _add_value(NS ## builder_t *B, TN ## _union_ref_t uref)\\\n"
+ "{ NS ## ref_t *p; if (uref.type == TN ## _NONE) return 0; return (p = flatcc_builder_table_add_offset(B, ID)) ?\\\n"
+ " ((*p = uref.value), 0) : -1; }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _union_t p)\\\n"
+ "{ return N ## _add(B, TN ## _clone(B, p)); }\\\n"
+ /* `_pick` is not supported on specific union members because the source dictates the type. */
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _union_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
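+ /* The union type code is written to the table slot at ID - 1 and the value
+ * offset to the slot at ID, matching the FlatBuffers convention of a hidden
+ * <field>_type field directly preceding the union value field. */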
+
+ fprintf(out->fp,
+ "/* M is the union value name and T is its type, i.e. the qualified name. */\n"
+ "#define __%sbuild_union_table_value_field(NS, N, NU, M, T)\\\n"
+ "static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return N ## _add(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline int N ## _ ## M ## _start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline int N ## _ ## M ## _end(NS ## builder_t *B)\\\n"
+ "{ T ## _ref_t ref = T ## _end(B);\\\n"
+ " return ref ? N ## _ ## M ## _add(B, ref) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _table_t t)\\\n"
+ "{ T ## _ref_t ref = T ## _clone(B, t);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "/* M is the union value name and T is its type, i.e. the qualified name. */\n"
+ "#define __%sbuild_union_struct_value_field(NS, N, NU, M, T)\\\n"
+ "static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return N ## _add(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline T ## _t *N ## _ ## M ## _start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline int N ## _ ## M ## _end(NS ## builder_t *B)\\\n"
+ "{ T ## _ref_t ref = T ## _end(B);\\\n"
+ " return ref ? N ## _ ## M ## _add(B, ref) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _end_pe(NS ## builder_t *B)\\\n"
+ "{ T ## _ref_t ref = T ## _end_pe(B);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\\\n"
+ "static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _struct_t p)\\\n"
+ "{ T ## _ref_t ref = T ## _clone(B, p);\\\n"
+ " return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_string_value_field(NS, N, NU, M)\\\n"
+ "static inline int N ## _ ## M ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\\\n"
+ "{ return N ## _add(B, NU ## _as_ ## M (ref)); }\\\n"
+ "__%sbuild_string_field_ops(NS, N ## _ ## M)\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type\n"
+ " * S: sizeof of scalar type, A: alignment of type T, default value V of type T. */\n"
+ "#define __%sbuild_scalar_field(ID, NS, N, TN, T, S, A, V, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, const T v)\\\n"
+ "{ T *_p; if (v == V) return 0; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\\\n"
+ " TN ## _assign_to_pe(_p, v); return 0; }\\\n"
+ "static inline int N ## _force_add(NS ## builder_t *B, const T v)\\\n"
+ "{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\\\n"
+ " TN ## _assign_to_pe(_p, v); return 0; }\\\n"
+ "/* Clone does not skip default values and expects pe endian content. */\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, const T *p)\\\n"
+ "{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\\\n"
+ "/* Transferring a missing field is a nop success with 0 as result. */\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
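+ /* Illustration for a hypothetical scalar field `hp` with default 100 on
+ * table MyGame_Monster: MyGame_Monster_hp_add(B, 100) is a no-op success
+ * because the default is elided, while MyGame_Monster_hp_force_add(B, 100)
+ * stores the value explicitly. */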
+
+ fprintf(out->fp,
+ "/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type\n"
+ " * S: sizeof of scalar type, A: alignment of type T. */\n"
+ "#define __%sbuild_scalar_optional_field(ID, NS, N, TN, T, S, A, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, const T v)\\\n"
+ "{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\\\n"
+ " TN ## _assign_to_pe(_p, v); return 0; }\\\n"
+ "/* Clone does not skip default values and expects pe endian content. */\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, const T *p)\\\n"
+ "{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\\\n"
+ "/* Transferring a missing field is a nop success with 0 as result. */\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_struct_field(ID, NS, N, TN, S, A, TT)\\\n"
+ "static inline TN ## _t *N ## _start(NS ## builder_t *B)\\\n"
+ "{ return (TN ## _t *)flatcc_builder_table_add(B, ID, S, A); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ if (!NS ## is_native_pe()) { TN ## _to_pe((TN ## _t *)flatcc_builder_table_edit(B, S)); } return 0; }\\\n"
+ "static inline int N ## _end_pe(NS ## builder_t *B) { return 0; }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B __ ## TN ## _formal_args)\\\n"
+ "{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_assign_to_pe(_p __ ## TN ## _call_args);\\\n"
+ " return 0; }\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, const TN ## _t *p)\\\n"
+ "{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_copy_to_pe(_p, p); return 0; }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _struct_t p)\\\n"
+ "{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _struct_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc);
+
+ /* This goes for scalar, struct, and enum vectors. */
+ fprintf(out->fp,
+ "#define __%sbuild_vector_field(ID, NS, N, TN, T, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\\\n"
+ "{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return TN ## _vec_start(B); }\\\n"
+ "static inline int N ## _end_pe(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, TN ## _vec_end_pe(B)); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, TN ## _vec_end(B)); }\\\n"
+ "static inline int N ## _create_pe(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return N ## _add(B, TN ## _vec_create_pe(B, data, len)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const T *data, size_t len)\\\n"
+ "{ return N ## _add(B, TN ## _vec_create(B, data, len)); }\\\n"
+ "static inline int N ## _slice(NS ## builder_t *B, TN ## _vec_t vec, size_t index, size_t len)\\\n"
+ "{ return N ## _add(B, TN ## _vec_slice(B, vec, index, len)); }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\\\n"
+ "{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\\\n"
+ "__%sbuild_vector_ops(NS, N, N, TN, T)\\\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_offset_vector_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\\\n"
+ "{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_offset_vector(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, flatcc_builder_end_offset_vector(B)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_offset_vector(B, data, len)); }\\\n"
+ "__%sbuild_offset_vector_ops(NS, N, N, TN)\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\\\n"
+ "{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "/* depends on N ## _add which differs for union member fields and ordinary fields */\\\n"
+ "#define __%sbuild_string_field_ops(NS, N)\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_string(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, flatcc_builder_end_string(B)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_string(B, s, len)); }\\\n"
+ "static inline int N ## _create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_string_str(B, s)); }\\\n"
+ "static inline int N ## _create_strn(NS ## builder_t *B, const char *s, size_t max_len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_string_strn(B, s, max_len)); }\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ return N ## _add(B, NS ## string_clone(B, string)); }\\\n"
+ "static inline int N ## _slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ return N ## _add(B, NS ## string_slice(B, string, index, len)); }\\\n"
+ "__%sbuild_string_ops(NS, N)\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string_field(ID, NS, N, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\\\n"
+ "{ NS ## string_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\\\n"
+ "__%sbuild_string_field_ops(NS, N)\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ NS ## string_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_table_vector_field(ID, NS, N, TN, TT)\\\n"
+ "__%sbuild_offset_vector_field(ID, NS, N, TN, TT)\\\n"
+ "__%sbuild_table_vector_ops(NS, N, TN)\n"
+ "\n",
+ nsc, nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_vector_field(ID, NS, N, TN, TT)\\\n"
+ "static inline int N ## _add(NS ## builder_t *B, TN ## _union_vec_ref_t uvref)\\\n"
+ "{ NS ## vec_ref_t *_p; if (!uvref.type || !uvref.value) return uvref.type == uvref.value ? 0 : -1;\\\n"
+ " if (!(_p = flatcc_builder_table_add_offset(B, ID - 1))) return -1; *_p = uvref.type;\\\n"
+ " if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uvref.value; return 0; }\\\n"
+ "static inline int N ## _start(NS ## builder_t *B)\\\n"
+ "{ return flatcc_builder_start_union_vector(B); }\\\n"
+ "static inline int N ## _end(NS ## builder_t *B)\\\n"
+ "{ return N ## _add(B, flatcc_builder_end_union_vector(B)); }\\\n"
+ "static inline int N ## _create(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\\\n"
+ "{ return N ## _add(B, flatcc_builder_create_union_vector(B, data, len)); }\\\n"
+ "__%sbuild_union_vector_ops(NS, N, N, TN)\\\n"
+ "static inline int N ## _clone(NS ## builder_t *B, TN ## _union_vec_t vec)\\\n"
+ "{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\\\n"
+ "static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\\\n"
+ "{ TN ## _union_vec_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }\n"
+ "\n",
+ nsc, nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_table_vector_value_field(NS, N, NU, M, T)\\\n"
+ "static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _table_t t)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, t))); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_struct_vector_value_field(NS, N, NU, M, T)\\\n"
+ "static inline T ## _t *N ## _ ## M ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return T ## _start(B); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _struct_t p)\\\n"
+ /* Here we create an independent struct block, so T ## _clone is appropriate as opposed to T ## _copy. */
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, p))); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_union_string_vector_value_field(NS, N, NU, M)\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, NS ## string_ref_t ref)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\\\n"
+ "static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\\\n"
+ "{ return NS ## string_start(B); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_end(B))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B, const char *s, size_t len)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create(B, s, len))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_str(NS ## builder_t *B, const char *s)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_str(B, s))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_strn(B, s, max_len))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, NS ## string_t string)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_clone(B, string))); }\\\n"
+ "static inline NU ## _union_ref_t *N ## _ ## M ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\\\n"
+ "{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_slice(B, string, index, len))); }\n"
+ "\n",
+ nsc);
+
+ fprintf(out->fp,
+ "#define __%sbuild_string_vector_field(ID, NS, N, TT)\\\n"
+ "__%sbuild_offset_vector_field(ID, NS, N, NS ## string, TT)\\\n"
+ "__%sbuild_string_vector_ops(NS, N)\n"
+ "\n",
+ nsc, nsc, nsc);
+
+ fprintf(out->fp, "#define __%schar_formal_args , char v0\n", nsc);
+ fprintf(out->fp, "#define __%schar_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint8_formal_args , uint8_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint8_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint8_formal_args , int8_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint8_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sbool_formal_args , %sbool_t v0\n", nsc, nsc);
+ fprintf(out->fp, "#define __%sbool_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint16_formal_args , uint16_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint16_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint32_formal_args , uint32_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint32_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%suint64_formal_args , uint64_t v0\n", nsc);
+ fprintf(out->fp, "#define __%suint64_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint16_formal_args , int16_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint16_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint32_formal_args , int32_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint32_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sint64_formal_args , int64_t v0\n", nsc);
+ fprintf(out->fp, "#define __%sint64_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sfloat_formal_args , float v0\n", nsc);
+ fprintf(out->fp, "#define __%sfloat_call_args , v0\n", nsc);
+ fprintf(out->fp, "#define __%sdouble_formal_args , double v0\n", nsc);
+ fprintf(out->fp, "#define __%sdouble_call_args , v0\n", nsc);
+ fprintf(out->fp, "\n");
+ fprintf(out->fp, "__%sbuild_scalar(%s, %schar, char)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint8, uint8_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint8, int8_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sbool, %sbool_t)\n", nsc, nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint16, uint16_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint32, uint32_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %suint64, uint64_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint16, int16_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint32, int32_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sint64, int64_t)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sfloat, float)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %sdouble, double)\n", nsc, nsc, nsc);
+ fprintf(out->fp, "\n");
+ fprintf(out->fp, "__%sbuild_string(%s)\n", nsc, nsc);
+ fprintf(out->fp, "\n");
+
+ fprintf(out->fp, "__%sbuild_buffer(%s)\n", nsc, nsc);
+ gen_epilogue(out);
+ fprintf(out->fp, "#endif /* %s_COMMON_BUILDER_H */\n", nscup);
+ return 0;
+}
+
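+/* Emits the prologue of the per-schema <basename>_builder.h: include guard,
+ * reader and common builder includes, and the buffer identifier/extension
+ * macros derived from the schema's file_identifier and file_extension. */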
+static int gen_builder_pretext(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ const char *nscup = out->nscup;
+
+ fprintf(out->fp,
+ "#ifndef %s_BUILDER_H\n"
+ "#define %s_BUILDER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "#ifndef %s_READER_H\n", out->S->basenameup);
+ fprintf(out->fp, "#include \"%s_reader.h\"\n", out->S->basename);
+ fprintf(out->fp, "#endif\n");
+ fprintf(out->fp, "#ifndef %s_COMMON_BUILDER_H\n", nscup);
+ fprintf(out->fp, "#include \"%scommon_builder.h\"\n", nsc);
+ fprintf(out->fp, "#endif\n");
+
+ fb_gen_c_includes(out, "_builder.h", "_BUILDER_H");
+
+ gen_prologue(out);
+
+ /*
+ * Even if defined in the reader header, we must redefine it here
+ * because another file might sneak in and update it.
+ */
+ if (out->S->file_identifier.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sidentifier\n"
+ "#define %sidentifier \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sidentifier\n"
+ "#define %sidentifier 0\n"
+ "#endif\n",
+ nsc, nsc);
+ }
+ if (out->S->file_extension.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sextension\n"
+ "#define %sextension \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_extension.s.len, out->S->file_extension.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sextension\n"
+ "#define %sextension \"%s\"\n"
+ "#endif\n",
+ nsc, nsc, out->opts->default_bin_ext);
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int get_total_struct_field_count(fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int count = 0;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ switch (member->type.type) {
+ /* struct arrays count as 1 but struct fields are expanded */
+ case vt_compound_type_ref:
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ count += get_total_struct_field_count(member->type.ct);
+ continue;
+ }
+ ++count;
+ break;
+ default:
+ ++count;
+ break;
+ }
+ }
+ return count;
+}
+
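+/* Emits the separator before the next argument or initializer: a plain
+ * comma, or a comma followed by a (macro-continued, if requested) line break
+ * roughly every fourth element, except near the end of the list. The leading
+ * comma emitted at index 0 is needed because the argument list continues an
+ * existing parameter list (e.g. `builder_t *B`). */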
+static inline void gen_comma(fb_output_t *out, int index, int count, int is_macro)
+{
+ char *cont = is_macro ? "\\\n" : "\n";
+
+ if (count == 0) {
+ return;
+ }
+ if (index == 0) {
+ if (count > 4) {
+ fprintf(out->fp, ",%s ", cont);
+ } else {
+ fprintf(out->fp, ", ");
+ }
+ } else {
+ if (index % 4 || count - index <= 2) {
+ fprintf(out->fp, ", ");
+ } else {
+ fprintf(out->fp, ",%s ", cont);
+ }
+ }
+}
+
+static int gen_builder_struct_args(fb_output_t *out, fb_compound_type_t *ct, int index, int len, int is_macro)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *tname, *tname_ns;
+ fb_scoped_name_t snref;
+
+ fb_clear(snref);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ switch (member->type.type) {
+ case vt_fixed_array_compound_type_ref:
+ gen_comma(out, index, len, is_macro);
+ fb_compound_name(member->type.ct, &snref);
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ fprintf(out->fp, "const %s_t v%i[%i]", snref.text, index++, (int)member->type.len);
+ } else {
+ fprintf(out->fp, "%s_enum_t v%i[%i]", snref.text, index++, (int)member->type.len);
+ }
+ break;
+ case vt_compound_type_ref:
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ index = gen_builder_struct_args(out, member->type.ct, index, len, is_macro);
+ continue;
+ }
+ gen_comma(out, index, len, is_macro);
+ fb_compound_name(member->type.ct, &snref);
+ fprintf(out->fp, "%s_enum_t v%i", snref.text, index++);
+ break;
+ case vt_fixed_array_type:
+ gen_comma(out, index, len, is_macro);
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ fprintf(out->fp, "const %s%s v%i[%i]", tname_ns, tname, index++, (int)member->type.len);
+ break;
+ case vt_scalar_type:
+ gen_comma(out, index, len, is_macro);
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ fprintf(out->fp, "%s%s v%i", tname_ns, tname, index++);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected struct member type");
+ continue;
+ }
+ }
+ return index;
+}
+
+static int gen_builder_struct_call_list(fb_output_t *out, fb_compound_type_t *ct, int index, int arg_count, int is_macro)
+{
+ int i;
+ int len = get_total_struct_field_count(ct);
+
+ for (i = 0; i < len; ++i) {
+ gen_comma(out, i, arg_count, is_macro);
+ fprintf(out->fp, "v%i", index++);
+ }
+ return index;
+}
+
+enum { no_conversion, convert_from_pe, convert_to_pe };
+
+/* Note: returned index is not correct when using from_ptr since it doesn't track arguments, but it shouldn't matter. */
+static int gen_builder_struct_field_assign(fb_output_t *out, fb_compound_type_t *ct, int index, int arg_count,
+ int conversion, int from_ptr)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int n, len;
+ const char *s;
+ int deprecated_index = 0;
+ const char *kind, *tprefix;
+ fb_scoped_name_t snref;
+
+ fb_clear(snref);
+ switch (conversion) {
+ case convert_to_pe: kind = "_to_pe"; break;
+ case convert_from_pe: kind = "_from_pe"; break;
+ default: kind = ""; break;
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+
+ if (index > 0) {
+ if (index % 4 == 0) {
+ fprintf(out->fp, ";\n ");
+ } else {
+ fprintf(out->fp, "; ");
+ }
+ }
+ switch (member->type.type) {
+ case vt_fixed_array_compound_type_ref:
+ len = (int)member->type.len;
+ fb_compound_name(member->type.ct, &snref);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ if (from_ptr) {
+ fprintf(out->fp, "%s_array_copy%s(p->%.*s, p2->%.*s, %d)",
+ snref.text, kind, n, s, n, s, len);
+ } else {
+ fprintf(out->fp, "%s_array_copy%s(p->%.*s, v%i, %d)",
+ snref.text, kind, n, s, index, len);
+ }
+ ++index;
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ if (member->type.ct->symbol.kind == fb_is_struct) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ deprecated_index++;
+ index += get_total_struct_field_count(member->type.ct);
+ continue;
+ }
+ if (from_ptr) {
+ fprintf(out->fp, "%s_copy%s(&p->%.*s, &p2->%.*s)", snref.text, kind, n, s, n, s);
+ /* `index` does not count children, but it doesn't matter here. */
+ ++index;
+ } else {
+ fprintf(out->fp, "%s_assign%s(&p->%.*s", snref.text, kind, n, s);
+ index = gen_builder_struct_call_list(out, member->type.ct, index, arg_count, 0);
+ fprintf(out->fp, ")");
+ }
+ continue;
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ switch (member->size == 1 ? no_conversion : conversion) {
+ case convert_from_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s_copy_from_pe(&p->%.*s, &p2->%.*s)",
+ snref.text, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s_assign_from_pe(&p->%.*s, v%i)",
+ snref.text, n, s, index);
+ }
+ break;
+ case convert_to_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s_copy_to_pe(&p->%.*s, &p2->%.*s)",
+ snref.text, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s_assign_to_pe(&p->%.*s, v%i)",
+ snref.text, n, s, index);
+ }
+ break;
+ default:
+ if (from_ptr) {
+ fprintf(out->fp, "p->%.*s = p2->%.*s", n, s, n, s);
+ } else {
+ fprintf(out->fp, "p->%.*s = v%i", n, s, index);
+ }
+ break;
+ }
+ ++index;
+ continue;
+ case vt_fixed_array_type:
+ tprefix = scalar_type_prefix(member->type.st);
+ len = (int)member->type.len;
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ if (from_ptr) {
+ fprintf(out->fp, "%s%s_array_copy%s(p->%.*s, p2->%.*s, %d)",
+ nsc, tprefix, kind, n, s, n, s, len);
+ } else {
+ fprintf(out->fp, "%s%s_array_copy%s(p->%.*s, v%i, %d)",
+ nsc, tprefix, kind, n, s, index, len);
+ }
+ ++index;
+ break;
+ case vt_scalar_type:
+ tprefix = scalar_type_prefix(member->type.st);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "__%sstruct_clear_field(p->__deprecated%i)",
+ nsc, deprecated_index);
+ ++deprecated_index;
+ ++index;
+ continue;
+ }
+ switch (member->size == 1 ? no_conversion : conversion) {
+ case convert_from_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s%s_copy_from_pe(&p->%.*s, &p2->%.*s)",
+ nsc, tprefix, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s%s_assign_from_pe(&p->%.*s, v%i)",
+ nsc, tprefix, n, s, index);
+ }
+ break;
+ case convert_to_pe:
+ if (from_ptr) {
+ fprintf(out->fp, "%s%s_copy_to_pe(&p->%.*s, &p2->%.*s)",
+ nsc, tprefix, n, s, n, s);
+ } else {
+ fprintf(out->fp, "%s%s_assign_to_pe(&p->%.*s, v%i)",
+ nsc, tprefix, n, s, index);
+ }
+ break;
+ default:
+ if (from_ptr) {
+ fprintf(out->fp, "p->%.*s = p2->%.*s", n, s, n, s);
+ } else {
+ fprintf(out->fp, "p->%.*s = v%i", n, s, index);
+ }
+ break;
+ }
+ ++index;
+ break;
+ default:
+ gen_panic(out, "internal error: type error");
+ continue;
+ }
+ }
+ if (arg_count > 0) {
+ fprintf(out->fp, ";\n ");
+ }
+ return index;
+}
+
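+/* Output sketch for a hypothetical struct MyGame_Vec3 with three float
+ * fields (names are illustrative, not emitted verbatim):
+ *
+ * #define __MyGame_Vec3_formal_args , float v0, float v1, float v2
+ * #define __MyGame_Vec3_call_args , v0, v1, v2
+ * static inline MyGame_Vec3_t *MyGame_Vec3_assign(MyGame_Vec3_t *p, float v0, float v1, float v2)
+ * { p->x = v0; p->y = v1; p->z = v2;
+ * return p; }
+ *
+ * followed by _copy, _assign_to_pe/_copy_to_pe, _assign_from_pe/_copy_from_pe
+ * and the __flatbuffers_build_struct invocation. */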
+static void gen_builder_struct(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ int arg_count;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ assert(ct->symbol.kind == fb_is_struct);
+
+ fb_compound_name(ct, &snt);
+
+ arg_count = get_total_struct_field_count(ct);
+ fprintf(out->fp, "#define __%s_formal_args ", snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 1);
+ fprintf(out->fp, "\n#define __%s_call_args ", snt.text);
+ gen_builder_struct_call_list(out, ct, 0, arg_count, 1);
+ fprintf(out->fp, "\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_assign(%s_t *p",
+ snt.text, snt.text, snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 0);
+ fprintf(out->fp, ")\n{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, no_conversion, 0);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_copy(%s_t *p, const %s_t *p2)\n",
+ snt.text, snt.text, snt.text, snt.text);
+ fprintf(out->fp, "{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, no_conversion, 1);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_assign_to_pe(%s_t *p",
+ snt.text, snt.text, snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 0);
+ fprintf(out->fp, ")\n{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_to_pe, 0);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_copy_to_pe(%s_t *p, const %s_t *p2)\n",
+ snt.text, snt.text, snt.text, snt.text);
+ fprintf(out->fp, "{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_to_pe, 1);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_assign_from_pe(%s_t *p",
+ snt.text, snt.text, snt.text);
+ gen_builder_struct_args(out, ct, 0, arg_count, 0);
+ fprintf(out->fp, ")\n{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_from_pe, 0);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp,
+ "static inline %s_t *%s_copy_from_pe(%s_t *p, const %s_t *p2)\n",
+ snt.text, snt.text, snt.text, snt.text);
+ fprintf(out->fp, "{ ");
+ gen_builder_struct_field_assign(out, ct, 0, arg_count, convert_from_pe, 1);
+ fprintf(out->fp, "return p; }\n");
+ fprintf(out->fp, "__%sbuild_struct(%s, %s, %"PRIu64", %u, %s_file_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, (uint64_t)ct->size, ct->align, snt.text, snt.text);
+
+ if (ct->size > 0) {
+ fprintf(out->fp, "__%sdefine_fixed_array_primitives(%s, %s, %s_t)\n",
+ nsc, nsc, snt.text, snt.text);
+ }
+}
+
+static int get_create_table_arg_count(fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int count = 0;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ ++count;
+ }
+ return count;
+}
+
+static int gen_builder_table_call_list(fb_output_t *out, fb_compound_type_t *ct, int arg_count, int is_macro)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int index = 0;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ gen_comma(out, index, arg_count, is_macro);
+ fprintf(out->fp, "v%"PRIu64"", (uint64_t)member->id);
+ ++index;
+ }
+ return index;
+}
+
+
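+/* Emits the per-table array of required field ids that the generated
+ * <table>_end check asserts against. Sketch of the output for a hypothetical
+ * table with a single required field at id 3:
+ *
+ * static const flatbuffers_voffset_t __MyGame_Monster_required[] = { 3, 0 };
+ *
+ * The trailing 0 keeps the array non-empty and is excluded from the count. */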
+static int gen_required_table_fields(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ int index;
+ int arg_count;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ arg_count = get_create_table_arg_count(ct);
+ index = 0;
+ fb_compound_name(ct, &snt);
+ fprintf(out->fp, "static const %svoffset_t __%s_required[] = {", nsc, snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ if (member->metadata_flags & fb_f_required) {
+ if (index > 0) {
+ gen_comma(out, index, arg_count, 0);
+ } else {
+ fprintf(out->fp, " ");
+ }
+ fprintf(out->fp, "%u", (unsigned)member->id);
+ index++;
+ }
+ }
+ /* Add extra element to avoid null arrays. */
+ if (index > 0) {
+ fprintf(out->fp, ", 0 };\n");
+ } else {
+ fprintf(out->fp, " 0 };\n");
+ }
+ return index;
+}
+
+static int gen_builder_table_args(fb_output_t *out, fb_compound_type_t *ct, int arg_count, int is_macro)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ const char *tname, *tname_ns;
+ int index;
+ fb_scoped_name_t snref;
+
+ fb_clear(snref);
+ /* Index is only used to drive comma and line-break placement. */
+ index = 0;
+ /* We use the id to name arguments so sorted assignment can find the arguments trivially. */
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ gen_comma(out, index++, arg_count, is_macro);
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ fprintf(out->fp, "%s_t *v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_enum:
+ fprintf(out->fp, "%s_enum_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_table:
+ fprintf(out->fp, "%s_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_union:
+ /* A union occupies two fields (type and value), so its id jumps an extra index. */
+ fprintf(out->fp, "%s_union_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table field type");
+ continue;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_enum:
+ case fb_is_table:
+ fprintf(out->fp, "%s_vec_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ case fb_is_union:
+ fprintf(out->fp, "%s_union_vec_ref_t v%"PRIu64"", snref.text, (uint64_t)member->id);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table field vector type");
+ continue;
+ }
+ break;
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ fprintf(out->fp, "%s%s v%"PRIu64"", tname_ns, tname, (uint64_t)member->id);
+ break;
+ case vt_vector_type:
+ tname = scalar_type_prefix(member->type.st);
+ fprintf(out->fp, "%s%s_vec_ref_t v%"PRIu64"", nsc, tname, (uint64_t)member->id);
+ break;
+ case vt_string_type:
+ fprintf(out->fp, "%sstring_ref_t v%"PRIu64"", nsc, (uint64_t)member->id);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp, "%sstring_vec_ref_t v%"PRIu64"", nsc, (uint64_t)member->id);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type");
+ continue;
+ }
+ }
+ return index;
+}
+
+static int gen_builder_create_table_decl(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ int arg_count;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ arg_count = get_create_table_arg_count(ct);
+ fprintf(out->fp, "#define __%s_formal_args ", snt.text);
+ gen_builder_table_args(out, ct, arg_count, 1);
+ fprintf(out->fp, "\n#define __%s_call_args ", snt.text);
+ gen_builder_table_call_list(out, ct, arg_count, 1);
+ fprintf(out->fp, "\n");
+
+ /* The `_clone` forward declaration must be placed before the build_table macro and `_create` must be placed after. */
+ fprintf(out->fp,
+ "static inline %s_ref_t %s_create(%sbuilder_t *B __%s_formal_args);\n",
+ snt.text, snt.text, nsc, snt.text);
+ return 0;
+}
+
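+/* Output sketch for a hypothetical table MyGame_Monster with two non-union
+ * fields (field names and ids are illustrative):
+ *
+ * static inline MyGame_Monster_ref_t MyGame_Monster_create(flatbuffers_builder_t *B __MyGame_Monster_formal_args)
+ * {
+ * if (MyGame_Monster_start(B)
+ * || MyGame_Monster_name_add(B, v0)
+ * || MyGame_Monster_hp_add(B, v1)) {
+ * return 0;
+ * }
+ * return MyGame_Monster_end(B);
+ * }
+ *
+ * Fields are added in the builder's sorted (ordered_members) order while the
+ * v<N> arguments keep their schema field ids. Unless original_order is set,
+ * union values are added in the first pass and their type fields are patched
+ * in by a second pass. */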
+static int gen_builder_create_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ int n;
+ const char *s;
+ int patch_union = !(ct->metadata_flags & fb_f_original_order);
+ int has_union = 0;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static inline %s_ref_t %s_create(%sbuilder_t *B __%s_formal_args)\n",
+ snt.text, snt.text, nsc, snt.text);
+
+ fprintf(out->fp, "{\n if (%s_start(B)", snt.text);
+ for (member = ct->ordered_members; member; member = member->order) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(&member->symbol, &n, &s);
+ if (member->type.type == vt_compound_type_ref && member->type.ct->symbol.kind == fb_is_union) {
+ has_union = 1;
+ if (patch_union) {
+ fprintf(out->fp, "\n || %s_%.*s_add_value(B, v%"PRIu64")", snt.text, n, s, (uint64_t)member->id);
+ continue;
+ }
+ }
+ fprintf(out->fp, "\n || %s_%.*s_add(B, v%"PRIu64")", snt.text, n, s, (uint64_t)member->id);
+ }
+ if (patch_union && has_union) {
+ for (member = ct->ordered_members; member; member = member->order) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ if (member->type.type == vt_compound_type_ref && member->type.ct->symbol.kind == fb_is_union) {
+ symbol_name(&member->symbol, &n, &s);
+ fprintf(out->fp, "\n || %s_%.*s_add_type(B, v%"PRIu64".type)", snt.text, n, s, (uint64_t)member->id);
+ }
+ }
+ }
+ fprintf(out->fp, ") {\n return 0;\n }\n return %s_end(B);\n}\n\n", snt.text);
+ return 0;
+}
+
+static int gen_builder_structs(fb_output_t *out)
+{
+ fb_compound_type_t *ct;
+
+ /* Generate structs in topologically sorted order. */
+ for (ct = out->S->ordered_structs; ct; ct = ct->order) {
+ gen_builder_struct(out, ct);
+ fprintf(out->fp, "\n");
+ }
+ return 0;
+}
+
+static int gen_builder_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "typedef %sref_t %s_ref_t;\n",
+ nsc, snt.text);
+ fprintf(out->fp,
+ "static %s_ref_t %s_clone(%sbuilder_t *B, %s_table_t t);\n",
+ snt.text, snt.text, nsc, snt.text);
+ fprintf(out->fp, "__%sbuild_table(%s, %s, %"PRIu64")\n",
+ nsc, nsc, snt.text, (uint64_t)ct->count);
+ return 0;
+}
+
+static int gen_builder_table_prolog(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp, "__%sbuild_table_prolog(%s, %s, %s_file_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, snt.text, snt.text);
+ return 0;
+}
+
+static int gen_union_fields(fb_output_t *out, const char *st, int n, const char *s,
+ fb_compound_type_t *ct, int is_vector)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ const char *su;
+ int nu;
+ fb_scoped_name_t snref;
+ fb_scoped_name_t snu;
+ const char *kind = is_vector ? "vector_value" : "value";
+
+ fb_clear(snref);
+ fb_clear(snu);
+ fb_compound_name(ct, &snref);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &nu, &su);
+ switch (member->type.type) {
+ case vt_missing:
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snu);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ "__%sbuild_union_table_%s_field(%s, %s_%.*s, %s, %.*s, %s)\n",
+ nsc, kind, nsc, st, n, s, snref.text, nu, su, snu.text);
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sbuild_union_struct_%s_field(%s, %s_%.*s, %s, %.*s, %s)\n",
+ nsc, kind, nsc, st, n, s, snref.text, nu, su, snu.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union member compound type");
+ return -1;
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "__%sbuild_union_string_%s_field(%s, %s_%.*s, %s, %.*s)\n",
+ nsc, kind, nsc, st, n, s, snref.text, nu, su);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union member type");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int gen_builder_table_fields(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s, *tprefix, *tname, *tname_ns;
+ int n;
+ int is_optional;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+ fb_literal_t literal;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(&member->symbol, &n, &s);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "/* Skipping build of deprecated field: '%s_%.*s' */\n\n", snt.text, n, s);
+ continue;
+ }
+ is_optional = member->flags & fb_fm_optional;
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tprefix = scalar_type_prefix(member->type.st);
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sbuild_scalar_optional_field(%"PRIu64", %s, %s_%.*s, %s%s, %s%s, %"PRIu64", %u, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, nsc, tprefix, tname_ns, tname,
+ (uint64_t)member->size, member->align, snt.text);
+ } else {
+ print_literal(member->type.st, &member->value, literal);
+ fprintf(out->fp,
+ "__%sbuild_scalar_field(%"PRIu64", %s, %s_%.*s, %s%s, %s%s, %"PRIu64", %u, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, nsc, tprefix, tname_ns, tname,
+ (uint64_t)member->size, member->align, literal, snt.text);
+ }
+ break;
+ case vt_vector_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tprefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "__%sbuild_vector_field(%"PRIu64", %s, %s_%.*s, %s%s, %s%s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, nsc, tprefix, tname_ns, tname, snt.text);
+ /* [ubyte] vectors can nest buffers. */
+ if (member->nest) {
+ switch (member->nest->symbol.kind) {
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)(&member->nest->symbol), &snref);
+ fprintf(out->fp, "__%sbuild_nested_table_root(%s, %s_%.*s, %s, %s_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, n, s, snref.text, snref.text, snref.text);
+ break;
+ case fb_is_struct:
+ fb_compound_name((fb_compound_type_t *)(&member->nest->symbol), &snref);
+ fprintf(out->fp, "__%sbuild_nested_struct_root(%s, %s_%.*s, %s, %u, %s_identifier, %s_type_identifier)\n",
+ nsc, nsc, snt.text, n, s, snref.text,
+ (unsigned)((fb_compound_type_t *)(member->nest))->align, snref.text, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected nested type");
+ continue;
+ }
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "__%sbuild_string_field(%"PRIu64", %s, %s_%.*s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snt.text);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "__%sbuild_string_vector_field(%"PRIu64", %s, %s_%.*s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snt.text);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sbuild_struct_field(%"PRIu64", %s, %s_%.*s, %s, %"PRIu64", %u, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, (uint64_t)member->size, member->align, snt.text);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "__%sbuild_table_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ break;
+ case fb_is_enum:
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sbuild_scalar_optional_field(%"PRIu64", %s, %s_%.*s, %s, %s_enum_t, %"PRIu64", %u, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text,
+ (uint64_t)member->size, member->align, snt.text);
+ } else {
+ print_literal(member->type.ct->type.st, &member->value, literal);
+ fprintf(out->fp,
+ "__%sbuild_scalar_field(%"PRIu64", %s, %s_%.*s, %s, %s_enum_t, %"PRIu64", %u, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text,
+ (uint64_t)member->size, member->align, literal, snt.text);
+ }
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "__%sbuild_union_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ gen_union_fields(out, snt.text, n, s, member->type.ct, 0);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type in table during code generation");
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ if (member->type.ct->symbol.flags & fb_indexed) {
+ fprintf(out->fp, "/* vector has keyed elements */\n");
+ }
+ fprintf(out->fp,
+ "__%sbuild_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s_t, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text, snt.text);
+ break;
+ case fb_is_table:
+ if (member->type.ct->symbol.flags & fb_indexed) {
+ fprintf(out->fp, "/* vector has keyed elements */\n");
+ }
+ fprintf(out->fp,
+ "__%sbuild_table_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ break;
+ case fb_is_enum:
+ fprintf(out->fp,
+ "__%sbuild_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s_enum_t, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snref.text, snt.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "__%sbuild_union_vector_field(%"PRIu64", %s, %s_%.*s, %s, %s)\n",
+ nsc, (uint64_t)member->id, nsc, snt.text, n, s, snref.text, snt.text);
+ gen_union_fields(out, snt.text, n, s, member->type.ct, 1);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type in table during code generation");
+ break;
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type during code generation");
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
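+
+/*
+ * For illustration only: for the hypothetical `NS_Monster` table above,
+ * the string field `name` with id 1 would emit roughly:
+ *
+ *     __flatbuffers_build_string_field(1, flatbuffers_, NS_Monster_name, NS_Monster)
+ *
+ * Scalar, struct, table, vector and union fields expand to the
+ * corresponding `__flatbuffers_build_..._field` macros in the same way.
+ */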
+
+/*
+ * NOTE:
+ *
+ * Cloning a table might lead to a combinatorial explosion if the source
+ * has many shared references in a DAG. In many cases this might not be
+ * an issue, but if it is, deduplication will be necessary. Deduplication
+ * is not specific to cloning but especially relevant here. Because
+ * deduplication carries an overhead in runtime and complexity it is not
+ * part of the core cloning operation. Cloning of unions and vectors with
+ * references raises similar concerns.
+ *
+ * A deduplication operation would internally look like this:
+ *
+ * dedup_clone_table(builder, dedup_map, src_ptr)
+ * {
+ * ref = get_cloned_ref(dedup_map, src_ptr)
+ * if (!ref) {
+ * ref = clone_table(builder, src_ptr);
+ * set_cloned_ref(dedup_map, src_ptr, ref);
+ * }
+ * return ref;
+ * }
+ *
+ * where dedup_map is a map from a pointer to a builder reference and
+ * where the dedup_map is dedicated to a single builder and may cover
+ * multiple source buffers as long as they have separate memory
+ * locations - otherwise a separate dedup map must be used for each
+ * source buffer.
+ *
+ * Note that the clone operation is not safe without a safe source
+ * buffer so clone cannot be used to make a buffer with overlapping data
+ * safe (e.g. a string and a table referencing the same memory). Even if
+ * the source passes basic verification the result might not. To make
+ * clone safe it would be necessary to remember the type as well, for
+ * example by adding a type specifier to the dedup_map.
+ *
+ * In the following we do not implement deduplication.
+ */
+static int gen_builder_clone_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ const char *s;
+ int n;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ /*
+ * We could optimize this by cloning the entire table memory block
+     * and then update only the references. The builder has
+ * direct vtable operations to support this - this would not work
+ * properly if there are deprecated fields to be stripped or if the
+ * default value has changed - and, more complicated: it is
+     * necessary to know what the table alignment needs to be, which requires
+ * inspection of all fields, or a worst case assumption. So at least
+ * for now, we clone by picking one field at a time.
+ */
+
+ fprintf(out->fp,
+ "static %s_ref_t %s_clone(%sbuilder_t *B, %s_table_t t)\n",
+ snt.text, snt.text, nsc, snt.text);
+
+ fprintf(out->fp,
+ "{\n"
+ " __%smemoize_begin(B, t);\n"
+ " if (%s_start(B)", nsc, snt.text);
+ for (member = ct->ordered_members; member; member = member->order) {
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(&member->symbol, &n, &s);
+ switch (member->type.type) {
+ case vt_scalar_type:
+ case vt_vector_type: /* This includes nested buffers - they are just transferred as bytes. */
+ case vt_string_type:
+ case vt_vector_string_type:
+ fprintf(out->fp, "\n || %s_%.*s_pick(B, t)", snt.text, n, s);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_enum:
+ case fb_is_union:
+ fprintf(out->fp, "\n || %s_%.*s_pick(B, t)", snt.text, n, s);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type in table during code generation");
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_enum:
+ case fb_is_union:
+ fprintf(out->fp, "\n || %s_%.*s_pick(B, t)", snt.text, n, s);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type in table during code generation");
+ break;
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type during code generation");
+ break;
+ }
+ }
+ fprintf(out->fp, ") {\n"
+ " return 0;\n"
+ " }\n"
+ " __%smemoize_end(B, t, %s_end(B));\n}\n", nsc, snt.text);
+ return 0;
+}
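+
+/*
+ * For illustration only: for the same hypothetical `NS_Monster` table,
+ * the generated clone would look roughly like this:
+ *
+ *     static NS_Monster_ref_t NS_Monster_clone(flatbuffers_builder_t *B, NS_Monster_table_t t)
+ *     {
+ *         __flatbuffers_memoize_begin(B, t);
+ *         if (NS_Monster_start(B)
+ *             || NS_Monster_hp_pick(B, t)
+ *             || NS_Monster_name_pick(B, t)) {
+ *             return 0;
+ *         }
+ *         __flatbuffers_memoize_end(B, t, NS_Monster_end(B));
+ *     }
+ */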
+
+static int gen_builder_enums(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ int was_here = 0;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "#define __%s_formal_args , %s_enum_t v0\n"
+ "#define __%s_call_args , v0\n",
+ snt.text, snt.text,
+ snt.text);
+ fprintf(out->fp, "__%sbuild_scalar(%s, %s, %s_enum_t)\n",
+ nsc, nsc, snt.text, snt.text);
+ was_here = 1;
+ break;
+ default:
+ continue;
+ }
+ }
+ if (was_here) {
+ fprintf(out->fp, "\n");
+ }
+ return 0;
+}
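+
+/*
+ * For illustration only: a hypothetical enum `NS_Color` would emit
+ * roughly:
+ *
+ *     #define __NS_Color_formal_args , NS_Color_enum_t v0
+ *     #define __NS_Color_call_args , v0
+ *     __flatbuffers_build_scalar(flatbuffers_, NS_Color, NS_Color_enum_t)
+ */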
+
+/*
+ * Scope resolution is a bit fuzzy in unions -
+ *
+ * Google's flatc compiler allows dot notation in unions but not enums.
+ * C++ generates unqualified enum members (i.e. MyGame.Example.Monster
+ * becomes Monster) in the generated enum but still refers to the
+ * specific table type in the given namespace. This makes it possible
+ * to have name conflicts, and flatc raises these like other enum
+ * conflicts.
+ *
+ * We use the same approach and this is why we both look up compound
+ * name and symbol name for the same member but the code generator
+ * is not concerned with how the scope is parsed or how errors are
+ * flagged - it just expects members to be unique.
+ */
+static int gen_union(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s;
+ int n;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name((fb_compound_type_t *)member->type.ct, &snref);
+ symbol_name(sym, &n, &s);
+ fprintf(out->fp,
+ "static inline %s_union_ref_t %s_as_%.*s(%s_ref_t ref)\n"
+ "{ %s_union_ref_t uref; uref.type = %s_%.*s; uref.value = ref; return uref; }\n",
+ snt.text, snt.text, n, s, snref.text,
+ snt.text, snt.text, n, s);
+ break;
+ case vt_string_type:
+ symbol_name(sym, &n, &s);
+ fprintf(out->fp,
+ "static inline %s_union_ref_t %s_as_%.*s(%sstring_ref_t ref)\n"
+ "{ %s_union_ref_t uref; uref.type = %s_%.*s; uref.value = ref; return uref; }\n",
+ snt.text, snt.text, n, s, nsc,
+ snt.text, snt.text, n, s);
+ break;
+ case vt_missing:
+ fprintf(out->fp,
+ "static inline %s_union_ref_t %s_as_NONE(void)\n"
+ "{ %s_union_ref_t uref; uref.type = %s_NONE; uref.value = 0; return uref; }\n",
+ snt.text, snt.text, snt.text, snt.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union value type");
+ break;
+ }
+ }
+ fprintf(out->fp,
+ "__%sbuild_union_vector(%s, %s)\n\n",
+ nsc, nsc, snt.text);
+ return 0;
+}
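+
+/*
+ * For illustration only: a hypothetical union `NS_Any` with a single
+ * table member `Monster` would emit roughly:
+ *
+ *     static inline NS_Any_union_ref_t NS_Any_as_Monster(NS_Monster_ref_t ref)
+ *     { NS_Any_union_ref_t uref; uref.type = NS_Any_Monster; uref.value = ref; return uref; }
+ *     static inline NS_Any_union_ref_t NS_Any_as_NONE(void)
+ *     { NS_Any_union_ref_t uref; uref.type = NS_Any_NONE; uref.value = 0; return uref; }
+ *     __flatbuffers_build_union_vector(flatbuffers_, NS_Any)
+ */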
+
+static int gen_union_clone(fb_output_t *out, fb_compound_type_t *ct)
+{
+ const char *nsc = out->nsc;
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s;
+ int n;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static %s_union_ref_t %s_clone(%sbuilder_t *B, %s_union_t u)\n{\n switch (u.type) {\n",
+ snt.text, snt.text, nsc, snt.text);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name((fb_compound_type_t *)member->type.ct, &snref);
+ symbol_name(sym, &n, &s);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ " case %u: return %s_as_%.*s(%s_clone(B, (%s_table_t)u.value));\n",
+ (unsigned)member->value.u, snt.text, n, s, snref.text, snref.text);
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ " case %u: return %s_as_%.*s(%s_clone(B, (%s_struct_t)u.value));\n",
+ (unsigned)member->value.u, snt.text, n, s, snref.text, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union value type");
+ break;
+ }
+ break;
+ case vt_string_type:
+ symbol_name(sym, &n, &s);
+ fprintf(out->fp,
+ " case %u: return %s_as_%.*s(%sstring_clone(B, u.value));\n",
+ (unsigned)member->value.u, snt.text, n, s, nsc);
+ break;
+ case vt_missing:
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected union value type");
+ break;
+ }
+ }
+
+ /* Unknown unions are dropped. */
+ fprintf(out->fp,
+ " default: return %s_as_NONE();\n"
+ " }\n}\n",
+ snt.text);
+ return 0;
+}
+
+
+static int gen_builder_union_decls(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ fb_symbol_t *sym;
+ int was_here = 0;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "typedef %sunion_ref_t %s_union_ref_t;\n"
+ "typedef %sunion_vec_ref_t %s_union_vec_ref_t;\n",
+ nsc, snt.text, nsc, snt.text);
+ fprintf(out->fp,
+ "static %s_union_ref_t %s_clone(%sbuilder_t *B, %s_union_t t);\n",
+ snt.text, snt.text, nsc, snt.text);
+ was_here = 1;
+ break;
+ default:
+ continue;
+ }
+ }
+ if (was_here) {
+ fprintf(out->fp, "\n");
+ }
+ return 0;
+}
+
+static int gen_builder_unions(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_union(out, (fb_compound_type_t *)sym);
+ gen_union_clone(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
+static int gen_builder_table_decls(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ /*
+ * Because tables are recursive, we need the type and `start/end/add`
+     * operations before the fields. We also need create for push_create,
+     * but it needs all dependent types, so create is forward declared
+     * in a subsequent step. The actual create implementation then follows
+ * after the table fields.
+ */
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_required_table_fields(out, (fb_compound_type_t *)sym);
+ gen_builder_table(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_builder_create_table_decl(out, (fb_compound_type_t *)sym);
+ gen_builder_table_prolog(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
+static int gen_builder_tables(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_builder_table_fields(out, (fb_compound_type_t *)sym);
+ gen_builder_create_table(out, (fb_compound_type_t *)sym);
+ gen_builder_clone_table(out, (fb_compound_type_t *)sym);
+ fprintf(out->fp, "\n");
+ break;
+ default:
+ continue;
+ }
+ }
+ return 0;
+}
+
+static int gen_builder_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_BUILDER_H */\n",
+ out->S->basenameup);
+ return 0;
+}
+
+int fb_gen_c_builder(fb_output_t *out)
+{
+ gen_builder_pretext(out);
+ gen_builder_enums(out);
+ gen_builder_structs(out);
+ gen_builder_union_decls(out);
+ gen_builder_table_decls(out);
+ gen_builder_unions(out);
+ gen_builder_tables(out);
+ gen_builder_footer(out);
+ return 0;
+}
diff --git a/flatcc/src/compiler/codegen_c_json_parser.c b/flatcc/src/compiler/codegen_c_json_parser.c
new file mode 100644
index 0000000..307ce76
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_json_parser.c
@@ -0,0 +1,1850 @@
+#include <stdlib.h>
+#include "codegen_c.h"
+#include "flatcc/flatcc_types.h"
+#include "catalog.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+#define PRINTLN_SPMAX 64
+static char println_spaces[PRINTLN_SPMAX];
+
+static void println(fb_output_t *out, const char * format, ...)
+{
+ int i = out->indent * out->opts->cgen_spacing;
+ va_list ap;
+
+ if (println_spaces[0] == 0) {
+ memset(println_spaces, 0x20, PRINTLN_SPMAX);
+ }
+ /* Don't indent on blank lines. */
+ if (*format) {
+ while (i > PRINTLN_SPMAX) {
+ fprintf(out->fp, "%.*s", (int)PRINTLN_SPMAX, println_spaces);
+ i -= PRINTLN_SPMAX;
+ }
+        /* The loop above leaves at most PRINTLN_SPMAX spaces to print. */
+ fprintf(out->fp, "%.*s", i, println_spaces);
+ va_start (ap, format);
+ vfprintf (out->fp, format, ap);
+ va_end (ap);
+ }
+ fprintf(out->fp, "\n");
+}
+
+/*
+ * Unknown fields and unknown union members can be failed
+ * rather than ignored via a config flag.
+ *
+ * Default values can be forced with a config flag.
+ *
+ * Forward schema compatibility isn't perfect: unknown symbolic constants
+ * cannot be used with known fields but will be ignored
+ * in ignored fields.
+ */
+
+static int gen_json_parser_pretext(fb_output_t *out)
+{
+ println(out, "#ifndef %s_JSON_PARSER_H", out->S->basenameup);
+ println(out, "#define %s_JSON_PARSER_H", out->S->basenameup);
+ println(out, "");
+ println(out, "/* " FLATCC_GENERATED_BY " */");
+ println(out, "");
+ println(out, "#include \"flatcc/flatcc_json_parser.h\"");
+ fb_gen_c_includes(out, "_json_parser.h", "_JSON_PARSER_H");
+ gen_prologue(out);
+ println(out, "");
+ return 0;
+}
+
+static int gen_json_parser_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ println(out, "#endif /* %s_JSON_PARSER_H */", out->S->basenameup);
+ return 0;
+}
+
+typedef struct dict_entry dict_entry_t;
+struct dict_entry {
+ const char *text;
+ int len;
+ void *data;
+ int hint;
+};
+
+/* Returns the length of the name that remains after the tag at the current position. */
+static int get_dict_suffix_len(dict_entry_t *de, int pos)
+{
+ int n;
+
+ n = de->len;
+ if (pos + 8 > n) {
+ return 0;
+ }
+ return n - pos - 8;
+}
+
+/*
+ * Returns the length of the name that remains if it terminates within the tag,
+ * and 0 if it has a suffix.
+ */
+static int get_dict_tag_len(dict_entry_t *de, int pos)
+{
+ int n;
+
+ n = de->len;
+ if (pos + 8 >= n) {
+ return n - pos;
+ }
+ return 0;
+}
+
+/*
+ * Extracts the 8 byte word of the name starting at character `pos` in big
+ * endian encoding with the first char always at the msb, zero padded at the lsb.
+ * Returns the length of the tag [0;8].
+ */
+static int get_dict_tag(dict_entry_t *de, int pos, uint64_t *tag, uint64_t *mask,
+ const char **tag_name, int *tag_len)
+{
+ int i, n = 0;
+ const char *a = 0;
+ uint64_t w = 0;
+
+ if (pos > de->len) {
+ goto done;
+ }
+ a = de->text + pos;
+ n = de->len - pos;
+ if (n > 8) {
+ n = 8;
+ }
+ i = n;
+ while (i--) {
+ w |= ((uint64_t)a[i]) << (56 - (i * 8));
+ }
+ *tag = w;
+ *mask = ~(((uint64_t)(1) << (8 - n) * 8) - 1);
+done:
+ if (tag_name) {
+ *tag_name = a;
+ }
+ if (tag_len) {
+ *tag_len = n;
+ }
+ return n;
+}
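+
+/*
+ * Worked example: for the hypothetical 5 character key "Color" at pos 0,
+ * n = 5 so the tag becomes 0x436f6c6f72000000 ('C','o','l','o','r'
+ * followed by zero padding) and the mask 0xffffffffff000000 covers
+ * exactly the 5 significant bytes.
+ */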
+
+
+/*
+ * Find the median, but move earlier if the previous entry
+ * is a strict prefix within the range.
+ *
+ * `b` is inclusive.
+ *
+ * The `pos` is a window into the key at an 8 byte multiple.
+ *
+ * Only consider the range `[pos;pos+8)` and move the median
+ * towards the start if an earlier key is a prefix of or matches it within this
+ * window. This is needed to handle trailing data in
+ * a compared external key, and also to handle sub-tree
+ * branching when two keys have the same tag at pos.
+ *
+ * Worst case we get a linear search of length 8 if all
+ * keys are perfect prefixes of their successor key:
+ * `a, ab, abc, ..., abcdefgh`
+ * While the midpoint still seeks towards 'a' for longer
+ * such sequences, the branch logic will pool those
+ * sequences that share prefix groups of length 8.
+ */
+static int split_dict_left(dict_entry_t *dict, int a, int b, int pos)
+{
+ int m = a + (b - a) / 2;
+ uint64_t wf = 0, wg = 0, wmf = 0, wmg = 0;
+
+ while (m > a) {
+ get_dict_tag(&dict[m - 1], pos, &wf, &wmf, 0, 0);
+ get_dict_tag(&dict[m], pos, &wg, &wmg, 0, 0);
+ if (((wf ^ wg) & wmf) != 0) {
+ return m;
+ }
+ --m;
+ }
+ return m;
+}
+
+/*
+ * When multiple tags are identical after split_dict_left has moved
+ * the intersection to a == m, we need to split in the opposite direction
+ * to ensure progress until all tags in the range are identical
+ * at which point the trie must descend.
+ *
+ * If all tags are the same from intersection to end, b + 1 is returned
+ * which is not a valid element.
+ */
+static int split_dict_right(dict_entry_t *dict, int a, int b, int pos)
+{
+ int m = a + (b - a) / 2;
+ uint64_t wf = 0, wg = 0, wmf = 0, wmg = 0;
+
+ while (m < b) {
+ get_dict_tag(&dict[m], pos, &wf, &wmf, 0, 0);
+ get_dict_tag(&dict[m + 1], pos, &wg, &wmg, 0, 0);
+ if (((wf ^ wg) & wmf) != 0) {
+ return m + 1;
+ }
+ ++m;
+ }
+ return m + 1;
+}
+
+/*
+ * Returns the first index where the tag does not terminate at
+ * [pos..pos+7], or b + 1 if none exists.
+ */
+static int split_dict_descend(dict_entry_t *dict, int a, int b, int pos)
+{
+ while (a <= b) {
+ if (0 < get_dict_suffix_len(&dict[a], pos)) {
+ break;
+ }
+ ++a;
+ }
+ return a;
+}
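+
+/*
+ * Worked example: for the hypothetical sorted dictionary
+ * { "a", "ab", "abc", "alpha" } with a = 0, b = 3 and pos = 0,
+ * split_dict_left returns 0 because every earlier key is a prefix of
+ * the midpoint key within the tag, split_dict_right then returns 3
+ * because "abc" and "alpha" differ inside the tag, and
+ * split_dict_descend over [0;2] returns 3 because none of "a", "ab",
+ * "abc" extend beyond the first tag.
+ */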
+
+
+static int dict_cmp(const void *x, const void *y)
+{
+ const dict_entry_t *a = x, *b = y;
+ int k, n = a->len > b->len ? b->len : a->len;
+
+ k = memcmp(a->text, b->text, (size_t)n);
+ return k ? k : a->len - b->len;
+}
+
+/* Includes union vectors. */
+static inline int is_union_member(fb_member_t *member)
+{
+ return (member->type.type == vt_compound_type_ref || member->type.type == vt_vector_compound_type_ref)
+ && member->type.ct->symbol.kind == fb_is_union;
+}
+
+static dict_entry_t *build_compound_dict(fb_compound_type_t *ct, int *count_out)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ size_t n;
+ dict_entry_t *dict, *de;
+ char *strbuf = 0;
+ size_t strbufsiz = 0;
+ int is_union;
+ size_t union_index = 0;
+
+ n = 0;
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ is_union = is_union_member(member);
+ if (is_union) {
+ ++n;
+ strbufsiz += (size_t)member->symbol.ident->len + 6;
+ }
+ ++n;
+ }
+ *count_out = (int)n;
+ if (n == 0) {
+ return 0;
+ }
+ dict = malloc(n * sizeof(dict_entry_t) + strbufsiz);
+ if (!dict) {
+ return 0;
+ }
+ strbuf = (char *)dict + n * sizeof(dict_entry_t);
+ de = dict;
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ de->text = member->symbol.ident->text;
+ de->len = (int)member->symbol.ident->len;
+ de->data = member;
+ de->hint = 0;
+ ++de;
+ is_union = is_union_member(member);
+ if (is_union) {
+ member->export_index = union_index++;
+ de->len = (int)member->symbol.ident->len + 5;
+ de->text = strbuf;
+ memcpy(strbuf, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ strbuf += member->symbol.ident->len;
+ strcpy(strbuf, "_type");
+ strbuf += 6;
+ de->data = member;
+ de->hint = 1;
+ ++de;
+ }
+ }
+ qsort(dict, n, sizeof(dict[0]), dict_cmp);
+ return dict;
+}
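+
+/*
+ * For illustration only: assuming a table with a deprecated field `old`,
+ * a scalar field `hp` and a union field `weapon`, the resulting
+ * dictionary holds "hp" (hint 0), "weapon" (hint 0) and the synthesized
+ * key "weapon_type" (hint 1) in dict_cmp order, while `old` is skipped.
+ */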
+
+typedef struct {
+ int count;
+ fb_schema_t *schema;
+ dict_entry_t *de;
+} install_enum_context_t;
+
+static void count_visible_enum_symbol(void *context, fb_symbol_t *sym)
+{
+ install_enum_context_t *p = context;
+
+ if (get_enum_if_visible(p->schema, sym)) {
+ p->count++;
+ }
+}
+
+static void install_visible_enum_symbol(void *context, fb_symbol_t *sym)
+{
+ install_enum_context_t *p = context;
+
+ if (get_enum_if_visible(p->schema, sym)) {
+ p->de->text = sym->ident->text;
+ p->de->len = (int)sym->ident->len;
+ p->de->data = sym;
+ p->de++;
+ }
+}
+
+/*
+ * A scope dictionary contains all the enum types defined under the given
+ * namespace of the scope. The actual namespace is not contained in
+ * the name - it is an implicit prefix. It is used when looking up a
+ * symbolic constant assigned to a field such that the constant is first
+ * searched for in the same scope (namespace) as the one that defined
+ * the table owning the field being assigned. If that fails, a global
+ * namespace prefixed lookup is needed, but this is separate from this
+ * dictionary. In case of conflicts the local scope takes precedence
+ * and must be searched first. Because each table parsed can have a
+ * unique local scope, we cannot install the unprefixed lookup in
+ * the same dictionary as the global lookup.
+ *
+ * NOTE: the scope may have been contaminated by being expanded by a
+ * parent schema so we check that each symbol is visible to the current
+ * schema. If we didn't do this, we would risk referring to enum parsers
+ * that are not included in the generated source. The default empty
+ * namespace (i.e. scope) is an example where this easily could happen.
+ */
+static dict_entry_t *build_local_scope_dict(fb_schema_t *schema, fb_scope_t *scope, int *count_out)
+{
+ dict_entry_t *dict;
+ install_enum_context_t iec;
+
+ fb_clear(iec);
+
+ iec.schema = schema;
+
+ fb_symbol_table_visit(&scope->symbol_index, count_visible_enum_symbol, &iec);
+ *count_out = iec.count;
+
+ if (iec.count == 0) {
+ return 0;
+ }
+ dict = malloc((size_t)iec.count * sizeof(dict[0]));
+ if (!dict) {
+ return 0;
+ }
+ iec.de = dict;
+ fb_symbol_table_visit(&scope->symbol_index, install_visible_enum_symbol, &iec);
+ qsort(dict, (size_t)iec.count, sizeof(dict[0]), dict_cmp);
+ return dict;
+}
+
+static dict_entry_t *build_global_scope_dict(catalog_t *catalog, int *count_out)
+{
+ size_t i, n = (size_t)catalog->nenums;
+ dict_entry_t *dict;
+
+ *count_out = (int)n;
+ if (n == 0) {
+ return 0;
+ }
+ dict = malloc(n * sizeof(dict[0]));
+ if (!dict) {
+ return 0;
+ }
+ for (i = 0; i < (size_t)catalog->nenums; ++i) {
+ dict[i].text = catalog->enums[i].name;
+ dict[i].len = (int)strlen(catalog->enums[i].name);
+ dict[i].data = catalog->enums[i].ct;
+ dict[i].hint = 0;
+ }
+ qsort(dict, (size_t)catalog->nenums, sizeof(dict[0]), dict_cmp);
+ *count_out = catalog->nenums;
+ return dict;
+}
+
+static void clear_dict(dict_entry_t *dict)
+{
+ if (dict) {
+ free(dict);
+ }
+}
+
+static int gen_field_match_handler(fb_output_t *out, fb_compound_type_t *ct, void *data, int is_union_type)
+{
+ fb_member_t *member = data;
+ fb_scoped_name_t snref;
+ fb_symbol_text_t scope_name;
+
+ int is_struct_container;
+ int is_string = 0;
+ int is_enum = 0;
+ int is_vector = 0;
+ int is_offset = 0;
+ int is_scalar = 0;
+ int is_optional = 0;
+ int is_table = 0;
+ int is_struct = 0;
+ int is_union = 0;
+ int is_union_vector = 0;
+ int is_union_type_vector = 0;
+ int is_base64 = 0;
+ int is_base64url = 0;
+ int is_nested = 0;
+ int is_array = 0;
+ int is_char_array = 0;
+ size_t array_len = 0;
+ fb_scalar_type_t st = 0;
+    const char *tname_prefix = "n/a", *tname = "n/a"; /* suppress compiler warnings */
+ fb_literal_t literal;
+
+ fb_clear(snref);
+
+ fb_copy_scope(ct->scope, scope_name);
+ is_struct_container = ct->symbol.kind == fb_is_struct;
+ is_optional = !!(member->flags & fb_fm_optional);
+
+ switch (member->type.type) {
+ case vt_vector_type:
+ case vt_vector_compound_type_ref:
+ case vt_vector_string_type:
+ is_vector = 1;
+ break;
+ }
+
+ switch (member->type.type) {
+ case vt_fixed_array_compound_type_ref:
+ case vt_vector_compound_type_ref:
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ is_enum = member->type.ct->symbol.kind == fb_is_enum;
+ is_struct = member->type.ct->symbol.kind == fb_is_struct;
+ is_table = member->type.ct->symbol.kind == fb_is_table;
+ is_union = member->type.ct->symbol.kind == fb_is_union && !is_union_type;
+ if (is_enum) {
+ st = member->type.ct->type.st;
+ is_scalar = 1;
+ }
+ break;
+ case vt_vector_string_type:
+ case vt_string_type:
+ is_string = 1;
+ break;
+ case vt_vector_type:
+ /* Nested types are processed twice, once as an array, once as an object. */
+ is_nested = member->nest != 0;
+ is_base64 = member->metadata_flags & fb_f_base64;
+ is_base64url = member->metadata_flags & fb_f_base64url;
+ is_scalar = 1;
+ st = member->type.st;
+ break;
+ case vt_fixed_array_type:
+ is_scalar = 1;
+ is_array = 1;
+ array_len = member->type.len;
+ st = member->type.st;
+ break;
+ case vt_scalar_type:
+ is_scalar = 1;
+ st = member->type.st;
+ break;
+ }
+ if (member->type.type == vt_fixed_array_compound_type_ref) {
+ assert(is_struct_container);
+ is_array = 1;
+ array_len = member->type.len;
+ }
+ if (is_base64 || is_base64url) {
+ /* Even if it is nested, parse it as a regular base64 or base64url encoded vector. */
+ if (st != fb_ubyte || !is_vector) {
+ gen_panic(out, "internal error: unexpected base64 or base64url field type\n");
+ return -1;
+ }
+ is_nested = 0;
+ is_vector = 0;
+ is_scalar = 0;
+ }
+ if (is_union_type) {
+ is_scalar = 0;
+ }
+ if (is_vector && is_union_type) {
+ is_union_type_vector = 1;
+ is_vector = 0;
+ }
+ if (is_vector && is_union) {
+ is_union_vector = 1;
+ is_vector = 0;
+ }
+ if (is_array && is_scalar && st == fb_char) {
+ is_array = 0;
+ is_scalar = 0;
+ is_char_array = 1;
+ }
+ if (is_nested == 1) {
+ println(out, "if (buf != end && *buf == '[') { /* begin nested */"); indent();
+ }
+repeat_nested:
+ if (is_nested == 2) {
+ unindent(); println(out, "} else { /* nested */"); indent();
+ fb_compound_name((fb_compound_type_t *)&member->nest->symbol, &snref);
+ if (member->nest->symbol.kind == fb_is_table) {
+ is_table = 1;
+ } else {
+ is_struct = 1;
+ }
+ is_vector = 0;
+ is_scalar = 0;
+ println(out, "if (flatcc_builder_start_buffer(ctx->ctx, 0, 0, 0)) goto failed;");
+ }
+ is_offset = !is_scalar && !is_struct && !is_union_type;
+
+ if (is_scalar) {
+ tname_prefix = scalar_type_prefix(st);
+ tname = st == fb_bool ? "uint8_t" : scalar_type_name(st);
+ }
+
+ /* Other types can also be vector, so we wrap. */
+ if (is_vector) {
+ if (is_offset) {
+ println(out, "if (flatcc_builder_start_offset_vector(ctx->ctx)) goto failed;");
+ } else {
+ println(out,
+ "if (flatcc_builder_start_vector(ctx->ctx, %"PRIu64", %hu, UINT64_C(%"PRIu64"))) goto failed;",
+ (uint64_t)member->size, (short)member->align,
+ (uint64_t)FLATBUFFERS_COUNT_MAX(member->size));
+ }
+ }
+ if (is_array) {
+ if (is_scalar) {
+            println(out, "size_t count = %d;", (int)array_len);
+ println(out, "%s *base = (%s *)((size_t)struct_base + %"PRIu64");",
+ tname, tname, (uint64_t)member->offset);
+ }
+ else {
+            println(out, "size_t count = %d;", (int)array_len);
+ println(out, "void *base = (void *)((size_t)struct_base + %"PRIu64");",
+ (uint64_t)member->offset);
+ }
+ }
+ if (is_char_array) {
+ println(out, "char *base = (char *)((size_t)struct_base + %"PRIu64");",
+ (uint64_t)member->offset);
+        println(out, "buf = flatcc_json_parser_char_array(ctx, buf, end, base, %d);", (int)array_len);
+ }
+ if (is_array || is_vector) {
+ println(out, "buf = flatcc_json_parser_array_start(ctx, buf, end, &more);");
+ /* Note that we reuse `more` which is safe because it is updated at the end of the main loop. */
+ println(out, "while (more) {"); indent();
+ }
+ if (is_scalar) {
+ println(out, "%s val = 0;", tname);
+ println(out, "static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {");
+ indent(); indent();
+ /*
+         * The scope name may be empty when no namespace is used. In that
+         * case the local scope is the same as the global scope, but the
+         * duplicate lookup does not matter for performance.
+ */
+ if (is_enum) {
+ println(out, "%s_parse_json_enum,", snref.text);
+ println(out, "%s_local_%sjson_parser_enum,", out->S->basename, scope_name);
+ println(out, "%s_global_json_parser_enum, 0 };", out->S->basename);
+ } else {
+ println(out, "%s_local_%sjson_parser_enum,", out->S->basename, scope_name);
+ println(out, "%s_global_json_parser_enum, 0 };", out->S->basename);
+ }
+ unindent(); unindent();
+ }
+    /* It is not safe to acquire the pointer before building the element table or string. */
+ if (is_vector && !is_offset) {
+ println(out, "if (!(pval = flatcc_builder_extend_vector(ctx->ctx, 1))) goto failed;");
+ }
+ if (is_struct_container) {
+ if (!is_array && !is_char_array) {
+ /* `struct_base` is given as argument to struct parsers. */
+ println(out, "pval = (void *)((size_t)struct_base + %"PRIu64");", (uint64_t)member->offset);
+ }
+ } else if (is_struct && !is_vector) {
+ /* Same logic as scalars in tables, but scalars must be tested for default. */
+ println(out,
+ "if (!(pval = flatcc_builder_table_add(ctx->ctx, %"PRIu64", %"PRIu64", %"PRIu16"))) goto failed;",
+ (uint64_t)member->id, (uint64_t)member->size, (uint16_t)member->align);
+ }
+ if (is_scalar) {
+ println(out, "buf = flatcc_json_parser_%s(ctx, (mark = buf), end, &val);", tname_prefix);
+ println(out, "if (mark == buf) {"); indent();
+ println(out, "buf = flatcc_json_parser_symbolic_%s(ctx, (mark = buf), end, symbolic_parsers, &val);", tname_prefix);
+ println(out, "if (buf == mark || buf == end) goto failed;");
+ unindent(); println(out, "}");
+ if (!is_struct_container && !is_vector && !is_base64 && !is_base64url) {
+#if !FLATCC_JSON_PARSE_FORCE_DEFAULTS
+ /* We need to create a check for the default value and create a table field if not the default. */
+ if (!is_optional) {
+ if (!print_literal(st, &member->value, literal)) return -1;
+ println(out, "if (val != %s || (ctx->flags & flatcc_json_parser_f_force_add)) {", literal); indent();
+ }
+#endif
+ println(out, "if (!(pval = flatcc_builder_table_add(ctx->ctx, %"PRIu64", %"PRIu64", %hu))) goto failed;",
+ (uint64_t)member->id, (uint64_t)member->size, (short)member->align);
+#if !FLATCC_JSON_PARSE_FORCE_DEFAULTS
+#endif
+ }
+ /* For scalars in table field, and in struct container. */
+ if (is_array) {
+ println(out, "if (count) {"); indent();
+ println(out, "%s%s_write_to_pe(base, val);", out->nsc, tname_prefix);
+ println(out, "--count;");
+ println(out, "++base;");
+ unindent(); println(out, "} else if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "%s%s_write_to_pe(pval, val);", out->nsc, tname_prefix);
+ }
+ if (!is_struct_container && !is_vector && !(is_scalar && is_optional)) {
+ unindent(); println(out, "}");
+ }
+ } else if (is_struct) {
+ if (is_array) {
+ println(out, "if (count) {"); indent();
+ println(out, "buf = %s_parse_json_struct_inline(ctx, buf, end, base);", snref.text);
+ println(out, "--count;");
+ println(out, "base = (void *)((size_t)base + %"PRIu64");", member->type.ct->size);
+ unindent(); println(out, "} else if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "buf = %s_parse_json_struct_inline(ctx, buf, end, pval);", snref.text);
+ }
+ } else if (is_string) {
+ println(out, "buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);");
+ } else if (is_base64 || is_base64url) {
+ println(out, "buf = flatcc_json_parser_build_uint8_vector_base64(ctx, buf, end, &ref, %u);",
+ !is_base64);
+ } else if (is_table) {
+ println(out, "buf = %s_parse_json_table(ctx, buf, end, &ref);", snref.text);
+ } else if (is_union) {
+ if (is_union_vector) {
+ println(out, "buf = flatcc_json_parser_union_vector(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, %s_parse_json_union);",
+ (uint64_t)member->export_index, member->id, snref.text);
+ } else {
+ println(out, "buf = flatcc_json_parser_union(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, %s_parse_json_union);",
+ (uint64_t)member->export_index, member->id, snref.text);
+ }
+ } else if (is_union_type) {
+ println(out, "static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {");
+ indent(); indent();
+ println(out, "%s_parse_json_enum,", snref.text);
+ println(out, "%s_local_%sjson_parser_enum,", out->S->basename, scope_name);
+ println(out, "%s_global_json_parser_enum, 0 };", out->S->basename);
+ unindent(); unindent();
+ if (is_union_type_vector) {
+ println(out, "buf = flatcc_json_parser_union_type_vector(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, symbolic_parsers, %s_parse_json_union, %s_json_union_accept_type);",
+ (uint64_t)member->export_index, member->id, snref.text, snref.text);
+ } else {
+ println(out, "buf = flatcc_json_parser_union_type(ctx, buf, end, %"PRIu64", %"PRIu64", h_unions, symbolic_parsers, %s_parse_json_union);",
+ (uint64_t)member->export_index, member->id, snref.text);
+ }
+ } else if (!is_vector && !is_char_array) {
+ gen_panic(out, "internal error: unexpected type for trie member\n");
+ return -1;
+ }
+ if (is_vector) {
+ if (is_offset) {
+ /* Deal with table and string vector elements - unions cannot be elements. */
+ println(out, "if (!ref || !(pref = flatcc_builder_extend_offset_vector(ctx->ctx, 1))) goto failed;");
+            /* We don't need to worry about endian conversion - offset vectors fix this automatically. */
+ println(out, "*pref = ref;");
+ }
+ println(out, "buf = flatcc_json_parser_array_end(ctx, buf, end, &more);");
+ unindent(); println(out, "}");
+ if (is_offset) {
+ println(out, "ref = flatcc_builder_end_offset_vector(ctx->ctx);");
+ } else {
+ println(out, "ref = flatcc_builder_end_vector(ctx->ctx);");
+ }
+ }
+ if (is_array) {
+ println(out, "buf = flatcc_json_parser_array_end(ctx, buf, end, &more);");
+ unindent(); println(out, "}");
+ println(out, "if (count) {"); indent();
+ println(out, "if (ctx->flags & flatcc_json_parser_f_reject_array_underflow) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_underflow);");
+ unindent(); println(out, "}");
+ if (is_scalar) {
+ println(out, "memset(base, 0, count * sizeof(*base));");
+ } else {
+ println(out, "memset(base, 0, count * %"PRIu64");", (uint64_t)member->type.ct->size);
+ }
+ unindent(); println(out, "}");
+ }
+ if (is_nested == 1) {
+ is_nested = 2;
+ goto repeat_nested;
+ }
+ if (is_nested == 2) {
+ println(out, "if (!ref) goto failed;");
+ println(out, "ref = flatcc_builder_end_buffer(ctx->ctx, ref);");
+ unindent(); println(out, "} /* end nested */");
+ }
+ if (is_nested || is_vector || is_table || is_string || is_base64 || is_base64url) {
+ println(out, "if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, %"PRIu64"))) goto failed;", member->id);
+ println(out, "*pref = ref;");
+ }
+ return 0;
+}
+
+static void gen_field_match(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n)
+{
+ println(out, "buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, %d);", n);
+ println(out, "if (mark != buf) {"); indent();
+ gen_field_match_handler(out, ct, data, hint);
+ unindent(); println(out, "} else {"); indent();
+}
+
+/* This also handles union type enumerations. */
+static void gen_enum_match_handler(fb_output_t *out, fb_compound_type_t *ct, void *data, int unused_hint)
+{
+ fb_member_t *member = data;
+
+ (void)unused_hint;
+
+ /*
+ * This is rather unrelated to the rest, we just use the same
+ * trie generation logic. Here we simply need to assign a known
+ * value to the enum parsers output arguments.
+ */
+ switch (ct->type.st) {
+ case fb_bool:
+ case fb_ubyte:
+ case fb_ushort:
+ case fb_uint:
+ case fb_ulong:
+ println(out, "*value = UINT64_C(%"PRIu64"), *value_sign = 0;",
+ member->value.u);
+ break;
+ case fb_byte:
+ case fb_short:
+ case fb_int:
+ case fb_long:
+ if (member->value.i < 0) {
+            println(out, "*value = UINT64_C(%"PRIu64"), *value_sign = 1;", (uint64_t)-member->value.i);
+        } else {
+            println(out, "*value = UINT64_C(%"PRIu64"), *value_sign = 0;", (uint64_t)member->value.i);
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: invalid enum type\n");
+ }
+}
+
+static void gen_enum_match(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n)
+{
+ println(out, "buf = flatcc_json_parser_match_constant(ctx, (mark = buf), end, %d, aggregate);", n);
+ println(out, "if (buf != mark) {"); indent();
+ gen_enum_match_handler(out, ct, data, hint);
+ unindent(); println(out, "} else {"); indent();
+}
+
+static void gen_scope_match_handler(fb_output_t *out, fb_compound_type_t *unused_ct, void *data, int unused_hint)
+{
+ fb_compound_type_t *ct = data;
+ fb_scoped_name_t snt;
+
+ (void)unused_ct;
+ (void)unused_hint;
+ assert(ct->symbol.kind == fb_is_enum || ct->symbol.kind == fb_is_union);
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ /* May be included from another file. Unions also have _enum parsers. */
+ println(out, "buf = %s_parse_json_enum(ctx, buf, end, value_type, value, aggregate);", snt.text);
+}
+
+static void gen_scope_match(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n)
+{
+ println(out, "buf = flatcc_json_parser_match_scope(ctx, (mark = buf), end, %d);", n);
+ println(out, "if (buf != mark) {"); indent();
+ gen_scope_match_handler(out, ct, data, hint);
+ unindent(); println(out, "} else {"); indent();
+}
+
+static void gen_field_unmatched(fb_output_t *out)
+{
+ println(out, "buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);");
+}
+
+static void gen_enum_unmatched(fb_output_t *out)
+{
+ println(out, "return unmatched;");
+}
+
+static void gen_scope_unmatched(fb_output_t *out)
+{
+ println(out, "return unmatched;");
+}
+
+/*
+ * Generate a trie for all members of a compound type.
+ * This may be a struct or a table.
+ *
+ * We have a ternary trie where a search word w compares:
+ * w < wx_tag is one branch [a;x), iff a < x.
+ * w > wx_tag is another branch (y;b], iff b > y
+ * and w == wx_tag is a third branch [x;y].
+ *
+ * The sets [a;x) and (y;b] may be empty in which case a non-match
+ * action is triggered.
+ *
+ * [x..y] is a set of one or more fields that share the same tag at the
+ * current position. The first (and only the first) field name in this
+ * set may terminate withint the current tag (when suffix length k ==
+ * 0). There is therefore potentially both a direct field action and a
+ * sub-tree action. Once there is only one field in the set and the
+ * field name terminates within the current tag, the search word is
+ * masked and tested against the field tag and the search word is also
+ * tested for termination in the buffer at the first position after the
+ * field match. If the termination was not found a non-match action is
+ * triggered.
+ *
+ * A non-match action may be to silently consume the rest of the
+ * search identifier and then the json value, or to report an
+ * error.
+ *
+ * A match action triggers a json value parse of a known type
+ * which updates into a flatcc builder object. If the type is
+ * basic (string or scalar) the update is simple, otherwise if
+ * the type is within the same schema, we push context
+ * and switch to parse the nested type, otherwise we call
+ * a parser in another schema. When a trie is done, we
+ * switch back context if in the same schema. The context
+ * lives on a stack. This avoids deep recursion because
+ * schema parsers are not mutually recursive.
+ *
+ * The trie is also used to parse enums and scopes (namespace prefixes)
+ * with a slight modification.
+ */
+
+enum trie_type { table_trie, struct_trie, enum_trie, local_scope_trie, global_scope_trie };
+typedef struct trie trie_t;
+
+typedef void gen_match_f(fb_output_t *out, fb_compound_type_t *ct, void *data, int hint, int n);
+typedef void gen_unmatched_f(fb_output_t *out);
+
+struct trie {
+ dict_entry_t *dict;
+ gen_match_f *gen_match;
+ gen_unmatched_f *gen_unmatched;
+ /* Not used with scopes. */
+ fb_compound_type_t *ct;
+ int type;
+ int union_total;
+ int label;
+};
+
+/*
+ * This function is a final handler of the `gen_trie` function. Often
+ * just to handle a single match, but also to handle a prefix range
+ * special case like keys in `{ a, alpha, alpha2 }`.
+ *
+ * (See also special case of two non-prefix keys below).
+ *
+ * We know that all keys [a..b] have length in the range [pos..pos+8)
+ * and also that key x is proper prefix of key x + 1, x in [a..b).
+ *
+ * It is possible that `a == b`.
+ *
+ * We conduct a binary search by testing the middle for masked match and
+ * gradually refine until we do not have a match or have a single
+ * element match.
+ *
+ * (An alternative algorithm xors 8 byte tag with longest prefix and
+ * finds ceiling of log 2 using a few bit logic operations or intrinsic
+ * zero count and creates a jump table of at most 8 elements, but is
+ * hardly worthwhile vs 3 comparisons and 3 AND operations and often
+ * less than that.)
+ *
+ * Once we have a single element match we need to confirm the successor
+ * symbol is not any valid key - this differs among trie types and is
+ * therefore the polymorph match logic handles the final confirmed match
+ * or mismatch.
+ *
+ * Each trie type has special operations for implementing a matched and
+ * a failed match. Our job is to call these for each key in the range.
+ *
+ * While not the original intention, the `gen_prefix_trie` also handles the
+ * special case where the set has two keys where one is not a prefix of
+ * the other, but both terminate in the same tag. In this case we can
+ * immediately do an exact match test and skip the less than
+ * comparision. We need no special code for this, assuming the function
+ * is called correctly. This significantly reduces the branching in a
+ * case like "Red, Green, Blue".
+ *
+ * If `label` is positive, it is used to jump to additional match logic
+ * when a prefix was not matched. If 0 there is no additional logic and
+ * the symbol is considered unmatched immediately.
+ */
+static void gen_prefix_trie(fb_output_t *out, trie_t *trie, int a, int b, int pos, int label)
+{
+ int m, n;
+    uint64_t tag = 0, mask = 0;
+ const char *name;
+ int len;
+
+ /*
+ * Weigh the intersection towards the longer prefix. Notably if we
+ * have two keys it makes no sense to check the shorter key first.
+ */
+ m = a + (b - a + 1) / 2;
+
+ n = get_dict_tag(&trie->dict[m], pos, &tag, &mask, &name, &len);
+ if (n == 8) {
+ println(out, "if (w == 0x%"PRIx64") { /* \"%.*s\" */", tag, len, name); indent();
+ } else {
+ println(out, "if ((w & 0x%"PRIx64") == 0x%"PRIx64") { /* \"%.*s\" */",
+ mask, tag, len, name); indent();
+ }
+ if (m == a) {
+ /* There can be only one. */
+ trie->gen_match(out, trie->ct, trie->dict[m].data, trie->dict[m].hint, n);
+ if (label > 0) {
+ println(out, "goto pfguard%d;", label);
+ } else {
+ trie->gen_unmatched(out);
+ }
+ unindent(); println(out, "}");
+ unindent(); println(out, "} else { /* \"%.*s\" */", len, name); indent();
+ if (label > 0) {
+ println(out, "goto pfguard%d;", label);
+ } else {
+ trie->gen_unmatched(out);
+ }
+ } else {
+ if (m == b) {
+ trie->gen_match(out, trie->ct, trie->dict[m].data, trie->dict[m].hint, n);
+ if (label > 0) {
+ println(out, "goto pfguard%d;", label);
+ } else {
+ trie->gen_unmatched(out);
+ }
+ unindent(); println(out, "}");
+ } else {
+ gen_prefix_trie(out, trie, m, b, pos, label);
+ }
+ unindent(); println(out, "} else { /* \"%.*s\" */", len, name); indent();
+ gen_prefix_trie(out, trie, a, m - 1, pos, label);
+ }
+ unindent(); println(out, "} /* \"%.*s\" */", len, name);
+}
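+
+/*
+ * For illustration only: with the three keys "Blue", "Green" and "Red"
+ * (all terminating within the first tag), gen_trie below emits a single
+ * less-than branch on the tag of "Green" and both halves are then
+ * handled by gen_prefix_trie as direct masked equality tests
+ * ("Blue" on the left, "Red" then "Green" on the right).
+ */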
+
+static void gen_trie(fb_output_t *out, trie_t *trie, int a, int b, int pos)
+{
+ int x, k;
+ uint64_t tag = 0, mask = 0;
+ const char *name = "";
+ int len = 0, has_prefix_key = 0, prefix_guard = 0, has_descend;
+ int label = 0;
+
+ /*
+ * Process a trie at the level given by pos. A single level covers
+ * one tag.
+ *
+ * A tag is a range of 8 characters [pos..pos+7] that is read as a
+     * single big endian word and tested against a ternary trie
+ * generated in code. In generated code the tag is stored in "w".
+ *
+ * Normally trailing data in a tag is not a problem
+     * because the difference between two keys happens in the middle and
+ * trailing data is not valid key material. When the difference is
+ * at the end, we get a lot of special cases to handle.
+ *
+ * Regardless, when we believe we have a match, a final check is
+ * made to ensure that the next character after the match is not a
+     * valid key character - for quoted keys a valid terminator is a
+ * quote, for unquoted keys it can be one of several characters -
+ * therefore quoted keys are faster to parse, even if they consume
+ * more space. The trie does not care about these details, the
+ * gen_match function handles this transparently for different
+ * symbol types.
+ */
+
+
+ /*
+ * If we have one or two keys that terminate in this tag, there is no
+ * need to do a branch test before matching exactly.
+ *
+ * We observe that `gen_prefix_trie` actually handles this
+ * case well, even though it was not designed for it.
+ */
+ if ((get_dict_suffix_len(&trie->dict[a], pos) == 0) &&
+ (b == a || (b == a + 1 && get_dict_suffix_len(&trie->dict[b], pos) == 0))) {
+ gen_prefix_trie(out, trie, a, b, pos, 0);
+ return;
+ }
+
+ /*
+     * Due to the trie nature, we have a left, middle, and right range where
+     * all keys in the middle range compare the same at the current trie level
+     * when masked against the shortest (and first) key in the middle range.
+ */
+ x = split_dict_left(trie->dict, a, b, pos);
+
+ if (x > a) {
+ /*
+ * This is normal early branch with a key `a < x < b` such that
+ * any shared prefix ranges do not span x.
+ */
+ get_dict_tag(&trie->dict[x], pos, &tag, &mask, &name, &len);
+ println(out, "if (w < 0x%"PRIx64") { /* branch \"%.*s\" */", tag, len, name); indent();
+ gen_trie(out, trie, a, x - 1, pos);
+ unindent(); println(out, "} else { /* branch \"%.*s\" */", len, name); indent();
+ gen_trie(out, trie, x, b, pos);
+ unindent(); println(out, "} /* branch \"%.*s\" */", len, name);
+ return;
+ }
+ x = split_dict_right(trie->dict, a, b, pos);
+
+ /*
+ * [a .. x-1] is a non-empty sequence of prefixes,
+ * for example { a123, a1234, a12345 }.
+ * The keys might not terminate in the current tag. To find those
+ * that do, we will evaluate k such that:
+ * [a .. k-1] are prefixes that terminate in the current tag if any
+ * such exists.
+ * [x..b] are keys that are prefixes up to at least pos + 7 but
+ * do not terminate in the current tag.
+     * [k..x-1] are prefixes that do not terminate in the current tag.
+ * Note that they might not be prefixes when considering more than the
+ * current tag.
+     * The range [a .. x-1] can be generated with `gen_prefix_trie`.
+ *
+ * We generally have the form
+ *
+ * [a..b] =
+     * (a)<prefixes>, (k-1)<descend-prefix>, (k)<descend>, (x)<remainder>
+ *
+ * Where <prefixes> are keys that terminate at the current tag.
+ * <descend> are keys that have the prefixes as prefix but do not
+ * terminate at the current tag.
+     * <descend-prefix> is a single key that terminates exactly
+ * where the tag ends. If there are no descend keys it is part of
+ * prefixes, otherwise it is tested as a special case.
+     * <remainder> are any keys larger than the prefixes.
+ *
+     * The remainder keys cannot be tested before we are sure that no
+     * prefix matches - at least no prefix other than the
+     * descend-prefix. This is because less than comparisons are
+ * affected by trailing data within the tag caused by prefixes
+ * terminating early. Trailing data is not a problem if two keys are
+ * longer than the point where they differ even if they terminate
+ * within the current tag.
+ *
+     * Thus, if we have non-empty <descend> and non-empty <remainder>,
+     * the remainder must guard against any matches in <prefixes> but not
+ * against any matches in <descend>. If <descend> is empty and
+ * <prefixes> == <descend-prefix> a guard is also not needed.
+ */
+
+ /* Find first prefix that does not terminate at the current level, or x if absent */
+ k = split_dict_descend(trie->dict, a, x - 1, pos);
+ has_descend = k < x;
+
+ /* If we have a descend, process that in isolation. */
+ if (has_descend) {
+ has_prefix_key = k > a && get_dict_tag_len(&trie->dict[k - 1], pos) == 8;
+ get_dict_tag(&trie->dict[k], pos, &tag, &mask, &name, &len);
+ println(out, "if (w == 0x%"PRIx64") { /* descend \"%.*s\" */", tag, len, name); indent();
+ if (has_prefix_key) {
+ /* We have a key that terminates at the descend prefix. */
+ println(out, "/* descend prefix key \"%.*s\" */", len, name);
+ trie->gen_match(out, trie->ct, trie->dict[k - 1].data, trie->dict[k - 1].hint, 8);
+ println(out, "/* descend suffix \"%.*s\" */", len, name);
+ }
+ println(out, "buf += 8;");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, trie, k, x - 1, pos + 8);
+ if (has_prefix_key) {
+            unindent(); println(out, "} /* descend suffix \"%.*s\" */", len, name);
+ /* Here we move the <descend-prefix> key out of the <descend> range. */
+ --k;
+ }
+ unindent(); println(out, "} else { /* descend \"%.*s\" */", len, name); indent();
+ }
+ prefix_guard = a < k && x <= b;
+ if (prefix_guard) {
+ label = ++trie->label;
+ }
+ if (a < k) {
+ gen_prefix_trie(out, trie, a, k - 1, pos, label);
+ }
+ if (prefix_guard) {
+ /* All prefixes tested, but none matched. */
+ println(out, "goto endpfguard%d;", label);
+ margin();
+ println(out, "pfguard%d:", label);
+ unmargin();
+ }
+ if (x <= b) {
+ gen_trie(out, trie, x, b, pos);
+ } else if (a >= k) {
+ trie->gen_unmatched(out);
+ }
+ if (prefix_guard) {
+ margin();
+ println(out, "endpfguard%d:", label);
+ unmargin();
+ println(out, "(void)0;");
+ }
+ if (has_descend) {
+ unindent(); println(out, "} /* descend \"%.*s\" */", len, name);
+ }
+}
+
+
+/*
+ * Parsing symbolic constants:
+ *
+ * An enum parser parses the local symbols and translates them into
+ * numeric values.
+ *
+ * If a symbol wasn't matched, e.g. "Red", it might be matched with
+ * "Color.Red" but the enum parser does not handle this.
+ *
+ * Instead a scope parser maps each type in the scope to a call
+ * to an enum parser, e.g. "Color." maps to a color enum parser
+ * that understands "Red". If this also fails, a call is made
+ * to a global scope parser that maps a namespace to a local
+ * scope parser, for example "Graphics.Color.Red" first
+ * recognizes the namespace "Graphics." which may or may not
+ * be the same as the local scope tried earlier, then "Color."
+ * is matched and finally "Red".
+ *
+ * The scope and namespace parsers may cover extended namespaces from
+ * include files, so each file calls into dependencies as necessary.
+ * This means the same scope can have multiple parsers and must
+ * therefore be name prefixed by the basename of the include file.
+ *
+ * The enums can only exist in a single file.
+ *
+ * The local scope is defined as the scope in which the consuming
+ * field's container is defined, so if Pen is a table in Graphics
+ * with a field named "ink" and the pen is parsed as
+ * { "ink": "Color.Red" }, then Color would be parsed in the
+ * Graphics scope. If ink was an enum of type Color, the enum
+ * parser would be tried first. If ink was, say, an integer
+ * type, it would not try an enum parse first but try the local
+ * scope, then the namespace scope.
+ *
+ * It is permitted to have multiple symbols in a string when
+ * the enum type has flag attribute so values can be or'ed together.
+ * The parser does not attempt to validate this and will simply
+ * 'or' together multiple values after coercing each to the
+ * receiving field type: "Has.ink Has.shape Has.brush".
+ */
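+/*
+ * Illustration of the lookup order described above, with hypothetical
+ * schema names:
+ *
+ *   { "ink": "Red" }                  enum parser for the field's own type
+ *   { "ink": "Color.Red" }            local scope parser, then the Color enum parser
+ *   { "ink": "Graphics.Color.Red" }   global scope parser, then scope, then enum parser
+ *   { "has": "Has.ink Has.shape" }    multiple flag symbols or'ed together
+ */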
+
+
+/*
+ * Used by scalar/enum/union_type table fields to look up symbolic
+ * constants in the same scope as the table was defined, thus avoiding
+ * a namespace prefix.
+ *
+ * The matched name then calls into the type specific parser, which
+ * may be in a dependent file.
+ *
+ * Because each scope may be extended in dependent schema files
+ * we recreate the scope in full in each file.
+ */
+static void gen_local_scope_parser(void *context, fb_scope_t *scope)
+{
+ fb_output_t *out = context;
+ int n = 0;
+ trie_t trie;
+ fb_symbol_text_t scope_name;
+
+ fb_clear(trie);
+ fb_copy_scope(scope, scope_name);
+ if (((trie.dict = build_local_scope_dict(out->S, scope, &n)) == 0) && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return;
+ }
+ /* Not used for scopes. */
+ trie.ct = 0;
+ trie.type = local_scope_trie;
+ trie.gen_match = gen_scope_match;
+ trie.gen_unmatched = gen_scope_unmatched;
+ println(out, "static const char *%s_local_%sjson_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,",
+ out->S->basename, scope_name);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ if (n == 0) {
+ println(out, "/* Scope has no enum / union types to look up. */");
+ println(out, "return buf; /* unmatched; */");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "const char *unmatched = buf;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ println(out, "");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ }
+ println(out, "");
+ clear_dict(trie.dict);
+}
+
+/*
+ * This parses namespace prefixed types. Because scopes can be extended
+ * in dependent schema files, each file has its own global scope parser.
+ * The matched types call into type specific parsers that may be in
+ * a dependent file.
+ *
+ * When a local scope is also parsed, it should be tried before the
+ * global scope.
+ */
+static int gen_global_scope_parser(fb_output_t *out)
+{
+ int n = 0;
+ trie_t trie;
+ catalog_t catalog;
+
+ fb_clear(trie);
+ if (build_catalog(&catalog, out->S, 1, &out->S->root_schema->scope_index)) {
+ return -1;
+ }
+
+ if ((trie.dict = build_global_scope_dict(&catalog, &n)) == 0 && n > 0) {
+ clear_catalog(&catalog);
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ /* Not used for scopes. */
+ trie.ct = 0;
+ trie.type = global_scope_trie;
+ trie.gen_match = gen_scope_match;
+ trie.gen_unmatched = gen_scope_unmatched;
+ println(out, "static const char *%s_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", out->S->basename);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ if (n == 0) {
+ println(out, "/* Global scope has no enum / union types to look up. */");
+ println(out, "return buf; /* unmatched; */");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "const char *unmatched = buf;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ println(out, "");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ }
+ println(out, "");
+ clear_dict(trie.dict);
+ clear_catalog(&catalog);
+ return 0;
+}
+
+/*
+ * Constants have the form `"Red"` or `Red` but may also be part
+ * of a list of flags: `"Normal High Average"` or `Normal High
+ * Average`. `more` indicates more symbols follow.
+ *
+ * Returns the input argument if there was no valid match,
+ * `end` on syntax error, and `more=1` if matched and
+ * there are more constants to parse.
+ * Applies the matched and coerced constant to `pval`
+ * with a binary `or` operation, so `pval` must be initialized
+ * to 0 before the first constant in a list.
+ */
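+/*
+ * Sketch of the accumulation with hypothetical flag values Normal = 1 and
+ * High = 4: parsing the list "Normal High" or's 1 and then 4 into `pval`,
+ * leaving `pval` == 5 after the last constant.
+ */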
+static int gen_enum_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ int n = 0;
+ trie_t trie;
+
+ fb_clear(trie);
+ assert(ct->symbol.kind == fb_is_enum || ct->symbol.kind == fb_is_union);
+
+ if ((trie.dict = build_compound_dict(ct, &n)) == 0 && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ trie.ct = ct;
+ trie.type = enum_trie;
+ trie.gen_match = gen_enum_match;
+ trie.gen_unmatched = gen_enum_unmatched;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ println(out, "static const char *%s_parse_json_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", snt.text);
+ indent(); indent();
+ println(out, "int *value_sign, uint64_t *value, int *aggregate)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ if (n == 0) {
+ println(out, "/* Enum has no fields. */");
+ println(out, "*aggregate = 0;");
+ println(out, "return buf; /* unmatched; */");
+ unindent(); println(out, "}");
+ } else {
+ println(out, "const char *unmatched = buf;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ println(out, "");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ }
+ println(out, "");
+ clear_dict(trie.dict);
+ return 0;
+}
+
+/*
+ * We do not check for duplicate settings or missing struct fields.
+ * Missing fields are zeroed.
+ *
+ * TODO: we should track the nesting level because nested structs do not
+ * interact with the builder, so the builder's level limit will not kick
+ * in. As long as we get input from our own parser we should, however,
+ * be reasonably safe as nesting is bounded.
+ */
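+/*
+ * Illustration only (hypothetical struct fields): parsing { "x": 1, "y": 2 }
+ * into a struct declared with fields x, y and z leaves z at zero, as noted
+ * above for missing fields.
+ */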
+static int gen_struct_parser_inline(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ int n;
+ trie_t trie;
+
+ fb_clear(trie);
+ assert(ct->symbol.kind == fb_is_struct);
+ if ((trie.dict = build_compound_dict(ct, &n)) == 0 && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ trie.ct = ct;
+ trie.type = struct_trie;
+ trie.gen_match = gen_field_match;
+ trie.gen_unmatched = gen_field_unmatched;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_struct_inline(flatcc_json_parser_t *ctx, const char *buf, const char *end, void *struct_base)", snt.text);
+ println(out, "{"); indent();
+ println(out, "int more;");
+ if (n > 0) {
+ println(out, "flatcc_builder_ref_t ref;");
+ println(out, "void *pval;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ }
+ println(out, "");
+ println(out, "buf = flatcc_json_parser_object_start(ctx, buf, end, &more);");
+ println(out, "while (more) {"); indent();
+ if (n == 0) {
+ println(out, "/* Empty struct. */");
+ println(out, "buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);");
+ } else {
+ println(out, "buf = flatcc_json_parser_symbol_start(ctx, buf, end);");
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ }
+ println(out, "buf = flatcc_json_parser_object_end(ctx, buf, end , &more);");
+ unindent(); println(out, "}");
+ println(out, "return buf;");
+ if (n > 0) {
+ /* Set runtime error if no other error was set already. */
+ margin();
+ println(out, "failed:");
+ unmargin();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ }
+ unindent(); println(out, "}");
+ println(out, "");
+ clear_dict(trie.dict);
+ return 0;
+}
+
+static int gen_struct_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ assert(ct->symbol.kind == fb_is_struct);
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_struct(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result)", snt.text);
+ println(out, "{"); indent();
+ println(out, "void *pval;");
+ println(out, "");
+ println(out, "*result = 0;");
+ println(out, "if (!(pval = flatcc_builder_start_struct(ctx->ctx, %"PRIu64", %"PRIu16"))) goto failed;",
+ (uint64_t)ct->size, (uint16_t)ct->align);
+ println(out, "buf = %s_parse_json_struct_inline(ctx, buf, end, pval);", snt.text);
+ println(out, "if (ctx->error || !(*result = flatcc_builder_end_struct(ctx->ctx))) goto failed;");
+ println(out, "return buf;");
+ margin();
+ println(out, "failed:");
+ unmargin();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ unindent(); println(out, "}");
+ println(out, "");
+ println(out, "static inline int %s_parse_json_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx, const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid)", snt.text);
+ println(out, "{"); indent();
+ println(out, "return flatcc_json_parser_struct_as_root(B, ctx, buf, bufsiz, flags, fid, %s_parse_json_struct);",
+ snt.text);
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static int gen_table_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ fb_member_t *member;
+ int first, i, n;
+ int is_union, is_required;
+ trie_t trie;
+
+ fb_clear(trie);
+ assert(ct->symbol.kind == fb_is_table);
+ if ((trie.dict = build_compound_dict(ct, &n)) == 0 && n > 0) {
+ gen_panic(out, "internal error: could not build dictionary for json parser\n");
+ return -1;
+ }
+ trie.ct = ct;
+ trie.type = table_trie;
+ trie.gen_match = gen_field_match;
+ trie.gen_unmatched = gen_field_unmatched;
+
+ trie.union_total = 0;
+ for (i = 0; i < n; ++i) {
+ trie.union_total += !!trie.dict[i].hint;
+ }
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result)", snt.text);
+ println(out, "{"); indent();
+ println(out, "int more;");
+
+ if (n > 0) {
+ println(out, "void *pval;");
+ println(out, "flatcc_builder_ref_t ref, *pref;");
+ println(out, "const char *mark;");
+ println(out, "uint64_t w;");
+ }
+ if (trie.union_total) {
+ println(out, "size_t h_unions;");
+ }
+ println(out, "");
+ println(out, "*result = 0;");
+ println(out, "if (flatcc_builder_start_table(ctx->ctx, %"PRIu64")) goto failed;",
+ ct->count);
+ if (trie.union_total) {
+ println(out, "if (end == flatcc_json_parser_prepare_unions(ctx, buf, end, %"PRIu64", &h_unions)) goto failed;", (uint64_t)trie.union_total);
+ }
+ println(out, "buf = flatcc_json_parser_object_start(ctx, buf, end, &more);");
+ println(out, "while (more) {"); indent();
+ println(out, "buf = flatcc_json_parser_symbol_start(ctx, buf, end);");
+ if (n > 0) {
+ println(out, "w = flatcc_json_parser_symbol_part(buf, end);");
+ gen_trie(out, &trie, 0, n - 1, 0);
+ } else {
+ println(out, "/* Table has no fields. */");
+ println(out, "buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);");
+ }
+ println(out, "buf = flatcc_json_parser_object_end(ctx, buf, end, &more);");
+ unindent(); println(out, "}");
+ println(out, "if (ctx->error) goto failed;");
+ for (first = 1, i = 0; i < n; ++i) {
+ member = trie.dict[i].data;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ is_union = is_union_member(member);
+ is_required = member->metadata_flags & fb_f_required;
+ if (is_required) {
+ if (first) {
+ println(out, "if (!flatcc_builder_check_required_field(ctx->ctx, %"PRIu64")", member->id - !!is_union);
+ indent();
+ } else {
+ println(out, "|| !flatcc_builder_check_required_field(ctx->ctx, %"PRIu64")", member->id - !!is_union);
+ }
+ first = 0;
+ }
+ }
+ if (!first) {
+ unindent(); println(out, ") {"); indent();
+ println(out, "buf = flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_required);");
+ println(out, "goto failed;");
+ unindent(); println(out, "}");
+ }
+ if (trie.union_total) {
+ println(out, "buf = flatcc_json_parser_finalize_unions(ctx, buf, end, h_unions);");
+ }
+ println(out, "if (!(*result = flatcc_builder_end_table(ctx->ctx))) goto failed;");
+ println(out, "return buf;");
+ /* Set runtime error if no other error was set already. */
+ margin();
+ println(out, "failed:");
+ unmargin();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ unindent(); println(out, "}");
+ println(out, "");
+ println(out, "static inline int %s_parse_json_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx, const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid)", snt.text);
+ println(out, "{"); indent();
+ println(out, "return flatcc_json_parser_table_as_root(B, ctx, buf, bufsiz, flags, fid, %s_parse_json_table);",
+ snt.text);
+ unindent(); println(out, "}");
+ println(out, "");
+ clear_dict(trie.dict);
+ return 0;
+}
+
+static int gen_union_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt, snref;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+ println(out, "static const char *%s_parse_json_union(flatcc_json_parser_t *ctx, const char *buf, const char *end, uint8_t type, flatcc_builder_ref_t *result)", snt.text);
+ println(out, "{"); indent();
+ println(out, "");
+ println(out, "*result = 0;");
+ println(out, "switch (type) {");
+ println(out, "case 0: /* NONE */"); indent();
+ println(out, "return flatcc_json_parser_none(ctx, buf, end);");
+ unindent();
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_missing:
+ /* NONE is of type vt_missing and already handled. */
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ println(out, "case %u: /* %.*s */", (unsigned)member->value.u, n, s); indent();
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ println(out, "buf = %s_parse_json_table(ctx, buf, end, result);", snref.text);
+ break;
+ case fb_is_struct:
+ println(out, "buf = %s_parse_json_struct(ctx, buf, end, result);", snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound union member type\n");
+ return -1;
+ }
+ println(out, "break;");
+ unindent();
+ continue;
+ case vt_string_type:
+ println(out, "case %u: /* %.*s */", (unsigned)member->value.u, n, s); indent();
+ println(out, "buf = flatcc_json_parser_build_string(ctx, buf, end, result);");
+ println(out, "break;");
+ unindent();
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected union member type\n");
+ return -1;
+ }
+ }
+ /* Unknown union, but not an error if we allow schema forwarding. */
+ println(out, "default:"); indent();
+ println(out, "if (!(ctx->flags & flatcc_json_parser_f_skip_unknown)) {"); indent();
+ println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_union);");
+ unindent(); println(out, "} else {"); indent();
+ println(out, "return flatcc_json_parser_generic_json(ctx, buf, end);");
+ unindent(); println(out, "}");
+ unindent(); println(out, "}");
+ println(out, "if (ctx->error) return buf;");
+ println(out, "if (!*result) {");
+ indent(); println(out, "return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);");
+ unindent(); println(out, "}");
+ println(out, "return buf;");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static int gen_union_accept_type(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt, snref;
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+ println(out, "static int %s_json_union_accept_type(uint8_t type)", snt.text);
+ println(out, "{"); indent();
+ println(out, "switch (type) {");
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ if (member->type.type == vt_missing) {
+ println(out, "case 0: return 1; /* NONE */");
+ continue;
+ }
+ println(out, "case %u: return 1; /* %.*s */", (unsigned)member->value.u, n, s);
+ }
+ /* Unknown union, but not an error if we allow schema forwarding. */
+ println(out, "default: return 0;"); indent();
+ unindent(); println(out, "}");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static void gen_local_scope_prototype(void *context, fb_scope_t *scope)
+{
+ fb_output_t *out = context;
+ fb_symbol_text_t scope_name;
+
+ fb_copy_scope(scope, scope_name);
+
+ println(out, "static const char *%s_local_%sjson_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,",
+ out->S->basename, scope_name);
+ println(out, "int *value_type, uint64_t *value, int *aggregate);");
+}
+
+static int gen_root_table_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ println(out, "static int %s_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,", out->S->basename);
+ indent(); indent();
+ println(out, "const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ println(out, "flatcc_json_parser_t parser;");
+ println(out, "flatcc_builder_ref_t root;");
+ println(out, "");
+ println(out, "ctx = ctx ? ctx : &parser;");
+ println(out, "flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);");
+ if (out->S->file_identifier.type == vt_string) {
+ println(out, "if (flatcc_builder_start_buffer(B, \"%.*s\", 0, 0)) return -1;",
+ out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ println(out, "if (flatcc_builder_start_buffer(B, 0, 0, 0)) return -1;");
+ }
+ println(out, "%s_parse_json_table(ctx, buf, buf + bufsiz, &root);", snt.text);
+ println(out, "if (ctx->error) {"); indent();
+ println(out, "return ctx->error;");
+ unindent(); println(out, "}");
+ println(out, "if (!flatcc_builder_end_buffer(B, root)) return -1;");
+ println(out, "ctx->end_loc = buf;");
+ println(out, "return 0;");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+static int gen_root_struct_parser(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ println(out, "static int %s_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,", out->S->basename);
+ indent(); indent();
+ println(out, "const char *buf, size_t bufsiz, int flags)");
+ unindent(); unindent();
+ println(out, "{"); indent();
+ println(out, "flatcc_json_parser_t ctx_;");
+ println(out, "flatcc_builder_ref_t root;");
+ println(out, "");
+ println(out, "ctx = ctx ? ctx : &ctx_;");
+ println(out, "flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);");
+ if (out->S->file_identifier.type == vt_string) {
+ println(out, "if (flatcc_builder_start_buffer(B, \"%.*s\", 0, 0)) return -1;",
+ out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ println(out, "if (flatcc_builder_start_buffer(B, 0, 0, 0)) return -1;");
+ }
+ println(out, "buf = %s_parse_json_struct(ctx, buf, buf + bufsiz, &root);", snt.text);
+ println(out, "if (ctx->error) {"); indent();
+ println(out, "return ctx->error;");
+ unindent(); println(out, "}");
+ println(out, "if (!flatcc_builder_end_buffer(B, root)) return -1;");
+ println(out, "ctx->end_loc = buf;");
+ println(out, "return 0;");
+ unindent(); println(out, "}");
+ println(out, "");
+ return 0;
+}
+
+
+static int gen_root_parser(fb_output_t *out)
+{
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ if (!root_type) {
+ return 0;
+ }
+ if (root_type) {
+ switch (root_type->kind) {
+ case fb_is_table:
+ return gen_root_table_parser(out, (fb_compound_type_t *)root_type);
+ case fb_is_struct:
+ return gen_root_struct_parser(out, (fb_compound_type_t *)root_type);
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int gen_json_parser_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ fb_clear(snt);
+
+ if (root_type)
+ switch (root_type->kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ println(out, "/*");
+ println(out, " * Parses the default root table or struct of the schema and constructs a FlatBuffer.");
+ println(out, " *");
+ println(out, " * Builder `B` must be initialized. `ctx` can be null but will hold");
+ println(out, " * hold detailed error info on return when available.");
+ println(out, " * Returns 0 on success, or error code.");
+ println(out, " * `flags` : 0 by default, `flatcc_json_parser_f_skip_unknown` silently");
+ println(out, " * ignores unknown table and structs fields, and union types.");
+ println(out, " */");
+ println(out, "static int %s_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,",
+ out->S->basename);
+ indent(); indent();
+ println(out, "const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags);");
+ unindent(); unindent();
+ println(out, "");
+ break;
+ default:
+ break;
+ }
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_union(flatcc_json_parser_t *ctx, const char *buf, const char *end, uint8_t type, flatcc_builder_ref_t *pref);", snt.text);
+ println(out, "static int %s_json_union_accept_type(uint8_t type);", snt.text);
+ /* A union also has an enum parser to get the type. */
+ println(out, "static const char *%s_parse_json_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", snt.text);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate);");
+ unindent(); unindent();
+ break;
+ case fb_is_struct:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_struct_inline(flatcc_json_parser_t *ctx, const char *buf, const char *end, void *struct_base);", snt.text);
+ println(out, "static const char *%s_parse_json_struct(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result);", snt.text);
+ break;
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result);", snt.text);
+ break;
+ case fb_is_enum:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ println(out, "static const char *%s_parse_json_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", snt.text);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate);", snt.text);
+ unindent(); unindent();
+ break;
+ }
+ }
+ fb_scope_table_visit(&out->S->root_schema->scope_index, gen_local_scope_prototype, out);
+ println(out, "static const char *%s_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,", out->S->basename);
+ indent(); indent();
+ println(out, "int *value_type, uint64_t *value, int *aggregate);");
+ unindent(); unindent();
+ println(out, "");
+ return 0;
+}
+
+static int gen_json_parsers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_union_parser(out, (fb_compound_type_t *)sym);
+ gen_union_accept_type(out, (fb_compound_type_t *)sym);
+ gen_enum_parser(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_struct:
+ gen_struct_parser_inline(out, (fb_compound_type_t *)sym);
+ gen_struct_parser(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_table:
+ gen_table_parser(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_enum:
+ gen_enum_parser(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ fb_scope_table_visit(&out->S->root_schema->scope_index, gen_local_scope_parser, out);
+ gen_global_scope_parser(out);
+ gen_root_parser(out);
+ return 0;
+}
+
+int fb_gen_c_json_parser(fb_output_t *out)
+{
+ gen_json_parser_pretext(out);
+ gen_json_parser_prototypes(out);
+ gen_json_parsers(out);
+ gen_json_parser_footer(out);
+ return 0;
+}
diff --git a/flatcc/src/compiler/codegen_c_json_printer.c b/flatcc/src/compiler/codegen_c_json_printer.c
new file mode 100644
index 0000000..efc4c3d
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_json_printer.c
@@ -0,0 +1,732 @@
+#include "codegen_c.h"
+#include "flatcc/flatcc_types.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+static int gen_json_printer_pretext(fb_output_t *out)
+{
+ fprintf(out->fp,
+ "#ifndef %s_JSON_PRINTER_H\n"
+ "#define %s_JSON_PRINTER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "#include \"flatcc/flatcc_json_printer.h\"\n");
+ fb_gen_c_includes(out, "_json_printer.h", "_JSON_PRINTER_H");
+ gen_prologue(out);
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_json_printer_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_JSON_PRINTER_H */\n",
+ out->S->basenameup);
+ return 0;
+}
+
+static int gen_json_printer_enum(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ const char *tp, *tn, *ns;
+ int bit_flags;
+ uint64_t mask = 0;
+ char *constwrap = "";
+ char *ut = "";
+ fb_scalar_type_t st = ct->type.st;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+ tp = scalar_type_prefix(st);
+ tn = scalar_type_name(st);
+ ns = scalar_type_ns(st, out->nsc);
+
+ bit_flags = !!(ct->metadata_flags & fb_f_bit_flags);
+ if (bit_flags) {
+ switch (ct->size) {
+ case 1:
+ mask = UINT8_MAX, constwrap = "UINT8_C", ut = "uint8_t";
+ break;
+ case 2:
+ mask = UINT16_MAX, constwrap = "UINT16_C", ut = "uint16_t";
+ break;
+ case 4:
+ mask = UINT32_MAX, constwrap = "UINT32_C", ut = "uint32_t";
+ break;
+ default:
+ mask = UINT64_MAX, constwrap = "UINT64_C", ut = "uint64_t";
+ break;
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->value.type) {
+ case vt_uint:
+ mask &= ~(uint64_t)member->value.u;
+ break;
+ case vt_int:
+ mask &= ~(uint64_t)member->value.i;
+ break;
+ case vt_bool:
+ mask &= ~(uint64_t)member->value.b;
+ break;
+ }
+ }
+ }
+
+ fprintf(out->fp,
+ "static void %s_print_json_enum(flatcc_json_printer_t *ctx, %s%s v)\n{\n",
+ snt.text, ns, tn);
+ if (bit_flags) {
+ if (strcmp(ut, tn)) {
+ fprintf(out->fp, " %s x = (%s)v;\n", ut, ut);
+ } else {
+ fprintf(out->fp, " %s x = v;\n", ut);
+ }
+ fprintf(out->fp,
+ " int multiple = 0 != (x & (x - 1));\n"
+ " int i = 0;\n");
+
+ fprintf(out->fp, "\n");
+ /*
+ * If the value is not entirely within the known bit flags, print as
+ * a number.
+ */
+ if (mask) {
+ fprintf(out->fp,
+ " if ((x & %s(0x%"PRIx64")) || x == 0) {\n"
+ " flatcc_json_printer_%s(ctx, v);\n"
+ " return;\n"
+ " }\n",
+ constwrap, mask, tp);
+ }
+ /*
+ * Test if multiple bits set. We may have a configuration option
+ * that requires multiple flags to be quoted like `color: "Red Green"`
+ * but unquoted if just a single value like `color: Green`.
+ *
+ * The index `i` is used to add space separators much like an
+ * index is provided for struct members to handle comma.
+ */
+ fprintf(out->fp, " flatcc_json_printer_delimit_enum_flags(ctx, multiple);\n");
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->value.type) {
+ case vt_uint:
+ fprintf(out->fp, " if (x & %s(0x%"PRIx64")) flatcc_json_printer_enum_flag(ctx, i++, \"%.*s\", %ld);\n",
+ constwrap, member->value.u, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_int:
+ fprintf(out->fp, " if (x & %s(0x%"PRIx64")) flatcc_json_printer_enum_flag(ctx, i++, \"%.*s\", %ld);\n",
+ constwrap, (uint64_t)member->value.i, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_bool:
+ fprintf(out->fp, " if (x & %s(0x%"PRIx64")) flatcc_json_printer_enum_flag(ctx, i++, \"%.*s\", %ld);\n",
+ constwrap, (uint64_t)member->value.b, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected value type for enum json_print");
+ break;
+ }
+ }
+ fprintf(out->fp, " flatcc_json_printer_delimit_enum_flags(ctx, multiple);\n");
+ } else {
+ fprintf(out->fp, "\n switch (v) {\n");
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->value.type) {
+ case vt_uint:
+ fprintf(out->fp, " case %s(%"PRIu64"): flatcc_json_printer_enum(ctx, \"%.*s\", %ld); break;\n",
+ constwrap, member->value.u, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_int:
+ fprintf(out->fp, " case %s(%"PRId64"): flatcc_json_printer_enum(ctx, \"%.*s\", %ld); break;\n",
+ constwrap, member->value.i, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_bool:
+ fprintf(out->fp, " case %s(%u): flatcc_json_printer_enum(ctx, \"%.*s\", %ld); break;\n",
+ constwrap, member->value.b, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected value type for enum json_print");
+ break;
+ }
+ }
+ fprintf(out->fp,
+ " default: flatcc_json_printer_%s(ctx, v); break;\n"
+ " }\n",
+ tp);
+ }
+ fprintf(out->fp, "}\n\n");
+ return 0;
+}
+
+static int gen_json_printer_union_type(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_print_json_union_type(flatcc_json_printer_t *ctx, flatbuffers_utype_t type)\n"
+ "{\n switch (type) {\n",
+ snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->type.type == vt_missing) {
+ continue;
+ }
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_enum(ctx, \"%.*s\", %ld);\n"
+ " break;\n",
+ (unsigned)member->value.u, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ }
+ fprintf(out->fp,
+ " default:\n"
+ " flatcc_json_printer_enum(ctx, \"NONE\", 4);\n"
+ " break;\n");
+ fprintf(out->fp,
+ " }\n}\n\n");
+ return 0;
+}
+
+static int gen_json_printer_union_member(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_print_json_union(flatcc_json_printer_t *ctx, flatcc_json_printer_union_descriptor_t *ud)\n"
+ "{\n switch (ud->type) {\n",
+ snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_missing:
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_union_table(ctx, ud, %s_print_json_table);\n"
+ " break;\n",
+ (unsigned)member->value.u, snref.text);
+ continue;
+ case fb_is_struct:
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_union_struct(ctx, ud, %s_print_json_struct);\n"
+ " break;\n",
+ (unsigned)member->value.u, snref.text);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected union type\n");
+ return -1;
+ }
+ case vt_string_type:
+ fprintf(out->fp,
+ " case %u:\n"
+ " flatcc_json_printer_union_string(ctx, ud);\n"
+ " break;\n",
+ (unsigned)member->value.u);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected union type\n");
+ return -1;
+ }
+ }
+ fprintf(out->fp,
+ " default:\n"
+ " break;\n");
+ fprintf(out->fp,
+ " }\n}\n\n");
+ return 0;
+}
+
+static int gen_json_printer_union(fb_output_t *out, fb_compound_type_t *ct)
+{
+ gen_json_printer_union_type(out, ct);
+ gen_json_printer_union_member(out, ct);
+ return 0;
+}
+
+static int gen_json_printer_struct(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int index = 0;
+ const char *tp;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_print_json_struct(flatcc_json_printer_t *ctx, const void *p)\n"
+ "{\n",
+ snt.text);
+ for (sym = ct->members; sym; ++index, sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tp = scalar_type_prefix(member->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_fixed_array_type:
+ tp = scalar_type_prefix(member->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_array_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %d);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, member->type.len);
+ break;
+ case vt_fixed_array_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(out->fp,
+ " flatcc_json_printer_%s_enum_array_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %d, %s_print_json_enum);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, member->type.len, snref.text);
+ break;
+#else
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_array_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %d);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, member->type.len);
+ break;
+#endif
+ case fb_is_struct:
+ fprintf(out->fp,
+ " flatcc_json_printer_embedded_struct_array_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %"PRIu64", %"PRIu64", %s_print_json_struct);\n",
+ index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len,
+ (uint64_t)member->type.ct->size, (uint64_t)member->type.len, snref.text);
+ }
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(out->fp,
+ " flatcc_json_printer_%s_enum_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %s_print_json_enum);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+#else
+ tp = scalar_type_prefix(member->type.ct->type.st);
+ fprintf(
+ out->fp,
+ " flatcc_json_printer_%s_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld);\n",
+ tp, index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+#endif
+ case fb_is_struct:
+ fprintf(out->fp,
+ " flatcc_json_printer_embedded_struct_field(ctx, %d, p, %"PRIu64", \"%.*s\", %ld, %s_print_json_struct);\n",
+ index, (uint64_t)member->offset, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ }
+ break;
+ }
+ }
+ fprintf(out->fp, "}\n\n");
+ fprintf(out->fp,
+ "static inline int %s_print_json_as_root(flatcc_json_printer_t *ctx, const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_json_printer_struct_as_root(ctx, buf, bufsiz, fid, %s_print_json_struct);\n}\n\n",
+ snt.text, snt.text);
+ return 0;
+}
+
+static int gen_json_printer_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ const char *tp;
+ int is_optional;
+ int ret = 0;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+    /* Fields are printed in field id order for consistency across schema versions. */
+ fprintf(out->fp,
+ "static void %s_print_json_table(flatcc_json_printer_t *ctx, flatcc_json_printer_table_descriptor_t *td)\n"
+ "{",
+ snt.text);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ sym = &member->symbol;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ is_optional = !!(member->flags & fb_fm_optional);
+ fprintf(out->fp, "\n ");
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tp = scalar_type_prefix(member->type.st);
+ if (is_optional) {
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_optional_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ } else {
+ fb_literal_t literal;
+ if (!print_literal(member->type.st, &member->value, literal)) return -1;
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, literal);
+ }
+ break;
+ case vt_vector_type:
+ if (member->metadata_flags & (fb_f_base64 | fb_f_base64url)) {
+ fprintf(out->fp,
+ "flatcc_json_printer_uint8_vector_base64_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %u);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len,
+ !(member->metadata_flags & fb_f_base64));
+ } else if (member->nest) {
+ fb_compound_name((fb_compound_type_t *)&member->nest->symbol, &snref);
+ if (member->nest->symbol.kind == fb_is_table) {
+ /*
+ * Always set fid to 0 since it is difficult to know what is right.
+ * We do know the type from the field attribute.
+ */
+ fprintf(out->fp,
+ "flatcc_json_printer_table_as_nested_root(ctx, td, %"PRIu64", \"%.*s\", %ld, 0, %s_print_json_table);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ } else {
+ /*
+ * Always set fid to 0 since it is difficult to know what is right.
+ * We do know the type from the field attribute.
+ */
+ fprintf(out->fp,
+ "flatcc_json_printer_struct_as_nested_root(ctx, td, %"PRIu64", \"%.*s\", %ld, 0, %s_print_json_struct);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ }
+ } else {
+ tp = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "flatcc_json_printer_string_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "flatcc_json_printer_string_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ tp = scalar_type_prefix(member->type.ct->type.st);
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ if (is_optional) {
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_enum_optional_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_enum);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ } else {
+ fb_literal_t literal;
+ if (!print_literal(member->type.ct->type.st, &member->value, literal)) return -1;
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_enum_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s, %s_print_json_enum);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, literal, snref.text);
+ }
+#else
+ if (is_optional) {
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_optional_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ } else {
+ fb_literal_t literal;
+ if (!print_literal(member->type.ct->type.st, &member->value, literal)) return -1;
+ fprintf( out->fp,
+ "flatcc_json_printer_%s_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, literal);
+ }
+#endif
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_json_printer_struct_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_struct);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_json_printer_table_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_table);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_json_printer_union_field(ctx, td, %"PRIu64", \"%.*s\", %ld, "
+ "%s_print_json_union_type, %s_print_json_union);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type for table json_print");
+ goto fail;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_json_printer_table_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_table);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+ case fb_is_enum:
+ tp = scalar_type_prefix(member->type.ct->type.st);
+#if FLATCC_JSON_PRINT_MAP_ENUMS
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_enum_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %s_print_json_enum);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text);
+ break;
+#else
+ fprintf(out->fp,
+ "flatcc_json_printer_%s_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld);",
+ tp, member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len);
+ break;
+#endif
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_json_printer_struct_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, %"PRIu64", %s_print_json_struct);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, (uint64_t)member->size, snref.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_json_printer_union_vector_field(ctx, td, %"PRIu64", \"%.*s\", %ld, "
+ "%s_print_json_union_type, %s_print_json_union);",
+ member->id, (int)sym->ident->len, sym->ident->text, sym->ident->len, snref.text, snref.text);
+ break;
+
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type for table json_print");
+ goto fail;
+ }
+ break;
+ }
+ }
+ fprintf(out->fp, "\n}\n\n");
+ fprintf(out->fp,
+ "static inline int %s_print_json_as_root(flatcc_json_printer_t *ctx, const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_json_printer_table_as_root(ctx, buf, bufsiz, fid, %s_print_json_table);\n}\n\n",
+ snt.text, snt.text);
+done:
+ return ret;
+fail:
+ ret = -1;
+ goto done;
+}
+
+/*
+ * Only tables are mutually recursive. Structs are sorted and unions are
+ * defined earlier, depending on the table prototypes.
+ */
+static int gen_json_printer_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ fb_clear(snt);
+
+ if (root_type)
+ switch (root_type->kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ fprintf(out->fp,
+ "/*\n"
+ " * Prints the default root table or struct from a buffer which must have\n"
+ " * the schema declared file identifier, if any. It is also possible to\n"
+ " * call the type specific `print_json_as_root` function wich accepts an\n"
+ " * optional identifier (or 0) as argument. The printer `ctx` object must\n"
+ " * be initialized with the appropriate output type, or it can be 0 which\n"
+ " * defaults to stdout. NOTE: `ctx` is not generally allowed to be null, only\n"
+ " * here for a simplified interface.\n"
+ " */\n");
+ fprintf(out->fp,
+ "static int %s_print_json(flatcc_json_printer_t *ctx, const char *buf, size_t bufsiz);\n\n",
+ out->S->basename);
+ break;
+ default:
+ break;
+ }
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static void %s_print_json_union_type(flatcc_json_printer_t *ctx, flatbuffers_utype_t type);\n"
+ "static void %s_print_json_union(flatcc_json_printer_t *ctx, flatcc_json_printer_union_descriptor_t *ud);\n",
+ snt.text, snt.text);
+ break;
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static void %s_print_json_table(flatcc_json_printer_t *ctx, flatcc_json_printer_table_descriptor_t *td);\n",
+ snt.text);
+ break;
+ case fb_is_struct:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static void %s_print_json_struct(flatcc_json_printer_t *ctx, const void *p);\n",
+ snt.text);
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_json_printer_enums(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ gen_json_printer_enum(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_json_printer_unions(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_json_printer_union(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_json_printer_structs(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ gen_json_printer_struct(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_json_printer_tables(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_json_printer_table(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+/* Same for structs and tables. */
+static int gen_root_type_printer(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static int %s_print_json(flatcc_json_printer_t *ctx, const char *buf, size_t bufsiz)\n",
+ out->S->basename);
+ fprintf(out->fp,
+ "{\n"
+ " flatcc_json_printer_t printer;\n"
+ "\n"
+ " if (ctx == 0) {\n"
+ " ctx = &printer;\n"
+ " flatcc_json_printer_init(ctx, 0);\n"
+ " }\n"
+ " return %s_print_json_as_root(ctx, buf, bufsiz, ",
+ snt.text);
+ if (out->S->file_identifier.type == vt_string) {
+ fprintf(out->fp,
+ "\"%.*s\");\n",
+ out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ fprintf(out->fp,
+ "0);");
+ }
+ fprintf(out->fp,
+ "}\n\n");
+ return 0;
+}
+
+static int gen_json_root_printer(fb_output_t *out)
+{
+ fb_symbol_t *root_type = out->S->root_type.type;
+
+ if (!root_type) {
+ return 0;
+ }
+ if (root_type) {
+ switch (root_type->kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ return gen_root_type_printer(out, (fb_compound_type_t *)root_type);
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+int fb_gen_c_json_printer(fb_output_t *out)
+{
+ gen_json_printer_pretext(out);
+ gen_json_printer_prototypes(out);
+ gen_json_printer_enums(out);
+ gen_json_printer_unions(out);
+ gen_json_printer_structs(out);
+ gen_json_printer_tables(out);
+ gen_json_root_printer(out);
+ gen_json_printer_footer(out);
+ return 0;
+}
diff --git a/flatcc/src/compiler/codegen_c_reader.c b/flatcc/src/compiler/codegen_c_reader.c
new file mode 100644
index 0000000..6de0f21
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_reader.c
@@ -0,0 +1,1928 @@
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "codegen_c.h"
+#include "codegen_c_sort.h"
+
+static inline int match_kw_identifier(fb_symbol_t *sym)
+{
+ return (sym->ident->len == 10 &&
+ memcmp(sym->ident->text, "identifier", 10) == 0);
+}
+
+/*
+ * Use of file identifiers for undeclared roots is fuzzy, but we need an
+ * identifier for all, so we use the one defined for the current schema
+ * file and allow the user to override. This avoids tedious runtime file
+ * id arguments to all create calls.
+ *
+ * As a later addition to FlatBuffers, type hashes may replace file
+ * identifiers when explicitly stated. These are FNV-1a hashes of the
+ * fully qualified type name (dot separated).
+ *
+ * We generate the type hash both as a native integer constant for use
+ * in switch statements, and encoded as a little endian C string for use
+ * as a file identifier.
+ */
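+/*
+ * Sketch only, assuming the conventional 32-bit FNV-1a parameters (the
+ * flatcc runtime defines the authoritative hash):
+ *
+ *   uint32_t h = 2166136261UL;
+ *   while (*name) { h ^= (uint8_t)*name++; h *= 16777619UL; }
+ *
+ * The identifier string generated below encodes the hash little endian as
+ * hex escapes, so a hash of 0x12345678 becomes "\x78\x56\x34\x12".
+ */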
+static void print_type_identifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ uint8_t buf[17];
+ uint8_t *p;
+ uint8_t x;
+ int i;
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+ const char *name;
+ uint32_t type_hash;
+ int conflict = 0;
+ fb_symbol_t *sym;
+ const char *file_identifier;
+ int file_identifier_len;
+ const char *quote;
+
+ fb_clear(snt);
+
+ fb_compound_name(ct, &snt);
+ name = snt.text;
+ type_hash = ct->type_hash;
+
+ /*
+ * It's not practical to detect all possible name conflicts, but
+ * 'identifier' is common enough to require special handling.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ if (match_kw_identifier(sym)) {
+ conflict = 1;
+ break;
+ }
+ }
+ if (out->S->file_identifier.type == vt_string) {
+ quote = "\"";
+ file_identifier = out->S->file_identifier.s.s;
+ file_identifier_len = out->S->file_identifier.s.len;
+ } else {
+ quote = "";
+ file_identifier = "0";
+ file_identifier_len = 1;
+ }
+ fprintf(out->fp,
+ "#ifndef %s_file_identifier\n"
+ "#define %s_file_identifier %s%.*s%s\n"
+ "#endif\n",
+ name, name, quote, file_identifier_len, file_identifier, quote);
+ if (!conflict) {
+ /* For backwards compatibility. */
+ fprintf(out->fp,
+ "/* deprecated, use %s_file_identifier */\n"
+ "#ifndef %s_identifier\n"
+ "#define %s_identifier %s%.*s%s\n"
+ "#endif\n",
+ name, name, name, quote, file_identifier_len, file_identifier, quote);
+ }
+ fprintf(out->fp,
+ "#define %s_type_hash ((%sthash_t)0x%lx)\n",
+ name, nsc, (unsigned long)(type_hash));
+ p = buf;
+ i = 4;
+ while (i--) {
+ *p++ = '\\';
+ *p++ = 'x';
+ x = type_hash & 0x0f;
+ x += x > 9 ? 'a' - 10 : '0';
+ type_hash >>= 4;
+ p[1] = x;
+ x = type_hash & 0x0f;
+ x += x > 9 ? 'a' - 10 : '0';
+ type_hash >>= 4;
+ p[0] = x;
+ p += 2;
+ }
+ *p = '\0';
+ fprintf(out->fp,
+ "#define %s_type_identifier \"%s\"\n",
+ name, buf);
+}
+
+static void print_file_extension(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ const char *name;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+ name = snt.text;
+
+ if (out->S->file_extension.type == vt_string) {
+ fprintf(out->fp,
+ "#ifndef %s_file_extension\n"
+ "#define %s_file_extension \"%.*s\"\n"
+ "#endif\n",
+ name, name, out->S->file_extension.s.len, out->S->file_extension.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %s_file_extension\n"
+ "#define %s_file_extension \"%s\"\n"
+ "#endif\n",
+ name, name, out->opts->default_bin_ext);
+ }
+}
+
+/* Finds the first occurrence of a matching key when the vector is sorted on the named field. */
+static void gen_find(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ /*
+ * E: Element accessor (elem = E(vector, index)).
+ * L: Length accessor (length = L(vector)).
+ * A: Field accessor (or the identity function), result must match the diff function D's first arg.
+ * V: The vector to search (assuming sorted).
+ * T: The scalar, enum or string key type, (either the element, or a field of the element).
+ * K: The search key.
+ * Kn: optional key length so external strings do not have to be zero terminated.
+ * D: the diff function D(v, K, Kn) :: v - <K, Kn>
+ *
+ * returns index (0..len - 1), or not_found (-1).
+ */
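+    /*
+     * Illustrative use of the generated find (hypothetical table `Monster`
+     * with a string key field `name`, default `flatbuffers_` namespace):
+     *
+     *   size_t i = Monster_vec_find_by_name(monsters, "Orc");
+     *   if (i != flatbuffers_not_found) { ... }
+     */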
+ fprintf(out->fp,
+ "#include <string.h>\n"
+ "static const size_t %snot_found = (size_t)-1;\n"
+ "static const size_t %send = (size_t)-1;\n"
+ "#define __%sidentity(n) (n)\n"
+ "#define __%smin(a, b) ((a) < (b) ? (a) : (b))\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "/* Subtraction doesn't work for unsigned types. */\n"
+ "#define __%sscalar_cmp(x, y, n) ((x) < (y) ? -1 : (x) > (y))\n"
+ "static inline int __%sstring_n_cmp(%sstring_t v, const char *s, size_t n)\n"
+ "{ size_t nv = %sstring_len(v); int x = strncmp(v, s, nv < n ? nv : n);\n"
+ " return x != 0 ? x : nv < n ? -1 : nv > n; }\n"
+ "/* `n` arg unused, but needed by string find macro expansion. */\n"
+ "static inline int __%sstring_cmp(%sstring_t v, const char *s, size_t n) { (void)n; return strcmp(v, s); }\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "/* A = identity if searching scalar vectors rather than key fields. */\n"
+ "/* Returns lowest matching index or not_found. */\n"
+ "#define __%sfind_by_field(A, V, E, L, K, Kn, T, D)\\\n"
+ "{ T v__tmp; size_t a__tmp = 0, b__tmp, m__tmp; if (!(b__tmp = L(V))) { return %snot_found; }\\\n"
+ " --b__tmp;\\\n"
+ " while (a__tmp < b__tmp) {\\\n"
+ " m__tmp = a__tmp + ((b__tmp - a__tmp) >> 1);\\\n"
+ " v__tmp = A(E(V, m__tmp));\\\n"
+ " if ((D(v__tmp, (K), (Kn))) < 0) {\\\n"
+ " a__tmp = m__tmp + 1;\\\n"
+ " } else {\\\n"
+ " b__tmp = m__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " if (a__tmp == b__tmp) {\\\n"
+ " v__tmp = A(E(V, a__tmp));\\\n"
+ " if (D(v__tmp, (K), (Kn)) == 0) {\\\n"
+ " return a__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " return %snot_found;\\\n"
+ "}\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sfind_by_scalar_field(A, V, E, L, K, T)\\\n"
+ "__%sfind_by_field(A, V, E, L, K, 0, T, __%sscalar_cmp)\n"
+ "#define __%sfind_by_string_field(A, V, E, L, K)\\\n"
+ "__%sfind_by_field(A, V, E, L, K, 0, %sstring_t, __%sstring_cmp)\n"
+ "#define __%sfind_by_string_n_field(A, V, E, L, K, Kn)\\\n"
+ "__%sfind_by_field(A, V, E, L, K, Kn, %sstring_t, __%sstring_n_cmp)\n",
+ nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_find_by_scalar_field(N, NK, TK)\\\n"
+ "static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "__%sfind_by_scalar_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, TK)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_find(N, T)\\\n"
+ "static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%sfind_by_scalar_field(__%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_find_by_string_field(N, NK) \\\n"
+ "/* Note: find only works on vectors sorted by this field. */\\\n"
+ "static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "__%sfind_by_string_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_find_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%sfind_by_string_n_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_find_by_scalar_field(N, NK, TK)\\\n"
+ "static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_find_by_ ## NK(vec__tmp, key__tmp); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_find_by_string_field(N, NK) \\\n"
+ "static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_find_by_ ## NK(vec__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_find_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_find_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\n",
+ nsc);
+}
+
+static void gen_union(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ fprintf(out->fp,
+ "typedef struct %sunion {\n"
+ " %sunion_type_t type;\n"
+ " %sgeneric_t value;\n"
+ "} %sunion_t;\n"
+ "typedef struct %sunion_vec {\n"
+ " const %sunion_type_t *type;\n"
+ " const %suoffset_t *value;\n"
+ "} %sunion_vec_t;\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "typedef struct %smutable_union {\n"
+ " %sunion_type_t type;\n"
+ " %smutable_generic_t value;\n"
+ "} %smutable_union_t;\n"
+ "typedef struct %smutable_union_vec {\n"
+ " %sunion_type_t *type;\n"
+ " %suoffset_t *value;\n"
+ "} %smutable_union_vec_t;\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline %smutable_union_t %smutable_union_cast(%sunion_t u__tmp)\\\n"
+ "{ %smutable_union_t mu = { u__tmp.type, (%smutable_generic_t)u__tmp.value };\\\n"
+ " return mu; }\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline %smutable_union_vec_t %smutable_union_vec_cast(%sunion_vec_t uv__tmp)\\\n"
+ "{ %smutable_union_vec_t muv =\\\n"
+ " { (%sunion_type_t *)uv__tmp.type, (%suoffset_t *)uv__tmp.value }; return muv; }\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sunion_type_field(ID, t)\\\n"
+ "{\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " return offset__tmp ? __%sread_scalar_at_byteoffset(__%sutype, t, offset__tmp) : 0;\\\n"
+ "}\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline %sstring_t %sstring_cast_from_union(const %sunion_t u__tmp)\\\n"
+ "{ return %sstring_cast_from_generic(u__tmp.value); }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union_field(NS, ID, N, NK, T, r)\\\n"
+ "static inline T ## _union_type_t N ## _ ## NK ## _type_get(N ## _table_t t__tmp)\\\n"
+ "__## NS ## union_type_field(((ID) - 1), t__tmp)\\\n"
+ "static inline NS ## generic_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\\\n", nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "static inline T ## _union_type_t N ## _ ## NK ## _type(N ## _table_t t__tmp)\\\n"
+ "__## NS ## union_type_field(((ID) - 1), t__tmp)\\\n"
+ "static inline NS ## generic_t N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\\\n");
+ }
+ fprintf(out->fp,
+ "static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__## NS ## field_present(ID, t__tmp)\\\n"
+ "static inline T ## _union_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\\\n"
+ "{ T ## _union_t u__tmp = { 0, 0 }; u__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\\\n"
+ " if (u__tmp.type == 0) return u__tmp; u__tmp.value = N ## _ ## NK ## _get(t__tmp); return u__tmp; }\\\n"
+ "static inline NS ## string_t N ## _ ## NK ## _as_string(N ## _table_t t__tmp)\\\n"
+ "{ return NS ## string_cast_from_generic(N ## _ ## NK ## _get(t__tmp)); }\\\n"
+ "\n");
+ fprintf(out->fp,
+ "#define __%sdefine_union_vector_ops(NS, T)\\\n"
+ "static inline size_t T ## _union_vec_len(T ## _union_vec_t uv__tmp)\\\n"
+ "{ return NS ## vec_len(uv__tmp.type); }\\\n"
+ "static inline T ## _union_t T ## _union_vec_at(T ## _union_vec_t uv__tmp, size_t i__tmp)\\\n"
+ "{ T ## _union_t u__tmp = { 0, 0 }; size_t n__tmp = NS ## vec_len(uv__tmp.type);\\\n"
+ " FLATCC_ASSERT(n__tmp > (i__tmp) && \"index out of range\"); u__tmp.type = uv__tmp.type[i__tmp];\\\n"
+ " /* Unknown type is treated as NONE for schema evolution. */\\\n"
+ " if (u__tmp.type == 0) return u__tmp;\\\n"
+ " u__tmp.value = NS ## generic_vec_at(uv__tmp.value, i__tmp); return u__tmp; }\\\n"
+ "static inline NS ## string_t T ## _union_vec_at_as_string(T ## _union_vec_t uv__tmp, size_t i__tmp)\\\n"
+ "{ return (NS ## string_t) NS ## generic_vec_at_as_string(uv__tmp.value, i__tmp); }\\\n"
+ "\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union_vector(NS, T)\\\n"
+ "typedef NS ## union_vec_t T ## _union_vec_t;\\\n"
+ "typedef NS ## mutable_union_vec_t T ## _mutable_union_vec_t;\\\n"
+ "static inline T ## _mutable_union_vec_t T ## _mutable_union_vec_cast(T ## _union_vec_t u__tmp)\\\n"
+ "{ return NS ## mutable_union_vec_cast(u__tmp); }\\\n"
+ "__## NS ## define_union_vector_ops(NS, T)\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union(NS, T)\\\n"
+ "typedef NS ## union_t T ## _union_t;\\\n"
+ "typedef NS ## mutable_union_t T ## _mutable_union_t;\\\n"
+ "static inline T ## _mutable_union_t T ## _mutable_union_cast(T ## _union_t u__tmp)\\\n"
+ "{ return NS ## mutable_union_cast(u__tmp); }\\\n"
+ "__## NS ## define_union_vector(NS, T)\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_union_vector_field(NS, ID, N, NK, T, r)\\\n"
+ "__## NS ## define_vector_field(ID - 1, N, NK ## _type, T ## _vec_t, r)\\\n"
+ "__## NS ## define_vector_field(ID, N, NK, flatbuffers_generic_vec_t, r)\\\n"
+ "static inline T ## _union_vec_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\\\n"
+ "{ T ## _union_vec_t uv__tmp; uv__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\\\n"
+ " uv__tmp.value = N ## _ ## NK(t__tmp);\\\n"
+ " FLATCC_ASSERT(NS ## vec_len(uv__tmp.type) == NS ## vec_len(uv__tmp.value)\\\n"
+ " && \"union vector type length mismatch\"); return uv__tmp; }\n",
+ nsc);
+}
+
+/* Linearly finds first occurrence of matching key, doesn't require vector to be sorted. */
+static void gen_scan(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ /*
+ * E: Element accessor (elem = E(vector, index)).
+ * L: Length accessor (length = L(vector)).
+ * A: Field accessor (or the identity function), result must match the diff function D's first arg.
+     * V: The vector to search (need not be sorted).
+     * T: The scalar, enum or string key type (either the element itself, or a field of the element).
+ * K: The search key.
+ * Kn: optional key length so external strings do not have to be zero terminated.
+ * D: the diff function D(v, K, Kn) :: v - <K, Kn>
+ *
+ * returns index (0..len - 1), or not_found (-1).
+ */
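+    /*
+     * Sketch of the generated scan API, assuming the default
+     * `flatbuffers_` common namespace; `Monster` and its key field `name`
+     * are hypothetical schema names:
+     *
+     *     size_t first = Monster_vec_scan_by_name(monsters, "Orc");
+     *     size_t last  = Monster_vec_rscan_by_name(monsters, "Orc");
+     *     size_t next  = Monster_vec_scan_ex_by_name(monsters, first + 1,
+     *                        Monster_vec_len(monsters), "Orc");
+     *
+     * Unlike find, scan does not require a sorted vector, and the `_ex`
+     * variants limit the search to the half-open index range [begin, end).
+     */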
+ fprintf(out->fp,
+ "/* A = identity if searching scalar vectors rather than key fields. */\n"
+ "/* Returns lowest matching index or not_found. */\n"
+ "#define __%sscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\\\n"
+ "{ T v__tmp; size_t i__tmp;\\\n"
+ " for (i__tmp = b; i__tmp < e; ++i__tmp) {\\\n"
+ " v__tmp = A(E(V, i__tmp));\\\n"
+ " if (D(v__tmp, (K), (Kn)) == 0) {\\\n"
+ " return i__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " return %snot_found;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%srscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\\\n"
+ "{ T v__tmp; size_t i__tmp = e;\\\n"
+ " while (i__tmp-- > b) {\\\n"
+ " v__tmp = A(E(V, i__tmp));\\\n"
+ " if (D(v__tmp, (K), (Kn)) == 0) {\\\n"
+ " return i__tmp;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " return %snot_found;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sscan_by_scalar_field(b, e, A, V, E, L, K, T)\\\n"
+ "__%sscan_by_field(b, e, A, V, E, L, K, 0, T, __%sscalar_cmp)\n"
+ "#define __%sscan_by_string_field(b, e, A, V, E, L, K)\\\n"
+ "__%sscan_by_field(b, e, A, V, E, L, K, 0, %sstring_t, __%sstring_cmp)\n"
+ "#define __%sscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\\\n"
+ "__%sscan_by_field(b, e, A, V, E, L, K, Kn, %sstring_t, __%sstring_n_cmp)\n",
+ nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%srscan_by_scalar_field(b, e, A, V, E, L, K, T)\\\n"
+ "__%srscan_by_field(b, e, A, V, E, L, K, 0, T, __%sscalar_cmp)\n"
+ "#define __%srscan_by_string_field(b, e, A, V, E, L, K)\\\n"
+ "__%srscan_by_field(b, e, A, V, E, L, K, 0, %sstring_t, __%sstring_cmp)\n"
+ "#define __%srscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\\\n"
+ "__%srscan_by_field(b, e, A, V, E, L, K, Kn, %sstring_t, __%sstring_n_cmp)\n",
+ nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scan_by_scalar_field(N, NK, T)\\\n"
+ "static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_scan(N, T)\\\n"
+ "static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%sscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\\\n"
+ "static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\\\n"
+ "__%srscan_by_scalar_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), __%sidentity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scan_by_string_field(N, NK) \\\n"
+ "static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "__%sscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_scan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%sscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\\\n"
+ "static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "__%sscan_by_string_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_scan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%sscan_by_string_n_field(begin__tmp, __%smin( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "__%srscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%srscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "__%srscan_by_string_field(begin__tmp, __%smin(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\\\n"
+ "static inline size_t N ## _vec_rscan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "__%srscan_by_string_n_field(begin__tmp, __%smin( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_scan_by_scalar_field(N, NK, TK)\\\n"
+ "static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_scan_by_ ## NK(vec__tmp, key__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_rscan_by_ ## NK(vec__tmp, key__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\\\n"
+ "{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_default_scan_by_string_field(N, NK) \\\n"
+ "static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_scan_by_ ## NK(vec__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_scan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_scan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_scan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_rscan_by_ ## NK(vec__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_rscan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\\\n"
+ "{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\\\n"
+ "static inline size_t N ## _vec_rscan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\\\n"
+ "{ return N ## _vec_rscan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\n",
+ nsc);
+}
+
+static void gen_helpers(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+
+ fprintf(out->fp,
+ /*
+ * Include the basic primitives for accessing flatbuffer data types independent
+ * of endianness.
+ *
+ * The included file must define the basic types and accessors
+ * prefixed with the common namespace which by default is
+ * "flatbuffers_".
+ */
+ "#include \"flatcc/flatcc_flatbuffers.h\"\n"
+ "\n\n");
+ /*
+ * The remapping of basic types to the common namespace makes it
+ * possible to have different definitions. The generic
+ * `flatbuffers_uoffset_t` etc. cannot be trusted to have one specific
+ * size since it depends on the included `flatcc/flatcc_types.h`
+     * file, but the namespace-prefixed types can be trusted if used carefully.
+ * For example the common namespace could be `flatbuffers_large_`
+ * when allowing for 64 bit offsets.
+ */
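+    /*
+     * For example, if the common namespace were `myns_` (hypothetical),
+     * the block below would emit, among other things:
+     *
+     *     typedef flatbuffers_uoffset_t myns_uoffset_t;
+     *     #define myns_endian flatbuffers_endian
+     *
+     * so the rest of the generated code can rely on the `myns_` names.
+     */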
+ if (strcmp(nsc, "flatbuffers_")) {
+ fprintf(out->fp,
+ "typedef flatbuffers_uoffset_t %suoffset_t;\n"
+ "typedef flatbuffers_soffset_t %ssoffset_t;\n"
+ "typedef flatbuffers_voffset_t %svoffset_t;\n"
+ "typedef flatbuffers_utype_t %sutype_t;\n"
+ "typedef flatbuffers_bool_t %sbool_t;\n"
+ "\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define %sendian flatbuffers_endian\n"
+ "__flatcc_define_basic_scalar_accessors(%s, flatbuffers_endian)"
+ "__flatcc_define_integer_accessors(%sbool, flatbuffers_bool_t,\\\n"
+ " FLATBUFFERS_BOOL_WIDTH, flatbuffers_endian)\\\n"
+ "__flatcc_define_integer_accessors(%sunion_type, flatbuffers_union_type_t,\n"
+ " FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)\\\n",
+ "\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "__flatcc_define_integer_accessors(__%suoffset, flatbuffers_uoffset_t,\n"
+ " FLATBUFFERS_UOFFSET_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%ssoffset, flatbuffers_soffset_t,\n"
+ " FLATBUFFERS_SOFFSET_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%svoffset, flatbuffers_voffset_t,\n"
+ " FLATBUFFERS_VOFFSET_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%sutype, flatbuffers_utype_t,\n"
+ " FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)\n"
+ "__flatcc_define_integer_accessors(__%sthash, flatbuffers_thash_t,\n"
+ " FLATBUFFERS_THASH_WIDTH, flatbuffers_endian)\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#ifndef %s_WRAP_NAMESPACE\n"
+ "#define %s_WRAP_NAMESPACE(ns, x) ns ## _ ## x\n"
+ "#endif\n",
+ out->nscup, out->nscup);
+ }
+    /* Build out a more elaborate type system based on the included primitives. */
+ fprintf(out->fp,
+ "#define __%sread_scalar_at_byteoffset(N, p, o) N ## _read_from_pe((uint8_t *)(p) + (o))\n"
+ "#define __%sread_scalar(N, p) N ## _read_from_pe(p)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sread_vt(ID, offset, t)\\\n"
+ "%svoffset_t offset = 0;\\\n"
+ "{ %svoffset_t id__tmp, *vt__tmp;\\\n"
+ " FLATCC_ASSERT(t != 0 && \"null pointer table access\");\\\n"
+ " id__tmp = ID;\\\n"
+ " vt__tmp = (%svoffset_t *)((uint8_t *)(t) -\\\n"
+ " __%ssoffset_read_from_pe(t));\\\n"
+ " if (__%svoffset_read_from_pe(vt__tmp) >= sizeof(vt__tmp[0]) * (id__tmp + 3u)) {\\\n"
+ " offset = __%svoffset_read_from_pe(vt__tmp + id__tmp + 2);\\\n"
+ " }\\\n"
+ "}\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc);
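+    /*
+     * The macro above relies on the standard FlatBuffers table layout:
+     * a table starts with a soffset_t that, subtracted from the table
+     * pointer, locates its vtable; vtable[0] is the vtable size in bytes
+     * and vtable[2 + ID] is the field's byte offset from the table start
+     * (0 when the field is absent). Checking the vtable size against
+     * sizeof(voffset_t) * (ID + 3) keeps reads of fields added in newer
+     * schema versions safe against older, shorter vtables.
+     */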
+ fprintf(out->fp,
+ "#define __%sfield_present(ID, t) { __%sread_vt(ID, offset__tmp, t) return offset__tmp != 0; }\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sscalar_field(T, ID, t)\\\n"
+ "{\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " if (offset__tmp) {\\\n"
+ " return (const T *)((uint8_t *)(t) + offset__tmp);\\\n"
+ " }\\\n"
+ " return 0;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_field(ID, N, NK, TK, T, V)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "{ __%sread_vt(ID, offset__tmp, t__tmp)\\\n"
+ " return offset__tmp ? __%sread_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\\\n"
+ "}\\\n", nsc, nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "static inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "{ __%sread_vt(ID, offset__tmp, t__tmp)\\\n"
+ " return offset__tmp ? __%sread_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\\\n"
+ "}\\\n", nsc, nsc);
+ }
+ fprintf(out->fp,
+ "static inline const T *N ## _ ## NK ## _get_ptr(N ## _table_t t__tmp)\\\n"
+ "__%sscalar_field(T, ID, t__tmp)\\\n", nsc);
+ fprintf(out->fp,
+ "static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)",nsc);
+ if (out->opts->allow_scan_for_all_fields) {
+ fprintf(out->fp, "\\\n__%sdefine_scan_by_scalar_field(N, NK, T)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_optional_field(ID, N, NK, TK, T, V)\\\n"
+ "__%sdefine_scalar_field(ID, N, NK, TK, T, V)\\\n"
+ "static inline TK ## _option_t N ## _ ## NK ## _option(N ## _table_t t__tmp)\\\n"
+ "{ TK ## _option_t ret; __%sread_vt(ID, offset__tmp, t__tmp)\\\n"
+ " ret.is_null = offset__tmp == 0; ret.value = offset__tmp ?\\\n"
+ " __%sread_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\\\n"
+ " return ret; }\n", nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sstruct_field(T, ID, t, r)\\\n"
+ "{\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " if (offset__tmp) {\\\n"
+ " return (T)((uint8_t *)(t) + offset__tmp);\\\n"
+ " }\\\n"
+ " FLATCC_ASSERT(!(r) && \"required field missing\");\\\n"
+ " return 0;\\\n"
+ "}\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%soffset_field(T, ID, t, r, adjust)\\\n"
+ "{\\\n"
+ " %suoffset_t *elem__tmp;\\\n"
+ " __%sread_vt(ID, offset__tmp, t)\\\n"
+ " if (offset__tmp) {\\\n"
+ " elem__tmp = (%suoffset_t *)((uint8_t *)(t) + offset__tmp);\\\n"
+ " /* Add sizeof so C api can have raw access past header field. */\\\n"
+ " return (T)((uint8_t *)(elem__tmp) + adjust +\\\n"
+ " __%suoffset_read_from_pe(elem__tmp));\\\n"
+ " }\\\n"
+ " FLATCC_ASSERT(!(r) && \"required field missing\");\\\n"
+ " return 0;\\\n"
+ "}\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%svector_field(T, ID, t, r) __%soffset_field(T, ID, t, r, sizeof(%suoffset_t))\n"
+ "#define __%stable_field(T, ID, t, r) __%soffset_field(T, ID, t, r, 0)\n",
+ nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_struct_field(ID, N, NK, T, r)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%sstruct_field(T, ID, t__tmp, r)", nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%sstruct_field(T, ID, t__tmp, r)", nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)\n", nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_vector_field(ID, N, NK, T, r)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(T, ID, t__tmp, r)", nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(T, ID, t__tmp, r)", nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)\n", nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_table_field(ID, N, NK, T, r)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%stable_field(T, ID, t__tmp, r)", nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%stable_field(T, ID, t__tmp, r)", nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)\n", nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_string_field(ID, N, NK, r)\\\n"
+ "static inline %sstring_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(%sstring_t, ID, t__tmp, r)", nsc, nsc, nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline %sstring_t N ## _ ## NK(N ## _table_t t__tmp)\\\n"
+ "__%svector_field(%sstring_t, ID, t__tmp, r)", nsc, nsc, nsc);
+ }
+ fprintf(out->fp,
+ "\\\nstatic inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\\\n"
+ "__%sfield_present(ID, t__tmp)", nsc);
+ if (out->opts->allow_scan_for_all_fields) {
+ fprintf(out->fp, "\\\n__%sdefine_scan_by_string_field(N, NK)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "#define __%svec_len(vec)\\\n"
+ "{ return (vec) ? (size_t)__%suoffset_read_from_pe((flatbuffers_uoffset_t *)vec - 1) : 0; }\n"
+ "#define __%sstring_len(s) __%svec_len(s)\n",
+ nsc, nsc, nsc, nsc);
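+    /*
+     * Vectors are length prefixed: a uoffset_t element count immediately
+     * precedes the first element, which is why vec_len reads from
+     * `vec - 1`. Strings are byte vectors, so string_len reuses vec_len.
+     */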
+ fprintf(out->fp,
+ "static inline size_t %svec_len(const void *vec)\n"
+ "__%svec_len(vec)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+        /* N names the scalar type so the accessor can use N ## _read_from_pe for endian-aware loads. */
+ "#define __%sscalar_vec_at(N, vec, i)\\\n"
+ "{ FLATCC_ASSERT(%svec_len(vec) > (i) && \"index out of range\");\\\n"
+ " return __%sread_scalar(N, &(vec)[i]); }\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sstruct_vec_at(vec, i)\\\n"
+ "{ FLATCC_ASSERT(%svec_len(vec) > (i) && \"index out of range\"); return (vec) + (i); }\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "/* `adjust` skips past the header for string vectors. */\n"
+ "#define __%soffset_vec_at(T, vec, i, adjust)\\\n"
+ "{ const %suoffset_t *elem__tmp = (vec) + (i);\\\n"
+ " FLATCC_ASSERT(%svec_len(vec) > (i) && \"index out of range\");\\\n"
+ " return (T)((uint8_t *)(elem__tmp) + (size_t)__%suoffset_read_from_pe(elem__tmp) + (adjust)); }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_vec_len(N)\\\n"
+ "static inline size_t N ## _vec_len(N ##_vec_t vec__tmp)\\\n"
+ "{ return %svec_len(vec__tmp); }\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_vec_at(N, T) \\\n"
+ "static inline T N ## _vec_at(N ## _vec_t vec__tmp, size_t i__tmp)\\\n"
+ "__%sscalar_vec_at(N, vec__tmp, i__tmp)\n",
+ nsc, nsc);
+ fprintf(out->fp,
+ "typedef const char *%sstring_t;\n"
+ "static inline size_t %sstring_len(%sstring_t s)\n"
+ "__%sstring_len(s)\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "typedef const %suoffset_t *%sstring_vec_t;\n"
+ "typedef %suoffset_t *%sstring_mutable_vec_t;\n"
+ "static inline size_t %sstring_vec_len(%sstring_vec_t vec)\n"
+ "__%svec_len(vec)\n"
+ "static inline %sstring_t %sstring_vec_at(%sstring_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%sstring_t, vec, i, sizeof(vec[0]))\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp, "typedef const void *%sgeneric_t;\n", nsc);
+ fprintf(out->fp, "typedef void *%smutable_generic_t;\n", nsc);
+ fprintf(out->fp,
+ "static inline %sstring_t %sstring_cast_from_generic(const %sgeneric_t p)\n"
+ "{ return p ? ((const char *)p) + __%suoffset__size() : 0; }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "typedef const %suoffset_t *%sgeneric_vec_t;\n"
+ "typedef %suoffset_t *%sgeneric_table_mutable_vec_t;\n"
+ "static inline size_t %sgeneric_vec_len(%sgeneric_vec_t vec)\n"
+ "__%svec_len(vec)\n"
+ "static inline %sgeneric_t %sgeneric_vec_at(%sgeneric_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%sgeneric_t, vec, i, 0)\n"
+ "static inline %sgeneric_t %sgeneric_vec_at_as_string(%sgeneric_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%sgeneric_t, vec, i, sizeof(vec[0]))\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ gen_union(out);
+ gen_find(out);
+ gen_scan(out);
+ if (out->opts->cgen_sort) {
+ gen_sort(out);
+ fprintf(out->fp,
+ "#define __%ssort_vector_field(N, NK, T, t)\\\n"
+ "{ T ## _mutable_vec_t v__tmp = (T ## _mutable_vec_t) N ## _ ## NK ## _get(t);\\\n"
+ " if (v__tmp) T ## _vec_sort(v__tmp); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_table_field(N, NK, T, t)\\\n"
+ "{ T ## _sort((T ## _mutable_table_t)N ## _ ## NK ## _get(t)); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_union_field(N, NK, T, t)\\\n"
+ "{ T ## _sort(T ## _mutable_union_cast(N ## _ ## NK ## _union(t))); }\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_table_vector_field_elements(N, NK, T, t)\\\n"
+ "{ T ## _vec_t v__tmp = N ## _ ## NK ## _get(t); size_t i__tmp, n__tmp;\\\n"
+ " n__tmp = T ## _vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\\\n"
+ " T ## _sort((T ## _mutable_table_t)T ## _vec_at(v__tmp, i__tmp)); }}\n",
+ nsc);
+ fprintf(out->fp,
+ "#define __%ssort_union_vector_field_elements(N, NK, T, t)\\\n"
+ "{ T ## _union_vec_t v__tmp = N ## _ ## NK ## _union(t); size_t i__tmp, n__tmp;\\\n"
+ " n__tmp = T ## _union_vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\\\n"
+ " T ## _sort(T ## _mutable_union_cast(T ## _union_vec_at(v__tmp, i__tmp))); }}\n",
+ nsc);
+ } else {
+ fprintf(out->fp, "/* sort disabled */\n");
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_vector(N, T)\\\n"
+ "typedef const T *N ## _vec_t;\\\n"
+ "typedef T *N ## _mutable_vec_t;\\\n"
+ "__%sdefine_scalar_vec_len(N)\\\n"
+ "__%sdefine_scalar_vec_at(N, T)\\\n"
+ "__%sdefine_scalar_find(N, T)\\\n"
+ "__%sdefine_scalar_scan(N, T)",
+ nsc, nsc, nsc, nsc, nsc);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp, "\\\n__%sdefine_scalar_sort(N, T)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp, "\n");
+ /* Elaborate on the included basic type system. */
+ fprintf(out->fp,
+ "#define __%sdefine_integer_type(N, T, W)\\\n"
+ "__flatcc_define_integer_accessors(N, T, W, %sendian)\\\n"
+ "__%sdefine_scalar_vector(N, T)\n",
+ nsc, nsc, nsc);
+ fprintf(out->fp,
+ "__%sdefine_scalar_vector(%sbool, %sbool_t)\n"
+ "__%sdefine_scalar_vector(%schar, char)\n"
+ "__%sdefine_scalar_vector(%suint8, uint8_t)\n"
+ "__%sdefine_scalar_vector(%sint8, int8_t)\n"
+ "__%sdefine_scalar_vector(%suint16, uint16_t)\n"
+ "__%sdefine_scalar_vector(%sint16, int16_t)\n"
+ "__%sdefine_scalar_vector(%suint32, uint32_t)\n"
+ "__%sdefine_scalar_vector(%sint32, int32_t)\n"
+ "__%sdefine_scalar_vector(%suint64, uint64_t)\n"
+ "__%sdefine_scalar_vector(%sint64, int64_t)\n"
+ "__%sdefine_scalar_vector(%sfloat, float)\n"
+ "__%sdefine_scalar_vector(%sdouble, double)\n"
+ "__%sdefine_scalar_vector(%sunion_type, %sunion_type_t)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline size_t %sstring_vec_find(%sstring_vec_t vec, const char *s)\n"
+ "__%sfind_by_string_field(__%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_find_n(%sstring_vec_t vec, const char *s, size_t n)\n"
+ "__%sfind_by_string_n_field(__%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "static inline size_t %sstring_vec_scan(%sstring_vec_t vec, const char *s)\n"
+ "__%sscan_by_string_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_scan_n(%sstring_vec_t vec, const char *s, size_t n)\n"
+ "__%sscan_by_string_n_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n"
+ "static inline size_t %sstring_vec_scan_ex(%sstring_vec_t vec, size_t begin, size_t end, const char *s)\n"
+ "__%sscan_by_string_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_scan_ex_n(%sstring_vec_t vec, size_t begin, size_t end, const char *s, size_t n)\n"
+ "__%sscan_by_string_n_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n"
+ "static inline size_t %sstring_vec_rscan(%sstring_vec_t vec, const char *s)\n"
+ "__%srscan_by_string_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_rscan_n(%sstring_vec_t vec, const char *s, size_t n)\n"
+ "__%srscan_by_string_n_field(0, %sstring_vec_len(vec), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n"
+ "static inline size_t %sstring_vec_rscan_ex(%sstring_vec_t vec, size_t begin, size_t end, const char *s)\n"
+ "__%srscan_by_string_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s)\n"
+ "static inline size_t %sstring_vec_rscan_ex_n(%sstring_vec_t vec, size_t begin, size_t end, const char *s, size_t n)\n"
+ "__%srscan_by_string_n_field(begin, __%smin(end, %sstring_vec_len(vec)), __%sidentity, vec, %sstring_vec_at, %sstring_vec_len, s, n)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc,
+ nsc, nsc, nsc, nsc);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp, "__%sdefine_string_sort()\n", nsc);
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_struct_scalar_fixed_array_field(N, NK, TK, T, L)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ if (!t__tmp || i__tmp >= L) return 0;\\\n"
+ " return __%sread_scalar(TK, &(t__tmp->NK[i__tmp])); }\\\n"
+ "static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? t__tmp->NK : 0; }\\\n"
+ "static inline size_t N ## _ ## NK ## _get_len(void) { return L; }",
+ nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK (N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ return N ## _ ## NK ## _get(t__tmp, i__tmp); }");
+ }
+ fprintf(out->fp, "\n");;
+ fprintf(out->fp,
+ "#define __%sdefine_struct_struct_fixed_array_field(N, NK, T, L)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }"
+ "static inline T N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? t__tmp->NK : 0; }\\\n"
+ "static inline size_t N ## _ ## NK ## _get_len(void) { return L; }",
+ nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK(N ## _struct_t t__tmp, size_t i__tmp)\\\n"
+ "{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }");
+ }
+ fprintf(out->fp, "\n");
+ fprintf(out->fp,
+ "#define __%sdefine_struct_scalar_field(N, NK, TK, T)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? __%sread_scalar(TK, &(t__tmp->NK)) : 0; }\\\n"
+ "static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? &(t__tmp->NK) : 0; }",
+ nsc, nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK (N ## _struct_t t__tmp)\\\n"
+ "{ return t__tmp ? __%sread_scalar(TK, &(t__tmp->NK)) : 0; }",
+ nsc);
+ }
+ if (out->opts->allow_scan_for_all_fields) {
+ fprintf(out->fp, "\\\n__%sdefine_scan_by_scalar_field(N, NK, T)\n", nsc);
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "#define __%sdefine_struct_struct_field(N, NK, T)\\\n"
+ "static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }",
+ nsc);
+ if (!out->opts->cgen_no_conflicts) {
+ fprintf(out->fp,
+ "\\\nstatic inline T N ## _ ## NK (N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }\n");
+ } else {
+ fprintf(out->fp, "\n");
+ }
+ fprintf(out->fp,
+ "/* If fid is null, the function returns true without testing as buffer is not expected to have any id. */\n"
+ "static inline int %shas_identifier(const void *buffer, const char *fid)\n"
+ "{ %sthash_t id, id2 = 0; if (fid == 0) { return 1; };\n"
+ " id2 = %stype_hash_from_string(fid);\n"
+ " id = __%sthash_read_from_pe(((%suoffset_t *)buffer) + 1);\n"
+ " return id2 == 0 || id == id2; }\n"
+ "static inline int %shas_type_hash(const void *buffer, %sthash_t thash)\n"
+ "{ return thash == 0 || (__%sthash_read_from_pe((%suoffset_t *)buffer + 1) == thash); }\n\n"
+ "static inline %sthash_t %sget_type_hash(const void *buffer)\n"
+ "{ return __%sthash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1); }\n\n"
+ "#define %sverify_endian() %shas_identifier(\"\\x00\\x00\\x00\\x00\" \"1234\", \"1234\")\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
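+    /*
+     * Buffer header layout assumed above: the root object offset is a
+     * uoffset_t at the start of the buffer, and the optional 4-character
+     * file identifier (or type hash) sits immediately after it, hence
+     * the reads at `(uoffset_t *)buffer + 1`.
+     */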
+ fprintf(out->fp,
+ "static inline void *%sread_size_prefix(void *b, size_t *size_out)\n"
+ "{ if (size_out) { *size_out = (size_t)__%suoffset_read_from_pe(b); }\n"
+ " return (uint8_t *)b + sizeof(%suoffset_t); }\n", nsc, nsc, nsc);
+ fprintf(out->fp,
+ "/* Null file identifier accepts anything, otherwise fid should be 4 characters. */\n"
+ "#define __%sread_root(T, K, buffer, fid)\\\n"
+ " ((!buffer || !%shas_identifier(buffer, fid)) ? 0 :\\\n"
+ " ((T ## _ ## K ## t)(((uint8_t *)buffer) +\\\n"
+ " __%suoffset_read_from_pe(buffer))))\n"
+ "#define __%sread_typed_root(T, K, buffer, thash)\\\n"
+ " ((!buffer || !%shas_type_hash(buffer, thash)) ? 0 :\\\n"
+ " ((T ## _ ## K ## t)(((uint8_t *)buffer) +\\\n"
+ " __%suoffset_read_from_pe(buffer))))\n",
+ nsc, nsc, nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%snested_buffer_as_root(C, N, T, K)\\\n"
+ "static inline T ## _ ## K ## t C ## _ ## N ## _as_root_with_identifier(C ## _ ## table_t t__tmp, const char *fid__tmp)\\\n"
+ "{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __%sread_root(T, K, buffer__tmp, fid__tmp); }\\\n"
+ "static inline T ## _ ## K ## t C ## _ ## N ## _as_typed_root(C ## _ ## table_t t__tmp)\\\n"
+ "{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __%sread_root(T, K, buffer__tmp, C ## _ ## type_identifier); }\\\n"
+ "static inline T ## _ ## K ## t C ## _ ## N ## _as_root(C ## _ ## table_t t__tmp)\\\n"
+ "{ const char *fid__tmp = T ## _file_identifier;\\\n"
+ " const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __%sread_root(T, K, buffer__tmp, fid__tmp); }\n",
+ nsc, nsc, nsc, nsc);
+ fprintf(out->fp,
+ "#define __%sbuffer_as_root(N, K)\\\n"
+ "static inline N ## _ ## K ## t N ## _as_root_with_identifier(const void *buffer__tmp, const char *fid__tmp)\\\n"
+ "{ return __%sread_root(N, K, buffer__tmp, fid__tmp); }\\\n"
+ "static inline N ## _ ## K ## t N ## _as_root_with_type_hash(const void *buffer__tmp, %sthash_t thash__tmp)\\\n"
+ "{ return __%sread_typed_root(N, K, buffer__tmp, thash__tmp); }\\\n"
+ "static inline N ## _ ## K ## t N ## _as_root(const void *buffer__tmp)\\\n"
+ "{ const char *fid__tmp = N ## _file_identifier;\\\n"
+ " return __%sread_root(N, K, buffer__tmp, fid__tmp); }\\\n"
+ "static inline N ## _ ## K ## t N ## _as_typed_root(const void *buffer__tmp)\\\n"
+ "{ return __%sread_typed_root(N, K, buffer__tmp, N ## _type_hash); }\n"
+ "#define __%sstruct_as_root(N) __%sbuffer_as_root(N, struct_)\n"
+ "#define __%stable_as_root(N) __%sbuffer_as_root(N, table_)\n",
+ nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc, nsc);
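+    /*
+     * Sketch of the generated entry points (hypothetical table `MyTable`
+     * with file identifier "MYID"):
+     *
+     *     MyTable_table_t t = MyTable_as_root(buffer);
+     *     MyTable_table_t u = MyTable_as_root_with_identifier(buffer, "MYID");
+     *
+     * Both evaluate to 0 when the buffer pointer is null or the
+     * identifier check fails.
+     */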
+ fprintf(out->fp, "\n");
+}
+
+int fb_gen_common_c_header(fb_output_t *out)
+{
+ const char *nscup = out->nscup;
+
+ fprintf(out->fp,
+ "#ifndef %s_COMMON_READER_H\n"
+ "#define %s_COMMON_READER_H\n",
+ nscup, nscup);
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ fprintf(out->fp, "/* Common FlatBuffers read functionality for C. */\n\n");
+ if (!out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "/*"
+ " * This code is generated without support for vector sort operations\n"
+ " * but find operations are supported on pre-sorted vectors.\n"
+ " */\n");
+ }
+ gen_prologue(out);
+ gen_helpers(out);
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_COMMON_H */\n",
+ nscup);
+ return 0;
+}
+
+static void gen_pretext(fb_output_t *out)
+{
+ const char *nsc = out->nsc;
+ const char *nscup = out->nscup;
+
+ int do_pad = out->opts->cgen_pad;
+
+ fprintf(out->fp,
+ "#ifndef %s_READER_H\n"
+ "#define %s_READER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ if (do_pad) {
+ fprintf(out->fp,
+ "/*\n"
+ " * Generated with 'pad' option which expects #pragma pack(1) and\n"
+ " * #pragma pack() to be supported, and which adds extra padding\n"
+ " * fields to structs.\n"
+ " *\n"
+ " * This is mostly relevant for some micro controller platforms, but\n"
+ " * may also be needed with 'force_align' attributes > 16.\n"
+ " *\n"
+ " * The default output uses C11 <stdalign.h> alignas(n) which can be\n"
+ " * defined as `__attribute__((aligned (n)))` or similar on many\n"
+ " * older platforms.\n"
+ " */\n"
+ "\n");
+ }
+
+ fprintf(out->fp,
+ "#ifndef %s_COMMON_READER_H\n"
+ "#include \"%scommon_reader.h\"\n"
+ "#endif\n",
+ nscup, nsc);
+ fb_gen_c_includes(out, "_reader.h", "_READER_H");
+
+ /*
+     * Must be included in every file using static_assert so that the
+     * static_assert_scope.h counter can avoid conflicts.
+ */
+ fprintf(out->fp,
+ "#include \"flatcc/flatcc_flatbuffers.h\"\n");
+ if (!do_pad) {
+ fprintf(out->fp,
+ "#ifndef __alignas_is_defined\n"
+ "#include <stdalign.h>\n"
+ "#endif\n");
+ }
+ gen_prologue(out);
+ if (out->S->file_identifier.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sidentifier\n"
+ "#define %sidentifier \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_identifier.s.len, out->S->file_identifier.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sidentifier\n"
+ "#define %sidentifier 0\n"
+ "#endif\n",
+ nsc, nsc);
+ }
+ if (out->S->file_extension.type == vt_string) {
+ fprintf(out->fp,
+ "#undef %sextension\n"
+ "#define %sextension \"%.*s\"\n",
+ nsc,
+ nsc, out->S->file_extension.s.len, out->S->file_extension.s.s);
+ } else {
+ fprintf(out->fp,
+ "#ifndef %sextension\n"
+ "#define %sextension \"%s\"\n"
+ "#endif\n",
+ nsc, nsc, out->opts->default_bin_ext);
+ }
+ fprintf(out->fp, "\n");
+}
+
+static void gen_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp, "#endif /* %s_READER_H */\n", out->S->basenameup);
+}
+
+static void gen_forward_decl(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+ const char *nsc = out->nsc;
+
+ fb_clear(snt);
+
+ assert(ct->symbol.kind == fb_is_struct || ct->symbol.kind == fb_is_table);
+
+ fb_compound_name(ct, &snt);
+ if (ct->symbol.kind == fb_is_struct) {
+ if (ct->size == 0) {
+ gen_panic(out, "internal error: unexpected empty struct");
+ return;
+ } else {
+ fprintf(out->fp, "typedef struct %s %s_t;\n",
+ snt.text, snt.text);
+ }
+ fprintf(out->fp, "typedef const %s_t *%s_struct_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef %s_t *%s_mutable_struct_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef const %s_t *%s_vec_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef %s_t *%s_mutable_vec_t;\n",
+ snt.text, snt.text);
+ } else {
+ fprintf(out->fp, "typedef const struct %s_table *%s_table_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef struct %s_table *%s_mutable_table_t;\n",
+ snt.text, snt.text);
+ fprintf(out->fp, "typedef const %suoffset_t *%s_vec_t;\n", nsc, snt.text);
+ fprintf(out->fp, "typedef %suoffset_t *%s_mutable_vec_t;\n", nsc, snt.text);
+ }
+}
+
+static inline void print_doc(fb_output_t *out, const char *indent, fb_doc_t *doc)
+{
+ long ln = 0;
+ int first = 1;
+ if (doc == 0) {
+ return;
+ }
+ while (doc) {
+ if (ln != doc->ident->linenum) {
+ if (first) {
+ /* Not all C compilers understand // comments. */
+ fprintf(out->fp, "%s/** ", indent);
+ ln = doc->ident->linenum;
+ } else {
+ fprintf(out->fp, "\n%s * ", indent);
+ }
+ }
+ first = 0;
+ fprintf(out->fp, "%.*s", (int)doc->ident->len, doc->ident->text);
+ ln = doc->ident->linenum;
+ doc = doc->link;
+ }
+ fprintf(out->fp, " */\n");
+}
+
+static void gen_struct(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ unsigned align;
+ size_t offset = 0;
+ const char *tname, *tname_ns, *tname_prefix;
+ int n, len;
+ const char *s;
+ unsigned pad_index = 0, deprecated_index = 0, pad;
+ const char *kind;
+ int do_pad = out->opts->cgen_pad;
+ int is_primary_key, current_key_processed;
+ const char *nsc = out->nsc;
+
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+
+ fb_clear(snt);
+ fb_clear(snref);
+
+ assert(ct->symbol.kind == fb_is_struct);
+ assert(ct->align > 0 || ct->count == 0);
+ assert(ct->size > 0 || ct->count == 0);
+
+ fb_compound_name(ct, &snt);
+ print_doc(out, "", ct->doc);
+ if (ct->size == 0) {
+ gen_panic(out, "internal error: unexpected empty struct");
+ } else {
+ if (do_pad) {
+ fprintf(out->fp, "#pragma pack(1)\n");
+ }
+ /*
+ * Unfortunately the following is not valid in C11:
+ *
+ * struct alignas(4) mystruct { ... };
+ *
+ * we can only use alignas on members (unlike C++, and unlike
+ * non-portable C compiler variants).
+ *
+         * By applying alignas with the struct's own alignment to the
+         * first member we get around this problem. It shouldn't strictly
+         * be necessary to add explicit padding fields, but compilers
+         * might not support alignment above 16 bytes, so the optional
+         * 'pad' option adds them as a precaution.
+ */
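+        /*
+         * As an illustration, a hypothetical schema struct
+         * `struct Pos { x: ubyte; y: uint; }` is emitted (without the
+         * 'pad' option) roughly as:
+         *
+         *     struct Pos {
+         *         alignas(4) uint8_t x;
+         *         alignas(4) uint32_t y;
+         *     };
+         *     static_assert(sizeof(Pos_t) == 8, "struct size mismatch");
+         */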
+ fprintf(out->fp, "struct %s {\n", snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ current_key_processed = 0;
+ member = (fb_member_t *)sym;
+ is_primary_key = ct->primary_key == member;
+ print_doc(out, " ", member->doc);
+ symbol_name(sym, &n, &s);
+ align = offset == 0 ? ct->align : member->align;
+ if (do_pad && (pad = (unsigned)(member->offset - offset))) {
+ fprintf(out->fp, " uint8_t __padding%u[%u];\n",
+ pad_index++, pad);
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ pad = (unsigned)member->size;
+ if (do_pad) {
+ fprintf(out->fp, " uint8_t __deprecated%u[%u]; /* was: '%.*s' */\n",
+ deprecated_index++, pad, n, s);
+ } else {
+ fprintf(out->fp, " alignas(%u) uint8_t __deprecated%u[%u]; /* was: '%.*s' */\n",
+ align, deprecated_index++, pad, n, s);
+ }
+ offset = (unsigned)(member->offset + member->size);
+ continue;
+ }
+ switch (member->type.type) {
+ case vt_fixed_array_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ len = (int)member->type.len;
+ if (do_pad) {
+ fprintf(out->fp, " %s%s ", tname_ns, tname);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s%s ", align, tname_ns, tname);
+ }
+ fprintf(out->fp, "%.*s[%d];\n", n, s, len);
+ break;
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ if (do_pad) {
+ fprintf(out->fp, " %s%s ", tname_ns, tname);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s%s ", align, tname_ns, tname);
+ }
+ fprintf(out->fp, "%.*s;\n", n, s);
+ break;
+ case vt_fixed_array_compound_type_ref:
+ assert(member->type.ct->symbol.kind == fb_is_struct || member->type.ct->symbol.kind == fb_is_enum);
+ kind = member->type.ct->symbol.kind == fb_is_struct ? "" : "enum_";
+ fb_compound_name(member->type.ct, &snref);
+ len = (int)member->type.len;
+ if (do_pad) {
+ fprintf(out->fp, " %s_%st ", snref.text, kind);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s_%st ", align, snref.text, kind);
+ }
+ fprintf(out->fp, "%.*s[%d];\n", n, s, len);
+ break;
+ case vt_compound_type_ref:
+ assert(member->type.ct->symbol.kind == fb_is_struct || member->type.ct->symbol.kind == fb_is_enum);
+ kind = member->type.ct->symbol.kind == fb_is_struct ? "" : "enum_";
+ fb_compound_name(member->type.ct, &snref);
+ if (do_pad) {
+ fprintf(out->fp, " %s_%st ", snref.text, kind);
+ } else {
+ fprintf(out->fp, " alignas(%u) %s_%st ", align, snref.text, kind);
+ }
+ fprintf(out->fp, "%.*s;\n", n, s);
+ break;
+ default:
+ fprintf(out->fp, " %s ", __FLATCC_ERROR_TYPE);
+ fprintf(out->fp, "%.*s;\n", n, s);
+ gen_panic(out, "internal error: unexpected type during code generation");
+ break;
+ }
+ offset = (unsigned)(member->offset + member->size);
+ }
+ if (do_pad && (pad = (unsigned)(ct->size - offset))) {
+ fprintf(out->fp, " uint8_t __padding%u[%u];\n",
+ pad_index, pad);
+ }
+ fprintf(out->fp, "};\n");
+ if (do_pad) {
+ fprintf(out->fp, "#pragma pack()\n");
+ }
+ fprintf(out->fp,
+ "static_assert(sizeof(%s_t) == %"PRIu64", \"struct size mismatch\");\n\n",
+ snt.text, (uint64_t)ct->size);
+ fprintf(out->fp,
+ "static inline const %s_t *%s__const_ptr_add(const %s_t *p, size_t i) { return p + i; }\n", snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline %s_t *%s__ptr_add(%s_t *p, size_t i) { return p + i; }\n", snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline %s_struct_t %s_vec_at(%s_vec_t vec, size_t i)\n"
+ "__%sstruct_vec_at(vec, i)\n",
+ snt.text, snt.text, snt.text,
+ nsc);
+ }
+ fprintf(out->fp, "static inline size_t %s__size(void) { return %"PRIu64"; }\n",
+ snt.text, (uint64_t)ct->size);
+ fprintf(out->fp,
+ "static inline size_t %s_vec_len(%s_vec_t vec)\n"
+ "__%svec_len(vec)\n",
+ snt.text, snt.text, nsc);
+ fprintf(out->fp,
+ "__%sstruct_as_root(%s)\n",
+ nsc, snt.text);
+ fprintf(out->fp, "\n");
+
+ /* Create accessors which respect endianness and which return 0 on null struct access. */
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ is_primary_key = ct->primary_key == member;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(&member->symbol, &n, &s);
+ switch (member->type.type) {
+ case vt_fixed_array_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tname_prefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_fixed_array_field(%s, %.*s, %s%s, %s%s, %d)\n",
+ nsc, snt.text, n, s, nsc, tname_prefix, tname_ns, tname, member->type.len);
+ /* TODO: if member->type.st == fb_char add string specific methods. */
+ break;
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tname_prefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_field(%s, %.*s, %s%s, %s%s)\n",
+ nsc, snt.text, n, s, nsc, tname_prefix, tname_ns, tname);
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key field on this struct. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_struct_sort_by_scalar_field(%s, %.*s, %s%s, %s_t)\n",
+ nsc, snt.text, n, s, tname_ns, tname, snt.text);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case vt_fixed_array_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_fixed_array_field(%s, %.*s, %s, %s_enum_t, %d)\n",
+ nsc, snt.text, n, s, snref.text, snref.text, member->type.len);
+ break;
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sdefine_struct_struct_fixed_array_field(%s, %.*s, %s_struct_t, %d)\n",
+ nsc, snt.text, n, s, snref.text, member->type.len);
+ break;
+ }
+ break;
+
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ fprintf(out->fp,
+ "__%sdefine_struct_scalar_field(%s, %.*s, %s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text, snref.text);
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_struct_sort_by_scalar_field(%s, %.*s, %s_enum_t, %s_t)\n",
+ nsc, snt.text, n, s, snref.text, snt.text);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case fb_is_struct:
+ /*
+ * For completeness provide an accessor which returns member pointer
+ * or null if container struct is null.
+ */
+ fprintf(out->fp,
+ "__%sdefine_struct_struct_field(%s, %.*s, %s_struct_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ }
+
+ }
+ if ((member->metadata_flags & fb_f_key) && !current_key_processed) {
+ fprintf(out->fp,
+ "/* Note: field has key, but there is no support for find by fields of this type. */\n");
+ /*
+ * If the first key already exists, but was for an unsupported
+ * type, we do not map the next possible key to generic find.
+ */
+ }
+ }
+ fprintf(out->fp, "\n");
+}
+
+/*
+ * Enums are integers, but C provides no portable way to control the
+ * size of an enum type. To produce a typesafe and portable result, we
+ * generate a sized typedef and #define constants instead.
+ */
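+/*
+ * For illustration, a hypothetical schema
+ * `enum Color : ubyte { Red = 1, Green = 2 }` is emitted roughly as
+ * (exact literal formatting aside):
+ *
+ *     typedef uint8_t Color_enum_t;
+ *     __flatbuffers_define_integer_type(Color, Color_enum_t, 8)
+ *     #define Color_Red ((Color_enum_t)1)
+ *     #define Color_Green ((Color_enum_t)2)
+ */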
+static void gen_enum(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *tname, *tname_ns, *s, *kind;
+ fb_literal_t literal;
+ int n, w;
+ int is_union;
+ fb_scoped_name_t snt;
+ const char *nsc = out->nsc;
+
+ fb_clear(snt);
+
+ assert(ct->symbol.kind == fb_is_enum || ct->symbol.kind == fb_is_union);
+ assert(ct->type.type == vt_scalar_type);
+
+ tname_ns = scalar_type_ns(ct->type.st, nsc);
+ tname = scalar_type_name(ct->type.st);
+
+ w = (int)ct->size * 8;
+
+ is_union = ct->symbol.kind != fb_is_enum;
+ kind = is_union ? "union_type" : "enum";
+ fb_compound_name(ct, &snt);
+ print_doc(out, "", ct->doc);
+ fprintf(out->fp,
+ "typedef %s%s %s_%s_t;\n",
+ tname_ns, tname, snt.text, kind);
+ fprintf(out->fp,
+ "__%sdefine_integer_type(%s, %s_%s_t, %u)\n",
+ nsc, snt.text, snt.text, kind, w);
+ if (is_union) {
+ fprintf(out->fp,
+ "__%sdefine_union(%s, %s)\n",
+ nsc, nsc, snt.text);
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ print_doc(out, "", member->doc);
+ symbol_name(&member->symbol, &n, &s);
+ print_literal(ct->type.st, &member->value, literal);
+ /*
+ * This must be a define, not a static const integer, otherwise it
+ * won't work in switch statements - except with GNU extensions.
+ */
+ fprintf(out->fp,
+ "#define %s_%.*s ((%s_%s_t)%s)\n",
+ snt.text, n, s, snt.text, kind, literal);
+ }
+ fprintf(out->fp, "\n");
+
+ if (is_union) {
+ fprintf(out->fp, "static inline const char *%s_type_name(%s_union_type_t type)\n"
+ "{\n",
+ snt.text, snt.text);
+ } else {
+ fprintf(out->fp, "static inline const char *%s_name(%s_enum_t value)\n"
+ "{\n",
+ snt.text, snt.text);
+ }
+
+ if (is_union) {
+ fprintf(out->fp, " switch (type) {\n");
+ } else {
+ fprintf(out->fp, " switch (value) {\n");
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(&member->symbol, &n, &s);
+ if (sym->flags & fb_duplicate) {
+ fprintf(out->fp,
+ " /* case %s_%.*s: return \"%.*s\"; (duplicate) */\n",
+ snt.text, n, s, n, s);
+ } else {
+ fprintf(out->fp,
+ " case %s_%.*s: return \"%.*s\";\n",
+ snt.text, n, s, n, s);
+ }
+ }
+ fprintf(out->fp,
+ " default: return \"\";\n"
+ " }\n"
+ "}\n");
+ fprintf(out->fp, "\n");
+
+ if (is_union) {
+ fprintf(out->fp, "static inline int %s_is_known_type(%s_union_type_t type)\n"
+ "{\n",
+ snt.text, snt.text);
+ } else {
+ fprintf(out->fp, "static inline int %s_is_known_value(%s_enum_t value)\n"
+ "{\n",
+ snt.text, snt.text);
+ }
+ if (is_union) {
+ fprintf(out->fp, " switch (type) {\n");
+ } else {
+ fprintf(out->fp, " switch (value) {\n");
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(&member->symbol, &n, &s);
+ if (sym->flags & fb_duplicate) {
+ fprintf(out->fp,
+ " /* case %s_%.*s: return 1; (duplicate) */\n",
+ snt.text, n, s);
+ } else {
+ fprintf(out->fp,
+ " case %s_%.*s: return 1;\n",
+ snt.text, n, s);
+ }
+ }
+ fprintf(out->fp,
+ " default: return 0;\n"
+ " }\n"
+ "}\n");
+ fprintf(out->fp, "\n");
+
+}
+
+static void gen_nested_root(fb_output_t *out, fb_symbol_t *root_type, fb_symbol_t *container, fb_symbol_t *member)
+{
+ const char *s;
+ int n;
+ const char *kind;
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snc;
+
+ fb_clear(snt);
+ fb_clear(snc);
+ if (!root_type) {
+ return;
+ }
+ /*
+ * The current flatc compiler only accepts tables, but here we support
+ * both tables and structs insofar as the parser and analyzer
+ * allow for it.
+ */
+ switch (root_type->kind) {
+ case fb_is_table:
+ kind = "table_";
+ break;
+ case fb_is_struct:
+ kind = "struct_";
+ break;
+ default:
+ gen_panic(out, "internal error: roots can only be structs or tables");
+ return;
+ }
+ fb_compound_name((fb_compound_type_t *)root_type, &snt);
+ assert(container->kind == fb_is_table);
+ fb_compound_name((fb_compound_type_t *)container, &snc);
+ symbol_name(member, &n, &s);
+ fprintf(out->fp, "__%snested_buffer_as_root(%s, %.*s, %s, %s)\n", nsc, snc.text, n, s, snt.text, kind);
+}
+
+static void gen_table(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_member_t *member;
+ fb_symbol_t *sym;
+ const char *s, *tname, *tname_ns, *tname_prefix;
+ int n, r;
+ int is_primary_key, current_key_processed;
+ const char *nsc = out->nsc;
+ fb_scoped_name_t snt;
+ fb_scoped_name_t snref;
+ fb_literal_t literal;
+ int is_optional;
+
+ assert(ct->symbol.kind == fb_is_table);
+
+ fb_clear(snt);
+ fb_clear(snref);
+
+ fprintf(out->fp, "\n");
+ fb_compound_name(ct, &snt);
+ print_doc(out, "", ct->doc);
+ fprintf(out->fp,
+ /*
+ * We don't really need the struct, but it provides better
+ * type safety than a typedef of void *.
+ */
+ "struct %s_table { uint8_t unused__; };\n"
+ "\n",
+ snt.text);
+ fprintf(out->fp,
+ "static inline size_t %s_vec_len(%s_vec_t vec)\n"
+ "__%svec_len(vec)\n",
+ snt.text, snt.text, nsc);
+ fprintf(out->fp,
+ "static inline %s_table_t %s_vec_at(%s_vec_t vec, size_t i)\n"
+ "__%soffset_vec_at(%s_table_t, vec, i, 0)\n",
+ snt.text, snt.text, snt.text, nsc, snt.text);
+ fprintf(out->fp,
+ "__%stable_as_root(%s)\n",
+ nsc, snt.text);
+ fprintf(out->fp, "\n");
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ current_key_processed = 0;
+ member = (fb_member_t *)sym;
+ is_primary_key = ct->primary_key == member;
+ is_optional = !!(member->flags & fb_fm_optional);
+ print_doc(out, "", member->doc);
+ /*
+ * In flatc, there can be at most one key field, and it should be a
+ * scalar or string. Here we export all keys using the
+ * <table>_vec_find_by_<fieldname> convention and let the parser deal with
+ * semantics. Keys on unsupported fields are ignored. The first
+ * valid find operation is also mapped to just <table>_vec_find.
+ */
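+ /*
+ * For example (hypothetical names): a supported key field `name` on a
+ * table `Monster` yields Monster_vec_find_by_name(), and the first
+ * supported key is additionally exposed as Monster_vec_find().
+ */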
+ symbol_name(&member->symbol, &n, &s);
+ if (member->metadata_flags & fb_f_deprecated) {
+ fprintf(out->fp, "/* Skipping deprecated field: '%s_%.*s' */\n\n", snt.text, n, s);
+ continue;
+ }
+ r = (member->metadata_flags & fb_f_required) != 0;
+ switch (member->type.type) {
+ case vt_scalar_type:
+ tname_ns = scalar_type_ns(member->type.st, nsc);
+ tname = scalar_type_name(member->type.st);
+ tname_prefix = scalar_type_prefix(member->type.st);
+ print_literal(member->type.st, &member->value, literal);
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sdefine_scalar_optional_field(%"PRIu64", %s, %.*s, %s%s, %s%s, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, nsc, tname_prefix, tname_ns, tname, literal);
+ } else {
+ fprintf(out->fp,
+ "__%sdefine_scalar_field(%"PRIu64", %s, %.*s, %s%s, %s%s, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, nsc, tname_prefix, tname_ns, tname, literal);
+ }
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_table_sort_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s%s)\n",
+ nsc, snt.text, n, s, tname_ns, tname);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case vt_vector_type:
+ /* They all use a namespace. */
+ tname = scalar_vector_type_name(member->type.st);
+ tname_ns = nsc;
+ fprintf(out->fp,
+ "__%sdefine_vector_field(%"PRIu64", %s, %.*s, %s%s, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, tname_ns, tname, r);
+ if (member->nest) {
+ gen_nested_root(out, &member->nest->symbol, &ct->symbol, &member->symbol);
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "__%sdefine_string_field(%"PRIu64", %s, %.*s, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, r);
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp,
+ "__%sdefine_find_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_table_sort_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_string_field(%s, %.*s)\n",
+ nsc, snt.text, n, s);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "__%sdefine_vector_field(%"PRIu64", %s, %.*s, %sstring_vec_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, nsc, r);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ fprintf(out->fp,
+ "__%sdefine_struct_field(%"PRIu64", %s, %.*s, %s_struct_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "__%sdefine_table_field(%"PRIu64", %s, %.*s, %s_table_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ break;
+ case fb_is_enum:
+ print_literal(member->type.ct->type.st, &member->value, literal);
+ if (is_optional) {
+ fprintf(out->fp,
+ "__%sdefine_scalar_optional_field(%"PRIu64", %s, %.*s, %s, %s_enum_t, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, snref.text, literal);
+ } else {
+ fprintf(out->fp,
+ "__%sdefine_scalar_field(%"PRIu64", %s, %.*s, %s, %s_enum_t, %s)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, snref.text, literal);
+ }
+ if (!out->opts->allow_scan_for_all_fields && (member->metadata_flags & fb_f_key)) {
+ fprintf(out->fp,
+ "__%sdefine_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (!is_primary_key) {
+ fprintf(out->fp, "/* Note: this is not the primary key of this table. */\n");
+ }
+ fprintf(out->fp, "/* Note: find only works on vectors sorted by this field. */\n");
+ fprintf(out->fp,
+ "__%sdefine_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "__%sdefine_table_sort_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ }
+ if (is_primary_key) {
+ fprintf(out->fp,
+ "__%sdefine_default_find_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ fprintf(out->fp,
+ "__%sdefine_default_scan_by_scalar_field(%s, %.*s, %s_enum_t)\n",
+ nsc, snt.text, n, s, snref.text);
+ if (out->opts->cgen_sort) {
+ fprintf(out->fp,
+ "#define %s_vec_sort %s_vec_sort_by_%.*s\n",
+ snt.text, snt.text, n, s);
+ }
+ }
+ current_key_processed = 1;
+ }
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "__%sdefine_union_field(%s, %"PRIu64", %s, %.*s, %s, %u)\n",
+ nsc, nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type in table during code generation");
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_struct:
+ break;
+ case fb_is_table:
+ break;
+ case fb_is_enum:
+ break;
+ case fb_is_union:
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type in table during code generation");
+ break;
+ }
+ if (member->type.ct->symbol.kind == fb_is_union) {
+ fprintf(out->fp,
+ "__%sdefine_union_vector_field(%s, %"PRIu64", %s, %.*s, %s, %u)\n",
+ nsc, nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ } else {
+ fprintf(out->fp,
+ "__%sdefine_vector_field(%"PRIu64", %s, %.*s, %s_vec_t, %u)\n",
+ nsc, (uint64_t)member->id, snt.text, n, s, snref.text, r);
+ }
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected table member type during code generation");
+ break;
+ }
+ if ((member->metadata_flags & fb_f_key) && !current_key_processed) {
+ fprintf(out->fp,
+ "/* Note: field has key, but there is no support for find by fields of this type. */\n");
+ /*
+ * If the first key already exists, but was for an unsupported
+ * type, we do not map the next possible key to generic find.
+ */
+ }
+ }
+}
+
+int fb_gen_c_reader(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ gen_pretext(out);
+
+ for (ct = out->S->ordered_structs; ct; ct = ct->order) {
+ gen_forward_decl(out, ct);
+ }
+ fprintf(out->fp, "\n");
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_forward_decl(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ /* Must be placed early due to nested buffer circular references. */
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ /* Fall through. */
+ case fb_is_table:
+ print_type_identifier(out, (fb_compound_type_t *)sym);
+ print_file_extension(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ /* Enums must come before structs in case they are referenced. */
+ case fb_is_enum:
+ gen_enum(out, (fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+ /* Generate structs in topologically sorted order. */
+ for (ct = out->S->ordered_structs; ct; ct = ct->order) {
+ gen_struct(out, ct);
+ }
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ case fb_is_struct:
+ /* Already generated. */
+ break;
+ case fb_is_union:
+ gen_enum(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_table:
+ gen_table(out, (fb_compound_type_t *)sym);
+ break;
+ case fb_is_rpc_service:
+ /* Ignore. */
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected schema component");
+ break;
+ }
+ }
+ fprintf(out->fp, "\n");
+
+ if (out->opts->cgen_sort) {
+ fb_gen_c_sorter(out);
+ }
+
+ gen_footer(out);
+ return 0;
+}
diff --git a/flatcc/src/compiler/codegen_c_sort.c b/flatcc/src/compiler/codegen_c_sort.c
new file mode 100644
index 0000000..4319f96
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_sort.c
@@ -0,0 +1,171 @@
+#include "codegen_c_sort.h"
+
+/*
+ * We choose heapsort because it is about as fast as quicksort, avoids
+ * recursion, the code is compact which makes it practical to specialize for
+ * different vector types, it can sort the flatbuffer arrays in-place,
+ * and it has only a few places with comparisons. Furthermore, heapsort
+ * has a worst case O(n log n) upper bound where quicksort has O(n^2), which
+ * is an attack vector and could be a problem with large datasets.
+ * The sort is not stable.
+ *
+ * Some arguments are similar to those of the __%sfind_by_field macro.
+ *
+ * NS: The namespace
+ * N: the name of the vector type
+ * X: the name suffix when there are multiple sorts for same vector type.
+ * E: Element accessor (elem = E(vector, index)).
+ * L: Vector length.
+ * A: Field accessor (or the identity function), result must match the diff function D.
+ * TK: The scalar, enum or string key type (either the element, or a field of the element).
+ * TE: The raw element type - uoffset_t for tables and strings - as needed
+ * for swap.
+ * D: The diff function, but unlike __find_by_field, the second
+ * argument is returned by A, not a search key, and there is no third argument.
+ * S: Swap operation - must handle offset change when offset elements are moved.
+ */
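+/*
+ * Illustrative expansion sketch (hypothetical table and field names, not
+ * emitted by this file): for a table `Monster` with a scalar key field
+ * `hp`, the generated reader would invoke something like
+ *
+ *   __flatbuffers_define_table_sort_by_scalar_field(Monster, hp, uint16_t)
+ *
+ * which instantiates a heap sort over Monster_vec_t and exposes
+ * Monster_vec_sort_by_hp(), with N=Monster, NK=hp, TK=uint16_t and
+ * TE=uoffset_t in terms of the parameters documented above.
+ */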
+
+int gen_sort(fb_output_t *out)
+{
+ fprintf(out->fp,
+ "#define __%sheap_sort(N, X, A, E, L, TK, TE, D, S)\\\n"
+ "static inline void __ ## N ## X ## __heap_sift_down(\\\n"
+ " N ## _mutable_vec_t vec__tmp, size_t start__tmp, size_t end__tmp)\\\n"
+ "{ size_t child__tmp, root__tmp; TK v1__tmp, v2__tmp, vroot__tmp;\\\n"
+ " root__tmp = start__tmp;\\\n"
+ " while ((root__tmp << 1) <= end__tmp) {\\\n"
+ " child__tmp = root__tmp << 1;\\\n"
+ " if (child__tmp < end__tmp) {\\\n"
+ " v1__tmp = A(E(vec__tmp, child__tmp));\\\n"
+ " v2__tmp = A(E(vec__tmp, child__tmp + 1));\\\n"
+ " if (D(v1__tmp, v2__tmp) < 0) {\\\n"
+ " child__tmp++;\\\n"
+ " }\\\n"
+ " }\\\n"
+ " vroot__tmp = A(E(vec__tmp, root__tmp));\\\n"
+ " v1__tmp = A(E(vec__tmp, child__tmp));\\\n"
+ " if (D(vroot__tmp, v1__tmp) < 0) {\\\n"
+ " S(vec__tmp, root__tmp, child__tmp, TE);\\\n"
+ " root__tmp = child__tmp;\\\n"
+ " } else {\\\n"
+ " return;\\\n"
+ " }\\\n"
+ " }\\\n"
+ "}\\\n"
+ "static inline void __ ## N ## X ## __heap_sort(N ## _mutable_vec_t vec__tmp)\\\n"
+ "{ size_t start__tmp, end__tmp, size__tmp;\\\n"
+ " size__tmp = L(vec__tmp); if (size__tmp == 0) return; end__tmp = size__tmp - 1; start__tmp = size__tmp >> 1;\\\n"
+ " do { __ ## N ## X ## __heap_sift_down(vec__tmp, start__tmp, end__tmp); } while (start__tmp--);\\\n"
+ " while (end__tmp > 0) { \\\n"
+ " S(vec__tmp, 0, end__tmp, TE);\\\n"
+ " __ ## N ## X ## __heap_sift_down(vec__tmp, 0, --end__tmp); } }\n",
+ out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_sort_by_field(N, NK, TK, TE, D, S)\\\n"
+ " __%sheap_sort(N, _sort_by_ ## NK, N ## _ ## NK ## _get, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\\\n"
+ "static inline void N ## _vec_sort_by_ ## NK(N ## _mutable_vec_t vec__tmp)\\\n"
+ "{ __ ## N ## _sort_by_ ## NK ## __heap_sort(vec__tmp); }\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_sort(N, TK, TE, D, S)\\\n"
+ "__%sheap_sort(N, , __%sidentity, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\\\n"
+ "static inline void N ## _vec_sort(N ## _mutable_vec_t vec__tmp) { __ ## N ## __heap_sort(vec__tmp); }\n",
+ out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ /* Subtraction doesn't work for unsigned types. */
+ "#define __%sscalar_diff(x, y) ((x) < (y) ? -1 : (x) > (y))\n"
+ "#define __%sstring_diff(x, y) __%sstring_n_cmp((x), (const char *)(y), %sstring_len(y))\n",
+ out->nsc, out->nsc, out->nsc, out->nsc);
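+ /*
+ * Swapping offset elements (tables, strings) cannot copy the stored
+ * values directly: uoffsets are relative to their own location, so each
+ * swapped value is adjusted by the distance between the two slots.
+ */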
+ fprintf(out->fp,
+ "#define __%svalue_swap(vec, a, b, TE) { TE x__tmp = vec[b]; vec[b] = vec[a]; vec[a] = x__tmp; }\n"
+ "#define __%suoffset_swap(vec, a, b, TE)\\\n"
+ "{ TE ta__tmp, tb__tmp, d__tmp;\\\n"
+ " d__tmp = (TE)((a - b) * sizeof(vec[0]));\\\n"
+ " ta__tmp = __flatbuffers_uoffset_read_from_pe(vec + b) - d__tmp;\\\n"
+ " tb__tmp = __flatbuffers_uoffset_read_from_pe(vec + a) + d__tmp;\\\n"
+ " __flatbuffers_uoffset_write_to_pe(vec + a, ta__tmp);\\\n"
+ " __flatbuffers_uoffset_write_to_pe(vec + b, tb__tmp); }\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sscalar_swap(vec, a, b, TE) __%svalue_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sstring_swap(vec, a, b, TE) __%suoffset_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sstruct_swap(vec, a, b, TE) __%svalue_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%stable_swap(vec, a, b, TE) __%suoffset_swap(vec, a, b, TE)\n",
+ out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_struct_sort_by_scalar_field(N, NK, TK, TE)\\\n"
+ " __%sdefine_sort_by_field(N, NK, TK, TE, __%sscalar_diff, __%sstruct_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_table_sort_by_scalar_field(N, NK, TK)\\\n"
+ " __%sdefine_sort_by_field(N, NK, TK, %suoffset_t, __%sscalar_diff, __%stable_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_table_sort_by_string_field(N, NK)\\\n"
+ " __%sdefine_sort_by_field(N, NK, %sstring_t, %suoffset_t, __%sstring_diff, __%stable_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_scalar_sort(N, T) __%sdefine_sort(N, T, T, __%sscalar_diff, __%sscalar_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc);
+ fprintf(out->fp,
+ "#define __%sdefine_string_sort() __%sdefine_sort(%sstring, %sstring_t, %suoffset_t, __%sstring_diff, __%sstring_swap)\n",
+ out->nsc, out->nsc, out->nsc, out->nsc, out->nsc, out->nsc, out->nsc);
+ return 0;
+}
+
+/* reference implementation */
+#if 0
+
+/* from github swenson/sort */
+/* heap sort: based on wikipedia */
+static __inline void HEAP_SIFT_DOWN(SORT_TYPE *dst, const int64_t start, const int64_t end) {
+ int64_t root = start;
+
+ while ((root << 1) <= end) {
+ int64_t child = root << 1;
+
+ if ((child < end) && (SORT_CMP(dst[child], dst[child + 1]) < 0)) {
+ child++;
+ }
+
+ if (SORT_CMP(dst[root], dst[child]) < 0) {
+ SORT_SWAP(dst[root], dst[child]);
+ root = child;
+ } else {
+ return;
+ }
+ }
+}
+
+static __inline void HEAPIFY(SORT_TYPE *dst, const size_t size) {
+ int64_t start = size >> 1;
+
+ while (start >= 0) {
+ HEAP_SIFT_DOWN(dst, start, size - 1);
+ start--;
+ }
+}
+
+void HEAP_SORT(SORT_TYPE *dst, const size_t size) {
+ /* don't bother sorting an array of size 0 */
+ if (size == 0) {
+ return;
+ }
+
+ int64_t end = size - 1;
+ HEAPIFY(dst, size);
+
+ while (end > 0) {
+ SORT_SWAP(dst[end], dst[0]);
+ HEAP_SIFT_DOWN(dst, 0, end - 1);
+ end--;
+ }
+}
+
+#endif
diff --git a/flatcc/src/compiler/codegen_c_sort.h b/flatcc/src/compiler/codegen_c_sort.h
new file mode 100644
index 0000000..27f79c5
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_sort.h
@@ -0,0 +1,9 @@
+#ifndef CODEGEN_SORT_C_H
+#define CODEGEN_SORT_C_H
+
+#include "codegen_c.h"
+
+int __flatcc_gen_sort(fb_output_t *out);
+#define gen_sort __flatcc_gen_sort
+
+#endif /* CODEGEN_SORT_C_H */
diff --git a/flatcc/src/compiler/codegen_c_sorter.c b/flatcc/src/compiler/codegen_c_sorter.c
new file mode 100644
index 0000000..3c40b1a
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_sorter.c
@@ -0,0 +1,355 @@
+#include "codegen_c.h"
+
+#include "flatcc/flatcc_types.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+/* Used internally to identify sortable objects. */
+enum {
+ /* object contains at least one direct vector that needs sorting */
+ direct_sortable = 1,
+ /* object contains at least one indirect vector that needs sorting */
+ indirect_sortable = 2,
+ /* object contains at least one direct or indirect vector that needs sorting */
+ sortable = 3,
+};
+
+static int gen_union_sorter(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_sort(%s_mutable_union_t u)\n{\n switch (u.type) {\n",
+ snt.text, snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (member->type.ct->export_index & sortable) {
+ fprintf(out->fp,
+ " case %s_%.*s: %s_sort(u.value); break;\n",
+ snt.text, n, s, snref.text);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ fprintf(out->fp,
+ " default: break;\n }\n}\n\n");
+ return 0;
+}
+
+static int gen_table_sorter(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ const char *tname_prefix;
+ const char *nsc = out->nsc;
+ const char *s;
+ int n;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static void %s_sort(%s_mutable_table_t t)\n{\n",
+ snt.text, snt.text);
+
+ fprintf(out->fp, " if (!t) return;\n");
+ /* sort all children before sorting current table */
+ if (ct->export_index & indirect_sortable)
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_table_field(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ case fb_is_union:
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_union_field(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ default:
+ continue;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_table_vector_field_elements(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ case fb_is_union:
+ /* Although union vectors cannot be sorted, their content can be. */
+ if (!(member->type.ct->export_index & sortable)) continue;
+ fprintf(out->fp,
+ " __%ssort_union_vector_field_elements(%s, %.*s, %s, t);\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ default:
+ continue;
+ }
+ break;
+ }
+ }
+ if (ct->export_index & direct_sortable)
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ if (!(member->metadata_flags & fb_f_sorted)) continue;
+ switch (member->type.type) {
+ case vt_vector_type:
+ tname_prefix = scalar_type_prefix(member->type.st);
+ fprintf(out->fp,
+ " __%ssort_vector_field(%s, %.*s, %s%s, t)\n",
+ nsc, snt.text, n, s, nsc, tname_prefix);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ " __%ssort_vector_field(%s, %.*s, %s%s, t)\n",
+ nsc, snt.text, n, s, nsc, "string");
+ break;
+ case vt_vector_compound_type_ref:
+ if (!member->type.ct->primary_key) {
+ gen_panic(out, "internal error: unexpected type during code generation");
+ return -1;
+ }
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ case fb_is_struct:
+ fprintf(out->fp,
+ " __%ssort_vector_field(%s, %.*s, %s, t)\n",
+ nsc, snt.text, n, s, snref.text);
+ break;
+ /* Union vectors cannot be sorted. */
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ fprintf(out->fp, "}\n\n");
+ return 0;
+}
+
+static int gen_table_sorter_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+ fb_compound_type_t *ct;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ if (ct->export_index & sortable) {
+ fb_compound_name(ct, &snt);
+ fprintf(out->fp,
+ "static void %s_sort(%s_mutable_table_t t);\n",
+ snt.text, snt.text);
+ }
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_union_sorters(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ ct = (fb_compound_type_t *)sym;
+ if (ct->export_index & sortable) {
+ gen_union_sorter(out, ct);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int gen_table_sorters(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ if (ct->export_index & sortable) {
+ gen_table_sorter(out, ct);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return 1 if the table or union is known to be sortable,
+ * and 0 if that information is not available.
+ *
+ * Note that if neither a table nor its direct children have
+ * sortable vectors, the table might still be sortable via a
+ * union member or via deeper nested tables. By iterating
+ * repeatedly over all objects, the indirect_sortable
+ * property eventually propagates to all affected objects.
+ * At that point no object will change its return value
+ * on repeated calls.
+ */
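+/*
+ * Illustrative sketch (hypothetical schema, for exposition only): with
+ * `table A { b: B; } table B { xs: [ushort] (sorted); }`, a pass that
+ * visits A before B cannot yet see that B is sortable, so A is only
+ * marked indirect_sortable on a later pass once B's export_index is
+ * known - hence the fixed-point iteration in the caller.
+ */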
+static int mark_member_sortable(fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+ if (member->metadata_flags & fb_f_sorted) {
+ ct->export_index |= direct_sortable;
+ }
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ case fb_is_union:
+ break;
+ default:
+ continue;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ case fb_is_union:
+ break;
+ default:
+ continue;
+ }
+ break;
+ default:
+ continue;
+ }
+ if (member->type.ct->export_index & (sortable | indirect_sortable)) {
+ ct->export_index |= indirect_sortable;
+ }
+ }
+ return !!(ct->export_index & sortable);
+}
+
+static void init_sortable(fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_compound_type_ref:
+ case vt_vector_compound_type_ref:
+ member->type.ct->export_index = 0;
+ break;
+ default:
+ continue;
+ }
+ }
+ ct->export_index = 0;
+}
+
+/*
+ * Use fixed-point iteration to implement a breadth-first
+ * search for tables and unions that can be sorted. The
+ * problem is slightly tricky due to self-referential types:
+ * a graph colored depth-first search might terminate before
+ * it is known whether any non-direct descendants are
+ * sortable.
+ */
+static int mark_sortable(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ int old_count = -1, count = 0;
+
+ /* Initialize state kept in the custom export_index symbol table field. */
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ case fb_is_union:
+ init_sortable((fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ /* Perform fixed-point iteration search. */
+ while (old_count != count) {
+ old_count = count;
+ count = 0;
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ case fb_is_union:
+ count += mark_member_sortable((fb_compound_type_t *)sym);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+/* To be generated towards the end of _reader.h when sort option is active. */
+int fb_gen_c_sorter(fb_output_t *out)
+{
+ mark_sortable(out);
+ gen_table_sorter_prototypes(out);
+ gen_union_sorters(out);
+ gen_table_sorters(out);
+ return 0;
+}
diff --git a/flatcc/src/compiler/codegen_c_verifier.c b/flatcc/src/compiler/codegen_c_verifier.c
new file mode 100644
index 0000000..9b1a048
--- /dev/null
+++ b/flatcc/src/compiler/codegen_c_verifier.c
@@ -0,0 +1,327 @@
+#include "codegen_c.h"
+
+#include "flatcc/flatcc_types.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+static int gen_verifier_pretext(fb_output_t *out)
+{
+ fprintf(out->fp,
+ "#ifndef %s_VERIFIER_H\n"
+ "#define %s_VERIFIER_H\n",
+ out->S->basenameup, out->S->basenameup);
+
+ fprintf(out->fp, "\n/* " FLATCC_GENERATED_BY " */\n\n");
+ /* Needed to get the file identifiers */
+ fprintf(out->fp, "#ifndef %s_READER_H\n", out->S->basenameup);
+ fprintf(out->fp, "#include \"%s_reader.h\"\n", out->S->basename);
+ fprintf(out->fp, "#endif\n");
+ fprintf(out->fp, "#include \"flatcc/flatcc_verifier.h\"\n");
+ fb_gen_c_includes(out, "_verifier.h", "_VERIFIER_H");
+ gen_prologue(out);
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_verifier_footer(fb_output_t *out)
+{
+ gen_epilogue(out);
+ fprintf(out->fp,
+ "#endif /* %s_VERIFIER_H */\n",
+ out->S->basenameup);
+ return 0;
+}
+
+static int gen_union_verifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int n;
+ const char *s;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static int %s_union_verifier(flatcc_union_verifier_descriptor_t *ud)\n{\n switch (ud->type) {\n",
+ snt.text);
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ symbol_name(sym, &n, &s);
+ switch (member->type.type) {
+ case vt_missing:
+ /* NONE is of type vt_missing and already handled. */
+ continue;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ " case %u: return flatcc_verify_union_table(ud, %s_verify_table); /* %.*s */\n",
+ (unsigned)member->value.u, snref.text, n, s);
+ continue;
+ case fb_is_struct:
+ fprintf(out->fp,
+ " case %u: return flatcc_verify_union_struct(ud, %"PRIu64", %"PRIu16"); /* %.*s */\n",
+ (unsigned)member->value.u, member->type.ct->size, member->type.ct->align, n, s);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected compound type for union verifier");
+ return -1;
+ }
+ case vt_string_type:
+ fprintf(out->fp,
+ " case %u: return flatcc_verify_union_string(ud); /* %.*s */\n",
+ (unsigned)member->value.u, n, s);
+ continue;
+ default:
+ gen_panic(out, "internal error: unexpected type for union verifier");
+ return -1;
+ }
+ }
+ fprintf(out->fp,
+ " default: return flatcc_verify_ok;\n }\n}\n\n");
+ return 0;
+}
+
+static int gen_table_verifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ fb_scoped_name_t snt, snref;
+ int required, first = 1;
+ const char *nsc = out->nsc;
+
+ fb_clear(snt);
+ fb_clear(snref);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static int %s_verify_table(flatcc_table_verifier_descriptor_t *td)\n{\n",
+ snt.text);
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+
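+ /*
+ * Verifier calls are emitted as a chain: the first non-deprecated field
+ * opens `int ret; if ((ret = `, and each following field closes the
+ * previous call with `)) return ret;` before opening its own, so every
+ * generated check short-circuits on error.
+ */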
+ if (first) {
+ fprintf(out->fp, " int ret;\n if ((ret = ");
+ } else {
+ fprintf(out->fp, ")) return ret;\n if ((ret = ");
+ }
+ first = 0;
+ required = (member->metadata_flags & fb_f_required) != 0;
+ switch (member->type.type) {
+ case vt_scalar_type:
+ fprintf(
+ out->fp,
+ "flatcc_verify_field(td, %"PRIu64", %"PRIu64", %"PRIu16")",
+ member->id, member->size, member->align);
+ break;
+ case vt_vector_type:
+ if (member->nest) {
+ fb_compound_name((fb_compound_type_t *)&member->nest->symbol, &snref);
+ if (member->nest->symbol.kind == fb_is_table) {
+ fprintf(out->fp,
+ "flatcc_verify_table_as_nested_root(td, %"PRIu64", "
+ "%u, 0, %"PRIu16", %s_verify_table)",
+ member->id, required, member->align, snref.text);
+ } else {
+ fprintf(out->fp,
+ "flatcc_verify_struct_as_nested_root(td, %"PRIu64", "
+ "%u, 0, %"PRIu64", %"PRIu16")",
+ member->id, required, member->size, member->align);
+ }
+ } else {
+ fprintf(out->fp,
+ "flatcc_verify_vector_field(td, %"PRIu64", %d, %"PRIu64", %"PRIu16", INT64_C(%"PRIu64"))",
+ member->id, required, member->size, member->align, (uint64_t)FLATBUFFERS_COUNT_MAX(member->size));
+ }
+ break;
+ case vt_string_type:
+ fprintf(out->fp,
+ "flatcc_verify_string_field(td, %"PRIu64", %d)",
+ member->id, required);
+ break;
+ case vt_vector_string_type:
+ fprintf(out->fp,
+ "flatcc_verify_string_vector_field(td, %"PRIu64", %d)",
+ member->id, required);
+ break;
+ case vt_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_enum:
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_verify_field(td, %"PRIu64", %"PRIu64", %"PRIu16")",
+ member->id, member->size, member->align);
+ break;
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_verify_table_field(td, %"PRIu64", %d, &%s_verify_table)",
+ member->id, required, snref.text);
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_verify_union_field(td, %"PRIu64", %d, &%s_union_verifier)",
+ member->id, required, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected compound type for table verifier");
+ return -1;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ fb_compound_name(member->type.ct, &snref);
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ fprintf(out->fp,
+ "flatcc_verify_table_vector_field(td, %"PRIu64", %d, &%s_verify_table)",
+ member->id, required, snref.text);
+ break;
+ case fb_is_enum:
+ case fb_is_struct:
+ fprintf(out->fp,
+ "flatcc_verify_vector_field(td, %"PRIu64", %d, %"PRIu64", %"PRIu16", INT64_C(%"PRIu64"))",
+ member->id, required, member->size, member->align, (uint64_t)FLATBUFFERS_COUNT_MAX(member->size));
+ break;
+ case fb_is_union:
+ fprintf(out->fp,
+ "flatcc_verify_union_vector_field(td, %"PRIu64", %d, &%s_union_verifier)",
+ member->id, required, snref.text);
+ break;
+ default:
+ gen_panic(out, "internal error: unexpected vector compound type for table verifier");
+ return -1;
+ }
+ break;
+ }
+ fprintf(out->fp, " /* %.*s */", (int)sym->ident->len, sym->ident->text);
+ }
+ if (!first) {
+ fprintf(out->fp, ")) return ret;\n");
+ }
+ fprintf(out->fp, " return flatcc_verify_ok;\n");
+ fprintf(out->fp, "}\n\n");
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_table_as_root(buf, bufsiz, %s_identifier, &%s_verify_table);\n}\n\n",
+ snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_typed_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_table_as_root(buf, bufsiz, %s_type_identifier, &%s_verify_table);\n}\n\n",
+ snt.text, snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_verify_table_as_root(buf, bufsiz, fid, &%s_verify_table);\n}\n\n",
+ snt.text, snt.text);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, %sthash_t thash)\n"
+ "{\n return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &%s_verify_table);\n}\n\n",
+ snt.text, nsc, snt.text);
+ return 0;
+}
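+/*
+ * Usage sketch for the generated entry points (hypothetical table name):
+ * application code would typically check
+ *
+ *   Monster_verify_as_root(buf, bufsiz) == flatcc_verify_ok
+ *
+ * before reading an untrusted buffer with the corresponding reader.
+ */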
+
+static int gen_struct_verifier(fb_output_t *out, fb_compound_type_t *ct)
+{
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+ fb_compound_name(ct, &snt);
+
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_struct_as_root(buf, bufsiz, %s_identifier, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, snt.text, ct->size, ct->align);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_typed_root(const void *buf, size_t bufsiz)\n"
+ "{\n return flatcc_verify_struct_as_typed_root(buf, bufsiz, %s_type_hash, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, snt.text, ct->size, ct->align);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, %sthash_t thash)\n"
+ "{\n return flatcc_verify_struct_as_typed_root(buf, bufsiz, thash, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, out->nsc, ct->size, ct->align);
+ fprintf(out->fp,
+ "static inline int %s_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)\n"
+ "{\n return flatcc_verify_struct_as_root(buf, bufsiz, fid, %"PRIu64", %"PRIu16");\n}\n\n",
+ snt.text, ct->size, ct->align);
+ return 0;
+}
+
+static int gen_verifier_prototypes(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+ fb_scoped_name_t snt;
+
+ fb_clear(snt);
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ fb_compound_name((fb_compound_type_t *)sym, &snt);
+ fprintf(out->fp,
+ "static int %s_verify_table(flatcc_table_verifier_descriptor_t *td);\n",
+ snt.text);
+ }
+ }
+ fprintf(out->fp, "\n");
+ return 0;
+}
+
+static int gen_union_verifiers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_union:
+ gen_union_verifier(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_struct_verifiers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ gen_struct_verifier(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+static int gen_table_verifiers(fb_output_t *out)
+{
+ fb_symbol_t *sym;
+
+ for (sym = out->S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ gen_table_verifier(out, (fb_compound_type_t *)sym);
+ }
+ }
+ return 0;
+}
+
+int fb_gen_c_verifier(fb_output_t *out)
+{
+ gen_verifier_pretext(out);
+ gen_verifier_prototypes(out);
+ gen_union_verifiers(out);
+ gen_struct_verifiers(out);
+ gen_table_verifiers(out);
+ gen_verifier_footer(out);
+ return 0;
+}
diff --git a/flatcc/src/compiler/codegen_schema.c b/flatcc/src/compiler/codegen_schema.c
new file mode 100644
index 0000000..d0c9fde
--- /dev/null
+++ b/flatcc/src/compiler/codegen_schema.c
@@ -0,0 +1,581 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "flatcc/reflection/reflection_builder.h"
+#include "symbols.h"
+#include "parser.h"
+#include "codegen.h"
+#include "fileio.h"
+/* Needed to store length prefix. */
+#include "catalog.h"
+
+#define BaseType(x) FLATBUFFERS_WRAP_NAMESPACE(reflection_BaseType, x)
+
+static flatbuffers_bool_t is_optional_type(fb_value_t type, int optional, int required)
+{
+ if (required) return 0;
+ if (optional) return 1;
+ if (type.type == vt_scalar_type) return 0;
+ if (type.type == vt_compound_type_ref && type.ct->symbol.kind == fb_is_enum) return 0;
+ return 1;
+}
+
+static reflection_Type_ref_t export_type(flatcc_builder_t *B, fb_value_t type)
+{
+ fb_scalar_type_t st = fb_missing_type;
+ int32_t index = -1;
+ reflection_BaseType_enum_t base_type = BaseType(None);
+ reflection_BaseType_enum_t element = BaseType(None);
+ reflection_BaseType_enum_t primitive = BaseType(None);
+ uint16_t fixed_length = 0;
+
+ switch (type.type) {
+ case vt_scalar_type:
+ st = type.st;
+ break;
+ case vt_vector_type:
+ st = type.st;
+ base_type = BaseType(Vector);
+ break;
+ case vt_vector_string_type:
+ element = BaseType(String);
+ base_type = BaseType(Vector);
+ break;
+ case vt_vector_compound_type_ref:
+ index = (int32_t)type.ct->export_index;
+ switch (type.ct->symbol.kind) {
+ case fb_is_enum:
+ st = type.ct->type.st;
+ base_type = BaseType(Vector);
+ break;
+ case fb_is_struct:
+ case fb_is_table:
+ base_type = BaseType(Vector);
+ element = BaseType(Obj);
+ break;
+ case fb_is_union:
+ base_type = BaseType(Vector);
+ element = BaseType(Union);
+ break;
+ default:
+ break;
+ }
+ break;
+ case vt_string_type:
+ base_type = BaseType(String);
+ break;
+ case vt_compound_type_ref:
+ index = (int32_t)type.ct->export_index;
+ switch (type.ct->symbol.kind) {
+ case fb_is_enum:
+ st = type.ct->type.st;
+ break;
+ case fb_is_struct:
+ case fb_is_table:
+ base_type = BaseType(Obj);
+ break;
+ case fb_is_union:
+ base_type = BaseType(Union);
+ break;
+ default:
+ index = -1;
+ break;
+ }
+ break;
+ case vt_fixed_array_type:
+ st = type.st;
+ base_type = BaseType(Array);
+ fixed_length = (uint16_t)type.len;
+ break;
+ case vt_fixed_array_string_type:
+ element = BaseType(Byte);
+ base_type = BaseType(Array);
+ fixed_length = (uint16_t)type.len;
+ break;
+ case vt_fixed_array_compound_type_ref:
+ index = (int32_t)type.ct->export_index;
+ switch (type.ct->symbol.kind) {
+ case fb_is_enum:
+ st = type.ct->type.st;
+ break;
+ case fb_is_struct:
+ case fb_is_table:
+ element = BaseType(Obj);
+ break;
+ case fb_is_union:
+ element = BaseType(Union);
+ break;
+ default:
+ break;
+ }
+ base_type = BaseType(Array);
+ fixed_length = (uint16_t)type.len;
+ break;
+ default:
+ break;
+ }
+ /* If st is set, resolve scalar type and set it to base_type or element. */
+ switch (st) {
+ case fb_missing_type: break;
+ case fb_ulong: primitive = BaseType(ULong); break;
+ case fb_uint: primitive = BaseType(UInt); break;
+ case fb_ushort: primitive = BaseType(UShort); break;
+ case fb_ubyte: primitive = BaseType(UByte); break;
+ case fb_bool: primitive = BaseType(Bool); break;
+ case fb_long: primitive = BaseType(Long); break;
+ case fb_int: primitive = BaseType(Int); break;
+ case fb_short: primitive = BaseType(Short); break;
+ case fb_byte: primitive = BaseType(Byte); break;
+ case fb_double: primitive = BaseType(Double); break;
+ case fb_float: primitive = BaseType(Float); break;
+ /* TODO: Google's flatc tool does not have char arrays, so we use Byte as the element type. */
+ case fb_char: primitive = BaseType(Byte); break;
+ default: break;
+ }
+
+ if (base_type == BaseType(None)) {
+ base_type = primitive;
+ } else if (base_type == BaseType(Vector) || base_type == BaseType(Array)) {
+ if (element == BaseType(None)) {
+ element = primitive;
+ }
+ }
+ return reflection_Type_create(B, base_type, element, index, fixed_length);
+}
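+/*
+ * Illustrative mapping (assumed field types, for exposition): a `[ushort]`
+ * field becomes Type { base_type: Vector, element: UShort, index: -1 },
+ * while a field referencing a table with export_index 3 becomes
+ * Type { base_type: Obj, index: 3 }.
+ */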
+
+static void export_attributes(flatcc_builder_t *B, fb_metadata_t *m)
+{
+ for (; m; m = m->link) {
+ reflection_KeyValue_vec_push_start(B);
+ reflection_KeyValue_key_create_strn(B, m->ident->text, (size_t)m->ident->len);
+ if (m->value.type == vt_string) {
+ reflection_KeyValue_value_create_strn(B, m->value.s.s, (size_t)m->value.s.len);
+ }
+ reflection_KeyValue_vec_push_end(B);
+ }
+}
+
+static void export_fields(flatcc_builder_t *B, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+ flatbuffers_bool_t has_key, deprecated, required, optional, key_processed = 0;
+ int64_t default_integer;
+ double default_real;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ /*
+ * Unlike `flatc` we allow multiple keys in the parser, but
+ * there is no way to tell which key is default in the
+ * reflection schema because the fields are sorted, so we only
+ * export the default (first) key.
+ */
+ has_key = !key_processed && (member->metadata_flags & fb_f_key) != 0;
+ required = (member->metadata_flags & fb_f_required) != 0;
+ default_integer = 0;
+ default_real = 0.0;
+ deprecated = (member->metadata_flags & fb_f_deprecated) != 0;
+ /*
+ * Flag is only set when `= null` is used in the schema, but
+ * non-scalar types are optional by default and therefore also
+ * true in the binary schema.
+ */
+ optional = is_optional_type(member->type, !!(member->flags & fb_fm_optional), required);
+
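+ /*
+ * A union member is exported as two reflection fields: a hidden
+ * `<name>_type` discriminator at id - 1 followed by the value field at
+ * id, matching the two field slots a union occupies in a table.
+ */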
+ if ((member->type.type == vt_compound_type_ref || member->type.type == vt_vector_compound_type_ref)
+ && member->type.ct->symbol.kind == fb_is_union) {
+ reflection_Field_vec_push_start(B);
+ reflection_Field_name_start(B);
+ reflection_Field_name_append(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ reflection_Field_name_append(B, "_type", 5);
+ reflection_Field_name_end(B);
+ switch(member->type.type) {
+ case vt_compound_type_ref:
+ reflection_Field_type_create(B, BaseType(UType), BaseType(None), -1, 0);
+ break;
+ case vt_vector_compound_type_ref:
+ reflection_Field_type_create(B, BaseType(Vector), BaseType(UType), -1, 0);
+ break;
+ }
+ reflection_Field_offset_add(B, (uint16_t)(member->id - 1 + 2) * sizeof(flatbuffers_voffset_t));
+ reflection_Field_id_add(B, (uint16_t)(member->id - 1));
+ reflection_Field_deprecated_add(B, deprecated);
+ reflection_Field_vec_push_end(B);
+ }
+ reflection_Field_vec_push_start(B);
+ reflection_Field_name_create(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ reflection_Field_type_add(B, export_type(B, member->type));
+ switch (ct->symbol.kind) {
+ case fb_is_table:
+ switch (member->value.type) {
+ case vt_uint:
+ default_integer = (int64_t)member->value.u;
+ break;
+ case vt_int:
+ default_integer = (int64_t)member->value.i;
+ break;
+ case vt_bool:
+ default_integer = (int64_t)member->value.b;
+ break;
+ case vt_float:
+ default_real = member->value.f;
+ break;
+ }
+ reflection_Field_default_integer_add(B, default_integer);
+ reflection_Field_default_real_add(B, default_real);
+ reflection_Field_id_add(B, (uint16_t)member->id);
+ reflection_Field_offset_add(B, (uint16_t)(member->id + 2) * sizeof(flatbuffers_voffset_t));
+ reflection_Field_key_add(B, has_key);
+ reflection_Field_required_add(B, required);
+ reflection_Field_optional_add(B, optional);
+ break;
+ case fb_is_struct:
+ reflection_Field_offset_add(B, (uint16_t)member->offset);
+ break;
+ default: break;
+ }
+ /* Deprecated struct fields are not supported by `flatc` but are offered here as an option. */
+ reflection_Field_deprecated_add(B, deprecated);
+ if (member->metadata) {
+ reflection_Field_attributes_start(B);
+ export_attributes(B, member->metadata);
+ reflection_Field_attributes_end(B);
+ }
+ reflection_Field_vec_push_end(B);
+ key_processed |= has_key;
+ }
+}
+
+/* `vec` is filled with references to the constructed objects. */
+static void export_objects(flatcc_builder_t *B, object_entry_t *objects, int nobjects,
+ reflection_Object_ref_t *object_map)
+{
+ int i, is_struct;
+ fb_compound_type_t *ct;
+
+ for (i = 0; i < nobjects; ++i) {
+ ct = objects[i].ct;
+ reflection_Object_start(B);
+ reflection_Object_name_create_str(B, objects[i].name);
+ /*
+ * We can sort the fields after the buffer is built because their index
+ * is not used, unlike objects and enums.
+ */
+ reflection_Object_fields_start(B);
+ export_fields(B, ct);
+ reflection_Object_fields_end(B);
+ is_struct = ct->symbol.kind == fb_is_struct;
+ if (is_struct) {
+ reflection_Object_bytesize_add(B, (int32_t)ct->size);
+ }
+ reflection_Object_is_struct_add(B, (flatbuffers_bool_t)is_struct);
+ reflection_Object_minalign_add(B, ct->align);
+ if (ct->metadata) {
+ reflection_Object_attributes_start(B);
+ export_attributes(B, ct->metadata);
+ reflection_Object_attributes_end(B);
+ }
+ object_map[i] = reflection_Object_end(B);
+ }
+ reflection_Schema_objects_create(B, object_map, (size_t)nobjects);
+}
+
+static void export_enumval(flatcc_builder_t *B, fb_member_t *member, reflection_Object_ref_t *object_map)
+{
+ int is_union = object_map != 0;
+
+ reflection_EnumVal_vec_push_start(B);
+ reflection_EnumVal_name_create(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ if (is_union) {
+ if (member->type.type == vt_compound_type_ref) {
+ /* object is deprecated in favor of union_type to support mixed union types. */
+ reflection_EnumVal_object_add(B, object_map[member->type.ct->export_index]);
+ }
+ reflection_EnumVal_union_type_add(B, export_type(B, member->type));
+ }
+ reflection_EnumVal_value_add(B, (int64_t)member->value.u);
+ reflection_EnumVal_vec_push_end(B);
+}
+
+static void export_enums(flatcc_builder_t *B, enum_entry_t *enums, int nenums,
+ reflection_Object_ref_t *object_map)
+{
+ int i, is_union;
+ fb_compound_type_t *ct;
+ fb_symbol_t *sym;
+
+ reflection_Schema_enums_start(B);
+ for (i = 0; i < nenums; ++i) {
+ ct = enums[i].ct;
+ is_union = ct->symbol.kind == fb_is_union;
+ reflection_Enum_vec_push_start(B);
+ reflection_Enum_name_create_str(B, enums[i].name);
+ reflection_Enum_values_start(B);
+ for (sym = ct->members; sym; sym = sym->link) {
+ export_enumval(B, (fb_member_t *)sym, is_union ? object_map : 0);
+ }
+ reflection_Enum_values_end(B);
+ reflection_Enum_is_union_add(B, (flatbuffers_bool_t)is_union);
+ reflection_Enum_underlying_type_add(B, export_type(B, ct->type));
+ if (ct->metadata) {
+ reflection_Enum_attributes_start(B);
+ export_attributes(B, ct->metadata);
+ reflection_Enum_attributes_end(B);
+ }
+ reflection_Enum_vec_push_end(B);
+ }
+ reflection_Schema_enums_end(B);
+}
+
+static void export_root_type(flatcc_builder_t *B, fb_symbol_t * root_type,
+ reflection_Object_ref_t *object_map)
+{
+ fb_compound_type_t *ct;
+ if (root_type) {
+ /*
+ * We could also store a struct object here, but since the
+ * binary schema says root_table, not root_type as in the text
+ * schema, it would be misleading.
+ */
+ if (root_type->kind == fb_is_table) {
+ ct = (fb_compound_type_t *)root_type;
+ reflection_Schema_root_table_add(B, object_map[ct->export_index]);
+ }
+ }
+}
+
+static void export_call(flatcc_builder_t *B, fb_member_t *member, reflection_Object_ref_t *object_map)
+{
+ reflection_RPCCall_vec_push_start(B);
+ reflection_RPCCall_name_create(B, member->symbol.ident->text, (size_t)member->symbol.ident->len);
+ reflection_RPCCall_request_add(B, object_map[member->req_type.ct->export_index]);
+ reflection_RPCCall_response_add(B, object_map[member->type.ct->export_index]);
+ if (member->metadata) {
+ reflection_RPCCall_attributes_start(B);
+ export_attributes(B, member->metadata);
+ reflection_RPCCall_attributes_end(B);
+ }
+ reflection_RPCCall_vec_push_end(B);
+}
+
+static void export_services(flatcc_builder_t *B, service_entry_t *services, int nservices,
+ reflection_Object_ref_t *object_map)
+{
+ int i;
+ fb_compound_type_t *ct;
+ fb_symbol_t *sym;
+
+ reflection_Schema_services_start(B);
+ for (i = 0; i < nservices; ++i) {
+ ct = services[i].ct;
+ reflection_Service_vec_push_start(B);
+ reflection_Service_name_create_str(B, services[i].name);
+ reflection_Service_calls_start(B);
+ for (sym = ct->members; sym; sym = sym->link) {
+ export_call(B, (fb_member_t *)sym, object_map);
+ }
+ reflection_Service_calls_end(B);
+ if (ct->metadata) {
+ reflection_Service_attributes_start(B);
+ export_attributes(B, ct->metadata);
+ reflection_Service_attributes_end(B);
+ }
+ reflection_Service_vec_push_end(B);
+ }
+ reflection_Schema_services_end(B);
+}
+
+static int export_schema(flatcc_builder_t *B, fb_options_t *opts, fb_schema_t *S)
+{
+ catalog_t catalog;
+ reflection_Object_ref_t *object_map = 0;
+
+ if (build_catalog(&catalog, S, opts->bgen_qualify_names, &S->root_schema->scope_index)) {
+ return -1;
+ }
+
+ if (catalog.nobjects > 0 && !(object_map = malloc((size_t)catalog.nobjects * sizeof(object_map[0])))) {
+ clear_catalog(&catalog);
+ return -1;
+ }
+
+ /* Build the schema. */
+
+ if (opts->bgen_length_prefix) {
+ reflection_Schema_start_as_root_with_size(B);
+ } else {
+ reflection_Schema_start_as_root(B);
+ }
+ if (S->file_identifier.type == vt_string) {
+ reflection_Schema_file_ident_create(B,
+ S->file_identifier.s.s, (size_t)S->file_identifier.s.len);
+ }
+ if (S->file_extension.type == vt_string) {
+ reflection_Schema_file_ext_create(B,
+ S->file_extension.s.s, (size_t)S->file_extension.s.len);
+ }
+ export_objects(B, catalog.objects, catalog.nobjects, object_map);
+ export_enums(B, catalog.enums, catalog.nenums, object_map);
+ export_root_type(B, S->root_type.type, object_map);
+ export_services(B, catalog.services, catalog.nservices, object_map);
+
+ reflection_Schema_end_as_root(B);
+
+ /* Clean up support data structures. */
+
+ clear_catalog(&catalog);
+ if (object_map) {
+ free(object_map);
+ }
+ return 0;
+}
+
+/*
+ * We do not sort attributes because we would lose ordering
+ * information between different attributes, and between same-named
+ * attributes, because the sort is not stable.
+ *
+ * The C bindings have a scan interface that can find attributes
+ * in order of appearance.
+ *
+ * Field sorting is done on the finished buffer.
+ */
+static void sort_objects(void *buffer)
+{
+ size_t i;
+ reflection_Schema_table_t schema;
+ reflection_Object_vec_t objects;
+ reflection_Object_table_t object;
+ reflection_Field_vec_t fields;
+ reflection_Field_mutable_vec_t mfields;
+
+ schema = reflection_Schema_as_root(buffer);
+ objects = reflection_Schema_objects(schema);
+ for (i = 0; i < reflection_Object_vec_len(objects); ++i) {
+ object = reflection_Object_vec_at(objects, i);
+ fields = reflection_Object_fields(object);
+ if (fields) {
+ mfields = (reflection_Field_mutable_vec_t)fields;
+ reflection_Field_vec_sort(mfields);
+ }
+ }
+}
+
+static FILE *open_file(fb_options_t *opts, fb_schema_t *S)
+{
+ FILE *fp = 0;
+ char *path = 0, *ext = 0;
+ const char *prefix = opts->outpath ? opts->outpath : "";
+ size_t len, prefix_len = strlen(prefix);
+ const char *name;
+
+ name = S->basename;
+ len = strlen(name);
+
+ ext = fb_create_path_ext(".", flatbuffers_extension);
+ /* We generally should not use cgen options here, but in this case it makes sense. */
+ if (opts->gen_stdout) {
+ return stdout;
+ }
+ checkmem((path = fb_create_join_path_n(prefix, prefix_len, name, len, ext, 1)));
+ fp = fopen(path, "wb");
+ if (!fp) {
+ fprintf(stderr, "error opening file for writing binary schema: %s\n", path);
+ }
+ free(path);
+ free(ext);
+ return fp;
+}
+
+static void close_file(FILE *fp)
+{
+ if (fp && fp != stdout) {
+ fclose(fp);
+ }
+}
+
+/*
+ * Normally enums are required to be ascending in the schema and
+ * therefore there is no need to sort enums. If not, we export them in
+ * the order defined anyway because there is no well-defined ordering
+ * and blindly sorting the content would just lose more information.
+ *
+ * In conclusion: find by enum value is only supported when enums are
+ * defined in consecutive order.
+ *
+ * refers to: `opts->ascending_enum`
+ *
+ * `size` must hold the maximum buffer size.
+ * Returns the input buffer if successful and updates the size argument.
+ */
+void *fb_codegen_bfbs_to_buffer(fb_options_t *opts, fb_schema_t *S, void *buffer, size_t *size)
+{
+ flatcc_builder_t builder, *B;
+
+ B = &builder;
+ flatcc_builder_init(B);
+ export_schema(B, opts, S);
+ if (!flatcc_builder_copy_buffer(B, buffer, *size)) {
+ goto done;
+ }
+ sort_objects(buffer);
+done:
+ *size = flatcc_builder_get_buffer_size(B);
+ flatcc_builder_clear(B);
+ return buffer;
+}
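+/*
+ * Caller-side sketch (hedged; `opts` and `S` are assumed to be already
+ * parsed, and `buf` is a caller-provided array):
+ *
+ *   size_t size = sizeof(buf);
+ *   fb_codegen_bfbs_to_buffer(opts, S, buf, &size);
+ *   if (size > sizeof(buf)) {
+ *       ... the buffer was too small; retry with at least `size` bytes.
+ *   }
+ *
+ * The updated size reflects the required buffer size even when the copy
+ * did not fit.
+ */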
+
+/*
+ * Like to_buffer, but returns allocated buffer.
+ * Updates size argument with buffer size if not null.
+ * Returned buffer must be deallocated with `free`.
+ * The buffer is malloc aligned which should suffice for reflection buffers.
+ */
+void *fb_codegen_bfbs_alloc_buffer(fb_options_t *opts, fb_schema_t *S, size_t *size)
+{
+ flatcc_builder_t builder, *B;
+ void *buffer = 0;
+
+ B = &builder;
+ flatcc_builder_init(B);
+ if (export_schema(B, opts, S)) {
+ goto done;
+ }
+ if (!(buffer = flatcc_builder_finalize_buffer(B, size))) {
+ goto done;
+ }
+ sort_objects(buffer);
+done:
+ flatcc_builder_clear(B);
+ return buffer;
+}
+
+int fb_codegen_bfbs_to_file(fb_options_t *opts, fb_schema_t *S)
+{
+ void *buffer;
+ size_t size;
+ FILE *fp;
+ int ret = -1;
+
+ fp = open_file(opts, S);
+ if (!fp) {
+ return -1;
+ }
+ buffer = fb_codegen_bfbs_alloc_buffer(opts, S, &size);
+ if (!buffer) {
+ fprintf(stderr, "failed to generate binary schema\n");
+ goto done;
+ }
+ if (size != fwrite(buffer, 1, size, fp)) {
+ fprintf(stderr, "could not write binary schema to file\n");
+ goto done;
+ }
+ ret = 0;
+done:
+ if (buffer) {
+ free(buffer);
+ }
+ close_file(fp);
+ return ret;
+}
diff --git a/flatcc/src/compiler/coerce.c b/flatcc/src/compiler/coerce.c
new file mode 100644
index 0000000..6ab12a6
--- /dev/null
+++ b/flatcc/src/compiler/coerce.c
@@ -0,0 +1,266 @@
+#include "coerce.h"
+
+/*
+ * Be aware that some value variants represent actual values (e.g.
+ * vt_int), and others represent a type (e.g. vt_scalar) which holds a
+ * type identifier token. Here we implicitly expect a vt_scalar type as
+ * first argument, but only receive the token. The second argument is a
+ * value literal. Our job is to decide if the value fits within the
+ * given type. Our internal representation already ensures that the value
+ * fits within a 64-bit signed or unsigned integer, or a double; otherwise
+ * the parser would have set vt_invalid type on the value.
+ *
+ * If the value is invalid, success is returned because the
+ * error is presumably already generated. If the value is some other
+ * type than expected, an error is generated.
+ *
+ * Symbolic names are not allowed as values here.
+ *
+ * Converts positive signed integers to unsigned type, unsigned integers
+ * to signed types when they fit, and integers to floats.
+ *
+ * Optionally allows 1 to be assigned as true and 0 as false, and vice
+ * versa when allow_boolean_conversion is enabled.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+int fb_coerce_scalar_type(fb_parser_t *P, fb_symbol_t *sym, fb_scalar_type_t st, fb_value_t *value)
+{
+ double d;
+ float f;
+
+ if (!value->type) {
+ return 0;
+ }
+ /*
+ * The parser only produces negative vt_int values, which simplifies
+ * the logic, but to make this operation robust against multiple
+ * coercion steps, we first convert back to uint if the assumption turns
+ * out false.
+ */
+ if (value->type == vt_int && value->i >= 0) {
+ value->type = vt_uint;
+ value->u = (uint64_t)value->i;
+ }
+ if (value->type == vt_invalid) {
+ /* Silently ignore past errors. */
+ return 0;
+ }
+ if (value->type == vt_bool && st != fb_bool && P->opts.allow_boolean_conversion) {
+ value->type = vt_uint;
+ value->u = (uint64_t)value->b;
+ assert(value->u == 1 || value->u == 0);
+ }
+ switch (st) {
+ case fb_ulong:
+ if (value->type != vt_uint) {
+            error_sym(P, sym, "64-bit unsigned long type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_uint:
+ if (value->type != vt_uint) {
+ error_sym(P, sym, "32-bit unsigned int type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ if (value->u > UINT32_MAX) {
+ error_sym(P, sym, "32-bit unsigned int overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_ushort:
+ if (value->type != vt_uint) {
+ error_sym(P, sym, "16-bit unsigned short type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ if (value->u > UINT16_MAX) {
+ error_sym(P, sym, "16-bit unsigned short overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_char:
+        /* Whether plain C char is signed is implementation-defined; flatcc coerces char like an unsigned byte here. */
+ case fb_ubyte:
+ if (value->type != vt_uint) {
+ error_sym(P, sym, "8-bit unsigned byte type only accepts unsigned integers");
+ value->type = vt_invalid;
+ return -1;
+ }
+ if (value->u > UINT8_MAX) {
+ error_sym(P, sym, "8-bit unsigned byte overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_long:
+ if (value->type == vt_int) {
+ /* Native format is always ok, or parser would have failed. */
+ return 0;
+ }
+ if (value->type == vt_uint) {
+ if (value->u >= (1ULL << 63)) {
+                error_sym(P, sym, "64-bit signed long overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+        error_sym(P, sym, "64-bit signed long type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_int:
+ if (value->type == vt_int) {
+ if (value->i < INT32_MIN) {
+ error_sym(P, sym, "32-bit signed int underflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ }
+ if (value->type == vt_uint) {
+            if (value->u > INT32_MAX) {
+ error_sym(P, sym, "32-bit signed int overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+ error_sym(P, sym, "32-bit signed int type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_short:
+ if (value->type == vt_int) {
+ if (value->i < INT16_MIN) {
+ error_sym(P, sym, "16-bit signed short underflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ }
+ if (value->type == vt_uint) {
+            if (value->u > INT16_MAX) {
+ error_sym(P, sym, "16-bit signed short overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+ error_sym(P, sym, "16-bit signed short type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_byte:
+ if (value->type == vt_int) {
+ if (value->i < INT8_MIN) {
+ error_sym(P, sym, "8-bit signed byte underflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ }
+ if (value->type == vt_uint) {
+            if (value->u > INT8_MAX) {
+ error_sym(P, sym, "8-bit signed byte overflow");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->i = (int64_t)value->u;
+ value->type = vt_int;
+ return 0;
+ }
+ error_sym(P, sym, "8-bit signed byte type only accepts integers");
+ value->type = vt_invalid;
+ return -1;
+ case fb_bool:
+ if (value->type == vt_uint && P->opts.allow_boolean_conversion) {
+ if (value->u > 1) {
+ error_sym(P, sym, "boolean integer conversion only accepts 0 (false) or 1 (true)");
+ value->type = vt_invalid;
+ return -1;
+ }
+ } else if (value->type != vt_bool) {
+ error_sym(P, sym, "boolean type only accepts 'true' or 'false' as values");
+ value->type = vt_invalid;
+ return -1;
+ }
+ return 0;
+ case fb_double:
+ switch (value->type) {
+ case vt_int:
+ d = (double)value->i;
+ if ((int64_t)d != value->i) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 64-bit double type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = d;
+ value->type = vt_float;
+ return 0;
+ case vt_uint:
+ d = (double)value->u;
+ if ((uint64_t)d != value->u) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 64-bit double type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = d;
+ value->type = vt_float;
+ return 0;
+ case vt_float:
+            /* Double is our internal repr., so no loss at this point. */
+ return 0;
+ default:
+ error_sym(P, sym, "64-bit double type only accepts integer and float values");
+ value->type = vt_invalid;
+ return -1;
+ }
+ case fb_float:
+ switch (value->type) {
+ case vt_int:
+ f = (float)value->i;
+ if ((int64_t)f != value->i) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 32-bit float type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = f;
+ value->type = vt_float;
+ return 0;
+ case vt_uint:
+ f = (float)value->u;
+ if ((uint64_t)f != value->u) {
+ /* We could make this a warning. */
+ error_sym(P, sym, "precision loss in 32-bit float type assignment");
+ value->type = vt_invalid;
+ return -1;
+ }
+ value->f = f;
+ value->type = vt_float;
+ return 0;
+ case vt_float:
+ return 0;
+ default:
+ error_sym(P, sym, "32-bit float type only accepts integer and float values");
+ value->type = vt_invalid;
+ return -1;
+ }
+ default:
+ error_sym(P, sym, "scalar type expected");
+ value->type = vt_invalid;
+ return -1;
+ }
+}
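+
+/*
+ * Illustrative coercion outcomes for default value assignments
+ * (the schema fragments are hypothetical):
+ *
+ *     x: ubyte = 200;    // ok: fits 8-bit unsigned
+ *     x: ubyte = 300;    // error: 8-bit unsigned byte overflow
+ *     x: byte  = -1;     // ok: within INT8_MIN..INT8_MAX
+ *     x: bool  = 1;      // ok only with allow_boolean_conversion
+ *     x: float = 2;      // ok: integer converted exactly to float
+ */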
+
diff --git a/flatcc/src/compiler/coerce.h b/flatcc/src/compiler/coerce.h
new file mode 100644
index 0000000..91c43f2
--- /dev/null
+++ b/flatcc/src/compiler/coerce.h
@@ -0,0 +1,13 @@
+#ifndef COERCE_H
+#define COERCE_H
+
+#include <assert.h>
+
+#include "symbols.h"
+#include "parser.h"
+
+int __flatcc_fb_coerce_scalar_type(fb_parser_t *P,
+ fb_symbol_t *sym, fb_scalar_type_t st, fb_value_t *value);
+#define fb_coerce_scalar_type __flatcc_fb_coerce_scalar_type
+
+#endif /* COERCE_H */
diff --git a/flatcc/src/compiler/fileio.c b/flatcc/src/compiler/fileio.c
new file mode 100644
index 0000000..56d88c1
--- /dev/null
+++ b/flatcc/src/compiler/fileio.c
@@ -0,0 +1,225 @@
+#include <string.h>
+#include <stdio.h>
+
+/* Ensures portable headers are included such as inline. */
+#include "config.h"
+#include "fileio.h"
+#include "pstrutil.h"
+
+char *fb_copy_path_n(const char *path, size_t len)
+{
+ size_t n;
+ char *s;
+
+ n = strnlen(path, len);
+ if ((s = malloc(n + 1))) {
+ memcpy(s, path, n);
+ s[n] = '\0';
+ }
+ return s;
+}
+
+char *fb_copy_path(const char *path)
+{
+ size_t n;
+ char *s;
+
+ n = strlen(path);
+ if ((s = malloc(n + 1))) {
+ memcpy(s, path, n);
+ s[n] = '\0';
+ }
+ return s;
+}
+
+size_t fb_chomp(const char *path, size_t len, const char *ext)
+{
+ size_t ext_len = ext ? strlen(ext) : 0;
+ if (len > ext_len && 0 == strncmp(path + len - ext_len, ext, ext_len)) {
+ len -= ext_len;
+ }
+ return len;
+}
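+
+/*
+ * Illustrative behavior (hypothetical inputs):
+ *
+ *     fb_chomp("monster.fbs", 11, ".fbs")  -> 7   (".fbs" stripped)
+ *     fb_chomp("monster", 7, ".fbs")       -> 7   (not a suffix, unchanged)
+ *     fb_chomp(".fbs", 4, ".fbs")          -> 4   (a bare extension is kept)
+ */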
+
+char *fb_create_join_path_n(const char *prefix, size_t prefix_len,
+ const char *suffix, size_t suffix_len, const char *ext, int path_sep)
+{
+ char *path;
+ size_t ext_len = ext ? strlen(ext) : 0;
+ size_t n;
+
+ if (!prefix ||
+ (suffix_len > 0 && (suffix[0] == '/' || suffix[0] == '\\')) ||
+ (suffix_len > 1 && suffix[1] == ':')) {
+ prefix_len = 0;
+ }
+ if (path_sep && (prefix_len == 0 ||
+ (prefix[prefix_len - 1] == '/' || prefix[prefix_len - 1] == '\\'))) {
+ path_sep = 0;
+ }
+ path = malloc(prefix_len + !!path_sep + suffix_len + ext_len + 1);
+ if (!path) {
+ return 0;
+ }
+ n = 0;
+ if (prefix_len > 0) {
+ memcpy(path, prefix, prefix_len);
+ n += prefix_len;
+ }
+ if (path_sep) {
+ path[n++] = '/';
+ }
+ memcpy(path + n, suffix, suffix_len);
+ n += suffix_len;
+ memcpy(path + n, ext, ext_len);
+ n += ext_len;
+ path[n] = '\0';
+ return path;
+}
+
+char *fb_create_join_path(const char *prefix, const char *suffix, const char *ext, int path_sep)
+{
+ return fb_create_join_path_n(prefix, prefix ? strlen(prefix) : 0,
+ suffix, suffix ? strlen(suffix) : 0, ext, path_sep);
+}
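+
+/*
+ * Illustrative joins (hypothetical arguments; returned strings must be
+ * freed by the caller):
+ *
+ *     fb_create_join_path("out", "monster", ".bfbs", 1)  -> "out/monster.bfbs"
+ *     fb_create_join_path("out/", "monster", ".bfbs", 1) -> "out/monster.bfbs"
+ *     fb_create_join_path("out", "/abs/m", ".bfbs", 1)   -> "/abs/m.bfbs"
+ *
+ * An absolute or drive-prefixed suffix discards the prefix entirely.
+ */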
+
+char *fb_create_path_ext_n(const char *path, size_t path_len, const char *ext)
+{
+ return fb_create_join_path_n(0, 0, path, path_len, ext, 0);
+}
+
+char *fb_create_path_ext(const char *path, const char *ext)
+{
+ return fb_create_join_path(0, path, ext, 0);
+}
+
+char *fb_create_make_path_n(const char *path, size_t len)
+{
+ size_t i, j, n;
+ char *s;
+
+ if (len == 1 && (path[0] == ' ' || path[0] == '\\')) {
+ if (!(s = malloc(3))) {
+ return 0;
+ }
+ s[0] = '\\';
+ s[1] = path[0];
+ s[2] = '\0';
+ return s;
+ }
+ if (len <= 1) {
+ return fb_copy_path_n(path, len);
+ }
+ for (i = 0, n = len; i < len - 1; ++i) {
+ if (path[i] == '\\' && path[i + 1] == ' ') {
+ ++n;
+ }
+ n += path[i] == ' ';
+ }
+ n += path[i] == ' ';
+ if (!(s = malloc(n + 1))) {
+ return 0;
+ }
+ for (i = 0, j = 0; i < len - 1; ++i, ++j) {
+ if (path[i] == '\\' && path[i + 1] == ' ') {
+ s[j++] = '\\';
+ }
+ if (path[i] == ' ') {
+ s[j++] = '\\';
+ }
+ s[j] = path[i];
+ }
+ if (path[i] == ' ') {
+ s[j++] = '\\';
+ }
+ s[j++] = path[i];
+ s[j] = 0;
+ return s;
+}
+
+char *fb_create_make_path(const char *path)
+{
+ return fb_create_make_path_n(path, strlen(path));
+}
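+
+/*
+ * Illustrative escaping for dependency files (hypothetical input):
+ *
+ *     fb_create_make_path("my schema.fbs") -> "my\ schema.fbs"
+ *
+ * A backslash is itself escaped only when it precedes a space, matching
+ * the clang/gcc depfile convention referenced in fileio.h.
+ */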
+
+size_t fb_find_basename(const char *path, size_t len)
+{
+ char *p = (char *)path;
+
+ p += len;
+ while(p != path) {
+ --p;
+ if (*p == '/' || *p == '\\') {
+ ++p;
+ break;
+ }
+ }
+ return (size_t)(p - path);
+}
+
+char *fb_create_basename(const char *path, size_t len, const char *ext)
+{
+ size_t pos;
+ char *s;
+
+ pos = fb_find_basename(path, len);
+ path += pos;
+ len -= pos;
+ len = fb_chomp(path, len, ext);
+ if ((s = malloc(len + 1))) {
+ memcpy(s, path, len);
+ s[len] = '\0';
+ }
+ return s;
+}
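+
+/*
+ * Illustrative result (hypothetical input; returned string must be freed):
+ *
+ *     fb_create_basename("schemas/monster.fbs", 19, ".fbs") -> "monster"
+ */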
+
+char *fb_read_file(const char *filename, size_t max_size, size_t *size_out)
+{
+ FILE *fp;
+ long k;
+ size_t size, pos, n, _out;
+ char *buf;
+
+ size_out = size_out ? size_out : &_out;
+
+ fp = fopen(filename, "rb");
+ size = 0;
+ buf = 0;
+
+ if (!fp) {
+ goto fail;
+ }
+ fseek(fp, 0L, SEEK_END);
+ k = ftell(fp);
+ if (k < 0) goto fail;
+ size = (size_t)k;
+ *size_out = size;
+ if (max_size > 0 && size > max_size) {
+ goto fail;
+ }
+ rewind(fp);
+ buf = malloc(size ? size : 1);
+ if (!buf) {
+ goto fail;
+ }
+ pos = 0;
+ while ((n = fread(buf + pos, 1, size - pos, fp))) {
+ pos += n;
+ }
+ if (pos != size) {
+ goto fail;
+ }
+ fclose(fp);
+ *size_out = size;
+ return buf;
+
+fail:
+ if (fp) {
+ fclose(fp);
+ }
+ if (buf) {
+ free(buf);
+ }
+ *size_out = size;
+ return 0;
+}
diff --git a/flatcc/src/compiler/fileio.h b/flatcc/src/compiler/fileio.h
new file mode 100644
index 0000000..5a46f6d
--- /dev/null
+++ b/flatcc/src/compiler/fileio.h
@@ -0,0 +1,86 @@
+#ifndef FILES_H
+#define FILES_H
+
+#include <stdlib.h>
+
+/*
+ * Returns an allocated copy of the path truncated to len if len is
+ * shorter. Free returned string subsequently. Also truncates to less
+ * than len if path contains null characters.
+ */
+char *__flatcc_fb_copy_path_n(const char *path, size_t len);
+#define fb_copy_path_n __flatcc_fb_copy_path_n
+
+/* Returns an allocated copy of path. Free returned string subsequently. */
+char *__flatcc_fb_copy_path(const char *path);
+#define fb_copy_path __flatcc_fb_copy_path
+
+/*
+ * Joins two paths. The prefix can optionally be null.
+ * Free returned string subsequently. If `path_sep` is true, prefix is
+ * separated from suffix with a path separator if not already present.
+ */
+char *__flatcc_fb_create_join_path_n(const char *prefix, size_t prefix_len,
+ const char *suffix, size_t suffix_len, const char *ext, int path_sep);
+#define fb_create_join_path_n __flatcc_fb_create_join_path_n
+
+char *__flatcc_fb_create_join_path(const char *prefix, const char * suffix, const char *ext, int path_sep);
+#define fb_create_join_path __flatcc_fb_create_join_path
+
+/* Adds extension to path in a new copy. */
+char *__flatcc_fb_create_path_ext_n(const char *path, size_t path_len, const char *ext);
+#define fb_create_path_ext_n __flatcc_fb_create_path_ext_n
+
+char *__flatcc_fb_create_path_ext(const char *path, const char *ext);
+#define fb_create_path_ext __flatcc_fb_create_path_ext
+
+/*
+ * Creates a path with spaces escaped in a sort of gcc/Gnu Make
+ * compatible way, primarily for use with dependency files.
+ *
+ * http://clang.llvm.org/doxygen/DependencyFile_8cpp_source.html
+ *
+ * We should escape a backslash only if followed by space.
+ * We should escape a space in all cases.
+ * We ought to handle #, but don't because gcc fails to do so.
+ *
+ * This is dictated by how clang and gcc generate makefile
+ * dependency rules for gnu make.
+ *
+ * This is not intended for strings used for system calls, but rather
+ * for writing to files where a quoted format is not supported.
+ *
+ */
+char *__flatcc_fb_create_make_path_n(const char *path, size_t path_len);
+#define fb_create_make_path_n __flatcc_fb_create_make_path_n
+
+char *__flatcc_fb_create_make_path(const char *path);
+#define fb_create_make_path __flatcc_fb_create_make_path
+
+/*
+ * Creates a new filename stripped from path prefix and optional ext
+ * suffix. Free returned string subsequently.
+ */
+char *__flatcc_fb_create_basename(const char *path, size_t len, const char *ext);
+#define fb_create_basename __flatcc_fb_create_basename
+
+/* Free returned buffer subsequently. Stores file size in `size_out` arg.
+ * If `max_size` is 0 the file is read regardless of size, otherwise
+ * if the file size exceeds `max_size` then `size_out` is set to the
+ * actual size and null is returned. */
+char *__flatcc_fb_read_file(const char *filename, size_t max_size, size_t *size_out);
+#define fb_read_file __flatcc_fb_read_file
+
+
+/*
+ * Returns offset into source path representing the longest suffix
+ * string with no path separator.
+ */
+size_t __flatcc_fb_find_basename(const char *path, size_t len);
+#define fb_find_basename __flatcc_fb_find_basename
+
+/* Returns input length or length reduced by ext len if ext is a proper suffix. */
+size_t __flatcc_fb_chomp(const char *path, size_t len, const char *ext);
+#define fb_chomp __flatcc_fb_chomp
+
+#endif /* FILES_H */
diff --git a/flatcc/src/compiler/flatcc.c b/flatcc/src/compiler/flatcc.c
new file mode 100644
index 0000000..3111b4c
--- /dev/null
+++ b/flatcc/src/compiler/flatcc.c
@@ -0,0 +1,511 @@
+#include <assert.h>
+#include "config.h"
+#include "parser.h"
+#include "semantics.h"
+#include "fileio.h"
+#include "codegen.h"
+#include "flatcc/flatcc.h"
+
+#define checkfree(s) if (s) { free(s); s = 0; }
+
+void flatcc_init_options(flatcc_options_t *opts)
+{
+ memset(opts, 0, sizeof(*opts));
+
+ opts->max_schema_size = FLATCC_MAX_SCHEMA_SIZE;
+ opts->max_include_depth = FLATCC_MAX_INCLUDE_DEPTH;
+ opts->max_include_count = FLATCC_MAX_INCLUDE_COUNT;
+ opts->allow_boolean_conversion = FLATCC_ALLOW_BOOLEAN_CONVERSION;
+ opts->allow_enum_key = FLATCC_ALLOW_ENUM_KEY;
+ opts->allow_enum_struct_field = FLATCC_ALLOW_ENUM_STRUCT_FIELD;
+ opts->allow_multiple_key_fields = FLATCC_ALLOW_MULTIPLE_KEY_FIELDS;
+ opts->allow_primary_key = FLATCC_ALLOW_PRIMARY_KEY;
+ opts->allow_scan_for_all_fields = FLATCC_ALLOW_SCAN_FOR_ALL_FIELDS;
+ opts->allow_string_key = FLATCC_ALLOW_STRING_KEY;
+ opts->allow_struct_field_deprecate = FLATCC_ALLOW_STRUCT_FIELD_DEPRECATE;
+ opts->allow_struct_field_key = FLATCC_ALLOW_STRUCT_FIELD_KEY;
+ opts->allow_struct_root = FLATCC_ALLOW_STRUCT_ROOT;
+ opts->ascending_enum = FLATCC_ASCENDING_ENUM;
+ opts->hide_later_enum = FLATCC_HIDE_LATER_ENUM;
+ opts->hide_later_struct = FLATCC_HIDE_LATER_STRUCT;
+ opts->offset_size = FLATCC_OFFSET_SIZE;
+ opts->voffset_size = FLATCC_VOFFSET_SIZE;
+ opts->utype_size = FLATCC_UTYPE_SIZE;
+ opts->bool_size = FLATCC_BOOL_SIZE;
+
+ opts->require_root_type = FLATCC_REQUIRE_ROOT_TYPE;
+ opts->strict_enum_init = FLATCC_STRICT_ENUM_INIT;
+ /*
+     * Index 0 is the table elem count, and index 1 is the table size,
+     * so the max count is reduced by 2, meaning field ids
+     * must be between 0 and vt_max_count - 1.
+     * Usually voffsets are 16-bit, so FLATCC_VOFFSET_SIZE = 2.
+     * Strange expression to avoid shift overflow on 64-bit sizes.
+ */
+ opts->vt_max_count = ((1LL << (FLATCC_VOFFSET_SIZE * 8 - 1)) - 1) * 2;
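+    /*
+     * For example, with the default FLATCC_VOFFSET_SIZE of 2 this yields
+     * ((1LL << 15) - 1) * 2 = 65534.
+     */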
+
+ opts->default_schema_ext = FLATCC_DEFAULT_SCHEMA_EXT;
+ opts->default_bin_schema_ext = FLATCC_DEFAULT_BIN_SCHEMA_EXT;
+ opts->default_bin_ext = FLATCC_DEFAULT_BIN_EXT;
+
+ opts->cgen_no_conflicts = FLATCC_CGEN_NO_CONFLICTS;
+
+ opts->cgen_pad = FLATCC_CGEN_PAD;
+ opts->cgen_sort = FLATCC_CGEN_SORT;
+ opts->cgen_pragmas = FLATCC_CGEN_PRAGMAS;
+
+ opts->cgen_common_reader = 0;
+ opts->cgen_common_builder = 0;
+ opts->cgen_reader = 0;
+ opts->cgen_builder = 0;
+ opts->cgen_json_parser = 0;
+ opts->cgen_spacing = FLATCC_CGEN_SPACING;
+
+ opts->bgen_bfbs = FLATCC_BGEN_BFBS;
+ opts->bgen_qualify_names = FLATCC_BGEN_QUALIFY_NAMES;
+ opts->bgen_length_prefix = FLATCC_BGEN_LENGTH_PREFIX;
+}
+
+flatcc_context_t flatcc_create_context(flatcc_options_t *opts, const char *name,
+ flatcc_error_fun error_out, void *error_ctx)
+{
+ fb_parser_t *P;
+
+ if (!(P = malloc(sizeof(*P)))) {
+ return 0;
+ }
+ if (fb_init_parser(P, opts, name, error_out, error_ctx, 0)) {
+ free(P);
+ return 0;
+ }
+ return P;
+}
+
+static flatcc_context_t __flatcc_create_child_context(flatcc_options_t *opts, const char *name,
+ fb_parser_t *P_parent)
+{
+ fb_parser_t *P;
+
+ if (!(P = malloc(sizeof(*P)))) {
+ return 0;
+ }
+ if (fb_init_parser(P, opts, name, P_parent->error_out, P_parent->error_ctx, P_parent->schema.root_schema)) {
+ free(P);
+ return 0;
+ }
+ return P;
+}
+
+/* TODO: handle include files via some sort of buffer read callback
+ * and possibly transfer the file based parser to this logic. */
+int flatcc_parse_buffer(flatcc_context_t ctx, const char *buf, size_t buflen)
+{
+ fb_parser_t *P = ctx;
+
+    /* Currently includes cannot be handled by buffers, so they are disabled. */
+ P->opts.disable_includes = 1;
+ if ((size_t)buflen > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ return -1;
+ }
+ /* Add self to set of visible schema. */
+ ptr_set_insert_item(&P->schema.visible_schema, &P->schema, ht_keep);
+ return fb_parse(P, buf, buflen, 0) || fb_build_schema(P) ? -1 : 0;
+}
+
+static void visit_dep(void *context, void *ptr)
+{
+ fb_schema_t *parent = context;
+ fb_schema_t *dep = ptr;
+
+ ptr_set_insert_item(&parent->visible_schema, dep, ht_keep);
+}
+
+static void add_visible_schema(fb_schema_t *parent, fb_schema_t *dep)
+{
+ ptr_set_visit(&dep->visible_schema, visit_dep, parent);
+}
+
+static int __parse_include_file(fb_parser_t *P_parent, const char *filename)
+{
+ flatcc_context_t *ctx = 0;
+ fb_parser_t *P = 0;
+ fb_root_schema_t *rs;
+ flatcc_options_t *opts = &P_parent->opts;
+ fb_schema_t *dep;
+
+ rs = P_parent->schema.root_schema;
+ if (rs->include_depth >= opts->max_include_depth && opts->max_include_depth > 0) {
+ fb_print_error(P_parent, "include nesting level too deep\n");
+ return -1;
+ }
+ if (rs->include_count >= opts->max_include_count && opts->max_include_count > 0) {
+ fb_print_error(P_parent, "include count limit exceeded\n");
+ return -1;
+ }
+ if (!(ctx = __flatcc_create_child_context(opts, filename, P_parent))) {
+ return -1;
+ }
+ P = (fb_parser_t *)ctx;
+ /* Don't parse the same file twice, or any other file with same name. */
+ if ((dep = fb_schema_table_find_item(&rs->include_index, &P->schema))) {
+ add_visible_schema(&P_parent->schema, dep);
+ flatcc_destroy_context(ctx);
+ return 0;
+ }
+ P->dependencies = P_parent->dependencies;
+ P_parent->dependencies = P;
+ P->referer_path = P_parent->path;
+    /* Each parser has a root schema instance, but only the root parser's instance is used. */
+ rs->include_depth++;
+ rs->include_count++;
+ if (flatcc_parse_file(ctx, filename)) {
+ return -1;
+ }
+ add_visible_schema(&P_parent->schema, &P->schema);
+ return 0;
+}
+
+/*
+ * The depends file format is a make rule:
+ *
+ * <outputfile> : <dep1-file> <dep2-file> ...
+ *
+ * like -MMD option for gcc/clang:
+ * lib.o.d generated with content:
+ *
+ * lib.o : header1.h header2.h
+ *
+ * We use a file name <basename>.depends for schema <basename>.fbs with content:
+ *
+ * <basename>_reader.h : <included-schema-1> ...
+ *
+ * The .d extension could mean the D language and we don't have a sensible
+ * .o.d name because of multiple outputs, so .depends is better.
+ *
+ * (the above is subject to the configuration of extensions).
+ *
+ * TODO:
+ * perhaps we should optionally add a dependency to the common reader
+ * and builder files when they are generated separately as they should in
+ * concurrent builds.
+ *
+ * TODO:
+ * 1. we should have a file for every output we produce (_builder.h etc.)
+ * 2. reader might not even be in the output, e.g. verifier only.
+ * 3. multiple outputs don't work with ninja build 1.7.1, so just
+ *    use reader for now, and possibly add an option for multiple
+ *    outputs later.
+ *
+ * http://stackoverflow.com/questions/11855386/using-g-with-mmd-in-makefile-to-automatically-generate-dependencies
+ * https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
+ *
+ * Spaces in gnu make:
+ * https://www.cmcrossroads.com/article/gnu-make-meets-file-names-spaces-them
+ * See comments on gnu make handling of spaces.
+ * http://clang.llvm.org/doxygen/DependencyFile_8cpp_source.html
+ */
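+
+/*
+ * A concrete (hypothetical) example of the generated rule, for a root
+ * schema monster.fbs that includes weapon.fbs and common.fbs, using the
+ * default target suffix:
+ *
+ *     monster_reader.h: weapon.fbs common.fbs
+ */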
+static int __flatcc_gen_depends_file(fb_parser_t *P)
+{
+ FILE *fp = 0;
+ const char *outpath, *basename;
+ const char *depfile, *deproot, *depext;
+ const char *targetfile, *targetsuffix, *targetroot;
+ char *path = 0, *deppath = 0, *tmppath = 0, *targetpath = 0;
+ int ret = -1;
+
+ /*
+ * The dependencies list is only correct for root files as it is a
+ * linear list. To deal with children, we would have to filter via
+ * the visible schema hash table, but we don't really need that.
+ */
+ assert(P->referer_path == 0);
+
+ outpath = P->opts.outpath ? P->opts.outpath : "";
+ basename = P->schema.basename;
+ targetfile = P->opts.gen_deptarget;
+
+
+    /* The following mostly concerns build tools that generate
+     * a depfile as the Ninja build system would use it. It is a bit strict
+     * on path variations and currently doesn't accept multiple
+ * build products in a build rule (Ninja 1.7.1).
+ *
+ * Make depfile relative to cwd so the user can add output if
+ * needed, otherwise it is not possible, or difficult, to use a path given
+     * by a build tool, relative to the cwd. If --depfile is not given,
+ * then -d is given or we would not be here. In that case we add an
+ * extension "<basename>.fbs.d" in the outpath.
+ *
+ * A general problem is that the outpath may be a build root dir or
+ * a current subdir for a custom build rule while the dep file
+ * content needs the same path every time, not just an equivalent
+ * path. For dependencies, we can rely on the input schema path.
+     * The input search paths may cause confusion but we choose the
+ * discovered path relative to cwd consistently for each schema file
+ * encountered.
+ *
+ * The target file (<target>: <include1.fbs> <include2.fbs> ...)
+     * is tricky because it is not unique - but we can choose <schema>_reader.h
+ * or <schema>.bfbs prefixed with outpath. The user should choose an
+ * outpath relative to cwd or an absolute path depending on what the
+     * build system prefers. This may not be so easy in practice, but what
+ * can we do?
+ *
+ * It is important to note the default target and the default
+ * depfile name is not just a convenience. Sometimes it is much
+ * simpler to use this version over an explicit path, sometimes
+ * perhaps not so much.
+ */
+
+ if (P->opts.gen_depfile) {
+ depfile = P->opts.gen_depfile;
+ deproot = "";
+ depext = "";
+ } else {
+ depfile = basename;
+ deproot = outpath;
+ depext = FLATCC_DEFAULT_DEP_EXT;
+ }
+ if (targetfile) {
+ targetsuffix = "";
+ targetroot = "";
+ } else {
+ targetsuffix = P->opts.bgen_bfbs
+ ? FLATCC_DEFAULT_BIN_SCHEMA_EXT
+ : FLATCC_DEFAULT_DEP_TARGET_SUFFIX;
+ targetfile = basename;
+ targetroot = outpath;
+ }
+
+ checkmem(path = fb_create_join_path(deproot, depfile, depext, 1));
+
+ checkmem(tmppath = fb_create_join_path(targetroot, targetfile, targetsuffix, 1));
+ /* Handle spaces in dependency file. */
+ checkmem((targetpath = fb_create_make_path(tmppath)));
+ checkfree(tmppath);
+
+ fp = fopen(path, "wb");
+ if (!fp) {
+ fb_print_error(P, "could not open dependency file for output: %s\n", path);
+ goto done;
+ }
+ fprintf(fp, "%s:", targetpath);
+
+ /* Don't depend on root schema. */
+ P = P->dependencies;
+ while (P) {
+ checkmem((deppath = fb_create_make_path(P->path)));
+ fprintf(fp, " %s", deppath);
+ P = P->dependencies;
+ checkfree(deppath);
+ }
+ fprintf(fp, "\n");
+ ret = 0;
+
+done:
+ checkfree(path);
+ checkfree(tmppath);
+ checkfree(targetpath);
+ checkfree(deppath);
+ if (fp) {
+ fclose(fp);
+ }
+ return ret;
+}
+
+int flatcc_parse_file(flatcc_context_t ctx, const char *filename)
+{
+ fb_parser_t *P = ctx;
+ size_t inpath_len, filename_len;
+ char *buf, *path, *include_file;
+ const char *inpath;
+ size_t size;
+ fb_name_t *inc;
+ int i, ret, is_root;
+
+ filename_len = strlen(filename);
+ /* Don't parse the same file twice, or any other file with same basename. */
+ if (fb_schema_table_insert_item(&P->schema.root_schema->include_index, &P->schema, ht_keep)) {
+ return 0;
+ }
+ buf = 0;
+ path = 0;
+ include_file = 0;
+ ret = -1;
+ is_root = !P->referer_path;
+
+ /*
+ * For root files, read file relative to working dir first. For
+ * included files (`referer_path` set), first try include paths
+ * in order, then path relative to including file.
+ */
+ if (is_root) {
+ if (!(buf = fb_read_file(filename, P->opts.max_schema_size, &size))) {
+ if (size + P->schema.root_schema->total_source_size > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ goto done;
+ }
+ } else {
+ checkmem((path = fb_copy_path(filename)));
+ }
+ }
+ for (i = 0; !buf && i < P->opts.inpath_count; ++i) {
+ inpath = P->opts.inpaths[i];
+ inpath_len = strlen(inpath);
+ checkmem((path = fb_create_join_path_n(inpath, inpath_len, filename, filename_len, "", 1)));
+ if (!(buf = fb_read_file(path, P->opts.max_schema_size, &size))) {
+ free(path);
+ path = 0;
+ if (size > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ goto done;
+ }
+ }
+ }
+ if (!buf && !is_root) {
+ inpath = P->referer_path;
+ inpath_len = fb_find_basename(inpath, strlen(inpath));
+ checkmem((path = fb_create_join_path_n(inpath, inpath_len, filename, filename_len, "", 1)));
+ if (!(buf = fb_read_file(path, P->opts.max_schema_size, &size))) {
+ free(path);
+ path = 0;
+ if (size > P->opts.max_schema_size && P->opts.max_schema_size > 0) {
+ fb_print_error(P, "input exceeds maximum allowed size\n");
+ goto done;
+ }
+ }
+ }
+ if (!buf) {
+ fb_print_error(P, "error reading included schema file: %s\n", filename);
+ goto done;
+ }
+ P->schema.root_schema->total_source_size += size;
+ P->path = path;
+ /* Parser owns path. */
+ path = 0;
+ /*
+ * Even if we do not have the recursive option set, we still
+ * need to parse all include files to make sense of the current
+ * file.
+ */
+ if (!fb_parse(P, buf, size, 1)) {
+ /* Parser owns buffer. */
+ buf = 0;
+ inc = P->schema.includes;
+ while (inc) {
+ checkmem((include_file = fb_copy_path_n(inc->name.s.s, (size_t)inc->name.s.len)));
+ if (__parse_include_file(P, include_file)) {
+ goto done;
+ }
+ free(include_file);
+ include_file = 0;
+ inc = inc->link;
+ }
+ /* Add self to set of visible schema. */
+ ptr_set_insert_item(&P->schema.visible_schema, &P->schema, ht_keep);
+ if (fb_build_schema(P)) {
+ goto done;
+ }
+ /*
+ * We choose to only generate optional .depends files for root level
+ * files. These will contain all nested files regardless of
+ * recursive file generation flags.
+ */
+ if (P->opts.gen_dep && is_root) {
+ if (__flatcc_gen_depends_file(P)) {
+ goto done;
+ }
+ }
+ ret = 0;
+ }
+
+done:
+ /* Parser owns buffer so don't free it here. */
+ checkfree(path);
+ checkfree(include_file);
+ return ret;
+}
+
+#if FLATCC_REFLECTION
+int flatcc_generate_binary_schema_to_buffer(flatcc_context_t ctx, void *buf, size_t bufsiz)
+{
+ fb_parser_t *P = ctx;
+
+ if (fb_codegen_bfbs_to_buffer(&P->opts, &P->schema, buf, &bufsiz)) {
+ return (int)bufsiz;
+ }
+ return -1;
+}
+
+void *flatcc_generate_binary_schema(flatcc_context_t ctx, size_t *size)
+{
+ fb_parser_t *P = ctx;
+
+ return fb_codegen_bfbs_alloc_buffer(&P->opts, &P->schema, size);
+}
+#endif
+
+int flatcc_generate_files(flatcc_context_t ctx)
+{
+ fb_parser_t *P = ctx, *P_leaf;
+ fb_output_t *out, output;
+ int ret = 0;
+ out = &output;
+
+ if (!P || P->failed) {
+ return -1;
+ }
+ P_leaf = 0;
+ while (P) {
+ P->inverse_dependencies = P_leaf;
+ P_leaf = P;
+ P = P->dependencies;
+ }
+ P = ctx;
+#if FLATCC_REFLECTION
+ if (P->opts.bgen_bfbs) {
+ if (fb_codegen_bfbs_to_file(&P->opts, &P->schema)) {
+ return -1;
+ }
+ }
+#endif
+
+ if (fb_init_output_c(out, &P->opts)) {
+ return -1;
+ }
+ /* This does not require a parse first. */
+ if (!P->opts.gen_append && (ret = fb_codegen_common_c(out))) {
+ goto done;
+ }
+ /* If no file parsed - just common files if at all. */
+ if (!P->has_schema) {
+ goto done;
+ }
+ if (!P->opts.cgen_recursive) {
+ ret = fb_codegen_c(out, &P->schema);
+ goto done;
+ }
+ /* Make sure stdout and outfile output is generated in the right order. */
+ P = P_leaf;
+ while (!ret && P) {
+ ret = P->failed || fb_codegen_c(out, &P->schema);
+ P = P->inverse_dependencies;
+ }
+done:
+ fb_end_output_c(out);
+ return ret;
+}
+
+void flatcc_destroy_context(flatcc_context_t ctx)
+{
+ fb_parser_t *P = ctx, *dep = 0;
+
+ while (P) {
+ dep = P->dependencies;
+ fb_clear_parser(P);
+ free(P);
+ P = dep;
+ }
+}
diff --git a/flatcc/src/compiler/hash_tables/README.txt b/flatcc/src/compiler/hash_tables/README.txt
new file mode 100644
index 0000000..dc71a59
--- /dev/null
+++ b/flatcc/src/compiler/hash_tables/README.txt
@@ -0,0 +1,2 @@
+Each generic hash table type requires its own, often small, compilation
+unit, so we keep these here.
diff --git a/flatcc/src/compiler/hash_tables/name_table.c b/flatcc/src/compiler/hash_tables/name_table.c
new file mode 100644
index 0000000..ec0f7c2
--- /dev/null
+++ b/flatcc/src/compiler/hash_tables/name_table.c
@@ -0,0 +1,21 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_name_table)
+
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_name_t *name)
+{
+ return len == (size_t)name->name.s.len && memcmp(key, name->name.s.s, len) == 0;
+}
+
+static inline const void *ht_key(fb_name_t *name)
+{
+ return name->name.s.s;
+}
+
+static inline size_t ht_key_len(fb_name_t *name)
+{
+ return (size_t)name->name.s.len;
+}
diff --git a/flatcc/src/compiler/hash_tables/schema_table.c b/flatcc/src/compiler/hash_tables/schema_table.c
new file mode 100644
index 0000000..2a7e322
--- /dev/null
+++ b/flatcc/src/compiler/hash_tables/schema_table.c
@@ -0,0 +1,21 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_schema_table)
+
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_schema_t *schema)
+{
+ return len == (size_t)schema->name.name.s.len && memcmp(key, schema->name.name.s.s, len) == 0;
+}
+
+static inline const void *ht_key(fb_schema_t *schema)
+{
+ return schema->name.name.s.s;
+}
+
+static inline size_t ht_key_len(fb_schema_t *schema)
+{
+ return (size_t)schema->name.name.s.len;
+}
diff --git a/flatcc/src/compiler/hash_tables/scope_table.c b/flatcc/src/compiler/hash_tables/scope_table.c
new file mode 100644
index 0000000..7a7df3b
--- /dev/null
+++ b/flatcc/src/compiler/hash_tables/scope_table.c
@@ -0,0 +1,177 @@
+ /* Note: only one hash table can be implemented in a single file. */
+
+
+/*
+ * The generic hash table is designed to make the key length optional
+ * and we do not need it because our key is a terminated token list.
+ *
+ * The token list avoids having to allocate a new string and the
+ * associated issues of memory management. In most cases the search key
+ * is also a similar token list.
+ *
+ * However, on occasion we need to look up an unparsed string of dot
+ * separated scopes (nested_flatbuffer attributes). This is not
+ * trivially possible without resorting to allocating the strings.
+ * We could parse the attribute into tokens but it is also non-trivial
+ * because the token buffer breaks pointers when reallocating and
+ * the parse output is considered read-only at this point.
+ *
+ * We can however, use a trick to overcome this because the hash table
+ * does not enforce that the search key has same representation as the
+ * stored key. We can use the key length to switch between key types.
+ *
+ * When the key is parsed into a token list:
+ *
+ * enemy: MyGame . Example.Monster
+ *
+ * the spaces around the dots may be ignored by the parser.
+ * Spaces must be handled explicitly or disallowed when the key is
+ * parsed as an attribute string (only the quoted content):
+ *
+ * (nested_flatbuffer:"MyGame.Example.Monster")
+ *
+ * vs
+ *
+ * (nested_flatbuffer:"MyGame . Example.Monster")
+ *
+ * Google's flatc allows spaces in the token stream where dots are
+ * operators, but not in attribute strings which are supposed to
+ * be unique so we follow that convention.
+ *
+ * On both key representations, preprocessing must strip the trailing
+ * symbol stored within the scope before lookup - minding that this
+ * lookup only finds the scope itself. For token lists this can be
+ * done by either zero terminating the list early, or by issuing
+ * a negative length (after cast to int) of elements to consider. For
+ * string keys the key length should be set to the length to be
+ * considered.
+ *
+ * If the scope string is zero length, a null key should be issued
+ * with zero length. This is indistinguishable from a null length token
+ * list - both indicating a global scope - null thus being a valid key.
+ *
+ * Note: it is important to not use a non-null zero length string
+ * as key.
+ */
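+
+/*
+ * A hypothetical lookup using the dotted-string key form described
+ * above (the index and scope string are illustrative):
+ *
+ *     fb_scope_t *scope = fb_scope_table_find(&root_schema->scope_index,
+ *             "MyGame.Example", strlen("MyGame.Example"));
+ *
+ * The same scope would be found with the token-list form of the key and
+ * a length of 0.
+ */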
+
+#include "../symbols.h"
+
+static inline size_t scope_hash(const void *s, size_t len);
+#define HT_HASH_FUNCTION scope_hash
+
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_scope_table)
+#include "hash/hash_table_impl.h"
+
+/* Null is a valid key used for root scopes. */
+static inline int ht_match(const void *key, size_t len, fb_scope_t *scope)
+{
+ const fb_ref_t *name = scope->name;
+ int count = (int)len;
+ size_t n1, n2, i;
+
+ /* Note: `name` may be null here - this is the global scope name. */
+ if (count <= 0) {
+ const fb_ref_t *keyname = key;
+ /*
+ * If count is negative, this is the token count of the key
+ * which may have suffix to be ignored, otherwise the key is the
+ * full list.
+ */
+ /* `key` is a ref list (a list of tokens). */
+ while (name && keyname) {
+ n1 = (size_t)name->ident->len;
+ n2 = (size_t)keyname->ident->len;
+ if (n1 != n2 || strncmp(name->ident->text, keyname->ident->text, n1)) {
+ return 0;
+ }
+ name = name->link;
+ keyname = keyname->link;
+ if (++count == 0) {
+ return name == 0;
+ }
+ }
+ if (name || keyname) {
+ return 0;
+ }
+ return 1;
+ } else {
+ /* `key` is a dotted string. */
+ const char *s1, *s2 = key;
+ while (name) {
+ s1 = name->ident->text;
+ n1 = (size_t)name->ident->len;
+ if (n1 > len) {
+ return 0;
+ }
+ for (i = 0; i < n1; ++i) {
+ if (s1[i] != s2[i]) {
+ return 0;
+ }
+ }
+ if (n1 == len) {
+ return name->link == 0;
+ }
+ if (s2[i] != '.') {
+ return 0;
+ }
+ len -= n1 + 1;
+ s2 += n1 + 1;
+ name = name->link;
+ }
+ return 0;
+ }
+}
+
+static inline const void *ht_key(fb_scope_t *scope)
+{
+ return scope->name;
+}
+
+static inline size_t ht_key_len(fb_scope_t *scope)
+{
+ (void)scope;
+ /*
+ * Must be zero because the result is passed to ht_match
+ * when comparing two stored items for hash conflicts.
+ * Only external lookup keys can be non-zero.
+ */
+ return 0;
+}
+
+static inline size_t scope_hash(const void *key, size_t len)
+{
+ size_t h = 0, i;
+ int count = (int)len;
+
+ if (count <= 0) {
+ const fb_ref_t *name = key;
+
+ while (name) {
+ h ^= ht_strn_hash_function(name->ident->text, (size_t)name->ident->len);
+ h = ht_int_hash_function((void *)h, 0);
+ name = name->link;
+ if (++count == 0) {
+ break;
+ }
+ }
+ return h;
+ } else {
+ const char *s = key;
+ for (;;) {
+ for (i = 0; i < len; ++i) {
+ if (s[i] == '.') {
+ break;
+ }
+ }
+ h ^= ht_strn_hash_function(s, i);
+ h = ht_int_hash_function((void *)h, 0);
+ if (i == len) {
+ break;
+ }
+ len -= i + 1;
+ s += i + 1;
+ }
+ return h;
+ }
+}
diff --git a/flatcc/src/compiler/hash_tables/symbol_table.c b/flatcc/src/compiler/hash_tables/symbol_table.c
new file mode 100644
index 0000000..bc13d8a
--- /dev/null
+++ b/flatcc/src/compiler/hash_tables/symbol_table.c
@@ -0,0 +1,22 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_symbol_table)
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_symbol_t *sym)
+{
+ return len == ht_key_len(sym) && memcmp(key, ht_key(sym), len) == 0;
+}
+
+static inline const void *ht_key(fb_symbol_t *sym)
+{
+ return sym->ident->text;
+}
+
+static inline size_t ht_key_len(fb_symbol_t *sym)
+{
+ fb_token_t *ident = sym->ident;
+
+ return (size_t)ident->len;
+}
diff --git a/flatcc/src/compiler/hash_tables/value_set.c b/flatcc/src/compiler/hash_tables/value_set.c
new file mode 100644
index 0000000..d623c36
--- /dev/null
+++ b/flatcc/src/compiler/hash_tables/value_set.c
@@ -0,0 +1,60 @@
+ /* Note: only one hash table can be implemented in a single file. */
+#include "../symbols.h"
+#include "hash/ht_hash_function.h"
+
+static size_t value_hash_function(const void *key, size_t key_len)
+{
+ const fb_value_t *value = key;
+
+ (void)key_len;
+
+ switch (value->type) {
+ case vt_int:
+ return ht_int_hash_function((void *)(size_t)(value->i ^ value->type), sizeof(value->i));
+ case vt_uint:
+ return ht_int_hash_function((void *)(size_t)(value->u ^ value->type), sizeof(value->u));
+ case vt_bool:
+ return ht_int_hash_function((void *)(size_t)(value->b ^ value->type), sizeof(value->b));
+ default:
+ return 0;
+ }
+}
+
+#define HT_HASH_FUNCTION value_hash_function
+
+#include "hash/hash_table_def.h"
+DEFINE_HASH_TABLE(fb_value_set)
+#include "hash/hash_table_impl.h"
+
+static inline int ht_match(const void *key, size_t len, fb_value_t *item)
+{
+ const fb_value_t *value = key;
+
+ (void)len;
+
+ if (value->type != item->type) {
+ return 0;
+ }
+ switch (value->type) {
+ case vt_int:
+ return value->i == item->i;
+ case vt_uint:
+ return value->u == item->u;
+ case vt_bool:
+ return value->b == item->b;
+ default:
+ return 0;
+ }
+}
+
+static inline const void *ht_key(fb_value_t *value)
+{
+ return value;
+}
+
+static inline size_t ht_key_len(fb_value_t *value)
+{
+ (void)value;
+
+ return 0;
+}
diff --git a/flatcc/src/compiler/keywords.h b/flatcc/src/compiler/keywords.h
new file mode 100644
index 0000000..51e0ae8
--- /dev/null
+++ b/flatcc/src/compiler/keywords.h
@@ -0,0 +1,56 @@
+/*
+ * FlatBuffers keyword table
+ *
+ * See luthor project test files for more details on keyword table
+ * syntax.
+ *
+ * In brief: Keywords are assigned a hash key that is easy
+ * for the lexer to test.
+ *
+ * The first char is the length of the keyword, the next two chars are the
+ * two leading characters of the keyword, and the last char is the last char of
+ * the keyword. For keywords longer than 9 add length to '0' in the
+ * first character. For keywords shorter than 3 characters, see luthor
+ * project - we don't need it. The keywords should be sorted.
+ */
+
+LEX_KW_TABLE_BEGIN
+ lex_kw(int, '3', 'i', 'n', 't')
+ lex_kw(bool, '4', 'b', 'o', 'l')
+ lex_kw(byte, '4', 'b', 'y', 'e')
+ lex_kw(char, '4', 'c', 'h', 'r')
+ lex_kw(enum, '4', 'e', 'n', 'm')
+ lex_kw(int8, '4', 'i', 'n', '8')
+ lex_kw(long, '4', 'l', 'o', 'g')
+ lex_kw(null, '4', 'n', 'u', 'l')
+ lex_kw(true, '4', 't', 'r', 'e')
+ lex_kw(uint, '4', 'u', 'i', 't')
+ lex_kw(false, '5', 'f', 'a', 'e')
+ lex_kw(float, '5', 'f', 'l', 't')
+ lex_kw(int32, '5', 'i', 'n', '2')
+ lex_kw(int16, '5', 'i', 'n', '6')
+ lex_kw(int64, '5', 'i', 'n', '4')
+ lex_kw(table, '5', 't', 'a', 'e')
+ lex_kw(ubyte, '5', 'u', 'b', 'e')
+ lex_kw(uint8, '5', 'u', 'i', '8')
+ lex_kw(ulong, '5', 'u', 'l', 'g')
+ lex_kw(union, '5', 'u', 'n', 'n')
+ lex_kw(short, '5', 's', 'h', 't')
+ lex_kw(double, '6', 'd', 'o', 'e')
+ lex_kw(string, '6', 's', 't', 'g')
+ lex_kw(struct, '6', 's', 't', 't')
+ lex_kw(uint32, '6', 'u', 'i', '2')
+ lex_kw(uint16, '6', 'u', 'i', '6')
+ lex_kw(uint64, '6', 'u', 'i', '4')
+ lex_kw(ushort, '6', 'u', 's', 't')
+ lex_kw(float32, '7', 'f', 'l', '2')
+ lex_kw(float64, '7', 'f', 'l', '4')
+ lex_kw(include, '7', 'i', 'n', 'e')
+ lex_kw(attribute, '9', 'a', 't', 'e')
+ lex_kw(namespace, '9', 'n', 'a', 'e')
+ lex_kw(root_type, '9', 'r', 'o', 'e')
+ lex_kw(rpc_service, '0' + 11, 'r', 'p', 'e')
+ lex_kw(file_extension, '0' + 14, 'f', 'i', 'n')
+ lex_kw(file_identifier, '0' + 15, 'f', 'i', 'r')
+LEX_KW_TABLE_END
+
diff --git a/flatcc/src/compiler/parser.c b/flatcc/src/compiler/parser.c
new file mode 100644
index 0000000..4f31e0b
--- /dev/null
+++ b/flatcc/src/compiler/parser.c
@@ -0,0 +1,1550 @@
+/*
+ * FlatBuffers IDL parser.
+ *
+ * Originally based on the numeric parser in the Luthor lexer project.
+ *
+ * We are moving away from the TDOP approach because the grammar doesn't
+ * really benefit from it. We use the same overall framework.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <stdarg.h>
+
+#include "semantics.h"
+#include "codegen.h"
+#include "fileio.h"
+#include "pstrutil.h"
+#include "flatcc/portable/pparseint.h"
+
+void fb_default_error_out(void *err_ctx, const char *buf, size_t len)
+{
+ (void)err_ctx;
+
+ fwrite(buf, 1, len, stderr);
+}
+
+int fb_print_error(fb_parser_t *P, const char * format, ...)
+{
+ int n;
+ va_list ap;
+ char buf[ERROR_BUFSIZ];
+
+ va_start (ap, format);
+ n = vsnprintf (buf, ERROR_BUFSIZ, format, ap);
+ va_end (ap);
+ if (n >= ERROR_BUFSIZ) {
+ strcpy(buf + ERROR_BUFSIZ - 5, "...\n");
+ n = ERROR_BUFSIZ - 1;
+ }
+ P->error_out(P->error_ctx, buf, (size_t)n);
+ return n;
+}
+
+const char *error_find_file_of_token(fb_parser_t *P, fb_token_t *t)
+{
+ /*
+ * Search token in dependent buffers if not in current token
+ * buffer. We can do this as a linear search because we limit the
+ * number of output errors.
+ */
+ while (P) {
+ if (P->ts <= t && P->te > t) {
+ return P->schema.errorname;
+ }
+ P = P->dependencies;
+ }
+ return "";
+}
+
+void error_report(fb_parser_t *P, fb_token_t *t, const char *msg, fb_token_t *peer, const char *s, size_t len)
+{
+ const char *file, *peer_file;
+
+ if (t && !s) {
+ s = t->text;
+ len = (size_t)t->len;
+ }
+ if (!msg) {
+ msg = "";
+ }
+ if (!s) {
+ s = "";
+ len = 0;
+ }
+ if (t && !peer) {
+ file = error_find_file_of_token(P, t);
+ fb_print_error(P, "%s:%ld:%ld: error: '%.*s': %s\n",
+ file, (long)t->linenum, (long)t->pos, len, s, msg);
+ } else if (t && peer) {
+ file = error_find_file_of_token(P, t);
+ peer_file = error_find_file_of_token(P, peer);
+ fb_print_error(P, "%s:%ld:%ld: error: '%.*s': %s: %s:%ld:%ld: '%.*s'\n",
+ file, (long)t->linenum, (long)t->pos, len, s, msg,
+ peer_file, (long)peer->linenum, (long)peer->pos, (int)peer->len, peer->text);
+ } else if (!t && !peer) {
+ fb_print_error(P, "error: %s\n", msg);
+ } else if (peer) {
+ peer_file = error_find_file_of_token(P, peer);
+ fb_print_error(P, "error: %s: %s:%ld:%ld: '%.*s'\n",
+ msg,
+ peer_file, (long)peer->linenum, (long)peer->pos, (int)peer->len, peer->text);
+ } else {
+ fb_print_error(P, "internal error: unexpected state\n");
+ }
+ ++P->failed;
+}
+
+void error_ref_sym(fb_parser_t *P, fb_ref_t *ref, const char *msg, fb_symbol_t *s2)
+{
+ fb_ref_t *p;
+ char buf[FLATCC_MAX_IDENT_SHOW + 1];
+ size_t k = FLATCC_MAX_IDENT_SHOW;
+ size_t n = 0;
+ size_t n0 = 0;
+ int truncated = 0;
+
+ p = ref;
+ while (p && k > 0) {
+ if (n0 > 0) {
+ buf[n0] = '.';
+ --k;
+ ++n0;
+ }
+ n = (size_t)p->ident->len;
+ if (k < n) {
+ n = k;
+ truncated = 1;
+ }
+ memcpy(buf + n0, p->ident->text, n);
+ k -= n;
+ n0 += n;
+ p = p->link;
+ }
+ if (p) truncated = 1;
+ buf[n0] = '\0';
+ if (n0 > 0) {
+ --n0;
+ }
+ if (truncated) {
+ memcpy(buf + FLATCC_MAX_IDENT_SHOW + 1 - 4, "...\0", 4);
+ n0 = FLATCC_MAX_IDENT_SHOW;
+ }
+ error_report(P, ref->ident, msg, s2 ? s2->ident : 0, buf, n0);
+}
+
+//#define LEX_DEBUG
+
+/* FlatBuffers reserved keywords. */
+#define LEX_KEYWORDS
+
+#define LEX_C_BLOCK_COMMENT
+/*
+ * Flatbuffers also support /// on a single line for documentation but
+ * we can handle that within the normal line comment parsing logic.
+ */
+#define LEX_C99_LINE_COMMENT
+/*
+ * String escapes are not defined in fb schema but it only uses strings
+ * for attribute, namespace, file ext, and file id. For JSON objects we
+ * use C string escapes but control characters must be detected.
+ */
+#define LEX_C_STRING
+
+/* Accept numbers like -0x42 as integer literals. */
+#define LEX_HEX_NUMERIC
+
+#define lex_isblank(c) ((c) == ' ' || (c) == '\t')
+
+#include "parser.h"
+
+#ifdef LEX_DEBUG
+
+static void print_token(fb_token_t *t)
+{
+ lex_fprint_token(stderr, t->id, t->text, t->text + t->len, t->linenum, t->pos);
+}
+
+static void debug_token(const char *info, fb_token_t *t)
+{
+ fprintf(stderr, "%s\n ", info);
+ print_token(t);
+}
+#else
+#define debug_token(info, t) ((void)0)
+#endif
+
+static void revert_metadata(fb_metadata_t **list)
+{
+ REVERT_LIST(fb_metadata_t, link, list);
+}
+
+static void revert_symbols(fb_symbol_t **list)
+{
+ REVERT_LIST(fb_symbol_t, link, list);
+}
+
+static void revert_names(fb_name_t **list)
+{
+ REVERT_LIST(fb_name_t, link, list);
+}
+
+static inline fb_doc_t *fb_add_doc(fb_parser_t *P, fb_token_t *t)
+{
+ fb_doc_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->ident = t;
+ p->link = P->doc;
+ P->doc = p;
+ return p;
+}
+
+#define fb_assign_doc(P, p) {\
+ revert_symbols(&P->doc); p->doc = P->doc; P->doc = 0; }
+
+static inline fb_compound_type_t *fb_add_table(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_table;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_struct(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_struct;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_rpc_service(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_rpc_service;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_enum(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_enum;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_compound_type_t *fb_add_union(fb_parser_t *P)
+{
+ fb_compound_type_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = P->schema.symbols;
+ p->symbol.kind = fb_is_union;
+ P->schema.symbols = &p->symbol;
+ p->scope = P->current_scope;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline fb_ref_t *fb_add_ref(fb_parser_t *P, fb_token_t *t)
+{
+ fb_ref_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->ident = t;
+ return p;
+}
+
+static inline fb_attribute_t *fb_add_attribute(fb_parser_t *P)
+{
+ fb_attribute_t *p;
+
+ p = new_elem(P, sizeof(*p));
+ p->name.link = P->schema.attributes;
+ P->schema.attributes = &p->name;
+ return p;
+}
+
+static inline fb_include_t *fb_add_include(fb_parser_t *P)
+{
+ fb_include_t *p;
+ p = new_elem(P, sizeof(*p));
+ p->link = P->schema.includes;
+ return P->schema.includes = p;
+}
+
+static inline fb_scope_t *fb_add_scope(fb_parser_t *P, fb_ref_t *name)
+{
+ fb_scope_t *p;
+
+ p = fb_scope_table_find(&P->schema.root_schema->scope_index, name, 0);
+ if (p) {
+ return p;
+ }
+ p = new_elem(P, sizeof(*p));
+ p->name = name;
+ p->prefix = P->schema.prefix;
+
+ fb_scope_table_insert_item(&P->schema.root_schema->scope_index, p, ht_keep);
+ return p;
+}
+
+static inline fb_metadata_t *fb_add_metadata(fb_parser_t *P, fb_metadata_t **metadata)
+{
+ fb_metadata_t *p;
+ p = new_elem(P, sizeof(*p));
+ p->link = *metadata;
+ return *metadata = p;
+}
+
+static inline fb_member_t *fb_add_member(fb_parser_t *P, fb_symbol_t **members)
+{
+ fb_member_t *p;
+ p = new_elem(P, sizeof(*p));
+ p->symbol.link = *members;
+ p->symbol.kind = fb_is_member;
+ *members = (fb_symbol_t *)p;
+ fb_assign_doc(P, p);
+ return p;
+}
+
+static inline int is_end(fb_token_t *t)
+{
+ return t->id == LEX_TOK_EOF;
+}
+
+static fb_token_t *next(fb_parser_t *P)
+{
+again:
+ ++P->token;
+ if (P->token == P->te) {
+ /* We keep returning end of token to help binary operators etc., if any. */
+ --P->token;
+ assert(0);
+ switch (P->token->id) {
+ case LEX_TOK_EOS: case LEX_TOK_EOB: case LEX_TOK_EOF:
+ P->token->id = LEX_TOK_EOF;
+ return P->token;
+ }
+ error_tok(P, P->token, "unexpected end of input");
+ }
+ if (P->token->id == tok_kw_doc_comment) {
+ /* Note: we can have blanks that are control characters here, such as \t. */
+ fb_add_doc(P, P->token);
+ goto again;
+ }
+ debug_token("next", P->token);
+ return P->token;
+}
+
+static void recover(fb_parser_t *P, long token_id, int consume)
+{
+ while (!is_end(P->token)) {
+ if (P->token->id == token_id) {
+ if (consume) {
+ next(P);
+ }
+ P->doc = 0;
+ return;
+ }
+ next(P);
+ }
+}
+
+static void recover2(fb_parser_t *P, long token_id, int consume, long token_id_2, int consume_2)
+{
+ while (!is_end(P->token)) {
+ if (P->token->id == token_id) {
+ if (consume) {
+ next(P);
+ }
+ P->doc = 0;
+ return;
+ }
+ if (P->token->id == token_id_2) {
+ if (consume_2) {
+ next(P);
+ }
+ P->doc = 0;
+ return;
+ }
+ next(P);
+ }
+}
+
+static inline fb_token_t *optional(fb_parser_t *P, long id) {
+ fb_token_t *t = 0;
+ if (P->token->id == id) {
+ t = P->token;
+ next(P);
+ }
+ return t;
+}
+
+static inline fb_token_t *match(fb_parser_t *P, long id, char *msg) {
+ fb_token_t *t = 0;
+ if (P->token->id == id) {
+ t = P->token;
+ next(P);
+ } else {
+ error_tok(P, P->token, msg);
+ }
+ return t;
+}
+
+/*
+ * When a keyword should also be accepted as an identifier.
+ * This is useful for JSON where field names are visible.
+ * Since field names are not referenced within the schema,
+ * this is generally safe. Enum members can also be remapped but
+ * they can then not be used as default values. Table names
+ * and other type names should not be remapped as they can then
+ * not be used as a type name for other fields.
+ */
+#if FLATCC_ALLOW_KW_FIELDS
+static inline void remap_field_ident(fb_parser_t *P)
+{
+ if (P->token->id >= LEX_TOK_KW_BASE && P->token->id < LEX_TOK_KW_END) {
+ P->token->id = LEX_TOK_ID;
+ }
+}
+#else
+static inline void remap_field_ident(fb_parser_t *P) { (void)P; }
+#endif
+
+#if FLATCC_ALLOW_KW_ENUMS
+static inline void remap_enum_ident(fb_parser_t *P)
+{
+ if (P->token->id >= LEX_TOK_KW_BASE && P->token->id < LEX_TOK_KW_END) {
+ P->token->id = LEX_TOK_ID;
+ }
+}
+#else
+static inline void remap_enum_ident(fb_parser_t *P) { (void)P; }
+#endif
+
+static fb_token_t *advance(fb_parser_t *P, long id, const char *msg, fb_token_t *peer)
+{
+ /*
+ * `advance` is generally used at end of statements so it is a
+ * convenient place to get rid of rogue doc comments we can't attach
+ * to anything meaningful.
+ */
+ P->doc = 0;
+ if (P->token->id != id) {
+ error_tok_2(P, P->token, msg, peer);
+ return P->token;
+ }
+ return next(P);
+}
+
+static void read_integer_value(fb_parser_t *P, fb_token_t *t, fb_value_t *v, int sign)
+{
+ int status;
+
+ v->type = vt_uint;
+ /* The token does not store the sign internally. */
+ parse_integer(t->text, (size_t)t->len, &v->u, &status);
+ if (status != PARSE_INTEGER_UNSIGNED) {
+ v->type = vt_invalid;
+ error_tok(P, t, "invalid integer format");
+ }
+ if (sign) {
+ v->i = -(int64_t)v->u;
+ v->type = vt_int;
+#ifdef FLATCC_FAIL_ON_INT_SIGN_OVERFLOW
+ /* Sometimes we might want this, so don't fail by default. */
+ if (v->i > 0) {
+ v->type = vt_invalid;
+ error_tok(P, t, "sign overflow in integer format");
+ }
+#endif
+ }
+}
+
+static void read_hex_value(fb_parser_t *P, fb_token_t *t, fb_value_t *v, int sign)
+{
+ int status;
+
+ v->type = vt_uint;
+ /* The token does not store the sign internally. */
+ parse_hex_integer(t->text, (size_t)t->len, &v->u, &status);
+ if (status != PARSE_INTEGER_UNSIGNED) {
+ v->type = vt_invalid;
+ error_tok(P, t, "invalid hex integer format");
+ }
+ if (sign) {
+ v->i = -(int64_t)v->u;
+ v->type = vt_int;
+#ifdef FLATCC_FAIL_ON_INT_SIGN_OVERFLOW
+ /* Sometimes we might want this, so don't fail by default. */
+ if (v->i > 0) {
+ v->type = vt_invalid;
+ error_tok(P, t, "sign overflow in hex integer format");
+ }
+#endif
+ }
+}
+
+static void read_float_value(fb_parser_t *P, fb_token_t *t, fb_value_t *v, int sign)
+{
+ char *end;
+
+ v->type = vt_float;
+ v->f = strtod(t->text, &end);
+ if (end != t->text + t->len) {
+ v->type = vt_invalid;
+ error_tok(P, t, "invalid float format");
+ } else if (t->text[0] == '.') {
+ v->type = vt_invalid;
+ /* The FB spec requires this, in line with the JSON format. */
+ error_tok(P, t, "numeric values must start with a digit");
+ } else if (sign) {
+ v->f = -v->f;
+ }
+}
+
+/*
+ * We disallow escape characters, newlines and other control characters,
+ * but especially escape characters because they would require us to
+ * reallocate the string and convert the escaped characters. We also
+ * disallow non-utf8 characters, but we do not check for it. The tab
+ * character could meaningfully be accepted, but we don't.
+ *
+ * String literals are only used to name attributes, namespaces,
+ * file identifiers and file extensions, so we really have no need
+ * for these extra features.
+ *
+ * JSON strings should be handled separately, if or when supported -
+ * either by converting escapes and reallocating the string, or
+ * simply by ignoring the escape errors and using the string unmodified.
+ */
+static void parse_string_literal(fb_parser_t *P, fb_value_t *v)
+{
+ fb_token_t *t;
+
+ v->type = vt_string;
+ v->s.s = 0;
+ v->s.len = 0;
+
+ for (;;) {
+ t = P->token;
+ switch (t->id) {
+ case LEX_TOK_STRING_PART:
+ if (v->s.s == 0) {
+ v->s.s = (char *)t->text;
+ }
+ break;
+ case LEX_TOK_STRING_ESCAPE:
+ v->type = vt_invalid;
+ error_tok(P, t, "escape not allowed in strings");
+ break;
+ case LEX_TOK_STRING_CTRL:
+ v->type = vt_invalid;
+ error_tok_as_string(P, t, "control characters not allowed in strings", "?", 1);
+ break;
+ case LEX_TOK_STRING_NEWLINE:
+ v->type = vt_invalid;
+ error_tok(P, t, "newline not allowed in strings");
+ break;
+ case LEX_TOK_STRING_UNTERMINATED:
+ case LEX_TOK_STRING_END:
+ goto done;
+
+ default:
+ error_tok(P, t, "internal error: unexpected token in string");
+ v->type = vt_invalid;
+ goto done;
+ }
+ next(P);
+ }
+done:
+ /*
+ * If we were to ignore all errors, we would get the full
+ * string as is excluding delimiting quotes.
+ */
+ if (v->s.s) {
+ v->s.len = (int)(P->token->text - v->s.s);
+ }
+ if (!match(P, LEX_TOK_STRING_END, "unterminated string")) {
+ v->type = vt_invalid;
+ }
+}
+
+/* Current token must be an identifier. */
+static void parse_ref(fb_parser_t *P, fb_ref_t **ref)
+{
+ *ref = fb_add_ref(P, P->token);
+ next(P);
+ ref = &((*ref)->link);
+ while (optional(P, '.')) {
+ if (P->token->id != LEX_TOK_ID) {
+ error_tok(P, P->token, "namespace prefix expected identifier");
+ break;
+ }
+ *ref = fb_add_ref(P, P->token);
+ ref = &((*ref)->link);
+ next(P);
+ }
+}
+
+/* `flags` */
+enum { allow_string_value = 1, allow_id_value = 2, allow_null_value = 4 };
+static void parse_value(fb_parser_t *P, fb_value_t *v, int flags, const char *error_msg)
+{
+ fb_token_t *t;
+ fb_token_t *sign;
+
+ sign = optional(P, '-');
+ t = P->token;
+
+ switch (t->id) {
+ case LEX_TOK_HEX:
+ read_hex_value(P, t, v, sign != 0);
+ break;
+ case LEX_TOK_INT:
+ read_integer_value(P, t, v, sign != 0);
+ break;
+ case LEX_TOK_FLOAT:
+ read_float_value(P, t, v, sign != 0);
+ break;
+ case tok_kw_true:
+ v->b = 1;
+ v->type = vt_bool;
+ break;
+ case tok_kw_false:
+ v->b = 0;
+ v->type = vt_bool;
+ break;
+ case tok_kw_null:
+ if (!(flags & allow_null_value)) {
+ v->type = vt_invalid;
+ error_tok(P, t, error_msg);
+ return;
+ }
+ v->type = vt_null;
+ break;
+ case LEX_TOK_STRING_BEGIN:
+ next(P);
+ parse_string_literal(P, v);
+ if (!(flags & allow_string_value)) {
+ v->type = vt_invalid;
+ error_tok(P, t, error_msg);
+ return;
+ }
+ if (sign) {
+ v->type = vt_invalid;
+ error_tok(P, t, "string constants cannot be signed");
+ return;
+ }
+ return;
+ case LEX_TOK_ID:
+ parse_ref(P, &v->ref);
+ v->type = vt_name_ref;
+ if (sign) {
+ v->type = vt_invalid;
+ /* Technically they could, but we do not allow it. */
+ error_tok(P, t, "named values cannot be signed");
+ }
+ return;
+ default:
+ /* We might have consumed a sign, but never mind that. */
+ error_tok(P, t, error_msg);
+ return;
+ }
+ if (sign && v->type == vt_bool) {
+ v->type = vt_invalid;
+ error_tok(P, t, "boolean constants cannot be signed");
+ }
+ next(P);
+}
+
+static void parse_fixed_array_size(fb_parser_t *P, fb_token_t *ttype, fb_value_t *v)
+{
+ const char *error_msg = "fixed length array length expected to be an unsigned integer";
+ fb_value_t vsize;
+ fb_token_t *tlen = P->token;
+
+ parse_value(P, &vsize, 0, error_msg);
+ if (vsize.type != vt_uint) {
+ error_tok(P, tlen, error_msg);
+ v->type = vt_invalid;
+ return;
+ }
+ if (v->type == vt_invalid) return;
+ switch (v->type) {
+ case vt_vector_type:
+ v->type = vt_fixed_array_type;
+ break;
+ case vt_vector_type_ref:
+ v->type = vt_fixed_array_type_ref;
+ break;
+ case vt_vector_string_type:
+ v->type = vt_fixed_array_string_type;
+ break;
+ case vt_invalid:
+ return;
+ default:
+ error_tok(P, ttype, "invalid fixed length array type");
+ v->type = vt_invalid;
+ return;
+ }
+ if (vsize.u == 0) {
+ error_tok(P, tlen, "fixed length array length cannot be 0");
+ v->type = vt_invalid;
+ return;
+ }
+ /*
+ * This allows for safe 64-bit multiplication by elements no
+ * larger than 2^32-1 and also fits into the value len field
+ * without extra size cost.
+ */
+ if (vsize.u > UINT32_MAX) {
+ error_tok(P, tlen, "fixed length array length overflow");
+ v->type = vt_invalid;
+ return;
+ }
+ v->len = (uint32_t)vsize.u;
+}
+
+/* ':' must already be matched */
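+/*
+ * Examples of type specifiers accepted here (illustrative, `MyType` is
+ * a hypothetical name): `int`, `string`, `[ubyte]`, `[char:4]` (fixed
+ * length array), and `MyType` or `[MyType]` where `MyType` may be a
+ * namespace-qualified reference.
+ */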
+static void parse_type(fb_parser_t *P, fb_value_t *v)
+{
+ fb_token_t *t = 0;
+ fb_token_t *ttype = 0;
+ fb_token_t *t0 = P->token;
+ int vector = 0;
+
+ v->len = 1;
+ v->type = vt_invalid;
+ while ((t = optional(P, '['))) {
+ ++vector;
+ }
+ if (vector > 1) {
+ error_tok(P, t0, "vector type can only be one-dimensional");
+ }
+ ttype = P->token;
+ switch (ttype->id) {
+ case tok_kw_int:
+ case tok_kw_bool:
+ case tok_kw_byte:
+ case tok_kw_long:
+ case tok_kw_uint:
+ case tok_kw_float:
+ case tok_kw_short:
+ case tok_kw_char:
+ case tok_kw_ubyte:
+ case tok_kw_ulong:
+ case tok_kw_ushort:
+ case tok_kw_double:
+ case tok_kw_int8:
+ case tok_kw_int16:
+ case tok_kw_int32:
+ case tok_kw_int64:
+ case tok_kw_uint8:
+ case tok_kw_uint16:
+ case tok_kw_uint32:
+ case tok_kw_uint64:
+ case tok_kw_float32:
+ case tok_kw_float64:
+ v->t = P->token;
+ v->type = vector ? vt_vector_type : vt_scalar_type;
+ next(P);
+ break;
+ case tok_kw_string:
+ v->t = P->token;
+ v->type = vector ? vt_vector_string_type : vt_string_type;
+ next(P);
+ break;
+ case LEX_TOK_ID:
+ parse_ref(P, &v->ref);
+ v->type = vector ? vt_vector_type_ref : vt_type_ref;
+ break;
+ case ']':
+ error_tok(P, t, "vector type cannot be empty");
+ break;
+ default:
+ error_tok(P, ttype, "invalid type specifier");
+ break;
+ }
+ if (vector && optional(P, ':')) {
+ parse_fixed_array_size(P, ttype, v);
+ }
+ while (optional(P, ']') && vector--) {
+ }
+ if (vector) {
+ error_tok_2(P, t, "vector type missing ']' to match", t0);
+ }
+ if ((t = optional(P, ']'))) {
+ error_tok_2(P, t, "extra ']' not matching", t0);
+ while (optional(P, ']')) {
+ }
+ }
+ if (ttype->id == tok_kw_char && v->type != vt_invalid) {
+ if (v->type != vt_fixed_array_type) {
+ error_tok(P, ttype, "char can only be used as a fixed length array type [char:<n>]");
+ v->type = vt_invalid;
+ }
+ }
+}
+
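+/*
+ * Parses an optional parenthesized attribute list following a field,
+ * method, or type declaration, e.g. `(id: 3, deprecated, required)`
+ * (illustrative example); each entry may carry a scalar or string
+ * value after ':'.
+ */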
+static fb_metadata_t *parse_metadata(fb_parser_t *P)
+{
+ fb_token_t *t, *t0;
+ fb_metadata_t *md = 0;
+
+ if (!(t0 = optional(P, '('))) {
+ return 0;
+ }
+ if ((t = optional(P, LEX_TOK_ID)))
+ for (;;) {
+ fb_add_metadata(P, &md);
+ md->ident = t;
+ if (optional(P, ':')) {
+ parse_value(P, &md->value, allow_string_value, "scalar or string value expected");
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ return md;
+ }
+ if (!optional(P, ',')) {
+ break;
+ }
+ if (!(t = match(P, LEX_TOK_ID, "attribute name expected identifier after ','"))) {
+ break;
+ }
+ }
+ advance(P, ')', "metadata expected ')' to match", t0);
+ revert_metadata(&md);
+ return md;
+}
+
+static void parse_field(fb_parser_t *P, fb_member_t *fld)
+{
+ fb_token_t *t;
+
+ remap_field_ident(P);
+ if (!(t = match(P, LEX_TOK_ID, "field expected identifier"))) {
+ goto fail;
+ }
+ fld->symbol.ident = t;
+ if (!match(P, ':', "field expected ':' before mandatory type")) {
+ goto fail;
+ }
+ parse_type(P, &fld->type);
+ if (optional(P, '=')) {
+ /*
+ * Because types can be named references, we do not check the
+ * default assignment before the schema is fully parsed.
+ * We allow the initializer to be a name in case it is an enum
+ * name.
+ */
+ parse_value(P, &fld->value, allow_id_value | allow_null_value, "initializer must be of scalar type or null");
+ }
+ fld->metadata = parse_metadata(P);
+ advance(P, ';', "field must be terminated with ';'", 0);
+ return;
+fail:
+ recover2(P, ';', 1, '}', 0);
+}
+
+static void parse_method(fb_parser_t *P, fb_member_t *fld)
+{
+ fb_token_t *t;
+ if (!(t = match(P, LEX_TOK_ID, "method expected identifier"))) {
+ goto fail;
+ }
+ fld->symbol.ident = t;
+ if (!match(P, '(', "method expected '(' after identifier")) {
+ goto fail;
+ }
+ parse_type(P, &fld->req_type);
+ if (!match(P, ')', "method expected ')' after request type")) {
+ goto fail;
+ }
+ if (!match(P, ':', "method expected ':' before mandatory response type")) {
+ goto fail;
+ }
+ parse_type(P, &fld->type);
+ if ((t = optional(P, '='))) {
+ error_tok(P, t, "method does not accept an initializer");
+ goto fail;
+ }
+ fld->metadata = parse_metadata(P);
+ advance(P, ';', "method must be terminated with ';'", 0);
+ return;
+fail:
+ recover2(P, ';', 1, '}', 0);
+}
+
+/* `enum` must already be matched. */
+static void parse_enum_decl(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_token_t *t, *t0;
+ fb_member_t *member;
+
+ if (!(ct->symbol.ident = match(P, LEX_TOK_ID, "enum declaration expected identifier"))) {
+ goto fail;
+ }
+ if (optional(P, ':')) {
+ parse_type(P, &ct->type);
+ if (ct->type.type != vt_scalar_type) {
+ error_tok(P, ct->type.t, "integral type expected");
+ } else {
+ switch (ct->type.t->id) {
+ case tok_kw_float:
+ case tok_kw_double:
+ case tok_kw_float32:
+ case tok_kw_float64:
+ error_tok(P, ct->type.t, "integral type expected");
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ ct->metadata = parse_metadata(P);
+ if (!((t0 = match(P, '{', "enum declaration expected '{'")))) {
+ goto fail;
+ }
+ for (;;) {
+ remap_enum_ident(P);
+ if (!(t = match(P, LEX_TOK_ID,
+ "member identifier expected"))) {
+ goto fail;
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ goto fail;
+ }
+ member = fb_add_member(P, &ct->members);
+ member->symbol.ident = t;
+ if (optional(P, '=')) {
+ t = P->token;
+ parse_value(P, &member->value, 0, "integral constant expected");
+ /* Leave detailed type (e.g. no floats) and range checking to a later stage. */
+ }
+ /*
+ * A trailing comma is optional in flatc but not in the grammar; we
+ * follow flatc.
+ */
+ if (!optional(P, ',') || P->token->id == '}') {
+ break;
+ }
+ P->doc = 0;
+ }
+ if (t0) {
+ advance(P, '}', "enum missing closing '}' to match", t0);
+ }
+ revert_symbols(&ct->members);
+ return;
+fail:
+ recover(P, '}', 1);
+}
+
+/* `union` must already be matched. */
+static void parse_union_decl(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_token_t *t0;
+ fb_member_t *member;
+ fb_ref_t *ref;
+ fb_token_t *t;
+
+ if (!(ct->symbol.ident = match(P, LEX_TOK_ID, "union declaration expected identifier"))) {
+ goto fail;
+ }
+ ct->metadata = parse_metadata(P);
+ if (!((t0 = match(P, '{', "union declaration expected '{'")))) {
+ goto fail;
+ }
+ for (;;) {
+ if (P->token->id != LEX_TOK_ID) {
+ error_tok(P, P->token, "union expects an identifier");
+ goto fail;
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ goto fail;
+ }
+ t = P->token;
+ member = fb_add_member(P, &ct->members);
+ parse_ref(P, &ref);
+ member->type.ref = ref;
+ member->type.type = vt_type_ref;
+ while (ref->link) {
+ ref = ref->link;
+ }
+ /* The union member name is the unqualified reference. */
+ member->symbol.ident = ref->ident;
+ if (optional(P, ':')) {
+ if (member->type.ref->link) {
+ error_tok(P, t, "qualified union member name cannot have an explicit type");
+ }
+ parse_type(P, &member->type);
+ /* Leave type checking to later stage. */
+ }
+ if (optional(P, '=')) {
+ parse_value(P, &member->value, 0, "integral constant expected");
+ /* Leave detailed type (e.g. no floats) and range checking to a later stage. */
+ }
+ if (!optional(P, ',') || P->token->id == '}') {
+ break;
+ }
+ P->doc = 0;
+ }
+ advance(P, '}', "union missing closing '}' to match", t0);
+ revert_symbols(&ct->members);
+ /* Add implicit `NONE` member first in the list. */
+ member = fb_add_member(P, &ct->members);
+ member->symbol.ident = &P->t_none;
+ return;
+fail:
+ recover2(P, ';', 1, '}', 0);
+}
+
+/* `struct`, `table`, or `rpc_service` must already be matched. */
+static void parse_compound_type(fb_parser_t *P, fb_compound_type_t *ct, long token)
+{
+ fb_token_t *t = 0;
+
+ if (!(t = match(P, LEX_TOK_ID, "Declaration expected an identifier"))) {
+ goto fail;
+ }
+ ct->symbol.ident = t;
+ ct->metadata = parse_metadata(P);
+ if (!(match(P, '{', "Declaration expected '{'"))) {
+ goto fail;
+ }
+ t = P->token;
+
+/* Allow empty tables and structs. */
+#if 0
+ if (P->token->id == '}') {
+ error_tok(P, t, "table / struct declaration cannot be empty");
+ }
+#endif
+ while (P->token->id != '}') {
+ if (token == tok_kw_rpc_service) {
+ parse_method(P, fb_add_member(P, &ct->members));
+ } else {
+ parse_field(P, fb_add_member(P, &ct->members));
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ goto fail;
+ }
+ }
+ if (!optional(P, '}') && t) {
+ error_tok_2(P, P->token, "Declaration missing closing '}' to match", t);
+ }
+ revert_symbols(&ct->members);
+ return;
+fail:
+ recover(P, '}', 1);
+}
+
+static void parse_namespace(fb_parser_t *P)
+{
+ fb_ref_t *ref = 0;
+ fb_token_t *t = P->token;
+
+ if (optional(P, ';') && t) {
+ /* Revert to global namespace. */
+ P->current_scope = 0;
+ return;
+ }
+ if (P->token->id != LEX_TOK_ID) {
+ error_tok(P, P->token, "namespace expects an identifier");
+ recover(P, ';', 1);
+ return;
+ }
+ parse_ref(P, &ref);
+ advance(P, ';', "missing ';' expected by namespace at", t);
+ P->current_scope = fb_add_scope(P, ref);
+}
+
+static void parse_root_type(fb_parser_t *P, fb_root_type_t *rt)
+{
+ fb_token_t *t = P->token;
+
+ if (rt->name) {
+ error_tok(P, P->token, "root_type already set");
+ }
+ parse_ref(P, &rt->name);
+ rt->scope = P->current_scope;
+ advance(P, ';', "missing ';' expected by root_type at", t);
+}
+
+static void parse_include(fb_parser_t *P)
+{
+ fb_token_t *t = P->token;
+
+ while (optional(P, tok_kw_include)) {
+ if (P->opts.disable_includes) {
+ error_tok(P, t, "include statements not supported by current environment");
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ return;
+ }
+ if (!match(P, LEX_TOK_STRING_BEGIN,
+ "include expected a string literal as filename")) {
+ recover(P, ';', 1);
+ }
+ parse_string_literal(P, &fb_add_include(P)->name);
+ match(P, ';', "include statement expected ';'");
+ }
+}
+
+static void parse_attribute(fb_parser_t *P, fb_attribute_t *a)
+{
+ fb_token_t *t = P->token;
+
+ if (match(P, LEX_TOK_STRING_BEGIN, "attribute expected string literal")) {
+ parse_string_literal(P, &a->name.name);
+ if (a->name.name.s.len == 0) {
+ error_tok_as_string(P, t, "attribute name cannot be empty", 0, 0);
+ }
+ }
+ match(P, ';', "attribute expected ';'");
+}
+
+static void parse_file_extension(fb_parser_t *P, fb_value_t *v)
+{
+ if (v->type == vt_string) {
+ error_tok_as_string(P, P->token, "file extension already set", v->s.s, (size_t)v->s.len);
+ }
+ if (!match(P, LEX_TOK_STRING_BEGIN, "file_extension expected string literal")) {
+ goto fail;
+ }
+ parse_string_literal(P, v);
+ match(P, ';', "file_extension expected ';'");
+ return;
+fail:
+ recover(P, ';', 1);
+}
+
+static void parse_file_identifier(fb_parser_t *P, fb_value_t *v)
+{
+ fb_token_t *t;
+ if (v->type != vt_missing) {
+ error_tok_as_string(P, P->token, "file identifier already set", v->s.s, (size_t)v->s.len);
+ }
+ if (!match(P, LEX_TOK_STRING_BEGIN, "file_identifier expected string literal")) {
+ goto fail;
+ }
+ t = P->token;
+ parse_string_literal(P, v);
+ if (v->s.s && v->s.len != 4) {
+ v->type = vt_invalid;
+ error_tok(P, t, "file_identifier must be 4 characters");
+ }
+ match(P, ';', "file_identifier expected ';'");
+ return;
+fail:
+ recover(P, ';', 1);
+}
+
+static void parse_schema_decl(fb_parser_t *P)
+{
+ switch(P->token->id) {
+ case tok_kw_namespace:
+ next(P);
+ parse_namespace(P);
+ break;
+ case tok_kw_file_extension:
+ next(P);
+ parse_file_extension(P, &P->schema.file_extension);
+ break;
+ case tok_kw_file_identifier:
+ next(P);
+ parse_file_identifier(P, &P->schema.file_identifier);
+ break;
+ case tok_kw_root_type:
+ next(P);
+ parse_root_type(P, &P->schema.root_type);
+ break;
+ case tok_kw_attribute:
+ next(P);
+ parse_attribute(P, fb_add_attribute(P));
+ break;
+ case tok_kw_struct:
+ next(P);
+ parse_compound_type(P, fb_add_struct(P), tok_kw_struct);
+ break;
+ case tok_kw_table:
+ next(P);
+ parse_compound_type(P, fb_add_table(P), tok_kw_table);
+ break;
+ case tok_kw_rpc_service:
+ next(P);
+ parse_compound_type(P, fb_add_rpc_service(P), tok_kw_rpc_service);
+ break;
+ case tok_kw_enum:
+ next(P);
+ parse_enum_decl(P, fb_add_enum(P));
+ break;
+ case tok_kw_union:
+ next(P);
+ parse_union_decl(P, fb_add_union(P));
+ break;
+ case tok_kw_include:
+ error_tok(P, P->token, "include statements must be placed first in the schema");
+ break;
+ case '{':
+ error_tok(P, P->token, "JSON objects in schema file is not supported - but a schema specific JSON parser can be generated");
+ break;
+ case LEX_TOK_CTRL:
+ error_tok_as_string(P, P->token, "unexpected control character in schema definition", "?", 1);
+ break;
+ case LEX_TOK_COMMENT_CTRL:
+ error_tok_as_string(P, P->token, "unexpected control character in comment", "?", 1);
+ break;
+ case LEX_TOK_COMMENT_UNTERMINATED:
+ error_tok_as_string(P, P->token, "unterminated comment", "<eof>", 5);
+ break;
+ default:
+ error_tok(P, P->token, "unexpected token in schema definition");
+ break;
+ }
+}
+
+static int parse_schema(fb_parser_t *P)
+{
+ fb_token_t *t, *t0;
+ parse_include(P);
+ t = P->token;
+ for (;;) {
+ if (is_end(t)) {
+ break;
+ }
+ if (P->failed >= FLATCC_MAX_ERRORS) {
+ return -1;
+ }
+ t0 = t;
+ parse_schema_decl(P);
+ t = P->token;
+ if (t == t0) {
+ if (P->failed) {
+ return -1;
+ }
+ error_tok(P, t, "extra tokens in input");
+ return -1;
+ }
+ }
+ revert_names(&P->schema.attributes);
+ revert_symbols(&P->schema.symbols);
+ return 0;
+}
+
+static inline void clear_elem_buffers(fb_parser_t *P)
+{
+ void **p, **p2;
+
+ p = P->elem_buffers;
+ while (p) {
+ p2 = *((void**)p);
+ free(p);
+ p = p2;
+ }
+}
+
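+/*
+ * Appends one token to the parser's token array, doubling the capacity
+ * (starting at 1024 entries) whenever the array is full.
+ */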
+static void push_token(fb_parser_t *P, long id, const char *first, const char *last)
+{
+ size_t offset;
+ fb_token_t *t;
+
+ P->te = P->ts + P->tcapacity;
+ if (P->token == P->te) {
+ offset = (size_t)(P->token - P->ts);
+ P->tcapacity = P->tcapacity ? 2 * P->tcapacity : 1024;
+ P->ts = realloc(P->ts, (size_t)P->tcapacity * sizeof(fb_token_t));
+ checkmem(P->ts);
+ P->te = P->ts + P->tcapacity;
+ P->token = P->ts + offset;
+ }
+ t = P->token;
+ t->id = id;
+ t->text = first;
+ t->len = (long)(last - first);
+ t->linenum = P->linenum;
+ t->pos = (long)(first - P->line + 1);
+ ++P->token;
+}
+
+/*
+ * If the file contains a control character, we can get multiple
+ * comments per line.
+ */
+static inline void push_comment(fb_parser_t *P, const char *first, const char *last)
+{
+ if (P->doc_mode) {
+ push_token(P, tok_kw_doc_comment, first, last);
+ }
+}
+
+static void inject_token(fb_token_t *t, const char *lex, long id)
+{
+ t->id = id;
+ t->text = lex;
+ t->len = (long)strlen(lex);
+ t->pos = 0;
+ t->linenum = 0;
+}
+
+/* --- Customize lexer --- */
+
+/* Depends on the `context` argument given to the lex function. */
+#define ctx(name) (((fb_parser_t *)context)->name)
+
+#define lex_emit_newline(first, last) (ctx(linenum)++, ctx(line) = last)
+
+#define lex_emit_string_newline(first, last) \
+ (ctx(linenum)++, ctx(line) = last, \
+ push_token((fb_parser_t*)context, LEX_TOK_STRING_NEWLINE, first, last))
+
+/*
+ * Add an empty comment on comment start - otherwise we miss empty lines.
+ * Save is_doc because comment_part does not remember it.
+ */
+#define lex_emit_comment_begin(first, last, is_doc) \
+ { ctx(doc_mode) = is_doc; push_comment((fb_parser_t*)context, last, last); }
+#define lex_emit_comment_part(first, last) push_comment((fb_parser_t*)context, first, last)
+#define lex_emit_comment_end(first, last) (ctx(doc_mode) = 0)
+
+/* By default emitted as lex_emit_other which would be ignored. */
+#define lex_emit_comment_unterminated(pos) \
+ push_token((fb_parser_t*)context, LEX_TOK_COMMENT_UNTERMINATED, pos, pos)
+
+#define lex_emit_comment_ctrl(pos) \
+ if (lex_isblank(*pos)) { \
+ push_comment((fb_parser_t*)context, pos, pos + 1); \
+ } else { \
+ push_token((fb_parser_t*)context, LEX_TOK_COMMENT_CTRL, \
+ pos, pos + 1); \
+ }
+
+/*
+ * Provide hook to lexer for emitting tokens. We can override many
+ * things, but most default to calling lex_emit, so that is all we need
+ * to handle.
+ *
+ * `context` is a magic name available to macros in the lexer.
+ */
+#define lex_emit(token, first, last) \
+ push_token((fb_parser_t*)context, token, first, last)
+
+/*
+ * We could just use eos directly as it defaults to emit, but formally we
+ * should use the eof marker which is always zero, so the parser can check
+ * for it easily, if needed.
+ */
+#define lex_emit_eos(first, last) \
+ push_token((fb_parser_t*)context, LEX_TOK_EOF, first, last)
+
+/*
+ * This event happens in place of eos if we exhaust the input buffer.
+ * In this case we treat this as end of input, but this choice prevents
+ * us from parsing across multiple buffers.
+ */
+#define lex_emit_eob(pos) \
+ push_token((fb_parser_t*)context, LEX_TOK_EOF, pos, pos)
+
+/*
+ * Luthor is our speedy generic lexer - it knows most common operators
+ * and therefore allows us to fail meaningfully on those that we don't
+ * support here, which is most.
+ */
+#include "lex/luthor.c"
+
+#include "keywords.h"
+
+/* Root schema `rs` is null for top level parser. */
+int fb_init_parser(fb_parser_t *P, fb_options_t *opts, const char *name,
+ fb_error_fun error_out, void *error_ctx, fb_root_schema_t *rs)
+{
+ size_t n, name_len;
+ char *s;
+
+ memset(P, 0, sizeof(*P));
+
+ if (error_out) {
+ P->error_out = error_out;
+ P->error_ctx = error_ctx;
+ } else {
+ P->error_out = fb_default_error_out;
+ }
+ if (opts) {
+ memcpy(&P->opts, opts, sizeof(*opts));
+ } else {
+ flatcc_init_options(&P->opts);
+ }
+ P->schema.root_schema = rs ? rs : &P->schema.root_schema_instance;
+ switch (P->opts.offset_size) {
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ error(P, "invalid offset configured, must be 2, 4 (default), or 8");
+ return -1;
+ }
+ switch (P->opts.voffset_size) {
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ error(P, "invalid voffset configured, must be 2 (default), 4, or 8");
+ return -1;
+ }
+ if (!name) {
+ /* Mostly for testing, just so we always have a name. */
+ name = FLATCC_DEFAULT_FILENAME;
+ }
+ if (name == 0) {
+ name = "";
+ }
+ name_len = strlen(name);
+ checkmem((P->schema.basename = fb_create_basename(name, name_len, opts->default_schema_ext)));
+ n = strlen(P->schema.basename);
+ checkmem(s = fb_copy_path_n(P->schema.basename, n));
+ pstrntoupper(s, n);
+ P->schema.basenameup = s;
+ P->schema.name.name.s.s = s;
+ P->schema.name.name.s.len = (int)n;
+ checkmem((P->schema.errorname = fb_create_basename(name, name_len, "")));
+ if (opts->ns) {
+ P->schema.prefix.s = (char *)opts->ns;
+ P->schema.prefix.len = (int)strlen(opts->ns);
+ }
+ P->current_scope = fb_add_scope(P, 0);
+ assert(P->current_scope == fb_scope_table_find(&P->schema.root_schema->scope_index, 0, 0));
+ return 0;
+}
+
+/*
+ * Main entry function for this specific parser type.
+ * We expect a zero terminated string.
+ *
+ * The parser structure is uninitialized upon entry, and should be
+ * cleared with `clear_flatbuffer_parser` subsequently.
+ *
+ * Datastructures point into the token buffer and into the input
+ * buffer, so the parser and input should not be cleared prematurely.
+ *
+ * The input buffer must remain valid until the parser is cleared
+ * because the internal representation stores pointers into the buffer.
+ *
+ * `own_buffer` indicates that the buffer should be deallocated when
+ * the parser is cleaned up.
+ */
+int fb_parse(fb_parser_t *P, const char *input, size_t len, int own_buffer)
+{
+ static const char *id_none = "NONE";
+ static const char *id_ubyte = "ubyte";
+
+ P->line = input;
+ P->linenum = 1;
+
+ /* Used with union defaults. */
+ inject_token(&P->t_none, id_none, LEX_TOK_ID);
+ inject_token(&P->t_ubyte, id_ubyte, tok_kw_ubyte);
+
+ if (own_buffer) {
+ P->managed_input = input;
+ }
+ lex(input, len, 0, P);
+
+ P->te = P->token;
+ P->token = P->ts;
+ /* Only used while processing table ids. */
+ checkmem((P->tmp_field_marker = malloc(sizeof(P->tmp_field_marker[0]) * (size_t)P->opts.vt_max_count)));
+ checkmem((P->tmp_field_index = malloc(sizeof(P->tmp_field_index[0]) * (size_t)P->opts.vt_max_count)));
+ if (P->token->id == tok_kw_doc_comment) {
+ next(P);
+ }
+ parse_schema(P);
+ return P->failed;
+}
+
+static void __destroy_scope_item(void *item, fb_scope_t *scope)
+{
+ /* Each scope points into table that is cleared separately. */
+ (void)item;
+
+ fb_symbol_table_clear(&scope->symbol_index);
+}
+
+void fb_clear_parser(fb_parser_t *P)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *ct;
+
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_rpc_service:
+ case fb_is_enum:
+ case fb_is_union:
+ ct = (fb_compound_type_t *)sym;
+ fb_symbol_table_clear(&ct->index);
+ fb_value_set_clear(&ct->value_set);
+ }
+ }
+ fb_schema_table_clear(&P->schema.root_schema_instance.include_index);
+ fb_name_table_clear(&P->schema.root_schema_instance.attribute_index);
+ ptr_set_clear(&P->schema.visible_schema);
+ if (P->tmp_field_marker) {
+ free(P->tmp_field_marker);
+ }
+ if (P->tmp_field_index) {
+ free(P->tmp_field_index);
+ }
+ if (P->ts) {
+ free(P->ts);
+ }
+ if (P->schema.basename) {
+ free((void *)P->schema.basename);
+ }
+ if (P->schema.basenameup) {
+ free((void *)P->schema.basenameup);
+ }
+ if (P->schema.errorname) {
+ free((void *)P->schema.errorname);
+ }
+ /*
+ * P->referer_path in included files points to parent P->path, so
+ * don't free it, and don't access it after this point.
+ */
+ if (P->path) {
+ free((void *)P->path);
+ }
+ fb_scope_table_destroy(&P->schema.root_schema_instance.scope_index,
+ __destroy_scope_item, 0);
+ /* Destroy last since destructor has references into elem buffer. */
+ clear_elem_buffers(P);
+ if (P->managed_input) {
+ free((void *)P->managed_input);
+ }
+ memset(P, 0, sizeof(*P));
+}
diff --git a/flatcc/src/compiler/parser.h b/flatcc/src/compiler/parser.h
new file mode 100644
index 0000000..ef2ecc1
--- /dev/null
+++ b/flatcc/src/compiler/parser.h
@@ -0,0 +1,213 @@
+#ifndef PARSER_H
+#define PARSER_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "../../config/config.h"
+#include "flatcc/flatcc.h"
+#include "symbols.h"
+
+#define ELEM_BUFSIZ (64 * 1024)
+#define ERROR_BUFSIZ 200
+
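+/*
+ * Reverses a singly linked list in place. Lists are built head-first
+ * during parsing and reverted afterwards (see the revert_* calls in
+ * parser.c) to restore declaration order.
+ */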
+#define REVERT_LIST(TYPE, FIELD, HEAD) \
+ do { \
+ TYPE *tmp__next, *tmp__prev = 0, *tmp__link = *(HEAD); \
+ while (tmp__link) { \
+ tmp__next = tmp__link->FIELD; \
+ tmp__link->FIELD = tmp__prev; \
+ tmp__prev = tmp__link; \
+ tmp__link = tmp__next; \
+ } \
+ *(HEAD) = tmp__prev; \
+ } while (0)
+
+typedef struct fb_parser fb_parser_t;
+typedef flatcc_options_t fb_options_t;
+
+typedef void (*fb_error_fun)(void *err_ctx, const char *buf, size_t len);
+
+void __flatcc_fb_default_error_out(void *err_ctx, const char *buf, size_t len);
+#define fb_default_error_out __flatcc_fb_default_error_out
+
+int __flatcc_fb_print_error(fb_parser_t *P, const char * format, ...);
+#define fb_print_error __flatcc_fb_print_error
+
+struct fb_parser {
+ fb_parser_t *dependencies;
+ fb_parser_t *inverse_dependencies;
+ fb_error_fun error_out;
+ void *error_ctx;
+
+ const char *managed_input;
+
+ fb_token_t *ts, *te;
+ int tcapacity;
+ int doc_mode;
+ fb_doc_t *doc;
+ fb_token_t *token;
+
+ size_t elem_end;
+ void *elem_buffers;
+ size_t elem;
+ size_t offset_size;
+
+ const char *line;
+ long linenum;
+
+ /* Internal id (not a pointer into token stream). */
+ fb_token_t t_none;
+ fb_token_t t_ubyte;
+
+ int failed;
+
+ unsigned char *tmp_field_marker;
+ fb_symbol_t **tmp_field_index;
+ int nesting_level;
+
+ int has_schema;
+ fb_options_t opts;
+ fb_schema_t schema;
+ fb_scope_t *current_scope;
+ char *path;
+ char *referer_path;
+};
+
+static inline void checkmem(const void *p)
+{
+ if (!p) {
+ fprintf(stderr, "error: out of memory, aborting...\n");
+ exit(1);
+ }
+}
+
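+/*
+ * Bump allocator for parser elements: sizes are rounded up to 16
+ * bytes and carved out of zeroed ELEM_BUFSIZ blocks. Each block
+ * stores a pointer to the previous block in its first 16 bytes so
+ * that clear_elem_buffers() in parser.c can free the whole chain.
+ */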
+static inline void *new_elem(fb_parser_t *P, size_t size)
+{
+ size_t elem;
+ void *buf;
+
+ size = (size + 15) & ~(size_t)15;
+ elem = P->elem;
+ if (elem + size > P->elem_end) {
+ buf = calloc(ELEM_BUFSIZ, 1);
+ checkmem(buf);
+ *(void**)buf = P->elem_buffers;
+ P->elem_buffers = buf;
+ elem = P->elem = (size_t)buf + 16;
+ P->elem_end = (size_t)buf + ELEM_BUFSIZ;
+ }
+ P->elem += size;
+ return (void*)elem;
+}
+
+const char *__flatcc_error_find_file_of_token(fb_parser_t *P, fb_token_t *t);
+#define error_find_file_of_token __flatcc_error_find_file_of_token
+
+/*
+ * This is the primary error reporting function.
+ * The parser is flagged as failed and error count incremented.
+ *
+ * If s is not null, then s, len replaces the token text of `t` but
+ * still reports the location of t. `peer` is optional and prints the
+ * token location and text at the end of the message.
+ * `msg` may be the only non-zero argument besides `P`.
+ *
+ * Various helper functions are available for the various cases.
+ *
+ * `fb_print_error` may be called instead to generate text to the error
+ * output that is not counted as an error.
+ */
+void __flatcc_error_report(fb_parser_t *P, fb_token_t *t, const char *msg, fb_token_t *peer, const char *s, size_t len);
+#define error_report __flatcc_error_report
+
+static void error_tok_2(fb_parser_t *P, fb_token_t *t, const char *msg, fb_token_t *peer)
+{
+ error_report(P, t, msg, peer, 0, 0);
+}
+
+static inline void error_tok(fb_parser_t *P, fb_token_t *t, const char *msg)
+{
+ error_tok_2(P, t, msg, 0);
+}
+
+/* Only use the token location. */
+static inline void error_tok_as_string(fb_parser_t *P, fb_token_t *t, const char *msg, char *s, size_t len)
+{
+ error_report(P, t, msg, 0, s, len);
+}
+
+static inline void error(fb_parser_t *P, const char *msg)
+{
+ error_tok(P, 0, msg);
+}
+
+static inline void error_name(fb_parser_t *P, fb_name_t *name, const char *msg)
+{
+ if (!name) {
+ error(P, msg);
+ } else {
+ error_report(P, 0, msg, 0, name->name.s.s, (size_t)name->name.s.len);
+ }
+}
+
+static inline void error_sym(fb_parser_t *P, fb_symbol_t *s, const char *msg)
+{
+ error_tok(P, s->ident, msg);
+}
+
+static inline void error_sym_2(fb_parser_t *P, fb_symbol_t *s, const char *msg, fb_symbol_t *s2)
+{
+ error_tok_2(P, s->ident, msg, s2->ident);
+}
+
+static inline void error_sym_tok(fb_parser_t *P, fb_symbol_t *s, const char *msg, fb_token_t *t2)
+{
+ error_tok_2(P, s->ident, msg, t2);
+}
+
+void error_ref_sym(fb_parser_t *P, fb_ref_t *ref, const char *msg, fb_symbol_t *s2);
+
+static inline void error_ref(fb_parser_t *P, fb_ref_t *ref, const char *msg)
+{
+ error_ref_sym(P, ref, msg, 0);
+}
+
+/*
+ * If `opts` is null, default options are used, otherwise opts is
+ * copied into the parser's options. The name may be a path; the basename
+ * without the default extension will be extracted. The `error_out` function
+ * is optional; otherwise output is printed to stderr, truncated to a
+ * reasonable size per error. `error_ctx` is provided as argument to
+ * `error_out` if non-zero, and otherwise ignored.
+ *
+ * This api only deals with a single schema file so a parent level
+ * driver must handle file inclusion and update P->dependencies but
+ * order is not significant (parse order is, but this is handled by
+ * updating the `include_index` in the root schema).
+ *
+ * P->dependencies must be cleared by the caller in any order, but once one
+ * is cleared the entire structure should be taken down because symbol
+ * trees point everywhere. For parses without file inclusion,
+ * dependencies will be null. Dependencies are not handled at this
+ * level. P->inverse_dependencies is just the reverse list.
+ *
+ * The file at the head of the dependencies list is the root and the
+ * one that provides the root schema. Other root schemas are not used.
+ */
+int __flatcc_fb_init_parser(fb_parser_t *P, fb_options_t *opts, const char *name,
+ fb_error_fun error_out, void *error_ctx, fb_root_schema_t *rs);
+#define fb_init_parser __flatcc_fb_init_parser
+
+int __flatcc_fb_parse(fb_parser_t *P, const char *input, size_t len, int own_buffer);
+#define fb_parse __flatcc_fb_parse
+
+void __flatcc_fb_clear_parser(fb_parser_t *P);
+#define fb_clear_parser __flatcc_fb_clear_parser
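+
+/*
+ * Usage sketch (not from the original source; `src` is a hypothetical
+ * zero-terminated schema string and "example.fbs" a hypothetical file
+ * name). Errors are reported through the default handler on stderr
+ * because no `error_out` is supplied:
+ *
+ *   fb_parser_t parser;
+ *   fb_options_t opts;
+ *   int failed;
+ *
+ *   flatcc_init_options(&opts);
+ *   if (fb_init_parser(&parser, &opts, "example.fbs", 0, 0, 0) == 0) {
+ *       failed = fb_parse(&parser, src, strlen(src), 0);
+ *       fb_clear_parser(&parser);
+ *   }
+ */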
+
+#endif /* PARSER_H */
diff --git a/flatcc/src/compiler/pstrutil.h b/flatcc/src/compiler/pstrutil.h
new file mode 100644
index 0000000..40795a6
--- /dev/null
+++ b/flatcc/src/compiler/pstrutil.h
@@ -0,0 +1,58 @@
+#ifndef PSTRUTIL_H
+#define PSTRUTIL_H
+
+#include <ctype.h> /* toupper */
+
+
+/*
+ * NOTE: unlike strncpy, we do not pad up to n; the functions return
+ * the destination pointer. Same applies to related functions.
+ */
+
+/* `strnlen` not widely supported. */
+static inline size_t pstrnlen(const char *s, size_t max_len)
+{
+ const char *end = memchr (s, 0, max_len);
+ return end ? (size_t)(end - s) : max_len;
+}
+
+static inline char *pstrcpyupper(char *dst, const char *src) {
+ char *p = dst;
+ while (*src) {
+ *p++ = (char)toupper(*src++);
+ }
+ *p = '\0';
+ return dst;
+}
+
+static inline char *pstrncpyupper(char *dst, const char *src, size_t n) {
+ size_t i;
+ for (i = 0; i < n && src[i]; ++i) {
+ dst[i] = (char)toupper(src[i]);
+ }
+ if (i < n) {
+ dst[i] = '\0';
+ }
+ return dst;
+}
+
+static inline char *pstrtoupper(char *dst) {
+ char *p;
+ for (p = dst; *p; ++p) {
+ *p = (char)toupper(*p);
+ }
+ return dst;
+}
+
+static inline char *pstrntoupper(char *dst, size_t n) {
+ size_t i;
+ for (i = 0; i < n && dst[i]; ++i) {
+ dst[i] = (char)toupper(dst[i]);
+ }
+ return dst;
+}
+
+#undef strnlen
+#define strnlen pstrnlen
+
+#endif /* PSTRUTIL_H */
diff --git a/flatcc/src/compiler/semantics.c b/flatcc/src/compiler/semantics.c
new file mode 100644
index 0000000..d0a766a
--- /dev/null
+++ b/flatcc/src/compiler/semantics.c
@@ -0,0 +1,1962 @@
+#include <string.h>
+#include <assert.h>
+
+#include "semantics.h"
+#include "parser.h"
+#include "coerce.h"
+#include "stdio.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+/* Same order as enum! */
+static const char *fb_known_attribute_names[] = {
+ "",
+ "id",
+ "deprecated",
+ "original_order",
+ "force_align",
+ "bit_flags",
+ "nested_flatbuffer",
+ "key",
+ "required",
+ "hash",
+ "base64",
+ "base64url",
+ "primary_key",
+ "sorted",
+};
+
+static const int fb_known_attribute_types[] = {
+ vt_invalid, /* Unknowns have arbitrary types. */
+ vt_uint,
+ vt_missing,
+ vt_missing,
+ vt_uint,
+ vt_missing,
+ vt_string,
+ vt_missing,
+ vt_missing,
+ vt_string,
+ vt_missing,
+ vt_missing,
+ vt_missing,
+ vt_missing,
+};
+
+static fb_scalar_type_t map_scalar_token_type(fb_token_t *t)
+{
+ switch (t->id) {
+ case tok_kw_uint64:
+ case tok_kw_ulong:
+ return fb_ulong;
+ case tok_kw_uint32:
+ case tok_kw_uint:
+ return fb_uint;
+ case tok_kw_uint16:
+ case tok_kw_ushort:
+ return fb_ushort;
+ case tok_kw_uint8:
+ case tok_kw_ubyte:
+ return fb_ubyte;
+ case tok_kw_char:
+ return fb_char;
+ case tok_kw_bool:
+ return fb_bool;
+ case tok_kw_int64:
+ case tok_kw_long:
+ return fb_long;
+ case tok_kw_int32:
+ case tok_kw_int:
+ return fb_int;
+ case tok_kw_int16:
+ case tok_kw_short:
+ return fb_short;
+ case tok_kw_int8:
+ case tok_kw_byte:
+ return fb_byte;
+ case tok_kw_float64:
+ case tok_kw_double:
+ return fb_double;
+ case tok_kw_float32:
+ case tok_kw_float:
+ return fb_float;
+ default:
+ return fb_missing_type;
+ }
+}
+
+/*
+ * The flatc compiler currently has a 256 limit.
+ *
+ * Some target C compilers might not respect alignments above
+ * 16 and may require the PAD option of the C code generator.
+ */
+static inline int is_valid_align(uint64_t align)
+{
+ uint64_t n = 1;
+ if (align == 0 || align > FLATCC_FORCE_ALIGN_MAX) {
+ return 0;
+ }
+ while (n <= align) {
+ if (n == align) {
+ return 1;
+ }
+ n *= 2;
+ }
+ return 0;
+}
+
+static inline uint64_t fb_align(uint64_t size, uint64_t align)
+{
+ assert(is_valid_align(align));
+
+ return (size + align - 1) & ~(align - 1);
+}
+
+/*
+ * The FNV-1a 32-bit little endian hash is a FlatBuffers standard for
+ * transmission of type identifiers in a compact form, in particular as
+ * alternative file identifiers. Note that if hash becomes 0, we map it
+ * to hash("").
+ */
+static inline void set_type_hash(fb_compound_type_t *ct)
+{
+ fb_ref_t *name;
+ fb_symbol_t *sym;
+ uint32_t hash;
+
+ hash = fb_hash_fnv1a_32_init();
+ if (ct->scope) {
+ for (name = ct->scope->name; name; name = name->link) {
+ hash = fb_hash_fnv1a_32_append(hash, name->ident->text, (size_t)name->ident->len);
+ hash = fb_hash_fnv1a_32_append(hash, ".", 1);
+ }
+ }
+ sym = &ct->symbol;
+ hash = fb_hash_fnv1a_32_append(hash, sym->ident->text, (size_t)sym->ident->len);
+ if (hash == 0) {
+ hash = fb_hash_fnv1a_32_init();
+ }
+ ct->type_hash = hash;
+}
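+
+/*
+ * Sketch (not from the original source, assuming the standard FNV-1a
+ * 32-bit constants behind fb_hash_fnv1a_32_*): for a type `Foo` in
+ * namespace `My.Game` the hash input is "My.Game.Foo" and the result
+ * is equivalent to
+ *
+ *   hash = 2166136261                      (offset basis)
+ *   for each byte c of "My.Game.Foo":
+ *       hash = (hash ^ c) * 16777619       (FNV prime, mod 2^32)
+ *   if hash == 0: hash = 2166136261        (remap to hash(""))
+ */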
+
+static inline fb_scope_t *fb_find_scope_by_string(fb_schema_t *S, const char *name, size_t len)
+{
+ if (!S || !S->root_schema) {
+ return 0;
+ }
+ if (len == 0) {
+ /* Global scope. */
+ name = 0;
+ }
+ return fb_scope_table_find(&S->root_schema->scope_index, name, len);
+}
+
+/* count = 0 indicates zero-terminated ref list, name = 0 indicates global scope. */
+static inline fb_scope_t *fb_find_scope_by_ref(fb_schema_t *S, const fb_ref_t *name, int count)
+{
+ if (!S || !S->root_schema) {
+ return 0;
+ }
+ return fb_scope_table_find(&S->root_schema->scope_index, name, (size_t)(-count));
+}
+
+static inline fb_symbol_t *define_fb_symbol(fb_symbol_table_t *si, fb_symbol_t *sym)
+{
+ return fb_symbol_table_insert_item(si, sym, ht_keep);
+}
+
+static inline fb_symbol_t *find_fb_symbol_by_token(fb_symbol_table_t *si, fb_token_t *token)
+{
+ return fb_symbol_table_find(si, token->text, (size_t)token->len);
+}
+
+static inline fb_name_t *define_fb_name(fb_name_table_t *ni, fb_name_t *name)
+{
+ return fb_name_table_insert_item(ni, name, ht_keep);
+}
+
+static inline fb_name_t *find_fb_name_by_token(fb_name_table_t *ni, fb_token_t *token)
+{
+ return fb_name_table_find(ni, token->text, (size_t)token->len);
+}
+
+/* Returns 1 if the value exists, 0 otherwise. */
+static inline int add_to_value_set(fb_value_set_t *vs, fb_value_t *value)
+{
+ return fb_value_set_insert_item(vs, value, ht_keep) != 0;
+}
+
+static inline int is_in_value_set(fb_value_set_t *vs, fb_value_t *value)
+{
+ return 0 != fb_value_set_find_item(vs, value);
+}
+
+/*
+ * An immediate parent scope does not necessarily exist and it might
+ * appear in a later search, so we return the nearest existing parent
+ * and do not cache the parent.
+ */
+static inline fb_scope_t *find_parent_scope(fb_parser_t *P, fb_scope_t *scope)
+{
+ fb_ref_t *p;
+ int count;
+ fb_scope_t *parent;
+
+ parent = 0;
+ count = 0;
+ if (scope == 0) {
+ return 0;
+ }
+ p = scope->name;
+ while (p) {
+ ++count;
+ p = p->link;
+ }
+ if (count == 0) {
+ return 0;
+ }
+ while (count-- > 1) {
+ if ((parent = fb_find_scope_by_ref(&P->schema, scope->name, count))) {
+ return parent;
+ }
+ }
+ /* Root scope. */
+ return fb_find_scope_by_ref(&P->schema, 0, 0);
+}
+
+static inline fb_symbol_t *lookup_string_reference(fb_parser_t *P, fb_scope_t *local, const char *s, size_t len)
+{
+ fb_symbol_t *sym;
+ fb_scope_t *scope;
+ const char *name, *basename;
+ size_t k;
+
+ name = s;
+ basename = s;
+ k = len;
+ while (k > 0) {
+ if (s[--k] == '.') {
+ basename = s + k + 1;
+ --len;
+ break;
+ }
+ }
+ len -= k;
+ if (local && k == 0) {
+ do {
+ if ((sym = fb_symbol_table_find(&local->symbol_index, basename, len))) {
+ if (get_compound_if_visible(&P->schema, sym)) {
+ return sym;
+ }
+ }
+ local = find_parent_scope(P, local);
+ } while (local);
+ return 0;
+ }
+ if (!(scope = fb_find_scope_by_string(&P->schema, name, k))) {
+ return 0;
+ }
+ return fb_symbol_table_find(&scope->symbol_index, basename, len);
+}
+
+/*
+ * First search the optional local scope, then the scope of the namespace prefix if any.
+ * If `enumval` is non-zero, the last name part is stored in that
+ * pointer and the lookup stops before that part.
+ *
+ * If the reference is prefixed with a namespace then the scope is
+ * looked up relative to root then the basename is searched in that
+ * scope.
+ *
+ * If the reference is not prefixed with a namespace then the name is
+ * searched in the local symbol table (which may be the root if null) and
+ * if that fails, the nearest existing parent scope is used as the new
+ * local scope and the process is repeated until local is root.
+ *
+ * This means that namespace prefixes cannot be relative to a parent
+ * namespace or to the current scope, but simple names can be found in a
+ * parent namespace.
+ */
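+/*
+ * Illustrative sketch (not from the original source), hypothetical
+ * schema:
+ *
+ *   namespace A;
+ *   table Foo {}
+ *   namespace A.B;
+ *   table Bar { x: Foo; y: A.Foo; }
+ *
+ * The simple name `Foo` is searched in `A.B` and then in the nearest
+ * existing parent scope `A`, where it is found. The prefixed name
+ * `A.Foo` resolves its scope `A` relative to the root. A prefix such
+ * as `B.Foo` would not resolve from within `A.B` because prefixes are
+ * only interpreted relative to the root.
+ */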
+static inline fb_symbol_t *lookup_reference(fb_parser_t *P, fb_scope_t *local, fb_ref_t *name, fb_ref_t **enumval)
+{
+ fb_ref_t *basename, *last, *p;
+ fb_scope_t *scope;
+ fb_symbol_t *sym;
+ int count;
+
+ count = 0;
+ scope = 0;
+ p = name;
+ last = 0;
+ basename = 0;
+ while (p) {
+ basename = last;
+ last = p;
+ p = p->link;
+ ++count;
+ }
+ if (enumval) {
+ --count;
+ *enumval = last;
+ } else {
+ basename = last;
+ }
+ if (!basename) {
+ return 0;
+ }
+ if (local && count == 1) {
+ do {
+ if ((sym = find_fb_symbol_by_token(&local->symbol_index, basename->ident))) {
+ if (get_compound_if_visible(&P->schema, sym)) {
+ return sym;
+ }
+ }
+ local = find_parent_scope(P, local);
+ } while (local);
+ return 0;
+ }
+ /* Null name is valid in scope lookup, indicating global scope. */
+ if (count == 1) {
+ name = 0;
+ }
+ if (!(scope = fb_find_scope_by_ref(&P->schema, name, count - 1))) {
+ return 0;
+ }
+ sym = find_fb_symbol_by_token(&scope->symbol_index, basename->ident);
+ if (sym && get_compound_if_visible(&P->schema, sym)) {
+ return sym;
+ }
+ return 0;
+}
+
+static inline fb_symbol_t *lookup_type_reference(fb_parser_t *P, fb_scope_t *local, fb_ref_t *name)
+{
+ return lookup_reference(P, local, name, 0);
+}
+
+/*
+ * `ct` is null when looking up names for scalar types and otherwise it is
+ * the enum type being assigned. The provided reference may reference
+ * an enum value in the `ct` type, or another enum if a scope/type is
+ * given.
+ */
+static inline int lookup_enum_name(fb_parser_t *P, fb_scope_t *local, fb_compound_type_t *ct, fb_ref_t *ref, fb_value_t *value)
+{
+ fb_symbol_t *sym;
+ fb_ref_t *enumval;
+ fb_member_t *member;
+
+ enumval = 0;
+ assert(ref);
+ assert(ct == 0 || ct->symbol.kind == fb_is_enum);
+ sym = lookup_reference(P, local, ref, &enumval);
+ if (sym && sym->kind == fb_is_enum) {
+ ct = (fb_compound_type_t *)sym;
+ } else if (ref->link) {
+ /* If there was a scope / type prefix, it was not found, or it was not an enum type. */
+ return -1;
+ }
+ if (!ct) {
+ return -1;
+ }
+ sym = find_fb_symbol_by_token(&ct->index, enumval->ident);
+ if (!sym) {
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ *value = member->value;
+ return 0;
+}
+
+/* This is repeated for every include file, but this poses no problem. */
+static void install_known_attributes(fb_parser_t *P)
+{
+ unsigned int i;
+ fb_attribute_t *a;
+
+ for (i = 0; i < KNOWN_ATTR_COUNT; ++i) {
+ /* Don't put it in the parsed list, just the index. */
+ a = new_elem(P, sizeof(*a));
+ a->known = i;
+ a->name.name.s.s = (char *)fb_known_attribute_names[i];
+ a->name.name.s.len = (int)strlen(fb_known_attribute_names[i]);
+ a->name.name.type = vt_string;
+ a->name.link = 0;
+ if ((a = (fb_attribute_t *)define_fb_name(&P->schema.root_schema->attribute_index, &a->name))) {
+ /*
+ * If the user already defined the attribute, keep that instead.
+ * (Memory leak is ok here.)
+ */
+ a->known = i;
+ }
+ }
+}
+
+static void revert_order(fb_compound_type_t **list) {
+ fb_compound_type_t *next, *prev = 0, *link = *list;
+
+ while (link) {
+ next = link->order;
+ link->order = prev;
+ prev = link;
+ link = next;
+ }
+ *list = prev;
+}
+
+static inline unsigned short process_metadata(fb_parser_t *P, fb_metadata_t *m,
+ uint16_t expect, fb_metadata_t *out[KNOWN_ATTR_COUNT])
+{
+ uint16_t flags;
+ unsigned int i, n = FLATCC_ATTR_MAX;
+ int type;
+ fb_attribute_t *a;
+
+ memset(out, 0, sizeof(out[0]) * KNOWN_ATTR_COUNT);
+ for (flags = 0; m && n; --n, m = m->link) {
+ a = (fb_attribute_t *)find_fb_name_by_token(&P->schema.root_schema->attribute_index, m->ident);
+ if (!a) {
+ error_tok(P, m->ident, "unknown attribute not declared");
+ continue;
+ }
+ if (!(i = a->known)) {
+ continue;
+ }
+ if (!((1 << i) & expect)) {
+ error_tok(P, m->ident, "known attribute not expected in this context");
+ continue;
+ }
+ flags |= 1 << i;
+ if (out[i]) {
+ error_tok(P, m->ident, "known attribute listed multiple times");
+ continue;
+ }
+ out[i] = m;
+ type = fb_known_attribute_types[i];
+ if (type == vt_missing && m->value.type != vt_missing) {
+ error_tok(P, m->ident, "known attribute does not expect a value");
+ continue;
+ }
+ if (type == vt_string && m->value.type != vt_string) {
+ error_tok(P, m->ident, "known attribute expects a string");
+ continue;
+ }
+ if (type == vt_uint && m->value.type != vt_uint) {
+ error_tok(P, m->ident, "known attribute expects an unsigned integer");
+ continue;
+ }
+ if (type == vt_int && m->value.type != vt_uint && m->value.type != vt_int) {
+ error_tok(P, m->ident, "known attribute expects an integer");
+ continue;
+ }
+ if (type == vt_bool && m->value.type != vt_bool) {
+ error_tok(P, m->ident, "known attribute expects 'true' or 'false'");
+ continue;
+ }
+ }
+ if (m) {
+ error_tok(P, m->ident, "too many attributes");
+ }
+ return flags;
+}
+
+/*
+ * Recursive types are allowed, according to the FlatBuffers Internals doc,
+ * but this is not possible for structs because they have no default
+ * value or null option, and can only hold scalars and other structs, so
+ * recursion would never terminate. Enums are simple types and cannot be
+ * recursive either. Unions reference tables which may reference unions,
+ * and recursion works well here. Tables allow any other table, union,
+ * or scalar value to be optional or default, so recursion is possible.
+ * In conclusion, Unions and Table members may reference all other
+ * types, and self. Enums are trivially checked because they only allow
+ * scalars, which leaves structs that can build illegal forms.
+ *
+ * Object instances cannot be recursive meaning the object graph is
+ * always a tree, but this isn't really a concern for the schema
+ * compiler, and for the builder it happens naturally as it only adds to
+ * the buffer (though a compressor might reuse old data without
+ * violating the tree?).
+ *
+ * Conclusion: check structs for circular references and allow
+ * everything else to unfold, provided they otherwise pass type checks.
+ *
+ * Algorithm:
+ *
+ * Depth first search of the struct reference tree. We maintain flags to
+ * find back-links. We prune sub-trees already fully analyzed by using
+ * the closed flag. This operation is O(N) since each struct member is
+ * visited once.
+ *
+ * Recursion is limited to prevent blowing the stack and to protect
+ * against abuse.
+ */
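+/*
+ * Illustrative sketch (not from the original source), hypothetical
+ * schema fragments:
+ *
+ *   struct A { b: B; }  struct B { a: A; }
+ *
+ * is rejected: the search marks A open, recurses into B, finds A
+ * still open, and reports a circular struct reference. By contrast
+ *
+ *   table Node { next: Node; }
+ *
+ * is accepted because tables reference other tables by offset, so
+ * recursion in the type graph is harmless.
+ */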
+static int analyze_struct(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_compound_type_t *type;
+ fb_member_t *member;
+ int ret = 0;
+ uint64_t size;
+ uint16_t align;
+ fb_token_t *t;
+
+ assert(ct->symbol.kind == fb_is_struct);
+
+ assert(!(ct->symbol.flags & fb_circular_open));
+ if (ct->symbol.flags & fb_circular_closed) {
+ return 0;
+ }
+ assert(!ct->order);
+ ct->symbol.flags |= fb_circular_open;
+ align = 1;
+ for (sym = ct->members; sym; sym = sym->link) {
+ type = 0;
+ if (P->nesting_level >= FLATCC_NESTING_MAX) {
+ error(P, "maximum allowed nesting level exceeded while processing struct hierarchy");
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ switch (member->type.type) {
+ case vt_fixed_array_type:
+ case vt_scalar_type:
+ t = member->type.t;
+ member->type.st = map_scalar_token_type(t);
+ size = sizeof_scalar_type(member->type.st);
+ if (size < 1) {
+ error_sym_tok(P, sym, "unexpected type", t);
+ return -1;
+ }
+ member->align = (uint16_t)size;
+ member->size = size * member->type.len;
+ break;
+ case vt_fixed_array_compound_type_ref:
+ case vt_compound_type_ref:
+ /* Enums might not be valid, but then it would be detected earlier. */
+ if (member->type.ct->symbol.kind == fb_is_enum) {
+ type = member->type.ct;
+ size = type->size;
+ member->align = (uint16_t)size;
+ member->size = member->type.len * type->size;
+ break;
+ } else if (member->type.ct->symbol.kind == fb_is_struct) {
+ type = member->type.ct;
+ if (type->symbol.flags & fb_circular_open) {
+ error_sym_2(P, sym, "circular reference to struct at", &type->symbol);
+ return -1;
+ }
+ if (!(type->symbol.flags & fb_circular_closed)) {
+ if (P->opts.hide_later_struct) {
+ error_sym_2(P, sym, "dependency on later defined struct not permitted with current settings", &type->symbol);
+ }
+ ++P->nesting_level;
+ ret = analyze_struct(P, type);
+ --P->nesting_level;
+ if (ret) {
+ return ret;
+ }
+ }
+ member->align = type->align;
+ member->size = member->type.len * type->size;
+ break;
+ } else {
+ error_sym(P, sym, "unexpected compound type for field");
+ return -1;
+ }
+ break;
+ case vt_invalid:
+ /* Old error. */
+ return -1;
+ default:
+ error_sym(P, sym, "unexpected type");
+ return -1;
+ }
+ member->offset = fb_align(ct->size, member->align);
+ if (member->offset < ct->size || member->offset + member->size < member->offset) {
+ error_sym(P, sym, "struct size overflow");
+ return -1;
+ }
+ size = member->offset + member->size;
+ if (size < ct->size || size > FLATCC_STRUCT_MAX_SIZE) {
+ error_sym(P, sym, "struct field overflows maximum allowed struct size");
+ }
+ ct->size = size;
+ /*
+ * FB spec is not very clear on this - but experimentally a
+ * force aligned member struct will force that alignment upon a
+ * containing struct if the alignment would otherwise be
+ * smaller. In other words, a struct is aligned to the alignment
+ * of the largest member, not just the largest scalar member
+ * (directly or indirectly).
+ */
+ if (align < member->align) {
+ align = member->align;
+ }
+ }
+ if (ct->align > 0) {
+ if (align > ct->align) {
+ error_sym(P, &ct->symbol, "'force_align' cannot be smaller than natural alignment for");
+ ct->align = align;
+ }
+ } else {
+ ct->align = align;
+ }
+ /* Add trailing padding if necessary. */
+ ct->size = fb_align(ct->size, ct->align);
+
+ if (ct->size == 0) {
+ error_sym(P, &ct->symbol, "struct cannot be empty");
+ return -1;
+ }
+
+ ct->symbol.flags |= fb_circular_closed;
+ ct->symbol.flags &= (uint16_t)~fb_circular_open;
+ ct->order = P->schema.ordered_structs;
+ P->schema.ordered_structs = ct;
+ return ret;
+}
+
+static int define_nested_table(fb_parser_t *P, fb_scope_t *local, fb_member_t *member, fb_metadata_t *m)
+{
+ fb_symbol_t *type_sym;
+
+ if (member->type.type != vt_vector_type || member->type.st != fb_ubyte) {
+ error_tok(P, m->ident, "'nested_flatbuffer' attribute requires a [ubyte] vector type");
+ return -1;
+ }
+ if (m->value.type != vt_string) {
+ /* All known attributes get automatically type checked, so just ignore. */
+ return -1;
+ }
+ type_sym = lookup_string_reference(P, local, m->value.s.s, (size_t)m->value.s.len);
+ if (!type_sym) {
+ error_tok_as_string(P, m->ident, "nested reference not found", m->value.s.s, (size_t)m->value.s.len);
+ return -1;
+ }
+ if (type_sym->kind != fb_is_table) {
+ if (!P->opts.allow_struct_root) {
+ error_tok_2(P, m->ident, "nested reference does not refer to a table", type_sym->ident);
+ return -1;
+ }
+ if (type_sym->kind != fb_is_struct) {
+ error_tok_2(P, m->ident, "nested reference does not refer to a table or a struct", type_sym->ident);
+ return -1;
+ }
+ }
+ member->nest = (fb_compound_type_t *)type_sym;
+ return 0;
+}
+
+static int process_struct(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT], *m;
+ uint16_t allow_flags;
+ int key_count = 0, primary_count = 0, key_ok = 0;
+
+ if (ct->type.type) {
+ error_sym(P, &ct->symbol, "internal error: struct cannot have a type");
+ return -1;
+ }
+ ct->metadata_flags = process_metadata(P, ct->metadata, fb_f_force_align, knowns);
+ if ((m = knowns[fb_attr_force_align])) {
+ if (!is_valid_align(m->value.u)) {
+ error_sym(P, &ct->symbol, "'force_align' exceeds maximum permitted alignment or is not a power of 2");
+ } else {
+ /* This may still fail on natural alignment size. */
+ ct->align = (uint16_t)m->value.u;
+ }
+ }
+ for (sym = ct->members; sym; sym = sym->link) {
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ error_sym_2(P, sym, "struct field already defined here", old);
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: field type expected");
+ return -1;
+ }
+ key_ok = 1;
+ member = (fb_member_t *)sym;
+ allow_flags = 0;
+ /* Notice the difference between fb_f_ and fb_attr_ (flag vs index). */
+ if (P->opts.allow_struct_field_key) {
+ allow_flags |= fb_f_key;
+ if (P->opts.allow_primary_key) {
+ allow_flags |= fb_f_primary_key;
+ }
+ }
+ if (P->opts.allow_struct_field_deprecate) {
+ allow_flags |= fb_f_deprecated;
+ }
+ member->metadata_flags = process_metadata(P, member->metadata, allow_flags, knowns);
+ switch (member->type.type) {
+ case vt_fixed_array_type_ref:
+ key_ok = 0;
+ goto lbl_type_ref;
+ case vt_type_ref:
+lbl_type_ref:
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with struct field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ member->type.type = member->type.type == vt_fixed_array_type_ref ?
+ vt_fixed_array_compound_type_ref : vt_compound_type_ref;
+ if (type_sym->kind != fb_is_struct) {
+ if (P->opts.allow_enum_struct_field) {
+ if (type_sym->kind != fb_is_enum) {
+ error_sym_2(P, sym, "struct fields can only be scalars, structs, and enums, or arrays of these, but has type", type_sym);
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ if (!P->opts.allow_enum_key) {
+ key_ok = 0;
+ break;
+ }
+ } else {
+ error_sym_2(P, sym, "struct fields can only be scalars and structs, or arrays of these, but has type", type_sym);
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ } else {
+ key_ok = 0;
+ }
+ break;
+ case vt_fixed_array_string_type:
+ error_sym(P, sym, "fixed length arrays cannot have string elements");
+ member->type.type = vt_invalid;
+ return -1;
+ case vt_fixed_array_type:
+ key_ok = 0;
+ break;
+ case vt_scalar_type:
+ break;
+ default:
+ error_sym(P, sym, "struct member member can only be of struct scalar, or fixed length scalar array type");
+ return -1;
+ }
+ if (!key_ok) {
+ if (member->metadata_flags & fb_f_key) {
+ member->type.type = vt_invalid;
+ error_sym(P, sym, "key attribute now allowed for this kind of field");
+ return -1;
+ }
+ if (member->metadata_flags & fb_f_primary_key) {
+ member->type.type = vt_invalid;
+ error_sym(P, sym, "primary_key attribute now allowed for this kind of field");
+ return -1;
+ }
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ if (member->metadata_flags & fb_f_key) {
+ error_sym(P, sym, "key attribute not allowed for deprecated struct member");
+ return -1;
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute not allowed for deprecated struct member");
+ return -1;
+ }
+ }
+ if (member->metadata_flags & fb_f_key) {
+ if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute conflicts with key attribute");
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ key_count++;
+ if (!ct->primary_key) {
+ /* First key is primary key if no primary key is given explicitly. */
+ ct->primary_key = member;
+ }
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ if (primary_count++) {
+ error_sym(P, sym, "at most one struct member can have a primary_key attribute");
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ key_count++;
+ /* Allow backends to treat primary key as an ordinary key. */
+ member->metadata_flags |= fb_f_key;
+ ct->primary_key = member;
+ }
+ if (member->value.type) {
+ error_sym(P, sym, "struct member member cannot have a default value");
+ continue;
+ }
+ }
+ if (key_count) {
+ ct->symbol.flags |= fb_indexed;
+ }
+ /* Set primary key flag for backends even if chosen by default. */
+ if (ct->primary_key) {
+ ct->primary_key->metadata_flags |= fb_f_primary_key;
+ }
+ if (key_count > 1 && !P->opts.allow_multiple_key_fields) {
+ error_sym(P, &ct->symbol, "table has multiple key fields, but at most one is permitted");
+ return -1;
+ }
+ return 0;
+}
+
+static fb_member_t *original_order_members(fb_parser_t *P, fb_member_t *next)
+{
+ fb_member_t *head = 0;
+ fb_member_t **tail = &head;
+
+ /* Not used for now, but in case we need error messages etc. */
+ (void)P;
+
+ while (next) {
+ *tail = next;
+ tail = &next->order;
+ next = (fb_member_t *)(((fb_symbol_t *)next)->link);
+ }
+ *tail = 0;
+ return head;
+}
+
+/*
+ * Alignment of table offset fields are generally not stored, and
+ * vectors store the element alignment for scalar types, so we
+ * detect alignment based on type also. Unions are tricky since they
+ * use a single byte type followed by an offset, but it is impractical
+ * to store these separately so we sort by the type field.
+ */
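+/*
+ * Members are bucketed by log2 of their effective alignment and
+ * emitted from the largest bucket to the smallest, so e.g. an 8-byte
+ * scalar precedes a 4-byte offset field (with the default 4-byte
+ * offset size), which precedes a 2-byte scalar. Members within the
+ * same bucket keep their declaration order.
+ */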
+static fb_member_t *align_order_members(fb_parser_t *P, fb_member_t *members)
+{
+ uint16_t i, j, k;
+ fb_member_t *heads[9] = {0};
+ fb_member_t **tails[9] = {0};
+ fb_member_t *next = members;
+
+ while (next) {
+ k = next->align;
+ switch (next->type.type) {
+ case vt_compound_type_ref:
+ switch (next->type.ct->symbol.kind) {
+ case fb_is_struct:
+ case fb_is_enum:
+ k = next->type.ct->align;
+ break;
+ case fb_is_union:
+ /*
+ * Unions align to their offsets because the type can
+ * always be added last in a second pass since it is 1
+ * byte.
+ */
+ k = (uint16_t)P->opts.offset_size;
+ break;
+ default:
+ k = (uint16_t)P->opts.offset_size;
+ break;
+ }
+ break;
+ case vt_vector_compound_type_ref:
+ case vt_string_type:
+ case vt_vector_type:
+ case vt_vector_string_type:
+ k = (uint16_t)P->opts.offset_size;
+ break;
+ case vt_invalid:
+ /* Just to have some sane behavior. */
+ return original_order_members(P, members);
+ default:
+ k = next->align;
+ break;
+ }
+ assert(k > 0);
+ i = 0;
+ while (k >>= 1) {
+ ++i;
+ }
+ /* The largest valid alignment is 256, but anything above 128 is grouped into the last bucket. */
+ if (i > 7) {
+ i = 7;
+ }
+ if (!heads[i]) {
+ heads[i] = next;
+ } else {
+ *tails[i] = next;
+ }
+ tails[i] = &next->order;
+ next = (fb_member_t *)(((fb_symbol_t *)next)->link);
+ }
+ i = j = 8;
+ tails[8] = &heads[8];
+ while (j) {
+ while (i && !heads[--i]) {
+ }
+ *tails[j] = heads[i];
+ j = i;
+ }
+ return heads[8];
+}
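+
+/*
+ * Illustrative note (assuming 32-bit uoffsets): for a hypothetical table
+ * { a: ubyte; b: double; c: string; } the buckets above order the
+ * members as b (align 8), c (offset field, align 4), then a (align 1),
+ * so larger alignments come first and padding is reduced.
+ */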
+
+/* Temporary markers only used during assignment of field identifiers. */
+enum { unused_field = 0, normal_field, type_field };
+
+static int process_table(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ char msg_buf [100];
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT], *m;
+ int ret = 0;
+ uint64_t count = 0;
+ int need_id = 0, id_failed = 0;
+ uint64_t max_id = 0;
+ int key_ok, key_count = 0, primary_count = 0;
+ int is_union_vector, is_vector;
+ uint64_t i, j;
+ int max_id_errors = 10;
+ int allow_flags = 0;
+
+ /*
+ * This just tracks the presence of a `normal_field` or a hidden
+ * `type_field`. The size is limited to 16-bit unsigned offsets.
+ * It is only of relevance for the optional `id` table field
+ * attribute.
+ */
+ uint8_t *field_marker = 0;
+ fb_symbol_t **field_index = 0;
+
+ assert(ct->symbol.kind == fb_is_table);
+ assert(!ct->type.type);
+
+ ct->metadata_flags = process_metadata(P, ct->metadata, fb_f_original_order, knowns);
+ /*
+ * `original_order` now lives as a flag, so we need not consider it
+ * further until code generation.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ key_ok = 0;
+ type_sym = 0;
+ is_vector = 0;
+ is_union_vector = 0;
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ error_sym_2(P, sym, "table member already defined here", old);
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: member type expected");
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ if (member->type.type == vt_invalid) {
+ continue;
+ }
+ if (member->type.type == vt_scalar_type || member->type.type == vt_vector_type) {
+ member->type.st = map_scalar_token_type(member->type.t);
+ }
+ allow_flags =
+ fb_f_id | fb_f_nested_flatbuffer | fb_f_deprecated | fb_f_key |
+ fb_f_required | fb_f_hash | fb_f_base64 | fb_f_base64url | fb_f_sorted;
+
+ if (P->opts.allow_primary_key) {
+ allow_flags |= fb_f_primary_key;
+ }
+ member->metadata_flags = process_metadata(P, member->metadata, (uint16_t)allow_flags, knowns);
+ if ((m = knowns[fb_attr_nested_flatbuffer])) {
+ define_nested_table(P, ct->scope, member, m);
+ }
+ /* Note: we allow base64 and base64url together with the nested_flatbuffer attribute. */
+ if ((member->metadata_flags & fb_f_base64) &&
+ (member->type.type != vt_vector_type || member->type.st != fb_ubyte)) {
+ error_sym(P, sym, "'base64' attribute is only allowed on vectors of type ubyte");
+ }
+ if ((member->metadata_flags & fb_f_base64url) &&
+ (member->type.type != vt_vector_type || member->type.st != fb_ubyte)) {
+ error_sym(P, sym, "'base64url' attribute is only allowed on vectors of type ubyte");
+ }
+ if ((member->metadata_flags & (fb_f_base64 | fb_f_base64url)) ==
+ (fb_f_base64 | fb_f_base64url)) {
+ error_sym(P, sym, "'base64' and 'base64url' attributes cannot both be set");
+ }
+ m = knowns[fb_attr_id];
+ if (m && count == 0) {
+ need_id = 1;
+ field_marker = P->tmp_field_marker;
+ memset(field_marker, 0, (size_t)P->opts.vt_max_count);
+ }
+ if (!id_failed) {
+ if (count >= P->opts.vt_max_count) {
+ error_sym(P, sym, "too many fields for vtable size");
+ id_failed = 1;
+ } else if (!need_id) {
+ member->id = (unsigned short)count;
+ }
+ ++count;
+ }
+ switch (member->type.type) {
+ case vt_scalar_type:
+ if (member->value.type == vt_null) {
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ member->flags |= fb_fm_optional;
+ }
+ if (member->metadata_flags & fb_f_required) {
+ if (member->flags & fb_fm_optional) {
+ error_sym(P, sym, "'required' attribute is incompatible with optional table field (= null)");
+ } else {
+ error_sym(P, sym, "'required' attribute is redundant on scalar table field");
+ }
+ }
+ key_ok = 1;
+ if (member->value.type == vt_name_ref) {
+ if (lookup_enum_name(P, ct->scope, 0, member->value.ref, &member->value)) {
+ error_ref_sym(P, member->value.ref, "unknown name used as initializer for scalar field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ }
+ if (!member->value.type) {
+ /*
+ * Simplifying by ensuring we always have a default
+ * value where an initializer is possible (also goes for enum).
+ */
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ }
+ if (fb_coerce_scalar_type(P, sym, member->type.st, &member->value)) {
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->size = sizeof_scalar_type(member->type.st);
+ member->align = (uint16_t)member->size;
+ break;
+ case vt_vector_type:
+ is_vector = 1;
+ member->size = sizeof_scalar_type(member->type.st);
+ member->align =(uint16_t) member->size;
+ if (member->value.type) {
+ error_sym(P, sym, "scalar vectors cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case vt_fixed_array_type_ref:
+ case vt_fixed_array_string_type:
+ case vt_fixed_array_type:
+ error_sym(P, sym, "fixed length arrays can only be used with structs");
+ member->type.type = vt_invalid;
+ return -1;
+ case vt_string_type:
+ /* `size` or `align` not defined - these are implicit uoffset types. */
+ key_ok = P->opts.allow_string_key;
+ if (member->value.type) {
+ error_sym(P, sym, "strings cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case vt_vector_string_type:
+ is_vector = 1;
+ /* `size` or `align` not defined - these are implicit uoffset types. */
+ if (member->value.type) {
+ error_sym(P, sym, "string vectors cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case vt_type_ref:
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with table field", sym);
+ member->type.type = vt_invalid;
+ /* We cannot count id's without knowing if it is a union. */
+ id_failed = 1;
+ continue;
+ }
+ switch (type_sym->kind) {
+ case fb_is_enum:
+ /*
+ * Note that enums without a 0 element require an
+ * initializer in the schema, but that cannot happen
+ * with a null value, so in this case the value is forced
+ * to 0. This is only relevant when using the `_get()`
+ * accessor instead of `_option()`.
+ */
+ if (member->value.type == vt_null) {
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ member->flags |= fb_fm_optional;
+ }
+ if (member->metadata_flags & fb_f_required) {
+ if (member->flags & fb_fm_optional) {
+ error_sym(P, sym, "'required' attribute is incompatible with optional enum table field (= null)");
+ } else {
+ error_sym(P, sym, "'required' attribute is redundant on enum table field");
+ }
+ }
+ key_ok = P->opts.allow_enum_key;
+ break;
+ case fb_is_table:
+ case fb_is_struct:
+ case fb_is_union:
+ break;
+ case fb_is_rpc_service:
+ error_sym_2(P, sym, "rpc service is not a valid table field type", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ default:
+ error_sym_2(P, sym, "internal error: unexpected field type", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.type = vt_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ /*
+ * Note: this information transfer won't always work because
+ * structs do not know their full size at this point so
+ * codegen must use the member->type.ct values.
+ */
+ member->size = member->type.ct->size;
+ member->align = member->type.ct->align;
+
+ if (type_sym->kind == fb_is_union && !id_failed) {
+ /* Hidden union type field. */
+ if (!need_id) {
+ member->id = (unsigned short)count;
+ }
+ ++count;
+ }
+ if (member->value.type) {
+ if (type_sym->kind != fb_is_enum) {
+ error_sym(P, sym, "non-scalar field cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (member->value.type == vt_name_ref) {
+ if (lookup_enum_name(P, ct->scope, member->type.ct, member->value.ref, &member->value)) {
+ error_ref_sym(P, member->value.ref, "unknown name used as initializer for enum field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ } else {
+ if (fb_coerce_scalar_type(P, sym, ((fb_compound_type_t *)type_sym)->type.st, &member->value)) {
+ member->type.type = vt_invalid;
+ continue;
+ }
+ /* Bit flags can have complex combinations of values, and do not natively have a 0 value. */
+ if (P->opts.strict_enum_init && !(member->type.ct->metadata_flags & fb_f_bit_flags)
+ && !(member->flags & fb_fm_optional)) {
+ if (!is_in_value_set(&member->type.ct->value_set, &member->value)) {
+ error_sym(P, sym, "initializer does not match a defined enum value");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ } else {
+ /* Enum is the only type that cannot always default to 0. */
+ if (type_sym->kind == fb_is_enum) {
+ member->value.type = vt_uint;
+ member->value.u = 0;
+ if (fb_coerce_scalar_type(P, type_sym, ((fb_compound_type_t *)type_sym)->type.st, &member->value)) {
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (P->opts.strict_enum_init) {
+ /* TODO: consider if this error is necessary for bit_flags - flatc 2.0.0 errors on this. */
+ if (!is_in_value_set(&member->type.ct->value_set, &member->value)) {
+ error_sym_2(P, sym,
+ "enum type requires an explicit initializer because it has no 0 value", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ }
+ break;
+ case vt_vector_type_ref:
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown vector type reference used with table field", sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ switch (type_sym->kind) {
+ case fb_is_enum:
+ case fb_is_table:
+ case fb_is_struct:
+ case fb_is_union:
+ break;
+ default:
+ /* Vectors of strings are handled separately but this is irrelevant to the user. */
+ error_sym_tok(P, sym, "vectors can only hold scalars, structs, enums, strings, tables, and unions", member->type.t);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ is_vector = 1;
+ is_union_vector = type_sym->kind == fb_is_union;
+ if (member->value.type) {
+ error_sym(P, sym, "non-scalar field cannot have an initializer");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ /* Size of the vector element, not of the vector itself. */
+ member->type.type = vt_vector_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ member->size = member->type.ct->size;
+ member->align = member->type.ct->align;
+ if (type_sym->kind == fb_is_union && !id_failed) {
+ /* Hidden union type field. */
+ if (!need_id) {
+ member->id = (unsigned short)count;
+ }
+ ++count;
+ }
+ break;
+ default:
+ error_sym(P, sym, "unexpected table field type encountered");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (!id_failed) {
+ if (m && !need_id && !id_failed) {
+ error_tok(P, m->ident, "unexpected id attribute, must be used on all fields, or none");
+ id_failed = 1;
+ } else if (!m && need_id && !id_failed) {
+ error_sym(P, sym, "id attribute missing, must be used on all fields, or none");
+ id_failed = 1;
+ } else if (m) {
+ if (m->value.type == vt_uint) {
+ if (m->value.u >= P->opts.vt_max_count) {
+ error_sym(P, sym, "id too large to fit in vtable");
+ id_failed = 1;
+ } else {
+ member->id = (unsigned short)m->value.u;
+ if (member->id > max_id) {
+ max_id = member->id;
+ }
+ }
+ } else if (m->value.type == vt_int) {
+ error_tok(P, m->ident, "id attribute cannot be negative");
+ id_failed = 1;
+ } else {
+ error_tok(P, m->ident, "unexpecte id attribute type");
+ id_failed = 1;
+ }
+ }
+ }
+ if (need_id && !id_failed) {
+ if (field_marker[member->id] == type_field) {
+ error_tok(P, m->ident, "id attribute value conflicts with a hidden type field");
+ id_failed = 1;
+ } else if (field_marker[member->id]) {
+ error_tok(P, m->ident, "id attribute value conflicts with another field");
+ } else {
+ field_marker[member->id] = normal_field;
+ }
+ if (!id_failed && type_sym && type_sym->kind == fb_is_union) {
+ if (member->id <= 1) {
+ error_tok(P, m->ident, is_union_vector ?
+ "id attribute value should be larger to accommodate hidden union vector type field" :
+ "id attribute value should be larger to accommodate hidden union type field");
+ id_failed = 1;
+ } else if (field_marker[member->id - 1] == type_field) {
+ error_tok(P, m->ident, is_union_vector ?
+ "hidden union vector type field below attribute id value conflicts with another hidden type field" :
+ "hidden union type field below attribute id value conflicts with another hidden type field");
+ id_failed = 1;
+ } else if (field_marker[member->id - 1]) {
+ error_tok(P, m->ident, is_union_vector ?
+ "hidden union vector type field below attribute id value conflicts with another field" :
+ "hidden union type field below attribute id value conflicts with another field");
+ id_failed = 1;
+ } else {
+ field_marker[member->id - 1] = type_field;
+ }
+ }
+ }
+ if (member->metadata_flags & fb_f_deprecated) {
+ if (member->metadata_flags & fb_f_key) {
+ error_sym(P, sym, "key attribute not allowed for deprecated field");
+ return -1;
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute not allowed for deprecated field");
+ return -1;
+ }
+ }
+ if (member->metadata_flags & fb_f_key) {
+ ++key_count;
+ if (!key_ok) {
+ error_sym(P, sym, "key attribute not allowed for this kind of field");
+ member->type.type = vt_invalid;
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ error_sym(P, sym, "primary_key attribute conflicts with key attribute");
+ member->type.type = vt_invalid;
+ } else if (!ct->primary_key ||
+ (primary_count == 0 && ct->primary_key->id > member->id)) {
+ /*
+ * Set key field with lowest id as default primary key
+ * unless a key field also has a primary attribute.
+ */
+ ct->primary_key = member;
+ }
+ } else if (member->metadata_flags & fb_f_primary_key) {
+ if (primary_count++) {
+ error_sym(P, sym, "at most one field can have a primary_key attribute");
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ ct->primary_key = member;
+ }
+ key_count++;
+ /* Allow backends to treat primary key as an ordinary key. */
+ member->metadata_flags |= fb_f_key;
+ }
+ if (member->metadata_flags & fb_f_sorted) {
+ if (is_union_vector) {
+ error_sym(P, sym, "sorted attribute not allowed for union vectors");
+ member->type.type = vt_invalid;
+ return -1;
+ } else if (!is_vector) {
+ error_sym(P, sym, "sorted attribute only allowed for vectors");
+ member->type.type = vt_invalid;
+ return -1;
+ }
+ /*
+ * A subsequent call to validate_table_attr will verify that a
+ * sorted vector of tables or structs has a defined key
+ * field. This cannot be done before all types have been
+ * processed.
+ */
+ }
+ }
+ if (!id_failed) {
+ ct->count = count;
+ }
+ if (!id_failed && need_id) {
+ if (count && max_id >= count) {
+ for (i = 0; i < max_id; ++i) {
+ if (field_marker[i] == 0) {
+ if (!max_id_errors--) {
+ error_sym(P, &ct->symbol, "... more id's missing");
+ break;
+ }
+ sprintf(msg_buf, "id range not consequtive from 0, missing id: %"PRIu64"", i);
+ error_sym(P, &ct->symbol, msg_buf);
+ }
+ }
+ id_failed = 1;
+ }
+ }
+ /* Order in which fields are laid out in the binary buffer. */
+ if (ct->metadata_flags & fb_f_original_order) {
+ ct->ordered_members = original_order_members(P, (fb_member_t *)ct->members);
+ } else {
+ /* Size efficient ordering. */
+ ct->ordered_members = align_order_members(P, (fb_member_t *)ct->members);
+ }
+ if (!id_failed && need_id && count > 0) {
+ field_index = P->tmp_field_index;
+ memset(field_index, 0, sizeof(field_index[0]) * (size_t)P->opts.vt_max_count);
+ /*
+ * Reorder by id so table constructor arguments in code
+ * generators always use the same ordering across versions.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ field_index[member->id] = sym;
+ }
+ j = 0;
+ if (field_index[0] == 0) {
+ j = 1; /* Adjust for union type. */
+ }
+ ct->members = field_index[j];
+ for (i = j + 1; i <= max_id; ++i) {
+ if (field_index[i] == 0) ++i; /* Adjust for union type. */
+ field_index[j]->link = field_index[i];
+ j = i;
+ }
+ field_index[max_id]->link = 0;
+ }
+ if (key_count) {
+ ct->symbol.flags |= fb_indexed;
+ }
+ /* Set primary key flag for backends even if chosen by default. */
+ if (ct->primary_key) {
+ ct->primary_key->metadata_flags |= fb_f_primary_key;
+ }
+ if (key_count > 1 && !P->opts.allow_multiple_key_fields) {
+ error_sym(P, &ct->symbol, "table has multiple key fields, but at most one is permitted");
+ ret = -1;
+ }
+ if (id_failed) {
+ ret = -1;
+ }
+ return ret;
+}
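+
+/*
+ * Illustrative sketch (hypothetical schema) of the id assignment
+ * handled above: explicit ids must be used on all fields or none, and
+ * a union member claims an extra id for its hidden type field directly
+ * below the id of the union value field:
+ *
+ *     union Any { Weapon, Shield }
+ *     table Monster {
+ *         name: string (id: 0);
+ *         equipped: Any (id: 2);  // id 1 is reserved for the hidden type field
+ *     }
+ */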
+
+/*
+ * Post processing of process_table because some information is only
+ * available when all types have been processed.
+ */
+static int validate_table_attr(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym;
+ fb_member_t *member;
+
+ for (sym = ct->members; sym; sym = sym->link) {
+ member = (fb_member_t *)sym;
+ if (member->metadata_flags & fb_f_deprecated) {
+ continue;
+ }
+
+ if (member->type.type == vt_vector_compound_type_ref &&
+ member->metadata_flags & fb_f_sorted) {
+ switch (member->type.ct->symbol.kind) {
+ case fb_is_table:
+ if (!member->type.ct->primary_key) {
+ error_sym(P, sym, "sorted table vector only valid when table has a key field");
+ return -1;
+ }
+ break;
+ case fb_is_struct:
+ if (!member->type.ct->primary_key) {
+ error_sym(P, sym, "sorted struct vector only valid when struct has a key field");
+ return -1;
+ }
+ break;
+ /* Other cases already handled in process_table. */
+ default:
+ continue;
+ }
+ }
+ }
+ return 0;
+}
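+
+/*
+ * Illustrative sketch (hypothetical schema): the check above rejects a
+ * sorted vector of tables or structs unless the element type defines a
+ * key field:
+ *
+ *     table Item { sku: uint (key); }
+ *     table Inventory { items: [Item] (sorted); }
+ */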
+
+/*
+ * The parser already makes sure we have exactly one request type,
+ * one response type, and no initializer.
+ *
+ * We are a bit heavy on flagging attributes because their behavior
+ * isn't really specified at this point.
+ */
+static int process_rpc_service(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+#if FLATCC_ALLOW_RPC_SERVICE_ATTRIBUTES || FLATCC_ALLOW_RPC_METHOD_ATTRIBUTES
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT];
+#endif
+
+ assert(ct->symbol.kind == fb_is_rpc_service);
+ assert(!ct->type.type);
+
+ /*
+ * Deprecated is defined for fields - but it is unclear if this also
+ * covers rpc services. Anyway, we accept it since it may be useful,
+ * and does no harm.
+ */
+#if FLATCC_ALLOW_RPC_SERVICE_ATTRIBUTES
+ /* But we have no known attributes to support. */
+ ct->metadata_flags = process_metadata(P, ct->metadata, 0, knowns);
+#else
+ if (ct->metadata) {
+ error_sym(P, &ct->symbol, "rpc services cannot have attributes");
+ /* Non-fatal. */
+ }
+#endif
+
+ /*
+ * `original_order` now lives as a flag, so we need not consider it
+ * further until code generation.
+ */
+ for (sym = ct->members; sym; sym = sym->link) {
+ type_sym = 0;
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ error_sym_2(P, sym, "rpc method already defined here", old);
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: member type expected");
+ return -1;
+ }
+ member = (fb_member_t *)sym;
+ if (member->value.type) {
+ error_sym(P, sym, "internal error: initializer should have been rejected by parser");
+ }
+ if (member->type.type == vt_invalid) {
+ continue;
+ }
+ if (member->type.type != vt_type_ref) {
+ error_sym(P, sym, "internal error: request type expected to be a type reference");
+ }
+ type_sym = lookup_type_reference(P, ct->scope, member->req_type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->req_type.ref, "unknown type reference used with rpc request type", sym);
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ if (type_sym->kind != fb_is_table) {
+ error_sym_2(P, sym, "rpc request type must reference a table, defined here", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->req_type.type = vt_compound_type_ref;
+ member->req_type.ct = (fb_compound_type_t*)type_sym;
+ }
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with rpc response type", sym);
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ if (type_sym->kind != fb_is_table) {
+ error_sym_2(P, sym, "rpc response type must reference a table, defined here", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.type = vt_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ /* Symbols have no size. */
+ member->size = 0;
+ }
+#if FLATCC_ALLOW_RPC_METHOD_ATTRIBUTES
+#if FLATCC_ALLOW_DEPRECATED_RPC_METHOD
+ member->metadata_flags = process_metadata(P, member->metadata, fb_f_deprecated, knowns);
+#else
+ member->metadata_flags = process_metadata(P, member->metadata, 0, knowns);
+#endif
+#else
+ if (member->metadata) {
+ error_sym(P, sym, "rpc methods cannot have attributes");
+ /* Non-fatal. */
+ continue;
+ }
+#endif
+ }
+ return 0;
+}
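+
+/*
+ * Illustrative sketch (hypothetical schema): both the request and the
+ * response type of an rpc method must be tables, as enforced above:
+ *
+ *     table HelloRequest { name: string; }
+ *     table HelloReply { message: string; }
+ *     rpc_service Greeter { SayHello(HelloRequest): HelloReply; }
+ */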
+
+static int process_enum(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ fb_symbol_t *sym, *old, *type_sym;
+ fb_member_t *member;
+ fb_metadata_t *knowns[KNOWN_ATTR_COUNT];
+ fb_value_t index = { { { 0 } }, 0, 0 };
+ fb_value_t old_index;
+ int first = 1;
+ int bit_flags = 0;
+ int is_union = ct->symbol.kind == fb_is_union;
+
+ if (!is_union) {
+ assert(ct->symbol.kind == fb_is_enum);
+ if (!ct->type.type) {
+ ct->type.type = vt_invalid;
+ error_sym(P, &ct->symbol, "enum must have a type");
+ return -1;
+ }
+ if (ct->type.type == vt_missing) {
+ /*
+ * Enums normally require a type, but the parser may have an
+ * option to allow missing type, and then we provide a
+ * sensible default.
+ */
+ ct->type.st = fb_int;
+ ct->type.type = vt_scalar_type;
+ } else if (ct->type.type == vt_scalar_type) {
+ ct->type.st = map_scalar_token_type(ct->type.t);
+ } else {
+ /* Spec does not mention boolean type in enum, but we allow it. */
+ error_sym(P, &ct->symbol, "enum type must be a scalar integral type or bool");
+ return -1;
+ }
+ ct->size = sizeof_scalar_type(ct->type.st);
+ ct->align = (uint16_t)ct->size;
+ } else {
+ if (ct->type.type) {
+ error_sym(P, &ct->symbol, "unions cannot have a type, they are always enumerated as ubyte");
+ return -1;
+ }
+ /*
+ * We preprocess unions as enums to get the value assignments.
+ * The type field is not documented, but generated output from
+ * flatc suggests ubyte. We use an injected token to represent
+ * the ubyte type so we do not have to hardcode it elsewhere.
+ */
+ ct->type.type = vt_scalar_type;
+ ct->type.st = fb_ubyte;
+ /*
+ * The union field uses the type field and not the offset field
+ * to define its size because type.type is scalar.
+ */
+ ct->size = sizeof_scalar_type(fb_ubyte);
+ ct->align = (uint16_t)ct->size;
+ }
+
+ ct->metadata_flags = process_metadata(P, ct->metadata, fb_f_bit_flags, knowns);
+ if (ct->metadata_flags & fb_f_bit_flags) {
+ bit_flags = 1;
+ index.type = vt_uint;
+ index.u = 0;
+ }
+
+ if (ct->type.st == fb_bool) {
+ index.b = 0;
+ index.type = vt_bool;
+ } else {
+ index.i = 0;
+ index.type = vt_int;
+ if (fb_coerce_scalar_type(P, (fb_symbol_t *)ct, ct->type.st, &index)) {
+ error(P, "internal error: unexpected conversion failure on enum 0 index");
+ return -1;
+ }
+ }
+ old_index = index;
+
+ for (sym = ct->members; sym; sym = sym->link, first = 0) {
+ member = (fb_member_t *)sym;
+ if ((old = define_fb_symbol(&ct->index, sym))) {
+ if (old->ident == &P->t_none) {
+ /*
+ * Parser injects `NONE` as the first union member and
+ * it therefore gets index 0. Additional use of NONE
+ * will fail.
+ */
+ error_sym(P, sym, "'NONE' is a predefined value");
+ member->type.type = vt_invalid;
+ continue;
+ }
+ error_sym_2(P, sym, "value already defined here", old);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ if (sym->kind != fb_is_member) {
+ error_sym(P, sym, "internal error: enum value type expected");
+ return -1;
+ }
+ /* Enum / union values cannot have metadata. */
+ assert(member->metadata == 0);
+ if (is_union) {
+ if (member->symbol.ident == &P->t_none) {
+ /* Handle implicit NONE specially. */
+ member->type.type = vt_missing;
+ } else if (member->type.type == vt_string_type) {
+ member->size = 0;
+ } else if (member->type.type != vt_type_ref) {
+ if (member->type.type != vt_invalid) {
+ error_sym(P, sym, "union member type must be string or a reference to a table or a struct");
+ member->type.type = vt_invalid;
+ }
+ continue;
+ } else {
+ type_sym = lookup_type_reference(P, ct->scope, member->type.ref);
+ if (!type_sym) {
+ error_ref_sym(P, member->type.ref, "unknown type reference used with union member", sym);
+ member->type.type = vt_invalid;
+ continue;
+ } else {
+ if (type_sym->kind != fb_is_table && type_sym->kind != fb_is_struct) {
+ error_sym_2(P, sym, "union member type reference must be a table or a struct, defined here", type_sym);
+ member->type.type = vt_invalid;
+ continue;
+ }
+ member->type.type = vt_compound_type_ref;
+ member->type.ct = (fb_compound_type_t*)type_sym;
+ /* Symbols have no size. */
+ member->size = 0;
+ }
+ }
+ }
+ if (!member->value.type && !first) {
+ if (index.type == vt_uint) {
+ if (ct->type.st == fb_long && index.u == UINT64_MAX) {
+ /* Not captured by range check. */
+ error_sym(P, sym, "64-bit unsigned int overflow");
+ }
+ index.u = index.u + 1;
+ } else if (index.type == vt_int && !first) {
+ if (ct->type.st == fb_long && index.i == INT64_MAX) {
+ /* Not captured by range check. */
+ error_sym(P, sym, "64-bit signed int overflow");
+ }
+ index.i = index.i + 1;
+ } else if (index.type == vt_bool && !first) {
+ if (index.b == 1) {
+ error_sym(P, sym, "boolean overflow: cannot enumerate past true");
+ }
+ index.b = 1;
+ }
+ }
+ if (bit_flags) {
+ if (member->value.type) {
+ if (member->value.type != vt_uint) {
+ error_sym(P, sym, "enum value must be unsigned int when used with 'bit_flags'");
+ return -1;
+ } else {
+ index = member->value;
+ }
+ }
+ if (index.u >= sizeof_scalar_type(ct->type.st) * 8) {
+ error_sym(P, sym, "enum value out of range when used with 'bit_flags'");
+ return -1;
+ }
+ member->value.u = UINT64_C(1) << index.u;
+ member->value.type = vt_uint;
+ if (fb_coerce_scalar_type(P, sym, ct->type.st, &member->value)) {
+ /* E.g. enumval = 15 causes signed overflow with short. */
+ error_sym(P, sym, "enum value out of range when used with 'bit_flags'");
+ return -1;
+ }
+ } else {
+ if (member->value.type) {
+ index = member->value;
+ }
+ /*
+ * Captures errors in user assigned values. Also captures
+ * overflow on auto-increment for all types except those of
+ * maximum representation size, i.e. long or ulong, which we
+ * handled above.
+ */
+ if (fb_coerce_scalar_type(P, sym, ct->type.st, &index)) {
+ return -1;
+ }
+ member->value = index;
+ }
+ if (!first && P->opts.ascending_enum) {
+ /* Without ascending enum we also allow duplicate values (but not names). */
+ if ((index.type == vt_uint && index.u <= old_index.u) ||
+ (index.type == vt_int && index.i <= old_index.i)) {
+ if (is_union && old_index.u == 0) {
+ /*
+ * The user explicitly assigned zero, or less, to the first
+ * element (here the second element, after the parser has
+ * injected the NONE element).
+ */
+ error_sym(P, sym, "union values must be positive, 0 is reserved for implicit 'NONE'");
+ member->value.type = vt_invalid;
+ return -1;
+ }
+ error_sym(P, sym, "enum values must be in ascending order");
+ member->value.type = vt_invalid;
+ return -1;
+ }
+ if (index.type == vt_bool && index.b <= old_index.b) {
+ error_sym(P, sym, "enum of type bool can only enumerate from false (0) to true (1)");
+ member->value.type = vt_invalid;
+ return -1;
+ }
+ }
+ old_index = index;
+ if (add_to_value_set(&ct->value_set, &member->value)) {
+ if (is_union) {
+ error_sym(P, sym, "union members require unique positive values (0 being reserved for 'NONE'");
+ member->value.type = vt_invalid;
+ return -1;
+ } else {
+ /*
+ * With ascending enums this won't happen, but
+ * otherwise flag secondary values so we can remove them
+ * from inverse name mappings in code gen.
+ */
+ member->symbol.flags |= fb_duplicate;
+ }
+ }
+ if (member->metadata) {
+ error_sym(P, sym, "enum values cannot have attributes");
+ /* Non-fatal. */
+ continue;
+ }
+ }
+ return 0;
+}
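+
+/*
+ * Illustrative sketch (hypothetical schema) of the rules above: values
+ * auto-increment and must be ascending, and bit_flags maps each value
+ * to a bit position rather than a literal value:
+ *
+ *     enum Color: ubyte { Red, Green = 3, Blue }         // 0, 3, 4
+ *     enum Caps: ushort (bit_flags) { Read, Write = 4 }  // 1 << 0, 1 << 4
+ *     union Any { Weapon, Shield }                       // NONE = 0 injected first
+ */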
+
+static inline int process_union(fb_parser_t *P, fb_compound_type_t *ct)
+{
+ return process_enum(P, ct);
+}
+
+int fb_build_schema(fb_parser_t *P)
+{
+ fb_schema_t *S = &P->schema;
+ fb_symbol_t *sym, *old_sym;
+ fb_name_t *old_name;
+ fb_compound_type_t *ct;
+ fb_attribute_t *a;
+
+ /* Make sure self is visible at this point in time. */
+ assert(ptr_set_exists(&P->schema.visible_schema, &P->schema));
+ for (sym = S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ case fb_is_enum:
+ case fb_is_union:
+ case fb_is_struct:
+ case fb_is_rpc_service:
+ ct = (fb_compound_type_t *)sym;
+ set_type_hash(ct);
+ ct->schema = &P->schema;
+ if (ct->scope && (old_sym = define_fb_symbol(&ct->scope->symbol_index, sym))) {
+ error_sym_2(P, sym, "symbol already defined here", old_sym);
+ }
+ }
+ }
+
+ /*
+ * Known attributes will be pre-defined if not provided by the
+ * user. After that point, all attribute references must be
+ * defined.
+ */
+ for (a = (fb_attribute_t *)S->attributes; a; a = (fb_attribute_t *)a->name.link) {
+ if ((old_name = define_fb_name(&S->root_schema->attribute_index, &a->name))) {
+ /*
+ * Allow attributes to be defined multiple times, including
+ * known attributes.
+ */
+#if 0
+ error_name(P, &a->name, "attribute already defined");
+#endif
+ }
+ }
+ install_known_attributes(P);
+
+ if (!P->opts.hide_later_enum) {
+ for (sym = S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_enum:
+ ct = (fb_compound_type_t *)sym;
+ if (process_enum(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ default:
+ continue;
+ }
+ }
+ }
+
+ /*
+ * Resolve type references both earlier and later than point of
+ * reference. This also supports recursion for tables and unions.
+ */
+ for (sym = S->symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ ct = (fb_compound_type_t *)sym;
+ if (process_struct(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case fb_is_table:
+ /* Handle after structs and enums. */
+ continue;
+ case fb_is_rpc_service:
+ /*
+ * Also handle rpc_service later like tables, just in case
+ * we allow non-table types in request/response type.
+ */
+ continue;
+ case fb_is_enum:
+ if (P->opts.hide_later_enum) {
+ ct = (fb_compound_type_t *)sym;
+ if (process_enum(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ }
+ break;
+ case fb_is_union:
+ ct = (fb_compound_type_t *)sym;
+ if (process_union(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ default:
+ error_sym(P, sym, "internal error: unexpected symbol at schema level");
+ return -1;
+ }
+ }
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_struct:
+ /*
+ * Structs need two stages, first process symbols, then
+ * analyze for size, alignment, and circular references.
+ */
+ ct = (fb_compound_type_t *)sym;
+ if (ct->type.type != vt_invalid && analyze_struct(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ default:
+ continue;
+ }
+ }
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ /* Only now is the full struct size available. */
+ if (ct->type.type != vt_invalid && process_table(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ break;
+ case fb_is_rpc_service:
+ ct = (fb_compound_type_t *)sym;
+ /* Deferred like tables in case request/response types are later extended to non-tables. */
+ if (ct->type.type != vt_invalid && process_rpc_service(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ for (sym = P->schema.symbols; sym; sym = sym->link) {
+ switch (sym->kind) {
+ case fb_is_table:
+ ct = (fb_compound_type_t *)sym;
+ /*
+ * Some table attributes depend on attributes on members and
+ * therefore can only be validated after processing.
+ */
+ if (ct->type.type != vt_invalid && validate_table_attr(P, ct)) {
+ ct->type.type = vt_invalid;
+ continue;
+ }
+ }
+ }
+ revert_order(&P->schema.ordered_structs);
+ if (!S->root_type.name) {
+ if (P->opts.require_root_type) {
+ error(P, "root type not declared");
+ }
+ } else {
+ sym = S->root_type.type = lookup_type_reference(P,
+ S->root_type.scope, S->root_type.name);
+ if (!sym) {
+ error_ref(P, S->root_type.name, "root type not found");
+ } else if (P->opts.allow_struct_root) {
+ if (sym->kind != fb_is_struct && sym->kind != fb_is_table) {
+ error_ref(P, S->root_type.name, "root type must be a struct or a table");
+ } else {
+ S->root_type.type = sym;
+ }
+ } else {
+ if (sym->kind != fb_is_table) {
+ error_ref(P, S->root_type.name, "root type must be a table");
+ } else {
+ S->root_type.type = sym;
+ }
+ }
+ S->root_type.name = 0;
+ }
+ P->has_schema = !P->failed;
+ return P->failed;
+}
diff --git a/flatcc/src/compiler/semantics.h b/flatcc/src/compiler/semantics.h
new file mode 100644
index 0000000..f4b0ec3
--- /dev/null
+++ b/flatcc/src/compiler/semantics.h
@@ -0,0 +1,12 @@
+#ifndef SCHEMA_H
+#define SCHEMA_H
+
+#include "parser.h"
+
+int __flatcc_fb_build_schema(fb_parser_t *P);
+#define fb_build_schema __flatcc_fb_build_schema
+
+
+fb_scope_t *fb_find_scope(fb_schema_t *S, fb_ref_t *name);
+
+#endif /* SCHEMA_H */
diff --git a/flatcc/src/compiler/symbols.h b/flatcc/src/compiler/symbols.h
new file mode 100644
index 0000000..143a785
--- /dev/null
+++ b/flatcc/src/compiler/symbols.h
@@ -0,0 +1,457 @@
+/* Flatbuffers parser attributes and symbols. */
+
+#ifndef SYMBOLS_H
+#define SYMBOLS_H
+
+#include <stdint.h>
+
+#include "config.h"
+#include "lex/tokens.h"
+#include "hash/hash_table.h"
+#include "hash/ptr_set.h"
+
+typedef struct fb_token fb_token_t;
+typedef struct fb_string fb_string_t;
+typedef struct fb_value fb_value_t;
+typedef struct fb_symbol fb_symbol_t;
+
+typedef struct fb_metadata fb_metadata_t;
+
+typedef struct fb_name fb_name_t;
+typedef fb_symbol_t fb_namespace_t;
+typedef fb_symbol_t fb_ref_t;
+/* Doc is not strictly a symbol, just a chained token list, but close enough. */
+typedef fb_symbol_t fb_doc_t;
+typedef fb_name_t fb_include_t;
+typedef struct fb_attribute fb_attribute_t;
+
+typedef struct fb_member fb_member_t;
+typedef struct fb_compound_type fb_compound_type_t;
+
+typedef struct fb_scope fb_scope_t;
+typedef struct fb_root_schema fb_root_schema_t;
+typedef struct fb_root_type fb_root_type_t;
+typedef struct fb_schema fb_schema_t;
+
+enum {
+ tok_kw_base = LEX_TOK_KW_BASE,
+ tok_kw_bool,
+ tok_kw_byte,
+ tok_kw_char,
+ tok_kw_enum,
+ tok_kw_float32,
+ tok_kw_float64,
+ tok_kw_int,
+ tok_kw_int8,
+ tok_kw_int16,
+ tok_kw_int32,
+ tok_kw_int64,
+ tok_kw_long,
+ tok_kw_null,
+ tok_kw_true,
+ tok_kw_uint,
+ tok_kw_false,
+ tok_kw_float,
+ tok_kw_short,
+ tok_kw_table,
+ tok_kw_ubyte,
+ tok_kw_uint8,
+ tok_kw_uint16,
+ tok_kw_uint32,
+ tok_kw_uint64,
+ tok_kw_ulong,
+ tok_kw_union,
+ tok_kw_double,
+ tok_kw_string,
+ tok_kw_struct,
+ tok_kw_ushort,
+ tok_kw_include,
+ tok_kw_attribute,
+ tok_kw_namespace,
+ tok_kw_root_type,
+ tok_kw_rpc_service,
+ tok_kw_file_extension,
+ tok_kw_file_identifier,
+ LEX_TOK_KW_END,
+ /* Pseudo keywords. */
+ tok_kw_doc_comment
+};
+
+struct fb_token {
+ const char *text;
+ long len;
+ long linenum;
+ long pos;
+ long id;
+};
+
+enum fb_scalar_type {
+ fb_missing_type = 0,
+ fb_ulong,
+ fb_uint,
+ fb_ushort,
+ fb_ubyte,
+ fb_bool,
+ fb_long,
+ fb_int,
+ fb_short,
+ fb_byte,
+ fb_double,
+ fb_float,
+ fb_char,
+};
+
+typedef enum fb_scalar_type fb_scalar_type_t;
+
+static inline size_t sizeof_scalar_type(fb_scalar_type_t st)
+{
+ static const size_t scalar_type_size[] = {
+ 0, 8, 4, 2, 1, 1, 8, 4, 2, 1, 8, 4, 1
+ };
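+ /* Indexed by fb_scalar_type: missing, ulong, uint, ushort, ubyte,
+ * bool, long, int, short, byte, double, float, char. */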
+
+ return scalar_type_size[st];
+}
+
+enum fb_value_type {
+ vt_missing = 0,
+ vt_invalid = 1,
+ vt_null,
+ vt_string,
+ vt_float,
+ vt_int,
+ vt_uint,
+ vt_bool,
+ vt_vector_type,
+ vt_scalar_type,
+ vt_vector_string_type,
+ vt_string_type,
+ vt_vector_type_ref,
+ vt_type_ref,
+ vt_name_ref,
+ vt_compound_type_ref,
+ vt_vector_compound_type_ref,
+ vt_fixed_array_type,
+ vt_fixed_array_type_ref,
+ vt_fixed_array_string_type,
+ vt_fixed_array_compound_type_ref
+};
+
+struct fb_string {
+ char *s;
+ /* printf statements rely on this being int. */
+ int len;
+};
+
+struct fb_value {
+ union {
+ fb_string_t s;
+ double f;
+ int64_t i;
+ uint64_t u;
+ uint8_t b;
+ fb_token_t *t;
+ fb_compound_type_t *ct;
+ fb_scalar_type_t st;
+ fb_ref_t *ref;
+ };
+ unsigned short type;
+ uint32_t len;
+};
+
+enum fb_kind {
+ fb_is_table,
+ fb_is_struct,
+ fb_is_rpc_service,
+ fb_is_enum,
+ fb_is_union,
+ fb_is_member
+};
+
+/*
+ * Used for white, gray, black graph coloring while detecting circular
+ * references.
+ */
+enum fb_symbol_flags {
+ fb_circular_open = 1,
+ fb_circular_closed = 2,
+ fb_duplicate = 4,
+ fb_indexed = 8,
+};
+
+enum fb_member_flags {
+ fb_fm_optional = 1
+};
+
+/*
+ * We keep the link first in all structs so that we can use a
+ * generic list reverse function after all symbols have been pushed
+ * within a scope.
+ */
+struct fb_symbol {
+ fb_symbol_t *link;
+ fb_token_t *ident;
+ uint16_t kind;
+ uint16_t flags;
+};
+
+struct fb_name {
+ fb_name_t *link;
+ fb_value_t name;
+};
+
+#define fb_name_table __flatcc_fb_name_table
+#define fb_value_set __flatcc_fb_value_set
+#define fb_symbol_table __flatcc_fb_symbol_table
+#define fb_scope_table __flatcc_fb_scope_table
+
+DECLARE_HASH_TABLE(fb_name_table, fb_name_t *)
+DECLARE_HASH_TABLE(fb_schema_table, fb_schema_t *)
+DECLARE_HASH_TABLE(fb_value_set, fb_value_t *)
+DECLARE_HASH_TABLE(fb_symbol_table, fb_symbol_t *)
+DECLARE_HASH_TABLE(fb_scope_table, fb_scope_t *)
+
+struct fb_member {
+ fb_symbol_t symbol;
+ /* Struct or table field type, or method response type. */
+ fb_value_t type;
+ /* Method request type only used for methods. */
+ fb_value_t req_type;
+ fb_value_t value;
+ fb_metadata_t *metadata;
+ fb_doc_t *doc;
+ uint16_t metadata_flags;
+ /*
+ * `align`, `offset` are for structs only. 64-bit allows for
+ * dynamically configured 64-bit file offsets. Align is restricted to
+ * at most 256 and must be a power of 2.
+ */
+ uint16_t align;
+ uint16_t flags;
+ uint64_t offset;
+ uint64_t size;
+
+ /* `id` is for table fields only. */
+ uint64_t id;
+ /*
+ * Resolved `nested_flatbuffer` attribute type. Always a table if
+ * set, and only on struct and table fields.
+ */
+ fb_compound_type_t *nest;
+ /* Used to generate table fields in sorted order. */
+ fb_member_t *order;
+
+ /*
+ * Used by code generators. Only valid during export and may hold
+ * garbage from a previous export.
+ */
+ size_t export_index;
+};
+
+struct fb_metadata {
+ fb_metadata_t *link;
+ fb_token_t *ident;
+ fb_value_t value;
+};
+
+struct fb_compound_type {
+ fb_symbol_t symbol;
+ /* `scope` may span multiple input files, but has a unique namespace. */
+ fb_scope_t *scope;
+ /* Identifies the schema the symbol belongs to. */
+ fb_schema_t *schema;
+ fb_symbol_t *members;
+ fb_member_t *ordered_members;
+ fb_member_t *primary_key;
+ fb_metadata_t *metadata;
+ fb_doc_t *doc;
+ fb_value_t type;
+ fb_symbol_table_t index;
+ /* Only for enums. */
+ fb_value_set_t value_set;
+ /* FNV-1a 32 bit hash of fully qualified name, accidental 0 maps to hash(""). */
+ uint32_t type_hash;
+ uint16_t metadata_flags;
+ /* `count` is for tables only. */
+ uint64_t count;
+ /* `align`, `size` are for structs only. */
+ uint16_t align;
+ uint64_t size;
+ /* Sort structs with forward references. */
+ fb_compound_type_t *order;
+ /*
+ * Used by code generators. Only valid during export and may hold
+ * garbage from a previous export.
+ */
+ size_t export_index;
+};
+
+enum fb_known_attributes {
+ fb_attr_unknown = 0,
+ fb_attr_id = 1,
+ fb_attr_deprecated = 2,
+ fb_attr_original_order = 3,
+ fb_attr_force_align = 4,
+ fb_attr_bit_flags = 5,
+ fb_attr_nested_flatbuffer = 6,
+ fb_attr_key = 7,
+ fb_attr_required = 8,
+ fb_attr_hash = 9,
+ fb_attr_base64 = 10,
+ fb_attr_base64url = 11,
+ fb_attr_primary_key = 12,
+ fb_attr_sorted = 13,
+ KNOWN_ATTR_COUNT
+};
+
+enum fb_known_attribute_flags {
+ fb_f_unknown = 1 << fb_attr_unknown,
+ fb_f_id = 1 << fb_attr_id,
+ fb_f_deprecated = 1 << fb_attr_deprecated,
+ fb_f_original_order = 1 << fb_attr_original_order,
+ fb_f_force_align = 1 << fb_attr_force_align,
+ fb_f_bit_flags = 1 << fb_attr_bit_flags,
+ fb_f_nested_flatbuffer = 1 << fb_attr_nested_flatbuffer,
+ fb_f_key = 1 << fb_attr_key,
+ fb_f_required = 1 << fb_attr_required,
+ fb_f_hash = 1 << fb_attr_hash,
+ fb_f_base64 = 1 << fb_attr_base64,
+ fb_f_base64url = 1 << fb_attr_base64url,
+ fb_f_primary_key = 1 << fb_attr_primary_key,
+ fb_f_sorted = 1 << fb_attr_sorted,
+};
+
+struct fb_attribute {
+ fb_name_t name;
+ unsigned int known;
+};
+
+struct fb_scope {
+ fb_ref_t *name;
+ fb_symbol_table_t symbol_index;
+ fb_string_t prefix;
+};
+
+struct fb_root_schema {
+ fb_scope_table_t scope_index;
+ fb_name_table_t attribute_index;
+ fb_schema_table_t include_index;
+ int include_count;
+ int include_depth;
+ size_t total_source_size;
+};
+
+struct fb_root_type {
+ /* Root decl. before symbol is visible. */
+ fb_ref_t *name;
+ /* Resolved symbol. */
+ fb_symbol_t *type;
+ fb_scope_t *scope;
+};
+
+/*
+ * We store the parsed structure as token references. Tokens are stored
+ * in a single array pointing into the source buffer.
+ *
+ * Strings may contain multiple tokens when holding control characters
+ * and line breaks, but for our purposes the first string part is
+ * sufficient.
+ */
+
+struct fb_schema {
+ fb_include_t *includes;
+ fb_name_t *attributes;
+ fb_value_t file_identifier;
+ fb_value_t file_extension;
+ fb_symbol_t *symbols;
+ /* Topologically sorted structs. */
+ fb_compound_type_t *ordered_structs;
+ fb_root_type_t root_type;
+ fb_root_schema_t *root_schema;
+ /* Only used if schema is root. */
+ fb_root_schema_t root_schema_instance;
+
+ /* An optional scope prefix for generated code. */
+ fb_string_t prefix;
+
+ /* The basenameup in a format that can be indexed. */
+ fb_name_t name;
+
+ /* These are allocated strings that must be freed. */
+
+ /* Name of schema being processed without path or default extension. */
+ char *basename;
+ /* Uppercase basename for codegen and for case insensitive file inclusion check. */
+ char *basenameup;
+ /* Basename with extension. */
+ char *errorname;
+
+ /*
+ * The dependency schemas visible to this schema (includes self).
+ * Compound symbols have a link to their schema, which can be checked
+ * against this set to see if the symbol is visible in this
+ * context.
+ */
+ ptr_set_t visible_schema;
+};
+
+/*
+ * Helpers to ensure a symbol is actually visible because a scope
+ * (namespace) may be extended when a parent includes another file
+ * first.
+ */
+static inline fb_compound_type_t *get_enum_if_visible(fb_schema_t *schema, fb_symbol_t *sym)
+{
+ fb_compound_type_t *ct = 0;
+
+ switch (sym->kind) {
+ case fb_is_union:
+ /* Fall through. */
+ case fb_is_enum:
+ ct = (fb_compound_type_t *)sym;
+ if (!ptr_set_exists(&schema->visible_schema, ct->schema)) {
+ ct = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return ct;
+}
+
+static inline fb_compound_type_t *get_compound_if_visible(fb_schema_t *schema, fb_symbol_t *sym)
+{
+ fb_compound_type_t *ct = 0;
+
+ switch (sym->kind) {
+ case fb_is_struct:
+ case fb_is_table:
+ case fb_is_rpc_service:
+ case fb_is_union:
+ case fb_is_enum:
+ ct = (fb_compound_type_t *)sym;
+ if (!ptr_set_exists(&schema->visible_schema, ct->schema)) {
+ ct = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return ct;
+}
+
+/* Constants are specific to 32-bit FNV-1a hash. It is important to use unsigned integers. */
+static inline uint32_t fb_hash_fnv1a_32_init(void)
+{
+ return 2166136261UL;
+}
+
+static inline uint32_t fb_hash_fnv1a_32_append(uint32_t hash, const char *data, size_t len)
+{
+ while (len--) {
+ hash ^= *(uint8_t *)data++;
+ hash = hash * 16777619UL;
+ }
+ return hash;
+}
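+
+/*
+ * Usage sketch (hypothetical input): the fully qualified name can be
+ * hashed incrementally, e.g.:
+ *
+ *     uint32_t h = fb_hash_fnv1a_32_init();
+ *     h = fb_hash_fnv1a_32_append(h, "MyGame.Sample.", 14);
+ *     h = fb_hash_fnv1a_32_append(h, "Monster", 7);
+ */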
+
+#endif /* SYMBOLS_H */
diff --git a/flatcc/src/runtime/CMakeLists.txt b/flatcc/src/runtime/CMakeLists.txt
new file mode 100644
index 0000000..127e2a4
--- /dev/null
+++ b/flatcc/src/runtime/CMakeLists.txt
@@ -0,0 +1,16 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/include"
+)
+
+add_library(flatccrt
+ builder.c
+ emitter.c
+ refmap.c
+ verifier.c
+ json_parser.c
+ json_printer.c
+)
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatccrt DESTINATION ${lib_dir})
+endif()
diff --git a/flatcc/src/runtime/builder.c b/flatcc/src/runtime/builder.c
new file mode 100644
index 0000000..b62c2b6
--- /dev/null
+++ b/flatcc/src/runtime/builder.c
@@ -0,0 +1,2035 @@
+/*
+ * Code generator for C, building FlatBuffers.
+ *
+ * There are several approaches, some light, some requiring a library,
+ * some with vectored I/O etc.
+ *
+ * Here we focus on a reasonable balance of light code and efficiency.
+ *
+ * Builder code is generated to a separate file that includes the
+ * generated read-only code.
+ *
+ * Mutable buffers are not supported in this version.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_builder.h"
+#include "flatcc/flatcc_emitter.h"
+
+/*
+ * `check` is designed to handle incorrect API usage errors that can be
+ * ignored in production builds of a tested product.
+ *
+ * `check_error` fails if the condition is false and is designed to return an
+ * error code in production.
+ */
+
+#if FLATCC_BUILDER_ASSERT_ON_ERROR
+#define check(cond, reason) FLATCC_BUILDER_ASSERT(cond, reason)
+#else
+#define check(cond, reason) ((void)0)
+#endif
+
+#if FLATCC_BUILDER_SKIP_CHECKS
+#define check_error(cond, err, reason) ((void)0)
+#else
+#define check_error(cond, err, reason) if (!(cond)) { check(cond, reason); return err; }
+#endif
+
+/* `strnlen` not widely supported. */
+static inline size_t pstrnlen(const char *s, size_t max_len)
+{
+ const char *end = memchr(s, 0, max_len);
+ return end ? (size_t)(end - s) : max_len;
+}
+#undef strnlen
+#define strnlen pstrnlen
+
+/* Padding can be up to 255 zeroes, and 1 zero string termination byte.
+ * When two paddings are combined at nested buffers, we need twice that.
+ * Visible to emitter so it can test for zero padding in iov. */
+const uint8_t flatcc_builder_padding_base[512] = { 0 };
+#define _pad flatcc_builder_padding_base
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define write_uoffset __flatbuffers_uoffset_write_to_pe
+#define write_voffset __flatbuffers_voffset_write_to_pe
+#define write_identifier __flatbuffers_uoffset_write_to_pe
+#define write_utype __flatbuffers_utype_write_to_pe
+
+#define field_size sizeof(uoffset_t)
+#define max_offset_count FLATBUFFERS_COUNT_MAX(field_size)
+#define union_size sizeof(flatcc_builder_union_ref_t)
+#define max_union_count FLATBUFFERS_COUNT_MAX(union_size)
+#define utype_size sizeof(utype_t)
+#define max_utype_count FLATBUFFERS_COUNT_MAX(utype_size)
+
+#define max_string_len FLATBUFFERS_COUNT_MAX(1)
+#define identifier_size FLATBUFFERS_IDENTIFIER_SIZE
+
+
+#define iovec_t flatcc_iovec_t
+#define frame_size sizeof(__flatcc_builder_frame_t)
+#define frame(x) (B->frame[0].x)
+
+
+/* `align` must be a power of 2. */
+static inline uoffset_t alignup_uoffset(uoffset_t x, size_t align)
+{
+ return (x + (uoffset_t)align - 1u) & ~((uoffset_t)align - 1u);
+}
+
+static inline size_t alignup_size(size_t x, size_t align)
+{
+ return (x + align - 1u) & ~(align - 1u);
+}
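+
+/*
+ * Worked example: alignup_uoffset(13, 8) == 16 and
+ * alignup_size(16, 8) == 16. The mask trick is only valid because
+ * `align` is a power of 2.
+ */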
+
+
+typedef struct vtable_descriptor vtable_descriptor_t;
+struct vtable_descriptor {
+ /* Where the vtable is emitted. */
+ flatcc_builder_ref_t vt_ref;
+ /* Which buffer it was emitted to. */
+ uoffset_t nest_id;
+ /* Where the vtable is cached. */
+ uoffset_t vb_start;
+ /* Hash table collision chain. */
+ uoffset_t next;
+};
+
+typedef struct flatcc_iov_state flatcc_iov_state_t;
+struct flatcc_iov_state {
+ size_t len;
+ int count;
+ flatcc_iovec_t iov[FLATCC_IOV_COUNT_MAX];
+};
+
+#define iov_state_t flatcc_iov_state_t
+
+/* This assumes `iov_state_t iov;` has been declared in scope */
+#define push_iov_cond(base, size, cond) if ((size) > 0 && (cond)) { iov.len += size;\
+ iov.iov[iov.count].iov_base = (void *)(base); iov.iov[iov.count].iov_len = (size); ++iov.count; }
+#define push_iov(base, size) push_iov_cond(base, size, 1)
+#define init_iov() { iov.len = 0; iov.count = 0; }
+
+
+int flatcc_builder_default_alloc(void *alloc_context, iovec_t *b, size_t request, int zero_fill, int hint)
+{
+ void *p;
+ size_t n;
+
+ (void)alloc_context;
+
+ if (request == 0) {
+ if (b->iov_base) {
+ FLATCC_BUILDER_FREE(b->iov_base);
+ b->iov_base = 0;
+ b->iov_len = 0;
+ }
+ return 0;
+ }
+ switch (hint) {
+ case flatcc_builder_alloc_ds:
+ n = 256;
+ break;
+ case flatcc_builder_alloc_ht:
+ /* Should be the exact size, or space is just wasted. */
+ n = request;
+ break;
+ case flatcc_builder_alloc_fs:
+ n = sizeof(__flatcc_builder_frame_t) * 8;
+ break;
+ case flatcc_builder_alloc_us:
+ n = 64;
+ break;
+ default:
+ /*
+ * We have many small structures - the vs stack for tables with few
+ * elements, and few offset fields in the patch log. No need to
+ * overallocate in case of busy small messages.
+ */
+ n = 32;
+ break;
+ }
+ while (n < request) {
+ n *= 2;
+ }
+ if (request <= b->iov_len && b->iov_len / 2 >= n) {
+ /* Add hysteresis to shrink. */
+ return 0;
+ }
+ if (!(p = FLATCC_BUILDER_REALLOC(b->iov_base, n))) {
+ return -1;
+ }
+ /* Realloc might also shrink. */
+ if (zero_fill && b->iov_len < n) {
+ memset((uint8_t *)p + b->iov_len, 0, n - b->iov_len);
+ }
+ b->iov_base = p;
+ b->iov_len = n;
+ return 0;
+}
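+
+/*
+ * Growth example: with the ds hint the buffer starts at 256 bytes and
+ * doubles until it covers the request, so a 300 byte request yields a
+ * 512 byte allocation, while the ht hint allocates exactly the
+ * requested size.
+ */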
+
+#define T_ptr(base, pos) ((void *)((uint8_t *)(base) + (uoffset_t)(pos)))
+#define ds_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_ds].iov_base, (pos)))
+#define vs_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vs].iov_base, (pos)))
+#define pl_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_pl].iov_base, (pos)))
+#define us_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_us].iov_base, (pos)))
+#define vd_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vd].iov_base, (pos)))
+#define vb_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vb].iov_base, (pos)))
+#define vs_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_vs].iov_base))
+#define pl_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_pl].iov_base))
+#define us_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_us].iov_base))
+
+#define table_limit (FLATBUFFERS_VOFFSET_MAX - field_size + 1)
+#define data_limit (FLATBUFFERS_UOFFSET_MAX - field_size + 1)
+
+#define set_identifier(id) memcpy(&B->identifier, (id) ? (void *)(id) : (void *)_pad, identifier_size)
+
+/* Must also return true when no buffer has been started. */
+#define is_top_buffer(B) (B->nest_id == 0)
+
+/*
+ * Tables use a stack representation better suited for quickly adding
+ * fields to tables, but it must occasionally be refreshed following
+ * reallocation or reentry from a child frame.
+ */
+static inline void refresh_ds(flatcc_builder_t *B, uoffset_t type_limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ B->ds = ds_ptr(B->ds_first);
+ B->ds_limit = (uoffset_t)buf->iov_len - B->ds_first;
+ /*
+ * So we don't allocate outside the table's representation size, nor our
+ * current buffer size.
+ */
+ if (B->ds_limit > type_limit) {
+ B->ds_limit = type_limit;
+ }
+ /* So exit frame can refresh fast. */
+ frame(type_limit) = type_limit;
+}
+
+static int reserve_ds(flatcc_builder_t *B, size_t need, uoffset_t limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ if (B->alloc(B->alloc_context, buf, B->ds_first + need, 1, flatcc_builder_alloc_ds)) {
+ return -1;
+ }
+ refresh_ds(B, limit);
+ return 0;
+}
+
+/*
+ * Make sure there is always an extra zero termination on the stack,
+ * even if it isn't emitted, so that string updates can always count
+ * on zero termination being present.
+ */
+static inline void *push_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ size_t offset;
+
+ offset = B->ds_offset;
+ if ((B->ds_offset += size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, data_limit)) {
+ return 0;
+ }
+ }
+ return B->ds + offset;
+}
+
+static inline void unpush_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ B->ds_offset -= size;
+ memset(B->ds + B->ds_offset, 0, size);
+}
+
+static inline void *push_ds_copy(flatcc_builder_t *B, const void *data, uoffset_t size)
+{
+ void *p;
+
+ if (!(p = push_ds(B, size))) {
+ return 0;
+ }
+ memcpy(p, data, size);
+ return p;
+}
+
+static inline void *push_ds_field(flatcc_builder_t *B, uoffset_t size, uint16_t align, voffset_t id)
+{
+ uoffset_t offset;
+
+ /*
+ * We calculate table field alignment relative to the first entry, not
+ * the header field with the vtable offset.
+ *
+ * Note: the >= comparison handles the special case where B->ds is not
+ * allocated yet and size is 0, so the return value would otherwise be
+ * mistaken for an error.
+ */
+ offset = alignup_uoffset(B->ds_offset, align);
+ if ((B->ds_offset = offset + size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ return B->ds + offset;
+}
+
+static inline void *push_ds_offset_field(flatcc_builder_t *B, voffset_t id)
+{
+ uoffset_t offset;
+
+ offset = alignup_uoffset(B->ds_offset, field_size);
+ if ((B->ds_offset = offset + field_size) > B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ *B->pl++ = (flatbuffers_voffset_t)offset;
+ return B->ds + offset;
+}
+
+static inline void *reserve_buffer(flatcc_builder_t *B, int alloc_type, size_t used, size_t need, int zero_init)
+{
+ iovec_t *buf = B->buffers + alloc_type;
+
+ if (used + need > buf->iov_len) {
+ if (B->alloc(B->alloc_context, buf, used + need, zero_init, alloc_type)) {
+ check(0, "memory allocation failed");
+ return 0;
+ }
+ }
+ return (void *)((size_t)buf->iov_base + used);
+}
+
+static inline int reserve_fields(flatcc_builder_t *B, int count)
+{
+ size_t used, need;
+
+ /* Provide faster stack operations for common table operations. */
+ used = frame(container.table.vs_end) + frame(container.table.id_end) * sizeof(voffset_t);
+ need = (size_t)(count + 2) * sizeof(voffset_t);
+ if (!(B->vs = reserve_buffer(B, flatcc_builder_alloc_vs, used, need, 1))) {
+ return -1;
+ }
+ /* Move past header for convenience. */
+ B->vs += 2;
+ used = frame(container.table.pl_end);
+ /* Add one to handle special case of first table being empty. */
+ need = (size_t)count * sizeof(*(B->pl)) + 1;
+ if (!(B->pl = reserve_buffer(B, flatcc_builder_alloc_pl, used, need, 0))) {
+ return -1;
+ }
+ return 0;
+}
+
+static int alloc_ht(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ size_t size, k;
+ /* Allocate null entry so we can check for return errors. */
+ FLATCC_ASSERT(B->vd_end == 0);
+ if (!reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0)) {
+ return -1;
+ }
+ B->vd_end = sizeof(vtable_descriptor_t);
+ size = field_size * FLATCC_BUILDER_MIN_HASH_COUNT;
+ if (B->alloc(B->alloc_context, buf, size, 1, flatcc_builder_alloc_ht)) {
+ return -1;
+ }
+ while (size * 2 <= buf->iov_len) {
+ size *= 2;
+ }
+ size /= field_size;
+ for (k = 0; (((size_t)1) << k) < size; ++k) {
+ }
+ B->ht_width = k;
+ return 0;
+}
+
+static inline uoffset_t *lookup_ht(flatcc_builder_t *B, uint32_t hash)
+{
+ uoffset_t *T;
+
+ if (B->ht_width == 0) {
+ if (alloc_ht(B)) {
+ return 0;
+ }
+ }
+ T = B->buffers[flatcc_builder_alloc_ht].iov_base;
+
+ return &T[FLATCC_BUILDER_BUCKET_VT_HASH(hash, B->ht_width)];
+}
+
+void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ if (B->ht_width == 0) {
+ return;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ /* Reserve the null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ B->vb_end = 0;
+}
+
+int flatcc_builder_custom_init(flatcc_builder_t *B,
+ flatcc_builder_emit_fun *emit, void *emit_context,
+ flatcc_builder_alloc_fun *alloc, void *alloc_context)
+{
+ /*
+ * Do not allocate anything here. Only the required buffers will be
+ * allocated. For simple struct buffers, no allocation is required
+ * at all.
+ */
+ memset(B, 0, sizeof(*B));
+
+ if (emit == 0) {
+ B->is_default_emitter = 1;
+ emit = flatcc_emitter;
+ emit_context = &B->default_emit_context;
+ }
+ if (alloc == 0) {
+ alloc = flatcc_builder_default_alloc;
+ }
+ B->alloc_context = alloc_context;
+ B->alloc = alloc;
+ B->emit_context = emit_context;
+ B->emit = emit;
+ return 0;
+}
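
A minimal sketch of a user-supplied emit callback (editor's illustration, not part of this patch): a counting emitter that only measures the size of the finished buffer. The callback signature mirrors flatcc_emitter further down in this patch; the header path and the count_emitter_t type are assumptions.

    #include "flatcc/flatcc_builder.h"

    typedef struct { size_t total; } count_emitter_t;

    static int count_emit(void *emit_context, const flatcc_iovec_t *iov,
            int iov_count, flatbuffers_soffset_t offset, size_t len)
    {
        count_emitter_t *ctx = emit_context;
        (void)iov; (void)iov_count; (void)offset;
        /* offset < 0 marks front emission, offset >= 0 marks back emission
         * (vtables and end padding); only the length matters here. */
        ctx->total += len;
        return 0; /* non-zero would make the builder treat the emit as failed */
    }

    /* Installed via: flatcc_builder_custom_init(&B, count_emit, &counter, 0, 0); */
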
+
+int flatcc_builder_init(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_init(B, 0, 0, 0, 0);
+}
+
+int flatcc_builder_custom_reset(flatcc_builder_t *B, int set_defaults, int reduce_buffers)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ if (buf->iov_base) {
+ /* Don't try to reduce the hash table. */
+ if (i != flatcc_builder_alloc_ht &&
+ reduce_buffers && B->alloc(B->alloc_context, buf, 1, 1, i)) {
+ return -1;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ } else {
+ FLATCC_ASSERT(buf->iov_len == 0);
+ }
+ }
+ B->vb_end = 0;
+ if (B->vd_end > 0) {
+ /* Reset past null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ }
+ B->min_align = 0;
+ B->emit_start = 0;
+ B->emit_end = 0;
+ B->level = 0;
+ B->limit_level = 0;
+ B->ds_offset = 0;
+ B->ds_limit = 0;
+ B->nest_count = 0;
+ B->nest_id = 0;
+ /* Needed for correct offset calculation. */
+ B->ds = B->buffers[flatcc_builder_alloc_ds].iov_base;
+ B->pl = B->buffers[flatcc_builder_alloc_pl].iov_base;
+ B->vs = B->buffers[flatcc_builder_alloc_vs].iov_base;
+ B->frame = 0;
+ if (set_defaults) {
+ B->vb_flush_limit = 0;
+ B->max_level = 0;
+ B->disable_vt_clustering = 0;
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_reset(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_reset(B->refmap);
+ }
+ return 0;
+}
+
+int flatcc_builder_reset(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_reset(B, 0, 0);
+}
+
+void flatcc_builder_clear(flatcc_builder_t *B)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ B->alloc(B->alloc_context, buf, 0, 0, i);
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_clear(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_clear(B->refmap);
+ }
+ memset(B, 0, sizeof(*B));
+}
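
An end-to-end sketch of the default-emitter life cycle (editor's illustration, not part of this patch): init, build a one-field table, end the buffer, finalize to a heap copy, then clear. The field id, the missing endian conversion, and the terse error handling are simplifications that generated code would normally take care of.

    #include <stdint.h>
    #include "flatcc/flatcc_builder.h"

    static void *build_example(size_t *size_out)
    {
        flatcc_builder_t builder, *B = &builder;
        flatcc_builder_ref_t root;
        uint32_t *field;
        void *buffer = 0;

        if (flatcc_builder_init(B)) return 0;
        if (flatcc_builder_start_buffer(B, 0, 0, 0)) goto done;  /* no identifier, default flags */
        if (flatcc_builder_start_table(B, 1)) goto done;         /* one field slot */
        if (!(field = flatcc_builder_table_add(B, 0, sizeof(*field), sizeof(*field)))) goto done;
        *field = 42;                                             /* native endian for brevity */
        if (!(root = flatcc_builder_end_table(B))) goto done;
        if (!flatcc_builder_end_buffer(B, root)) goto done;
        buffer = flatcc_builder_finalize_buffer(B, size_out);    /* free with flatcc_builder_free */
    done:
        flatcc_builder_clear(B);
        return buffer;
    }
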
+
+static inline void set_min_align(flatcc_builder_t *B, uint16_t align)
+{
+ if (B->min_align < align) {
+ B->min_align = align;
+ }
+}
+
+/*
+ * This computes a max: the minimum viable alignment is the largest
+ * observed alignment requirement, and no larger.
+ */
+static inline void get_min_align(uint16_t *align, uint16_t b)
+{
+ if (*align < b) {
+ *align = b;
+ }
+}
+
+void *flatcc_builder_enter_user_frame_ptr(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return frame;
+}
+
+size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return B->user_frame_offset;
+}
+
+
+size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B)
+{
+ size_t *hdr;
+
+ FLATCC_ASSERT(B->user_frame_offset > 0);
+
+ hdr = us_ptr(B->user_frame_offset);
+ B->user_frame_end = B->user_frame_offset - sizeof(size_t);
+ return B->user_frame_offset = hdr[-1];
+}
+
+size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle)
+{
+ FLATCC_ASSERT(B->user_frame_offset >= handle);
+
+ B->user_frame_offset = handle;
+ return flatcc_builder_exit_user_frame(B);
+}
+
+size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B)
+{
+ return B->user_frame_offset;
+}
+
+void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle)
+{
+ return us_ptr(handle);
+}
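
A short sketch of the user frame API above (editor's illustration, not part of this patch). Because the underlying buffer may be reallocated by later builder calls, the pointer is always re-fetched through the handle rather than cached; exit_user_frame_at pops back to, and including, the frame identified by the handle. The my_state_t struct is a hypothetical piece of per-level user state.

    #include "flatcc/flatcc_builder.h"

    typedef struct { size_t count; } my_state_t;  /* hypothetical user state */

    static int with_user_state(flatcc_builder_t *B)
    {
        size_t handle = flatcc_builder_enter_user_frame(B, sizeof(my_state_t));
        if (!handle) return -1;
        /* Re-fetch the pointer from the handle after any call that may grow stacks. */
        ((my_state_t *)flatcc_builder_get_user_frame_ptr(B, handle))->count = 1;
        flatcc_builder_exit_user_frame_at(B, handle);
        return 0;
    }
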
+
+static int enter_frame(flatcc_builder_t *B, uint16_t align)
+{
+ if (++B->level > B->limit_level) {
+ if (B->max_level > 0 && B->level > B->max_level) {
+ return -1;
+ }
+ if (!(B->frame = reserve_buffer(B, flatcc_builder_alloc_fs,
+ (size_t)(B->level - 1) * frame_size, frame_size, 0))) {
+ return -1;
+ }
+ B->limit_level = (int)(B->buffers[flatcc_builder_alloc_fs].iov_len / frame_size);
+ if (B->max_level > 0 && B->max_level < B->limit_level) {
+ B->limit_level = B->max_level;
+ }
+ } else {
+ ++B->frame;
+ }
+ frame(ds_offset) = B->ds_offset;
+ frame(align) = B->align;
+ B->align = align;
+ /* Note: do not assume padding before first has been allocated! */
+ frame(ds_first) = B->ds_first;
+ frame(type_limit) = data_limit;
+ B->ds_first = alignup_uoffset(B->ds_first + B->ds_offset, 8);
+ B->ds_offset = 0;
+ return 0;
+}
+
+static inline void exit_frame(flatcc_builder_t *B)
+{
+ memset(B->ds, 0, B->ds_offset);
+ B->ds_offset = frame(ds_offset);
+ B->ds_first = frame(ds_first);
+ refresh_ds(B, frame(type_limit));
+
+ /*
+ * Restore local alignment: e.g. a table should not change alignment
+ * because a child table was just created elsewhere in the buffer,
+ * but the overall alignment (min align), should be aware of it.
+ * Each buffer has its own min align that then migrates up without
+ * being affected by sibling or child buffers.
+ */
+ set_min_align(B, B->align);
+ B->align = frame(align);
+
+ --B->frame;
+ --B->level;
+}
+
+static inline uoffset_t front_pad(flatcc_builder_t *B, uoffset_t size, uint16_t align)
+{
+ return (uoffset_t)(B->emit_start - (flatcc_builder_ref_t)size) & (align - 1u);
+}
+
+static inline uoffset_t back_pad(flatcc_builder_t *B, uint16_t align)
+{
+ return (uoffset_t)(B->emit_end) & (align - 1u);
+}
+
+static inline flatcc_builder_ref_t emit_front(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ /*
+ * We might have overflow when including headers, but without
+ * headers we should have checks to prevent overflow in the
+ * uoffset_t range, hence we subtract 16 to be safe. With that
+ * guarantee we can also make a safe check on the soffset_t range.
+ *
+ * We only allow buffers half the theoretical size of
+ * FLATBUFFERS_UOFFSET_MAX so we can safely use signed references.
+ *
+ * NOTE: vtables vt_offset field is signed, and the check in create
+ * table only ensures the signed limit. The check would fail if the
+ * total buffer size could grow beyond UOFFSET_MAX, and we prevent
+ * that by limiting the lower end to SOFFSET_MIN, and the upper end
+ * at emit_back to SOFFSET_MAX.
+ */
+ ref = B->emit_start - (flatcc_builder_ref_t)iov->len;
+ if ((iov->len > 16 && iov->len - 16 > FLATBUFFERS_UOFFSET_MAX) || ref >= B->emit_start) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return B->emit_start = ref;
+}
+
+static inline flatcc_builder_ref_t emit_back(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ ref = B->emit_end;
+ B->emit_end = ref + (flatcc_builder_ref_t)iov->len;
+ /*
+ * Similar to emit_front check, but since we only emit vtables and
+ * padding at the back, we are not concerned with iov->len overflow,
+ * only total buffer overflow.
+ *
+ * With this check, vtable soffset references at table header can
+ * still overflow in extreme cases, so this must be checked
+ * separately.
+ */
+ if (B->emit_end < ref) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ /*
+ * Back references always return ref + 1 because ref == 0 is valid and
+ * should not be mistaken for error. vtables understand this.
+ */
+ return ref + 1;
+}
+
+static int align_to_block(flatcc_builder_t *B, uint16_t *align, uint16_t block_align, int is_nested)
+{
+ size_t end_pad;
+ iov_state_t iov;
+
+ block_align = block_align ? block_align : B->block_align ? B->block_align : 1;
+ get_min_align(align, field_size);
+ get_min_align(align, block_align);
+ /* Pad end of buffer to multiple. */
+ if (!is_nested) {
+ end_pad = back_pad(B, block_align);
+ if (end_pad) {
+ init_iov();
+ push_iov(_pad, end_pad);
+ if (0 == emit_back(B, &iov)) {
+ check(0, "emitter rejected buffer content");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
+ uint16_t block_align,
+ const void *data, size_t size, uint16_t align, flatcc_builder_buffer_flags_t flags)
+{
+ uoffset_t size_field, pad;
+ iov_state_t iov;
+ int with_size = (flags & flatcc_builder_with_size) != 0;
+
+ if (align_to_block(B, &align, block_align, !is_top_buffer(B))) {
+ return 0;
+ }
+ pad = front_pad(B, (uoffset_t)(size + (with_size ? field_size : 0)), align);
+ write_uoffset(&size_field, (uoffset_t)size + pad);
+ init_iov();
+ /* Add ubyte vector size header if nested buffer. */
+ push_iov_cond(&size_field, field_size, !is_top_buffer(B));
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
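
A sketch of embedding an already serialized flatbuffer as a nested buffer (editor's illustration, not part of this patch). Inside a table the returned ref would typically be stored through flatcc_builder_table_add_offset; the 8-byte alignment is an editor-chosen assumption, and block_align 0 / flags 0 fall back to the builder defaults.

    #include "flatcc/flatcc_builder.h"

    /* `data`/`size` would be a previously serialized flatbuffer. */
    static flatcc_builder_ref_t embed_nested(flatcc_builder_t *B,
            const void *data, size_t size)
    {
        return flatcc_builder_embed_buffer(B, 0, data, size, 8, 0);
    }
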
+
+flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align,
+ flatcc_builder_ref_t object_ref, uint16_t align, flatcc_builder_buffer_flags_t flags)
+{
+ flatcc_builder_ref_t buffer_ref;
+ uoffset_t header_pad, id_size = 0;
+ uoffset_t object_offset, buffer_size, buffer_base;
+ iov_state_t iov;
+ flatcc_builder_identifier_t id_out = 0;
+ int is_nested = (flags & flatcc_builder_is_nested) != 0;
+ int with_size = (flags & flatcc_builder_with_size) != 0;
+
+ if (align_to_block(B, &align, block_align, is_nested)) {
+ return 0;
+ }
+ set_min_align(B, align);
+ if (identifier) {
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == identifier_size);
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == field_size);
+ memcpy(&id_out, identifier, identifier_size);
+ id_out = __flatbuffers_thash_read_from_le(&id_out);
+ write_identifier(&id_out, id_out);
+ }
+ id_size = id_out ? identifier_size : 0;
+ header_pad = front_pad(B, field_size + id_size + (uoffset_t)(with_size ? field_size : 0), align);
+ init_iov();
+ /* ubyte vectors size field wrapping nested buffer. */
+ push_iov_cond(&buffer_size, field_size, is_nested || with_size);
+ push_iov(&object_offset, field_size);
+ /* Identifiers are not always present in buffer. */
+ push_iov(&id_out, id_size);
+ push_iov(_pad, header_pad);
+ buffer_base = (uoffset_t)B->emit_start - (uoffset_t)iov.len + (uoffset_t)((is_nested || with_size) ? field_size : 0);
+ if (is_nested) {
+ write_uoffset(&buffer_size, (uoffset_t)B->buffer_mark - buffer_base);
+ } else {
+ /* Also include clustered vtables. */
+ write_uoffset(&buffer_size, (uoffset_t)B->emit_end - buffer_base);
+ }
+ write_uoffset(&object_offset, (uoffset_t)object_ref - buffer_base);
+ if (0 == (buffer_ref = emit_front(B, &iov))) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return buffer_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B, const void *data, size_t size, uint16_t align)
+{
+ size_t pad;
+ iov_state_t iov;
+
+ check(align >= 1, "align cannot be 0");
+ set_min_align(B, align);
+ pad = front_pad(B, (uoffset_t)size, align);
+ init_iov();
+ push_iov(data, size);
+ /*
+ * Normally structs will already be a multiple of their alignment,
+ * so this padding will not likely be emitted.
+ */
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
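
A sketch of emitting a positional struct directly (editor's illustration, not part of this patch): three floats with 4-byte alignment. The struct layout is hypothetical; generated code normally provides the exact layout and endian conversion, which is omitted here.

    #include "flatcc/flatcc_builder.h"

    struct vec3 { float x, y, z; };  /* hypothetical schema struct */

    static flatcc_builder_ref_t write_vec3(flatcc_builder_t *B)
    {
        struct vec3 v = { 1.0f, 2.0f, 3.0f };  /* endian conversion omitted */
        return flatcc_builder_create_struct(B, &v, sizeof(v), 4);
    }
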
+
+int flatcc_builder_start_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align, flatcc_builder_buffer_flags_t flags)
+{
+ /*
+ * This saves the parent `min_align` in the align field since we
+ * shouldn't use that for the current buffer. `exit_frame`
+ * automatically aggregates align up, so it is updated when the
+ * buffer frame exits.
+ */
+ if (enter_frame(B, B->min_align)) {
+ return -1;
+ }
+ /* B->align now has parent min_align, and child frames will save it. */
+ B->min_align = 1;
+ /* Save the parent block align, and set proper defaults for this buffer. */
+ frame(container.buffer.block_align) = B->block_align;
+ B->block_align = block_align;
+ frame(container.buffer.flags) = B->buffer_flags;
+ B->buffer_flags = (uint16_t)flags;
+ frame(container.buffer.mark) = B->buffer_mark;
+ frame(container.buffer.nest_id) = B->nest_id;
+ /*
+ * End of buffer when nested. Not defined for top-level because here
+ * (and only here) we permit strings etc. to be created before buffer
+ * start, and because top-level buffer vtables can be clustered.
+ */
+ B->buffer_mark = B->emit_start;
+ /* Must be 0 before and after entering top-level buffer, and unique otherwise. */
+ B->nest_id = B->nest_count++;
+ frame(container.buffer.identifier) = B->identifier;
+ set_identifier(identifier);
+ frame(type) = flatcc_builder_buffer;
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root)
+{
+ flatcc_builder_ref_t buffer_ref;
+ flatcc_builder_buffer_flags_t flags;
+
+ flags = (flatcc_builder_buffer_flags_t)B->buffer_flags & flatcc_builder_with_size;
+ flags |= is_top_buffer(B) ? 0 : flatcc_builder_is_nested;
+ check(frame(type) == flatcc_builder_buffer, "expected buffer frame");
+ set_min_align(B, B->block_align);
+ if (0 == (buffer_ref = flatcc_builder_create_buffer(B, (void *)&B->identifier,
+ B->block_align, root, B->min_align, flags))) {
+ return 0;
+ }
+ B->buffer_mark = frame(container.buffer.mark);
+ B->nest_id = frame(container.buffer.nest_id);
+ B->identifier = frame(container.buffer.identifier);
+ B->buffer_flags = frame(container.buffer.flags);
+ exit_frame(B);
+ return buffer_ref;
+}
+
+void *flatcc_builder_start_struct(flatcc_builder_t *B, size_t size, uint16_t align)
+{
+ /* Allocate space for the struct on the ds stack. */
+ if (enter_frame(B, align)) {
+ return 0;
+ }
+ frame(type) = flatcc_builder_struct;
+ refresh_ds(B, data_limit);
+ return push_ds(B, (uoffset_t)size);
+}
+
+void *flatcc_builder_struct_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t object_ref;
+
+ check(frame(type) == flatcc_builder_struct, "expected struct frame");
+ if (0 == (object_ref = flatcc_builder_create_struct(B, B->ds, B->ds_offset, B->align))) {
+ return 0;
+ }
+ exit_frame(B);
+ return object_ref;
+}
+
+static inline int vector_count_add(flatcc_builder_t *B, uoffset_t count, uoffset_t max_count)
+{
+ uoffset_t n, n1;
+ n = frame(container.vector.count);
+ n1 = n + count;
+ /*
+ * This prevents elem_size * count from overflowing iff the max count
+ * has been set sensibly. Without this check we might allocate too
+ * little on the ds stack and return a buffer the user believes is
+ * much larger; the buffer would eventually fail anyway, but in a less
+ * obvious way.
+ */
+ check_error(n <= n1 && n1 <= max_count, -1, "vector too large to represent");
+ frame(container.vector.count) = n1;
+ return 0;
+}
+
+void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) <= frame(container.vector.max_count), 0, "vector max count exceeded");
+ frame(container.vector.count) += 1;
+ return push_ds_copy(B, data, frame(container.vector.elem_size));
+}
+
+void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds_copy(B, data, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(field_size * count));
+}
+
+flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B, flatcc_builder_ref_t ref)
+{
+ flatcc_builder_ref_t *p;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (frame(container.vector.count) == max_offset_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, field_size))) {
+ return 0;
+ }
+ *p = ref;
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B, const flatcc_builder_ref_t *refs, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, refs, (uoffset_t)(field_size * count));
+}
+
+char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds_copy(B, s, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_append_string(B, s, strlen(s));
+}
+
+char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_append_string(B, s, strnlen(s, max_len));
+}
+
+int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) >= count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ check_error(frame(container.vector.count) >= len, -1, "cannot truncate string past empty");
+ frame(container.vector.count) -= (uoffset_t)len;
+ unpush_ds(B, (uoffset_t)len);
+ return 0;
+}
+
+int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size, uint16_t align, size_t max_count)
+{
+ get_min_align(&align, field_size);
+ if (enter_frame(B, align)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = (uoffset_t)elem_size;
+ frame(container.vector.count) = 0;
+ frame(container.vector.max_count) = (uoffset_t)max_count;
+ frame(type) = flatcc_builder_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+int flatcc_builder_start_offset_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = field_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_offset_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *vec, size_t count)
+{
+ flatcc_builder_ref_t *_vec;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return 0;
+ }
+ if (!(_vec = flatcc_builder_extend_offset_vector(B, count))) {
+ return 0;
+ }
+ memcpy(_vec, vec, count * field_size);
+ return flatcc_builder_end_offset_vector(B);
+}
+
+int flatcc_builder_start_string(flatcc_builder_t *B)
+{
+ if (enter_frame(B, 1)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = 1;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_string;
+ refresh_ds(B, data_limit);
+ return 0;
+}
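
A sketch of building a string incrementally instead of using create_string (editor's illustration, not part of this patch), combining the start/append/end calls defined in this file.

    #include "flatcc/flatcc_builder.h"

    static flatcc_builder_ref_t build_greeting(flatcc_builder_t *B)
    {
        if (flatcc_builder_start_string(B)) return 0;
        if (!flatcc_builder_append_string_str(B, "hello, ")) return 0;
        if (!flatcc_builder_append_string_str(B, "world")) return 0;
        return flatcc_builder_end_string(B);
    }
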
+
+int flatcc_builder_reserve_table(flatcc_builder_t *B, int count)
+{
+ check(count >= 0, "cannot reserve negative count");
+ return reserve_fields(B, count);
+}
+
+int flatcc_builder_start_table(flatcc_builder_t *B, int count)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.table.vs_end) = vs_offset(B->vs);
+ frame(container.table.pl_end) = pl_offset(B->pl);
+ frame(container.table.vt_hash) = B->vt_hash;
+ frame(container.table.id_end) = B->id_end;
+ B->vt_hash = 0;
+ FLATCC_BUILDER_INIT_VT_HASH(B->vt_hash);
+ B->id_end = 0;
+ frame(type) = flatcc_builder_table;
+ if (reserve_fields(B, count)) {
+ return -1;
+ }
+ refresh_ds(B, table_limit);
+ return 0;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size)
+{
+ flatcc_builder_vt_ref_t vt_ref;
+ iov_state_t iov;
+ voffset_t *vt_;
+ size_t i;
+
+ /*
+ * Only top-level buffer can cluster vtables because only it can
+ * extend beyond the end.
+ *
+ * We write the vtable after the referencing table to maintain
+ * the construction invariant that any offset reference has
+ * valid emitted data at a higher address, and also that any
+ * issued negative emit address represents an offset reference
+ * to some flatbuffer object or vector (or possibly a root
+ * struct).
+ *
+ * The vt_ref is stored as the reference + 1 to avoid having 0 as a
+ * valid reference (which usually means error). It also identifies
+ * vtable references as the only uneven references, and the only
+ * references that can be used multiple times in the same buffer.
+ *
+ * We do the vtable conversion here so cached vtables can be built
+ * hashed and compared more efficiently, and so end users with
+ * direct vtable construction don't have to worry about endianness.
+ * This also ensures the hash function works the same wrt.
+ * collision frequency.
+ */
+
+ if (!flatbuffers_is_native_pe()) {
+ /* Make space in vtable cache for temporary endian conversion. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+ return 0;
+ }
+ for (i = 0; i < vt_size / sizeof(voffset_t); ++i) {
+ write_voffset(&vt_[i], vt[i]);
+ }
+ vt = vt_;
+ /* We don't need to free the reservation since we don't advance any base pointer. */
+ }
+
+ init_iov();
+ push_iov(vt, vt_size);
+ if (is_top_buffer(B) && !B->disable_vt_clustering) {
+ /* Note that `emit_back` already returns ref + 1 as we require for vtables. */
+ if (0 == (vt_ref = emit_back(B, &iov))) {
+ return 0;
+ }
+ } else {
+ if (0 == (vt_ref = emit_front(B, &iov))) {
+ return 0;
+ }
+ /*
+ * We don't have a valid 0 ref here, but to be consistent with
+ * clustered vtables we offset by one. This cannot be zero
+ * either.
+ */
+ vt_ref += 1;
+ }
+ return vt_ref;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size, uint32_t vt_hash)
+{
+ vtable_descriptor_t *vd, *vd2;
+ uoffset_t *pvd, *pvd_head;
+ uoffset_t next;
+ voffset_t *vt_;
+
+ /* This just gets the hash table slot, we still have to inspect it. */
+ if (!(pvd_head = lookup_ht(B, vt_hash))) {
+ return 0;
+ }
+ pvd = pvd_head;
+ next = *pvd;
+ /* Tracks if there already is a cached copy. */
+ vd2 = 0;
+ while (next) {
+ vd = vd_ptr(next);
+ vt_ = vb_ptr(vd->vb_start);
+ if (vt_[0] != vt_size || 0 != memcmp(vt, vt_, vt_size)) {
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Can't share emitted vtables between buffers, */
+ if (vd->nest_id != B->nest_id) {
+ /* but we don't have to resubmit to cache. */
+ vd2 = vd;
+ /* See if there is a better match. */
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Move to front hash strategy. */
+ if (pvd != pvd_head) {
+ *pvd = vd->next;
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ }
+ /* vtable exists and has been emitted within current buffer. */
+ return vd->vt_ref;
+ }
+ /* Allocate new descriptor. */
+ if (!(vd = reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0))) {
+ return 0;
+ }
+ next = B->vd_end;
+ B->vd_end += (uoffset_t)sizeof(vtable_descriptor_t);
+
+ /* Identify the buffer this vtable descriptor belongs to. */
+ vd->nest_id = B->nest_id;
+
+ /* Move to front hash strategy. */
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ if (0 == (vd->vt_ref = flatcc_builder_create_vtable(B, vt, vt_size))) {
+ return 0;
+ }
+ if (vd2) {
+ /* Reuse cached copy. */
+ vd->vb_start = vd2->vb_start;
+ } else {
+ if (B->vb_flush_limit && B->vb_flush_limit < B->vb_end + vt_size) {
+ flatcc_builder_flush_vtable_cache(B);
+ } else {
+ /* Make space in vtable cache. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+ return -1;
+ }
+ vd->vb_start = B->vb_end;
+ B->vb_end += vt_size;
+ memcpy(vt_, vt, vt_size);
+ }
+ }
+ return vd->vt_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B, const void *data, size_t size, uint16_t align,
+ flatbuffers_voffset_t *offsets, int offset_count, flatcc_builder_vt_ref_t vt_ref)
+{
+ int i;
+ uoffset_t pad, vt_offset, vt_offset_field, vt_base, base, offset, *offset_field;
+ iov_state_t iov;
+
+ check(offset_count >= 0, "expected non-negative offset_count");
+ /*
+ * vtable references are offset by 1 to avoid confusion with
+ * 0 as an error reference. It also uniquely identifies them
+ * as vtables being the only uneven reference type.
+ */
+ check(vt_ref & 1, "invalid vtable referenc");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ /* Alignment is calculated for the first element, not the header. */
+ pad = front_pad(B, (uoffset_t)size, align);
+ base = (uoffset_t)B->emit_start - (uoffset_t)(pad + size + field_size);
+ /* Adjust by 1 to get unencoded vtable reference. */
+ vt_base = (uoffset_t)(vt_ref - 1);
+ vt_offset = base - vt_base;
+ /* Avoid overflow. */
+ if (base - vt_offset != vt_base) {
+ return -1;
+ }
+ /* Protocol endian encoding. */
+ write_uoffset(&vt_offset_field, vt_offset);
+ for (i = 0; i < offset_count; ++i) {
+ offset_field = (uoffset_t *)((size_t)data + offsets[i]);
+ offset = *offset_field - base - offsets[i] - (uoffset_t)field_size;
+ write_uoffset(offset_field, offset);
+ }
+ init_iov();
+ push_iov(&vt_offset_field, field_size);
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return id < B->id_end && B->vs[id] != 0;
+}
+
+int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (id == 0 || id >= B->id_end) {
+ return 0;
+ }
+ if (B->vs[id - 1] == 0) {
+ return B->vs[id] == 0;
+ }
+ if (*(uint8_t *)(B->ds + B->vs[id - 1] - field_size)) {
+ return B->vs[id] != 0;
+ }
+ return B->vs[id] == 0;
+}
+
+int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count)
+{
+ int i;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (B->id_end < count) {
+ return 0;
+ }
+ for (i = 0; i < count; ++i) {
+ if (B->vs[required[i]] == 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
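
A sketch of how end-of-table code might use the required-field check above before closing a table (editor's illustration, not part of this patch). The field id 0 is hypothetical; generated code supplies schema-specific ids.

    #include "flatcc/flatcc_builder.h"

    static flatcc_builder_ref_t end_table_checked(flatcc_builder_t *B)
    {
        if (!flatcc_builder_check_required_field(B, 0)) {
            return 0;  /* required field missing; caller treats 0 as error */
        }
        return flatcc_builder_end_table(B);
    }
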
+
+flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B)
+{
+ voffset_t *vt, vt_size;
+ flatcc_builder_ref_t table_ref, vt_ref;
+ int pl_count;
+ voffset_t *pl;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ /* We have `ds_limit`, so we should not have to check for overflow here. */
+
+ vt = B->vs - 2;
+ vt_size = (voffset_t)(sizeof(voffset_t) * (B->id_end + 2u));
+ /* Update vtable header fields, first vtable size, then object table size. */
+ vt[0] = vt_size;
+ /*
+ * The `ds` buffer is always at least `field_size` aligned but excludes the
+ * initial vtable offset field. Therefore `field_size` is added here
+ * to the total table size in the vtable.
+ */
+ vt[1] = (voffset_t)(B->ds_offset + field_size);
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)vt[0], (uint32_t)vt[1]);
+ /* Find already emitted vtable, or emit a new one. */
+ if (!(vt_ref = flatcc_builder_create_cached_vtable(B, vt, vt_size, B->vt_hash))) {
+ return 0;
+ }
+ /* Clear vs stack so it is ready for the next vtable (ds stack is cleared by exit frame). */
+ memset(vt, 0, vt_size);
+
+ pl = pl_ptr(frame(container.table.pl_end));
+ pl_count = (int)(B->pl - pl);
+ if (0 == (table_ref = flatcc_builder_create_table(B, B->ds, B->ds_offset, B->align, pl, pl_count, vt_ref))) {
+ return 0;
+ }
+ B->vt_hash = frame(container.table.vt_hash);
+ B->id_end = frame(container.table.id_end);
+ B->vs = vs_ptr(frame(container.table.vs_end));
+ B->pl = pl_ptr(frame(container.table.pl_end));
+ exit_frame(B);
+ return table_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
+ const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count)
+{
+ /*
+ * Note: it is important that vec_size is uoffset not size_t
+ * in case sizeof(uoffset_t) > sizeof(size_t) because max_count is
+ * defined in terms of uoffset_t representation size, and also
+ * because we risk accepting too large a vector even if max_count is
+ * not violated.
+ */
+ uoffset_t vec_size, vec_pad, length_prefix;
+ iov_state_t iov;
+
+ check_error(count <= max_count, 0, "vector max_count violated");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ vec_size = (uoffset_t)count * (uoffset_t)elem_size;
+ /*
+ * Overflow can happen on 32-bit systems when uoffset_t is defined as
+ * 64-bit. `emit_front/back` catches overflow, but not if our size type
+ * wraps first.
+ */
+#if FLATBUFFERS_UOFFSET_MAX > SIZE_MAX
+ check_error(vec_size < SIZE_MAX, 0, "vector larger than address space");
+#endif
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, align);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(data, vec_size);
+ push_iov(_pad, vec_pad);
+ return emit_front(B, &iov);
+}
+
+/*
+ * Note: FlatBuffers official documentation states that the size field of a
+ * vector is a 32-bit element count. It is not quite clear if the
+ * intention is to have the size field be of type uoffset_t since tables
+ * also have a uoffset_t sized header, or if the vector size should
+ * remain unchanged if uoffset is changed to 16- or 64-bits
+ * respectively. Since it makes most sense to have a vector compatible
+ * with the addressable space, we choose to use uoffset_t as size field,
+ * which remains compatible with the default 32-bit version of uoffset_t.
+ */
+flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+
+ if (0 == (vector_ref = flatcc_builder_create_vector(B, B->ds,
+ frame(container.vector.count), frame(container.vector.elem_size),
+ B->align, frame(container.vector.max_count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
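
A sketch of creating a scalar vector in one call (editor's illustration, not part of this patch). The max_count bound shown is an editor-chosen conservative value derived from FLATBUFFERS_UOFFSET_MAX, which this file already uses; generated code normally supplies a per-type constant instead.

    #include <stdint.h>
    #include "flatcc/flatcc_builder.h"

    static flatcc_builder_ref_t write_bytes(flatcc_builder_t *B)
    {
        uint8_t data[4] = { 1, 2, 3, 4 };
        return flatcc_builder_create_vector(B, data, 4, sizeof(uint8_t), 1,
                (size_t)FLATBUFFERS_UOFFSET_MAX / sizeof(uint8_t));
    }
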
+
+size_t flatcc_builder_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+/* This function destroys the source content but avoids stack allocation. */
+static flatcc_builder_ref_t _create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count, const utype_t *types)
+{
+ uoffset_t vec_size, vec_pad;
+ uoffset_t length_prefix, offset;
+ uoffset_t i;
+ soffset_t base;
+ iov_state_t iov;
+
+ if ((uoffset_t)count > max_offset_count) {
+ return 0;
+ }
+ set_min_align(B, field_size);
+ vec_size = (uoffset_t)(count * field_size);
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, field_size);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(vec, vec_size);
+ push_iov(_pad, vec_pad);
+ base = B->emit_start - (soffset_t)iov.len;
+ for (i = 0; i < (uoffset_t)count; ++i) {
+ /*
+ * 0 is either end of buffer, start of vtables, or start of
+ * buffer depending on the direction in which the buffer is
+ * built. None of these can create a valid 0 reference but it
+ * is easy to create by mistake when manually building offset
+ * vectors.
+ *
+ * Unions do permit nulls, but only when the type is NONE.
+ */
+ if (vec[i] != 0) {
+ offset = (uoffset_t)
+ (vec[i] - base - (soffset_t)(i * field_size) - (soffset_t)field_size);
+ write_uoffset(&vec[i], offset);
+ if (types) {
+ check(types[i] != 0, "union vector cannot have non-null element with type NONE");
+ }
+ } else {
+ if (types) {
+ check(types[i] == 0, "union vector cannot have null element without type NONE");
+ } else {
+ check(0, "offset vector cannot have null element");
+ }
+ }
+ }
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count)
+{
+ return _create_offset_vector_direct(B, vec, count, 0);
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = flatcc_builder_create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B, const utype_t *types)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = _create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count), types))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_ref_t *pref;
+ flatcc_builder_utype_t *putype;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error(uref.type != 0 || uref.value == 0, -1, "expected null value for type NONE");
+ if (uref.value != 0) {
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union value");
+ *pref = uref.value;
+ }
+ putype = flatcc_builder_table_add(B, id - 1, utype_size, utype_size);
+ check_error(putype != 0, -1, "unable to add union type");
+ write_utype(putype, uref.type);
+ return 0;
+}
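
A sketch of storing a union member with the function above (editor's illustration, not part of this patch). The member table is built first; its ref and schema type code then go into a flatcc_builder_union_ref_t. As in the function above, the id names the value field and id - 1 the hidden type field; the type code and field ids here are hypothetical.

    #include "flatcc/flatcc_builder.h"

    /* `member_ref` is a table reference previously returned by flatcc_builder_end_table. */
    static int add_union_member(flatcc_builder_t *B, flatcc_builder_ref_t member_ref)
    {
        flatcc_builder_union_ref_t uref;
        uref.type = 1;            /* schema-defined union member type code */
        uref.value = member_ref;
        return flatcc_builder_table_add_union(B, 2, uref);  /* value field id 2, type field id 1 */
    }
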
+
+int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
+ flatcc_builder_union_vec_ref_t uvref)
+{
+ flatcc_builder_ref_t *pref;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error((uvref.type == 0) == (uvref.value == 0), -1, "expected both type and value vector, or neither");
+ if (uvref.type != 0) {
+ pref = flatcc_builder_table_add_offset(B, id - 1);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.type;
+
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.value;
+ }
+ return 0;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_ref_t *refs;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return uvref;
+ }
+ if (0 == flatcc_builder_extend_offset_vector(B, count)) {
+ return uvref;
+ }
+ if (0 == (types = push_ds(B, (uoffset_t)(utype_size * count)))) {
+ return uvref;
+ }
+
+ /* Safe even if push_ds caused stack reallocation. */
+ refs = flatcc_builder_offset_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B,
+ types, refs, count);
+ /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+
+ if (0 == (uvref.value = _create_offset_vector_direct(B, data, count, types))) {
+ return uvref;
+ }
+ if (0 == (uvref.type = flatcc_builder_create_type_vector(B, types, count))) {
+ return uvref;
+ }
+ return uvref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, size_t count)
+{
+ return flatcc_builder_create_vector(B, types, count,
+ utype_size, utype_size, max_utype_count);
+}
+
+int flatcc_builder_start_union_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = union_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_union_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_union_ref_t *urefs;
+ flatcc_builder_ref_t *refs;
+ size_t i, count;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+
+ /*
+ * We could split the union vector in-place, but then we would have
+ * to deal with strict pointer aliasing rules, which is not worthwhile,
+ * so we create a new offset and type vector on the stack.
+ *
+ * We assume the stack is sufficiently aligned as is.
+ */
+ count = flatcc_builder_union_vector_count(B);
+ if (0 == (refs = push_ds(B, (uoffset_t)(count * (utype_size + field_size))))) {
+ return uvref;
+ }
+ types = (flatcc_builder_utype_t *)(refs + count);
+
+ /* Safe even if push_ds caused stack reallocation. */
+ urefs = flatcc_builder_union_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B, types, refs, count);
+ /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+void *flatcc_builder_union_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_union_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(union_size * count));
+}
+
+int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_union_ref_t *p;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (frame(container.vector.count) == max_union_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, union_size))) {
+ return 0;
+ }
+ *p = uref;
+ return p;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, urefs, (uoffset_t)(union_size * count));
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ uoffset_t s_pad;
+ uoffset_t length_prefix;
+ iov_state_t iov;
+
+ if (len > max_string_len) {
+ return 0;
+ }
+ write_uoffset(&length_prefix, (uoffset_t)len);
+ /* Add 1 for zero termination. */
+ s_pad = front_pad(B, (uoffset_t)len + 1, field_size) + 1;
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(s, len);
+ push_iov(_pad, s_pad);
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_create_string(B, s, strlen(s));
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_create_string(B, s, strnlen(s, max_len));
+}
+
+flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t string_ref;
+
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ FLATCC_ASSERT(frame(container.vector.count) == B->ds_offset);
+ if (0 == (string_ref = flatcc_builder_create_string(B,
+ (const char *)B->ds, B->ds_offset))) {
+ return 0;
+ }
+ exit_frame(B);
+ return string_ref;
+}
+
+char *flatcc_builder_string_edit(flatcc_builder_t *B)
+{
+ return (char *)B->ds;
+}
+
+size_t flatcc_builder_string_len(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align)
+{
+ /*
+ * We align the offset relative to the first table field, excluding
+ * the header holding the vtable reference. On the stack, `ds_first`
+ * is aligned to 8 bytes thanks to the `enter_frame` logic, and this
+ * provides a safe way to update the fields on the stack, but here
+ * we are concerned with the target buffer alignment.
+ *
+ * We could also have aligned relative to the end of the table which
+ * would allow us to emit each field immediately, but it would be a
+ * confusing user experience wrt. field ordering, and it would add
+ * more variability to vtable layouts, thus reducing reuse, and
+ * frequent emissions to external emitter interface would be
+ * sub-optimal. Also, with that approach, the vtable offsets would
+ * have to be adjusted at table end.
+ *
+ * As we have it, each emit occurs at table end, vector end, string
+ * end, or buffer end, which might be helpful to various backend
+ * processors.
+ */
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+ if (align > B->align) {
+ B->align = align;
+ }
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)size);
+ return push_ds_field(B, (uoffset_t)size, align, (voffset_t)id);
+}
+
+void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return B->ds + B->ds_offset - size;
+}
+
+void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align)
+{
+ void *p;
+
+ if ((p = flatcc_builder_table_add(B, id, size, align))) {
+ memcpy(p, data, size);
+ }
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)field_size);
+ return push_ds_offset_field(B, (voffset_t)id);
+}
+
+uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B)
+{
+ uint16_t old_min_align = B->min_align;
+
+ B->min_align = field_size;
+ return old_min_align;
+}
+
+void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t pushed_align)
+{
+ set_min_align(B, pushed_align);
+}
+
+uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B)
+{
+ return B->min_align;
+}
+
+void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable)
+{
+ /* Inverted because we zero all memory in B on init. */
+ B->disable_vt_clustering = !enable;
+}
+
+void flatcc_builder_set_block_align(flatcc_builder_t *B, uint16_t align)
+{
+ B->block_align = align;
+}
+
+int flatcc_builder_get_level(flatcc_builder_t *B)
+{
+ return B->level;
+}
+
+void flatcc_builder_set_max_level(flatcc_builder_t *B, int max_level)
+{
+ B->max_level = max_level;
+ if (B->limit_level < B->max_level) {
+ B->limit_level = B->max_level;
+ }
+}
+
+size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B)
+{
+ return (size_t)(B->emit_end - B->emit_start);
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B)
+{
+ return B->emit_start;
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_end(flatcc_builder_t *B)
+{
+ return B->emit_end;
+}
+
+void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size)
+{
+ B->vb_flush_limit = size;
+}
+
+void flatcc_builder_set_identifier(flatcc_builder_t *B, const char identifier[identifier_size])
+{
+ set_identifier(identifier);
+}
+
+enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B)
+{
+ return B->frame ? frame(type) : flatcc_builder_empty;
+}
+
+enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level)
+{
+ if (level < 1 || level > B->level) {
+ return flatcc_builder_empty;
+ }
+ return B->frame[level - B->level].type;
+}
+
+void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ if (B->is_default_emitter) {
+ return flatcc_emitter_get_direct_buffer(&B->default_emit_context, size_out);
+ } else {
+ if (size_out) {
+ *size_out = 0;
+ }
+ }
+ return 0;
+}
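
A sketch of retrieving the finished buffer (editor's illustration, not part of this patch): try zero-copy access first and fall back to a heap copy. The direct buffer only exists with the default emitter and only when the emitter can expose the data as one contiguous region; it points into emitter-owned pages and must not be freed.

    #include "flatcc/flatcc_builder.h"

    static void *get_finished_buffer(flatcc_builder_t *B, size_t *size_out)
    {
        void *p = flatcc_builder_get_direct_buffer(B, size_out);
        if (p) {
            return p;  /* emitter-owned; invalid after reset/clear, do not free */
        }
        return flatcc_builder_finalize_buffer(B, size_out);  /* heap copy; free with flatcc_builder_free */
    }
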
+
+void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size)
+{
+ /* User is allowed to call tentatively to see if there is support. */
+ if (!B->is_default_emitter) {
+ return 0;
+ }
+ buffer = flatcc_emitter_copy_buffer(&B->default_emit_context, buffer, size);
+ check(buffer, "default emitter declined to copy buffer");
+ return buffer;
+}
+
+void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+
+ buffer = FLATCC_BUILDER_ALLOC(size);
+
+ if (!buffer) {
+ check(0, "failed to allocated memory for finalized buffer");
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ check(0, "default emitter declined to copy buffer");
+ FLATCC_BUILDER_FREE(buffer);
+ buffer = 0;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
+
+void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t align;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+ align = flatcc_builder_get_buffer_alignment(B);
+
+ size = (size + align - 1) & ~(align - 1);
+ buffer = FLATCC_BUILDER_ALIGNED_ALLOC(align, size);
+
+ if (!buffer) {
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ FLATCC_BUILDER_ALIGNED_FREE(buffer);
+ buffer = 0;
+ goto done;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
+
+void *flatcc_builder_aligned_alloc(size_t alignment, size_t size)
+{
+ return FLATCC_BUILDER_ALIGNED_ALLOC(alignment, size);
+}
+
+void flatcc_builder_aligned_free(void *p)
+{
+ FLATCC_BUILDER_ALIGNED_FREE(p);
+}
+
+void *flatcc_builder_alloc(size_t size)
+{
+ return FLATCC_BUILDER_ALLOC(size);
+}
+
+void flatcc_builder_free(void *p)
+{
+ FLATCC_BUILDER_FREE(p);
+}
+
+void *flatcc_builder_get_emit_context(flatcc_builder_t *B)
+{
+ return B->emit_context;
+}
diff --git a/flatcc/src/runtime/emitter.c b/flatcc/src/runtime/emitter.c
new file mode 100644
index 0000000..089ea00
--- /dev/null
+++ b/flatcc/src/runtime/emitter.c
@@ -0,0 +1,269 @@
+#include <stdlib.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_emitter.h"
+
+static int advance_front(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->front && E->front->prev != E->back) {
+ E->front->prev->page_offset = E->front->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ E->front = E->front->prev;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->front) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->front = p;
+ goto done;
+ }
+ /*
+ * The first page is shared between front and back to avoid
+ * an unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->front->page_offset = E->front->next->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int advance_back(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->back && E->back->next != E->front) {
+ E->back = E->back->next;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->back) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->back = p;
+ goto done;
+ }
+ /*
+ * The first page is shared between front and back to avoid
+ * an unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->back_cursor = E->back->page;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->back->page_offset = E->back->prev->page_offset + FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int copy_front(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ data += size;
+ while (size) {
+ k = size;
+ if (k > E->front_left) {
+ k = E->front_left;
+ if (k == 0) {
+ if (advance_front(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ E->front_cursor -= k;
+ E->front_left -= k;
+ data -= k;
+ size -= k;
+ memcpy(E->front_cursor, data, k);
+ }
+ return 0;
+}
+
+static int copy_back(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ while (size) {
+ k = size;
+ if (k > E->back_left) {
+ k = E->back_left;
+ if (k == 0) {
+ if (advance_back(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ memcpy(E->back_cursor, data, k);
+ size -= k;
+ data += k;
+ E->back_cursor += k;
+ E->back_left -= k;
+ }
+ return 0;
+}
+
+int flatcc_emitter_recycle_page(flatcc_emitter_t *E, flatcc_emitter_page_t *p)
+{
+ if (p == E->front || p == E->back) {
+ return -1;
+ }
+ p->next->prev = p->prev;
+ p->prev->next = p->next;
+ p->prev = E->front->prev;
+ p->next = E->front;
+ p->prev->next = p;
+ p->next->prev = p;
+ return 0;
+}
+
+void flatcc_emitter_reset(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!E->front) {
+ return;
+ }
+ E->back = E->front;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->front->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ /* Heuristic to reduce peak allocation over time. */
+ if (E->used_average == 0) {
+ E->used_average = E->used;
+ }
+ E->used_average = E->used_average * 3 / 4 + E->used / 4;
+ E->used = 0;
+ while (E->used_average * 2 < E->capacity && E->back->next != E->front) {
+ /* We deallocate the page after back since it is less likely to be hot in cache. */
+ p = E->back->next;
+ E->back->next = p->next;
+ p->next->prev = E->back;
+ FLATCC_EMITTER_FREE(p);
+ E->capacity -= FLATCC_EMITTER_PAGE_SIZE;
+ }
+}
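
As a worked example of the averaging heuristic above (editor's illustration): with a previous used_average of 64 KiB and a reset after only 16 KiB of use, the new average is 64 * 3/4 + 16 / 4 = 52 KiB. Surplus pages are then only released while used_average * 2 stays below the retained capacity, so capacity shrinks gradually rather than on a single light-weight buffer.
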
+
+void flatcc_emitter_clear(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!p) {
+ return;
+ }
+ p->prev->next = 0;
+ while (p->next) {
+ p = p->next;
+ FLATCC_EMITTER_FREE(p->prev);
+ }
+ FLATCC_EMITTER_FREE(p);
+ memset(E, 0, sizeof(*E));
+}
+
+int flatcc_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len)
+{
+ flatcc_emitter_t *E = emit_context;
+ uint8_t *p;
+
+ E->used += len;
+ if (offset < 0) {
+ if (len <= E->front_left) {
+ E->front_cursor -= len;
+ E->front_left -= len;
+ p = E->front_cursor;
+ goto copy;
+ }
+ iov += iov_count;
+ while (iov_count--) {
+ --iov;
+ if (copy_front(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ }
+ } else {
+ if (len <= E->back_left) {
+ p = E->back_cursor;
+ E->back_cursor += len;
+ E->back_left -= len;
+ goto copy;
+ }
+ while (iov_count--) {
+ if (copy_back(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ ++iov;
+ }
+ }
+ return 0;
+copy:
+ while (iov_count--) {
+ memcpy(p, iov->iov_base, iov->iov_len);
+ p += iov->iov_len;
+ ++iov;
+ }
+ return 0;
+}
+
+void *flatcc_emitter_copy_buffer(flatcc_emitter_t *E, void *buf, size_t size)
+{
+ flatcc_emitter_page_t *p;
+ size_t len;
+
+ if (size < E->used) {
+ return 0;
+ }
+ if (!E->front) {
+ return 0;
+ }
+ if (E->front == E->back) {
+ memcpy(buf, E->front_cursor, E->used);
+ return buf;
+ }
+ len = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ memcpy(buf, E->front_cursor, len);
+ buf = (uint8_t *)buf + len;
+ p = E->front->next;
+ while (p != E->back) {
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE);
+ buf = (uint8_t *)buf + FLATCC_EMITTER_PAGE_SIZE;
+ p = p->next;
+ }
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE - E->back_left);
+ return buf;
+}
diff --git a/flatcc/src/runtime/json_parser.c b/flatcc/src/runtime/json_parser.c
new file mode 100644
index 0000000..4472af2
--- /dev/null
+++ b/flatcc/src/runtime/json_parser.c
@@ -0,0 +1,1297 @@
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_json_parser.h"
+#include "flatcc/flatcc_assert.h"
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+#include "flatcc/portable/pparsefp.h"
+#include "flatcc/portable/pbase64.h"
+
+#if FLATCC_USE_SSE4_2
+#ifdef __SSE4_2__
+#define USE_SSE4_2
+#endif
+#endif
+
+#ifdef USE_SSE4_2
+#include <nmmintrin.h>
+#define cmpistri(end, haystack, needle, flags) \
+ if (end - haystack >= 16) do { \
+ int i; \
+ __m128i a = _mm_loadu_si128((const __m128i *)(needle)); \
+ do { \
+ __m128i b = _mm_loadu_si128((const __m128i *)(haystack)); \
+ i = _mm_cmpistri(a, b, flags); \
+ haystack += i; \
+ } while (i == 16 && end - haystack >= 16); \
+ } while(0)
+#endif
+
+const char *flatcc_json_parser_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_parser_error_##no: \
+ return str;
+ FLATCC_JSON_PARSE_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+const char *flatcc_json_parser_set_error(flatcc_json_parser_t *ctx, const char *loc, const char *end, int err)
+{
+ if (!ctx->error) {
+ ctx->error = err;
+ ctx->pos = (int)(loc - ctx->line_start + 1);
+ ctx->error_loc = loc;
+ }
+ return end;
+}
+
+const char *flatcc_json_parser_string_part(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+/*
+ * Disabled because it doesn't catch all control characters, but is
+ * useful for performance testing.
+ */
+#if 0
+//#ifdef USE_SSE4_2
+ cmpistri(end, buf, "\"\\\0\r\n\t\v\f", _SIDD_POSITIVE_POLARITY);
+#else
+ /*
+     * Testing for signed char >= 0x20 would also capture UTF-8
+     * encodings that we could verify, as well as invalid encodings
+     * like 0xff, but we do not want to enforce strict UTF-8.
+ */
+ while (buf != end && *buf != '\"' && ((unsigned char)*buf) >= 0x20 && *buf != '\\') {
+ ++buf;
+ }
+#endif
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ if (*buf == '"') {
+ return buf;
+ }
+ if (*buf < 0x20) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_character);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_space_ext(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+again:
+#ifdef USE_SSE4_2
+ /*
+     * We could also include line breaks here, but then error reporting
+     * suffers and it makes little practical difference.
+ */
+ //cmpistri(end, buf, "\x20\t\v\f\r\n", _SIDD_NEGATIVE_POLARITY);
+ cmpistri(end, buf, "\x20\t\v\f", _SIDD_NEGATIVE_POLARITY);
+#else
+#if FLATCC_ALLOW_UNALIGNED_ACCESS
+ while (end - buf >= 16) {
+ if (*buf > 0x20) {
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ if (((uint64_t *)buf)[0] != 0x2020202020202020) {
+descend:
+ if (((uint32_t *)buf)[0] == 0x20202020) {
+ buf += 4;
+ }
+#endif
+ if (((uint16_t *)buf)[0] == 0x2020) {
+ buf += 2;
+ }
+ if (*buf == 0x20) {
+ ++buf;
+ }
+ if (*buf > 0x20) {
+ return buf;
+ }
+ break;
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ }
+ if (((uint64_t *)buf)[1] != 0x2020202020202020) {
+ buf += 8;
+ goto descend;
+ }
+ buf += 16;
+#endif
+ }
+#endif
+#endif
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ while (buf != end && *buf <= 0x20) {
+ switch (*buf) {
+ case 0x0d: buf += (end - buf > 1 && buf[1] == 0x0a);
+            /* Consume a following LF, or treat a lone CR as a line break. */
+ ++ctx->line; ctx->line_start = ++buf; continue;
+ case 0x0a: ++ctx->line; ctx->line_start = ++buf; continue;
+ case 0x09: ++buf; continue;
+ case 0x20: goto again; /* Don't consume here, sync with power of 2 spaces. */
+ default: return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ return buf;
+}
+
+static int decode_hex4(const char *buf, uint32_t *result)
+{
+ uint32_t u, x;
+ char c;
+
+ u = 0;
+ c = buf[0];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u = x << 12;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 12;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[1];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 8;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 8;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 4;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x;
+ } else {
+ return -1;
+ }
+ }
+ *result = u;
+ return 0;
+}
+
+static int decode_unicode_char(uint32_t u, char *code)
+{
+ if (u <= 0x7f) {
+ code[0] = 1;
+ code[1] = (char)u;
+ } else if (u <= 0x7ff) {
+ code[0] = 2;
+ code[1] = (char)(0xc0 | (u >> 6));
+ code[2] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0xffff) {
+ code[0] = 3;
+ code[1] = (char)(0xe0 | (u >> 12));
+ code[2] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[3] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0x10ffff) {
+ code[0] = 4;
+ code[1] = (char)(0xf0 | (u >> 18));
+ code[2] = (char)(0x80 | ((u >> 12) & 0x3f));
+ code[3] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[4] = (char)(0x80 | (u & 0x3f));
+ } else {
+ code[0] = 0;
+ return -1;
+ }
+ return 0;
+}
+
+static inline uint32_t combine_utf16_surrogate_pair(uint32_t high, uint32_t low)
+{
+ return (high - 0xd800) * 0x400 + (low - 0xdc00) + 0x10000;
+}
+
+static inline int decode_utf16_surrogate_pair(uint32_t high, uint32_t low, char *code)
+{
+ return decode_unicode_char(combine_utf16_surrogate_pair(high, low), code);
+}
+
+
+/*
+ * UTF-8 code points can take up to 4 bytes, but the \uXXXX escape
+ * syntax in JSON can only express code points that encode to at most
+ * 3 UTF-8 bytes. To handle the range U+10000..U+10FFFF a UTF-16
+ * surrogate pair (two consecutive escapes) must be used. If this is
+ * not detected, the surrogate halves survive in the output, which is
+ * not valid UTF-8 but often tolerated. Emojis generally require such
+ * a pair, unless encoded unescaped in UTF-8.
+ *
+ * If a high surrogate is detected and a low surrogate follows, the
+ * combined sequence is decoded as a 4-byte UTF-8 sequence. Unpaired
+ * surrogate halves are decoded as-is despite producing invalid UTF-8.
+ */
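+
+/*
+ * Worked example (illustrative only, not part of the build): the escape
+ * sequence "\ud83d\ude00" combines to U+1F600 via
+ * (0xd83d - 0xd800) * 0x400 + (0xde00 - 0xdc00) + 0x10000 = 0x1f600,
+ * which decode_unicode_char emits as the 4-byte UTF-8 sequence
+ * f0 9f 98 80.
+ */
+#if 0
+static void example_surrogate_pair(void)
+{
+    char code[5];
+
+    /* High and low halves as decode_hex4 would deliver them. */
+    decode_utf16_surrogate_pair(0xd83d, 0xde00, code);
+    /* Now code[0] == 4 and code[1..4] hold f0 9f 98 80. */
+}
+#endif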
+
+const char *flatcc_json_parser_string_escape(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_json_parser_escape_buffer_t code)
+{
+ char c, v;
+ uint32_t u, u2;
+
+ if (end - buf < 2 || buf[0] != '\\') {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ switch (buf[1]) {
+ case 'x':
+ v = 0;
+ code[0] = 1;
+ if (end - buf < 4) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ v |= (c - '0') << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= (c - 'a' + 10) << 4;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ v |= c - '0';
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= c - 'a' + 10;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ code[1] = v;
+ return buf + 4;
+ case 'u':
+ if (end - buf < 6) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ if (decode_hex4(buf + 2, &u)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ };
+        /* If a high UTF-16 surrogate half was detected */
+ if (u >= 0xd800 && u <= 0xdbff &&
+                /* and there is space for a matching low half */
+ end - buf >= 12 &&
+ /* and there is a second escape following immediately */
+ buf[6] == '\\' && buf[7] == 'u' &&
+ /* and it is valid hex */
+ decode_hex4(buf + 8, &u2) == 0 &&
+                /* and it is a low UTF-16 surrogate half */
+ u2 >= 0xdc00 && u2 <= 0xdfff) {
+ /* then decode the pair into a single 4 byte utf-8 sequence. */
+ if (decode_utf16_surrogate_pair(u, u2, code)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ return buf + 12;
+ /*
+             * Otherwise decode an unmatched surrogate half as-is, like
+             * any other code point. Some systems might depend on these
+             * surviving. Leave ignored errors for the next parse step.
+ */
+ }
+ decode_unicode_char(u, code);
+ return buf + 6;
+ case 't':
+ code[0] = 1;
+ code[1] = '\t';
+ return buf + 2;
+ case 'n':
+ code[0] = 1;
+ code[1] = '\n';
+ return buf + 2;
+ case 'r':
+ code[0] = 1;
+ code[1] = '\r';
+ return buf + 2;
+ case 'b':
+ code[0] = 1;
+ code[1] = '\b';
+ return buf + 2;
+ case 'f':
+ code[0] = 1;
+ code[1] = '\f';
+ return buf + 2;
+ case '\"':
+ code[0] = 1;
+ code[1] = '\"';
+ return buf + 2;
+ case '\\':
+ code[0] = 1;
+ code[1] = '\\';
+ return buf + 2;
+ case '/':
+ code[0] = 1;
+ code[1] = '/';
+ return buf + 2;
+ default:
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+}
+
+/* Only applies to unquoted constants during generic parsing; otherwise the value is skipped as a string. */
+const char *flatcc_json_parser_skip_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char c;
+ const char *k;
+
+ while (buf != end) {
+ c = *buf;
+ if ((c & 0x80) || (c == '_') || (c >= '0' && c <= '9') || c == '.') {
+ ++buf;
+ continue;
+ }
+ /* Upper case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ ++buf;
+ continue;
+ }
+ buf = flatcc_json_parser_space(ctx, (k = buf), end);
+ if (buf == k) {
+ return buf;
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_match_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos, int *more)
+{
+ const char *mark = buf, *k = buf + pos;
+
+ if (end - buf <= pos) {
+ *more = 0;
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ buf = flatcc_json_parser_space(ctx, k, end);
+ if (buf == end) {
+ /*
+             * We cannot decide the `more` flag here. Just return end
+             * and let the parser handle the sync point in case it is
+             * able to resume the parse later on. For the same reason
+             * we do not lower ctx->unquoted.
+ */
+ *more = 0;
+ return buf;
+ }
+ if (buf != k) {
+ char c = *buf;
+ /*
+             * Space was seen - and thus we have a valid match.
+             * If the next char is an identifier start symbol
+             * we raise the `more` flag to support syntax like:
+             *
+             *     `flags: Hungry Sleepy Awake, ...`
+             *
+             * (See the illustrative sketch after this function.)
+ */
+ if (c == '_' || (c & 0x80)) {
+ *more = 1;
+ return buf;
+ }
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ *more = 1;
+ return buf;
+ }
+ }
+ /*
+ * Space was not seen, so the match is only valid if followed
+ * by a JSON separator symbol, and there cannot be more values
+ * following so `more` is lowered.
+ */
+ *more = 0;
+ if (*buf == ',' || *buf == '}' || *buf == ']') {
+ return buf;
+ }
+ return mark;
+ }
+#endif
+ buf = k;
+ if (*buf == 0x20) {
+ ++buf;
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ /* We accept untrimmed space like " Green Blue ". */
+ if (*buf != '\"') {
+ *more = 1;
+ return buf;
+ }
+ }
+ switch (*buf) {
+ case '\\':
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ case '\"':
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ *more = 0;
+ return buf;
+ }
+ *more = 0;
+ return mark;
+}
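+
+/*
+ * Illustrative sketch (not part of the build): with
+ * FLATCC_JSON_PARSE_ALLOW_UNQUOTED enabled, the `more` flag above lets
+ * generated enum parsers accept space-separated unquoted flags as well
+ * as the quoted form. The field and enum names are assumptions for
+ * demonstration only.
+ */
+#if 0
+/* Both inputs describe the same multi-flag value: */
+static const char quoted_flags[]   = "{ \"flags\": \"Hungry Sleepy Awake\" }";
+static const char unquoted_flags[] = "{ flags: Hungry Sleepy Awake }";
+#endif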
+
+const char *flatcc_json_parser_unmatched_symbol(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (ctx->flags & flatcc_json_parser_f_skip_unknown) {
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf != end && *buf == ':') {
+ ++buf;
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ return flatcc_json_parser_generic_json(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_symbol);
+ }
+}
+
+static const char *__flatcc_json_parser_number(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end) {
+ return buf;
+ }
+ if (*buf == '-') {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ }
+ if (*buf == '0') {
+ ++buf;
+ } else {
+ if (*buf < '1' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ if (buf != end) {
+ if (*buf == '.') {
+ ++buf;
+            if (buf == end || *buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E')) {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ if (*buf == '+' || *buf == '-') {
+ ++buf;
+ }
+ if (buf == end || *buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+
+ /*
+     * For strtod termination we must ensure the tail cannot continue
+     * the number, including non-JSON exponent forms. The simplest
+     * approach is to accept any character that may legally follow a
+     * JSON number and to reject end of buffer, since we expect at
+     * least a closing '}'.
+     *
+     * Note that ',' is not actually safe if strtod uses a non-POSIX
+     * locale where ',' is the decimal separator.
+ */
+ if (buf != end) {
+ switch (*buf) {
+ case ',':
+ case ':':
+ case ']':
+ case '}':
+ case ' ':
+ case '\r':
+ case '\t':
+ case '\n':
+ case '\v':
+ return buf;
+ }
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+}
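+
+/*
+ * Illustrative examples (not part of the build) of the tail check
+ * above: a number is only accepted when followed by a character that
+ * can legally continue the surrounding JSON.
+ */
+#if 0
+/* "42,"  and "42}"      - accepted, the ',' or '}' terminates the number. */
+/* "42"   at buffer end  - rejected, at least a closing '}' is expected.   */
+/* "42x"                 - rejected as an invalid numeric tail.            */
+#endif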
+
+const char *flatcc_json_parser_double(flatcc_json_parser_t *ctx, const char *buf, const char *end, double *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_double(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_double_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
+
+const char *flatcc_json_parser_float(flatcc_json_parser_t *ctx, const char *buf, const char *end, float *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_float(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_float_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
+
+const char *flatcc_json_parser_generic_json(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char stack[FLATCC_JSON_PARSE_GENERIC_MAX_NEST];
+ char *sp, *spend;
+ const char *k;
+ flatcc_json_parser_escape_buffer_t code;
+ int more = 0;
+
+ sp = stack;
+ spend = sp + FLATCC_JSON_PARSE_GENERIC_MAX_NEST;
+
+again:
+ if (buf == end) {
+ return buf;
+ }
+ if (sp != stack && sp[-1] == '}') {
+ /* Inside an object, about to read field name. */
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ }
+ if (*buf != ':') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ switch (*buf) {
+ case '\"':
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, buf, end);
+ if (buf != end && *buf == '\"') {
+ break;
+ }
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ }
+ buf = flatcc_json_parser_string_end(ctx, buf, end);
+ break;
+ case '-':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ buf = __flatcc_json_parser_number(ctx, buf, end);
+ break;
+#if !FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ case 't': case 'f':
+ {
+ uint8_t v;
+ buf = flatcc_json_parser_bool(ctx, (k = buf), end, &v);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ break;
+ case 'n':
+ buf = flatcc_json_parser_null((k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#endif
+ case '[':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = ']';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == ']') {
+ break;
+ }
+ goto again;
+ case '{':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = '}';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == '}') {
+ break;
+ }
+ goto again;
+
+ default:
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ buf = flatcc_json_parser_skip_constant(ctx, (k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#else
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+#endif
+ }
+ while (buf != end && sp != stack) {
+ --sp;
+ if (*sp == ']') {
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ } else {
+ buf = flatcc_json_parser_object_end(ctx, buf, end, &more);
+ }
+ if (more) {
+ ++sp;
+ goto again;
+ }
+ }
+ if (buf == end && sp != stack) {
+ return flatcc_json_parser_set_error(ctx, buf, end, sp[-1] == ']' ?
+ flatcc_json_parser_error_unbalanced_array :
+ flatcc_json_parser_error_unbalanced_object);
+ }
+ /* Any ',', ']', or '}' belongs to parent context. */
+ return buf;
+}
+
+const char *flatcc_json_parser_integer(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_sign, uint64_t *value)
+{
+ uint64_t x0, x = 0;
+ const char *k;
+
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ *value_sign = *buf == '-';
+ buf += *value_sign;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ x0 = x;
+ x = x * 10 + (uint64_t)(*buf - '0');
+ if (x0 > x) {
+            return flatcc_json_parser_set_error(ctx, buf, end, *value_sign ?
+ flatcc_json_parser_error_underflow : flatcc_json_parser_error_overflow);
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ /* Give up, but don't fail the parse just yet, it might be a valid symbol. */
+ return buf;
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E' || *buf == '.')) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_float_unexpected);
+ }
+ *value = x;
+ return buf;
+}
+
+/* Array Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_uint8_vector_base64(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref, int urlsafe)
+{
+ const char *mark;
+ uint8_t *pval;
+ size_t max_len;
+ size_t decoded_len, src_len;
+ int mode;
+ int ret;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end || *buf != '\"') {
+ goto base64_failed;
+ }
+ max_len = base64_decoded_size((size_t)(buf - mark));
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) {
+ goto failed;
+ }
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, max_len))) {
+ goto failed;
+ }
+ src_len = (size_t)(buf - mark);
+ decoded_len = max_len;
+ if ((ret = base64_decode(pval, (const uint8_t *)mark, &decoded_len, &src_len, mode))) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (src_len != (size_t)(buf - mark)) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (decoded_len < max_len) {
+ if (flatcc_builder_truncate_vector(ctx->ctx, max_len - decoded_len)) {
+ goto failed;
+ }
+ }
+ if (!(*ref = flatcc_builder_end_vector(ctx->ctx))) {
+ goto failed;
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+
+base64_failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end,
+ urlsafe ? flatcc_json_parser_error_base64url : flatcc_json_parser_error_base64);
+}
+
+const char *flatcc_json_parser_char_array(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, char *s, size_t n)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+ size_t k = 0;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ if (buf != end)
+ while (*buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end) return end;
+ k = (size_t)(buf - mark);
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ if (*buf == '\"') break;
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (buf == end) return end;
+ k = (size_t)code[0];
+ mark = code + 1;
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ }
+ if (n != 0) {
+ if (ctx->flags & flatcc_json_parser_f_reject_array_underflow) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_underflow);
+ }
+ memset(s, 0, n);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+}
+
+
+/* String Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_string(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf != end && *buf == '\"') {
+ *ref = flatcc_builder_create_string(ctx->ctx, mark, (size_t)(buf - mark));
+ } else {
+ if (flatcc_builder_start_string(ctx->ctx) ||
+ 0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (0 == flatcc_builder_append_string(ctx->ctx, code + 1, (size_t)code[0])) goto failed;
+ if (end != (buf = flatcc_json_parser_string_part(ctx, (mark = buf), end))) {
+ if (0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ }
+ }
+ *ref = flatcc_builder_end_string(ctx->ctx);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return buf;
+}
+
+/* UNIONS */
+
+/*
+ * Unions are difficult to parse because the type field may appear after
+ * the union value, and because having two fields opens up many more
+ * possible error scenarios. We must store each union of a table
+ * temporarily - this cannot live in the generated table parser function
+ * because there could be many unions (about 2^15 with default voffsets)
+ * although usually there will only be a few. We also cannot store the
+ * data encoded in the existing table buffer in the builder because we
+ * may have to remove it due to schema forwarding, and removing it would
+ * mess up the table layout. Nor can we naively allocate it dynamically,
+ * for performance reasons. Instead we place the temporary union data in
+ * a separate frame from the table buffer, but on a similar stack. This
+ * is called the user stack, and we manage one frame per table that is
+ * known to contain unions.
+ *
+ * Even with the temporary structures in place we still cannot parse a
+ * union before we know its type. Because pretty printers typically sort
+ * JSON fields alphabetically, we are likely to receive the type late,
+ * with `<union_name>_type` following `<union_name>`. To deal with this
+ * we store a backtracking pointer, parse the value generically in a
+ * first pass, and reparse it once the type is known. This can happen
+ * recursively with nested tables containing unions, which is why we
+ * need a stack frame.
+ *
+ * If the type field is stored first we just record the type in the
+ * custom frame and parse the union value with the right type as soon as
+ * we see it. The parse will be much faster, and we strongly recommend
+ * that flatbuffer serializers emit the type first, but we cannot
+ * require it.
+ *
+ * The actual overhead of dealing with the custom stack frame is fairly
+ * cheap once we get past the first custom stack allocation.
+ *
+ * We cannot update the builder before both the union value and its type
+ * have been parsed because the type might have to be ignored due to
+ * schema forwarding. Therefore the union type must be cached or reread.
+ * This happens trivially by calling the union parser with the type as
+ * argument, but it is important to be aware of when refactoring the
+ * code.
+ *
+ * The user frame is created at table start and remains valid until
+ * table exit, but we cannot assume that pointers into the frame remain
+ * valid. Specifically, we cannot use frame pointers after calling the
+ * union parser. This means the union type must be cached or reread so
+ * it can be added to the table. Because the type is passed to the union
+ * parser this caching happens automatically, but it is still important
+ * to be aware that it is required.
+ *
+ * The frame reserves temporary information for all unions the table
+ * holds, enumerated 0 <= `union_index` < `union_total`, where
+ * `union_total` is a fixed, type-specific number.
+ *
+ * The `type_present` flag is needed because union types range from
+ * 0..255 and we need an extra bit to distinguish "not present" from the
+ * union type `NONE = 0`.
+ */
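+
+/*
+ * Illustrative sketch (not part of the build): the two field orderings
+ * the backtracking logic must handle. The schema and field names are
+ * assumptions for demonstration only.
+ */
+#if 0
+/* Type first: the union value can be parsed directly in one pass. */
+static const char type_first[] =
+    "{ \"weapon_type\": \"Sword\", \"weapon\": { \"damage\": 3 } }";
+/* Type last: the value is first skipped as generic JSON and the
+ * backtrace pointer is set; once the type arrives the value is
+ * reparsed with the correct union member parser. */
+static const char type_last[] =
+    "{ \"weapon\": { \"damage\": 3 }, \"weapon_type\": \"Sword\" }";
+#endif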
+
+typedef struct {
+ const char *backtrace;
+ const char *line_start;
+ int line;
+ uint8_t type_present;
+ uint8_t type;
+ /* Union vectors: */
+ uoffset_t count;
+ size_t h_types;
+} __flatcc_json_parser_union_entry_t;
+
+typedef struct {
+ size_t union_total;
+ size_t union_count;
+ __flatcc_json_parser_union_entry_t unions[1];
+} __flatcc_json_parser_union_frame_t;
+
+const char *flatcc_json_parser_prepare_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_total, size_t *handle)
+{
+ __flatcc_json_parser_union_frame_t *f;
+
+ if (!(*handle = flatcc_builder_enter_user_frame(ctx->ctx,
+ sizeof(__flatcc_json_parser_union_frame_t) + (union_total - 1) *
+ sizeof(__flatcc_json_parser_union_entry_t)))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+ }
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, *handle);
+ /* Frames have zeroed memory. */
+ f->union_total = union_total;
+ return buf;
+}
+
+const char *flatcc_json_parser_finalize_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t handle)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+
+ if (f->union_count) {
+ buf = flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_incomplete);
+ }
+ flatcc_builder_exit_user_frame_at(ctx->ctx, handle);
+ return buf;
+}
+
+const char *flatcc_json_parser_union(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = &f->unions[union_index];
+ flatcc_builder_union_ref_t uref;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ /* If we supported table: null, we should not count it, but we don't. */
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ uref.type = e->type;
+ if (e->type == 0) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_none_present);
+ }
+ --f->union_count;
+ buf = union_parser(ctx, buf, end, e->type, &uref.value);
+ if (buf != end) {
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ flatcc_builder_union_ref_t uref;
+ const char *mark;
+ int line;
+ const char *line_start;
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &e->type);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, buf, end, type_parsers, &e->type);
+ }
+ /* Only count the union if the type is not NONE. */
+ if (e->backtrace == 0) {
+ f->union_count += e->type != 0;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ /*
+     * IMPORTANT: we cannot access any value through the frame or entry
+     * pointers after calling the union parser because the parse might
+     * cause the stack to reallocate. We would have to reread the frame
+     * pointer if it were needed - it isn't here, but keep this in mind
+     * when refactoring the code.
+ *
+ * IMPORTANT 2: Do not assign buf here. We are backtracking.
+ */
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ uref.type = e->type;
+ if (end == union_parser(ctx, e->backtrace, end, e->type, &uref.value)) {
+ return end;
+ }
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+}
+
+static const char *_parse_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t h_types, uoffset_t count,
+ flatbuffers_voffset_t id, flatcc_json_parser_union_f *union_parser)
+{
+ flatcc_builder_ref_t ref = 0, *pref;
+ utype_t *types;
+ int more;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(ctx->ctx)) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ i = 0;
+ while (more) {
+ if (i == count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ buf = union_parser(ctx, buf, end, types[i], &ref);
+ if (buf == end) {
+ return buf;
+ }
+ if (!(pref = flatcc_builder_extend_offset_vector(ctx->ctx, 1))) goto failed;
+ *pref = ref;
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ ++i;
+ }
+ if (i != count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ if (!(ref = flatcc_builder_end_offset_vector_for_unions(ctx->ctx, types))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id))) goto failed;
+ *pref = ref;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+const char *flatcc_json_parser_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ --f->union_count;
+ buf = _parse_union_vector(ctx, buf, end, e->h_types, e->count, id, union_parser);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser,
+ flatcc_json_parser_is_known_type_f accept_type)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ const char *mark;
+ int line;
+ const char *line_start;
+ int more;
+ utype_t val;
+ void *pval;
+ flatcc_builder_ref_t ref, *pref;
+ utype_t *types;
+ size_t size;
+ size_t h_types;
+ uoffset_t count;
+
+#if FLATBUFFERS_UTYPE_MAX != UINT8_MAX
+#error "Update union vector parser to support current union type definition."
+#endif
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ while (more) {
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, 1))) goto failed;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, (mark = buf), end, type_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ /* Parse unknown types as NONE */
+ if (!accept_type(val)) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_unknown)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_union);
+ }
+ val = 0;
+ }
+ flatbuffers_uint8_write_to_pe(pval, val);
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ }
+ count = (uoffset_t)flatcc_builder_vector_count(ctx->ctx);
+ e->count = count;
+ size = count * utype_size;
+ /* Store type vector so it is accessible to the table vector parser. */
+ h_types = flatcc_builder_enter_user_frame(ctx->ctx, size);
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ memcpy(types, flatcc_builder_vector_edit(ctx->ctx), size);
+ if (!((ref = flatcc_builder_end_vector(ctx->ctx)))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id - 1))) goto failed;
+ *pref = ref;
+
+ /* Restore union frame after possible invalidation due to types frame allocation. */
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ e = f->unions + union_index;
+
+ e->h_types = h_types;
+ if (e->backtrace == 0) {
+ ++f->union_count;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ /* We must not assign buf here because we are backtracking. */
+ if (end == _parse_union_vector(ctx, e->backtrace, end, h_types, count, id, union_parser)) return end;
+ /*
+ * NOTE: We do not need the user frame anymore, but if we did, it
+ * would have to be restored from its handle due to the above parse.
+ */
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+int flatcc_json_parser_table_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ flatcc_builder_buffer_flags_t builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
+
+int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ flatcc_builder_buffer_flags_t builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
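+
+/*
+ * Usage sketch (not part of the build): driving a generated table
+ * parser through flatcc_json_parser_table_as_root. Here
+ * `MyTable_parse_json_table` stands in for whatever parser flatcc
+ * generates for the schema's root table and is an assumption for
+ * demonstration only.
+ */
+#if 0
+#include "flatcc/flatcc_builder.h"
+#include "flatcc/flatcc_json_parser.h"
+
+static void *parse_example(const char *json, size_t len, size_t *size_out)
+{
+    flatcc_builder_t builder;
+    flatcc_json_parser_t parser;
+    void *buffer = 0;
+
+    flatcc_builder_init(&builder);
+    if (flatcc_json_parser_table_as_root(&builder, &parser, json, len,
+            0 /* flags */, 0 /* no file identifier */,
+            MyTable_parse_json_table) == 0) {
+        /* On success, extract the finished flatbuffer (caller frees it). */
+        buffer = flatcc_builder_finalize_buffer(&builder, size_out);
+    }
+    flatcc_builder_clear(&builder);
+    return buffer;
+}
+#endif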
diff --git a/flatcc/src/runtime/json_printer.c b/flatcc/src/runtime/json_printer.c
new file mode 100644
index 0000000..4ebe1c1
--- /dev/null
+++ b/flatcc/src/runtime/json_printer.c
@@ -0,0 +1,1486 @@
+/*
+ * Runtime support for printing flatbuffers to JSON.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_assert.h"
+
+/*
+ * Grisu significantly improves printing speed of floating point values
+ * and also the overall printing speed when floating point values are
+ * present in non-trivial amounts. (Also applies to parsing).
+ */
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_json_printer.h"
+#include "flatcc/flatcc_identifier.h"
+
+#include "flatcc/portable/pprintint.h"
+#include "flatcc/portable/pprintfp.h"
+#include "flatcc/portable/pbase64.h"
+
+
+#define RAISE_ERROR(err) flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_##err)
+
+const char *flatcc_json_printer_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_printer_error_##no: \
+ return str;
+ FLATCC_JSON_PRINT_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+#define flatcc_json_printer_utype_enum_f flatcc_json_printer_union_type_f
+#define flatbuffers_utype_read_from_pe __flatbuffers_utype_read_from_pe
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+
+#if FLATBUFFERS_UTYPE_MAX == UINT8_MAX
+#define print_utype print_uint8
+#else
+#ifdef FLATBUFFERS_UTYPE_MIN
+#define print_utype print_int64
+#else
+#define print_utype print_uint64
+#endif
+#endif
+
+static inline const void *read_uoffset_ptr(const void *p)
+{
+ return (uint8_t *)p + __flatbuffers_uoffset_read_from_pe(p);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline const void *get_field_ptr(flatcc_json_printer_table_descriptor_t *td, int id)
+{
+ uoffset_t vo = (uoffset_t)(id + 2) * (uoffset_t)sizeof(voffset_t);
+
+ if (vo >= (uoffset_t)td->vsize) {
+ return 0;
+ }
+ vo = read_voffset(td->vtable, vo);
+ if (vo == 0) {
+ return 0;
+ }
+ return (uint8_t *)td->table + vo;
+}
+
+#define print_char(c) *ctx->p++ = (c)
+
+#define print_null() do { \
+ print_char('n'); \
+ print_char('u'); \
+ print_char('l'); \
+ print_char('l'); \
+} while (0)
+
+#define print_start(c) do { \
+ ++ctx->level; \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_end(c) do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ --ctx->level; \
+ print_indent(ctx); \
+ } \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_space() do { \
+ *ctx->p = ' '; \
+ ctx->p += !!ctx->indent; \
+} while (0)
+
+#define print_nl() do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ print_indent(ctx); \
+ } else { \
+ flatcc_json_printer_flush_partial(ctx); \
+ } \
+} while (0)
+
+/* Call at the end so print_end does not have to check for level. */
+#define print_last_nl() do { \
+ if (ctx->indent && ctx->level == 0) { \
+ *ctx->p++ = '\n'; \
+ } \
+ ctx->flush(ctx, 1); \
+} while (0)
+
+int flatcc_json_printer_fmt_float(char *buf, float n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_float(buf, n);
+#else
+ return print_float(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_double(char *buf, double n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_double(buf, n);
+#else
+ return print_double(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_bool(char *buf, int n)
+{
+ if (n) {
+ memcpy(buf, "true", 4);
+ return 4;
+ }
+ memcpy(buf, "false", 5);
+ return 5;
+}
+
+static void print_ex(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memcpy(ctx->p, s, k);
+ ctx->p += k;
+ s += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+}
+
+static inline void print(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ if (ctx->p + n >= ctx->pflush) {
+ print_ex(ctx, s, n);
+ } else {
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+ }
+}
+
+static void print_escape(flatcc_json_printer_t *ctx, unsigned char c)
+{
+ unsigned char x;
+
+ print_char('\\');
+ switch (c) {
+ case '"': print_char('\"'); break;
+ case '\\': print_char('\\'); break;
+ case '\t' : print_char('t'); break;
+ case '\f' : print_char('f'); break;
+ case '\r' : print_char('r'); break;
+ case '\n' : print_char('n'); break;
+ case '\b' : print_char('b'); break;
+ default:
+ print_char('u');
+ print_char('0');
+ print_char('0');
+ x = c >> 4;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ x = c & 15;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ break;
+ }
+}
+
+/*
+ * Even though we know the string length, we need to scan for escape
+ * characters. There may be embedded zeroes. Because FlatBuffers strings
+ * are always zero terminated, we assume and optimize for this.
+ *
+ * We enforce \u00xx escapes for control characters, but not for invalid
+ * bytes like 0xff - this makes it possible to pass some other code
+ * pages through transparently even though the result is formally not
+ * valid. (Formally JSON also supports UTF-16/32 little/big endian, but
+ * flatbuffers only supports UTF-8 and we expect this in JSON
+ * input/output too.)
+ */
+static void print_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c;
+ size_t k;
+
+ print_char('\"');
+ for (;;) {
+ c = (unsigned char)*p;
+ while (c >= 0x20 && c != '\"' && c != '\\') {
+ c = (unsigned char)*++p;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ n -= k;
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
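+
+/*
+ * Illustrative example (not part of the build) of the scanner above:
+ * plain runs are copied verbatim and only '"', '\\' and control
+ * characters are escaped, so bytes >= 0x80 pass through untouched.
+ */
+#if 0
+/* Input string:   ab"c<TAB>d        */
+/* Printed output: "ab\"c\td"        */
+#endif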
+
+/*
+ * Similar to print_string, but null termination is not guaranteed, and
+ * trailing nulls are stripped.
+ */
+static void print_char_array(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c = 0;
+ size_t k;
+
+ while (n > 0 && s[n - 1] == '\0') --n;
+
+ print_char('\"');
+ for (;;) {
+ while (n) {
+ c = (unsigned char)*p;
+ if (c < 0x20 || c == '\"' || c == '\\') break;
+ ++p;
+ --n;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
+
+static void print_uint8_vector_base64_object(flatcc_json_printer_t *ctx, const void *p, int mode)
+{
+ const int unpadded_mode = mode & ~base64_enc_modifier_padding;
+ size_t k, n, len;
+ const uint8_t *data;
+ size_t data_len, src_len;
+
+ data_len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ data = (const uint8_t *)p + uoffset_size;
+
+ print_char('\"');
+
+ len = base64_encoded_size(data_len, mode);
+ if (ctx->p + len >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ while (ctx->p + len > ctx->pflush) {
+        /* Every 4 output chars consume exactly 3 input bytes before final padding. */
+ k = (size_t)(ctx->pflush - ctx->p) & ~(size_t)3;
+ n = k * 3 / 4;
+ FLATCC_ASSERT(n > 0);
+ src_len = k * 3 / 4;
+ base64_encode((uint8_t *)ctx->p, data, 0, &src_len, unpadded_mode);
+ ctx->p += k;
+ data += n;
+ data_len -= n;
+ ctx->flush(ctx, 0);
+ len = base64_encoded_size(data_len, mode);
+ }
+ base64_encode((uint8_t *)ctx->p, data, 0, &data_len, mode);
+ ctx->p += len;
+ print_char('\"');
+}
+
+static void print_indent_ex(flatcc_json_printer_t *ctx, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memset(ctx->p, ' ', k);
+ ctx->p += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+}
+
+static inline void print_indent(flatcc_json_printer_t *ctx)
+{
+ size_t n = (size_t)(ctx->level * ctx->indent);
+
+ if (ctx->p + n > ctx->pflush) {
+ print_indent_ex(ctx, n);
+ } else {
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+ }
+}
+
+/*
+ * Helpers for external use - these do not do automatic pretty
+ * printing, but they do escape strings.
+ */
+void flatcc_json_printer_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print_string(ctx, s, n);
+}
+
+void flatcc_json_printer_write(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print(ctx, s, n);
+}
+
+void flatcc_json_printer_nl(flatcc_json_printer_t *ctx)
+{
+ print_char('\n');
+ flatcc_json_printer_flush_partial(ctx);
+}
+
+void flatcc_json_printer_char(flatcc_json_printer_t *ctx, char c)
+{
+ print_char(c);
+}
+
+void flatcc_json_printer_indent(flatcc_json_printer_t *ctx)
+{
+ /*
+ * This is only needed when indent is 0 but helps external users
+ * to avoid flushing when indenting.
+ */
+ print_indent(ctx);
+}
+
+void flatcc_json_printer_add_level(flatcc_json_printer_t *ctx, int n)
+{
+ ctx->level += n;
+}
+
+int flatcc_json_printer_get_level(flatcc_json_printer_t *ctx)
+{
+ return ctx->level;
+}
+
+static inline void print_symbol(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+}
+
+static inline void print_name(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ print_nl();
+ print_symbol(ctx, name, len);
+ print_char(':');
+ print_space();
+}
+
+#define __flatcc_define_json_printer_scalar(TN, T) \
+void flatcc_json_printer_ ## TN( \
+ flatcc_json_printer_t *ctx, T v) \
+{ \
+ ctx->p += print_ ## TN(v, ctx->p); \
+}
+
+__flatcc_define_json_printer_scalar(uint8, uint8_t)
+__flatcc_define_json_printer_scalar(uint16, uint16_t)
+__flatcc_define_json_printer_scalar(uint32, uint32_t)
+__flatcc_define_json_printer_scalar(uint64, uint64_t)
+__flatcc_define_json_printer_scalar(int8, int8_t)
+__flatcc_define_json_printer_scalar(int16, int16_t)
+__flatcc_define_json_printer_scalar(int32, int32_t)
+__flatcc_define_json_printer_scalar(int64, int64_t)
+__flatcc_define_json_printer_scalar(float, float)
+__flatcc_define_json_printer_scalar(double, double)
+
+void flatcc_json_printer_enum(flatcc_json_printer_t *ctx, const char *symbol, size_t len)
+{
+ print_symbol(ctx, symbol, len);
+}
+
+void flatcc_json_printer_delimit_enum_flags(flatcc_json_printer_t *ctx, int multiple)
+{
+#if FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS
+ int quote = !ctx->unquote || multiple;
+#else
+ int quote = !ctx->unquote;
+#endif
+ *ctx->p = '"';
+ ctx->p += quote;
+}
+
+void flatcc_json_printer_enum_flag(flatcc_json_printer_t *ctx, int count, const char *symbol, size_t len)
+{
+ *ctx->p = ' ';
+ ctx->p += count > 0;
+ print(ctx, symbol, len);
+}
+
+static inline void print_string_object(flatcc_json_printer_t *ctx, const void *p)
+{
+ size_t len;
+ const char *s;
+
+ len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ s = (const char *)p + uoffset_size;
+ print_string(ctx, s, len);
+}
+
+#define __define_print_scalar_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _struct_field(flatcc_json_printer_t *ctx,\
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+void flatcc_json_printer_char_array_struct_field(
+ flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len, size_t count)
+{
+ p = (void *)((size_t)p + offset);
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_char_array(ctx, p, count);
+}
+
+#define __define_print_scalar_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count) \
+{ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_scalar_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+#define __define_print_scalar_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+
+#define __define_print_enum_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_enum_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+static inline void print_table_object(flatcc_json_printer_t *ctx,
+ const void *p, int ttl, flatcc_json_printer_table_f pf)
+{
+ flatcc_json_printer_table_descriptor_t td;
+
+ if (!--ttl) {
+ flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_deep_recursion);
+ return;
+ }
+ print_start('{');
+ td.count = 0;
+ td.ttl = ttl;
+ td.table = p;
+ td.vtable = (uint8_t *)p - __flatbuffers_soffset_read_from_pe(p);
+ td.vsize = __flatbuffers_voffset_read_from_pe(td.vtable);
+ pf(ctx, &td);
+ print_end('}');
+}
+
+void flatcc_json_printer_string_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+}
+
+void flatcc_json_printer_uint8_vector_base64_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len, int urlsafe)
+{
+ const void *p = get_field_ptr(td, id);
+ int mode;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ mode |= base64_enc_modifier_padding;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_uint8_vector_base64_object(ctx, read_uoffset_ptr(p), mode);
+ }
+}
+
+#define __define_print_scalar_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ const void *p = get_field_ptr(td, id); \
+ uoffset_t count; \
+ \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+#define __define_print_enum_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ const void *p; \
+ uoffset_t count; \
+ \
+ if (ctx->noenum) { \
+ flatcc_json_printer_ ## TN ## _vector_field(ctx, td, id, name, len);\
+ return; \
+ } \
+ p = get_field_ptr(td, id); \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+__define_print_scalar_field(uint8, uint8_t)
+__define_print_scalar_field(uint16, uint16_t)
+__define_print_scalar_field(uint32, uint32_t)
+__define_print_scalar_field(uint64, uint64_t)
+__define_print_scalar_field(int8, int8_t)
+__define_print_scalar_field(int16, int16_t)
+__define_print_scalar_field(int32, int32_t)
+__define_print_scalar_field(int64, int64_t)
+__define_print_scalar_field(bool, flatbuffers_bool_t)
+__define_print_scalar_field(float, float)
+__define_print_scalar_field(double, double)
+
+__define_print_enum_field(uint8, uint8_t)
+__define_print_enum_field(uint16, uint16_t)
+__define_print_enum_field(uint32, uint32_t)
+__define_print_enum_field(uint64, uint64_t)
+__define_print_enum_field(int8, int8_t)
+__define_print_enum_field(int16, int16_t)
+__define_print_enum_field(int32, int32_t)
+__define_print_enum_field(int64, int64_t)
+__define_print_enum_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_optional_field(uint8, uint8_t)
+__define_print_scalar_optional_field(uint16, uint16_t)
+__define_print_scalar_optional_field(uint32, uint32_t)
+__define_print_scalar_optional_field(uint64, uint64_t)
+__define_print_scalar_optional_field(int8, int8_t)
+__define_print_scalar_optional_field(int16, int16_t)
+__define_print_scalar_optional_field(int32, int32_t)
+__define_print_scalar_optional_field(int64, int64_t)
+__define_print_scalar_optional_field(bool, flatbuffers_bool_t)
+__define_print_scalar_optional_field(float, float)
+__define_print_scalar_optional_field(double, double)
+
+__define_print_enum_optional_field(uint8, uint8_t)
+__define_print_enum_optional_field(uint16, uint16_t)
+__define_print_enum_optional_field(uint32, uint32_t)
+__define_print_enum_optional_field(uint64, uint64_t)
+__define_print_enum_optional_field(int8, int8_t)
+__define_print_enum_optional_field(int16, int16_t)
+__define_print_enum_optional_field(int32, int32_t)
+__define_print_enum_optional_field(int64, int64_t)
+__define_print_enum_optional_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_struct_field(uint8, uint8_t)
+__define_print_scalar_struct_field(uint16, uint16_t)
+__define_print_scalar_struct_field(uint32, uint32_t)
+__define_print_scalar_struct_field(uint64, uint64_t)
+__define_print_scalar_struct_field(int8, int8_t)
+__define_print_scalar_struct_field(int16, int16_t)
+__define_print_scalar_struct_field(int32, int32_t)
+__define_print_scalar_struct_field(int64, int64_t)
+__define_print_scalar_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_struct_field(float, float)
+__define_print_scalar_struct_field(double, double)
+
+__define_print_scalar_array_struct_field(uint8, uint8_t)
+__define_print_scalar_array_struct_field(uint16, uint16_t)
+__define_print_scalar_array_struct_field(uint32, uint32_t)
+__define_print_scalar_array_struct_field(uint64, uint64_t)
+__define_print_scalar_array_struct_field(int8, int8_t)
+__define_print_scalar_array_struct_field(int16, int16_t)
+__define_print_scalar_array_struct_field(int32, int32_t)
+__define_print_scalar_array_struct_field(int64, int64_t)
+__define_print_scalar_array_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_array_struct_field(float, float)
+__define_print_scalar_array_struct_field(double, double)
+
+__define_print_enum_array_struct_field(uint8, uint8_t)
+__define_print_enum_array_struct_field(uint16, uint16_t)
+__define_print_enum_array_struct_field(uint32, uint32_t)
+__define_print_enum_array_struct_field(uint64, uint64_t)
+__define_print_enum_array_struct_field(int8, int8_t)
+__define_print_enum_array_struct_field(int16, int16_t)
+__define_print_enum_array_struct_field(int32, int32_t)
+__define_print_enum_array_struct_field(int64, int64_t)
+__define_print_enum_array_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_enum_struct_field(uint8, uint8_t)
+__define_print_enum_struct_field(uint16, uint16_t)
+__define_print_enum_struct_field(uint32, uint32_t)
+__define_print_enum_struct_field(uint64, uint64_t)
+__define_print_enum_struct_field(int8, int8_t)
+__define_print_enum_struct_field(int16, int16_t)
+__define_print_enum_struct_field(int32, int32_t)
+__define_print_enum_struct_field(int64, int64_t)
+__define_print_enum_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_vector_field(utype, flatbuffers_utype_t)
+__define_print_scalar_vector_field(uint8, uint8_t)
+__define_print_scalar_vector_field(uint16, uint16_t)
+__define_print_scalar_vector_field(uint32, uint32_t)
+__define_print_scalar_vector_field(uint64, uint64_t)
+__define_print_scalar_vector_field(int8, int8_t)
+__define_print_scalar_vector_field(int16, int16_t)
+__define_print_scalar_vector_field(int32, int32_t)
+__define_print_scalar_vector_field(int64, int64_t)
+__define_print_scalar_vector_field(bool, flatbuffers_bool_t)
+__define_print_scalar_vector_field(float, float)
+__define_print_scalar_vector_field(double, double)
+
+__define_print_enum_vector_field(utype, flatbuffers_utype_t)
+__define_print_enum_vector_field(uint8, uint8_t)
+__define_print_enum_vector_field(uint16, uint16_t)
+__define_print_enum_vector_field(uint32, uint32_t)
+__define_print_enum_vector_field(uint64, uint64_t)
+__define_print_enum_vector_field(int8, int8_t)
+__define_print_enum_vector_field(int16, int16_t)
+__define_print_enum_vector_field(int32, int32_t)
+__define_print_enum_vector_field(int64, int64_t)
+__define_print_enum_vector_field(bool, flatbuffers_bool_t)
+
+void flatcc_json_printer_struct_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ size_t size,
+ flatcc_json_printer_struct_f pf)
+{
+ const uint8_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ p += uoffset_size;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ --count;
+ }
+ while (count--) {
+ p += size;
+ print_char(',');
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_string_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_union_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const uoffset_t *pt = get_field_ptr(td, id - 1);
+ const uoffset_t *p = get_field_ptr(td, id);
+ utype_t *types, type;
+ uoffset_t count;
+ char type_name[FLATCC_JSON_PRINT_NAME_LEN_MAX + 5];
+ flatcc_json_printer_union_descriptor_t ud;
+
+ ud.ttl = td->ttl;
+ if (len > FLATCC_JSON_PRINT_NAME_LEN_MAX) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier too long");
+ return;
+ }
+ memcpy(type_name, name, len);
+ memcpy(type_name + len, "_type", 5);
+ if (p && pt) {
+ flatcc_json_printer_utype_enum_vector_field(ctx, td, id - 1,
+ type_name, len + 5, ptf);
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ pt = read_uoffset_ptr(pt);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ ++pt;
+ types = (utype_t *)pt;
+ print_name(ctx, name, len);
+ print_start('[');
+
+ if (count) {
+ type = __flatbuffers_utype_read_from_pe(types);
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ --count;
+ }
+ while (count--) {
+ ++p;
+ ++types;
+ type = __flatbuffers_utype_read_from_pe(types);
+ print_char(',');
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+}
+
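+/*
+ * A present union field prints a "<name>_type" member with the union type
+ * (via the enum printer, or numerically when noenum is set) and, when the
+ * type is not NONE (0), a "<name>" member with the union value.
+ */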
+void flatcc_json_printer_union_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const void *pt = get_field_ptr(td, id - 1);
+ const void *p = get_field_ptr(td, id);
+ utype_t type;
+ flatcc_json_printer_union_descriptor_t ud;
+
+ if (!p || !pt) {
+ return;
+ }
+ type = __flatbuffers_utype_read_from_pe(pt);
+ if (td->count++) {
+ print_char(',');
+ }
+ print_nl();
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ print(ctx, "_type", 5);
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ print_char(':');
+ print_space();
+ if (ctx->noenum) {
+ ctx->p += print_utype(type, ctx->p);
+ } else {
+ ptf(ctx, type);
+ }
+ if (type != 0) {
+ print_char(',');
+ print_name(ctx, name, len);
+ ud.ttl = td->ttl;
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ }
+}
+
+void flatcc_json_printer_union_table(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_table_f pf)
+{
+ print_table_object(ctx, read_uoffset_ptr(ud->member), ud->ttl, pf);
+}
+
+void flatcc_json_printer_union_struct(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_struct_f pf)
+{
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(ud->member));
+ print_end('}');
+}
+
+void flatcc_json_printer_union_string(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud)
+{
+ print_string_object(ctx, read_uoffset_ptr(ud->member));
+}
+
+void flatcc_json_printer_embedded_struct_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ flatcc_json_printer_struct_f pf)
+{
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, (uint8_t *)p + offset);
+ print_end('}');
+}
+
+void flatcc_json_printer_embedded_struct_array_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ size_t size, size_t count,
+ flatcc_json_printer_struct_f pf)
+{
+ size_t i;
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('[');
+ for (i = 0; i < count; ++i) {
+ if (i > 0) {
+ print_char(',');
+ }
+        print_start('{');
+ pf(ctx, (uint8_t *)p + offset + i * size);
+ print_end('}');
+ }
+ print_end(']');
+}
+
+void flatcc_json_printer_struct_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_struct_f *pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+}
+
+/*
+ * Make sure the buffer identifier is valid before assuming the rest of
+ * the buffer is sane.
+ * NOTE: this won't work with type hashes because these can contain
+ * nulls in the fid string. In that case pass a null fid to disable
+ * the check.
+ */
+static int accept_header(flatcc_json_printer_t * ctx,
+ const void *buf, size_t bufsiz, const char *fid)
+{
+ flatbuffers_thash_t id, id2 = 0;
+
+ if (buf == 0 || bufsiz < offset_size + FLATBUFFERS_IDENTIFIER_SIZE) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "buffer header too small");
+ return 0;
+ }
+ if (fid != 0) {
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe((uint8_t *)buf + offset_size);
+ if (!(id2 == 0 || id == id2)) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier mismatch");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int flatcc_json_printer_struct_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
+
+int flatcc_json_printer_table_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid, flatcc_json_printer_table_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_table_object(ctx, read_uoffset_ptr(buf), FLATCC_JSON_PRINT_MAX_LEVELS, pf);
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
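+/*
+ * Example of printing a buffer's root table to a FILE (sketch; the callback
+ * `MyTable_print_json_table` stands for a generated table print function and
+ * is a hypothetical name; passing a null fid skips the identifier check in
+ * accept_header above):
+ *
+ *     static int print_to_file(FILE *fp, const void *buf, size_t bufsiz)
+ *     {
+ *         flatcc_json_printer_t ctx;
+ *         int n;
+ *
+ *         if (flatcc_json_printer_init(&ctx, fp)) return -1;
+ *         n = flatcc_json_printer_table_as_root(&ctx, buf, bufsiz, 0,
+ *                 MyTable_print_json_table);
+ *         flatcc_json_printer_clear(&ctx);
+ *         return n;
+ *     }
+ */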
+
+void flatcc_json_printer_struct_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+}
+
+void flatcc_json_printer_table_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ ++buf;
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(buf), td->ttl, pf);
+}
+
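+/*
+ * Flush strategy: print operations only test ctx->pflush at convenient
+ * points, and the init functions reserve at least FLATCC_JSON_PRINT_RESERVE
+ * bytes beyond the flush threshold so primitives such as numbers never have
+ * to flush midway. The file flush below writes out flush_size bytes and
+ * moves any spill beyond the threshold to the front of the buffer; with
+ * `all` set it writes out everything buffered.
+ */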
+static void __flatcc_json_printer_flush(flatcc_json_printer_t *ctx, int all)
+{
+ if (!all && ctx->p >= ctx->pflush) {
+ size_t spill = (size_t)(ctx->p - ctx->pflush);
+
+ fwrite(ctx->buf, ctx->flush_size, 1, ctx->fp);
+ memcpy(ctx->buf, ctx->buf + ctx->flush_size, spill);
+ ctx->p = ctx->buf + spill;
+ ctx->total += ctx->flush_size;
+ } else {
+ size_t len = (size_t)(ctx->p - ctx->buf);
+
+ fwrite(ctx->buf, len, 1, ctx->fp);
+ ctx->p = ctx->buf;
+ ctx->total += len;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init(flatcc_json_printer_t *ctx, void *fp)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->fp = fp ? fp : stdout;
+ ctx->flush = __flatcc_json_printer_flush;
+ if (!(ctx->buf = FLATCC_JSON_PRINTER_ALLOC(FLATCC_JSON_PRINT_BUFFER_SIZE))) {
+ return -1;
+ }
+ ctx->own_buffer = 1;
+ ctx->size = FLATCC_JSON_PRINT_BUFFER_SIZE;
+ ctx->flush_size = FLATCC_JSON_PRINT_FLUSH_SIZE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ /*
+ * Make sure we have space for primitive operations such as printing numbers
+ * without having to flush.
+ */
+ FLATCC_ASSERT(ctx->flush_size + FLATCC_JSON_PRINT_RESERVE <= ctx->size);
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ (void)all;
+
+ if (ctx->p >= ctx->pflush) {
+ RAISE_ERROR(overflow);
+ ctx->total += (size_t)(ctx->p - ctx->buf);
+ ctx->p = ctx->buf;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_buffer(flatcc_json_printer_t *ctx, char *buffer, size_t buffer_size)
+{
+ FLATCC_ASSERT(buffer_size >= FLATCC_JSON_PRINT_RESERVE);
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ return -1;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = buffer;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_buffer;
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_dynamic_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ size_t len = (size_t)(ctx->p - ctx->buf);
+ char *p;
+
+ (void)all;
+
+ *ctx->p = '\0';
+ if (ctx->p < ctx->pflush) {
+ return;
+ }
+ p = FLATCC_JSON_PRINTER_REALLOC(ctx->buf, ctx->size * 2);
+ if (!p) {
+ RAISE_ERROR(overflow);
+ ctx->total += len;
+ ctx->p = ctx->buf;
+ } else {
+ ctx->size *= 2;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->buf = p;
+ ctx->p = p + len;
+ ctx->pflush = p + ctx->flush_size;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_dynamic_buffer(flatcc_json_printer_t *ctx, size_t buffer_size)
+{
+ if (buffer_size == 0) {
+ buffer_size = FLATCC_JSON_PRINT_DYN_BUFFER_SIZE;
+ }
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ buffer_size = FLATCC_JSON_PRINT_RESERVE;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = FLATCC_JSON_PRINTER_ALLOC(buffer_size);
+ ctx->own_buffer = 1;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_dynamic_buffer;
+ if (!ctx->buf) {
+ RAISE_ERROR(overflow);
+ return -1;
+ }
+ return 0;
+}
+
+void *flatcc_json_printer_get_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ ctx->flush(ctx, 0);
+ if (buffer_size) {
+ *buffer_size = (size_t)(ctx->p - ctx->buf);
+ }
+ return ctx->buf;
+}
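+/*
+ * Example of printing into a caller-provided fixed-size buffer (sketch;
+ * `MyTable_print_json_table` is a hypothetical generated callback). On
+ * overflow the fixed buffer flush handler raises an error instead of
+ * growing, which the root printer reports as a negative return value:
+ *
+ *     static int print_to_fixed_buffer(const void *buf, size_t bufsiz,
+ *             char *out, size_t out_size)
+ *     {
+ *         flatcc_json_printer_t ctx;
+ *         size_t n;
+ *
+ *         if (flatcc_json_printer_init_buffer(&ctx, out, out_size)) return -1;
+ *         if (flatcc_json_printer_table_as_root(&ctx, buf, bufsiz, 0,
+ *                 MyTable_print_json_table) < 0) return -1;
+ *         (void)flatcc_json_printer_get_buffer(&ctx, &n);
+ *         return (int)n;
+ *     }
+ */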
+
+void *flatcc_json_printer_finalize_dynamic_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ void *buffer;
+
+ buffer = flatcc_json_printer_get_buffer(ctx, buffer_size);
+ memset(ctx, 0, sizeof(*ctx));
+ return buffer;
+}
+
+void flatcc_json_printer_clear(flatcc_json_printer_t *ctx)
+{
+ if (ctx->own_buffer && ctx->buf) {
+ FLATCC_JSON_PRINTER_FREE(ctx->buf);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+}
diff --git a/flatcc/src/runtime/refmap.c b/flatcc/src/runtime/refmap.c
new file mode 100644
index 0000000..a2497f0
--- /dev/null
+++ b/flatcc/src/runtime/refmap.c
@@ -0,0 +1,248 @@
+/*
+ * Optional file that can be included in the runtime library to support DAG
+ * cloning with the builder; it may also be used standalone for custom
+ * purposes. See also comments in `flatcc/flatcc_builder.h`.
+ *
+ * Note that dynamic construction takes place and that large offset
+ * vectors might consume significant space if there are not many shared
+ * references. In the basic use case no allocation takes place because a
+ * few references can be held using only a small stack allocated hash
+ * table.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_refmap.h"
+#include "flatcc/flatcc_alloc.h"
+#include "flatcc/flatcc_assert.h"
+
+#define _flatcc_refmap_calloc FLATCC_CALLOC
+#define _flatcc_refmap_free FLATCC_FREE
+
+/* Can be used as a primitive defense against collision attacks. */
+#ifdef FLATCC_HASH_SEED
+#define _flatcc_refmap_seed FLATCC_HASH_SEED
+#else
+#define _flatcc_refmap_seed 0x2f693b52
+#endif
+
+static inline size_t _flatcc_refmap_above_load_factor(size_t count, size_t buckets)
+{
+ static const size_t d = 256;
+ static const size_t n = (size_t)((FLATCC_REFMAP_LOAD_FACTOR) * 256.0f);
+
+ return count >= buckets * n / d;
+}
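+/*
+ * The load factor is applied in fixed point with denominator 256. With a
+ * load factor of e.g. 0.7, n = (size_t)(0.7 * 256.0f) = 179, so the table
+ * grows once count >= buckets * 179 / 256, i.e. at roughly 70% occupancy.
+ */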
+
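+/* Linear probing: the bucket count is a power of two, so N = buckets - 1
+ * acts as a mask that wraps (k + i) back into the table. */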
+#define _flatcc_refmap_probe(k, i, N) ((k + i) & N)
+
+void flatcc_refmap_clear(flatcc_refmap_t *refmap)
+{
+ if (refmap->table && refmap->table != refmap->min_table) {
+ _flatcc_refmap_free(refmap->table);
+ }
+ flatcc_refmap_init(refmap);
+}
+
+static inline size_t _flatcc_refmap_hash(const void *src)
+{
+ /* MurmurHash3 64-bit finalizer */
+ uint64_t x;
+
+ x = (uint64_t)((size_t)src) ^ _flatcc_refmap_seed;
+
+ x ^= x >> 33;
+ x *= 0xff51afd7ed558ccdULL;
+ x ^= x >> 33;
+ x *= 0xc4ceb9fe1a85ec53ULL;
+ x ^= x >> 33;
+ return (size_t)x;
+}
+
+void flatcc_refmap_reset(flatcc_refmap_t *refmap)
+{
+ if (refmap->count) {
+ memset(refmap->table, 0, sizeof(refmap->table[0]) * refmap->buckets);
+ }
+ refmap->count = 0;
+}
+
+/*
+ * Technically resize also supports shrinking, which may be useful for
+ * adaptations, but the current hash table never deletes individual items.
+ */
+int flatcc_refmap_resize(flatcc_refmap_t *refmap, size_t count)
+{
+ const size_t min_buckets = sizeof(refmap->min_table) / sizeof(refmap->min_table[0]);
+
+ size_t i;
+ size_t buckets;
+ size_t buckets_old;
+ struct flatcc_refmap_item *T_old;
+
+ if (count < refmap->count) {
+ count = refmap->count;
+ }
+ buckets = min_buckets;
+
+ while (_flatcc_refmap_above_load_factor(count, buckets)) {
+ buckets *= 2;
+ }
+ if (refmap->buckets == buckets) {
+ return 0;
+ }
+ T_old = refmap->table;
+ buckets_old = refmap->buckets;
+ if (buckets == min_buckets) {
+ memset(refmap->min_table, 0, sizeof(refmap->min_table));
+ refmap->table = refmap->min_table;
+ } else {
+ refmap->table = _flatcc_refmap_calloc(buckets, sizeof(refmap->table[0]));
+ if (refmap->table == 0) {
+ refmap->table = T_old;
+ FLATCC_ASSERT(0); /* out of memory */
+ return -1;
+ }
+ }
+ refmap->buckets = buckets;
+ refmap->count = 0;
+ for (i = 0; i < buckets_old; ++i) {
+ if (T_old[i].src) {
+ flatcc_refmap_insert(refmap, T_old[i].src, T_old[i].ref);
+ }
+ }
+ if (T_old && T_old != refmap->min_table) {
+ _flatcc_refmap_free(T_old);
+ }
+ return 0;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_insert(flatcc_refmap_t *refmap, const void *src, flatcc_refmap_ref_t ref)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (src == 0) return ref;
+ if (_flatcc_refmap_above_load_factor(refmap->count, refmap->buckets)) {
+ if (flatcc_refmap_resize(refmap, refmap->count * 2)) {
+ return flatcc_refmap_not_found; /* alloc failed */
+ }
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) {
+ return T[j].ref = ref;
+ }
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ ++refmap->count;
+ T[j].src = src;
+ return T[j].ref = ref;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_find(flatcc_refmap_t *refmap, const void *src)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (refmap->count == 0) {
+ return flatcc_refmap_not_found;
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) return T[j].ref;
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ return flatcc_refmap_not_found;
+}
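+/*
+ * One way to use the map when cloning a DAG is to look a source node up
+ * before emitting it and to register the emitted reference afterwards, so a
+ * second encounter reuses the earlier reference (sketch; `emit_node` is a
+ * hypothetical function that builds `src` and returns a non-zero reference):
+ *
+ *     static flatcc_refmap_ref_t clone_node(flatcc_refmap_t *refmap, const void *src)
+ *     {
+ *         flatcc_refmap_ref_t ref = flatcc_refmap_find(refmap, src);
+ *
+ *         if (ref != flatcc_refmap_not_found) return ref;
+ *         return flatcc_refmap_insert(refmap, src, emit_node(src));
+ *     }
+ */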
+
+/*
+ * To run the test from the project root:
+ *
+ * cc -D FLATCC_REFMAP_TEST -I include src/runtime/refmap.c -o test_refmap && ./test_refmap
+ *
+ */
+#ifdef FLATCC_REFMAP_TEST
+
+#include <stdio.h>
+
+#ifndef FLATCC_REFMAP_H
+#include "flatcc/flatcc_refmap.h"
+#endif
+
+#define test(x) do { if (!(x)) { fprintf(stderr, "%02d: refmap test failed\n", __LINE__); exit(-1); } } while (0)
+#define test_start() fprintf(stderr, "starting refmap test ...\n")
+#define test_ok() fprintf(stderr, "refmap test succeeded\n")
+
+int main()
+{
+ int i;
+ int data[1000];
+ int a = 1;
+ int b = 2;
+ int c = 3;
+ flatcc_refmap_t refmap;
+
+ flatcc_refmap_init(&refmap);
+
+ test(flatcc_refmap_find(&refmap, &a) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, 0) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &a) == 0);
+
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &a, 43) == 43);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &b, -10) == -10);
+ test(flatcc_refmap_insert(&refmap, &c, 100) == 100);
+ test(refmap.count == 3);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(flatcc_refmap_find(&refmap, &b) == -10);
+ test(flatcc_refmap_find(&refmap, &c) == 100);
+
+ test(flatcc_refmap_insert(&refmap, 0, 1000) == 1000);
+ test(flatcc_refmap_find(&refmap, 0) == 0);
+ test(refmap.count == 3);
+
+ test(flatcc_refmap_insert(&refmap, &b, 0) == 0);
+ test(flatcc_refmap_find(&refmap, &b) == 0);
+ test(refmap.count == 3);
+
+ flatcc_refmap_reset(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets > 0);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_insert(&refmap, data + i, i + 42) == i + 42);
+ }
+ test(refmap.count == 1000);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_find(&refmap, data + i) == i + 42);
+ }
+ flatcc_refmap_clear(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets == 0);
+ test_ok();
+ return 0;
+}
+
+#endif /* FLATCC_REFMAP_TEST */
diff --git a/flatcc/src/runtime/verifier.c b/flatcc/src/runtime/verifier.c
new file mode 100644
index 0000000..9c43bf6
--- /dev/null
+++ b/flatcc/src/runtime/verifier.c
@@ -0,0 +1,617 @@
+/*
+ * Runtime support for verifying flatbuffers.
+ *
+ * Depends mutually on generated verifier functions for table types that
+ * call into this library.
+ */
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_identifier.h"
+
+/* Customization for testing. */
+#if FLATCC_DEBUG_VERIFY
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 1
+#include <stdio.h>
+#define FLATCC_VERIFIER_ASSERT(cond, reason) \
+ if (!(cond)) { fprintf(stderr, "verifier assert: %s\n", \
+ flatcc_verify_error_string(reason)); FLATCC_ASSERT(0); return reason; }
+#endif
+
+#if FLATCC_TRACE_VERIFY
+#include <stdio.h>
+#define trace_verify(s, p) \
+ fprintf(stderr, "trace verify: %s: 0x%02x\n", (s), (unsigned)(size_t)(p));
+#else
+#define trace_verify(s, p) ((void)0)
+#endif
+
+/* The runtime library does not use the global config file. */
+
+/* This is a guideline, not an exact measure. */
+#ifndef FLATCC_VERIFIER_MAX_LEVELS
+#define FLATCC_VERIFIER_MAX_LEVELS 100
+#endif
+
+#ifndef FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 0
+#endif
+
+/*
+ * Generally a check should tell if a buffer is valid or not such
+ * that runtime can take appropriate actions rather than crash,
+ * also in debug, but assertions are helpful in debugging a problem.
+ *
+ * This must be compiled into the debug runtime library to take effect.
+ */
+#ifndef FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 1
+#endif
+
+/* May be redefined for logging purposes. */
+#ifndef FLATCC_VERIFIER_ASSERT
+#define FLATCC_VERIFIER_ASSERT(cond, reason) FLATCC_ASSERT(cond)
+#endif
+
+#if FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define flatcc_verify(cond, reason) if (!(cond)) { FLATCC_VERIFIER_ASSERT(cond, reason); return reason; }
+#else
+#define flatcc_verify(cond, reason) if (!(cond)) { return reason; }
+#endif
+
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+#define thash_t flatbuffers_thash_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+#define thash_size sizeof(thash_t)
+#define offset_size uoffset_size
+
+const char *flatcc_verify_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_verify_error_##no: \
+ return str;
+ FLATCC_VERIFY_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+/* `cond` may have side effects. */
+#define verify(cond, reason) do { int c = (cond); flatcc_verify(c, reason); } while(0)
+
+/*
+ * Identify checks related to runtime conditions (buffer size and
+ * alignment) as separate from those related to buffer content.
+ */
+#define verify_runtime(cond, reason) verify(cond, reason)
+
+#define check_result(x) if (x) { return (x); }
+
+#define check_field(td, id, required, base) do { \
+ int ret = get_offset_field(td, id, required, &base); \
+ if (ret || !base) { return ret; }} while (0)
+
+static inline uoffset_t read_uoffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_uoffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline thash_t read_thash_identifier(const char *identifier)
+{
+ return flatbuffers_type_hash_from_string(identifier);
+}
+
+static inline thash_t read_thash(const void *p, uoffset_t base)
+{
+ return __flatbuffers_thash_read_from_pe((uint8_t *)p + base);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline int check_header(uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+
+ /* The `k > base` rather than `k >= base` is to avoid null offsets. */
+ return k > base && k + offset_size <= end && !(k & (offset_size - 1));
+}
+
+static inline int check_aligned_header(uoffset_t end, uoffset_t base, uoffset_t offset, uint16_t align)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+ /* Alignment refers to element 0 and header must also be aligned. */
+ align = align < uoffset_size ? uoffset_size : align;
+
+ /* Note to self: the builder can also use the mask OR trick to propagate `min_align`. */
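+    /* Both (offset_size - 1) and (align - 1u) are power-of-two masks, so
+     * OR-ing them yields the mask of the larger alignment: the single test
+     * below covers both the length field and element 0 at `k + offset_size`. */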
+ return k > base && k + offset_size <= end && !((k + offset_size) & ((offset_size - 1) | (align - 1u)));
+}
+
+static inline int verify_struct(uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t size, uint16_t align)
+{
+ /* Structs can have zero size so `end` is a valid value. */
+ if (offset == 0 || base + offset > end) {
+ return flatcc_verify_error_offset_out_of_range;
+ }
+ base += offset;
+ verify(base + size >= base, flatcc_verify_error_struct_size_overflow);
+ verify(base + size <= end, flatcc_verify_error_struct_out_of_range);
+ verify (!(base & (align - 1u)), flatcc_verify_error_struct_unaligned);
+ return flatcc_verify_ok;
+}
+
+static inline voffset_t read_vt_entry(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vo = (id + 2u) * sizeof(voffset_t);
+
+    /* Assumes vsize has been verified for alignment. */
+ if (vo >= td->vsize) {
+ return 0;
+ }
+ return read_voffset(td->vtable, vo);
+}
+
+static inline const void *get_field_ptr(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vte = read_vt_entry(td, id);
+ return vte ? (const uint8_t *)td->buf + td->table + vte : 0;
+}
+
+static int verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, uoffset_t size, uint16_t align)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+ uoffset_t base = (uoffset_t)(size_t)td->buf;
+
+
+ /*
+ * Otherwise range check assumptions break, and normal access code likely also.
+ * We don't require voffset_size < uoffset_size, but some checks are faster if true.
+ */
+ FLATCC_ASSERT(uoffset_size >= voffset_size);
+ FLATCC_ASSERT(soffset_size == uoffset_size);
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ verify(!required, flatcc_verify_error_required_field_missing);
+ return flatcc_verify_ok;
+ }
+ trace_verify("table buffer", td->buf);
+ trace_verify("table", td->table);
+ trace_verify("id", id);
+ trace_verify("vte", vte);
+
+ /*
+     * Note that we don't add td.table to k and that we test against the
+     * table size, not the table end or buffer end. Otherwise it would not
+     * be safe to optimize out the k <= k2 check for normal uoffset and
+     * voffset configurations.
+ */
+ k = vte;
+ k2 = k + size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ trace_verify("table + vte", vte + td->table);
+ k += td->table + base;
+ trace_verify("entry: buf + table + vte", k);
+ trace_verify("align", align);
+ trace_verify("align masked entry", k & (align - 1u));
+ verify(!(k & (align - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ return flatcc_verify_ok;
+}
+
+static int get_offset_field(flatcc_table_verifier_descriptor_t *td, voffset_t id, int required, uoffset_t *out)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ *out = 0;
+ if (required) {
+ return flatcc_verify_error_required_field_missing;
+ }
+ /* Missing, but not invalid. */
+ return flatcc_verify_ok;
+ }
+ /*
+     * Note that we don't add td.table to k and that we test against the
+     * table size, not the table end or buffer end. Otherwise it would not
+     * be safe to optimize out the k <= k2 check for normal uoffset and
+     * voffset configurations.
+ */
+ k = vte;
+ k2 = k + offset_size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ k += td->table;
+ verify(!(k & (offset_size - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ *out = k;
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t n;
+
+ verify(check_header(end, base, offset), flatcc_verify_error_string_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ verify(end - base > n, flatcc_verify_error_string_out_of_range);
+ verify(((uint8_t *)buf + base)[n] == 0, flatcc_verify_error_string_not_zero_terminated);
+ return flatcc_verify_ok;
+}
+
+/*
+ * Keep the interface somewhat similar to flatcc_builder_start_vector.
+ * `max_count` is a precomputed division that bounds the vector length so
+ * that `n * elem_size` cannot overflow.
+ */
+static inline int verify_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t elem_size, uint16_t align, uoffset_t max_count)
+{
+ uoffset_t n;
+
+ verify(check_aligned_header(end, base, offset, align), flatcc_verify_error_vector_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ /* `n * elem_size` can overflow uncontrollably otherwise. */
+ verify(n <= max_count, flatcc_verify_error_vector_count_exceeds_representable_vector_size);
+ verify(end - base >= n * elem_size, flatcc_verify_error_vector_out_of_range);
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t i, n;
+
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_string(buf, end, base, read_uoffset(buf, base)));
+ }
+ return flatcc_verify_ok;
+}
+
+static inline int verify_table(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t vbase, vend;
+ flatcc_table_verifier_descriptor_t td;
+
+ verify((td.ttl = ttl - 1), flatcc_verify_error_max_nesting_level_reached);
+ verify(check_header(end, base, offset), flatcc_verify_error_table_header_out_of_range_or_unaligned);
+ td.table = base + offset;
+ /* Read vtable offset - it is signed, but we want it unsigned, assuming 2's complement works. */
+ vbase = td.table - read_uoffset(buf, td.table);
+ verify((soffset_t)vbase >= 0 && !(vbase & (voffset_size - 1)), flatcc_verify_error_vtable_offset_out_of_range_or_unaligned);
+ verify(vbase + voffset_size <= end, flatcc_verify_error_vtable_header_out_of_range);
+ /* Read vtable size. */
+ td.vsize = read_voffset(buf, vbase);
+ vend = vbase + td.vsize;
+ verify(vend <= end && !(td.vsize & (voffset_size - 1)), flatcc_verify_error_vtable_size_out_of_range_or_unaligned);
+ /* Optimizes away overflow check if uoffset_t is large enough. */
+ verify(uoffset_size > voffset_size || vend >= vbase, flatcc_verify_error_vtable_size_overflow);
+
+ verify(td.vsize >= 2 * voffset_size, flatcc_verify_error_vtable_header_too_small);
+ /* Read table size. */
+ td.tsize = read_voffset(buf, vbase + voffset_size);
+ verify(end - td.table >= td.tsize, flatcc_verify_error_table_size_out_of_range);
+ td.vtable = (uint8_t *)buf + vbase;
+ td.buf = buf;
+ td.end = end;
+ return tvf(&td);
+}
+
+static inline int verify_table_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t i, n;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_table(buf, end, base, read_uoffset(buf, base), ttl, tvf));
+ }
+ return flatcc_verify_ok;
+}
+
+static inline int verify_union_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ uoffset_t count, const utype_t *types, int ttl, flatcc_union_verifier_f uvf)
+{
+ uoffset_t i, n, elem;
+ flatcc_union_verifier_descriptor_t ud;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ verify(n == count, flatcc_verify_error_union_vector_length_mismatch);
+ base += offset_size;
+
+ ud.buf = buf;
+ ud.end = end;
+ ud.ttl = ttl;
+
+ for (i = 0; i < n; ++i, base += offset_size) {
+ /* Table vectors can never be null, but unions can when the type is NONE. */
+ elem = read_uoffset(buf, base);
+ if (elem == 0) {
+ verify(types[i] == 0, flatcc_verify_error_union_element_absent_without_type_NONE);
+ } else {
+ verify(types[i] != 0, flatcc_verify_error_union_element_present_with_type_NONE);
+ ud.type = types[i];
+ ud.base = base;
+ ud.offset = elem;
+ check_result(uvf(&ud));
+ }
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, size_t size, uint16_t align)
+{
+ check_result(verify_field(td, id, 0, (uoffset_t)size, align));
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_string_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
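+/*
+ * The flatcc_table_verifier_f callbacks passed below are normally generated,
+ * but a hand-written callback has the same shape (sketch; the name
+ * `MyTable_verify_table` and the table layout - a uint32 scalar at field id 0
+ * and an optional string at field id 1 - are hypothetical):
+ *
+ *     static int MyTable_verify_table(flatcc_table_verifier_descriptor_t *td)
+ *     {
+ *         int ret;
+ *
+ *         if ((ret = flatcc_verify_field(td, 0, 4, 4))) return ret;
+ *         if ((ret = flatcc_verify_string_field(td, 1, 0))) return ret;
+ *         return flatcc_verify_ok;
+ *     }
+ */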
+
+int flatcc_verify_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, size_t elem_size, uint16_t align, size_t max_count)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ (uoffset_t)elem_size, align, (uoffset_t)max_count);
+}
+
+int flatcc_verify_string_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string_vector(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
+
+int flatcc_verify_table_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_table_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table_vector(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_union_table(flatcc_union_verifier_descriptor_t *ud, flatcc_table_verifier_f *tvf)
+{
+ return verify_table(ud->buf, ud->end, ud->base, ud->offset, ud->ttl, tvf);
+}
+
+int flatcc_verify_union_struct(flatcc_union_verifier_descriptor_t *ud, size_t size, uint16_t align)
+{
+ return verify_struct(ud->end, ud->base, ud->offset, (uoffset_t)size, align);
+}
+
+int flatcc_verify_union_string(flatcc_union_verifier_descriptor_t *ud)
+{
+ return verify_string(ud->buf, ud->end, ud->base, ud->offset);
+}
+
+int flatcc_verify_buffer_header(const void *buf, size_t bufsiz, const char *fid)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+ * Even if we specify no fid, the user might later. Therefore
+     * Even if no fid is specified here, the user might supply one later,
+     * so require space for it. Not all buffer generators take this into
+     * account, so it is possible to fail an otherwise valid buffer - but
+     * such buffers aren't safe.
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (fid != 0) {
+ id2 = read_thash_identifier(fid);
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_typed_buffer_header(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+     * Even if no identifier is specified here, the user might supply one
+     * later, so require space for it. Not all buffer generators take this
+     * into account, so it is possible to fail an otherwise valid buffer -
+     * but such buffers aren't safe.
+ */
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (thash != 0) {
+ id2 = thash;
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_struct_as_root(const void *buf, size_t bufsiz, const char *fid, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_struct_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, bufsiz, thash));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_table_as_root(const void *buf, size_t bufsiz, const char *fid, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_buffer_header(buf, (uoffset_t)bufsiz, fid));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
+
+int flatcc_verify_table_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, (uoffset_t)bufsiz, thash));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
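+/*
+ * Example of verifying an untrusted buffer before reading it (sketch;
+ * `MyTable_verify_table` stands for a generated or hand-written table
+ * verifier as sketched above, a null fid skips the identifier check, and
+ * fprintf assumes <stdio.h> is included):
+ *
+ *     static int check_buffer(const void *buf, size_t bufsiz)
+ *     {
+ *         int ret = flatcc_verify_table_as_root(buf, bufsiz, 0, MyTable_verify_table);
+ *
+ *         if (ret != flatcc_verify_ok) {
+ *             fprintf(stderr, "buffer rejected: %s\n", flatcc_verify_error_string(ret));
+ *             return -1;
+ *         }
+ *         return 0;
+ *     }
+ */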
+
+int flatcc_verify_struct_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid, size_t size, uint16_t align)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ check_result(flatcc_verify_vector_field(td, id, required, align, 1, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ return flatcc_verify_struct_as_root(buf, bufsiz, fid, size, align);
+}
+
+int flatcc_verify_table_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid,
+ uint16_t align, flatcc_table_verifier_f tvf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ check_result(flatcc_verify_vector_field(td, id, required, align, 1, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ /*
+     * Don't verify the nested buffer's identifier - the information is
+     * difficult to get and might not be what is desired anyway. The user
+     * can do it later.
+ */
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_table(buf, bufsiz, 0, read_uoffset(buf, 0), td->ttl, tvf);
+}
+
+int flatcc_verify_union_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uint8_t *type;
+ uoffset_t base;
+ flatcc_union_verifier_descriptor_t ud;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ vte_table = read_vt_entry(td, id);
+ verify(vte_table == 0, flatcc_verify_error_union_cannot_have_a_table_without_a_type);
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_field);
+ return flatcc_verify_ok;
+ }
+ /* No need to check required here. */
+ check_result(verify_field(td, id - 1, 0, 1, 1));
+ /* Only now is it safe to read the type. */
+ vte_table = read_vt_entry(td, id);
+ type = (const uint8_t *)td->buf + td->table + vte_type;
+ verify(*type || vte_table == 0, flatcc_verify_error_union_type_NONE_cannot_have_a_value);
+
+ if (*type == 0) {
+ return flatcc_verify_ok;
+ }
+ check_field(td, id, required, base);
+ ud.buf = td->buf;
+ ud.end = td->end;
+ ud.ttl = td->ttl;
+ ud.base = base;
+ ud.offset = read_uoffset(td->buf, base);
+ ud.type = *type;
+ return uvf(&ud);
+}
+
+int flatcc_verify_union_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uoffset_t *buf;
+ const utype_t *types;
+ uoffset_t count, base;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ if (0 == (vte_table = read_vt_entry(td, id))) {
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_vector_field);
+ }
+ }
+ check_result(flatcc_verify_vector_field(td, id - 1, required,
+ utype_size, utype_size, FLATBUFFERS_COUNT_MAX(utype_size)));
+ if (0 == (buf = get_field_ptr(td, id - 1))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ count = read_uoffset(buf, 0);
+ ++buf;
+ types = (utype_t *)buf;
+
+ check_field(td, id, required, base);
+ return verify_union_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ count, types, td->ttl, uvf);
+}