Diffstat (limited to 'flatcc/src/runtime')
-rw-r--r--  flatcc/src/runtime/CMakeLists.txt  |   16
-rw-r--r--  flatcc/src/runtime/builder.c       | 2035
-rw-r--r--  flatcc/src/runtime/emitter.c       |  269
-rw-r--r--  flatcc/src/runtime/json_parser.c   | 1297
-rw-r--r--  flatcc/src/runtime/json_printer.c  | 1486
-rw-r--r--  flatcc/src/runtime/refmap.c        |  248
-rw-r--r--  flatcc/src/runtime/verifier.c      |  617
7 files changed, 5968 insertions(+), 0 deletions(-)
diff --git a/flatcc/src/runtime/CMakeLists.txt b/flatcc/src/runtime/CMakeLists.txt
new file mode 100644
index 0000000..127e2a4
--- /dev/null
+++ b/flatcc/src/runtime/CMakeLists.txt
@@ -0,0 +1,16 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/include"
+)
+
+add_library(flatccrt
+ builder.c
+ emitter.c
+ refmap.c
+ verifier.c
+ json_parser.c
+ json_printer.c
+)
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatccrt DESTINATION ${lib_dir})
+endif()
diff --git a/flatcc/src/runtime/builder.c b/flatcc/src/runtime/builder.c
new file mode 100644
index 0000000..b62c2b6
--- /dev/null
+++ b/flatcc/src/runtime/builder.c
@@ -0,0 +1,2035 @@
+/*
+ * Code generator for C, building FlatBuffers.
+ *
+ * There are several approaches, some light, some requiring a library,
+ * some with vectored I/O etc.
+ *
+ * Here we focus on a reasonable balance of light code and efficiency.
+ *
+ * Builder code is generated to a separate file that includes the
+ * generated read-only code.
+ *
+ * Mutable buffers are not supported in this version.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_builder.h"
+#include "flatcc/flatcc_emitter.h"
+
+/*
+ * `check` is designed to catch incorrect use errors that can be
+ * ignored in production builds of a tested product.
+ *
+ * `check_error` fails if condition is false and is designed to return an
+ * error code in production.
+ */
+
+#if FLATCC_BUILDER_ASSERT_ON_ERROR
+#define check(cond, reason) FLATCC_BUILDER_ASSERT(cond, reason)
+#else
+#define check(cond, reason) ((void)0)
+#endif
+
+#if FLATCC_BUILDER_SKIP_CHECKS
+#define check_error(cond, err, reason) ((void)0)
+#else
+#define check_error(cond, err, reason) if (!(cond)) { check(cond, reason); return err; }
+#endif
+
+/* `strnlen` not widely supported. */
+static inline size_t pstrnlen(const char *s, size_t max_len)
+{
+ const char *end = memchr(s, 0, max_len);
+ return end ? (size_t)(end - s) : max_len;
+}
+#undef strnlen
+#define strnlen pstrnlen
+
+/* Padding can be up to 255 zeroes, and 1 zero string termination byte.
+ * When two paddings are combined at nested buffers, we need twice that.
+ * Visible to emitter so it can test for zero padding in iov. */
+const uint8_t flatcc_builder_padding_base[512] = { 0 };
+#define _pad flatcc_builder_padding_base
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define write_uoffset __flatbuffers_uoffset_write_to_pe
+#define write_voffset __flatbuffers_voffset_write_to_pe
+#define write_identifier __flatbuffers_uoffset_write_to_pe
+#define write_utype __flatbuffers_utype_write_to_pe
+
+#define field_size sizeof(uoffset_t)
+#define max_offset_count FLATBUFFERS_COUNT_MAX(field_size)
+#define union_size sizeof(flatcc_builder_union_ref_t)
+#define max_union_count FLATBUFFERS_COUNT_MAX(union_size)
+#define utype_size sizeof(utype_t)
+#define max_utype_count FLATBUFFERS_COUNT_MAX(utype_size)
+
+#define max_string_len FLATBUFFERS_COUNT_MAX(1)
+#define identifier_size FLATBUFFERS_IDENTIFIER_SIZE
+
+
+#define iovec_t flatcc_iovec_t
+#define frame_size sizeof(__flatcc_builder_frame_t)
+#define frame(x) (B->frame[0].x)
+
+
+/* `align` must be a power of 2. */
+static inline uoffset_t alignup_uoffset(uoffset_t x, size_t align)
+{
+ return (x + (uoffset_t)align - 1u) & ~((uoffset_t)align - 1u);
+}
+
+static inline size_t alignup_size(size_t x, size_t align)
+{
+ return (x + align - 1u) & ~(align - 1u);
+}
+
+
+typedef struct vtable_descriptor vtable_descriptor_t;
+struct vtable_descriptor {
+ /* Where the vtable is emitted. */
+ flatcc_builder_ref_t vt_ref;
+ /* Which buffer it was emitted to. */
+ uoffset_t nest_id;
+ /* Where the vtable is cached. */
+ uoffset_t vb_start;
+ /* Hash table collision chain. */
+ uoffset_t next;
+};
+
+typedef struct flatcc_iov_state flatcc_iov_state_t;
+struct flatcc_iov_state {
+ size_t len;
+ int count;
+ flatcc_iovec_t iov[FLATCC_IOV_COUNT_MAX];
+};
+
+#define iov_state_t flatcc_iov_state_t
+
+/* This assumes `iov_state_t iov;` has been declared in scope */
+#define push_iov_cond(base, size, cond) if ((size) > 0 && (cond)) { iov.len += size;\
+ iov.iov[iov.count].iov_base = (void *)(base); iov.iov[iov.count].iov_len = (size); ++iov.count; }
+#define push_iov(base, size) push_iov_cond(base, size, 1)
+#define init_iov() { iov.len = 0; iov.count = 0; }
+
+
+int flatcc_builder_default_alloc(void *alloc_context, iovec_t *b, size_t request, int zero_fill, int hint)
+{
+ void *p;
+ size_t n;
+
+ (void)alloc_context;
+
+ if (request == 0) {
+ if (b->iov_base) {
+ FLATCC_BUILDER_FREE(b->iov_base);
+ b->iov_base = 0;
+ b->iov_len = 0;
+ }
+ return 0;
+ }
+ switch (hint) {
+ case flatcc_builder_alloc_ds:
+ n = 256;
+ break;
+ case flatcc_builder_alloc_ht:
+ /* Should be exact size, or the extra space is just wasted. */
+ n = request;
+ break;
+ case flatcc_builder_alloc_fs:
+ n = sizeof(__flatcc_builder_frame_t) * 8;
+ break;
+ case flatcc_builder_alloc_us:
+ n = 64;
+ break;
+ default:
+ /*
+ * We have many small structures: the vs stack for tables with few
+ * elements, and few offset fields in the patch log. No need to
+ * overallocate for busy small messages.
+ */
+ n = 32;
+ break;
+ }
+ while (n < request) {
+ n *= 2;
+ }
+ if (request <= b->iov_len && b->iov_len / 2 >= n) {
+ /* Add hysteresis to shrink. */
+ return 0;
+ }
+ if (!(p = FLATCC_BUILDER_REALLOC(b->iov_base, n))) {
+ return -1;
+ }
+ /* Realloc might also shrink. */
+ if (zero_fill && b->iov_len < n) {
+ memset((uint8_t *)p + b->iov_len, 0, n - b->iov_len);
+ }
+ b->iov_base = p;
+ b->iov_len = n;
+ return 0;
+}
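+
+/*
+ * Illustrative sketch (not part of the runtime): a custom allocator
+ * must follow the same contract as `flatcc_builder_default_alloc`:
+ * free on request == 0, grow (or shrink) the iovec otherwise, and
+ * return 0 on success. The pooling idea below is hypothetical.
+ *
+ * static int my_alloc(void *alloc_context, flatcc_iovec_t *b,
+ * size_t request, int zero_fill, int hint)
+ * {
+ * // Could, e.g., serve flatcc_builder_alloc_ht requests from a
+ * // dedicated pool; here we simply delegate to the default policy.
+ * return flatcc_builder_default_alloc(alloc_context, b, request,
+ * zero_fill, hint);
+ * }
+ *
+ * It would be installed with
+ * flatcc_builder_custom_init(B, 0, 0, my_alloc, pool).
+ */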
+
+#define T_ptr(base, pos) ((void *)((uint8_t *)(base) + (uoffset_t)(pos)))
+#define ds_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_ds].iov_base, (pos)))
+#define vs_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vs].iov_base, (pos)))
+#define pl_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_pl].iov_base, (pos)))
+#define us_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_us].iov_base, (pos)))
+#define vd_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vd].iov_base, (pos)))
+#define vb_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vb].iov_base, (pos)))
+#define vs_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_vs].iov_base))
+#define pl_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_pl].iov_base))
+#define us_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_us].iov_base))
+
+#define table_limit (FLATBUFFERS_VOFFSET_MAX - field_size + 1)
+#define data_limit (FLATBUFFERS_UOFFSET_MAX - field_size + 1)
+
+#define set_identifier(id) memcpy(&B->identifier, (id) ? (void *)(id) : (void *)_pad, identifier_size)
+
+/* Must also return true when no buffer has been started. */
+#define is_top_buffer(B) (B->nest_id == 0)
+
+/*
+ * Tables use a stack representation better suited for quickly adding
+ * fields to tables, but it must occasionally be refreshed following
+ * reallocation or reentry from a child frame.
+ */
+static inline void refresh_ds(flatcc_builder_t *B, uoffset_t type_limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ B->ds = ds_ptr(B->ds_first);
+ B->ds_limit = (uoffset_t)buf->iov_len - B->ds_first;
+ /*
+ * So we neither allocate beyond the table's representation size nor
+ * beyond our current buffer size.
+ */
+ if (B->ds_limit > type_limit) {
+ B->ds_limit = type_limit;
+ }
+ /* So exit frame can refresh fast. */
+ frame(type_limit) = type_limit;
+}
+
+static int reserve_ds(flatcc_builder_t *B, size_t need, uoffset_t limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ if (B->alloc(B->alloc_context, buf, B->ds_first + need, 1, flatcc_builder_alloc_ds)) {
+ return -1;
+ }
+ refresh_ds(B, limit);
+ return 0;
+}
+
+/*
+ * Make sure there is always an extra zero termination on stack
+ * even if it isn't emitted such that string updates may count
+ * on zero termination being present always.
+ */
+static inline void *push_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ size_t offset;
+
+ offset = B->ds_offset;
+ if ((B->ds_offset += size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, data_limit)) {
+ return 0;
+ }
+ }
+ return B->ds + offset;
+}
+
+static inline void unpush_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ B->ds_offset -= size;
+ memset(B->ds + B->ds_offset, 0, size);
+}
+
+static inline void *push_ds_copy(flatcc_builder_t *B, const void *data, uoffset_t size)
+{
+ void *p;
+
+ if (!(p = push_ds(B, size))) {
+ return 0;
+ }
+ memcpy(p, data, size);
+ return p;
+}
+
+static inline void *push_ds_field(flatcc_builder_t *B, uoffset_t size, uint16_t align, voffset_t id)
+{
+ uoffset_t offset;
+
+ /*
+ * We calculate table field alignment relative to first entry, not
+ * header field with vtable offset.
+ *
+ * Note: >= comparison handles special case where B->ds is not
+ * allocated yet and size is 0 so the return value would be mistaken
+ * for an error.
+ */
+ offset = alignup_uoffset(B->ds_offset, align);
+ if ((B->ds_offset = offset + size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ return B->ds + offset;
+}
+
+static inline void *push_ds_offset_field(flatcc_builder_t *B, voffset_t id)
+{
+ uoffset_t offset;
+
+ offset = alignup_uoffset(B->ds_offset, field_size);
+ if ((B->ds_offset = offset + field_size) > B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ *B->pl++ = (flatbuffers_voffset_t)offset;
+ return B->ds + offset;
+}
+
+static inline void *reserve_buffer(flatcc_builder_t *B, int alloc_type, size_t used, size_t need, int zero_init)
+{
+ iovec_t *buf = B->buffers + alloc_type;
+
+ if (used + need > buf->iov_len) {
+ if (B->alloc(B->alloc_context, buf, used + need, zero_init, alloc_type)) {
+ check(0, "memory allocation failed");
+ return 0;
+ }
+ }
+ return (void *)((size_t)buf->iov_base + used);
+}
+
+static inline int reserve_fields(flatcc_builder_t *B, int count)
+{
+ size_t used, need;
+
+ /* Provide faster stack operations for common table operations. */
+ used = frame(container.table.vs_end) + frame(container.table.id_end) * sizeof(voffset_t);
+ need = (size_t)(count + 2) * sizeof(voffset_t);
+ if (!(B->vs = reserve_buffer(B, flatcc_builder_alloc_vs, used, need, 1))) {
+ return -1;
+ }
+ /* Move past header for convenience. */
+ B->vs += 2;
+ used = frame(container.table.pl_end);
+ /* Add one to handle special case of first table being empty. */
+ need = (size_t)count * sizeof(*(B->pl)) + 1;
+ if (!(B->pl = reserve_buffer(B, flatcc_builder_alloc_pl, used, need, 0))) {
+ return -1;
+ }
+ return 0;
+}
+
+static int alloc_ht(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ size_t size, k;
+ /* Allocate null entry so we can check for return errors. */
+ FLATCC_ASSERT(B->vd_end == 0);
+ if (!reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0)) {
+ return -1;
+ }
+ B->vd_end = sizeof(vtable_descriptor_t);
+ size = field_size * FLATCC_BUILDER_MIN_HASH_COUNT;
+ if (B->alloc(B->alloc_context, buf, size, 1, flatcc_builder_alloc_ht)) {
+ return -1;
+ }
+ while (size * 2 <= buf->iov_len) {
+ size *= 2;
+ }
+ size /= field_size;
+ for (k = 0; (((size_t)1) << k) < size; ++k) {
+ }
+ B->ht_width = k;
+ return 0;
+}
+
+static inline uoffset_t *lookup_ht(flatcc_builder_t *B, uint32_t hash)
+{
+ uoffset_t *T;
+
+ if (B->ht_width == 0) {
+ if (alloc_ht(B)) {
+ return 0;
+ }
+ }
+ T = B->buffers[flatcc_builder_alloc_ht].iov_base;
+
+ return &T[FLATCC_BUILDER_BUCKET_VT_HASH(hash, B->ht_width)];
+}
+
+void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ if (B->ht_width == 0) {
+ return;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ /* Reserve the null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ B->vb_end = 0;
+}
+
+int flatcc_builder_custom_init(flatcc_builder_t *B,
+ flatcc_builder_emit_fun *emit, void *emit_context,
+ flatcc_builder_alloc_fun *alloc, void *alloc_context)
+{
+ /*
+ * Do not allocate anything here. Only the required buffers will be
+ * allocated. For simple struct buffers, no allocation is required
+ * at all.
+ */
+ memset(B, 0, sizeof(*B));
+
+ if (emit == 0) {
+ B->is_default_emitter = 1;
+ emit = flatcc_emitter;
+ emit_context = &B->default_emit_context;
+ }
+ if (alloc == 0) {
+ alloc = flatcc_builder_default_alloc;
+ }
+ B->alloc_context = alloc_context;
+ B->alloc = alloc;
+ B->emit_context = emit_context;
+ B->emit = emit;
+ return 0;
+}
+
+int flatcc_builder_init(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_init(B, 0, 0, 0, 0);
+}
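+
+/*
+ * Typical lifecycle with the default emitter and allocator
+ * (illustrative sketch; error handling omitted):
+ *
+ * flatcc_builder_t builder;
+ * size_t size;
+ * void *buf;
+ *
+ * flatcc_builder_init(&builder);
+ * // ... build a buffer, e.g. start_buffer/.../end_buffer ...
+ * buf = flatcc_builder_finalize_buffer(&builder, &size);
+ * // ... use buf; under the default config the caller releases it
+ * // with free() ...
+ * flatcc_builder_reset(&builder); // reuse the builder, or:
+ * flatcc_builder_clear(&builder); // final cleanup
+ */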
+
+int flatcc_builder_custom_reset(flatcc_builder_t *B, int set_defaults, int reduce_buffers)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ if (buf->iov_base) {
+ /* Don't try to reduce the hash table. */
+ if (i != flatcc_builder_alloc_ht &&
+ reduce_buffers && B->alloc(B->alloc_context, buf, 1, 1, i)) {
+ return -1;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ } else {
+ FLATCC_ASSERT(buf->iov_len == 0);
+ }
+ }
+ B->vb_end = 0;
+ if (B->vd_end > 0) {
+ /* Reset past null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ }
+ B->min_align = 0;
+ B->emit_start = 0;
+ B->emit_end = 0;
+ B->level = 0;
+ B->limit_level = 0;
+ B->ds_offset = 0;
+ B->ds_limit = 0;
+ B->nest_count = 0;
+ B->nest_id = 0;
+ /* Needed for correct offset calculation. */
+ B->ds = B->buffers[flatcc_builder_alloc_ds].iov_base;
+ B->pl = B->buffers[flatcc_builder_alloc_pl].iov_base;
+ B->vs = B->buffers[flatcc_builder_alloc_vs].iov_base;
+ B->frame = 0;
+ if (set_defaults) {
+ B->vb_flush_limit = 0;
+ B->max_level = 0;
+ B->disable_vt_clustering = 0;
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_reset(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_reset(B->refmap);
+ }
+ return 0;
+}
+
+int flatcc_builder_reset(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_reset(B, 0, 0);
+}
+
+void flatcc_builder_clear(flatcc_builder_t *B)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ B->alloc(B->alloc_context, buf, 0, 0, i);
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_clear(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_clear(B->refmap);
+ }
+ memset(B, 0, sizeof(*B));
+}
+
+static inline void set_min_align(flatcc_builder_t *B, uint16_t align)
+{
+ if (B->min_align < align) {
+ B->min_align = align;
+ }
+}
+
+/*
+ * It's a max operation: the minimum viable alignment is the largest
+ * observed alignment requirement, but no larger.
+ */
+static inline void get_min_align(uint16_t *align, uint16_t b)
+{
+ if (*align < b) {
+ *align = b;
+ }
+}
+
+void *flatcc_builder_enter_user_frame_ptr(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return frame;
+}
+
+size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return B->user_frame_offset;
+}
+
+
+size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B)
+{
+ size_t *hdr;
+
+ FLATCC_ASSERT(B->user_frame_offset > 0);
+
+ hdr = us_ptr(B->user_frame_offset);
+ B->user_frame_end = B->user_frame_offset - sizeof(size_t);
+ return B->user_frame_offset = hdr[-1];
+}
+
+size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle)
+{
+ FLATCC_ASSERT(B->user_frame_offset >= handle);
+
+ B->user_frame_offset = handle;
+ return flatcc_builder_exit_user_frame(B);
+}
+
+size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B)
+{
+ return B->user_frame_offset;
+}
+
+void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle)
+{
+ return us_ptr(handle);
+}
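+
+/*
+ * Illustrative sketch: user frames give scratch space in a
+ * builder-managed stack, so a handle remains valid across builder
+ * calls even when the underlying buffer reallocates (a raw pointer
+ * may not). `my_state_t` is a hypothetical user type.
+ *
+ * size_t handle = flatcc_builder_enter_user_frame(B, sizeof(my_state_t));
+ * my_state_t *st = flatcc_builder_get_user_frame_ptr(B, handle);
+ * // ... builder calls; refresh st from the handle afterwards ...
+ * st = flatcc_builder_get_user_frame_ptr(B, handle);
+ * flatcc_builder_exit_user_frame_at(B, handle);
+ */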
+
+static int enter_frame(flatcc_builder_t *B, uint16_t align)
+{
+ if (++B->level > B->limit_level) {
+ if (B->max_level > 0 && B->level > B->max_level) {
+ return -1;
+ }
+ if (!(B->frame = reserve_buffer(B, flatcc_builder_alloc_fs,
+ (size_t)(B->level - 1) * frame_size, frame_size, 0))) {
+ return -1;
+ }
+ B->limit_level = (int)(B->buffers[flatcc_builder_alloc_fs].iov_len / frame_size);
+ if (B->max_level > 0 && B->max_level < B->limit_level) {
+ B->limit_level = B->max_level;
+ }
+ } else {
+ ++B->frame;
+ }
+ frame(ds_offset) = B->ds_offset;
+ frame(align) = B->align;
+ B->align = align;
+ /* Note: do not assume padding before the first entry has been allocated! */
+ frame(ds_first) = B->ds_first;
+ frame(type_limit) = data_limit;
+ B->ds_first = alignup_uoffset(B->ds_first + B->ds_offset, 8);
+ B->ds_offset = 0;
+ return 0;
+}
+
+static inline void exit_frame(flatcc_builder_t *B)
+{
+ memset(B->ds, 0, B->ds_offset);
+ B->ds_offset = frame(ds_offset);
+ B->ds_first = frame(ds_first);
+ refresh_ds(B, frame(type_limit));
+
+ /*
+ * Restore local alignment: e.g. a table should not change alignment
+ * because a child table was just created elsewhere in the buffer,
+ * but the overall alignment (min align), should be aware of it.
+ * Each buffer has its own min align that then migrates up without
+ * being affected by sibling or child buffers.
+ */
+ set_min_align(B, B->align);
+ B->align = frame(align);
+
+ --B->frame;
+ --B->level;
+}
+
+static inline uoffset_t front_pad(flatcc_builder_t *B, uoffset_t size, uint16_t align)
+{
+ return (uoffset_t)(B->emit_start - (flatcc_builder_ref_t)size) & (align - 1u);
+}
+
+static inline uoffset_t back_pad(flatcc_builder_t *B, uint16_t align)
+{
+ return (uoffset_t)(B->emit_end) & (align - 1u);
+}
+
+static inline flatcc_builder_ref_t emit_front(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ /*
+ * We might have overflow when including headers, but without
+ * headers we should have checks to prevent overflow in the
+ * uoffset_t range, hence we subtract 16 to be safe. With that
+ * guarantee we can also make a safe check on the soffset_t range.
+ *
+ * We only allow buffers half the theoretical size of
+ * FLATBUFFERS_UOFFSET_MAX so we can safely use signed references.
+ *
+ * NOTE: vtables vt_offset field is signed, and the check in create
+ * table only ensures the signed limit. The check would fail if the
+ * total buffer size could grow beyond UOFFSET_MAX, and we prevent
+ * that by limiting the lower end to SOFFSET_MIN, and the upper end
+ * at emit_back to SOFFSET_MAX.
+ */
+ ref = B->emit_start - (flatcc_builder_ref_t)iov->len;
+ if ((iov->len > 16 && iov->len - 16 > FLATBUFFERS_UOFFSET_MAX) || ref >= B->emit_start) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return B->emit_start = ref;
+}
+
+static inline flatcc_builder_ref_t emit_back(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ ref = B->emit_end;
+ B->emit_end = ref + (flatcc_builder_ref_t)iov->len;
+ /*
+ * Similar to emit_front check, but since we only emit vtables and
+ * padding at the back, we are not concerned with iov->len overflow,
+ * only total buffer overflow.
+ *
+ * With this check, vtable soffset references at table header can
+ * still overflow in extreme cases, so this must be checked
+ * separately.
+ */
+ if (B->emit_end < ref) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ /*
+ * Back references always return ref + 1 because ref == 0 is valid and
+ * should not be mistaken for error. vtables understand this.
+ */
+ return ref + 1;
+}
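+
+/*
+ * Worked example of the emit addressing scheme: starting from an empty
+ * builder (emit_start == emit_end == 0), emitting 8 bytes to the front
+ * yields ref == -8 and moves emit_start to -8, while emitting a 6 byte
+ * vtable to the back yields ref == 0, returned as 1, and moves emit_end
+ * to 6. The finished buffer then spans [emit_start, emit_end).
+ */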
+
+static int align_to_block(flatcc_builder_t *B, uint16_t *align, uint16_t block_align, int is_nested)
+{
+ size_t end_pad;
+ iov_state_t iov;
+
+ block_align = block_align ? block_align : B->block_align ? B->block_align : 1;
+ get_min_align(align, field_size);
+ get_min_align(align, block_align);
+ /* Pad end of buffer to multiple. */
+ if (!is_nested) {
+ end_pad = back_pad(B, block_align);
+ if (end_pad) {
+ init_iov();
+ push_iov(_pad, end_pad);
+ if (0 == emit_back(B, &iov)) {
+ check(0, "emitter rejected buffer content");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
+ uint16_t block_align,
+ const void *data, size_t size, uint16_t align, flatcc_builder_buffer_flags_t flags)
+{
+ uoffset_t size_field, pad;
+ iov_state_t iov;
+ int with_size = (flags & flatcc_builder_with_size) != 0;
+
+ if (align_to_block(B, &align, block_align, !is_top_buffer(B))) {
+ return 0;
+ }
+ pad = front_pad(B, (uoffset_t)(size + (with_size ? field_size : 0)), align);
+ write_uoffset(&size_field, (uoffset_t)size + pad);
+ init_iov();
+ /* Add ubyte vector size header if nested buffer. */
+ push_iov_cond(&size_field, field_size, !is_top_buffer(B));
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align,
+ flatcc_builder_ref_t object_ref, uint16_t align, flatcc_builder_buffer_flags_t flags)
+{
+ flatcc_builder_ref_t buffer_ref;
+ uoffset_t header_pad, id_size = 0;
+ uoffset_t object_offset, buffer_size, buffer_base;
+ iov_state_t iov;
+ flatcc_builder_identifier_t id_out = 0;
+ int is_nested = (flags & flatcc_builder_is_nested) != 0;
+ int with_size = (flags & flatcc_builder_with_size) != 0;
+
+ if (align_to_block(B, &align, block_align, is_nested)) {
+ return 0;
+ }
+ set_min_align(B, align);
+ if (identifier) {
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == identifier_size);
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == field_size);
+ memcpy(&id_out, identifier, identifier_size);
+ id_out = __flatbuffers_thash_read_from_le(&id_out);
+ write_identifier(&id_out, id_out);
+ }
+ id_size = id_out ? identifier_size : 0;
+ header_pad = front_pad(B, field_size + id_size + (uoffset_t)(with_size ? field_size : 0), align);
+ init_iov();
+ /* ubyte vector size field wrapping the nested buffer. */
+ push_iov_cond(&buffer_size, field_size, is_nested || with_size);
+ push_iov(&object_offset, field_size);
+ /* Identifiers are not always present in buffer. */
+ push_iov(&id_out, id_size);
+ push_iov(_pad, header_pad);
+ buffer_base = (uoffset_t)B->emit_start - (uoffset_t)iov.len + (uoffset_t)((is_nested || with_size) ? field_size : 0);
+ if (is_nested) {
+ write_uoffset(&buffer_size, (uoffset_t)B->buffer_mark - buffer_base);
+ } else {
+ /* Also include clustered vtables. */
+ write_uoffset(&buffer_size, (uoffset_t)B->emit_end - buffer_base);
+ }
+ write_uoffset(&object_offset, (uoffset_t)object_ref - buffer_base);
+ if (0 == (buffer_ref = emit_front(B, &iov))) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return buffer_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B, const void *data, size_t size, uint16_t align)
+{
+ size_t pad;
+ iov_state_t iov;
+
+ check(align >= 1, "align cannot be 0");
+ set_min_align(B, align);
+ pad = front_pad(B, (uoffset_t)size, align);
+ init_iov();
+ push_iov(data, size);
+ /*
+ * Normally structs will already be a multiple of their alignment,
+ * so this padding will not likely be emitted.
+ */
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+int flatcc_builder_start_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align, flatcc_builder_buffer_flags_t flags)
+{
+ /*
+ * This saves the parent `min_align` in the align field since we
+ * shouldn't use that for the current buffer. `exit_frame`
+ * automatically aggregates align up, so it is updated when the
+ * buffer frame exits.
+ */
+ if (enter_frame(B, B->min_align)) {
+ return -1;
+ }
+ /* B->align now has parent min_align, and child frames will save it. */
+ B->min_align = 1;
+ /* Save the parent block align, and set proper defaults for this buffer. */
+ frame(container.buffer.block_align) = B->block_align;
+ B->block_align = block_align;
+ frame(container.buffer.flags) = B->buffer_flags;
+ B->buffer_flags = (uint16_t)flags;
+ frame(container.buffer.mark) = B->buffer_mark;
+ frame(container.buffer.nest_id) = B->nest_id;
+ /*
+ * End of buffer when nested. Not defined for top-level because we
+ * here (and only here) permit strings etc. to be created before the
+ * buffer start, and because top-level buffer vtables can be clustered.
+ */
+ B->buffer_mark = B->emit_start;
+ /* Must be 0 before and after entering top-level buffer, and unique otherwise. */
+ B->nest_id = B->nest_count++;
+ frame(container.buffer.identifier) = B->identifier;
+ set_identifier(identifier);
+ frame(type) = flatcc_builder_buffer;
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root)
+{
+ flatcc_builder_ref_t buffer_ref;
+ flatcc_builder_buffer_flags_t flags;
+
+ flags = (flatcc_builder_buffer_flags_t)B->buffer_flags & flatcc_builder_with_size;
+ flags |= is_top_buffer(B) ? 0 : flatcc_builder_is_nested;
+ check(frame(type) == flatcc_builder_buffer, "expected buffer frame");
+ set_min_align(B, B->block_align);
+ if (0 == (buffer_ref = flatcc_builder_create_buffer(B, (void *)&B->identifier,
+ B->block_align, root, B->min_align, flags))) {
+ return 0;
+ }
+ B->buffer_mark = frame(container.buffer.mark);
+ B->nest_id = frame(container.buffer.nest_id);
+ B->identifier = frame(container.buffer.identifier);
+ B->buffer_flags = frame(container.buffer.flags);
+ exit_frame(B);
+ return buffer_ref;
+}
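+
+/*
+ * Illustrative sketch of the buffer frame pairing:
+ *
+ * flatcc_builder_start_buffer(B, "MONS", 0, 0);
+ * // ... build the root object, yielding root_ref ...
+ * flatcc_builder_ref_t buf_ref = flatcc_builder_end_buffer(B, root_ref);
+ *
+ * "MONS" is a hypothetical 4-byte file identifier; pass 0 for none.
+ * Nested buffers repeat the same pairing inside a table frame.
+ */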
+
+void *flatcc_builder_start_struct(flatcc_builder_t *B, size_t size, uint16_t align)
+{
+ /* Allocate space for the struct on the ds stack. */
+ if (enter_frame(B, align)) {
+ return 0;
+ }
+ frame(type) = flatcc_builder_struct;
+ refresh_ds(B, data_limit);
+ return push_ds(B, (uoffset_t)size);
+}
+
+void *flatcc_builder_struct_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t object_ref;
+
+ check(frame(type) == flatcc_builder_struct, "expected struct frame");
+ if (0 == (object_ref = flatcc_builder_create_struct(B, B->ds, B->ds_offset, B->align))) {
+ return 0;
+ }
+ exit_frame(B);
+ return object_ref;
+}
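+
+/*
+ * Illustrative sketch: a struct is edited in place on the ds stack
+ * between start and end. `my_vec3_t` is a hypothetical struct type.
+ *
+ * my_vec3_t *v = flatcc_builder_start_struct(B, sizeof(*v), 4);
+ * v->x = 1.0f; v->y = 2.0f; v->z = 3.0f;
+ * flatcc_builder_ref_t ref = flatcc_builder_end_struct(B);
+ */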
+
+static inline int vector_count_add(flatcc_builder_t *B, uoffset_t count, uoffset_t max_count)
+{
+ uoffset_t n, n1;
+ n = frame(container.vector.count);
+ n1 = n + count;
+ /*
+ * This prevents elem_size * count from overflowing iff max_count
+ * has been set sensibly. Without this check we might allocate too
+ * little on the ds stack and return a buffer the user thinks is
+ * much larger, which of course is bad even though the buffer would
+ * eventually fail anyway.
+ */
+ check_error(n <= n1 && n1 <= max_count, -1, "vector too large to represent");
+ frame(container.vector.count) = n1;
+ return 0;
+}
+
+void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) <= frame(container.vector.max_count), 0, "vector max count exceeded");
+ frame(container.vector.count) += 1;
+ return push_ds_copy(B, data, frame(container.vector.elem_size));
+}
+
+void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds_copy(B, data, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(field_size * count));
+}
+
+flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B, flatcc_builder_ref_t ref)
+{
+ flatcc_builder_ref_t *p;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (frame(container.vector.count) == max_offset_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, field_size))) {
+ return 0;
+ }
+ *p = ref;
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B, const flatcc_builder_ref_t *refs, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, refs, (uoffset_t)(field_size * count));
+}
+
+char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds_copy(B, s, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_append_string(B, s, strlen(s));
+}
+
+char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_append_string(B, s, strnlen(s, max_len));
+}
+
+int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) >= count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ check_error(frame(container.vector.count) >= len, -1, "cannot truncate string past empty");
+ frame(container.vector.count) -= (uoffset_t)len;
+ unpush_ds(B, (uoffset_t)len);
+ return 0;
+}
+
+int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size, uint16_t align, size_t max_count)
+{
+ get_min_align(&align, field_size);
+ if (enter_frame(B, align)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = (uoffset_t)elem_size;
+ frame(container.vector.count) = 0;
+ frame(container.vector.max_count) = (uoffset_t)max_count;
+ frame(type) = flatcc_builder_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+int flatcc_builder_start_offset_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = field_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_offset_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *vec, size_t count)
+{
+ flatcc_builder_ref_t *_vec;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return 0;
+ }
+ if (!(_vec = flatcc_builder_extend_offset_vector(B, count))) {
+ return 0;
+ }
+ memcpy(_vec, vec, count * field_size);
+ return flatcc_builder_end_offset_vector(B);
+}
+
+int flatcc_builder_start_string(flatcc_builder_t *B)
+{
+ if (enter_frame(B, 1)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = 1;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_string;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+int flatcc_builder_reserve_table(flatcc_builder_t *B, int count)
+{
+ check(count >= 0, "cannot reserve negative count");
+ return reserve_fields(B, count);
+}
+
+int flatcc_builder_start_table(flatcc_builder_t *B, int count)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.table.vs_end) = vs_offset(B->vs);
+ frame(container.table.pl_end) = pl_offset(B->pl);
+ frame(container.table.vt_hash) = B->vt_hash;
+ frame(container.table.id_end) = B->id_end;
+ B->vt_hash = 0;
+ FLATCC_BUILDER_INIT_VT_HASH(B->vt_hash);
+ B->id_end = 0;
+ frame(type) = flatcc_builder_table;
+ if (reserve_fields(B, count)) {
+ return -1;
+ }
+ refresh_ds(B, table_limit);
+ return 0;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size)
+{
+ flatcc_builder_vt_ref_t vt_ref;
+ iov_state_t iov;
+ voffset_t *vt_;
+ size_t i;
+
+ /*
+ * Only top-level buffer can cluster vtables because only it can
+ * extend beyond the end.
+ *
+ * We write the vtable after the referencing table to maintain
+ * the construction invariant that any offset reference has
+ * valid emitted data at a higher address, and also that any
+ * issued negative emit address represents an offset reference
+ * to some flatbuffer object or vector (or possibly a root
+ * struct).
+ *
+ * The vt_ref is stored as the reference + 1 to avoid having 0 as a
+ * valid reference (which usually means error). It also identifies
+ * vtable references as the only uneven references, and the only
+ * references that can be used multiple times in the same buffer.
+ *
+ * We do the vtable conversion here so cached vtables can be built
+ * hashed and compared more efficiently, and so end users with
+ * direct vtable construction don't have to worry about endianness.
+ * This also ensures the hash function works the same wrt.
+ * collision frequency.
+ */
+
+ if (!flatbuffers_is_native_pe()) {
+ /* Make space in vtable cache for temporary endian conversion. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+ return 0;
+ }
+ for (i = 0; i < vt_size / sizeof(voffset_t); ++i) {
+ write_voffset(&vt_[i], vt[i]);
+ }
+ vt = vt_;
+ /* We don't need to free the reservation since we don't advance any base pointer. */
+ }
+
+ init_iov();
+ push_iov(vt, vt_size);
+ if (is_top_buffer(B) && !B->disable_vt_clustering) {
+ /* Note that `emit_back` already returns ref + 1 as we require for vtables. */
+ if (0 == (vt_ref = emit_back(B, &iov))) {
+ return 0;
+ }
+ } else {
+ if (0 == (vt_ref = emit_front(B, &iov))) {
+ return 0;
+ }
+ /*
+ * We don't have a valid 0 ref here, but to be consistent with
+ * clustered vtables we offset by one. This cannot be zero
+ * either.
+ */
+ vt_ref += 1;
+ }
+ return vt_ref;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size, uint32_t vt_hash)
+{
+ vtable_descriptor_t *vd, *vd2;
+ uoffset_t *pvd, *pvd_head;
+ uoffset_t next;
+ voffset_t *vt_;
+
+ /* This just gets the hash table slot, we still have to inspect it. */
+ if (!(pvd_head = lookup_ht(B, vt_hash))) {
+ return 0;
+ }
+ pvd = pvd_head;
+ next = *pvd;
+ /* Tracks if there already is a cached copy. */
+ vd2 = 0;
+ while (next) {
+ vd = vd_ptr(next);
+ vt_ = vb_ptr(vd->vb_start);
+ if (vt_[0] != vt_size || 0 != memcmp(vt, vt_, vt_size)) {
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Can't share emitted vtables between buffers, */
+ if (vd->nest_id != B->nest_id) {
+ /* but we don't have to resubmit to cache. */
+ vd2 = vd;
+ /* See if there is a better match. */
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Move to front hash strategy. */
+ if (pvd != pvd_head) {
+ *pvd = vd->next;
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ }
+ /* vtable exists and has been emitted within current buffer. */
+ return vd->vt_ref;
+ }
+ /* Allocate new descriptor. */
+ if (!(vd = reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0))) {
+ return 0;
+ }
+ next = B->vd_end;
+ B->vd_end += (uoffset_t)sizeof(vtable_descriptor_t);
+
+ /* Identify the buffer this vtable descriptor belongs to. */
+ vd->nest_id = B->nest_id;
+
+ /* Move to front hash strategy. */
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ if (0 == (vd->vt_ref = flatcc_builder_create_vtable(B, vt, vt_size))) {
+ return 0;
+ }
+ if (vd2) {
+ /* Reuse cached copy. */
+ vd->vb_start = vd2->vb_start;
+ } else {
+ if (B->vb_flush_limit && B->vb_flush_limit < B->vb_end + vt_size) {
+ flatcc_builder_flush_vtable_cache(B);
+ } else {
+ /* Make space in vtable cache. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+ return 0;
+ }
+ vd->vb_start = B->vb_end;
+ B->vb_end += vt_size;
+ memcpy(vt_, vt, vt_size);
+ }
+ }
+ return vd->vt_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B, const void *data, size_t size, uint16_t align,
+ flatbuffers_voffset_t *offsets, int offset_count, flatcc_builder_vt_ref_t vt_ref)
+{
+ int i;
+ uoffset_t pad, vt_offset, vt_offset_field, vt_base, base, offset, *offset_field;
+ iov_state_t iov;
+
+ check(offset_count >= 0, "expected non-negative offset_count");
+ /*
+ * vtable references are offset by 1 to avoid confusion with
+ * 0 as an error reference. It also uniquely identifies them
+ * as vtables being the only uneven reference type.
+ */
+ check(vt_ref & 1, "invalid vtable reference");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ /* Alignment is calculated for the first element, not the header. */
+ pad = front_pad(B, (uoffset_t)size, align);
+ base = (uoffset_t)B->emit_start - (uoffset_t)(pad + size + field_size);
+ /* Adjust by 1 to get unencoded vtable reference. */
+ vt_base = (uoffset_t)(vt_ref - 1);
+ vt_offset = base - vt_base;
+ /* Avoid overflow. */
+ if (base - vt_offset != vt_base) {
+ return 0;
+ }
+ /* Protocol endian encoding. */
+ write_uoffset(&vt_offset_field, vt_offset);
+ for (i = 0; i < offset_count; ++i) {
+ offset_field = (uoffset_t *)((size_t)data + offsets[i]);
+ offset = *offset_field - base - offsets[i] - (uoffset_t)field_size;
+ write_uoffset(offset_field, offset);
+ }
+ init_iov();
+ push_iov(&vt_offset_field, field_size);
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return id < B->id_end && B->vs[id] != 0;
+}
+
+int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (id == 0 || id >= B->id_end) {
+ return 0;
+ }
+ if (B->vs[id - 1] == 0) {
+ return B->vs[id] == 0;
+ }
+ if (*(uint8_t *)(B->ds + B->vs[id - 1])) {
+ return B->vs[id] != 0;
+ }
+ return B->vs[id] == 0;
+}
+
+int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count)
+{
+ int i;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (B->id_end < count) {
+ return 0;
+ }
+ for (i = 0; i < count; ++i) {
+ if (B->vs[required[i]] == 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B)
+{
+ voffset_t *vt, vt_size;
+ flatcc_builder_ref_t table_ref, vt_ref;
+ int pl_count;
+ voffset_t *pl;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ /* We have `ds_limit`, so we should not have to check for overflow here. */
+
+ vt = B->vs - 2;
+ vt_size = (voffset_t)(sizeof(voffset_t) * (B->id_end + 2u));
+ /* Update vtable header fields, first vtable size, then object table size. */
+ vt[0] = vt_size;
+ /*
+ * The `ds` buffer is always at least `field_size` aligned but excludes the
+ * initial vtable offset field. Therefore `field_size` is added here
+ * to the total table size in the vtable.
+ */
+ vt[1] = (voffset_t)(B->ds_offset + field_size);
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)vt[0], (uint32_t)vt[1]);
+ /* Find already emitted vtable, or emit a new one. */
+ if (!(vt_ref = flatcc_builder_create_cached_vtable(B, vt, vt_size, B->vt_hash))) {
+ return 0;
+ }
+ /* Clear vs stack so it is ready for the next vtable (ds stack is cleared by exit frame). */
+ memset(vt, 0, vt_size);
+
+ pl = pl_ptr(frame(container.table.pl_end));
+ pl_count = (int)(B->pl - pl);
+ if (0 == (table_ref = flatcc_builder_create_table(B, B->ds, B->ds_offset, B->align, pl, pl_count, vt_ref))) {
+ return 0;
+ }
+ B->vt_hash = frame(container.table.vt_hash);
+ B->id_end = frame(container.table.id_end);
+ B->vs = vs_ptr(frame(container.table.vs_end));
+ B->pl = pl_ptr(frame(container.table.pl_end));
+ exit_frame(B);
+ return table_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
+ const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count)
+{
+ /*
+ * Note: it is important that vec_size is uoffset_t, not size_t,
+ * in case sizeof(uoffset_t) > sizeof(size_t) because max_count is
+ * defined in terms of uoffset_t representation size, and also
+ * because we risk accepting too large a vector even if max_count is
+ * not violated.
+ */
+ uoffset_t vec_size, vec_pad, length_prefix;
+ iov_state_t iov;
+
+ check_error(count <= max_count, 0, "vector max_count violated");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ vec_size = (uoffset_t)count * (uoffset_t)elem_size;
+ /*
+ * Overflow can happen on 32-bit systems when uoffset_t is defined as
+ * 64-bit. `emit_front/back` catches overflow, but not if our size
+ * type wraps first.
+ */
+#if FLATBUFFERS_UOFFSET_MAX > SIZE_MAX
+ check_error(vec_size < SIZE_MAX, 0, "vector larger than address space");
+#endif
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, align);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(data, vec_size);
+ push_iov(_pad, vec_pad);
+ return emit_front(B, &iov);
+}
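+
+/*
+ * Illustrative sketch: emitting a scalar vector directly from user
+ * memory; the count limit follows from the element size.
+ *
+ * uint32_t data[3] = { 1, 2, 3 };
+ * flatcc_builder_ref_t vec_ref = flatcc_builder_create_vector(B,
+ * data, 3, sizeof(data[0]), sizeof(data[0]),
+ * FLATBUFFERS_COUNT_MAX(sizeof(data[0])));
+ */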
+
+/*
+ * Note: FlatBuffers official documentation states that the size field of a
+ * vector is a 32-bit element count. It is not quite clear if the
+ * intention is to have the size field be of type uoffset_t since tables
+ * also have a uoffset_t sized header, or if the vector size should
+ * remain unchanged if uoffset is changed to 16- or 64-bits
+ * respectively. Since it makes most sense to have a vector compatible
+ * with the addressable space, we choose to use uoffset_t as size field,
+ * which remains compatible with the default 32-bit version of uoffset_t.
+ */
+flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+
+ if (0 == (vector_ref = flatcc_builder_create_vector(B, B->ds,
+ frame(container.vector.count), frame(container.vector.elem_size),
+ B->align, frame(container.vector.max_count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+size_t flatcc_builder_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+/* This function destroys the source content but avoids stack allocation. */
+static flatcc_builder_ref_t _create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count, const utype_t *types)
+{
+ uoffset_t vec_size, vec_pad;
+ uoffset_t length_prefix, offset;
+ uoffset_t i;
+ soffset_t base;
+ iov_state_t iov;
+
+ if ((uoffset_t)count > max_offset_count) {
+ return 0;
+ }
+ set_min_align(B, field_size);
+ vec_size = (uoffset_t)(count * field_size);
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, field_size);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(vec, vec_size);
+ push_iov(_pad, vec_pad);
+ base = B->emit_start - (soffset_t)iov.len;
+ for (i = 0; i < (uoffset_t)count; ++i) {
+ /*
+ * 0 is either end of buffer, start of vtables, or start of
+ * buffer depending on the direction in which the buffer is
+ * built. None of these can create a valid 0 reference but it
+ * is easy to create by mistake when manually building offset
+ * vectors.
+ *
+ * Unions do permit nulls, but only when the type is NONE.
+ */
+ if (vec[i] != 0) {
+ offset = (uoffset_t)
+ (vec[i] - base - (soffset_t)(i * field_size) - (soffset_t)field_size);
+ write_uoffset(&vec[i], offset);
+ if (types) {
+ check(types[i] != 0, "union vector cannot have non-null element with type NONE");
+ }
+ } else {
+ if (types) {
+ check(types[i] == 0, "union vector cannot have null element without type NONE");
+ } else {
+ check(0, "offset vector cannot have null element");
+ }
+ }
+ }
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count)
+{
+ return _create_offset_vector_direct(B, vec, count, 0);
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = flatcc_builder_create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B, const utype_t *types)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = _create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count), types))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_ref_t *pref;
+ flatcc_builder_utype_t *putype;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error(uref.type != 0 || uref.value == 0, -1, "expected null value for type NONE");
+ if (uref.value != 0) {
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union value");
+ *pref = uref.value;
+ }
+ putype = flatcc_builder_table_add(B, id - 1, utype_size, utype_size);
+ check_error(putype != 0, -1, "unable to add union type");
+ write_utype(putype, uref.type);
+ return 0;
+}
+
+int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
+ flatcc_builder_union_vec_ref_t uvref)
+{
+ flatcc_builder_ref_t *pref;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error((uvref.type == 0) == (uvref.value == 0), -1, "expected both type and value vector, or neither");
+ if (uvref.type != 0) {
+ pref = flatcc_builder_table_add_offset(B, id - 1);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.type;
+
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.value;
+ }
+ return 0;
+}
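+
+/*
+ * Illustrative sketch: a union field occupies two vtable ids, the type
+ * at `id - 1` and the value at `id`. The type code and member_ref are
+ * hypothetical schema-specific values.
+ *
+ * flatcc_builder_union_ref_t uref;
+ * uref.type = 1; // schema-defined member type, 0 means NONE
+ * uref.value = member_ref; // reference built beforehand
+ * flatcc_builder_table_add_union(B, id, uref);
+ */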
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_ref_t *refs;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return uvref;
+ }
+ if (0 == flatcc_builder_extend_offset_vector(B, count)) {
+ return uvref;
+ }
+ if (0 == (types = push_ds(B, (uoffset_t)(utype_size * count)))) {
+ return uvref;
+ }
+
+ /* Safe even if push_ds caused stack reallocation. */
+ refs = flatcc_builder_offset_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B,
+ types, refs, count);
+ /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+
+ if (0 == (uvref.value = _create_offset_vector_direct(B, data, count, types))) {
+ return uvref;
+ }
+ if (0 == (uvref.type = flatcc_builder_create_type_vector(B, types, count))) {
+ return uvref;
+ }
+ return uvref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, size_t count)
+{
+ return flatcc_builder_create_vector(B, types, count,
+ utype_size, utype_size, max_utype_count);
+}
+
+int flatcc_builder_start_union_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = union_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_union_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_union_ref_t *urefs;
+ flatcc_builder_ref_t *refs;
+ size_t i, count;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+
+ /*
+ * We could split the union vector in-place, but then we would have
+ * to deal with strict pointer aliasing rules, which is not worthwhile,
+ * so we create new offset and type vectors on the stack.
+ *
+ * We assume the stack is sufficiently aligned as is.
+ */
+ count = flatcc_builder_union_vector_count(B);
+ if (0 == (refs = push_ds(B, (uoffset_t)(count * (utype_size + field_size))))) {
+ return uvref;
+ }
+ types = (flatcc_builder_utype_t *)(refs + count);
+
+ /* Safe even if push_ds caused stack reallocation. */
+ urefs = flatcc_builder_union_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B, types, refs, count);
+ /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+void *flatcc_builder_union_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_union_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(union_size * count));
+}
+
+int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_union_ref_t *p;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (frame(container.vector.count) == max_union_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, union_size))) {
+ return 0;
+ }
+ *p = uref;
+ return p;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, urefs, (uoffset_t)(union_size * count));
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ uoffset_t s_pad;
+ uoffset_t length_prefix;
+ iov_state_t iov;
+
+ if (len > max_string_len) {
+ return 0;
+ }
+ write_uoffset(&length_prefix, (uoffset_t)len);
+ /* Add 1 for zero termination. */
+ s_pad = front_pad(B, (uoffset_t)len + 1, field_size) + 1;
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(s, len);
+ push_iov(_pad, s_pad);
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_create_string(B, s, strlen(s));
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_create_string(B, s, strnlen(s, max_len));
+}
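+
+/*
+ * Illustrative sketch: strings may be emitted in one call or built
+ * incrementally on the ds stack.
+ *
+ * flatcc_builder_ref_t s1 = flatcc_builder_create_string_str(B, "hello");
+ *
+ * flatcc_builder_start_string(B);
+ * flatcc_builder_append_string_str(B, "hello ");
+ * flatcc_builder_append_string_str(B, "world");
+ * flatcc_builder_ref_t s2 = flatcc_builder_end_string(B);
+ */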
+
+flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t string_ref;
+
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ FLATCC_ASSERT(frame(container.vector.count) == B->ds_offset);
+ if (0 == (string_ref = flatcc_builder_create_string(B,
+ (const char *)B->ds, B->ds_offset))) {
+ return 0;
+ }
+ exit_frame(B);
+ return string_ref;
+}
+
+char *flatcc_builder_string_edit(flatcc_builder_t *B)
+{
+ return (char *)B->ds;
+}
+
+size_t flatcc_builder_string_len(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align)
+{
+ /*
+ * We align the offset relative to the first table field, excluding
+ * the header holding the vtable reference. On the stack, `ds_first`
+ * is aligned to 8 bytes thanks to the `enter_frame` logic, and this
+ * provides a safe way to update the fields on the stack, but here
+ * we are concerned with the target buffer alignment.
+ *
+ * We could also have aligned relative to the end of the table which
+ * would allow us to emit each field immediately, but it would be a
+ * confusing user experience wrt. field ordering, and it would add
+ * more variability to vtable layouts, thus reducing reuse, and
+ * frequent emissions to external emitter interface would be
+ * sub-optimal. Also, with that approach, the vtable offsets would
+ * have to be adjusted at table end.
+ *
+ * As we have it, each emit occurs at table end, vector end, string
+ * end, or buffer end, which might be helpful to various backend
+ * processors.
+ */
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+ if (align > B->align) {
+ B->align = align;
+ }
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)size);
+ return push_ds_field(B, (uoffset_t)size, align, (voffset_t)id);
+}
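+
+/*
+ * Usage sketch (illustrative): adding an inline scalar field to a
+ * table, assuming `flatcc_builder_start_table` and
+ * `flatcc_builder_end_table` as declared in flatcc_builder.h.
+ *
+ *     uint32_t *p;
+ *     flatcc_builder_ref_t ref;
+ *     flatcc_builder_start_table(B, 1);           // room for one field id
+ *     p = flatcc_builder_table_add(B, 0, 4, 4);   // field id 0, 4 bytes, align 4
+ *     if (p) flatbuffers_uint32_write_to_pe(p, 42);
+ *     ref = flatcc_builder_end_table(B);
+ */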
+
+void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return B->ds + B->ds_offset - size;
+}
+
+void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align)
+{
+ void *p;
+
+ if ((p = flatcc_builder_table_add(B, id, size, align))) {
+ memcpy(p, data, size);
+ }
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)field_size);
+ return push_ds_offset_field(B, (voffset_t)id);
+}
+
+uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B)
+{
+ uint16_t old_min_align = B->min_align;
+
+ B->min_align = field_size;
+ return old_min_align;
+}
+
+void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t pushed_align)
+{
+ set_min_align(B, pushed_align);
+}
+
+uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B)
+{
+ return B->min_align;
+}
+
+void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable)
+{
+ /* Inverted because we zero all memory in B on init. */
+ B->disable_vt_clustering = !enable;
+}
+
+void flatcc_builder_set_block_align(flatcc_builder_t *B, uint16_t align)
+{
+ B->block_align = align;
+}
+
+int flatcc_builder_get_level(flatcc_builder_t *B)
+{
+ return B->level;
+}
+
+void flatcc_builder_set_max_level(flatcc_builder_t *B, int max_level)
+{
+ B->max_level = max_level;
+ if (B->limit_level < B->max_level) {
+ B->limit_level = B->max_level;
+ }
+}
+
+size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B)
+{
+ return (size_t)(B->emit_end - B->emit_start);
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B)
+{
+ return B->emit_start;
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_end(flatcc_builder_t *B)
+{
+ return B->emit_end;
+}
+
+void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size)
+{
+ B->vb_flush_limit = size;
+}
+
+void flatcc_builder_set_identifier(flatcc_builder_t *B, const char identifier[identifier_size])
+{
+ set_identifier(identifier);
+}
+
+enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B)
+{
+ return B->frame ? frame(type) : flatcc_builder_empty;
+}
+
+enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level)
+{
+ if (level < 1 || level > B->level) {
+ return flatcc_builder_empty;
+ }
+ return B->frame[level - B->level].type;
+}
+
+void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ if (B->is_default_emitter) {
+ return flatcc_emitter_get_direct_buffer(&B->default_emit_context, size_out);
+ } else {
+ if (size_out) {
+ *size_out = 0;
+ }
+ }
+ return 0;
+}
+
+void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size)
+{
+ /* User is allowed to call tentatively to see if there is support. */
+ if (!B->is_default_emitter) {
+ return 0;
+ }
+ buffer = flatcc_emitter_copy_buffer(&B->default_emit_context, buffer, size);
+ check(buffer, "default emitter declined to copy buffer");
+ return buffer;
+}
+
+void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+
+ buffer = FLATCC_BUILDER_ALLOC(size);
+
+ if (!buffer) {
+ check(0, "failed to allocated memory for finalized buffer");
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ check(0, "default emitter declined to copy buffer");
+ FLATCC_BUILDER_FREE(buffer);
+ buffer = 0;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
+
+void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t align;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+ align = flatcc_builder_get_buffer_alignment(B);
+
+ size = (size + align - 1) & ~(align - 1);
+ buffer = FLATCC_BUILDER_ALIGNED_ALLOC(align, size);
+
+ if (!buffer) {
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ FLATCC_BUILDER_ALIGNED_FREE(buffer);
+ buffer = 0;
+ goto done;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
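+
+/*
+ * Usage sketch (illustrative): extracting the finished buffer after
+ * `flatcc_builder_end_buffer`, assuming the default emitter is in use.
+ *
+ *     size_t size;
+ *     void *buf = flatcc_builder_finalize_aligned_buffer(B, &size);
+ *     if (buf) {
+ *         // ... use buf[0..size) ...
+ *         flatcc_builder_aligned_free(buf);
+ *     }
+ */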
+
+void *flatcc_builder_aligned_alloc(size_t alignment, size_t size)
+{
+ return FLATCC_BUILDER_ALIGNED_ALLOC(alignment, size);
+}
+
+void flatcc_builder_aligned_free(void *p)
+{
+ FLATCC_BUILDER_ALIGNED_FREE(p);
+}
+
+void *flatcc_builder_alloc(size_t size)
+{
+ return FLATCC_BUILDER_ALLOC(size);
+}
+
+void flatcc_builder_free(void *p)
+{
+ FLATCC_BUILDER_FREE(p);
+}
+
+void *flatcc_builder_get_emit_context(flatcc_builder_t *B)
+{
+ return B->emit_context;
+}
diff --git a/flatcc/src/runtime/emitter.c b/flatcc/src/runtime/emitter.c
new file mode 100644
index 0000000..089ea00
--- /dev/null
+++ b/flatcc/src/runtime/emitter.c
@@ -0,0 +1,269 @@
+#include <stdlib.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_emitter.h"
+
+static int advance_front(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->front && E->front->prev != E->back) {
+ E->front->prev->page_offset = E->front->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ E->front = E->front->prev;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->front) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->front = p;
+ goto done;
+ }
+ /*
+ * The first page is shared between front and back to avoid an
+ * unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->front->page_offset = E->front->next->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int advance_back(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->back && E->back->next != E->front) {
+ E->back = E->back->next;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->back) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->back = p;
+ goto done;
+ }
+ /*
+ * The first page is shared between front and back to avoid an
+ * unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->back_cursor = E->back->page;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->back->page_offset = E->back->prev->page_offset + FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int copy_front(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ data += size;
+ while (size) {
+ k = size;
+ if (k > E->front_left) {
+ k = E->front_left;
+ if (k == 0) {
+ if (advance_front(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ E->front_cursor -= k;
+ E->front_left -= k;
+ data -= k;
+ size -= k;
+ memcpy(E->front_cursor, data, k);
+ }
+ return 0;
+}
+
+static int copy_back(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ while (size) {
+ k = size;
+ if (k > E->back_left) {
+ k = E->back_left;
+ if (k == 0) {
+ if (advance_back(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ memcpy(E->back_cursor, data, k);
+ size -= k;
+ data += k;
+ E->back_cursor += k;
+ E->back_left -= k;
+ }
+ return 0;
+}
+
+int flatcc_emitter_recycle_page(flatcc_emitter_t *E, flatcc_emitter_page_t *p)
+{
+ if (p == E->front || p == E->back) {
+ return -1;
+ }
+ p->next->prev = p->prev;
+ p->prev->next = p->next;
+ p->prev = E->front->prev;
+ p->next = E->front;
+ p->prev->next = p;
+ p->next->prev = p;
+ return 0;
+}
+
+void flatcc_emitter_reset(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!E->front) {
+ return;
+ }
+ E->back = E->front;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->front->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ /* Heuristic to reduce peak allocation over time. */
+ if (E->used_average == 0) {
+ E->used_average = E->used;
+ }
+ E->used_average = E->used_average * 3 / 4 + E->used / 4;
+ E->used = 0;
+ while (E->used_average * 2 < E->capacity && E->back->next != E->front) {
+ /* We deallocate the page after back since it is less likely to be hot in cache. */
+ p = E->back->next;
+ E->back->next = p->next;
+ p->next->prev = E->back;
+ FLATCC_EMITTER_FREE(p);
+ E->capacity -= FLATCC_EMITTER_PAGE_SIZE;
+ }
+}
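+
+/*
+ * Worked example of the reset heuristic above (assumed numbers): with
+ * used_average at 4000 and a new buffer using 8000 bytes, the average
+ * becomes 4000 * 3/4 + 8000 / 4 = 5000, and pages are freed until
+ * capacity no longer exceeds 2 * 5000 bytes, so a one-off spike does
+ * not pin memory permanently.
+ */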
+
+void flatcc_emitter_clear(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!p) {
+ return;
+ }
+ p->prev->next = 0;
+ while (p->next) {
+ p = p->next;
+ FLATCC_EMITTER_FREE(p->prev);
+ }
+ FLATCC_EMITTER_FREE(p);
+ memset(E, 0, sizeof(*E));
+}
+
+int flatcc_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len)
+{
+ flatcc_emitter_t *E = emit_context;
+ uint8_t *p;
+
+ E->used += len;
+ if (offset < 0) {
+ if (len <= E->front_left) {
+ E->front_cursor -= len;
+ E->front_left -= len;
+ p = E->front_cursor;
+ goto copy;
+ }
+ iov += iov_count;
+ while (iov_count--) {
+ --iov;
+ if (copy_front(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ }
+ } else {
+ if (len <= E->back_left) {
+ p = E->back_cursor;
+ E->back_cursor += len;
+ E->back_left -= len;
+ goto copy;
+ }
+ while (iov_count--) {
+ if (copy_back(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ ++iov;
+ }
+ }
+ return 0;
+copy:
+ while (iov_count--) {
+ memcpy(p, iov->iov_base, iov->iov_len);
+ p += iov->iov_len;
+ ++iov;
+ }
+ return 0;
+}
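+
+/*
+ * Usage sketch (illustrative): the builder installs this emitter by
+ * default, but it can also be registered explicitly, assuming the
+ * `flatcc_builder_custom_init` signature from flatcc_builder.h.
+ *
+ *     flatcc_emitter_t emit = { 0 };
+ *     flatcc_builder_custom_init(B, flatcc_emitter, &emit, 0, 0);
+ *     // ... build buffer ...
+ *     flatcc_emitter_clear(&emit);
+ */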
+
+void *flatcc_emitter_copy_buffer(flatcc_emitter_t *E, void *buf, size_t size)
+{
+ flatcc_emitter_page_t *p;
+ size_t len;
+
+ if (size < E->used) {
+ return 0;
+ }
+ if (!E->front) {
+ return 0;
+ }
+ if (E->front == E->back) {
+ memcpy(buf, E->front_cursor, E->used);
+ return buf;
+ }
+ len = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ memcpy(buf, E->front_cursor, len);
+ buf = (uint8_t *)buf + len;
+ p = E->front->next;
+ while (p != E->back) {
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE);
+ buf = (uint8_t *)buf + FLATCC_EMITTER_PAGE_SIZE;
+ p = p->next;
+ }
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE - E->back_left);
+ return buf;
+}
diff --git a/flatcc/src/runtime/json_parser.c b/flatcc/src/runtime/json_parser.c
new file mode 100644
index 0000000..4472af2
--- /dev/null
+++ b/flatcc/src/runtime/json_parser.c
@@ -0,0 +1,1297 @@
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_json_parser.h"
+#include "flatcc/flatcc_assert.h"
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+#include "flatcc/portable/pparsefp.h"
+#include "flatcc/portable/pbase64.h"
+
+#if FLATCC_USE_SSE4_2
+#ifdef __SSE4_2__
+#define USE_SSE4_2
+#endif
+#endif
+
+#ifdef USE_SSE4_2
+#include <nmmintrin.h>
+#define cmpistri(end, haystack, needle, flags) \
+ if (end - haystack >= 16) do { \
+ int i; \
+ __m128i a = _mm_loadu_si128((const __m128i *)(needle)); \
+ do { \
+ __m128i b = _mm_loadu_si128((const __m128i *)(haystack)); \
+ i = _mm_cmpistri(a, b, flags); \
+ haystack += i; \
+ } while (i == 16 && end - haystack >= 16); \
+ } while(0)
+#endif
+
+const char *flatcc_json_parser_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_parser_error_##no: \
+ return str;
+ FLATCC_JSON_PARSE_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+const char *flatcc_json_parser_set_error(flatcc_json_parser_t *ctx, const char *loc, const char *end, int err)
+{
+ if (!ctx->error) {
+ ctx->error = err;
+ ctx->pos = (int)(loc - ctx->line_start + 1);
+ ctx->error_loc = loc;
+ }
+ return end;
+}
+
+const char *flatcc_json_parser_string_part(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+/*
+ * Disabled because it doesn't catch all control characters, but is
+ * useful for performance testing.
+ */
+#if 0
+//#ifdef USE_SSE4_2
+ cmpistri(end, buf, "\"\\\0\r\n\t\v\f", _SIDD_POSITIVE_POLARITY);
+#else
+ /*
+ * Testing for signed char >= 0x20 would also capture UTF-8
+ * encodings that we could verify, and also invalid encodings like
+ * 0xff, but we do not want to enforce strict UTF-8.
+ */
+ while (buf != end && *buf != '\"' && ((unsigned char)*buf) >= 0x20 && *buf != '\\') {
+ ++buf;
+ }
+#endif
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ if (*buf == '"') {
+ return buf;
+ }
+ if (*buf < 0x20) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_character);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_space_ext(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+again:
+#ifdef USE_SSE4_2
+ /*
+ * We can include line break, but then error reporting suffers and
+ * it really makes no big difference.
+ */
+ //cmpistri(end, buf, "\x20\t\v\f\r\n", _SIDD_NEGATIVE_POLARITY);
+ cmpistri(end, buf, "\x20\t\v\f", _SIDD_NEGATIVE_POLARITY);
+#else
+#if FLATCC_ALLOW_UNALIGNED_ACCESS
+ while (end - buf >= 16) {
+ if (*buf > 0x20) {
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ if (((uint64_t *)buf)[0] != 0x2020202020202020) {
+descend:
+ if (((uint32_t *)buf)[0] == 0x20202020) {
+ buf += 4;
+ }
+#endif
+ if (((uint16_t *)buf)[0] == 0x2020) {
+ buf += 2;
+ }
+ if (*buf == 0x20) {
+ ++buf;
+ }
+ if (*buf > 0x20) {
+ return buf;
+ }
+ break;
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ }
+ if (((uint64_t *)buf)[1] != 0x2020202020202020) {
+ buf += 8;
+ goto descend;
+ }
+ buf += 16;
+#endif
+ }
+#endif
+#endif
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ while (buf != end && *buf <= 0x20) {
+ switch (*buf) {
+ case 0x0d: buf += (end - buf > 1 && buf[1] == 0x0a);
+ /* Consume a following LF, otherwise treat the lone CR as a line break. */
+ ++ctx->line; ctx->line_start = ++buf; continue;
+ case 0x0a: ++ctx->line; ctx->line_start = ++buf; continue;
+ case 0x09: ++buf; continue;
+ case 0x20: goto again; /* Don't consume here, sync with power of 2 spaces. */
+ default: return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ return buf;
+}
+
+static int decode_hex4(const char *buf, uint32_t *result)
+{
+ uint32_t u, x;
+ char c;
+
+ u = 0;
+ c = buf[0];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u = x << 12;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 12;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[1];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 8;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 8;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 4;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x;
+ } else {
+ return -1;
+ }
+ }
+ *result = u;
+ return 0;
+}
+
+static int decode_unicode_char(uint32_t u, char *code)
+{
+ if (u <= 0x7f) {
+ code[0] = 1;
+ code[1] = (char)u;
+ } else if (u <= 0x7ff) {
+ code[0] = 2;
+ code[1] = (char)(0xc0 | (u >> 6));
+ code[2] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0xffff) {
+ code[0] = 3;
+ code[1] = (char)(0xe0 | (u >> 12));
+ code[2] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[3] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0x10ffff) {
+ code[0] = 4;
+ code[1] = (char)(0xf0 | (u >> 18));
+ code[2] = (char)(0x80 | ((u >> 12) & 0x3f));
+ code[3] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[4] = (char)(0x80 | (u & 0x3f));
+ } else {
+ code[0] = 0;
+ return -1;
+ }
+ return 0;
+}
+
+static inline uint32_t combine_utf16_surrogate_pair(uint32_t high, uint32_t low)
+{
+ return (high - 0xd800) * 0x400 + (low - 0xdc00) + 0x10000;
+}
+
+static inline int decode_utf16_surrogate_pair(uint32_t high, uint32_t low, char *code)
+{
+ return decode_unicode_char(combine_utf16_surrogate_pair(high, low), code);
+}
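+
+/*
+ * Worked example (illustrative): the escape pair "\ud83d\ude00" decodes
+ * as high = 0xd83d, low = 0xde00, so
+ *
+ *     (0xd83d - 0xd800) * 0x400 + (0xde00 - 0xdc00) + 0x10000 = 0x1f600
+ *
+ * which decode_unicode_char emits as the 4-byte UTF-8 sequence
+ * f0 9f 98 80 (U+1F600).
+ */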
+
+
+/*
+ * A UTF-8 code point can occupy up to 4 bytes, but a single JSON
+ * \uXXXX escape can only express the 3-byte (BMP) range.
+ * To handle the range U+10000..U+10FFFF two UTF-16 surrogate
+ * pairs must be used. If this is not detected, the pairs
+ * survive in the output which is not valid but often tolerated.
+ * Emojis generally require such a pair, unless encoded
+ * unescaped in UTF-8.
+ *
+ * If a high surrogate half is detected and a low surrogate half
+ * follows, the combined pair is decoded as a 4 byte
+ * UTF-8 sequence. Unpaired surrogate halves are decoded as is
+ * despite being invalid UTF-8 values.
+ */
+
+const char *flatcc_json_parser_string_escape(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_json_parser_escape_buffer_t code)
+{
+ char c, v;
+ uint32_t u, u2;
+
+ if (end - buf < 2 || buf[0] != '\\') {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ switch (buf[1]) {
+ case 'x':
+ v = 0;
+ code[0] = 1;
+ if (end - buf < 4) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ v |= (c - '0') << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= (c - 'a' + 10) << 4;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ v |= c - '0';
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= c - 'a' + 10;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ code[1] = v;
+ return buf + 4;
+ case 'u':
+ if (end - buf < 6) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ if (decode_hex4(buf + 2, &u)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ };
+ /* If a high UTF-16 surrogate half was detected */
+ if (u >= 0xd800 && u <= 0xdbff &&
+ /* and there is space for a matching low half pair */
+ end - buf >= 12 &&
+ /* and there is a second escape following immediately */
+ buf[6] == '\\' && buf[7] == 'u' &&
+ /* and it is valid hex */
+ decode_hex4(buf + 8, &u2) == 0 &&
+ /* and it is a low UTF-16 surrogate half */
+ u2 >= 0xdc00 && u2 <= 0xdfff) {
+ /* then decode the pair into a single 4 byte utf-8 sequence. */
+ if (decode_utf16_surrogate_pair(u, u2, code)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ return buf + 12;
+ /*
+ * Otherwise decode unmatched surrogate halves as is, like any
+ * other code point. Some systems might depend on these surviving.
+ * Leave ignored errors for the next parse step.
+ */
+ }
+ decode_unicode_char(u, code);
+ return buf + 6;
+ case 't':
+ code[0] = 1;
+ code[1] = '\t';
+ return buf + 2;
+ case 'n':
+ code[0] = 1;
+ code[1] = '\n';
+ return buf + 2;
+ case 'r':
+ code[0] = 1;
+ code[1] = '\r';
+ return buf + 2;
+ case 'b':
+ code[0] = 1;
+ code[1] = '\b';
+ return buf + 2;
+ case 'f':
+ code[0] = 1;
+ code[1] = '\f';
+ return buf + 2;
+ case '\"':
+ code[0] = 1;
+ code[1] = '\"';
+ return buf + 2;
+ case '\\':
+ code[0] = 1;
+ code[1] = '\\';
+ return buf + 2;
+ case '/':
+ code[0] = 1;
+ code[1] = '/';
+ return buf + 2;
+ default:
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+}
+
+/* Only applies to unquoted constants during generic parsing; otherwise the constant is skipped as a string. */
+const char *flatcc_json_parser_skip_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char c;
+ const char *k;
+
+ while (buf != end) {
+ c = *buf;
+ if ((c & 0x80) || (c == '_') || (c >= '0' && c <= '9') || c == '.') {
+ ++buf;
+ continue;
+ }
+ /* Upper case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ ++buf;
+ continue;
+ }
+ buf = flatcc_json_parser_space(ctx, (k = buf), end);
+ if (buf == k) {
+ return buf;
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_match_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos, int *more)
+{
+ const char *mark = buf, *k = buf + pos;
+
+ if (end - buf <= pos) {
+ *more = 0;
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ buf = flatcc_json_parser_space(ctx, k, end);
+ if (buf == end) {
+ /*
+ * We cannot make a decision on `more`.
+ * Just return end and let the parser handle the sync point in
+ * case it is able to resume the parse later on.
+ * For the same reason we do not lower ctx->unquoted.
+ */
+ *more = 0;
+ return buf;
+ }
+ if (buf != k) {
+ char c = *buf;
+ /*
+ * Space was seen - and thus we have a valid match.
+ * If the next char is an identifier start symbol
+ * we raise the more flag to support syntax like:
+ *
+ * `flags: Hungry Sleepy Awake, ...`
+ */
+ if (c == '_' || (c & 0x80)) {
+ *more = 1;
+ return buf;
+ }
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ *more = 1;
+ return buf;
+ }
+ }
+ /*
+ * Space was not seen, so the match is only valid if followed
+ * by a JSON separator symbol, and there cannot be more values
+ * following so `more` is lowered.
+ */
+ *more = 0;
+ if (*buf == ',' || *buf == '}' || *buf == ']') {
+ return buf;
+ }
+ return mark;
+ }
+#endif
+ buf = k;
+ if (*buf == 0x20) {
+ ++buf;
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ /* We accept untrimmed space like " Green Blue ". */
+ if (*buf != '\"') {
+ *more = 1;
+ return buf;
+ }
+ }
+ switch (*buf) {
+ case '\\':
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ case '\"':
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ *more = 0;
+ return buf;
+ }
+ *more = 0;
+ return mark;
+}
+
+const char *flatcc_json_parser_unmatched_symbol(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (ctx->flags & flatcc_json_parser_f_skip_unknown) {
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf != end && *buf == ':') {
+ ++buf;
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ return flatcc_json_parser_generic_json(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_symbol);
+ }
+}
+
+static const char *__flatcc_json_parser_number(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end) {
+ return buf;
+ }
+ if (*buf == '-') {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ }
+ if (*buf == '0') {
+ ++buf;
+ } else {
+ if (*buf < '1' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ if (buf != end) {
+ if (*buf == '.') {
+ ++buf;
+ /* Guard against input ending right after the '.'. */
+ if (buf == end || *buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E')) {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ if (*buf == '+' || *buf == '-') {
+ ++buf;
+ }
+ if (buf == end || *buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+
+ /*
+ * For strtod termination we must ensure the tail is not a valid
+ * continuation, including non-JSON exponent forms. The simplest
+ * approach is to accept anything that could be a valid JSON successor
+ * character, and to reject end of buffer since we expect a closing
+ * '}'.
+ *
+ * The ',' is actually not safe if strtod uses a non-POSIX locale.
+ */
+ if (buf != end) {
+ switch (*buf) {
+ case ',':
+ case ':':
+ case ']':
+ case '}':
+ case ' ':
+ case '\r':
+ case '\t':
+ case '\n':
+ case '\v':
+ return buf;
+ }
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+}
+
+const char *flatcc_json_parser_double(flatcc_json_parser_t *ctx, const char *buf, const char *end, double *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_double(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_double_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
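+
+/*
+ * Examples (illustrative): "0.5", "-1e3" and "42" are accepted, while
+ * ".5" and "00.5" are rejected up front by the guard above, matching
+ * strict JSON number syntax.
+ */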
+
+const char *flatcc_json_parser_float(flatcc_json_parser_t *ctx, const char *buf, const char *end, float *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_float(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_float_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
+
+const char *flatcc_json_parser_generic_json(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char stack[FLATCC_JSON_PARSE_GENERIC_MAX_NEST];
+ char *sp, *spend;
+ const char *k;
+ flatcc_json_parser_escape_buffer_t code;
+ int more = 0;
+
+ sp = stack;
+ spend = sp + FLATCC_JSON_PARSE_GENERIC_MAX_NEST;
+
+again:
+ if (buf == end) {
+ return buf;
+ }
+ if (sp != stack && sp[-1] == '}') {
+ /* Inside an object, about to read field name. */
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ }
+ if (*buf != ':') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ switch (*buf) {
+ case '\"':
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, buf, end);
+ if (buf != end && *buf == '\"') {
+ break;
+ }
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ }
+ buf = flatcc_json_parser_string_end(ctx, buf, end);
+ break;
+ case '-':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ buf = __flatcc_json_parser_number(ctx, buf, end);
+ break;
+#if !FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ case 't': case 'f':
+ {
+ uint8_t v;
+ buf = flatcc_json_parser_bool(ctx, (k = buf), end, &v);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ break;
+ case 'n':
+ buf = flatcc_json_parser_null((k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#endif
+ case '[':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = ']';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == ']') {
+ break;
+ }
+ goto again;
+ case '{':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = '}';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == '}') {
+ break;
+ }
+ goto again;
+
+ default:
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ buf = flatcc_json_parser_skip_constant(ctx, (k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#else
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+#endif
+ }
+ while (buf != end && sp != stack) {
+ --sp;
+ if (*sp == ']') {
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ } else {
+ buf = flatcc_json_parser_object_end(ctx, buf, end, &more);
+ }
+ if (more) {
+ ++sp;
+ goto again;
+ }
+ }
+ if (buf == end && sp != stack) {
+ return flatcc_json_parser_set_error(ctx, buf, end, sp[-1] == ']' ?
+ flatcc_json_parser_error_unbalanced_array :
+ flatcc_json_parser_error_unbalanced_object);
+ }
+ /* Any ',', ']', or '}' belongs to parent context. */
+ return buf;
+}
+
+const char *flatcc_json_parser_integer(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_sign, uint64_t *value)
+{
+ uint64_t x0, x = 0;
+ const char *k;
+
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ *value_sign = *buf == '-';
+ buf += *value_sign;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ x0 = x;
+ x = x * 10 + (uint64_t)(*buf - '0');
+ if (x0 > x) {
+ return flatcc_json_parser_set_error(ctx, buf, end, *value_sign ?
+ flatcc_json_parser_error_underflow : flatcc_json_parser_error_overflow);
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ /* Give up, but don't fail the parse just yet, it might be a valid symbol. */
+ return buf;
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E' || *buf == '.')) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_float_unexpected);
+ }
+ *value = x;
+ return buf;
+}
+
+/* Array Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_uint8_vector_base64(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref, int urlsafe)
+{
+ const char *mark;
+ uint8_t *pval;
+ size_t max_len;
+ size_t decoded_len, src_len;
+ int mode;
+ int ret;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end || *buf != '\"') {
+ goto base64_failed;
+ }
+ max_len = base64_decoded_size((size_t)(buf - mark));
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) {
+ goto failed;
+ }
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, max_len))) {
+ goto failed;
+ }
+ src_len = (size_t)(buf - mark);
+ decoded_len = max_len;
+ if ((ret = base64_decode(pval, (const uint8_t *)mark, &decoded_len, &src_len, mode))) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (src_len != (size_t)(buf - mark)) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (decoded_len < max_len) {
+ if (flatcc_builder_truncate_vector(ctx->ctx, max_len - decoded_len)) {
+ goto failed;
+ }
+ }
+ if (!(*ref = flatcc_builder_end_vector(ctx->ctx))) {
+ goto failed;
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+
+base64_failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end,
+ urlsafe ? flatcc_json_parser_error_base64url : flatcc_json_parser_error_base64);
+}
+
+const char *flatcc_json_parser_char_array(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, char *s, size_t n)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+ size_t k = 0;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ if (buf != end)
+ while (*buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end) return end;
+ k = (size_t)(buf - mark);
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ if (*buf == '\"') break;
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (buf == end) return end;
+ k = (size_t)code[0];
+ mark = code + 1;
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ }
+ if (n != 0) {
+ if (ctx->flags & flatcc_json_parser_f_reject_array_underflow) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_underflow);
+ }
+ memset(s, 0, n);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+}
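+
+/*
+ * Example (illustrative): for a fixed-size char[4] array field, the
+ * JSON string "abc" is stored as 'a','b','c','\0' (zero padded), while
+ * "abcde" fails with array_overflow unless
+ * `flatcc_json_parser_f_skip_array_overflow` is set, in which case it
+ * is truncated to 4 bytes.
+ */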
+
+
+/* String Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_string(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf != end && *buf == '\"') {
+ *ref = flatcc_builder_create_string(ctx->ctx, mark, (size_t)(buf - mark));
+ } else {
+ if (flatcc_builder_start_string(ctx->ctx) ||
+ 0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (0 == flatcc_builder_append_string(ctx->ctx, code + 1, (size_t)code[0])) goto failed;
+ if (end != (buf = flatcc_json_parser_string_part(ctx, (mark = buf), end))) {
+ if (0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ }
+ }
+ *ref = flatcc_builder_end_string(ctx->ctx);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return buf;
+}
+
+/* UNIONS */
+
+/*
+ * Unions are difficult to parse because the type field may appear after
+ * the union table and because having two fields opens up for many more
+ * possible error scenarios. We must store each union of a table
+ * temporarily - this cannot be in the generated table parser function
+ * because there could be many unions (about 2^15 with default voffsets)
+ * although usually there will be only a few. Nor can we store the
+ * data encoded in the builder's existing table buffer, because we may
+ * have to remove it due to schema forwarding, and removing it messes up
+ * the table layout. We also cannot naively allocate it dynamically for
+ * performance reasons. Instead we place the temporary union data in a
+ * separate frame from the table buffer, but on a similar stack. This is
+ * called the user stack and we manage one frame per table that is known
+ * to contain unions.
+ *
+ * Even with the temporary structures in place we still cannot parse a
+ * union before we know its type. Because JSON pretty printers often
+ * sort fields alphabetically, we are likely to receive the type late
+ * (`<union_name>_type` following `<union_name>`). To deal with this we
+ * store a backtracking pointer, parse the value generically in a first
+ * pass, and reparse it once the type is known. This can happen
+ * recursively with nested tables containing unions, which is why we
+ * need a stack frame.
+ *
+ * If the type field is stored first, we just store the type in the
+ * custom frame and immediately parse the table with the right type
+ * once we see it. The parse will be much faster, and we strongly
+ * recommend that FlatBuffers serializers emit the type first, but we
+ * cannot require it.
+ *
+ * The actual overhead of dealing with the custom stack frame is fairly
+ * cheap once we get past the first custom stack allocation.
+ *
+ * We cannot update the builder before both the table and the table
+ * type have been parsed, because the type might have to be ignored due
+ * to schema forwarding. Therefore the union type must be cached or
+ * reread. This happens trivially by calling the union parser with the
+ * type as argument, but it is important to be aware of this before
+ * refactoring the code.
+ *
+ * The user frame is created at table start and remains valid until
+ * table exit, but we cannot assume the pointers to the frame remain
+ * valid. Specifically we cannot use frame pointers after calling
+ * the union parser. This means the union type must be cached or reread
+ * so it can be added to the table. Because the type is passed to
+ * the union parser this caching happens automatically but it is still
+ * important to be aware that it is required.
+ *
+ * The frame reserves temporary information for all unions the table
+ * holds, enumerated 0 <= `union_index` < `union_total`
+ * where `union_total` is a fixed, type-specific number.
+ *
+ * The `type_present` is needed because union types range from 0..255
+ * and we need an extra bit to distinguish 'not present' from union type
+ * `NONE = 0`.
+ */
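+
+/*
+ * Illustrative JSON for a hypothetical union field `test` with type
+ * field `test_type`:
+ *
+ *     { "test_type": "Weapon", "test": { "damage": 3 } }   // fast path
+ *     { "test": { "damage": 3 }, "test_type": "Weapon" }   // backtracks
+ *
+ * The first form parses `test` directly once the type is known; the
+ * second stores a backtrace pointer at `test` and reparses it when
+ * `test_type` arrives.
+ */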
+
+typedef struct {
+ const char *backtrace;
+ const char *line_start;
+ int line;
+ uint8_t type_present;
+ uint8_t type;
+ /* Union vectors: */
+ uoffset_t count;
+ size_t h_types;
+} __flatcc_json_parser_union_entry_t;
+
+typedef struct {
+ size_t union_total;
+ size_t union_count;
+ __flatcc_json_parser_union_entry_t unions[1];
+} __flatcc_json_parser_union_frame_t;
+
+const char *flatcc_json_parser_prepare_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_total, size_t *handle)
+{
+ __flatcc_json_parser_union_frame_t *f;
+
+ if (!(*handle = flatcc_builder_enter_user_frame(ctx->ctx,
+ sizeof(__flatcc_json_parser_union_frame_t) + (union_total - 1) *
+ sizeof(__flatcc_json_parser_union_entry_t)))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+ }
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, *handle);
+ /* Frames have zeroed memory. */
+ f->union_total = union_total;
+ return buf;
+}
+
+const char *flatcc_json_parser_finalize_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t handle)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+
+ if (f->union_count) {
+ buf = flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_incomplete);
+ }
+ flatcc_builder_exit_user_frame_at(ctx->ctx, handle);
+ return buf;
+}
+
+const char *flatcc_json_parser_union(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = &f->unions[union_index];
+ flatcc_builder_union_ref_t uref;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ /* If we supported `table: null` we would not count it here, but we don't support it. */
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ uref.type = e->type;
+ if (e->type == 0) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_none_present);
+ }
+ --f->union_count;
+ buf = union_parser(ctx, buf, end, e->type, &uref.value);
+ if (buf != end) {
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ flatcc_builder_union_ref_t uref;
+ const char *mark;
+ int line;
+ const char *line_start;
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &e->type);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, buf, end, type_parsers, &e->type);
+ }
+ /* Only count the union if the type is not NONE. */
+ if (e->backtrace == 0) {
+ f->union_count += e->type != 0;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ /*
+ * IMPORTANT: we cannot access any value in the frame or entry
+ * pointer after calling union parse because it might cause the
+ * stack to reallocate. We should read the frame pointer again if
+ * needed - we don't but remember it if refactoring code.
+ *
+ * IMPORTANT 2: Do not assign buf here. We are backtracking.
+ */
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ uref.type = e->type;
+ if (end == union_parser(ctx, e->backtrace, end, e->type, &uref.value)) {
+ return end;
+ }
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+}
+
+static const char *_parse_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t h_types, uoffset_t count,
+ flatbuffers_voffset_t id, flatcc_json_parser_union_f *union_parser)
+{
+ flatcc_builder_ref_t ref = 0, *pref;
+ utype_t *types;
+ int more;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(ctx->ctx)) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ i = 0;
+ while (more) {
+ if (i == count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ buf = union_parser(ctx, buf, end, types[i], &ref);
+ if (buf == end) {
+ return buf;
+ }
+ if (!(pref = flatcc_builder_extend_offset_vector(ctx->ctx, 1))) goto failed;
+ *pref = ref;
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ ++i;
+ }
+ if (i != count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ if (!(ref = flatcc_builder_end_offset_vector_for_unions(ctx->ctx, types))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id))) goto failed;
+ *pref = ref;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+const char *flatcc_json_parser_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ --f->union_count;
+ buf = _parse_union_vector(ctx, buf, end, e->h_types, e->count, id, union_parser);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser,
+ flatcc_json_parser_is_known_type_f accept_type)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ const char *mark;
+ int line;
+ const char *line_start;
+ int more;
+ utype_t val;
+ void *pval;
+ flatcc_builder_ref_t ref, *pref;
+ utype_t *types;
+ size_t size;
+ size_t h_types;
+ uoffset_t count;
+
+#if FLATBUFFERS_UTYPE_MAX != UINT8_MAX
+#error "Update union vector parser to support current union type definition."
+#endif
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ while (more) {
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, 1))) goto failed;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, (mark = buf), end, type_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ /* Parse unknown types as NONE */
+ if (!accept_type(val)) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_unknown)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_union);
+ }
+ val = 0;
+ }
+ flatbuffers_uint8_write_to_pe(pval, val);
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ }
+ count = (uoffset_t)flatcc_builder_vector_count(ctx->ctx);
+ e->count = count;
+ size = count * utype_size;
+ /* Store type vector so it is accessible to the table vector parser. */
+ h_types = flatcc_builder_enter_user_frame(ctx->ctx, size);
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ memcpy(types, flatcc_builder_vector_edit(ctx->ctx), size);
+ if (!((ref = flatcc_builder_end_vector(ctx->ctx)))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id - 1))) goto failed;
+ *pref = ref;
+
+ /* Restore union frame after possible invalidation due to types frame allocation. */
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ e = f->unions + union_index;
+
+ e->h_types = h_types;
+ if (e->backtrace == 0) {
+ ++f->union_count;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ /* We must not assign buf here because we are backtracking. */
+ if (end == _parse_union_vector(ctx, e->backtrace, end, h_types, count, id, union_parser)) return end;
+ /*
+ * NOTE: We do not need the user frame anymore, but if we did, it
+ * would have to be restored from its handle due to the above parse.
+ */
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+int flatcc_json_parser_table_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ flatcc_builder_buffer_flags_t builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
+
+int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, flatcc_json_parser_flags_t flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ flatcc_builder_buffer_flags_t builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
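+
+/*
+ * Usage sketch (illustrative): parsing JSON into a finished buffer with
+ * a generated root table parser. `MyTable_parse_json_table` stands for
+ * a hypothetical generated symbol of type flatcc_json_parser_table_f.
+ *
+ *     flatcc_builder_t builder, *B = &builder;
+ *     flatcc_builder_init(B);
+ *     if (flatcc_json_parser_table_as_root(B, 0, json, strlen(json),
+ *             0, 0, MyTable_parse_json_table)) {
+ *         // handle parse error
+ *     }
+ */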
diff --git a/flatcc/src/runtime/json_printer.c b/flatcc/src/runtime/json_printer.c
new file mode 100644
index 0000000..4ebe1c1
--- /dev/null
+++ b/flatcc/src/runtime/json_printer.c
@@ -0,0 +1,1486 @@
+/*
+ * Runtime support for printing flatbuffers to JSON.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_assert.h"
+
+/*
+ * Grisu significantly improves printing speed of floating point values
+ * and also the overall printing speed when floating point values are
+ * present in non-trivial amounts. (Also applies to parsing).
+ */
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_json_printer.h"
+#include "flatcc/flatcc_identifier.h"
+
+#include "flatcc/portable/pprintint.h"
+#include "flatcc/portable/pprintfp.h"
+#include "flatcc/portable/pbase64.h"
+
+
+#define RAISE_ERROR(err) flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_##err)
+
+const char *flatcc_json_printer_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_printer_error_##no: \
+ return str;
+ FLATCC_JSON_PRINT_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+#define flatcc_json_printer_utype_enum_f flatcc_json_printer_union_type_f
+#define flatbuffers_utype_read_from_pe __flatbuffers_utype_read_from_pe
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+
+#if FLATBUFFERS_UTYPE_MAX == UINT8_MAX
+#define print_utype print_uint8
+#else
+#ifdef FLATBUFFERS_UTYPE_MIN
+#define print_utype print_int64
+#else
+#define print_utype print_uint64
+#endif
+#endif
+
+static inline const void *read_uoffset_ptr(const void *p)
+{
+ return (uint8_t *)p + __flatbuffers_uoffset_read_from_pe(p);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline const void *get_field_ptr(flatcc_json_printer_table_descriptor_t *td, int id)
+{
+ uoffset_t vo = (uoffset_t)(id + 2) * (uoffset_t)sizeof(voffset_t);
+
+ if (vo >= (uoffset_t)td->vsize) {
+ return 0;
+ }
+ vo = read_voffset(td->vtable, vo);
+ if (vo == 0) {
+ return 0;
+ }
+ return (uint8_t *)td->table + vo;
+}
+
+#define print_char(c) *ctx->p++ = (c)
+
+#define print_null() do { \
+ print_char('n'); \
+ print_char('u'); \
+ print_char('l'); \
+ print_char('l'); \
+} while (0)
+
+#define print_start(c) do { \
+ ++ctx->level; \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_end(c) do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ --ctx->level; \
+ print_indent(ctx); \
+ } \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_space() do { \
+ *ctx->p = ' '; \
+ ctx->p += !!ctx->indent; \
+} while (0)
+
+#define print_nl() do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ print_indent(ctx); \
+ } else { \
+ flatcc_json_printer_flush_partial(ctx); \
+ } \
+} while (0)
+
+/* Call at the end so print_end does not have to check for level. */
+#define print_last_nl() do { \
+ if (ctx->indent && ctx->level == 0) { \
+ *ctx->p++ = '\n'; \
+ } \
+ ctx->flush(ctx, 1); \
+} while (0)
+
+int flatcc_json_printer_fmt_float(char *buf, float n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_float(buf, n);
+#else
+ return print_float(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_double(char *buf, double n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_double(buf, n);
+#else
+ return print_double(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_bool(char *buf, int n)
+{
+ if (n) {
+ memcpy(buf, "true", 4);
+ return 4;
+ }
+ memcpy(buf, "false", 5);
+ return 5;
+}
+
+static void print_ex(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memcpy(ctx->p, s, k);
+ ctx->p += k;
+ s += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+}
+
+static inline void print(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ if (ctx->p + n >= ctx->pflush) {
+ print_ex(ctx, s, n);
+ } else {
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+ }
+}
+
+static void print_escape(flatcc_json_printer_t *ctx, unsigned char c)
+{
+ unsigned char x;
+
+ print_char('\\');
+ switch (c) {
+ case '"': print_char('\"'); break;
+ case '\\': print_char('\\'); break;
+ case '\t' : print_char('t'); break;
+ case '\f' : print_char('f'); break;
+ case '\r' : print_char('r'); break;
+ case '\n' : print_char('n'); break;
+ case '\b' : print_char('b'); break;
+ default:
+ print_char('u');
+ print_char('0');
+ print_char('0');
+ x = c >> 4;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ x = c & 15;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ break;
+ }
+}
+
+/*
+ * Even though we know the string length, we need to scan for escape
+ * characters. There may be embedded zeroes. Because FlatBuffer strings
+ * are always zero terminated, we assume and optimize for this.
+ *
+ * We enforce \u00xx for control characters, but not for invalid
+ * characters like 0xff - this makes it possible to handle some other
+ * codepages transparently while formally not valid. (Formally JSON
+ * also supports UTF-16/32 little/big endian, but FlatBuffers only
+ * supports UTF-8 and we expect this in JSON input/output too).
+ */
+static void print_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c;
+ size_t k;
+
+ print_char('\"');
+ for (;;) {
+ c = (unsigned char)*p;
+ while (c >= 0x20 && c != '\"' && c != '\\') {
+ c = (unsigned char)*++p;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ n -= k;
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
+
+/*
+ * Similar to print_string, but null termination is not guaranteed, and
+ * trailing nulls are stripped.
+ */
+static void print_char_array(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c = 0;
+ size_t k;
+
+ while (n > 0 && s[n - 1] == '\0') --n;
+
+ print_char('\"');
+ for (;;) {
+ while (n) {
+ c = (unsigned char)*p;
+ if (c < 0x20 || c == '\"' || c == '\\') break;
+ ++p;
+ --n;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
+
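+/*
+ * Base64 encodes 3 input bytes as 4 output chars, so the loop below
+ * rounds the free buffer space down to a whole number of 4-char groups
+ * (`& ~3`). Each chunk then consumes complete 3-byte groups and emits
+ * no padding mid-stream; only the final call uses the padded mode.
+ */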
+static void print_uint8_vector_base64_object(flatcc_json_printer_t *ctx, const void *p, int mode)
+{
+ const int unpadded_mode = mode & ~base64_enc_modifier_padding;
+ size_t k, n, len;
+ const uint8_t *data;
+ size_t data_len, src_len;
+
+ data_len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ data = (const uint8_t *)p + uoffset_size;
+
+ print_char('\"');
+
+ len = base64_encoded_size(data_len, mode);
+ if (ctx->p + len >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ while (ctx->p + len > ctx->pflush) {
+        /* Multiples of 4 output chars consume exactly 3 input bytes before final padding. */
+        k = (size_t)(ctx->pflush - ctx->p) & ~(size_t)3;
+        n = k * 3 / 4;
+        FLATCC_ASSERT(n > 0);
+        src_len = n;
+ base64_encode((uint8_t *)ctx->p, data, 0, &src_len, unpadded_mode);
+ ctx->p += k;
+ data += n;
+ data_len -= n;
+ ctx->flush(ctx, 0);
+ len = base64_encoded_size(data_len, mode);
+ }
+ base64_encode((uint8_t *)ctx->p, data, 0, &data_len, mode);
+ ctx->p += len;
+ print_char('\"');
+}
+
+static void print_indent_ex(flatcc_json_printer_t *ctx, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memset(ctx->p, ' ', k);
+ ctx->p += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+}
+
+static inline void print_indent(flatcc_json_printer_t *ctx)
+{
+ size_t n = (size_t)(ctx->level * ctx->indent);
+
+ if (ctx->p + n > ctx->pflush) {
+ print_indent_ex(ctx, n);
+ } else {
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+ }
+}
+
+/*
+ * Helpers for external use - they do not do automatic pretty printing,
+ * but they do escape strings.
+ */
+void flatcc_json_printer_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print_string(ctx, s, n);
+}
+
+void flatcc_json_printer_write(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print(ctx, s, n);
+}
+
+void flatcc_json_printer_nl(flatcc_json_printer_t *ctx)
+{
+ print_char('\n');
+ flatcc_json_printer_flush_partial(ctx);
+}
+
+void flatcc_json_printer_char(flatcc_json_printer_t *ctx, char c)
+{
+ print_char(c);
+}
+
+void flatcc_json_printer_indent(flatcc_json_printer_t *ctx)
+{
+ /*
+ * This is only needed when indent is 0 but helps external users
+ * to avoid flushing when indenting.
+ */
+ print_indent(ctx);
+}
+
+void flatcc_json_printer_add_level(flatcc_json_printer_t *ctx, int n)
+{
+ ctx->level += n;
+}
+
+int flatcc_json_printer_get_level(flatcc_json_printer_t *ctx)
+{
+ return ctx->level;
+}
+
+static inline void print_symbol(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+}
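+
+/*
+ * The quote handling above is branchless: the quote is written
+ * speculatively and `p` only advances past it when quoting is enabled
+ * (`!ctx->unquote`). The reserve zone past `pflush` guarantees room for
+ * the speculative byte even when it is not consumed.
+ */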
+
+static inline void print_name(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ print_nl();
+ print_symbol(ctx, name, len);
+ print_char(':');
+ print_space();
+}
+
+#define __flatcc_define_json_printer_scalar(TN, T) \
+void flatcc_json_printer_ ## TN( \
+ flatcc_json_printer_t *ctx, T v) \
+{ \
+ ctx->p += print_ ## TN(v, ctx->p); \
+}
+
+__flatcc_define_json_printer_scalar(uint8, uint8_t)
+__flatcc_define_json_printer_scalar(uint16, uint16_t)
+__flatcc_define_json_printer_scalar(uint32, uint32_t)
+__flatcc_define_json_printer_scalar(uint64, uint64_t)
+__flatcc_define_json_printer_scalar(int8, int8_t)
+__flatcc_define_json_printer_scalar(int16, int16_t)
+__flatcc_define_json_printer_scalar(int32, int32_t)
+__flatcc_define_json_printer_scalar(int64, int64_t)
+__flatcc_define_json_printer_scalar(float, float)
+__flatcc_define_json_printer_scalar(double, double)
+
+void flatcc_json_printer_enum(flatcc_json_printer_t *ctx, const char *symbol, size_t len)
+{
+ print_symbol(ctx, symbol, len);
+}
+
+void flatcc_json_printer_delimit_enum_flags(flatcc_json_printer_t *ctx, int multiple)
+{
+#if FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS
+ int quote = !ctx->unquote || multiple;
+#else
+ int quote = !ctx->unquote;
+#endif
+ *ctx->p = '"';
+ ctx->p += quote;
+}
+
+void flatcc_json_printer_enum_flag(flatcc_json_printer_t *ctx, int count, const char *symbol, size_t len)
+{
+ *ctx->p = ' ';
+ ctx->p += count > 0;
+ print(ctx, symbol, len);
+}
+
+static inline void print_string_object(flatcc_json_printer_t *ctx, const void *p)
+{
+ size_t len;
+ const char *s;
+
+ len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ s = (const char *)p + uoffset_size;
+ print_string(ctx, s, len);
+}
+
+#define __define_print_scalar_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _struct_field(flatcc_json_printer_t *ctx,\
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+void flatcc_json_printer_char_array_struct_field(
+ flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len, size_t count)
+{
+ p = (void *)((size_t)p + offset);
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_char_array(ctx, p, count);
+}
+
+#define __define_print_scalar_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count) \
+{ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_scalar_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+#define __define_print_scalar_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+#define __define_print_enum_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_enum_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+static inline void print_table_object(flatcc_json_printer_t *ctx,
+ const void *p, int ttl, flatcc_json_printer_table_f pf)
+{
+ flatcc_json_printer_table_descriptor_t td;
+
+ if (!--ttl) {
+ flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_deep_recursion);
+ return;
+ }
+ print_start('{');
+ td.count = 0;
+ td.ttl = ttl;
+ td.table = p;
+ td.vtable = (uint8_t *)p - __flatbuffers_soffset_read_from_pe(p);
+ td.vsize = __flatbuffers_voffset_read_from_pe(td.vtable);
+ pf(ctx, &td);
+ print_end('}');
+}
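+
+/*
+ * Wire format sketch (per the FlatBuffers format): a table starts with
+ * a signed offset back to its vtable, and a vtable starts with its own
+ * size followed by the table size, then per-field offsets:
+ *
+ *   table:  [soffset_t to vtable][inline field data ...]
+ *   vtable: [voffset_t vsize][voffset_t tsize][voffset_t field 0] ...
+ *
+ * This is what the descriptor above is initialized from.
+ */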
+
+void flatcc_json_printer_string_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+}
+
+void flatcc_json_printer_uint8_vector_base64_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len, int urlsafe)
+{
+ const void *p = get_field_ptr(td, id);
+ int mode;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ mode |= base64_enc_modifier_padding;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_uint8_vector_base64_object(ctx, read_uoffset_ptr(p), mode);
+ }
+}
+
+#define __define_print_scalar_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ const void *p = get_field_ptr(td, id); \
+ uoffset_t count; \
+ \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+#define __define_print_enum_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ const void *p; \
+ uoffset_t count; \
+ \
+ if (ctx->noenum) { \
+ flatcc_json_printer_ ## TN ## _vector_field(ctx, td, id, name, len);\
+ return; \
+ } \
+ p = get_field_ptr(td, id); \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+__define_print_scalar_field(uint8, uint8_t)
+__define_print_scalar_field(uint16, uint16_t)
+__define_print_scalar_field(uint32, uint32_t)
+__define_print_scalar_field(uint64, uint64_t)
+__define_print_scalar_field(int8, int8_t)
+__define_print_scalar_field(int16, int16_t)
+__define_print_scalar_field(int32, int32_t)
+__define_print_scalar_field(int64, int64_t)
+__define_print_scalar_field(bool, flatbuffers_bool_t)
+__define_print_scalar_field(float, float)
+__define_print_scalar_field(double, double)
+
+__define_print_enum_field(uint8, uint8_t)
+__define_print_enum_field(uint16, uint16_t)
+__define_print_enum_field(uint32, uint32_t)
+__define_print_enum_field(uint64, uint64_t)
+__define_print_enum_field(int8, int8_t)
+__define_print_enum_field(int16, int16_t)
+__define_print_enum_field(int32, int32_t)
+__define_print_enum_field(int64, int64_t)
+__define_print_enum_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_optional_field(uint8, uint8_t)
+__define_print_scalar_optional_field(uint16, uint16_t)
+__define_print_scalar_optional_field(uint32, uint32_t)
+__define_print_scalar_optional_field(uint64, uint64_t)
+__define_print_scalar_optional_field(int8, int8_t)
+__define_print_scalar_optional_field(int16, int16_t)
+__define_print_scalar_optional_field(int32, int32_t)
+__define_print_scalar_optional_field(int64, int64_t)
+__define_print_scalar_optional_field(bool, flatbuffers_bool_t)
+__define_print_scalar_optional_field(float, float)
+__define_print_scalar_optional_field(double, double)
+
+__define_print_enum_optional_field(uint8, uint8_t)
+__define_print_enum_optional_field(uint16, uint16_t)
+__define_print_enum_optional_field(uint32, uint32_t)
+__define_print_enum_optional_field(uint64, uint64_t)
+__define_print_enum_optional_field(int8, int8_t)
+__define_print_enum_optional_field(int16, int16_t)
+__define_print_enum_optional_field(int32, int32_t)
+__define_print_enum_optional_field(int64, int64_t)
+__define_print_enum_optional_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_struct_field(uint8, uint8_t)
+__define_print_scalar_struct_field(uint16, uint16_t)
+__define_print_scalar_struct_field(uint32, uint32_t)
+__define_print_scalar_struct_field(uint64, uint64_t)
+__define_print_scalar_struct_field(int8, int8_t)
+__define_print_scalar_struct_field(int16, int16_t)
+__define_print_scalar_struct_field(int32, int32_t)
+__define_print_scalar_struct_field(int64, int64_t)
+__define_print_scalar_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_struct_field(float, float)
+__define_print_scalar_struct_field(double, double)
+
+__define_print_scalar_array_struct_field(uint8, uint8_t)
+__define_print_scalar_array_struct_field(uint16, uint16_t)
+__define_print_scalar_array_struct_field(uint32, uint32_t)
+__define_print_scalar_array_struct_field(uint64, uint64_t)
+__define_print_scalar_array_struct_field(int8, int8_t)
+__define_print_scalar_array_struct_field(int16, int16_t)
+__define_print_scalar_array_struct_field(int32, int32_t)
+__define_print_scalar_array_struct_field(int64, int64_t)
+__define_print_scalar_array_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_array_struct_field(float, float)
+__define_print_scalar_array_struct_field(double, double)
+
+__define_print_enum_array_struct_field(uint8, uint8_t)
+__define_print_enum_array_struct_field(uint16, uint16_t)
+__define_print_enum_array_struct_field(uint32, uint32_t)
+__define_print_enum_array_struct_field(uint64, uint64_t)
+__define_print_enum_array_struct_field(int8, int8_t)
+__define_print_enum_array_struct_field(int16, int16_t)
+__define_print_enum_array_struct_field(int32, int32_t)
+__define_print_enum_array_struct_field(int64, int64_t)
+__define_print_enum_array_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_enum_struct_field(uint8, uint8_t)
+__define_print_enum_struct_field(uint16, uint16_t)
+__define_print_enum_struct_field(uint32, uint32_t)
+__define_print_enum_struct_field(uint64, uint64_t)
+__define_print_enum_struct_field(int8, int8_t)
+__define_print_enum_struct_field(int16, int16_t)
+__define_print_enum_struct_field(int32, int32_t)
+__define_print_enum_struct_field(int64, int64_t)
+__define_print_enum_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_vector_field(utype, flatbuffers_utype_t)
+__define_print_scalar_vector_field(uint8, uint8_t)
+__define_print_scalar_vector_field(uint16, uint16_t)
+__define_print_scalar_vector_field(uint32, uint32_t)
+__define_print_scalar_vector_field(uint64, uint64_t)
+__define_print_scalar_vector_field(int8, int8_t)
+__define_print_scalar_vector_field(int16, int16_t)
+__define_print_scalar_vector_field(int32, int32_t)
+__define_print_scalar_vector_field(int64, int64_t)
+__define_print_scalar_vector_field(bool, flatbuffers_bool_t)
+__define_print_scalar_vector_field(float, float)
+__define_print_scalar_vector_field(double, double)
+
+__define_print_enum_vector_field(utype, flatbuffers_utype_t)
+__define_print_enum_vector_field(uint8, uint8_t)
+__define_print_enum_vector_field(uint16, uint16_t)
+__define_print_enum_vector_field(uint32, uint32_t)
+__define_print_enum_vector_field(uint64, uint64_t)
+__define_print_enum_vector_field(int8, int8_t)
+__define_print_enum_vector_field(int16, int16_t)
+__define_print_enum_vector_field(int32, int32_t)
+__define_print_enum_vector_field(int64, int64_t)
+__define_print_enum_vector_field(bool, flatbuffers_bool_t)
+
+void flatcc_json_printer_struct_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ size_t size,
+ flatcc_json_printer_struct_f pf)
+{
+ const uint8_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ p += uoffset_size;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ --count;
+ }
+ while (count--) {
+ p += size;
+ print_char(',');
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_string_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+ print_end(']');
+ }
+}
+
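+/*
+ * A union prints as two adjacent JSON members: a "<name>_type" enum (or
+ * enum vector), then the "<name>" value(s). For a hypothetical field
+ * `weapon` this could look like
+ *
+ *   "weapon_type": "Sword", "weapon": { ... }
+ *
+ * NONE members of union vectors print as null.
+ */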
+void flatcc_json_printer_union_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const uoffset_t *pt = get_field_ptr(td, id - 1);
+ const uoffset_t *p = get_field_ptr(td, id);
+ utype_t *types, type;
+ uoffset_t count;
+ char type_name[FLATCC_JSON_PRINT_NAME_LEN_MAX + 5];
+ flatcc_json_printer_union_descriptor_t ud;
+
+ ud.ttl = td->ttl;
+ if (len > FLATCC_JSON_PRINT_NAME_LEN_MAX) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier too long");
+ return;
+ }
+ memcpy(type_name, name, len);
+ memcpy(type_name + len, "_type", 5);
+ if (p && pt) {
+ flatcc_json_printer_utype_enum_vector_field(ctx, td, id - 1,
+ type_name, len + 5, ptf);
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ pt = read_uoffset_ptr(pt);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ ++pt;
+ types = (utype_t *)pt;
+ print_name(ctx, name, len);
+ print_start('[');
+
+ if (count) {
+ type = __flatbuffers_utype_read_from_pe(types);
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ --count;
+ }
+ while (count--) {
+ ++p;
+ ++types;
+ type = __flatbuffers_utype_read_from_pe(types);
+ print_char(',');
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+}
+
+void flatcc_json_printer_union_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const void *pt = get_field_ptr(td, id - 1);
+ const void *p = get_field_ptr(td, id);
+ utype_t type;
+ flatcc_json_printer_union_descriptor_t ud;
+
+ if (!p || !pt) {
+ return;
+ }
+ type = __flatbuffers_utype_read_from_pe(pt);
+ if (td->count++) {
+ print_char(',');
+ }
+ print_nl();
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ print(ctx, "_type", 5);
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ print_char(':');
+ print_space();
+ if (ctx->noenum) {
+ ctx->p += print_utype(type, ctx->p);
+ } else {
+ ptf(ctx, type);
+ }
+ if (type != 0) {
+ print_char(',');
+ print_name(ctx, name, len);
+ ud.ttl = td->ttl;
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ }
+}
+
+void flatcc_json_printer_union_table(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_table_f pf)
+{
+ print_table_object(ctx, read_uoffset_ptr(ud->member), ud->ttl, pf);
+}
+
+void flatcc_json_printer_union_struct(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_struct_f pf)
+{
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(ud->member));
+ print_end('}');
+}
+
+void flatcc_json_printer_union_string(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud)
+{
+ print_string_object(ctx, read_uoffset_ptr(ud->member));
+}
+
+void flatcc_json_printer_embedded_struct_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ flatcc_json_printer_struct_f pf)
+{
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, (uint8_t *)p + offset);
+ print_end('}');
+}
+
+void flatcc_json_printer_embedded_struct_array_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ size_t size, size_t count,
+ flatcc_json_printer_struct_f pf)
+{
+ size_t i;
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('[');
+ for (i = 0; i < count; ++i) {
+ if (i > 0) {
+ print_char(',');
+ }
+        print_start('{');
+ pf(ctx, (uint8_t *)p + offset + i * size);
+ print_end('}');
+ }
+ print_end(']');
+}
+
+void flatcc_json_printer_struct_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_struct_f *pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+}
+
+/*
+ * Make sure the buffer identifier is valid before assuming the rest of
+ * the buffer is sane.
+ * NOTE: this won't work with type hashes because these can contain
+ * nulls in the fid string. In that case pass a null fid to disable the
+ * check.
+ */
+static int accept_header(flatcc_json_printer_t * ctx,
+ const void *buf, size_t bufsiz, const char *fid)
+{
+ flatbuffers_thash_t id, id2 = 0;
+
+ if (buf == 0 || bufsiz < offset_size + FLATBUFFERS_IDENTIFIER_SIZE) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "buffer header too small");
+ return 0;
+ }
+ if (fid != 0) {
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe((uint8_t *)buf + offset_size);
+ if (!(id2 == 0 || id == id2)) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier mismatch");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int flatcc_json_printer_struct_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
+
+int flatcc_json_printer_table_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid, flatcc_json_printer_table_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_table_object(ctx, read_uoffset_ptr(buf), FLATCC_JSON_PRINT_MAX_LEVELS, pf);
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
+
+void flatcc_json_printer_struct_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+}
+
+void flatcc_json_printer_table_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ ++buf;
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(buf), td->ttl, pf);
+}
+
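+/*
+ * File-backed flusher: a partial flush writes exactly `flush_size`
+ * bytes and moves any spill past `pflush` back to the front of the
+ * buffer so output in progress is preserved; a full flush writes
+ * whatever is buffered.
+ */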
+static void __flatcc_json_printer_flush(flatcc_json_printer_t *ctx, int all)
+{
+ if (!all && ctx->p >= ctx->pflush) {
+ size_t spill = (size_t)(ctx->p - ctx->pflush);
+
+ fwrite(ctx->buf, ctx->flush_size, 1, ctx->fp);
+ memcpy(ctx->buf, ctx->buf + ctx->flush_size, spill);
+ ctx->p = ctx->buf + spill;
+ ctx->total += ctx->flush_size;
+ } else {
+ size_t len = (size_t)(ctx->p - ctx->buf);
+
+ fwrite(ctx->buf, len, 1, ctx->fp);
+ ctx->p = ctx->buf;
+ ctx->total += len;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init(flatcc_json_printer_t *ctx, void *fp)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->fp = fp ? fp : stdout;
+ ctx->flush = __flatcc_json_printer_flush;
+ if (!(ctx->buf = FLATCC_JSON_PRINTER_ALLOC(FLATCC_JSON_PRINT_BUFFER_SIZE))) {
+ return -1;
+ }
+ ctx->own_buffer = 1;
+ ctx->size = FLATCC_JSON_PRINT_BUFFER_SIZE;
+ ctx->flush_size = FLATCC_JSON_PRINT_FLUSH_SIZE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ /*
+ * Make sure we have space for primitive operations such as printing numbers
+ * without having to flush.
+ */
+ FLATCC_ASSERT(ctx->flush_size + FLATCC_JSON_PRINT_RESERVE <= ctx->size);
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ (void)all;
+
+ if (ctx->p >= ctx->pflush) {
+ RAISE_ERROR(overflow);
+ ctx->total += (size_t)(ctx->p - ctx->buf);
+ ctx->p = ctx->buf;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_buffer(flatcc_json_printer_t *ctx, char *buffer, size_t buffer_size)
+{
+ FLATCC_ASSERT(buffer_size >= FLATCC_JSON_PRINT_RESERVE);
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ return -1;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = buffer;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_buffer;
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_dynamic_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ size_t len = (size_t)(ctx->p - ctx->buf);
+ char *p;
+
+ (void)all;
+
+ *ctx->p = '\0';
+ if (ctx->p < ctx->pflush) {
+ return;
+ }
+ p = FLATCC_JSON_PRINTER_REALLOC(ctx->buf, ctx->size * 2);
+ if (!p) {
+ RAISE_ERROR(overflow);
+ ctx->total += len;
+ ctx->p = ctx->buf;
+ } else {
+ ctx->size *= 2;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->buf = p;
+ ctx->p = p + len;
+ ctx->pflush = p + ctx->flush_size;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_dynamic_buffer(flatcc_json_printer_t *ctx, size_t buffer_size)
+{
+ if (buffer_size == 0) {
+ buffer_size = FLATCC_JSON_PRINT_DYN_BUFFER_SIZE;
+ }
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ buffer_size = FLATCC_JSON_PRINT_RESERVE;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = FLATCC_JSON_PRINTER_ALLOC(buffer_size);
+ ctx->own_buffer = 1;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_dynamic_buffer;
+ if (!ctx->buf) {
+ RAISE_ERROR(overflow);
+ return -1;
+ }
+ return 0;
+}
+
+void *flatcc_json_printer_get_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ ctx->flush(ctx, 0);
+ if (buffer_size) {
+ *buffer_size = (size_t)(ctx->p - ctx->buf);
+ }
+ return ctx->buf;
+}
+
+void *flatcc_json_printer_finalize_dynamic_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ void *buffer;
+
+ buffer = flatcc_json_printer_get_buffer(ctx, buffer_size);
+ memset(ctx, 0, sizeof(*ctx));
+ return buffer;
+}
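+
+/*
+ * Usage sketch, assuming a generated printer such as the hypothetical
+ * `MyGame_Monster_print_json_as_root` (any generated
+ * `*_print_json_as_root` function fits this pattern):
+ *
+ *   flatcc_json_printer_t ctx;
+ *   size_t size;
+ *   char *json;
+ *
+ *   flatcc_json_printer_init_dynamic_buffer(&ctx, 0);
+ *   MyGame_Monster_print_json_as_root(&ctx, buf, bufsiz, 0);
+ *   json = flatcc_json_printer_finalize_dynamic_buffer(&ctx, &size);
+ *   ... use json[0..size) ...
+ *   FLATCC_JSON_PRINTER_FREE(json);
+ */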
+
+void flatcc_json_printer_clear(flatcc_json_printer_t *ctx)
+{
+ if (ctx->own_buffer && ctx->buf) {
+ FLATCC_JSON_PRINTER_FREE(ctx->buf);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+}
diff --git a/flatcc/src/runtime/refmap.c b/flatcc/src/runtime/refmap.c
new file mode 100644
index 0000000..a2497f0
--- /dev/null
+++ b/flatcc/src/runtime/refmap.c
@@ -0,0 +1,248 @@
+/*
+ * Optional file that can be included in the runtime library to support
+ * DAG cloning with the builder, and which may also be used standalone
+ * for custom purposes. See also comments in `flatcc/flatcc_builder.h`.
+ *
+ * Note that dynamic construction takes place and that large offset
+ * vectors might consume significant space if there are not many shared
+ * references. In the basic use case no allocation takes place because a
+ * few references can be held using only a small stack allocated hash
+ * table.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_refmap.h"
+#include "flatcc/flatcc_alloc.h"
+#include "flatcc/flatcc_assert.h"
+
+#define _flatcc_refmap_calloc FLATCC_CALLOC
+#define _flatcc_refmap_free FLATCC_FREE
+
+/* Can be used as a primitive defense against collision attacks. */
+#ifdef FLATCC_HASH_SEED
+#define _flatcc_refmap_seed FLATCC_HASH_SEED
+#else
+#define _flatcc_refmap_seed 0x2f693b52
+#endif
+
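+/*
+ * Fixed-point load factor test: with n = FLATCC_REFMAP_LOAD_FACTOR * 256
+ * the check `count >= buckets * n / d` avoids floating point per insert.
+ * For example, assuming a load factor of 0.7, n is 179 and a table of
+ * 8 buckets resizes once count reaches 8 * 179 / 256 = 5.
+ */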
+static inline size_t _flatcc_refmap_above_load_factor(size_t count, size_t buckets)
+{
+ static const size_t d = 256;
+ static const size_t n = (size_t)((FLATCC_REFMAP_LOAD_FACTOR) * 256.0f);
+
+ return count >= buckets * n / d;
+}
+
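+/*
+ * Open addressing with linear probing: `N` is `buckets - 1`, and the
+ * resize logic keeps `buckets` a power of two, so `& N` wraps the probe
+ * sequence around the table without a modulo.
+ */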
+#define _flatcc_refmap_probe(k, i, N) ((k + i) & N)
+
+void flatcc_refmap_clear(flatcc_refmap_t *refmap)
+{
+ if (refmap->table && refmap->table != refmap->min_table) {
+ _flatcc_refmap_free(refmap->table);
+ }
+ flatcc_refmap_init(refmap);
+}
+
+static inline size_t _flatcc_refmap_hash(const void *src)
+{
+ /* MurmurHash3 64-bit finalizer */
+ uint64_t x;
+
+ x = (uint64_t)((size_t)src) ^ _flatcc_refmap_seed;
+
+ x ^= x >> 33;
+ x *= 0xff51afd7ed558ccdULL;
+ x ^= x >> 33;
+ x *= 0xc4ceb9fe1a85ec53ULL;
+ x ^= x >> 33;
+ return (size_t)x;
+}
+
+void flatcc_refmap_reset(flatcc_refmap_t *refmap)
+{
+ if (refmap->count) {
+ memset(refmap->table, 0, sizeof(refmap->table[0]) * refmap->buckets);
+ }
+ refmap->count = 0;
+}
+
+/*
+ * Technically resize also supports shrinking, which may be useful for
+ * adaptations, but the current hash table never deletes individual items.
+ */
+int flatcc_refmap_resize(flatcc_refmap_t *refmap, size_t count)
+{
+ const size_t min_buckets = sizeof(refmap->min_table) / sizeof(refmap->min_table[0]);
+
+ size_t i;
+ size_t buckets;
+ size_t buckets_old;
+ struct flatcc_refmap_item *T_old;
+
+ if (count < refmap->count) {
+ count = refmap->count;
+ }
+ buckets = min_buckets;
+
+ while (_flatcc_refmap_above_load_factor(count, buckets)) {
+ buckets *= 2;
+ }
+ if (refmap->buckets == buckets) {
+ return 0;
+ }
+ T_old = refmap->table;
+ buckets_old = refmap->buckets;
+ if (buckets == min_buckets) {
+ memset(refmap->min_table, 0, sizeof(refmap->min_table));
+ refmap->table = refmap->min_table;
+ } else {
+ refmap->table = _flatcc_refmap_calloc(buckets, sizeof(refmap->table[0]));
+ if (refmap->table == 0) {
+ refmap->table = T_old;
+ FLATCC_ASSERT(0); /* out of memory */
+ return -1;
+ }
+ }
+ refmap->buckets = buckets;
+ refmap->count = 0;
+ for (i = 0; i < buckets_old; ++i) {
+ if (T_old[i].src) {
+ flatcc_refmap_insert(refmap, T_old[i].src, T_old[i].ref);
+ }
+ }
+ if (T_old && T_old != refmap->min_table) {
+ _flatcc_refmap_free(T_old);
+ }
+ return 0;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_insert(flatcc_refmap_t *refmap, const void *src, flatcc_refmap_ref_t ref)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (src == 0) return ref;
+ if (_flatcc_refmap_above_load_factor(refmap->count, refmap->buckets)) {
+ if (flatcc_refmap_resize(refmap, refmap->count * 2)) {
+ return flatcc_refmap_not_found; /* alloc failed */
+ }
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) {
+ return T[j].ref = ref;
+ }
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ ++refmap->count;
+ T[j].src = src;
+ return T[j].ref = ref;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_find(flatcc_refmap_t *refmap, const void *src)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (refmap->count == 0) {
+ return flatcc_refmap_not_found;
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) return T[j].ref;
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ return flatcc_refmap_not_found;
+}
+
+/*
+ * To run test from project root:
+ *
+ * cc -D FLATCC_REFMAP_TEST -I include src/runtime/refmap.c -o test_refmap && ./test_refmap
+ *
+ */
+#ifdef FLATCC_REFMAP_TEST
+
+#include <stdio.h>
+
+#ifndef FLATCC_REFMAP_H
+#include "flatcc/flatcc_refmap.h"
+#endif
+
+#define test(x) do { if (!(x)) { fprintf(stderr, "%02d: refmap test failed\n", __LINE__); exit(-1); } } while (0)
+#define test_start() fprintf(stderr, "starting refmap test ...\n")
+#define test_ok() fprintf(stderr, "refmap test succeeded\n")
+
+int main(void)
+{
+ int i;
+ int data[1000];
+ int a = 1;
+ int b = 2;
+ int c = 3;
+ flatcc_refmap_t refmap;
+
+ flatcc_refmap_init(&refmap);
+
+ test(flatcc_refmap_find(&refmap, &a) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, 0) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &a) == 0);
+
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &a, 43) == 43);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &b, -10) == -10);
+ test(flatcc_refmap_insert(&refmap, &c, 100) == 100);
+ test(refmap.count == 3);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(flatcc_refmap_find(&refmap, &b) == -10);
+ test(flatcc_refmap_find(&refmap, &c) == 100);
+
+ test(flatcc_refmap_insert(&refmap, 0, 1000) == 1000);
+ test(flatcc_refmap_find(&refmap, 0) == 0);
+ test(refmap.count == 3);
+
+ test(flatcc_refmap_insert(&refmap, &b, 0) == 0);
+ test(flatcc_refmap_find(&refmap, &b) == 0);
+ test(refmap.count == 3);
+
+ flatcc_refmap_reset(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets > 0);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_insert(&refmap, data + i, i + 42) == i + 42);
+ }
+ test(refmap.count == 1000);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_find(&refmap, data + i) == i + 42);
+ }
+ flatcc_refmap_clear(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets == 0);
+ test_ok();
+ return 0;
+}
+
+#endif /* FLATCC_REFMAP_TEST */
diff --git a/flatcc/src/runtime/verifier.c b/flatcc/src/runtime/verifier.c
new file mode 100644
index 0000000..9c43bf6
--- /dev/null
+++ b/flatcc/src/runtime/verifier.c
@@ -0,0 +1,617 @@
+/*
+ * Runtime support for verifying flatbuffers.
+ *
+ * Depends mutually on generated verifier functions for table types that
+ * call into this library.
+ */
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_identifier.h"
+
+/* Customization for testing. */
+#if FLATCC_DEBUG_VERIFY
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 1
+#include <stdio.h>
+#define FLATCC_VERIFIER_ASSERT(cond, reason) \
+ if (!(cond)) { fprintf(stderr, "verifier assert: %s\n", \
+ flatcc_verify_error_string(reason)); FLATCC_ASSERT(0); return reason; }
+#endif
+
+#if FLATCC_TRACE_VERIFY
+#include <stdio.h>
+#define trace_verify(s, p) \
+ fprintf(stderr, "trace verify: %s: 0x%02x\n", (s), (unsigned)(size_t)(p));
+#else
+#define trace_verify(s, p) ((void)0)
+#endif
+
+/* The runtime library does not use the global config file. */
+
+/* This is a guideline, not an exact measure. */
+#ifndef FLATCC_VERIFIER_MAX_LEVELS
+#define FLATCC_VERIFIER_MAX_LEVELS 100
+#endif
+
+/*
+ * Generally a check should tell if a buffer is valid or not such
+ * that the runtime can take appropriate action rather than crash,
+ * also in debug builds, but assertions are helpful when debugging a
+ * problem. Assertion support must be compiled into the debug runtime
+ * library to take effect (see FLATCC_DEBUG_VERIFY above).
+ */
+#ifndef FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 0
+#endif
+
+/* May be redefined for logging purposes. */
+#ifndef FLATCC_VERIFIER_ASSERT
+#define FLATCC_VERIFIER_ASSERT(cond, reason) FLATCC_ASSERT(cond)
+#endif
+
+#if FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define flatcc_verify(cond, reason) if (!(cond)) { FLATCC_VERIFIER_ASSERT(cond, reason); return reason; }
+#else
+#define flatcc_verify(cond, reason) if (!(cond)) { return reason; }
+#endif
+
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+#define thash_t flatbuffers_thash_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+#define thash_size sizeof(thash_t)
+#define offset_size uoffset_size
+
+const char *flatcc_verify_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_verify_error_##no: \
+ return str;
+ FLATCC_VERIFY_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+/* `cond` may have side effects. */
+#define verify(cond, reason) do { int c = (cond); flatcc_verify(c, reason); } while(0)
+
+/*
+ * Identify checks related to runtime conditions (buffer size and
+ * alignment) as separate from those related to buffer content.
+ */
+#define verify_runtime(cond, reason) verify(cond, reason)
+
+#define check_result(x) if (x) { return (x); }
+
+#define check_field(td, id, required, base) do { \
+ int ret = get_offset_field(td, id, required, &base); \
+ if (ret || !base) { return ret; }} while (0)
+
+static inline uoffset_t read_uoffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_uoffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline thash_t read_thash_identifier(const char *identifier)
+{
+ return flatbuffers_type_hash_from_string(identifier);
+}
+
+static inline thash_t read_thash(const void *p, uoffset_t base)
+{
+ return __flatbuffers_thash_read_from_pe((uint8_t *)p + base);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
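+/*
+ * Checks that an offset field at `base + offset` is non-null, aligned
+ * to offset_size, and leaves room for the offset itself before `end`.
+ * The `k + offset_size < k` test guards against wrap-around; it only
+ * applies in unusual configurations where uoffset_t is no wider than
+ * voffset_t and constant-folds away otherwise.
+ */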
+static inline int check_header(uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+
+ /* The `k > base` rather than `k >= base` is to avoid null offsets. */
+ return k > base && k + offset_size <= end && !(k & (offset_size - 1));
+}
+
+static inline int check_aligned_header(uoffset_t end, uoffset_t base, uoffset_t offset, uint16_t align)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+ /* Alignment refers to element 0 and header must also be aligned. */
+ align = align < uoffset_size ? uoffset_size : align;
+
+ /* Note to self: the builder can also use the mask OR trick to propagate `min_align`. */
+ return k > base && k + offset_size <= end && !((k + offset_size) & ((offset_size - 1) | (align - 1u)));
+}
+
+static inline int verify_struct(uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t size, uint16_t align)
+{
+ /* Structs can have zero size so `end` is a valid value. */
+ if (offset == 0 || base + offset > end) {
+ return flatcc_verify_error_offset_out_of_range;
+ }
+ base += offset;
+ verify(base + size >= base, flatcc_verify_error_struct_size_overflow);
+ verify(base + size <= end, flatcc_verify_error_struct_out_of_range);
+    verify(!(base & (align - 1u)), flatcc_verify_error_struct_unaligned);
+ return flatcc_verify_ok;
+}
+
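+/*
+ * Vtable layout: slot 0 holds the vtable size and slot 1 the table
+ * size, so field `id` lives at voffset slot `id + 2`. A zero entry
+ * means the field is absent.
+ */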
+static inline voffset_t read_vt_entry(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vo = (id + 2u) * sizeof(voffset_t);
+
+ /* Assumes tsize has been verified for alignment. */
+ if (vo >= td->vsize) {
+ return 0;
+ }
+ return read_voffset(td->vtable, vo);
+}
+
+static inline const void *get_field_ptr(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vte = read_vt_entry(td, id);
+ return vte ? (const uint8_t *)td->buf + td->table + vte : 0;
+}
+
+static int verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, uoffset_t size, uint16_t align)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+ uoffset_t base = (uoffset_t)(size_t)td->buf;
+
+    /*
+     * Otherwise the range check assumptions break, and normal access
+     * code likely breaks as well. We don't require
+     * voffset_size < uoffset_size, but some checks are faster when it
+     * holds.
+     */
+ FLATCC_ASSERT(uoffset_size >= voffset_size);
+ FLATCC_ASSERT(soffset_size == uoffset_size);
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ verify(!required, flatcc_verify_error_required_field_missing);
+ return flatcc_verify_ok;
+ }
+ trace_verify("table buffer", td->buf);
+ trace_verify("table", td->table);
+ trace_verify("id", id);
+ trace_verify("vte", vte);
+
+    /*
+     * Note that we don't add td.table to k, and that we test against the
+     * table size, not the table end or buffer end. Otherwise it would
+     * not be safe to optimize out the k <= k2 check for normal uoffset
+     * and voffset configurations.
+     */
+ k = vte;
+ k2 = k + size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ trace_verify("table + vte", vte + td->table);
+ k += td->table + base;
+ trace_verify("entry: buf + table + vte", k);
+ trace_verify("align", align);
+ trace_verify("align masked entry", k & (align - 1u));
+ verify(!(k & (align - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ return flatcc_verify_ok;
+}
+
+static int get_offset_field(flatcc_table_verifier_descriptor_t *td, voffset_t id, int required, uoffset_t *out)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ *out = 0;
+ if (required) {
+ return flatcc_verify_error_required_field_missing;
+ }
+ /* Missing, but not invalid. */
+ return flatcc_verify_ok;
+ }
+    /*
+     * Note that we don't add td.table to k, and that we test against the
+     * table size, not the table end or buffer end. Otherwise it would
+     * not be safe to optimize out the k <= k2 check for normal uoffset
+     * and voffset configurations.
+     */
+ k = vte;
+ k2 = k + offset_size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ k += td->table;
+ verify(!(k & (offset_size - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ *out = k;
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t n;
+
+ verify(check_header(end, base, offset), flatcc_verify_error_string_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ verify(end - base > n, flatcc_verify_error_string_out_of_range);
+ verify(((uint8_t *)buf + base)[n] == 0, flatcc_verify_error_string_not_zero_terminated);
+ return flatcc_verify_ok;
+}
+
+/*
+ * Keep the interface somewhat similar to flatcc_builder_start_vector.
+ * `max_count` is a precomputed division used to manage the overflow
+ * check on the vector length.
+ */
+static inline int verify_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t elem_size, uint16_t align, uoffset_t max_count)
+{
+ uoffset_t n;
+
+ verify(check_aligned_header(end, base, offset, align), flatcc_verify_error_vector_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ /* `n * elem_size` can overflow uncontrollably otherwise. */
+ verify(n <= max_count, flatcc_verify_error_vector_count_exceeds_representable_vector_size);
+ verify(end - base >= n * elem_size, flatcc_verify_error_vector_out_of_range);
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t i, n;
+
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_string(buf, end, base, read_uoffset(buf, base)));
+ }
+ return flatcc_verify_ok;
+}
+
+static inline int verify_table(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t vbase, vend;
+ flatcc_table_verifier_descriptor_t td;
+
+ verify((td.ttl = ttl - 1), flatcc_verify_error_max_nesting_level_reached);
+ verify(check_header(end, base, offset), flatcc_verify_error_table_header_out_of_range_or_unaligned);
+ td.table = base + offset;
+    /* Read the vtable offset - it is signed, but we treat it as unsigned, assuming two's complement. */
+ vbase = td.table - read_uoffset(buf, td.table);
+ verify((soffset_t)vbase >= 0 && !(vbase & (voffset_size - 1)), flatcc_verify_error_vtable_offset_out_of_range_or_unaligned);
+ verify(vbase + voffset_size <= end, flatcc_verify_error_vtable_header_out_of_range);
+ /* Read vtable size. */
+ td.vsize = read_voffset(buf, vbase);
+ vend = vbase + td.vsize;
+ verify(vend <= end && !(td.vsize & (voffset_size - 1)), flatcc_verify_error_vtable_size_out_of_range_or_unaligned);
+ /* Optimizes away overflow check if uoffset_t is large enough. */
+ verify(uoffset_size > voffset_size || vend >= vbase, flatcc_verify_error_vtable_size_overflow);
+
+ verify(td.vsize >= 2 * voffset_size, flatcc_verify_error_vtable_header_too_small);
+ /* Read table size. */
+ td.tsize = read_voffset(buf, vbase + voffset_size);
+ verify(end - td.table >= td.tsize, flatcc_verify_error_table_size_out_of_range);
+ td.vtable = (uint8_t *)buf + vbase;
+ td.buf = buf;
+ td.end = end;
+ return tvf(&td);
+}
+
+static inline int verify_table_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t i, n;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_table(buf, end, base, read_uoffset(buf, base), ttl, tvf));
+ }
+ return flatcc_verify_ok;
+}
+
+static inline int verify_union_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ uoffset_t count, const utype_t *types, int ttl, flatcc_union_verifier_f uvf)
+{
+ uoffset_t i, n, elem;
+ flatcc_union_verifier_descriptor_t ud;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ verify(n == count, flatcc_verify_error_union_vector_length_mismatch);
+ base += offset_size;
+
+ ud.buf = buf;
+ ud.end = end;
+ ud.ttl = ttl;
+
+ for (i = 0; i < n; ++i, base += offset_size) {
+ /* Table vectors can never be null, but unions can when the type is NONE. */
+ elem = read_uoffset(buf, base);
+ if (elem == 0) {
+ verify(types[i] == 0, flatcc_verify_error_union_element_absent_without_type_NONE);
+ } else {
+ verify(types[i] != 0, flatcc_verify_error_union_element_present_with_type_NONE);
+ ud.type = types[i];
+ ud.base = base;
+ ud.offset = elem;
+ check_result(uvf(&ud));
+ }
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, size_t size, uint16_t align)
+{
+ check_result(verify_field(td, id, 0, (uoffset_t)size, align));
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_string_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
+
+int flatcc_verify_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, size_t elem_size, uint16_t align, size_t max_count)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ (uoffset_t)elem_size, align, (uoffset_t)max_count);
+}
+
+int flatcc_verify_string_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string_vector(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
+
+int flatcc_verify_table_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_table_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table_vector(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_union_table(flatcc_union_verifier_descriptor_t *ud, flatcc_table_verifier_f *tvf)
+{
+ return verify_table(ud->buf, ud->end, ud->base, ud->offset, ud->ttl, tvf);
+}
+
+int flatcc_verify_union_struct(flatcc_union_verifier_descriptor_t *ud, size_t size, uint16_t align)
+{
+ return verify_struct(ud->end, ud->base, ud->offset, (uoffset_t)size, align);
+}
+
+int flatcc_verify_union_string(flatcc_union_verifier_descriptor_t *ud)
+{
+ return verify_string(ud->buf, ud->end, ud->base, ud->offset);
+}
+
+int flatcc_verify_buffer_header(const void *buf, size_t bufsiz, const char *fid)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+ * Even if we specify no fid, the user might later. Therefore
+ * require space for it. Not all buffer generators will take this
+ * into account, so it is possible to fail an otherwise valid buffer
+ * - but such buffers aren't safe.
+ */
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (fid != 0) {
+ id2 = read_thash_identifier(fid);
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_typed_buffer_header(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+ * Even if no type hash is specified now, the user might supply
+ * one later, so require space for the identifier. Not all buffer
+ * generators take this into account, so an otherwise valid buffer
+ * can fail here - but such buffers aren't safe.
+ */
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (thash != 0) {
+ id2 = thash;
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
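+
+/*
+ * Sketch: the type hash is normally derived from the fully qualified
+ * type name, assuming the flatbuffers_type_hash_from_name helper from
+ * flatcc_identifier.h and a hypothetical type name:
+ *
+ *     flatbuffers_thash_t th = flatbuffers_type_hash_from_name("MyGame.Monster");
+ *     check_result(flatcc_verify_typed_buffer_header(buf, bufsiz, th));
+ */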
+
+int flatcc_verify_struct_as_root(const void *buf, size_t bufsiz, const char *fid, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_struct_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, bufsiz, thash));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_table_as_root(const void *buf, size_t bufsiz, const char *fid, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_buffer_header(buf, (uoffset_t)bufsiz, fid));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
+
+int flatcc_verify_table_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, (uoffset_t)bufsiz, thash));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
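+
+/*
+ * Typical top-level use: verify an untrusted buffer before reading it.
+ * The identifier and verifier name reuse the hypothetical Monster
+ * sketch above:
+ *
+ *     if (flatcc_verify_table_as_root(buf, bufsiz, "MONS",
+ *             Monster_verify_table) != flatcc_verify_ok) {
+ *         return -1;
+ *     }
+ */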
+
+int flatcc_verify_struct_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid, size_t size, uint16_t align)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ /* A nested buffer is stored as a byte vector: elem_size 1, aligned to align. */
+ check_result(flatcc_verify_vector_field(td, id, required, 1, align, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ return flatcc_verify_struct_as_root(buf, bufsiz, fid, size, align);
+}
+
+int flatcc_verify_table_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid,
+ uint16_t align, flatcc_table_verifier_f tvf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ /* A nested buffer is stored as a byte vector: elem_size 1, aligned to align. */
+ check_result(flatcc_verify_vector_field(td, id, required, 1, align, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ /*
+ * The nested buffer's identifier is only checked when the caller
+ * passes a non-zero fid; the expected identifier is difficult to
+ * obtain here and might not be what is desired anyway, so it is
+ * usually left for the user to verify later.
+ */
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_table(buf, bufsiz, 0, read_uoffset(buf, 0), td->ttl, tvf);
+}
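+
+/*
+ * Layout note: a nested root is stored as a [ubyte] vector whose
+ * length field doubles as the size of the complete buffer that
+ * follows, which is why the code above verifies the byte vector and
+ * then re-verifies its payload as a root. Sketch with hypothetical
+ * field id, alignment and verifier:
+ *
+ *     check_result(flatcc_verify_table_as_nested_root(td, 4, 0, 0,
+ *             8, Monster_verify_table));
+ */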
+
+int flatcc_verify_union_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uint8_t *type;
+ uoffset_t base;
+ flatcc_union_verifier_descriptor_t ud;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ vte_table = read_vt_entry(td, id);
+ verify(vte_table == 0, flatcc_verify_error_union_cannot_have_a_table_without_a_type);
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_field);
+ return flatcc_verify_ok;
+ }
+ /* The type field is known to be present, so it need not be passed as required. */
+ check_result(verify_field(td, id - 1, 0, 1, 1));
+ /* Only now is it safe to read the type. */
+ vte_table = read_vt_entry(td, id);
+ type = (const uint8_t *)td->buf + td->table + vte_type;
+ verify(*type || vte_table == 0, flatcc_verify_error_union_type_NONE_cannot_have_a_value);
+
+ if (*type == 0) {
+ return flatcc_verify_ok;
+ }
+ check_field(td, id, required, base);
+ ud.buf = td->buf;
+ ud.end = td->end;
+ ud.ttl = td->ttl;
+ ud.base = base;
+ ud.offset = read_uoffset(td->buf, base);
+ ud.type = *type;
+ return uvf(&ud);
+}
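+
+/*
+ * By convention the union type field occupies vtable slot id - 1 and
+ * the union value slot id, which is why both entries are read above.
+ * Sketch, reusing the hypothetical union verifier from earlier and a
+ * made-up field id:
+ *
+ *     check_result(flatcc_verify_union_field(td, 3, 0, Any_union_verifier));
+ */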
+
+int flatcc_verify_union_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uoffset_t *buf;
+ const utype_t *types;
+ uoffset_t count, base;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ vte_table = read_vt_entry(td, id);
+ verify(vte_table == 0, flatcc_verify_error_union_cannot_have_a_table_without_a_type);
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_vector_field);
+ return flatcc_verify_ok;
+ }
+ check_result(flatcc_verify_vector_field(td, id - 1, required,
+ utype_size, utype_size, FLATBUFFERS_COUNT_MAX(utype_size)));
+ if (0 == (buf = get_field_ptr(td, id - 1))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ count = read_uoffset(buf, 0);
+ ++buf;
+ types = (const utype_t *)buf;
+
+ check_field(td, id, required, base);
+ return verify_union_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ count, types, td->ttl, uvf);
+}
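+
+/*
+ * Union vectors use the same id - 1 / id pairing: a type vector of
+ * utype elements and a matching offset vector, checked element by
+ * element through the union verifier. Sketch with a hypothetical id:
+ *
+ *     check_result(flatcc_verify_union_vector_field(td, 5, 0, Any_union_verifier));
+ */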