author     Marko Ratkaj <marko.ratkaj@sartura.hr>   2018-11-06 16:55:47 +0100
committer  Marko Ratkaj <marko.ratkaj@sartura.hr>   2018-11-06 16:56:47 +0100
commit     0099bad30b22fe895223393bcc60f44b30fa86f9 (patch)
tree       257d3018feaa66a7f87529b06b778472612ac525 /utils/jq/patches
parent     9753c21d5c73a2a53a866fd8ee706acdddf90d6d (diff)
utils: jq: bump version to 1.6
Signed-off-by: Marko Ratkaj <marko.ratkaj@sartura.hr>
Diffstat (limited to 'utils/jq/patches')
-rw-r--r--   utils/jq/patches/001-stack-exhaustion.patch       37
-rw-r--r--   utils/jq/patches/002-heap-buffer-overflow.patch   34
2 files changed, 0 insertions, 71 deletions
diff --git a/utils/jq/patches/001-stack-exhaustion.patch b/utils/jq/patches/001-stack-exhaustion.patch
deleted file mode 100644
index 8d23f6108..000000000
--- a/utils/jq/patches/001-stack-exhaustion.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 2d38a12d686a5156d4e7afb1fed7851805590582 Mon Sep 17 00:00:00 2001
-From: W-Mark Kubacki <wmark@hurrikane.de>
-Date: Fri, 19 Aug 2016 19:50:39 +0200
-Subject: [PATCH] Skip printing at MAX_DEPTH and deeper
-
-This addresses #1136, and mitigates a stack exhaustion when printing
-a very deeply nested term.
----
- src/jv_print.c | 8 +++++++-
- 1 file changed, 7 insertions(+), 1 deletion(-)
-
-diff --git a/src/jv_print.c b/src/jv_print.c
-index 5f4f234..cf6651b 100644
---- src/jv_print.c
-+++ src/jv_print.c
-@@ -13,6 +13,10 @@
- #include "jv_dtoa.h"
- #include "jv_unicode.h"
-
-+#ifndef MAX_DEPTH
-+#define MAX_DEPTH 256
-+#endif
-+
- #define ESC "\033"
- #define COL(c) (ESC "[" c "m")
- #define COLRESET (ESC "[0m")
-@@ -150,7 +154,9 @@ static void jv_dump_term(struct dtoa_context* C, jv x, int flags, int indent, FI
- }
- }
- }
-- switch (jv_get_kind(x)) {
-+ if (indent > MAX_DEPTH) {
-+ put_str("<stripped: exceeds max depth>", F, S, flags & JV_PRINT_ISATTY);
-+ } else switch (jv_get_kind(x)) {
- default:
- case JV_KIND_INVALID:
- if (flags & JV_PRINT_INVALID) {
diff --git a/utils/jq/patches/002-heap-buffer-overflow.patch b/utils/jq/patches/002-heap-buffer-overflow.patch
deleted file mode 100644
index ecfbd71c9..000000000
--- a/utils/jq/patches/002-heap-buffer-overflow.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 8eb1367ca44e772963e704a700ef72ae2e12babd Mon Sep 17 00:00:00 2001
-From: Nicolas Williams <nico@cryptonector.com>
-Date: Sat, 24 Oct 2015 17:24:57 -0500
-Subject: [PATCH] Heap buffer overflow in tokenadd() (fix #105)
-
-This was an off-by one: the NUL terminator byte was not allocated on
-resize. This was triggered by JSON-encoded numbers longer than 256
-bytes.
----
- src/jv_parse.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/src/jv_parse.c b/src/jv_parse.c
-index 3102ed4..84245b8 100644
---- src/jv_parse.c
-+++ src/jv_parse.c
-@@ -383,7 +383,7 @@ static pfunc stream_token(struct jv_parser* p, char ch) {
-
- static void tokenadd(struct jv_parser* p, char c) {
- assert(p->tokenpos <= p->tokenlen);
-- if (p->tokenpos == p->tokenlen) {
-+ if (p->tokenpos >= (p->tokenlen - 1)) {
- p->tokenlen = p->tokenlen*2 + 256;
- p->tokenbuf = jv_mem_realloc(p->tokenbuf, p->tokenlen);
- }
-@@ -485,7 +485,7 @@ static pfunc check_literal(struct jv_parser* p) {
- TRY(value(p, v));
- } else {
- // FIXME: better parser
-- p->tokenbuf[p->tokenpos] = 0; // FIXME: invalid
-+ p->tokenbuf[p->tokenpos] = 0;
- char* end = 0;
- double d = jvp_strtod(&p->dtoa, p->tokenbuf, &end);
- if (end == 0 || *end != 0)
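[Note: the deleted 002-heap-buffer-overflow.patch is upstream commit 8eb1367 and is likewise included in the 1.6 release. The bug was an off-by-one: tokenadd() grew the buffer only when it was completely full, leaving no room for the NUL terminator that check_literal() appends, so a JSON number longer than the initial 256-byte allocation wrote one byte past the heap buffer. The sketch below (hypothetical helper names, not jq's actual jv_parse.c code) reproduces the fixed growth rule:

    /* Minimal sketch of the corrected resize condition: grow when only
     * one free byte remains, so the terminator always fits in bounds. */
    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct parser {
        char *tokenbuf;
        int tokenpos;
        int tokenlen;
    };

    static void token_add(struct parser *p, char c) {
        assert(p->tokenpos <= p->tokenlen);
        /* fixed condition: keep one spare byte for the NUL appended later;
         * the old "== tokenlen" test filled the buffer completely */
        if (p->tokenpos >= (p->tokenlen - 1)) {
            p->tokenlen = p->tokenlen * 2 + 256;
            p->tokenbuf = realloc(p->tokenbuf, p->tokenlen);
        }
        assert(p->tokenpos < p->tokenlen);
        p->tokenbuf[p->tokenpos++] = c;
    }

    static const char *token_finish(struct parser *p) {
        /* with the old condition this write could land one byte past the
         * allocation once a token outgrew the 256-byte buffer */
        p->tokenbuf[p->tokenpos] = 0;
        return p->tokenbuf;
    }

    int main(void) {
        struct parser p = { NULL, 0, 0 };
        /* 300 digits cross the first 256-byte allocation, the case that
         * triggered the original heap overflow */
        for (int i = 0; i < 300; i++)
            token_add(&p, '9');
        printf("token length: %zu\n", strlen(token_finish(&p)));
        free(p.tokenbuf);
        return 0;
    }

Growing at tokenlen - 1 instead of tokenlen guarantees that tokenbuf[tokenpos] is a valid byte when the terminator is written.]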