author     mark andrews <mandrews@liveaction.com>    2022-09-11 11:51:06 -0700
committer  Toni <matzeton@googlemail.com>            2022-09-21 18:03:22 +0200
commit     5d5b46e5140d046e68e95be02ecb421d330b8b32 (patch)
tree       beccb90de34a6e601c9bdf5eff2c4dd1444a4021 /src
parent     03d217eae60c45288131676b736a9d7ece1baca2 (diff)
Add proj and sln files to compile a dynamic x64 lib under Visual Studio 2019.
* add CI support via MSBuild
Signed-off-by: Toni Uhlig <matzeton@googlemail.com>
Diffstat (limited to 'src')
-rw-r--r--   src/lib/ndpi_utils.c                       2
-rw-r--r--   src/lib/third_party/src/gcrypt/digest.c    2
-rw-r--r--   src/lib/third_party/src/roaring.cc        30

3 files changed, 17 insertions, 17 deletions
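
Note on the ndpi_utils.c and digest.c hunks below: empty brace initializers ("= {}") are a GNU/C2x extension that the Visual Studio 2019 C compiler does not accept, which is presumably why this patch spells the zero-initializers out. A minimal sketch of the portable form (illustrative only; the function and variable names are not from the patch):

    #include <stdint.h>

    void zero_init_example(void) {
        /* char buf[32] = {};  -- empty braces are rejected by MSVC's C front end */
        char     buf[32] = {'\0'};  /* first element set explicitly, the rest are implicitly zeroed */
        uint32_t W[16]   = {0};     /* same idea for an integer array */
        (void)buf;
        (void)W;
    }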
diff --git a/src/lib/ndpi_utils.c b/src/lib/ndpi_utils.c
index d7de4e713..2dc39dd3c 100644
--- a/src/lib/ndpi_utils.c
+++ b/src/lib/ndpi_utils.c
@@ -1538,7 +1538,7 @@ int ndpi_flow2json(struct ndpi_detection_module_struct *ndpi_struct,
                    u_int16_t src_port, u_int16_t dst_port,
                    ndpi_protocol l7_protocol,
                    ndpi_serializer *serializer) {
-  char src_name[32] = {}, dst_name[32] = {};
+  char src_name[32] = {'\0'}, dst_name[32] = {'\0'};
 
   if(ip_version == 4) {
     inet_ntop(AF_INET, &src_v4, src_name, sizeof(src_name));
diff --git a/src/lib/third_party/src/gcrypt/digest.c b/src/lib/third_party/src/gcrypt/digest.c
index e68d905cf..48e6eea97 100644
--- a/src/lib/third_party/src/gcrypt/digest.c
+++ b/src/lib/third_party/src/gcrypt/digest.c
@@ -130,7 +130,7 @@ sha256_init(sha256_t *p)
 static void
 sha256_transform(uint32_t *state, const uint32_t *data)
 {
-  uint32_t W[16] = {};
+  uint32_t W[16] = {0};
   unsigned j;
 #ifdef _SHA256_UNROLL2
   uint32_t a,b,c,d,e,f,g,h;
diff --git a/src/lib/third_party/src/roaring.cc b/src/lib/third_party/src/roaring.cc
index a2668fcda..36b96bed3 100644
--- a/src/lib/third_party/src/roaring.cc
+++ b/src/lib/third_party/src/roaring.cc
@@ -11983,7 +11983,7 @@ static void binarySearch2(const uint16_t *array, int32_t n, uint16_t target1,
  * and binarySearch2. This approach can be slightly superior to a conventional
  * galloping search in some instances.
  */
-static int32_t intersect_skewed_uint16(const uint16_t *small, size_t size_s,
+static int32_t intersect_skewed_uint16(const uint16_t *small_set, size_t size_s,
                                        const uint16_t *large, size_t size_l,
                                        uint16_t *buffer) {
     size_t pos = 0, idx_l = 0, idx_s = 0;
@@ -11993,10 +11993,10 @@ static int32_t intersect_skewed_uint16(const uint16_t *small, size_t size_s,
     }
     int32_t index1 = 0, index2 = 0, index3 = 0, index4 = 0;
     while ((idx_s + 4 <= size_s) && (idx_l < size_l)) {
-        uint16_t target1 = small[idx_s];
-        uint16_t target2 = small[idx_s + 1];
-        uint16_t target3 = small[idx_s + 2];
-        uint16_t target4 = small[idx_s + 3];
+        uint16_t target1 = small_set[idx_s];
+        uint16_t target2 = small_set[idx_s + 1];
+        uint16_t target3 = small_set[idx_s + 2];
+        uint16_t target4 = small_set[idx_s + 3];
         binarySearch4(large + idx_l, (int32_t)(size_l - idx_l), target1, target2,
                       target3, target4, &index1, &index2, &index3, &index4);
         if ((index1 + idx_l < size_l) && (large[idx_l + index1] == target1)) {
@@ -12015,8 +12015,8 @@ static int32_t intersect_skewed_uint16(const uint16_t *small, size_t size_s,
         idx_l += index4;
     }
     if ((idx_s + 2 <= size_s) && (idx_l < size_l)) {
-        uint16_t target1 = small[idx_s];
-        uint16_t target2 = small[idx_s + 1];
+        uint16_t target1 = small_set[idx_s];
+        uint16_t target2 = small_set[idx_s + 1];
         binarySearch2(large + idx_l, (int32_t)(size_l - idx_l), target1, target2,
                       &index1, &index2);
         if ((index1 + idx_l < size_l) && (large[idx_l + index1] == target1)) {
@@ -12029,7 +12029,7 @@ static int32_t intersect_skewed_uint16(const uint16_t *small, size_t size_s,
         idx_l += index2;
     }
     if ((idx_s < size_s) && (idx_l < size_l)) {
-        uint16_t val_s = small[idx_s];
+        uint16_t val_s = small_set[idx_s];
         int32_t index =
             binarySearch(large + idx_l, (int32_t)(size_l - idx_l), val_s);
         if (index >= 0) buffer[pos++] = val_s;
@@ -12040,7 +12040,7 @@ static int32_t intersect_skewed_uint16(const uint16_t *small, size_t size_s,
 }
 
 // TODO: this could be accelerated, possibly, by using binarySearch4 as above.
-static int32_t intersect_skewed_uint16_cardinality(const uint16_t *small,
+static int32_t intersect_skewed_uint16_cardinality(const uint16_t *small_set,
                                                    size_t size_s,
                                                    const uint16_t *large,
                                                    size_t size_l) {
@@ -12050,7 +12050,7 @@ static int32_t intersect_skewed_uint16_cardinality(const uint16_t *small,
         return 0;
     }
 
-    uint16_t val_l = large[idx_l], val_s = small[idx_s];
+    uint16_t val_l = large[idx_l], val_s = small_set[idx_s];
 
     while (true) {
         if (val_l < val_s) {
@@ -12060,12 +12060,12 @@ static int32_t intersect_skewed_uint16_cardinality(const uint16_t *small,
         } else if (val_s < val_l) {
            idx_s++;
            if (idx_s == size_s) break;
-           val_s = small[idx_s];
+           val_s = small_set[idx_s];
         } else {
            pos++;
            idx_s++;
            if (idx_s == size_s) break;
-           val_s = small[idx_s];
+           val_s = small_set[idx_s];
            idx_l = advanceUntil(large, (int32_t)idx_l, (int32_t)size_l, val_s);
            if (idx_l == size_l) break;
            val_l = large[idx_l];
@@ -12075,7 +12075,7 @@ static int32_t intersect_skewed_uint16_cardinality(const uint16_t *small,
     return (int32_t)pos;
 }
 
-bool intersect_skewed_uint16_nonempty(const uint16_t *small, size_t size_s,
+bool intersect_skewed_uint16_nonempty(const uint16_t *small_set, size_t size_s,
                                       const uint16_t *large, size_t size_l) {
     size_t idx_l = 0, idx_s = 0;
 
@@ -12083,7 +12083,7 @@ bool intersect_skewed_uint16_nonempty(const uint16_t *small, size_t size_s,
         return false;
     }
 
-    uint16_t val_l = large[idx_l], val_s = small[idx_s];
+    uint16_t val_l = large[idx_l], val_s = small_set[idx_s];
 
     while (true) {
         if (val_l < val_s) {
@@ -12093,7 +12093,7 @@ bool intersect_skewed_uint16_nonempty(const uint16_t *small, size_t size_s,
         } else if (val_s < val_l) {
            idx_s++;
            if (idx_s == size_s) break;
-           val_s = small[idx_s];
+           val_s = small_set[idx_s];
         } else {
            return true;
         }
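
The roaring.cc hunks only rename the parameter "small" to "small_set"; the logic is unchanged. The likely reason is that the Windows SDK header rpcndr.h (pulled in via windows.h) defines "small" as a macro expanding to char, so any declaration that uses "small" as an identifier stops compiling under MSVC. A minimal sketch of the clash (hypothetical, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    #define small char  /* what <rpcndr.h> effectively does on Windows builds */

    /* After preprocessing, a signature using "small" would read
     * "const uint16_t *char, ...", which is a syntax error:
     *
     *   int32_t intersect_demo(const uint16_t *small, size_t size_s);
     */

    /* The renamed parameter is untouched by the macro and compiles everywhere. */
    int32_t intersect_demo(const uint16_t *small_set, size_t size_s);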