Diffstat (limited to 'src/lib/third_party/src')
-rw-r--r--  src/lib/third_party/src/gcrypt/aes.c           508
-rw-r--r--  src/lib/third_party/src/gcrypt/aesni.c         461
-rw-r--r--  src/lib/third_party/src/gcrypt/cipher.c        591
-rw-r--r--  src/lib/third_party/src/gcrypt/cipher_wrap.c   252
-rw-r--r--  src/lib/third_party/src/gcrypt/digest.c        321
-rw-r--r--  src/lib/third_party/src/gcrypt/gcm.c           667
-rw-r--r--  src/lib/third_party/src/gcrypt_light.c         375
7 files changed, 3175 insertions, 0 deletions
diff --git a/src/lib/third_party/src/gcrypt/aes.c b/src/lib/third_party/src/gcrypt/aes.c
new file mode 100644
index 000000000..61dc2137a
--- /dev/null
+++ b/src/lib/third_party/src/gcrypt/aes.c
@@ -0,0 +1,508 @@
+/*
+ * FIPS-197 compliant AES implementation
+ *
+ * Copyright The Mbed TLS Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * The AES block cipher was designed by Vincent Rijmen and Joan Daemen.
+ *
+ * http://csrc.nist.gov/encryption/aes/rijndael/Rijndael.pdf
+ * http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
+ */
+
+
+/* Parameter validation macros based on platform_util.h */
+#define AES_VALIDATE_RET( cond ) \
+ MBEDTLS_INTERNAL_VALIDATE_RET( cond, MBEDTLS_ERR_AES_BAD_INPUT_DATA )
+#define AES_VALIDATE( cond ) MBEDTLS_INTERNAL_VALIDATE( cond )
+
+/*
+ * Forward S-box & tables
+ */
+static unsigned char FSb[256];
+static uint32_t FT0[256];
+static uint32_t FT1[256];
+static uint32_t FT2[256];
+static uint32_t FT3[256];
+
+/*
+ * Reverse S-box & tables
+ */
+static unsigned char RSb[256];
+static uint32_t RT0[256];
+static uint32_t RT1[256];
+static uint32_t RT2[256];
+static uint32_t RT3[256];
+
+/*
+ * Round constants
+ */
+static uint32_t RCON[10];
+
+/*
+ * Tables generation code
+ */
+#define XTIME(x) ( ( (x) << 1 ) ^ ( ( (x) & 0x80 ) ? 0x1B : 0x00 ) )
+#define MUL(x,y) ( ( (x) && (y) ) ? pow[(log[(x)]+log[(y)]) % 255] : 0 )
+
+static int aes_init_done = 0;
+int aes_aesni_has_support = 0;
+
+static void aes_gen_tables( void )
+{
+ int i, x, y, z;
+ int pow[256];
+ int log[256];
+
+#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
+ if( mbedtls_aesni_has_support( MBEDTLS_AESNI_AES ) )
+ aes_aesni_has_support = 1;
+#endif
+
+ /*
+ * compute pow and log tables over GF(2^8)
+ */
+ for( i = 0, x = 1; i < 256; i++ )
+ {
+ pow[i] = x;
+ log[x] = i;
+ x = MBEDTLS_BYTE_0( x ^ XTIME( x ) );
+ }
+
+ /*
+ * calculate the round constants
+ */
+ for( i = 0, x = 1; i < 10; i++ )
+ {
+ RCON[i] = (uint32_t) x;
+ x = MBEDTLS_BYTE_0( XTIME( x ) );
+ }
+
+ /*
+ * generate the forward and reverse S-boxes
+ */
+ FSb[0x00] = 0x63;
+ RSb[0x63] = 0x00;
+
+ for( i = 1; i < 256; i++ )
+ {
+ x = pow[255 - log[i]];
+
+ y = x; y = MBEDTLS_BYTE_0( ( y << 1 ) | ( y >> 7 ) );
+ x ^= y; y = MBEDTLS_BYTE_0( ( y << 1 ) | ( y >> 7 ) );
+ x ^= y; y = MBEDTLS_BYTE_0( ( y << 1 ) | ( y >> 7 ) );
+ x ^= y; y = MBEDTLS_BYTE_0( ( y << 1 ) | ( y >> 7 ) );
+ x ^= y ^ 0x63;
+
+ FSb[i] = (unsigned char) x;
+ RSb[x] = (unsigned char) i;
+ }
+
+ /*
+ * generate the forward and reverse tables
+ */
+ for( i = 0; i < 256; i++ )
+ {
+ x = FSb[i];
+ y = MBEDTLS_BYTE_0( XTIME( x ) );
+ z = MBEDTLS_BYTE_0( y ^ x );
+
+ FT0[i] = ( (uint32_t) y ) ^
+ ( (uint32_t) x << 8 ) ^
+ ( (uint32_t) x << 16 ) ^
+ ( (uint32_t) z << 24 );
+
+ FT1[i] = ROTL8( FT0[i] );
+ FT2[i] = ROTL8( FT1[i] );
+ FT3[i] = ROTL8( FT2[i] );
+
+ x = RSb[i];
+
+ RT0[i] = ( (uint32_t) MUL( 0x0E, x ) ) ^
+ ( (uint32_t) MUL( 0x09, x ) << 8 ) ^
+ ( (uint32_t) MUL( 0x0D, x ) << 16 ) ^
+ ( (uint32_t) MUL( 0x0B, x ) << 24 );
+
+ RT1[i] = ROTL8( RT0[i] );
+ RT2[i] = ROTL8( RT1[i] );
+ RT3[i] = ROTL8( RT2[i] );
+ }
+}
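
Note: the XTIME() and MUL() macros above implement multiplication in GF(2^8) modulo the AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11B), and aes_gen_tables() uses them to derive the S-boxes and the combined SubBytes/MixColumns tables at runtime. A minimal standalone sketch of the same field multiplication (plain shift-and-reduce, no log/pow tables; the function name is illustrative, not part of this patch) that should reproduce the worked example {57} x {83} = {c1} from FIPS-197 section 4.2:

    #include <stdint.h>
    #include <assert.h>

    /* Multiply two elements of GF(2^8) with the AES reduction polynomial 0x11B. */
    static uint8_t gf256_mul(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;
        while (b) {
            if (b & 1)
                r ^= a;                                   /* add (XOR) the current multiple of a */
            a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1B : 0x00)); /* a = xtime(a) */
            b >>= 1;
        }
        return r;
    }

    int main(void)
    {
        assert(gf256_mul(0x57, 0x83) == 0xC1);            /* FIPS-197, section 4.2 example */
        return 0;
    }
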
+
+#define AES_RT0(idx) RT0[idx]
+#define AES_RT1(idx) RT1[idx]
+#define AES_RT2(idx) RT2[idx]
+#define AES_RT3(idx) RT3[idx]
+
+#define AES_FT0(idx) FT0[idx]
+#define AES_FT1(idx) FT1[idx]
+#define AES_FT2(idx) FT2[idx]
+#define AES_FT3(idx) FT3[idx]
+
+void mbedtls_aes_init( mbedtls_aes_context *ctx )
+{
+ AES_VALIDATE( ctx != NULL );
+
+ memset( ctx, 0, sizeof( mbedtls_aes_context ) );
+}
+
+void mbedtls_aes_free( mbedtls_aes_context *ctx )
+{
+ if( ctx == NULL )
+ return;
+
+ // mbedtls_platform_zeroize( ctx, sizeof( mbedtls_aes_context ) );
+}
+
+
+/*
+ * AES key schedule (encryption)
+ */
+int mbedtls_aes_setkey_enc( mbedtls_aes_context *ctx, const unsigned char *key,
+ unsigned int keybits )
+{
+ unsigned int i;
+ uint32_t *RK;
+
+ AES_VALIDATE_RET( ctx != NULL );
+ AES_VALIDATE_RET( key != NULL );
+
+ switch( keybits )
+ {
+ case 128: ctx->nr = 10; break;
+ case 192: ctx->nr = 12; break;
+ case 256: ctx->nr = 14; break;
+ default : return( MBEDTLS_ERR_AES_INVALID_KEY_LENGTH );
+ }
+
+ if( aes_init_done == 0 )
+ {
+ aes_gen_tables();
+ aes_init_done = 1;
+ }
+
+ ctx->rk = RK = ctx->buf;
+
+#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
+ if( aes_aesni_has_support )
+ return( mbedtls_aesni_setkey_enc( (unsigned char *) ctx->rk, key, keybits ) );
+#endif
+
+ for( i = 0; i < ( keybits >> 5 ); i++ )
+ {
+ RK[i] = MBEDTLS_GET_UINT32_LE( key, i << 2 );
+ }
+
+ switch( ctx->nr )
+ {
+ case 10:
+
+ for( i = 0; i < 10; i++, RK += 4 )
+ {
+ RK[4] = RK[0] ^ RCON[i] ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_1( RK[3] ) ] ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_2( RK[3] ) ] << 8 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_3( RK[3] ) ] << 16 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_0( RK[3] ) ] << 24 );
+
+ RK[5] = RK[1] ^ RK[4];
+ RK[6] = RK[2] ^ RK[5];
+ RK[7] = RK[3] ^ RK[6];
+ }
+ break;
+
+ case 12:
+
+ for( i = 0; i < 8; i++, RK += 6 )
+ {
+ RK[6] = RK[0] ^ RCON[i] ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_1( RK[5] ) ] ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_2( RK[5] ) ] << 8 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_3( RK[5] ) ] << 16 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_0( RK[5] ) ] << 24 );
+
+ RK[7] = RK[1] ^ RK[6];
+ RK[8] = RK[2] ^ RK[7];
+ RK[9] = RK[3] ^ RK[8];
+ RK[10] = RK[4] ^ RK[9];
+ RK[11] = RK[5] ^ RK[10];
+ }
+ break;
+
+ case 14:
+
+ for( i = 0; i < 7; i++, RK += 8 )
+ {
+ RK[8] = RK[0] ^ RCON[i] ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_1( RK[7] ) ] ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_2( RK[7] ) ] << 8 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_3( RK[7] ) ] << 16 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_0( RK[7] ) ] << 24 );
+
+ RK[9] = RK[1] ^ RK[8];
+ RK[10] = RK[2] ^ RK[9];
+ RK[11] = RK[3] ^ RK[10];
+
+ RK[12] = RK[4] ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_0( RK[11] ) ] ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_1( RK[11] ) ] << 8 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_2( RK[11] ) ] << 16 ) ^
+ ( (uint32_t) FSb[ MBEDTLS_BYTE_3( RK[11] ) ] << 24 );
+
+ RK[13] = RK[5] ^ RK[12];
+ RK[14] = RK[6] ^ RK[13];
+ RK[15] = RK[7] ^ RK[14];
+ }
+ break;
+ }
+
+ return( 0 );
+}
+
+/*
+ * AES key schedule (decryption)
+ */
+int mbedtls_aes_setkey_dec( mbedtls_aes_context *ctx, const unsigned char *key,
+ unsigned int keybits )
+{
+ int i, j, ret;
+ mbedtls_aes_context cty;
+ uint32_t *RK;
+ uint32_t *SK;
+
+ AES_VALIDATE_RET( ctx != NULL );
+ AES_VALIDATE_RET( key != NULL );
+
+ mbedtls_aes_init( &cty );
+
+ ctx->rk = RK = ctx->buf;
+
+ /* Also checks keybits */
+ if( ( ret = mbedtls_aes_setkey_enc( &cty, key, keybits ) ) != 0 )
+ goto exit;
+
+ ctx->nr = cty.nr;
+
+#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
+ if( aes_aesni_has_support ) {
+ mbedtls_aesni_inverse_key( (unsigned char *) ctx->rk,
+ (const unsigned char *) cty.rk, ctx->nr );
+ goto exit;
+ }
+#endif
+
+ SK = cty.rk + cty.nr * 4;
+
+ *RK++ = *SK++;
+ *RK++ = *SK++;
+ *RK++ = *SK++;
+ *RK++ = *SK++;
+
+ for( i = ctx->nr - 1, SK -= 8; i > 0; i--, SK -= 8 )
+ {
+ for( j = 0; j < 4; j++, SK++ )
+ {
+ *RK++ = AES_RT0( FSb[ MBEDTLS_BYTE_0( *SK ) ] ) ^
+ AES_RT1( FSb[ MBEDTLS_BYTE_1( *SK ) ] ) ^
+ AES_RT2( FSb[ MBEDTLS_BYTE_2( *SK ) ] ) ^
+ AES_RT3( FSb[ MBEDTLS_BYTE_3( *SK ) ] );
+ }
+ }
+
+ *RK++ = *SK++;
+ *RK++ = *SK++;
+ *RK++ = *SK++;
+ *RK++ = *SK++;
+
+exit:
+ mbedtls_aes_free( &cty );
+
+ return( ret );
+}
+
+#define AES_FROUND(X0, X1, X2, X3, Y0, Y1, Y2, Y3) \
+ { uint32_t T; \
+ X0 = *RK++; X1 = *RK++; X2 = *RK++; X3 = *RK++; \
+ T=Y0; \
+ X0 ^= FT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X3 ^= FT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X2 ^= FT2[ ( T ) & 0xFF ]; T >>= 8; \
+ X1 ^= FT3[ ( T ) & 0xFF ]; \
+ T=Y1; \
+ X1 ^= FT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X0 ^= FT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X3 ^= FT2[ ( T ) & 0xFF ]; T >>= 8; \
+ X2 ^= FT3[ ( T ) & 0xFF ]; \
+ T=Y2; \
+ X2 ^= FT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X1 ^= FT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X0 ^= FT2[ ( T ) & 0xFF ]; T >>= 8; \
+ X3 ^= FT3[ ( T ) & 0xFF ]; \
+ T=Y3; \
+ X3 ^= FT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X2 ^= FT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X1 ^= FT2[ ( T ) & 0xFF ]; T >>= 8; \
+ X0 ^= FT3[ ( T ) & 0xFF ]; \
+ }
+
+#define AES_RROUND(X0,X1,X2,X3,Y0,Y1,Y2,Y3) \
+{ uint32_t T; \
+ X0 = *RK++; X1 = *RK++; X2 = *RK++; X3 = *RK++; \
+ T=Y0; \
+ X0 ^= RT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X1 ^= RT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X2 ^= RT2[ ( T ) & 0xFF ]; T >>= 8; \
+ X3 ^= RT3[ ( T ) & 0xFF ]; \
+ T=Y1; \
+ X1 ^= RT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X2 ^= RT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X3 ^= RT2[ ( T ) & 0xFF ]; T >>= 8; \
+ X0 ^= RT3[ ( T ) & 0xFF ]; \
+ T=Y2; \
+ X2 ^= RT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X3 ^= RT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X0 ^= RT2[ ( T ) & 0xFF ]; T >>= 8; \
+    X1 ^= RT3[ ( T ) & 0xFF ];                   \
+ T=Y3; \
+ X3 ^= RT0[ ( T ) & 0xFF ]; T >>= 8; \
+ X0 ^= RT1[ ( T ) & 0xFF ]; T >>= 8; \
+ X1 ^= RT2[ ( T ) & 0xFF ]; T >>= 8; \
+ X2 ^= RT3[ ( T ) & 0xFF ]; \
+}
+
+/*
+ * AES-ECB block encryption
+ */
+int mbedtls_internal_aes_encrypt( mbedtls_aes_context *ctx,
+ const unsigned char input[16],
+ unsigned char output[16] )
+{
+ int i;
+ uint32_t T0,*RK = ctx->rk;
+ struct
+ {
+ uint32_t X[4];
+ uint32_t Y[4];
+ } t;
+
+ t.X[0] = MBEDTLS_GET_UINT32_LE( input, 0 ); t.X[0] ^= *RK++;
+ t.X[1] = MBEDTLS_GET_UINT32_LE( input, 4 ); t.X[1] ^= *RK++;
+ t.X[2] = MBEDTLS_GET_UINT32_LE( input, 8 ); t.X[2] ^= *RK++;
+ t.X[3] = MBEDTLS_GET_UINT32_LE( input, 12 ); t.X[3] ^= *RK++;
+
+ for( i = ( ctx->nr >> 1 ) - 1; i > 0; i-- )
+ {
+ AES_FROUND( t.Y[0], t.Y[1], t.Y[2], t.Y[3], t.X[0], t.X[1], t.X[2], t.X[3] );
+ AES_FROUND( t.X[0], t.X[1], t.X[2], t.X[3], t.Y[0], t.Y[1], t.Y[2], t.Y[3] );
+ }
+
+ AES_FROUND( t.Y[0], t.Y[1], t.Y[2], t.Y[3], t.X[0], t.X[1], t.X[2], t.X[3] );
+#define AES_XROUND(X,Y0,Y1,Y2,Y3) \
+ T0 = FSb[ ( Y3 >> 24 ) & 0xFF ]; T0 <<= 8; \
+ T0 |= FSb[ ( Y2 >> 16 ) & 0xFF ]; T0 <<= 8; \
+ T0 |= FSb[ ( Y1 >> 8 ) & 0xFF ]; T0 <<= 8;\
+ T0 |= FSb[ ( Y0 ) & 0xFF ]; \
+ X = *RK++ ^ T0
+
+ AES_XROUND(t.X[0],t.Y[0],t.Y[1],t.Y[2],t.Y[3]);
+ AES_XROUND(t.X[1],t.Y[1],t.Y[2],t.Y[3],t.Y[0]);
+ AES_XROUND(t.X[2],t.Y[2],t.Y[3],t.Y[0],t.Y[1]);
+ AES_XROUND(t.X[3],t.Y[3],t.Y[0],t.Y[1],t.Y[2]);
+#undef AES_XROUND
+
+ MBEDTLS_PUT_UINT32_LE( t.X[0], output, 0 );
+ MBEDTLS_PUT_UINT32_LE( t.X[1], output, 4 );
+ MBEDTLS_PUT_UINT32_LE( t.X[2], output, 8 );
+ MBEDTLS_PUT_UINT32_LE( t.X[3], output, 12 );
+
+ return( 0 );
+}
+
+/*
+ * AES-ECB block decryption
+ */
+int mbedtls_internal_aes_decrypt( mbedtls_aes_context *ctx,
+ const unsigned char input[16],
+ unsigned char output[16] )
+{
+ int i;
+ uint32_t T0,*RK = ctx->rk;
+ struct
+ {
+ uint32_t X[4];
+ uint32_t Y[4];
+ } t;
+
+ t.X[0] = MBEDTLS_GET_UINT32_LE( input, 0 ); t.X[0] ^= *RK++;
+ t.X[1] = MBEDTLS_GET_UINT32_LE( input, 4 ); t.X[1] ^= *RK++;
+ t.X[2] = MBEDTLS_GET_UINT32_LE( input, 8 ); t.X[2] ^= *RK++;
+ t.X[3] = MBEDTLS_GET_UINT32_LE( input, 12 ); t.X[3] ^= *RK++;
+
+ for( i = ( ctx->nr >> 1 ) - 1; i > 0; i-- )
+ {
+ AES_RROUND( t.Y[0], t.Y[1], t.Y[2], t.Y[3], t.X[0], t.X[1], t.X[2], t.X[3] );
+ AES_RROUND( t.X[0], t.X[1], t.X[2], t.X[3], t.Y[0], t.Y[1], t.Y[2], t.Y[3] );
+ }
+
+ AES_RROUND( t.Y[0], t.Y[1], t.Y[2], t.Y[3], t.X[0], t.X[1], t.X[2], t.X[3] );
+#define AES_XROUNDD(X,Y0,Y1,Y2,Y3) \
+ T0 = RSb[ ( Y3 >> 24 ) & 0xFF ]; T0 <<= 8; \
+ T0 |= RSb[ ( Y2 >> 16 ) & 0xFF ]; T0 <<= 8; \
+ T0 |= RSb[ ( Y1 >> 8 ) & 0xFF ]; T0 <<= 8;\
+ T0 |= RSb[ ( Y0 ) & 0xFF ]; \
+ X = *RK++ ^ T0
+
+ AES_XROUNDD(t.X[0],t.Y[0],t.Y[3],t.Y[2],t.Y[1]);
+ AES_XROUNDD(t.X[1],t.Y[1],t.Y[0],t.Y[3],t.Y[2]);
+ AES_XROUNDD(t.X[2],t.Y[2],t.Y[1],t.Y[0],t.Y[3]);
+ AES_XROUNDD(t.X[3],t.Y[3],t.Y[2],t.Y[1],t.Y[0]);
+
+#undef AES_XROUNDD
+
+ MBEDTLS_PUT_UINT32_LE( t.X[0], output, 0 );
+ MBEDTLS_PUT_UINT32_LE( t.X[1], output, 4 );
+ MBEDTLS_PUT_UINT32_LE( t.X[2], output, 8 );
+ MBEDTLS_PUT_UINT32_LE( t.X[3], output, 12 );
+
+ return( 0 );
+}
+
+/*
+ * AES-ECB block encryption/decryption
+ */
+int mbedtls_aes_crypt_ecb( mbedtls_aes_context *ctx,
+ int mode,
+ const unsigned char input[16],
+ unsigned char output[16] )
+{
+ AES_VALIDATE_RET( ctx != NULL );
+ AES_VALIDATE_RET( input != NULL );
+ AES_VALIDATE_RET( output != NULL );
+ AES_VALIDATE_RET( mode == MBEDTLS_AES_ENCRYPT ||
+ mode == MBEDTLS_AES_DECRYPT );
+
+#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
+ if( aes_aesni_has_support )
+ return( mbedtls_aesni_crypt_ecb( ctx, mode, input, output ) );
+#endif
+
+
+ if( mode == MBEDTLS_AES_ENCRYPT )
+ return( mbedtls_internal_aes_encrypt( ctx, input, output ) );
+ else
+ return( mbedtls_internal_aes_decrypt( ctx, input, output ) );
+}
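
Note: together, mbedtls_aes_init(), mbedtls_aes_setkey_enc() and mbedtls_aes_crypt_ecb() give a single-block ECB primitive. A hedged usage sketch against the FIPS-197 Appendix C.1 known-answer vector (the helper name is illustrative; <string.h> is assumed to be available in this translation unit):

    static int aes_ecb_selftest( void )
    {
        static const unsigned char key[16] = {
            0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
            0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f };
        static const unsigned char pt[16] = {
            0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,
            0x88,0x99,0xaa,0xbb,0xcc,0xdd,0xee,0xff };
        static const unsigned char expected[16] = {   /* FIPS-197 Appendix C.1 */
            0x69,0xc4,0xe0,0xd8,0x6a,0x7b,0x04,0x30,
            0xd8,0xcd,0xb7,0x80,0x70,0xb4,0xc5,0x5a };
        unsigned char ct[16];
        mbedtls_aes_context aes;
        int ok;

        mbedtls_aes_init( &aes );
        ok = mbedtls_aes_setkey_enc( &aes, key, 128 ) == 0 &&
             mbedtls_aes_crypt_ecb( &aes, MBEDTLS_AES_ENCRYPT, pt, ct ) == 0 &&
             memcmp( ct, expected, 16 ) == 0;
        mbedtls_aes_free( &aes );
        return ok;
    }
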
+
diff --git a/src/lib/third_party/src/gcrypt/aesni.c b/src/lib/third_party/src/gcrypt/aesni.c
new file mode 100644
index 000000000..76edeea4e
--- /dev/null
+++ b/src/lib/third_party/src/gcrypt/aesni.c
@@ -0,0 +1,461 @@
+/*
+ * AES-NI support functions
+ *
+ * Copyright The Mbed TLS Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * [AES-WP] http://software.intel.com/en-us/articles/intel-advanced-encryption-standard-aes-instructions-set
+ * [CLMUL-WP] http://software.intel.com/en-us/articles/intel-carry-less-multiplication-instruction-and-its-usage-for-computing-the-gcm-mode/
+ */
+
+
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+#warning "MBEDTLS_AESNI_C is known to cause spurious error reports with some memory sanitizers as they do not understand the assembly code."
+#endif
+#endif
+
+#ifndef asm
+#define asm __asm
+#endif
+
+#if defined(MBEDTLS_HAVE_X86_64)
+
+/*
+ * AES-NI support detection routine
+ */
+int mbedtls_aesni_has_support( unsigned int what )
+{
+ static int done = 0;
+ static unsigned int c = 0;
+
+#if defined(__has_feature)
+# if __has_feature(memory_sanitizer)
+ return 0;
+# endif
+#endif
+
+ if( ! done )
+ {
+ asm( "movl $1, %%eax \n\t"
+ "cpuid \n\t"
+ : "=c" (c)
+ :
+ : "eax", "ebx", "edx" );
+ done = 1;
+ }
+
+ return( ( (volatile unsigned int)c & what ) != 0 );
+}
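
Note: the `what` argument is a mask applied to the cached CPUID.1:ECX value. In stock Mbed TLS the constants are MBEDTLS_AESNI_AES (0x02000000, ECX bit 25) and MBEDTLS_AESNI_CLMUL (0x00000002, ECX bit 1); assuming this port keeps the same values, a caller just tests the feature it needs:

    /* Sketch: take the hardware AES-GCM path only when both features are reported. */
    static int aes_gcm_hw_path_usable( void )
    {
        return mbedtls_aesni_has_support( MBEDTLS_AESNI_AES ) &&
               mbedtls_aesni_has_support( MBEDTLS_AESNI_CLMUL );
    }
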
+
+/*
+ * Binutils needs to be at least 2.19 to support AES-NI instructions.
+ * Unfortunately, a lot of users have a lower version now (2014-04).
+ * Emit bytecode directly in order to support "old" version of gas.
+ *
+ * Opcodes from the Intel architecture reference manual, vol. 3.
+ * We always use registers, so we don't need prefixes for memory operands.
+ * Operand macros are in gas order (src, dst) as opposed to Intel order
+ * (dst, src) in order to blend better into the surrounding assembly code.
+ */
+#define AESDEC ".byte 0x66,0x0F,0x38,0xDE,"
+#define AESDECLAST ".byte 0x66,0x0F,0x38,0xDF,"
+#define AESENC ".byte 0x66,0x0F,0x38,0xDC,"
+#define AESENCLAST ".byte 0x66,0x0F,0x38,0xDD,"
+#define AESIMC ".byte 0x66,0x0F,0x38,0xDB,"
+#define AESKEYGENA ".byte 0x66,0x0F,0x3A,0xDF,"
+#define PCLMULQDQ ".byte 0x66,0x0F,0x3A,0x44,"
+
+#define xmm0_xmm0 "0xC0"
+#define xmm0_xmm1 "0xC8"
+#define xmm0_xmm2 "0xD0"
+#define xmm0_xmm3 "0xD8"
+#define xmm0_xmm4 "0xE0"
+#define xmm1_xmm0 "0xC1"
+#define xmm1_xmm2 "0xD1"
+
+/*
+ * AES-NI AES-ECB block en(de)cryption
+ */
+int mbedtls_aesni_crypt_ecb( mbedtls_aes_context *ctx,
+ int mode,
+ const unsigned char input[16],
+ unsigned char output[16] )
+{
+ asm( "movdqu (%3), %%xmm0 \n\t" // load input
+ "movdqu (%1), %%xmm1 \n\t" // load round key 0
+ "pxor %%xmm1, %%xmm0 \n\t" // round 0
+ "add $16, %1 \n\t" // point to next round key
+ "subl $1, %0 \n\t" // normal rounds = nr - 1
+ "test %2, %2 \n\t" // mode?
+ "jz 2f \n\t" // 0 = decrypt
+
+ "1: \n\t" // encryption loop
+ "movdqu (%1), %%xmm1 \n\t" // load round key
+ AESENC xmm1_xmm0 "\n\t" // do round
+ "add $16, %1 \n\t" // point to next round key
+ "subl $1, %0 \n\t" // loop
+ "jnz 1b \n\t"
+ "movdqu (%1), %%xmm1 \n\t" // load round key
+ AESENCLAST xmm1_xmm0 "\n\t" // last round
+ "jmp 3f \n\t"
+
+ "2: \n\t" // decryption loop
+ "movdqu (%1), %%xmm1 \n\t"
+ AESDEC xmm1_xmm0 "\n\t" // do round
+ "add $16, %1 \n\t"
+ "subl $1, %0 \n\t"
+ "jnz 2b \n\t"
+ "movdqu (%1), %%xmm1 \n\t" // load round key
+ AESDECLAST xmm1_xmm0 "\n\t" // last round
+
+ "3: \n\t"
+ "movdqu %%xmm0, (%4) \n\t" // export output
+ :
+ : "r" (ctx->nr), "r" (ctx->rk), "r" (mode), "r" (input), "r" (output)
+ : "memory", "cc", "xmm0", "xmm1" );
+
+
+ return( 0 );
+}
+
+/*
+ * GCM multiplication: c = a times b in GF(2^128)
+ * Based on [CLMUL-WP] algorithms 1 (with equation 27) and 5.
+ */
+void mbedtls_aesni_gcm_mult( unsigned char c[16],
+ const unsigned char a[16],
+ const unsigned char b[16] )
+{
+ unsigned char aa[16], bb[16], cc[16];
+ size_t i;
+
+ /* The inputs are in big-endian order, so byte-reverse them */
+ for( i = 0; i < 16; i++ )
+ {
+ aa[i] = a[15 - i];
+ bb[i] = b[15 - i];
+ }
+
+ asm( "movdqu (%0), %%xmm0 \n\t" // a1:a0
+ "movdqu (%1), %%xmm1 \n\t" // b1:b0
+
+ /*
+          * Carryless multiplication xmm2:xmm1 = xmm0 * xmm1
+ * using [CLMUL-WP] algorithm 1 (p. 13).
+ */
+ "movdqa %%xmm1, %%xmm2 \n\t" // copy of b1:b0
+ "movdqa %%xmm1, %%xmm3 \n\t" // same
+ "movdqa %%xmm1, %%xmm4 \n\t" // same
+ PCLMULQDQ xmm0_xmm1 ",0x00 \n\t" // a0*b0 = c1:c0
+ PCLMULQDQ xmm0_xmm2 ",0x11 \n\t" // a1*b1 = d1:d0
+ PCLMULQDQ xmm0_xmm3 ",0x10 \n\t" // a0*b1 = e1:e0
+ PCLMULQDQ xmm0_xmm4 ",0x01 \n\t" // a1*b0 = f1:f0
+ "pxor %%xmm3, %%xmm4 \n\t" // e1+f1:e0+f0
+ "movdqa %%xmm4, %%xmm3 \n\t" // same
+ "psrldq $8, %%xmm4 \n\t" // 0:e1+f1
+ "pslldq $8, %%xmm3 \n\t" // e0+f0:0
+ "pxor %%xmm4, %%xmm2 \n\t" // d1:d0+e1+f1
+ "pxor %%xmm3, %%xmm1 \n\t" // c1+e0+f1:c0
+
+ /*
+ * Now shift the result one bit to the left,
+ * taking advantage of [CLMUL-WP] eq 27 (p. 20)
+ */
+ "movdqa %%xmm1, %%xmm3 \n\t" // r1:r0
+ "movdqa %%xmm2, %%xmm4 \n\t" // r3:r2
+ "psllq $1, %%xmm1 \n\t" // r1<<1:r0<<1
+ "psllq $1, %%xmm2 \n\t" // r3<<1:r2<<1
+ "psrlq $63, %%xmm3 \n\t" // r1>>63:r0>>63
+ "psrlq $63, %%xmm4 \n\t" // r3>>63:r2>>63
+ "movdqa %%xmm3, %%xmm5 \n\t" // r1>>63:r0>>63
+ "pslldq $8, %%xmm3 \n\t" // r0>>63:0
+ "pslldq $8, %%xmm4 \n\t" // r2>>63:0
+ "psrldq $8, %%xmm5 \n\t" // 0:r1>>63
+ "por %%xmm3, %%xmm1 \n\t" // r1<<1|r0>>63:r0<<1
+             "por %%xmm4, %%xmm2 \n\t"          // r3<<1|r2>>63:r2<<1
+             "por %%xmm5, %%xmm2 \n\t"          // r3<<1|r2>>63:r2<<1|r1>>63
+
+ /*
+ * Now reduce modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1
+ * using [CLMUL-WP] algorithm 5 (p. 20).
+ * Currently xmm2:xmm1 holds x3:x2:x1:x0 (already shifted).
+ */
+ /* Step 2 (1) */
+ "movdqa %%xmm1, %%xmm3 \n\t" // x1:x0
+ "movdqa %%xmm1, %%xmm4 \n\t" // same
+ "movdqa %%xmm1, %%xmm5 \n\t" // same
+ "psllq $63, %%xmm3 \n\t" // x1<<63:x0<<63 = stuff:a
+ "psllq $62, %%xmm4 \n\t" // x1<<62:x0<<62 = stuff:b
+ "psllq $57, %%xmm5 \n\t" // x1<<57:x0<<57 = stuff:c
+
+ /* Step 2 (2) */
+ "pxor %%xmm4, %%xmm3 \n\t" // stuff:a+b
+ "pxor %%xmm5, %%xmm3 \n\t" // stuff:a+b+c
+ "pslldq $8, %%xmm3 \n\t" // a+b+c:0
+ "pxor %%xmm3, %%xmm1 \n\t" // x1+a+b+c:x0 = d:x0
+
+ /* Steps 3 and 4 */
+ "movdqa %%xmm1,%%xmm0 \n\t" // d:x0
+ "movdqa %%xmm1,%%xmm4 \n\t" // same
+ "movdqa %%xmm1,%%xmm5 \n\t" // same
+ "psrlq $1, %%xmm0 \n\t" // e1:x0>>1 = e1:e0'
+ "psrlq $2, %%xmm4 \n\t" // f1:x0>>2 = f1:f0'
+ "psrlq $7, %%xmm5 \n\t" // g1:x0>>7 = g1:g0'
+ "pxor %%xmm4, %%xmm0 \n\t" // e1+f1:e0'+f0'
+ "pxor %%xmm5, %%xmm0 \n\t" // e1+f1+g1:e0'+f0'+g0'
+             // e0'+f0'+g0' is almost e0+f0+g0, except for some missing
+             // bits carried from d. Now get those bits back in.
+ "movdqa %%xmm1,%%xmm3 \n\t" // d:x0
+ "movdqa %%xmm1,%%xmm4 \n\t" // same
+ "movdqa %%xmm1,%%xmm5 \n\t" // same
+ "psllq $63, %%xmm3 \n\t" // d<<63:stuff
+ "psllq $62, %%xmm4 \n\t" // d<<62:stuff
+ "psllq $57, %%xmm5 \n\t" // d<<57:stuff
+ "pxor %%xmm4, %%xmm3 \n\t" // d<<63+d<<62:stuff
+ "pxor %%xmm5, %%xmm3 \n\t" // missing bits of d:stuff
+ "psrldq $8, %%xmm3 \n\t" // 0:missing bits of d
+ "pxor %%xmm3, %%xmm0 \n\t" // e1+f1+g1:e0+f0+g0
+ "pxor %%xmm1, %%xmm0 \n\t" // h1:h0
+ "pxor %%xmm2, %%xmm0 \n\t" // x3+h1:x2+h0
+
+ "movdqu %%xmm0, (%2) \n\t" // done
+ :
+ : "r" (aa), "r" (bb), "r" (cc)
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" );
+
+ /* Now byte-reverse the outputs */
+ for( i = 0; i < 16; i++ )
+ c[i] = cc[15 - i];
+
+ return;
+}
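
Note: the PCLMULQDQ sequence above computes the GF(2^128) product that GHASH needs, with the byte reversal handling GCM's reflected bit order. For cross-checking, a portable sketch of the bit-serial reference algorithm from NIST SP 800-38D (Z = X * Y, reduction constant R = 0xE1 || 0^120); the function name and placement are illustrative, not part of the patch:

    /* Bit-by-bit GHASH multiplication over 16-byte big-endian blocks (SP 800-38D). */
    static void gcm_mult_ref( unsigned char z[16],
                              const unsigned char x[16],
                              const unsigned char y[16] )
    {
        unsigned char v[16], acc[16];
        int i, k, lsb;

        memcpy( v, y, 16 );
        memset( acc, 0, 16 );

        for( i = 0; i < 128; i++ )
        {
            if( ( x[i / 8] >> ( 7 - ( i % 8 ) ) ) & 1 )   /* bit i of X, MSB first */
                for( k = 0; k < 16; k++ )
                    acc[k] ^= v[k];

            /* V = V * x: shift right one bit, reduce with R if a bit falls off. */
            lsb = v[15] & 1;
            for( k = 15; k > 0; k-- )
                v[k] = (unsigned char)( ( v[k] >> 1 ) | ( v[k - 1] << 7 ) );
            v[0] >>= 1;
            if( lsb )
                v[0] ^= 0xE1;
        }

        memcpy( z, acc, 16 );
    }
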
+
+/*
+ * Compute decryption round keys from encryption round keys
+ */
+void mbedtls_aesni_inverse_key( unsigned char *invkey,
+ const unsigned char *fwdkey, int nr )
+{
+ unsigned char *ik = invkey;
+ const unsigned char *fk = fwdkey + 16 * nr;
+
+ memcpy( ik, fk, 16 );
+
+ for( fk -= 16, ik += 16; fk > fwdkey; fk -= 16, ik += 16 )
+ asm( "movdqu (%0), %%xmm0 \n\t"
+ AESIMC xmm0_xmm0 "\n\t"
+ "movdqu %%xmm0, (%1) \n\t"
+ :
+ : "r" (fk), "r" (ik)
+ : "memory", "xmm0" );
+
+ memcpy( ik, fk, 16 );
+}
+
+/*
+ * Key expansion, 128-bit case
+ */
+static void aesni_setkey_enc_128( unsigned char *rk,
+ const unsigned char *key )
+{
+ asm( "movdqu (%1), %%xmm0 \n\t" // copy the original key
+ "movdqu %%xmm0, (%0) \n\t" // as round key 0
+ "jmp 2f \n\t" // skip auxiliary routine
+
+ /*
+ * Finish generating the next round key.
+ *
+ * On entry xmm0 is r3:r2:r1:r0 and xmm1 is X:stuff:stuff:stuff
+ * with X = rot( sub( r3 ) ) ^ RCON.
+ *
+ * On exit, xmm0 is r7:r6:r5:r4
+ * with r4 = X + r0, r5 = r4 + r1, r6 = r5 + r2, r7 = r6 + r3
+ * and those are written to the round key buffer.
+ */
+ "1: \n\t"
+ "pshufd $0xff, %%xmm1, %%xmm1 \n\t" // X:X:X:X
+ "pxor %%xmm0, %%xmm1 \n\t" // X+r3:X+r2:X+r1:r4
+ "pslldq $4, %%xmm0 \n\t" // r2:r1:r0:0
+ "pxor %%xmm0, %%xmm1 \n\t" // X+r3+r2:X+r2+r1:r5:r4
+ "pslldq $4, %%xmm0 \n\t" // etc
+ "pxor %%xmm0, %%xmm1 \n\t"
+ "pslldq $4, %%xmm0 \n\t"
+ "pxor %%xmm1, %%xmm0 \n\t" // update xmm0 for next time!
+ "add $16, %0 \n\t" // point to next round key
+ "movdqu %%xmm0, (%0) \n\t" // write it
+ "ret \n\t"
+
+ /* Main "loop" */
+ "2: \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x01 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x02 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x04 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x08 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x10 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x20 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x40 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x80 \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x1B \n\tcall 1b \n\t"
+ AESKEYGENA xmm0_xmm1 ",0x36 \n\tcall 1b \n\t"
+ :
+ : "r" (rk), "r" (key)
+ : "memory", "cc", "0" );
+}
+
+/*
+ * Key expansion, 192-bit case
+ */
+static void aesni_setkey_enc_192( unsigned char *rk,
+ const unsigned char *key )
+{
+ asm( "movdqu (%1), %%xmm0 \n\t" // copy original round key
+ "movdqu %%xmm0, (%0) \n\t"
+ "add $16, %0 \n\t"
+ "movq 16(%1), %%xmm1 \n\t"
+ "movq %%xmm1, (%0) \n\t"
+ "add $8, %0 \n\t"
+ "jmp 2f \n\t" // skip auxiliary routine
+
+ /*
+ * Finish generating the next 6 quarter-keys.
+ *
+ * On entry xmm0 is r3:r2:r1:r0, xmm1 is stuff:stuff:r5:r4
+ * and xmm2 is stuff:stuff:X:stuff with X = rot( sub( r3 ) ) ^ RCON.
+ *
+ * On exit, xmm0 is r9:r8:r7:r6 and xmm1 is stuff:stuff:r11:r10
+ * and those are written to the round key buffer.
+ */
+ "1: \n\t"
+ "pshufd $0x55, %%xmm2, %%xmm2 \n\t" // X:X:X:X
+ "pxor %%xmm0, %%xmm2 \n\t" // X+r3:X+r2:X+r1:r4
+ "pslldq $4, %%xmm0 \n\t" // etc
+ "pxor %%xmm0, %%xmm2 \n\t"
+ "pslldq $4, %%xmm0 \n\t"
+ "pxor %%xmm0, %%xmm2 \n\t"
+ "pslldq $4, %%xmm0 \n\t"
+ "pxor %%xmm2, %%xmm0 \n\t" // update xmm0 = r9:r8:r7:r6
+ "movdqu %%xmm0, (%0) \n\t"
+ "add $16, %0 \n\t"
+ "pshufd $0xff, %%xmm0, %%xmm2 \n\t" // r9:r9:r9:r9
+ "pxor %%xmm1, %%xmm2 \n\t" // stuff:stuff:r9+r5:r10
+ "pslldq $4, %%xmm1 \n\t" // r2:r1:r0:0
+ "pxor %%xmm2, %%xmm1 \n\t" // xmm1 = stuff:stuff:r11:r10
+ "movq %%xmm1, (%0) \n\t"
+ "add $8, %0 \n\t"
+ "ret \n\t"
+
+ "2: \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x01 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x02 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x04 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x08 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x10 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x20 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x40 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x80 \n\tcall 1b \n\t"
+
+ :
+ : "r" (rk), "r" (key)
+ : "memory", "cc", "0" );
+}
+
+/*
+ * Key expansion, 256-bit case
+ */
+static void aesni_setkey_enc_256( unsigned char *rk,
+ const unsigned char *key )
+{
+ asm( "movdqu (%1), %%xmm0 \n\t"
+ "movdqu %%xmm0, (%0) \n\t"
+ "add $16, %0 \n\t"
+ "movdqu 16(%1), %%xmm1 \n\t"
+ "movdqu %%xmm1, (%0) \n\t"
+ "jmp 2f \n\t" // skip auxiliary routine
+
+ /*
+ * Finish generating the next two round keys.
+ *
+ * On entry xmm0 is r3:r2:r1:r0, xmm1 is r7:r6:r5:r4 and
+ * xmm2 is X:stuff:stuff:stuff with X = rot( sub( r7 )) ^ RCON
+ *
+ * On exit, xmm0 is r11:r10:r9:r8 and xmm1 is r15:r14:r13:r12
+ * and those have been written to the output buffer.
+ */
+ "1: \n\t"
+ "pshufd $0xff, %%xmm2, %%xmm2 \n\t"
+ "pxor %%xmm0, %%xmm2 \n\t"
+ "pslldq $4, %%xmm0 \n\t"
+ "pxor %%xmm0, %%xmm2 \n\t"
+ "pslldq $4, %%xmm0 \n\t"
+ "pxor %%xmm0, %%xmm2 \n\t"
+ "pslldq $4, %%xmm0 \n\t"
+ "pxor %%xmm2, %%xmm0 \n\t"
+ "add $16, %0 \n\t"
+ "movdqu %%xmm0, (%0) \n\t"
+
+ /* Set xmm2 to stuff:Y:stuff:stuff with Y = subword( r11 )
+ * and proceed to generate next round key from there */
+ AESKEYGENA xmm0_xmm2 ",0x00 \n\t"
+ "pshufd $0xaa, %%xmm2, %%xmm2 \n\t"
+ "pxor %%xmm1, %%xmm2 \n\t"
+ "pslldq $4, %%xmm1 \n\t"
+ "pxor %%xmm1, %%xmm2 \n\t"
+ "pslldq $4, %%xmm1 \n\t"
+ "pxor %%xmm1, %%xmm2 \n\t"
+ "pslldq $4, %%xmm1 \n\t"
+ "pxor %%xmm2, %%xmm1 \n\t"
+ "add $16, %0 \n\t"
+ "movdqu %%xmm1, (%0) \n\t"
+ "ret \n\t"
+
+ /*
+ * Main "loop" - Generating one more key than necessary,
+ * see definition of mbedtls_aes_context.buf
+ */
+ "2: \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x01 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x02 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x04 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x08 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x10 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x20 \n\tcall 1b \n\t"
+ AESKEYGENA xmm1_xmm2 ",0x40 \n\tcall 1b \n\t"
+ :
+ : "r" (rk), "r" (key)
+ : "memory", "cc", "0" );
+}
+
+/*
+ * Key expansion, wrapper
+ */
+int mbedtls_aesni_setkey_enc( unsigned char *rk,
+ const unsigned char *key,
+ size_t bits )
+{
+ switch( bits )
+ {
+ case 128: aesni_setkey_enc_128( rk, key ); break;
+ case 192: aesni_setkey_enc_192( rk, key ); break;
+ case 256: aesni_setkey_enc_256( rk, key ); break;
+ default : return( MBEDTLS_ERR_AES_INVALID_KEY_LENGTH );
+ }
+
+ return( 0 );
+}
+
+#endif /* MBEDTLS_HAVE_X86_64 */
diff --git a/src/lib/third_party/src/gcrypt/cipher.c b/src/lib/third_party/src/gcrypt/cipher.c
new file mode 100644
index 000000000..2231db9f6
--- /dev/null
+++ b/src/lib/third_party/src/gcrypt/cipher.c
@@ -0,0 +1,591 @@
+/**
+ * \file cipher.c
+ *
+ * \brief Generic cipher wrapper for mbed TLS
+ *
+ * \author Adriaan de Jong <dejong@fox-it.com>
+ *
+ * Copyright The Mbed TLS Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define CIPHER_VALIDATE_RET( cond ) \
+ MBEDTLS_INTERNAL_VALIDATE_RET( cond, MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA )
+#define CIPHER_VALIDATE( cond ) \
+ MBEDTLS_INTERNAL_VALIDATE( cond )
+
+static int supported_init = 0;
+
+const int *mbedtls_cipher_list( void )
+{
+ const mbedtls_cipher_definition_t *def;
+ int *type;
+
+ if( ! supported_init )
+ {
+ def = mbedtls_cipher_definitions;
+ type = mbedtls_cipher_supported;
+
+ while( def->type != 0 )
+ *type++ = (*def++).type;
+
+ *type = 0;
+
+ supported_init = 1;
+ }
+
+ return( mbedtls_cipher_supported );
+}
+
+const mbedtls_cipher_info_t *mbedtls_cipher_info_from_type(
+ const mbedtls_cipher_type_t cipher_type )
+{
+ const mbedtls_cipher_definition_t *def;
+
+ for( def = mbedtls_cipher_definitions; def->info != NULL; def++ )
+ if( def->type == cipher_type )
+ return( def->info );
+
+ return( NULL );
+}
+
+const mbedtls_cipher_info_t *mbedtls_cipher_info_from_string(
+ const char *cipher_name )
+{
+ const mbedtls_cipher_definition_t *def;
+
+ if( NULL == cipher_name )
+ return( NULL );
+
+ for( def = mbedtls_cipher_definitions; def->info != NULL; def++ )
+ if( ! strcmp( def->info->name, cipher_name ) )
+ return( def->info );
+
+ return( NULL );
+}
+
+const mbedtls_cipher_info_t *mbedtls_cipher_info_from_values(
+ const mbedtls_cipher_id_t cipher_id,
+ int key_bitlen,
+ const mbedtls_cipher_mode_t mode )
+{
+ const mbedtls_cipher_definition_t *def;
+
+ for( def = mbedtls_cipher_definitions; def->info != NULL; def++ )
+ if( def->info->base->cipher == cipher_id &&
+ def->info->key_bitlen == (unsigned) key_bitlen &&
+ def->info->mode == mode )
+ return( def->info );
+
+ return( NULL );
+}
+
+void mbedtls_cipher_init( mbedtls_cipher_context_t *ctx )
+{
+ CIPHER_VALIDATE( ctx != NULL );
+ memset( ctx, 0, sizeof( mbedtls_cipher_context_t ) );
+}
+
+int mbedtls_cipher_setkey( mbedtls_cipher_context_t *ctx,
+ const unsigned char *key,
+ int key_bitlen,
+ const mbedtls_operation_t operation )
+{
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( key != NULL );
+ CIPHER_VALIDATE_RET( operation == MBEDTLS_ENCRYPT ||
+ operation == MBEDTLS_DECRYPT );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+
+ if( ( ctx->cipher_info->flags & MBEDTLS_CIPHER_VARIABLE_KEY_LEN ) == 0 &&
+ (int) ctx->cipher_info->key_bitlen != key_bitlen )
+ {
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+ }
+
+ ctx->key_bitlen = key_bitlen;
+ ctx->operation = operation;
+
+ /*
+ * For OFB, CFB and CTR mode always use the encryption key schedule
+ */
+ if( MBEDTLS_ENCRYPT == operation ||
+ MBEDTLS_MODE_CFB == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_OFB == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_CTR == ctx->cipher_info->mode )
+ {
+ return( ctx->cipher_info->base->setkey_enc_func( ctx->cipher_ctx, key,
+ ctx->key_bitlen ) );
+ }
+
+ if( MBEDTLS_DECRYPT == operation )
+ return( ctx->cipher_info->base->setkey_dec_func( ctx->cipher_ctx, key,
+ ctx->key_bitlen ) );
+
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+}
+
+int mbedtls_cipher_set_iv( mbedtls_cipher_context_t *ctx,
+ const unsigned char *iv,
+ size_t iv_len )
+{
+ size_t actual_iv_size;
+
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( iv_len == 0 || iv != NULL );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+ /* avoid buffer overflow in ctx->iv */
+ if( iv_len > MBEDTLS_MAX_IV_LENGTH )
+ return( MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE );
+
+ if( ( ctx->cipher_info->flags & MBEDTLS_CIPHER_VARIABLE_IV_LEN ) != 0 )
+ actual_iv_size = iv_len;
+ else
+ {
+ actual_iv_size = ctx->cipher_info->iv_size;
+
+ /* avoid reading past the end of input buffer */
+ if( actual_iv_size > iv_len )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+ }
+
+
+#if defined(MBEDTLS_GCM_C)
+ if( MBEDTLS_MODE_GCM == ctx->cipher_info->mode )
+ {
+ return( mbedtls_gcm_starts( (mbedtls_gcm_context *) ctx->cipher_ctx,
+ ctx->operation,
+ iv, iv_len ) );
+ }
+#endif
+
+
+ if ( actual_iv_size != 0 )
+ {
+ memcpy( ctx->iv, iv, actual_iv_size );
+ ctx->iv_size = actual_iv_size;
+ }
+
+ return( 0 );
+}
+
+int mbedtls_cipher_reset( mbedtls_cipher_context_t *ctx )
+{
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+
+ ctx->unprocessed_len = 0;
+
+ return( 0 );
+}
+
+#if defined(MBEDTLS_GCM_C) || defined(MBEDTLS_CHACHAPOLY_C)
+int mbedtls_cipher_update_ad( mbedtls_cipher_context_t *ctx,
+ const unsigned char *ad, size_t ad_len )
+{
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( ad_len == 0 || ad != NULL );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+
+#if defined(MBEDTLS_GCM_C)
+ if( MBEDTLS_MODE_GCM == ctx->cipher_info->mode )
+ {
+ return( mbedtls_gcm_update_ad( (mbedtls_gcm_context *) ctx->cipher_ctx,
+ ad, ad_len ) );
+ }
+#endif
+
+
+ return( 0 );
+}
+#endif /* MBEDTLS_GCM_C || MBEDTLS_CHACHAPOLY_C */
+
+int mbedtls_cipher_update( mbedtls_cipher_context_t *ctx, const unsigned char *input,
+ size_t ilen, unsigned char *output, size_t *olen )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ size_t block_size;
+
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( ilen == 0 || input != NULL );
+ CIPHER_VALIDATE_RET( output != NULL );
+ CIPHER_VALIDATE_RET( olen != NULL );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+
+ *olen = 0;
+ block_size = mbedtls_cipher_get_block_size( ctx );
+ if ( 0 == block_size )
+ {
+ return( MBEDTLS_ERR_CIPHER_INVALID_CONTEXT );
+ }
+
+ if( ctx->cipher_info->mode == MBEDTLS_MODE_ECB )
+ {
+ if( ilen != block_size )
+ return( MBEDTLS_ERR_CIPHER_FULL_BLOCK_EXPECTED );
+
+ *olen = ilen;
+
+ if( 0 != ( ret = ctx->cipher_info->base->ecb_func( ctx->cipher_ctx,
+ ctx->operation, input, output ) ) )
+ {
+ return( ret );
+ }
+
+ return( 0 );
+ }
+
+#if defined(MBEDTLS_GCM_C)
+ if( ctx->cipher_info->mode == MBEDTLS_MODE_GCM )
+ {
+ return( mbedtls_gcm_update( (mbedtls_gcm_context *) ctx->cipher_ctx,
+ input, ilen,
+ output, ilen, olen ) );
+ }
+#endif
+
+
+
+ if( input == output &&
+ ( ctx->unprocessed_len != 0 || ilen % block_size ) )
+ {
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+ }
+
+#if defined(MBEDTLS_CIPHER_MODE_CBC)
+ if( ctx->cipher_info->mode == MBEDTLS_MODE_CBC )
+ {
+ size_t copy_len = 0;
+
+ /*
+ * If there is not enough data for a full block, cache it.
+ */
+ if( ( ctx->operation == MBEDTLS_DECRYPT && NULL != ctx->add_padding &&
+ ilen <= block_size - ctx->unprocessed_len ) ||
+ ( ctx->operation == MBEDTLS_DECRYPT && NULL == ctx->add_padding &&
+ ilen < block_size - ctx->unprocessed_len ) ||
+ ( ctx->operation == MBEDTLS_ENCRYPT &&
+ ilen < block_size - ctx->unprocessed_len ) )
+ {
+ memcpy( &( ctx->unprocessed_data[ctx->unprocessed_len] ), input,
+ ilen );
+
+ ctx->unprocessed_len += ilen;
+ return( 0 );
+ }
+
+ /*
+ * Process cached data first
+ */
+ if( 0 != ctx->unprocessed_len )
+ {
+ copy_len = block_size - ctx->unprocessed_len;
+
+ memcpy( &( ctx->unprocessed_data[ctx->unprocessed_len] ), input,
+ copy_len );
+
+ if( 0 != ( ret = ctx->cipher_info->base->cbc_func( ctx->cipher_ctx,
+ ctx->operation, block_size, ctx->iv,
+ ctx->unprocessed_data, output ) ) )
+ {
+ return( ret );
+ }
+
+ *olen += block_size;
+ output += block_size;
+ ctx->unprocessed_len = 0;
+
+ input += copy_len;
+ ilen -= copy_len;
+ }
+
+ /*
+ * Cache final, incomplete block
+ */
+ if( 0 != ilen )
+ {
+ /* Encryption: only cache partial blocks
+ * Decryption w/ padding: always keep at least one whole block
+ * Decryption w/o padding: only cache partial blocks
+ */
+ copy_len = ilen % block_size;
+ if( copy_len == 0 &&
+ ctx->operation == MBEDTLS_DECRYPT &&
+ NULL != ctx->add_padding)
+ {
+ copy_len = block_size;
+ }
+
+ memcpy( ctx->unprocessed_data, &( input[ilen - copy_len] ),
+ copy_len );
+
+ ctx->unprocessed_len += copy_len;
+ ilen -= copy_len;
+ }
+
+ /*
+ * Process remaining full blocks
+ */
+ if( ilen )
+ {
+ if( 0 != ( ret = ctx->cipher_info->base->cbc_func( ctx->cipher_ctx,
+ ctx->operation, ilen, ctx->iv, input, output ) ) )
+ {
+ return( ret );
+ }
+
+ *olen += ilen;
+ }
+
+ return( 0 );
+ }
+#endif /* MBEDTLS_CIPHER_MODE_CBC */
+
+
+
+
+
+
+ return( MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE );
+}
+
+
+int mbedtls_cipher_finish( mbedtls_cipher_context_t *ctx,
+ unsigned char *output, size_t *olen )
+{
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( output != NULL );
+ CIPHER_VALIDATE_RET( olen != NULL );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+
+ *olen = 0;
+
+ if( MBEDTLS_MODE_CFB == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_OFB == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_CTR == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_GCM == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_CCM_STAR_NO_TAG == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_XTS == ctx->cipher_info->mode ||
+ MBEDTLS_MODE_STREAM == ctx->cipher_info->mode )
+ {
+ return( 0 );
+ }
+
+ if ( ( MBEDTLS_CIPHER_CHACHA20 == ctx->cipher_info->type ) ||
+ ( MBEDTLS_CIPHER_CHACHA20_POLY1305 == ctx->cipher_info->type ) )
+ {
+ return( 0 );
+ }
+
+ if( MBEDTLS_MODE_ECB == ctx->cipher_info->mode )
+ {
+ if( ctx->unprocessed_len != 0 )
+ return( MBEDTLS_ERR_CIPHER_FULL_BLOCK_EXPECTED );
+
+ return( 0 );
+ }
+
+#if defined(MBEDTLS_CIPHER_MODE_CBC)
+ if( MBEDTLS_MODE_CBC == ctx->cipher_info->mode )
+ {
+ int ret = 0;
+
+ if( MBEDTLS_ENCRYPT == ctx->operation )
+ {
+ /* check for 'no padding' mode */
+ if( NULL == ctx->add_padding )
+ {
+ if( 0 != ctx->unprocessed_len )
+ return( MBEDTLS_ERR_CIPHER_FULL_BLOCK_EXPECTED );
+
+ return( 0 );
+ }
+
+ ctx->add_padding( ctx->unprocessed_data, mbedtls_cipher_get_iv_size( ctx ),
+ ctx->unprocessed_len );
+ }
+ else if( mbedtls_cipher_get_block_size( ctx ) != ctx->unprocessed_len )
+ {
+ /*
+ * For decrypt operations, expect a full block,
+ * or an empty block if no padding
+ */
+ if( NULL == ctx->add_padding && 0 == ctx->unprocessed_len )
+ return( 0 );
+
+ return( MBEDTLS_ERR_CIPHER_FULL_BLOCK_EXPECTED );
+ }
+
+ /* cipher block */
+ if( 0 != ( ret = ctx->cipher_info->base->cbc_func( ctx->cipher_ctx,
+ ctx->operation, mbedtls_cipher_get_block_size( ctx ), ctx->iv,
+ ctx->unprocessed_data, output ) ) )
+ {
+ return( ret );
+ }
+
+ /* Set output size for decryption */
+ if( MBEDTLS_DECRYPT == ctx->operation )
+ return( ctx->get_padding( output, mbedtls_cipher_get_block_size( ctx ),
+ olen ) );
+
+ /* Set output size for encryption */
+ *olen = mbedtls_cipher_get_block_size( ctx );
+ return( 0 );
+ }
+#else
+ ((void) output);
+#endif /* MBEDTLS_CIPHER_MODE_CBC */
+
+ return( MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE );
+}
+
+
+#if defined(MBEDTLS_GCM_C) || defined(MBEDTLS_CHACHAPOLY_C)
+int mbedtls_cipher_write_tag( mbedtls_cipher_context_t *ctx,
+ unsigned char *tag, size_t tag_len )
+{
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( tag_len == 0 || tag != NULL );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+ if( MBEDTLS_ENCRYPT != ctx->operation )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+
+#if defined(MBEDTLS_GCM_C)
+ if( MBEDTLS_MODE_GCM == ctx->cipher_info->mode )
+ {
+ size_t output_length;
+ /* The code here doesn't yet support alternative implementations
+ * that can delay up to a block of output. */
+ return( mbedtls_gcm_finish( (mbedtls_gcm_context *) ctx->cipher_ctx,
+ NULL, 0, &output_length,
+ tag, tag_len ) );
+ }
+#endif
+
+
+ return( 0 );
+}
+
+int mbedtls_cipher_check_tag( mbedtls_cipher_context_t *ctx,
+ const unsigned char *tag, size_t tag_len )
+{
+ unsigned char check_tag[16];
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( tag_len == 0 || tag != NULL );
+ if( ctx->cipher_info == NULL )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+ if( MBEDTLS_DECRYPT != ctx->operation )
+ {
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+ }
+
+
+ /* Status to return on a non-authenticated algorithm. It would make sense
+ * to return MBEDTLS_ERR_CIPHER_INVALID_CONTEXT or perhaps
+ * MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA, but at the time I write this our
+ * unit tests assume 0. */
+ ret = 0;
+
+#if defined(MBEDTLS_GCM_C)
+ if( MBEDTLS_MODE_GCM == ctx->cipher_info->mode )
+ {
+ size_t output_length;
+ /* The code here doesn't yet support alternative implementations
+ * that can delay up to a block of output. */
+
+ if( tag_len > sizeof( check_tag ) )
+ return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA );
+
+ if( 0 != ( ret = mbedtls_gcm_finish(
+ (mbedtls_gcm_context *) ctx->cipher_ctx,
+ NULL, 0, &output_length,
+ check_tag, tag_len ) ) )
+ {
+ return( ret );
+ }
+
+ /* Check the tag in "constant-time" */
+ if( mbedtls_ct_memcmp( tag, check_tag, tag_len ) != 0 )
+ {
+ ret = MBEDTLS_ERR_CIPHER_AUTH_FAILED;
+ goto exit;
+ }
+ }
+#endif /* MBEDTLS_GCM_C */
+
+
+exit:
+ mbedtls_platform_zeroize( check_tag, tag_len );
+ return( ret );
+}
+#endif /* MBEDTLS_GCM_C || MBEDTLS_CHACHAPOLY_C */
+
+/*
+ * Packet-oriented wrapper for non-AEAD modes
+ */
+int mbedtls_cipher_crypt( mbedtls_cipher_context_t *ctx,
+ const unsigned char *iv, size_t iv_len,
+ const unsigned char *input, size_t ilen,
+ unsigned char *output, size_t *olen )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ size_t finish_olen;
+
+ CIPHER_VALIDATE_RET( ctx != NULL );
+ CIPHER_VALIDATE_RET( iv_len == 0 || iv != NULL );
+ CIPHER_VALIDATE_RET( ilen == 0 || input != NULL );
+ CIPHER_VALIDATE_RET( output != NULL );
+ CIPHER_VALIDATE_RET( olen != NULL );
+
+
+ if( ( ret = mbedtls_cipher_set_iv( ctx, iv, iv_len ) ) != 0 )
+ return( ret );
+
+ if( ( ret = mbedtls_cipher_reset( ctx ) ) != 0 )
+ return( ret );
+
+ if( ( ret = mbedtls_cipher_update( ctx, input, ilen,
+ output, olen ) ) != 0 )
+ return( ret );
+
+ if( ( ret = mbedtls_cipher_finish( ctx, output + *olen,
+ &finish_olen ) ) != 0 )
+ return( ret );
+
+ *olen += finish_olen;
+
+ return( 0 );
+}
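
Note: mbedtls_cipher_crypt() is the one-shot path chaining set_iv, reset, update and finish for non-AEAD modes. A hedged usage sketch for a single AES-128-ECB block; cipher_setup() below stands in for whatever routine in this light port binds the context to a cipher_info and attaches an AES context (that step is outside this hunk), so it is an assumption, not the library's API:

    static int ecb_one_block( const unsigned char key[16],
                              const unsigned char in[16],
                              unsigned char out[16] )
    {
        mbedtls_cipher_context_t ctx;
        size_t olen = 0;
        int ret;

        mbedtls_cipher_init( &ctx );

        /* Hypothetical setup step: bind ctx to the AES-128-ECB cipher_info and
         * allocate its AES context; the exact call is not part of this diff. */
        if( ( ret = cipher_setup( &ctx, MBEDTLS_CIPHER_AES_128_ECB ) ) != 0 )
            return ret;

        if( ( ret = mbedtls_cipher_setkey( &ctx, key, 128, MBEDTLS_ENCRYPT ) ) != 0 )
            return ret;

        /* ECB takes no IV, so iv_len is 0; olen ends up at 16 (one block). */
        return mbedtls_cipher_crypt( &ctx, NULL, 0, in, 16, out, &olen );
    }
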
+
diff --git a/src/lib/third_party/src/gcrypt/cipher_wrap.c b/src/lib/third_party/src/gcrypt/cipher_wrap.c
new file mode 100644
index 000000000..6db209cba
--- /dev/null
+++ b/src/lib/third_party/src/gcrypt/cipher_wrap.c
@@ -0,0 +1,252 @@
+/**
+ * \file cipher_wrap.c
+ *
+ * \brief Generic cipher wrapper for mbed TLS
+ *
+ * \author Adriaan de Jong <dejong@fox-it.com>
+ *
+ * Copyright The Mbed TLS Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(MBEDTLS_AES_C)
+
+static int aes_crypt_ecb_wrap( void *ctx, mbedtls_operation_t operation,
+ const unsigned char *input, unsigned char *output )
+{
+ return mbedtls_aes_crypt_ecb( (mbedtls_aes_context *) ctx, operation, input, output );
+}
+
+
+static int aes_setkey_dec_wrap( void *ctx, const unsigned char *key,
+ unsigned int key_bitlen )
+{
+ return mbedtls_aes_setkey_dec( (mbedtls_aes_context *) ctx, key, key_bitlen );
+}
+
+static int aes_setkey_enc_wrap( void *ctx, const unsigned char *key,
+ unsigned int key_bitlen )
+{
+ return mbedtls_aes_setkey_enc( (mbedtls_aes_context *) ctx, key, key_bitlen );
+}
+
+static void * aes_ctx_alloc( void )
+{
+ mbedtls_aes_context *aes = mbedtls_calloc( 1, sizeof( mbedtls_aes_context ) );
+
+ if( aes == NULL )
+ return( NULL );
+
+ mbedtls_aes_init( aes );
+
+ return( aes );
+}
+
+static void aes_ctx_free( void *ctx )
+{
+ mbedtls_aes_free( (mbedtls_aes_context *) ctx );
+ mbedtls_free( ctx );
+}
+
+static void aes_ctx_zero( void *ctx )
+{
+ mbedtls_aes_init( (mbedtls_aes_context *) ctx );
+}
+
+static const mbedtls_cipher_base_t aes_info = {
+ MBEDTLS_CIPHER_ID_AES,
+ aes_crypt_ecb_wrap,
+ aes_setkey_enc_wrap,
+ aes_setkey_dec_wrap,
+ NULL, // aes_ctx_alloc
+ NULL, // aes_ctx_free
+ aes_ctx_zero
+};
+
+static const mbedtls_cipher_info_t aes_128_ecb_info = {
+ MBEDTLS_CIPHER_AES_128_ECB,
+ MBEDTLS_MODE_ECB,
+ 128,
+ "AES-128-ECB",
+ 0,
+ 0,
+ 16,
+ &aes_info
+};
+
+static const mbedtls_cipher_info_t aes_192_ecb_info = {
+ MBEDTLS_CIPHER_AES_192_ECB,
+ MBEDTLS_MODE_ECB,
+ 192,
+ "AES-192-ECB",
+ 0,
+ 0,
+ 16,
+ &aes_info
+};
+
+static const mbedtls_cipher_info_t aes_256_ecb_info = {
+ MBEDTLS_CIPHER_AES_256_ECB,
+ MBEDTLS_MODE_ECB,
+ 256,
+ "AES-256-ECB",
+ 0,
+ 0,
+ 16,
+ &aes_info
+};
+
+#if defined(MBEDTLS_GCM_C)
+static int gcm_aes_setkey_wrap( void *ctx, const unsigned char *key,
+ unsigned int key_bitlen )
+{
+ return mbedtls_gcm_setkey( (mbedtls_gcm_context *) ctx, MBEDTLS_CIPHER_ID_AES,
+ key, key_bitlen );
+}
+
+static const mbedtls_cipher_base_t gcm_aes_info = {
+ MBEDTLS_CIPHER_ID_AES,
+ NULL,
+ gcm_aes_setkey_wrap,
+ gcm_aes_setkey_wrap,
+ NULL, // gcm_ctx_alloc
+ NULL, // gcm_ctx_free
+ NULL
+};
+
+static const mbedtls_cipher_info_t aes_128_gcm_info = {
+ MBEDTLS_CIPHER_AES_128_GCM,
+ MBEDTLS_MODE_GCM,
+ 128,
+ "AES-128-GCM",
+ 12,
+ MBEDTLS_CIPHER_VARIABLE_IV_LEN,
+ 16,
+ &gcm_aes_info
+};
+
+static const mbedtls_cipher_info_t aes_192_gcm_info = {
+ MBEDTLS_CIPHER_AES_192_GCM,
+ MBEDTLS_MODE_GCM,
+ 192,
+ "AES-192-GCM",
+ 12,
+ MBEDTLS_CIPHER_VARIABLE_IV_LEN,
+ 16,
+ &gcm_aes_info
+};
+
+static const mbedtls_cipher_info_t aes_256_gcm_info = {
+ MBEDTLS_CIPHER_AES_256_GCM,
+ MBEDTLS_MODE_GCM,
+ 256,
+ "AES-256-GCM",
+ 12,
+ MBEDTLS_CIPHER_VARIABLE_IV_LEN,
+ 16,
+ &gcm_aes_info
+};
+#endif /* MBEDTLS_GCM_C */
+
+
+#endif /* MBEDTLS_AES_C */
+
+
+
+
+
+
+#if defined(MBEDTLS_CIPHER_NULL_CIPHER)
+static int null_crypt_stream( void *ctx, size_t length,
+ const unsigned char *input,
+ unsigned char *output )
+{
+ ((void) ctx);
+ memmove( output, input, length );
+ return( 0 );
+}
+
+static int null_setkey( void *ctx, const unsigned char *key,
+ unsigned int key_bitlen )
+{
+ ((void) ctx);
+ ((void) key);
+ ((void) key_bitlen);
+
+ return( 0 );
+}
+
+static void * null_ctx_alloc( void )
+{
+ return( (void *) 1 );
+}
+
+static void null_ctx_free( void *ctx )
+{
+ ((void) ctx);
+}
+
+static const mbedtls_cipher_base_t null_base_info = {
+ MBEDTLS_CIPHER_ID_NULL,
+ NULL,
+ null_setkey,
+ null_setkey,
+ null_ctx_alloc,
+ null_ctx_free
+};
+
+static const mbedtls_cipher_info_t null_cipher_info = {
+ MBEDTLS_CIPHER_NULL,
+ MBEDTLS_MODE_STREAM,
+ 0,
+ "NULL",
+ 0,
+ 0,
+ 1,
+ &null_base_info
+};
+#endif /* defined(MBEDTLS_CIPHER_NULL_CIPHER) */
+
+
+const mbedtls_cipher_definition_t mbedtls_cipher_definitions[] =
+{
+#if defined(MBEDTLS_AES_C)
+ { MBEDTLS_CIPHER_AES_128_ECB, &aes_128_ecb_info },
+ { MBEDTLS_CIPHER_AES_192_ECB, &aes_192_ecb_info },
+ { MBEDTLS_CIPHER_AES_256_ECB, &aes_256_ecb_info },
+#if defined(MBEDTLS_GCM_C)
+ { MBEDTLS_CIPHER_AES_128_GCM, &aes_128_gcm_info },
+ { MBEDTLS_CIPHER_AES_192_GCM, &aes_192_gcm_info },
+ { MBEDTLS_CIPHER_AES_256_GCM, &aes_256_gcm_info },
+#endif
+#endif /* MBEDTLS_AES_C */
+
+
+
+
+
+
+
+#if defined(MBEDTLS_CIPHER_NULL_CIPHER)
+ { MBEDTLS_CIPHER_NULL, &null_cipher_info },
+#endif /* MBEDTLS_CIPHER_NULL_CIPHER */
+
+ { MBEDTLS_CIPHER_NONE, NULL }
+};
+
+#define NUM_CIPHERS ( sizeof(mbedtls_cipher_definitions) / \
+ sizeof(mbedtls_cipher_definitions[0]) )
+int mbedtls_cipher_supported[NUM_CIPHERS];
+
diff --git a/src/lib/third_party/src/gcrypt/digest.c b/src/lib/third_party/src/gcrypt/digest.c
new file mode 100644
index 000000000..3cdd1f1f8
--- /dev/null
+++ b/src/lib/third_party/src/gcrypt/digest.c
@@ -0,0 +1,321 @@
+/*
+ * hmac-sha256.c
+ * Copyright (C) 2017 Adrian Perez <aperez@igalia.com>
+ *
+ * Distributed under terms of the MIT license.
+ */
+
+//#include <stdint.h>
+//#include <unistd.h>
+//#include <string.h>
+//#include <stdlib.h>
+
+//#include "gcrypt/digest.h"
+
+/* define it for speed optimization */
+#define _SHA256_UNROLL
+#define _SHA256_UNROLL2
+
+/*
+ * Crypto/Sha256.c -- SHA-256 Hash
+ * 2010-06-11 : Igor Pavlov : Public domain
+ * This code is based on public domain code from Wei Dai's Crypto++ library.
+ */
+
+#define U8V(v) ((uint8_t)(v) & 0xFFU)
+#define U16V(v) ((uint16_t)(v) & 0xFFFFU)
+#define U32V(v) ((uint32_t)(v) & 0xFFFFFFFFU)
+#define U64V(v) ((uint64_t)(v) & 0xFFFFFFFFFFFFFFFFU)
+
+#define ROTL32(v, n) (U32V((uint32_t)(v) << (n)) | ((uint32_t)(v) >> (32 - (n))))
+
+// tests fail if we don't have this cast...
+#define ROTL64(v, n) (U64V((uint64_t)(v) << (n)) | ((uint64_t)(v) >> (64 - (n))))
+
+#define ROTR32(v, n) ROTL32(v, 32 - (n))
+#define ROTR64(v, n) ROTL64(v, 64 - (n))
+
+#ifndef ROTL8
+#define ROTL8(v, n) (U8V((uint8_t)(v) << (n)) | ((uint8_t)(v) >> (8 - (n))))
+#endif
+#ifndef ROTL16
+#define ROTL16(v, n) (U16V((uint16_t)(v) << (n)) | ((uint16_t)(v) >> (16 - (n))))
+#endif
+
+#define ROTR8(v, n) ROTL8(v, 8 - (n))
+#define ROTR16(v, n) ROTL16(v, 16 - (n))
+
+#define S0(x) (ROTR32(x, 2) ^ ROTR32(x,13) ^ ROTR32(x, 22))
+#define S1(x) (ROTR32(x, 6) ^ ROTR32(x,11) ^ ROTR32(x, 25))
+#define s0(x) (ROTR32(x, 7) ^ ROTR32(x,18) ^ (x >> 3))
+#define s1(x) (ROTR32(x,17) ^ ROTR32(x,19) ^ (x >> 10))
+
+#define blk0(i) (W[i] = data[i])
+#define blk2(i) (W[i&15] += s1(W[(i-2)&15]) + W[(i-7)&15] + s0(W[(i-15)&15]))
+
+#define Ch(x,y,z) (z^(x&(y^z)))
+#define Maj(x,y,z) ((x&y)|(z&(x|y)))
+
+#define a(i) T[(0-(i))&7]
+#define b(i) T[(1-(i))&7]
+#define c(i) T[(2-(i))&7]
+#define d(i) T[(3-(i))&7]
+#define e(i) T[(4-(i))&7]
+#define f(i) T[(5-(i))&7]
+#define g(i) T[(6-(i))&7]
+#define h(i) T[(7-(i))&7]
+
+
+#ifdef _SHA256_UNROLL2
+
+#define R(a,b,c,d,e,f,g,h, i) h += S1(e) + Ch(e,f,g) + K[i+j] + (j?blk2(i):blk0(i));\
+ d += h; h += S0(a) + Maj(a, b, c)
+
+#define RX_8(i) \
+ R(a,b,c,d,e,f,g,h, i); \
+ R(h,a,b,c,d,e,f,g, (i+1)); \
+ R(g,h,a,b,c,d,e,f, (i+2)); \
+ R(f,g,h,a,b,c,d,e, (i+3)); \
+ R(e,f,g,h,a,b,c,d, (i+4)); \
+ R(d,e,f,g,h,a,b,c, (i+5)); \
+ R(c,d,e,f,g,h,a,b, (i+6)); \
+ R(b,c,d,e,f,g,h,a, (i+7))
+
+#else
+
+#define R(i) h(i) += S1(e(i)) + Ch(e(i),f(i),g(i)) + K[i+j] + (j?blk2(i):blk0(i));\
+ d(i) += h(i); h(i) += S0(a(i)) + Maj(a(i), b(i), c(i))
+
+#ifdef _SHA256_UNROLL
+
+#define RX_8(i) R(i+0); R(i+1); R(i+2); R(i+3); R(i+4); R(i+5); R(i+6); R(i+7);
+
+#endif
+
+#endif
+
+static const uint32_t K[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+static void
+sha256_init(sha256_t *p)
+{
+ p->state[0] = 0x6a09e667;
+ p->state[1] = 0xbb67ae85;
+ p->state[2] = 0x3c6ef372;
+ p->state[3] = 0xa54ff53a;
+ p->state[4] = 0x510e527f;
+ p->state[5] = 0x9b05688c;
+ p->state[6] = 0x1f83d9ab;
+ p->state[7] = 0x5be0cd19;
+ p->count = 0;
+}
+
+static void
+sha256_transform(uint32_t *state, const uint32_t *data)
+{
+ uint32_t W[16];
+ unsigned j;
+ #ifdef _SHA256_UNROLL2
+ uint32_t a,b,c,d,e,f,g,h;
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ f = state[5];
+ g = state[6];
+ h = state[7];
+ #else
+ uint32_t T[8];
+ for (j = 0; j < 8; j++)
+ T[j] = state[j];
+ #endif
+
+ for (j = 0; j < 64; j += 16)
+ {
+ #if defined(_SHA256_UNROLL) || defined(_SHA256_UNROLL2)
+ RX_8(0); RX_8(8);
+ #else
+ unsigned i;
+ for (i = 0; i < 16; i++) { R(i); }
+ #endif
+ }
+
+ #ifdef _SHA256_UNROLL2
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
+ #else
+ for (j = 0; j < 8; j++)
+ state[j] += T[j];
+ #endif
+
+ /* Wipe variables */
+ /* memset(W, 0, sizeof(W)); */
+ /* memset(T, 0, sizeof(T)); */
+}
+
+#undef S0
+#undef S1
+#undef s0
+#undef s1
+
+static void
+sha256_write_byte_block(sha256_t *p)
+{
+ uint32_t data32[16];
+ unsigned i;
+ for (i = 0; i < 16; i++)
+ data32[i] =
+ ((uint32_t)(p->buffer[i * 4 ]) << 24) +
+ ((uint32_t)(p->buffer[i * 4 + 1]) << 16) +
+ ((uint32_t)(p->buffer[i * 4 + 2]) << 8) +
+ ((uint32_t)(p->buffer[i * 4 + 3]));
+ sha256_transform(p->state, data32);
+}
+
+static void
+sha256_update(sha256_t *p, const unsigned char *data, size_t size)
+{
+ uint32_t curBufferPos = (uint32_t)p->count & 0x3F;
+ while (size > 0)
+ {
+ p->buffer[curBufferPos++] = *data++;
+ p->count++;
+ size--;
+ if (curBufferPos == 64)
+ {
+ curBufferPos = 0;
+ sha256_write_byte_block(p);
+ }
+ }
+}
+
+
+void
+sha256_final(sha256_t *p, unsigned char *digest)
+{
+ uint64_t lenInBits = (p->count << 3);
+ uint32_t curBufferPos = (uint32_t)p->count & 0x3F;
+ unsigned i;
+ p->buffer[curBufferPos++] = 0x80;
+ while (curBufferPos != (64 - 8))
+ {
+ curBufferPos &= 0x3F;
+ if (curBufferPos == 0)
+ sha256_write_byte_block(p);
+ p->buffer[curBufferPos++] = 0;
+ }
+ for (i = 0; i < 8; i++)
+ {
+ p->buffer[curBufferPos++] = (unsigned char)(lenInBits >> 56);
+ lenInBits <<= 8;
+ }
+ sha256_write_byte_block(p);
+
+ for (i = 0; i < 8; i++)
+ {
+ *digest++ = (unsigned char)(p->state[i] >> 24);
+ *digest++ = (unsigned char)(p->state[i] >> 16);
+ *digest++ = (unsigned char)(p->state[i] >> 8);
+ *digest++ = (unsigned char)(p->state[i]);
+ }
+ sha256_init(p);
+}
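
Note: sha256_init/sha256_update/sha256_final form a conventional streaming hash API. A short sketch checking it against the classic "abc" vector from FIPS 180-4 (digest ba7816bf 8f01cfea 414140de 5dae2223 b00361a3 96177a9c b410ff61 f20015ad); the helper name is illustrative and <string.h> is assumed to be available in this translation unit:

    static int sha256_selftest( void )
    {
        static const unsigned char expected[32] = {
            0xba,0x78,0x16,0xbf,0x8f,0x01,0xcf,0xea,0x41,0x41,0x40,0xde,0x5d,0xae,0x22,0x23,
            0xb0,0x03,0x61,0xa3,0x96,0x17,0x7a,0x9c,0xb4,0x10,0xff,0x61,0xf2,0x00,0x15,0xad };
        unsigned char digest[32];
        sha256_t s;

        sha256_init( &s );
        sha256_update( &s, (const unsigned char *) "abc", 3 );
        sha256_final( &s, digest );

        return memcmp( digest, expected, sizeof( digest ) ) == 0;
    }
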
+
+/*
+ * HMAC(H, K) == H(K ^ opad, H(K ^ ipad, text))
+ *
+ * H: Hash function (sha256)
+ * K: Secret key
+ * B: Block byte length
+ * L: Byte length of hash function output
+ *
+ * https://tools.ietf.org/html/rfc2104
+ */
+
+#define B 64
+#define L (SHA256_DIGEST_SIZE)
+#define K (SHA256_DIGEST_SIZE * 2)
+
+#define I_PAD 0x36
+#define O_PAD 0x5C
+
+void
+hmac_sha256 (uint8_t out[HMAC_SHA256_DIGEST_SIZE],
+ const uint8_t *data, size_t data_len,
+ const uint8_t *key, size_t key_len)
+{
+ sha256_t ss;
+ uint8_t kx[B];
+ size_t i;
+
+ if(!out) return;
+ if(!data) return;
+ if(!key) return;
+ if(key_len > B) return;
+
+ /*
+ * (1) append zeros to the end of K to create a B byte string
+ * (e.g., if K is of length 20 bytes and B=64, then K will be
+ * appended with 44 zero bytes 0x00)
+ * (2) XOR (bitwise exclusive-OR) the B byte string computed in step
+ * (1) with ipad
+ */
+ for (i = 0; i < key_len; i++) kx[i] = I_PAD ^ key[i];
+ for (i = key_len; i < B; i++) kx[i] = I_PAD ^ 0;
+
+ /*
+ * (3) append the stream of data 'text' to the B byte string resulting
+ * from step (2)
+ * (4) apply H to the stream generated in step (3)
+ */
+ sha256_init (&ss);
+ sha256_update (&ss, kx, B);
+ sha256_update (&ss, data, data_len);
+ sha256_final (&ss, out);
+
+ /*
+ * (5) XOR (bitwise exclusive-OR) the B byte string computed in
+ * step (1) with opad
+ *
+ * NOTE: The "kx" variable is reused.
+ */
+ for (i = 0; i < key_len; i++) kx[i] = O_PAD ^ key[i];
+ for (i = key_len; i < B; i++) kx[i] = O_PAD ^ 0;
+
+ /*
+ * (6) append the H result from step (4) to the B byte string
+ * resulting from step (5)
+ * (7) apply H to the stream generated in step (6) and output
+ * the result
+ */
+ sha256_init (&ss);
+ sha256_update (&ss, kx, B);
+ sha256_update (&ss, out, SHA256_DIGEST_SIZE);
+ sha256_final (&ss, out);
+}
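
Note: a usage sketch for hmac_sha256(), checked against what RFC 4231 lists as test case 1 (key = 20 bytes of 0x0b, data = "Hi There"); HMAC_SHA256_DIGEST_SIZE is assumed to be 32 and the helper name is illustrative:

    static int hmac_sha256_selftest( void )
    {
        uint8_t key[20];
        uint8_t out[32];
        static const uint8_t expected[32] = {   /* RFC 4231, test case 1 */
            0xb0,0x34,0x4c,0x61,0xd8,0xdb,0x38,0x53,0x5c,0xa8,0xaf,0xce,0xaf,0x0b,0xf1,0x2b,
            0x88,0x1d,0xc2,0x00,0xc9,0x83,0x3d,0xa7,0x26,0xe9,0x37,0x6c,0x2e,0x32,0xcf,0xf7 };

        memset( key, 0x0b, sizeof( key ) );
        hmac_sha256( out, (const uint8_t *) "Hi There", 8, key, sizeof( key ) );

        return memcmp( out, expected, sizeof( out ) ) == 0;
    }
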
+
+
diff --git a/src/lib/third_party/src/gcrypt/gcm.c b/src/lib/third_party/src/gcrypt/gcm.c
new file mode 100644
index 000000000..824c92bc9
--- /dev/null
+++ b/src/lib/third_party/src/gcrypt/gcm.c
@@ -0,0 +1,667 @@
+/*
+ * NIST SP800-38D compliant GCM implementation
+ *
+ * Copyright The Mbed TLS Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
+ *
+ * See also:
+ * [MGV] http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * We use the algorithm described as Shoup's method with 4-bit tables in
+ * [MGV] 4.1, pp. 12-13, to enhance speed without using too much memory.
+ */
+
+
+/* Parameter validation macros */
+#define GCM_VALIDATE_RET( cond ) \
+ MBEDTLS_INTERNAL_VALIDATE_RET( cond, MBEDTLS_ERR_GCM_BAD_INPUT )
+#define GCM_VALIDATE( cond ) \
+ MBEDTLS_INTERNAL_VALIDATE( cond )
+
+#ifdef __SIZEOF_LONG__
+#define LBLOCKSIZE __SIZEOF_LONG__
+#else
+#define LBLOCKSIZE ((int)sizeof(unsigned long))
+#endif
+
+static void xorbytes( unsigned char *dst, const unsigned char *src, int n) {
+ while(n > LBLOCKSIZE) {
+ *(unsigned long int *)dst ^= *(const unsigned long int *)src;
+ dst += LBLOCKSIZE;
+ src += LBLOCKSIZE;
+ n -= LBLOCKSIZE;
+ }
+ while(n) {
+ *dst++ ^= *src++;
+ n--;
+ }
+}
+
+static void xorbytes3d( unsigned char *output, unsigned char *buf,
+ const unsigned char *ectr, const unsigned char *input, int n) {
+ while(n > LBLOCKSIZE) {
+ *(unsigned long int *)buf ^= *(const unsigned long int *)input;
+ *(unsigned long int *)output = *(const unsigned long int *)input ^ *(const unsigned long int *)ectr;
+ buf += LBLOCKSIZE;
+ output += LBLOCKSIZE;
+ ectr += LBLOCKSIZE;
+ input += LBLOCKSIZE;
+ n -= LBLOCKSIZE;
+ }
+ while(n) {
+ *buf++ ^= *input;
+ *output++ = *input++ ^ *ectr++;
+ n--;
+ }
+}
+
+static void xorbytes3e( unsigned char *output, unsigned char *buf,
+ const unsigned char *ectr, const unsigned char *input, int n) {
+ while(n > LBLOCKSIZE) {
+ unsigned long int t = *(const unsigned long int *)input ^ *(const unsigned long int *)ectr;
+ *(unsigned long int *)output = t;
+ *(unsigned long int *)buf ^= t;
+ buf += LBLOCKSIZE;
+ output += LBLOCKSIZE;
+ ectr += LBLOCKSIZE;
+ input += LBLOCKSIZE;
+ n -= LBLOCKSIZE;
+ }
+  while(n) {
+    *output = *input++ ^ *ectr++;
+    *buf++ ^= *output++;
+    n--;
+  }
+}
+
+
+
+/*
+ * Initialize a context
+ */
+void mbedtls_gcm_init( mbedtls_gcm_context *ctx, void *aes_ctx )
+{
+ GCM_VALIDATE( ctx != NULL );
+ memset( ctx, 0, sizeof( mbedtls_gcm_context ) );
+ ctx->cipher_ctx.cipher_ctx = aes_ctx;
+}
+
+/*
+ * Precompute small multiples of H, that is set
+ * HH[i] || HL[i] = H times i,
+ * where i is seen as a field element as in [MGV], ie high-order bits
+ * correspond to low powers of P. The result is stored in the same way, that
+ * is the high-order bit of HH corresponds to P^0 and the low-order bit of HL
+ * corresponds to P^127.
+ */
+static int gcm_gen_table( mbedtls_gcm_context *ctx )
+{
+ int ret, i, j;
+ uint64_t hi, lo;
+ uint64_t vl, vh;
+ unsigned char h[16];
+ size_t olen = 0;
+
+ memset( h, 0, 16 );
+ if( ( ret = mbedtls_cipher_update( &ctx->cipher_ctx, h, 16, h, &olen ) ) != 0 )
+ return( ret );
+
+    /* pack h as two 64-bit integers, big-endian */
+ hi = MBEDTLS_GET_UINT32_BE( h, 0 );
+ lo = MBEDTLS_GET_UINT32_BE( h, 4 );
+ vh = (uint64_t) hi << 32 | lo;
+
+ hi = MBEDTLS_GET_UINT32_BE( h, 8 );
+ lo = MBEDTLS_GET_UINT32_BE( h, 12 );
+ vl = (uint64_t) hi << 32 | lo;
+
+ /* 8 = 1000 corresponds to 1 in GF(2^128) */
+ ctx->HL[8] = vl;
+ ctx->HH[8] = vh;
+
+#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
+ /* With CLMUL support, we need only h, not the rest of the table */
+ if( mbedtls_aesni_has_support( MBEDTLS_AESNI_CLMUL ) ) {
+ aes_aesni_has_support = 1;
+ return( 0 );
+ }
+#endif
+
+ /* 0 corresponds to 0 in GF(2^128) */
+ ctx->HH[0] = 0;
+ ctx->HL[0] = 0;
+
+ for( i = 4; i > 0; i >>= 1 )
+ {
+ uint32_t T = ( vl & 1 ) * 0xe1000000U;
+ vl = ( vh << 63 ) | ( vl >> 1 );
+ vh = ( vh >> 1 ) ^ ( (uint64_t) T << 32);
+
+ ctx->HL[i] = vl;
+ ctx->HH[i] = vh;
+ }
+
+ for( i = 2; i <= 8; i *= 2 )
+ {
+ uint64_t *HiL = ctx->HL + i, *HiH = ctx->HH + i;
+ vh = *HiH;
+ vl = *HiL;
+ for( j = 1; j < i; j++ )
+ {
+ HiH[j] = vh ^ ctx->HH[j];
+ HiL[j] = vl ^ ctx->HL[j];
+ }
+ }
+
+ return( 0 );
+}
+
+int mbedtls_gcm_setkey( mbedtls_gcm_context *ctx,
+ mbedtls_cipher_id_t cipher,
+ const unsigned char *key,
+ unsigned int keybits )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ const mbedtls_cipher_info_t *cipher_info;
+
+ GCM_VALIDATE_RET( ctx != NULL );
+ GCM_VALIDATE_RET( key != NULL );
+ GCM_VALIDATE_RET( keybits == 128 || keybits == 192 || keybits == 256 );
+
+ cipher_info = mbedtls_cipher_info_from_values( cipher, keybits,
+ MBEDTLS_MODE_ECB );
+ if( cipher_info == NULL )
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+
+ if( cipher_info->block_size != 16 )
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+
+ if(ctx->cipher_ctx.cipher_ctx == NULL) return MBEDTLS_ERR_GCM_BAD_INPUT;
+ if(!cipher_info->base->ctx_zero_func) return MBEDTLS_ERR_GCM_BAD_INPUT;
+ (*cipher_info->base->ctx_zero_func)(ctx->cipher_ctx.cipher_ctx);
+ ctx->cipher_ctx.cipher_info = cipher_info;
+
+ if( ( ret = mbedtls_cipher_setkey( &ctx->cipher_ctx, key, keybits,
+ MBEDTLS_ENCRYPT ) ) != 0 )
+ {
+ return( ret );
+ }
+
+ if( ( ret = gcm_gen_table( ctx ) ) != 0 )
+ return( ret );
+
+ return( 0 );
+}
+
+/*
+ * Shoup's method for multiplication use this table with
+ * last4[x] = x times P^128
+ * where x and last4[x] are seen as elements of GF(2^128) as in [MGV]
+ */
+static const uint64_t last4[16] =
+{
+    0x0000ULL << 48, 0x1c20ULL << 48, 0x3840ULL << 48, 0x2460ULL << 48,
+    0x7080ULL << 48, 0x6ca0ULL << 48, 0x48c0ULL << 48, 0x54e0ULL << 48,
+    0xe100ULL << 48, 0xfd20ULL << 48, 0xd940ULL << 48, 0xc560ULL << 48,
+    0x9180ULL << 48, 0x8da0ULL << 48, 0xa9c0ULL << 48, 0xb5e0ULL << 48
+};
+
+/*
+ * Sets output to x times H using the precomputed tables.
+ * x and output are seen as elements of GF(2^128) as in [MGV].
+ */
+static void gcm_mult( mbedtls_gcm_context *ctx, const unsigned char x[16],
+ unsigned char output[16] )
+{
+ int i = 0;
+ unsigned char lo, hi, rem;
+ uint64_t zh, zl;
+
+#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
+ if( aes_aesni_has_support) {
+ unsigned char h[16];
+
+ MBEDTLS_PUT_UINT64_BE( ctx->HH[8], h, 0 );
+ MBEDTLS_PUT_UINT64_BE( ctx->HL[8], h, 8 );
+
+ mbedtls_aesni_gcm_mult( output, x, h );
+ return;
+ }
+#endif /* MBEDTLS_AESNI_C && MBEDTLS_HAVE_X86_64 */
+
+ lo = x[15] & 0xf;
+
+ zh = ctx->HH[lo];
+ zl = ctx->HL[lo];
+
+ for( i = 15; i >= 0; i-- )
+ {
+ lo = x[i] & 0xf;
+ hi = ( x[i] >> 4 ) & 0xf;
+
+ if( i != 15 )
+ {
+ rem = (unsigned char) zl & 0xf;
+ zl = ( zh << 60 ) | ( zl >> 4 );
+ zh = ( zh >> 4 );
+ zh ^= (uint64_t) last4[rem];
+ zh ^= ctx->HH[lo];
+ zl ^= ctx->HL[lo];
+
+ }
+
+ rem = (unsigned char) zl & 0xf;
+ zl = ( zh << 60 ) | ( zl >> 4 );
+ zh = ( zh >> 4 );
+ zh ^= (uint64_t) last4[rem];
+ zh ^= ctx->HH[hi];
+ zl ^= ctx->HL[hi];
+ }
+ MBEDTLS_PUT_UINT64_BE( zh, output, 0 );
+ MBEDTLS_PUT_UINT64_BE( zl, output, 8 );
+}
+
+int mbedtls_gcm_starts( mbedtls_gcm_context *ctx,
+ int mode,
+ const unsigned char *iv, size_t iv_len )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ unsigned char work_buf[16];
+ const unsigned char *p;
+ size_t use_len, olen = 0;
+ uint64_t iv_bits;
+
+ GCM_VALIDATE_RET( ctx != NULL );
+ GCM_VALIDATE_RET( iv != NULL );
+
+    /* The GCM spec limits the IV to 2^64 bits (2^61 bytes); a zero-length IV
+     * is not allowed, and this implementation additionally rejects IVs of
+     * 2^32 bytes or more. */
+ if( iv_len == 0)
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+#if __SIZE_WIDTH__ == 64
+ if( iv_len >= (1UL << 32 ))
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+#endif
+
+ memset( ctx->y, 0x00, sizeof(ctx->y) );
+ memset( ctx->buf, 0x00, sizeof(ctx->buf) );
+
+ ctx->mode = mode;
+ ctx->len = 0;
+ ctx->add_len = 0;
+
+ if( iv_len == 12 )
+ {
+ memcpy( ctx->y, iv, iv_len );
+ ctx->y[15] = 1;
+ }
+ else
+ {
+ memset( work_buf, 0x00, 16 );
+ iv_bits = (uint64_t)iv_len * 8;
+ MBEDTLS_PUT_UINT64_BE( iv_bits, work_buf, 8 );
+
+ p = iv;
+ while( iv_len > 0 )
+ {
+ use_len = ( iv_len < 16 ) ? iv_len : 16;
+
+ xorbytes(ctx->y,p,use_len);
+
+ gcm_mult( ctx, ctx->y, ctx->y );
+
+ iv_len -= use_len;
+ p += use_len;
+ }
+
+ xorbytes(ctx->y,work_buf,16);
+
+ gcm_mult( ctx, ctx->y, ctx->y );
+ }
+
+ if( ( ret = mbedtls_cipher_update( &ctx->cipher_ctx, ctx->y, 16,
+ ctx->base_ectr, &olen ) ) != 0 )
+ {
+ return( ret );
+ }
+
+ return( 0 );
+}
+
+/**
+ * mbedtls_gcm_context::buf contains the partial state of the computation of
+ * the authentication tag.
+ * mbedtls_gcm_context::add_len and mbedtls_gcm_context::len indicate
+ * different stages of the computation:
+ * * len == 0 && add_len == 0: initial state
+ * * len == 0 && add_len % 16 != 0: the first `add_len % 16` bytes have
+ * a partial block of AD that has been
+ * xored in but not yet multiplied in.
+ * * len == 0 && add_len % 16 == 0: the authentication tag is correct if
+ * the data ends now.
+ * * len % 16 != 0: the first `len % 16` bytes have
+ * a partial block of ciphertext that has
+ * been xored in but not yet multiplied in.
+ * * len > 0 && len % 16 == 0: the authentication tag is correct if
+ * the data ends now.
+ */
+int mbedtls_gcm_update_ad( mbedtls_gcm_context *ctx,
+ const unsigned char *add, size_t add_len )
+{
+ const unsigned char *p;
+ size_t use_len, offset;
+
+ GCM_VALIDATE_RET( add_len == 0 || add != NULL );
+
+    /* The additional data (AD) is limited to 2^64 bits, i.e. 2^61 bytes */
+ if( (uint64_t) add_len >> 61 != 0 )
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+
+ offset = ctx->add_len % 16;
+ p = add;
+
+ if( offset != 0 )
+ {
+ use_len = 16 - offset;
+ if( use_len > add_len )
+ use_len = add_len;
+
+ xorbytes(ctx->buf,p,use_len);
+
+ if( offset + use_len == 16 )
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+
+ ctx->add_len += use_len;
+ add_len -= use_len;
+ p += use_len;
+ }
+
+ ctx->add_len += add_len;
+
+ while( add_len >= 16 )
+ {
+ xorbytes(ctx->buf,p,16);
+
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+
+ add_len -= 16;
+ p += 16;
+ }
+
+ if( add_len > 0 )
+ xorbytes(ctx->buf,p,add_len);
+
+ return( 0 );
+}
+
+/* Increment the counter. */
+static void gcm_incr( unsigned char y[16] )
+{
+ size_t i;
+ for( i = 16; i > 12; i-- )
+ if( ++y[i - 1] != 0 )
+ break;
+}
+
+/* Calculate and apply the encryption mask. Process use_len bytes of data,
+ * starting at position offset in the mask block. */
+static int gcm_mask( mbedtls_gcm_context *ctx,
+ unsigned char ectr[16],
+ size_t offset, size_t use_len,
+ const unsigned char *input,
+ unsigned char *output )
+{
+ size_t olen = 0;
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+
+ if( ( ret = mbedtls_cipher_update( &ctx->cipher_ctx, ctx->y, 16, ectr,
+ &olen ) ) != 0 )
+ {
+ mbedtls_platform_zeroize( ectr, 16 );
+ return( ret );
+ }
+
+ if(ctx->mode == MBEDTLS_GCM_DECRYPT )
+ xorbytes3d(output,&ctx->buf[offset],&ectr[offset],input,use_len);
+ else
+ xorbytes3e(output,&ctx->buf[offset],&ectr[offset],input,use_len);
+
+ return( 0 );
+}
+
+int mbedtls_gcm_update( mbedtls_gcm_context *ctx,
+ const unsigned char *input, size_t input_length,
+ unsigned char *output, size_t output_size,
+ size_t *output_length )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ const unsigned char *p = input;
+ unsigned char *out_p = output;
+ size_t offset;
+ unsigned char ectr[16];
+
+ if( output_size < input_length )
+ return( MBEDTLS_ERR_GCM_BUFFER_TOO_SMALL );
+ GCM_VALIDATE_RET( output_length != NULL );
+ *output_length = input_length;
+
+ /* Exit early if input_length==0 so that we don't do any pointer arithmetic
+ * on a potentially null pointer.
+ * Returning early also means that the last partial block of AD remains
+ * untouched for mbedtls_gcm_finish */
+ if( input_length == 0 )
+ return( 0 );
+
+ GCM_VALIDATE_RET( ctx != NULL );
+ GCM_VALIDATE_RET( input != NULL );
+ GCM_VALIDATE_RET( output != NULL );
+
+ if( output > input && (size_t) ( output - input ) < input_length )
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+
+ /* Total length is restricted to 2^39 - 256 bits, ie 2^36 - 2^5 bytes
+ * Also check for possible overflow */
+ if( ctx->len + input_length < ctx->len ||
+ (uint64_t) ctx->len + input_length > 0xFFFFFFFE0ull )
+ {
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+ }
+
+ if( ctx->len == 0 && ctx->add_len % 16 != 0 )
+ {
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+ }
+
+ offset = ctx->len % 16;
+ if( offset != 0 )
+ {
+ size_t use_len = 16 - offset;
+ if( use_len > input_length )
+ use_len = input_length;
+
+ if( ( ret = gcm_mask( ctx, ectr, offset, use_len, p, out_p ) ) != 0 )
+ return( ret );
+
+ if( offset + use_len == 16 )
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+
+ ctx->len += use_len;
+ input_length -= use_len;
+ p += use_len;
+ out_p += use_len;
+ }
+
+ ctx->len += input_length;
+
+ while( input_length >= 16 )
+ {
+ gcm_incr( ctx->y );
+ if( ( ret = gcm_mask( ctx, ectr, 0, 16, p, out_p ) ) != 0 )
+ return( ret );
+
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+
+ input_length -= 16;
+ p += 16;
+ out_p += 16;
+ }
+
+ if( input_length > 0 )
+ {
+ gcm_incr( ctx->y );
+ if( ( ret = gcm_mask( ctx, ectr, 0, input_length, p, out_p ) ) != 0 )
+ return( ret );
+ }
+
+ return( 0 );
+}
+
+int mbedtls_gcm_finish( mbedtls_gcm_context *ctx,
+ unsigned char *output, size_t output_size,
+ size_t *output_length,
+ unsigned char *tag, size_t tag_len )
+{
+ unsigned char work_buf[16];
+ uint64_t orig_len;
+ uint64_t orig_add_len;
+
+ GCM_VALIDATE_RET( ctx != NULL );
+ GCM_VALIDATE_RET( tag != NULL );
+
+ /* We never pass any output in finish(). The output parameter exists only
+ * for the sake of alternative implementations. */
+ (void) output;
+ (void) output_size;
+ *output_length = 0;
+
+ orig_len = ctx->len * 8;
+ orig_add_len = ctx->add_len * 8;
+
+ if( ctx->len == 0 && ctx->add_len % 16 != 0 )
+ {
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+ }
+
+ if( tag_len > 16 || tag_len < 4 )
+ return( MBEDTLS_ERR_GCM_BAD_INPUT );
+
+ if( ctx->len % 16 != 0 )
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+
+ memcpy( tag, ctx->base_ectr, tag_len );
+
+ if( orig_len || orig_add_len )
+ {
+ MBEDTLS_PUT_UINT64_BE( ( orig_add_len ), work_buf, 0 );
+ MBEDTLS_PUT_UINT64_BE( ( orig_len ), work_buf, 8 );
+
+ xorbytes(ctx->buf,work_buf,16);
+
+ gcm_mult( ctx, ctx->buf, ctx->buf );
+
+ xorbytes(tag,ctx->buf,tag_len);
+ }
+
+ return( 0 );
+}
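+
+/*
+ * Minimal streaming sketch (illustration only, hence not compiled): the AD
+ * and the payload may be fed in arbitrarily sized chunks, as described by
+ * the state notes above mbedtls_gcm_update_ad(). MBEDTLS_GCM_ENCRYPT is
+ * assumed to be the encryption mode constant from gcm.h; buffer names are
+ * hypothetical and the key is assumed to be set already.
+ */
+#if 0
+static int gcm_stream_example( mbedtls_gcm_context *gcm,
+                               const unsigned char iv[12],
+                               const unsigned char *aad, size_t aad_len,
+                               const unsigned char *pt, size_t pt_len,
+                               unsigned char *ct, unsigned char tag[16] )
+{
+    size_t half = pt_len / 2, olen = 0;
+    int ret;
+
+    if( ( ret = mbedtls_gcm_starts( gcm, MBEDTLS_GCM_ENCRYPT, iv, 12 ) ) != 0 )
+        return( ret );
+    if( ( ret = mbedtls_gcm_update_ad( gcm, aad, aad_len ) ) != 0 )
+        return( ret );
+    /* the payload is split into two chunks of arbitrary size */
+    if( ( ret = mbedtls_gcm_update( gcm, pt, half, ct, half, &olen ) ) != 0 )
+        return( ret );
+    if( ( ret = mbedtls_gcm_update( gcm, pt + half, pt_len - half,
+                                    ct + half, pt_len - half, &olen ) ) != 0 )
+        return( ret );
+    return( mbedtls_gcm_finish( gcm, NULL, 0, &olen, tag, 16 ) );
+}
+#endif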
+
+int mbedtls_gcm_crypt_and_tag( mbedtls_gcm_context *ctx,
+ int mode,
+ size_t length,
+ const unsigned char *iv,
+ size_t iv_len,
+ const unsigned char *add,
+ size_t add_len,
+ const unsigned char *input,
+ unsigned char *output,
+ size_t tag_len,
+ unsigned char *tag )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ size_t olen;
+
+ GCM_VALIDATE_RET( ctx != NULL );
+ GCM_VALIDATE_RET( iv != NULL );
+ GCM_VALIDATE_RET( add_len == 0 || add != NULL );
+ GCM_VALIDATE_RET( length == 0 || input != NULL );
+ GCM_VALIDATE_RET( length == 0 || output != NULL );
+ GCM_VALIDATE_RET( tag != NULL );
+
+ if( ( ret = mbedtls_gcm_starts( ctx, mode, iv, iv_len ) ) != 0 )
+ return( ret );
+
+ if( ( ret = mbedtls_gcm_update_ad( ctx, add, add_len ) ) != 0 )
+ return( ret );
+
+ if( ( ret = mbedtls_gcm_update( ctx, input, length,
+ output, length, &olen ) ) != 0 )
+ return( ret );
+
+ if( ( ret = mbedtls_gcm_finish( ctx, NULL, 0, &olen, tag, tag_len ) ) != 0 )
+ return( ret );
+
+ return( 0 );
+}
+
+int mbedtls_gcm_auth_decrypt( mbedtls_gcm_context *ctx,
+ size_t length,
+ const unsigned char *iv,
+ size_t iv_len,
+ const unsigned char *add,
+ size_t add_len,
+ const unsigned char *tag,
+ size_t tag_len,
+ const unsigned char *input,
+ unsigned char *output )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ unsigned char check_tag[16];
+ size_t i;
+ int diff;
+
+ GCM_VALIDATE_RET( ctx != NULL );
+ GCM_VALIDATE_RET( iv != NULL );
+ GCM_VALIDATE_RET( add_len == 0 || add != NULL );
+ GCM_VALIDATE_RET( tag != NULL );
+ GCM_VALIDATE_RET( length == 0 || input != NULL );
+ GCM_VALIDATE_RET( length == 0 || output != NULL );
+
+ if( ( ret = mbedtls_gcm_crypt_and_tag( ctx, MBEDTLS_GCM_DECRYPT, length,
+ iv, iv_len, add, add_len,
+ input, output, tag_len, check_tag ) ) != 0 )
+ {
+ return( ret );
+ }
+
+ /* Check tag in "constant-time" */
+ for( diff = 0, i = 0; i < tag_len; i++ )
+ diff |= tag[i] ^ check_tag[i];
+
+ if( diff != 0 )
+ {
+ mbedtls_platform_zeroize( output, length );
+ return( MBEDTLS_ERR_GCM_AUTH_FAILED );
+ }
+
+ return( 0 );
+}
+
+void mbedtls_gcm_free( mbedtls_gcm_context *ctx )
+{
+ if( ctx == NULL )
+ return;
+    /* The AES context passed to mbedtls_gcm_init() is owned by the caller,
+     * so there is nothing to release here. */
+    // mbedtls_cipher_free( &ctx->cipher_ctx );
+}
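+
+/*
+ * Minimal usage sketch (illustration only, hence not compiled): one-shot
+ * authenticated decryption with the API above. Note that this variant of
+ * mbedtls_gcm_init() takes a caller-provided AES context, unlike stock
+ * Mbed TLS. Buffer names and lengths are hypothetical.
+ */
+#if 0
+static int gcm_decrypt_example( const unsigned char key[16],
+                                const unsigned char iv[12],
+                                const unsigned char *aad, size_t aad_len,
+                                const unsigned char *ct, size_t ct_len,
+                                const unsigned char tag[16],
+                                unsigned char *pt /* ct_len bytes */ )
+{
+    mbedtls_aes_context aes;
+    mbedtls_gcm_context gcm;
+    int ret;
+
+    mbedtls_aes_init( &aes );
+    mbedtls_gcm_init( &gcm, &aes );   /* the AES context stays caller-owned */
+
+    ret = mbedtls_gcm_setkey( &gcm, MBEDTLS_CIPHER_ID_AES, key, 128 );
+    if( ret == 0 )
+        ret = mbedtls_gcm_auth_decrypt( &gcm, ct_len, iv, 12, aad, aad_len,
+                                        tag, 16, ct, pt );
+
+    mbedtls_gcm_free( &gcm );
+    mbedtls_aes_free( &aes );
+    /* 0 on success, MBEDTLS_ERR_GCM_AUTH_FAILED if the tag does not match */
+    return( ret );
+}
+#endif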
+
diff --git a/src/lib/third_party/src/gcrypt_light.c b/src/lib/third_party/src/gcrypt_light.c
new file mode 100644
index 000000000..9c8a9e3f3
--- /dev/null
+++ b/src/lib/third_party/src/gcrypt_light.c
@@ -0,0 +1,375 @@
+
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "ndpi_api.h"
+
+#if !defined(HAVE_LIBGCRYPT)
+
+#if defined(__GNUC__) && \
+ ( defined(__amd64__) || defined(__x86_64__) ) && \
+ ! defined(MBEDTLS_HAVE_X86_64)
+#define MBEDTLS_HAVE_X86_64
+#define MBEDTLS_AESNI_C
+#endif
+
+/****************************/
+#define MBEDTLS_GCM_C
+#define MBEDTLS_CIPHER_C
+#define MBEDTLS_AES_C
+/****************************/
+
+#define mbedtls_calloc ndpi_calloc
+#define mbedtls_free ndpi_free
+
+#include "gcrypt_light.h"
+
+#define MBEDTLS_CHECK_RETURN_TYPICAL
+#define MBEDTLS_INTERNAL_VALIDATE_RET( cond, ret ) do { } while( 0 )
+#define MBEDTLS_INTERNAL_VALIDATE( cond ) do { } while( 0 )
+
+#define mbedtls_platform_zeroize(a,b) memset(a,0,b)
+#define mbedtls_ct_memcmp(s1,s2,n) memcmp(s1,s2,n)
+
+#include "gcrypt/common.h"
+#include "gcrypt/error.h"
+#include "gcrypt/aes.h"
+#if defined(MBEDTLS_AESNI_C)
+#include "gcrypt/aesni.h"
+#endif
+#include "gcrypt/cipher.h"
+#include "gcrypt/gcm.h"
+#include "gcrypt/digest.h"
+#include "gcrypt/cipher_wrap.h"
+
+
+#include "gcrypt/aes.c"
+#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
+#include "gcrypt/aesni.c"
+#endif
+
+#include "gcrypt/gcm.c"
+
+#include "gcrypt/cipher.c"
+#include "gcrypt/cipher_wrap.c"
+#include "gcrypt/digest.c"
+
+#define MBEDTLS_ERR_MD_ALLOC_FAILED 0x50f0
+#define MBEDTLS_ERR_MD_NOT_SUPPORT 0x50f1
+#define MBEDTLS_ERR_MD_REKEY 0x50f2
+#define MBEDTLS_ERR_MD_DATA_TOO_BIG 0x50f3
+#define MBEDTLS_ERR_CIPHER_BAD_KEY 0x50f4
+#define MBEDTLS_ERR_GCM_ALLOC_FAILED 0x50f5
+#define MBEDTLS_ERR_GCM_NOT_SUPPORT 0x50f6
+#define MBEDTLS_ERR_GCM_MISSING_KEY 0x50f7
+#define MBEDTLS_ERR_AES_MISSING_KEY 0x50f8
+#define MBEDTLS_ERR_NOT_SUPPORT 0x50f9
+
+const char *gcry_errstr(gcry_error_t err) {
+ switch(err) {
+ case MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED: return "Corruption detected";
+ case MBEDTLS_ERR_MD_ALLOC_FAILED: return "MD:Alloc failed";
+ case MBEDTLS_ERR_MD_NOT_SUPPORT: return "MD:Not supported";
+ case MBEDTLS_ERR_MD_REKEY: return "MD:Key already set";
+ case MBEDTLS_ERR_MD_DATA_TOO_BIG: return "MD:Data is too long";
+ case MBEDTLS_ERR_AES_BAD_INPUT_DATA: return "AES:Bad input data";
+ case MBEDTLS_ERR_AES_MISSING_KEY: return "AES:No key";
+ case MBEDTLS_ERR_AES_INVALID_KEY_LENGTH: return "AES:Invalid key length";
+ case MBEDTLS_ERR_AES_INVALID_INPUT_LENGTH: return "AES:Invalid input length";
+ case MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA: return "CIPHER:Bad input data";
+ case MBEDTLS_ERR_CIPHER_ALLOC_FAILED: return "CIPHER:Alloc failed";
+ case MBEDTLS_ERR_CIPHER_BAD_KEY: return "CIPHER:Wrong key/iv";
+ case MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE: return "CIPHER:Feature unavailable";
+ case MBEDTLS_ERR_CIPHER_INVALID_CONTEXT: return "CIPHER:Invalid context";
+ case MBEDTLS_ERR_CIPHER_FULL_BLOCK_EXPECTED: return "CIPHER:Full block expected";
+ case MBEDTLS_ERR_CIPHER_AUTH_FAILED: return "CIPHER:Auth failed";
+ case MBEDTLS_ERR_GCM_AUTH_FAILED: return "GCM:Auth failed";
+ case MBEDTLS_ERR_GCM_BAD_INPUT: return "GCM:Bad input";
+ case MBEDTLS_ERR_GCM_BUFFER_TOO_SMALL: return "GCM:Buffer too small";
+ case MBEDTLS_ERR_GCM_ALLOC_FAILED: return "GCM:Alloc failed";
+ case MBEDTLS_ERR_GCM_NOT_SUPPORT: return "GCM:Not supported";
+ case MBEDTLS_ERR_GCM_MISSING_KEY: return "GCM:No key/siv/auth";
+ case MBEDTLS_ERR_NOT_SUPPORT: return "Not supported";
+ }
+ return "Unknown error code";
+}
+
+char *gpg_strerror_r(gcry_error_t err, char *buf, size_t buflen) {
+  const char *err_txt = gcry_errstr(err);
+  if(!buf || !buflen) return buf;
+  strncpy(buf, err_txt, buflen-1);
+  buf[buflen-1] = '\0'; /* strncpy() does not null-terminate on truncation */
+  return buf;
+}
+
+int gcry_control (int ctl,int val) {
+ if(ctl == GCRYCTL_INITIALIZATION_FINISHED) return GPG_ERR_NO_ERROR;
+ return MBEDTLS_ERR_NOT_SUPPORT;
+}
+
+const char *gcry_check_version(void *unused) {
+ return "1.8.6internal";
+}
+
+gcry_error_t gcry_md_open(gcry_md_hd_t *h,int algo,int flags) {
+ gcry_md_hd_t ctx;
+ if(!(algo == GCRY_MD_SHA256 && flags == GCRY_MD_FLAG_HMAC)) return MBEDTLS_ERR_MD_NOT_SUPPORT;
+ ctx = ndpi_calloc(1,sizeof(struct gcry_md_hd));
+ if(!ctx) return MBEDTLS_ERR_MD_ALLOC_FAILED;
+ *h = ctx;
+ return GPG_ERR_NO_ERROR;
+}
+
+void gcry_md_close(gcry_md_hd_t h) {
+ if(h) ndpi_free(h);
+}
+
+void gcry_md_reset(gcry_md_hd_t h) {
+ memset((char *)h, 0, sizeof(*h));
+}
+
+gcry_error_t gcry_md_setkey(gcry_md_hd_t h,const uint8_t *key,size_t key_len) {
+ if(h->key_len) return MBEDTLS_ERR_MD_REKEY;
+ h->key_len = key_len <= sizeof(h->key) ? key_len : sizeof(h->key);
+ memcpy(h->key,key,h->key_len);
+ return GPG_ERR_NO_ERROR;
+}
+
+gcry_error_t gcry_md_write(gcry_md_hd_t h,const uint8_t *data,size_t data_len) {
+ if(h->data_len + data_len > GCRY_MD_BUFF_SIZE) return MBEDTLS_ERR_MD_DATA_TOO_BIG;
+ memcpy(&h->data_buf[h->data_len],data,data_len);
+ h->data_len += data_len;
+ return GPG_ERR_NO_ERROR;
+}
+
+size_t gcry_md_get_algo_dlen(int algo) {
+ return algo == GCRY_MD_SHA256 ? HMAC_SHA256_DIGEST_SIZE:0;
+}
+
+int gcry_md_get_algo(gcry_md_hd_t h) {
+ return GCRY_MD_SHA256;
+}
+
+uint8_t *gcry_md_read(gcry_md_hd_t h, int flag) {
+ hmac_sha256(h->out,h->data_buf,h->data_len,h->key,h->key_len);
+ return h->out;
+}
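+
+/*
+ * Minimal usage sketch (illustration only, hence not compiled) of the
+ * HMAC-SHA256 subset implemented above: open, set the key, buffer the data,
+ * read the tag. Variable names are hypothetical; the message must fit the
+ * internal GCRY_MD_BUFF_SIZE buffer.
+ */
+#if 0
+static int md_hmac_example(const uint8_t *key, size_t key_len,
+                           const uint8_t *msg, size_t msg_len,
+                           uint8_t tag[HMAC_SHA256_DIGEST_SIZE]) {
+  gcry_md_hd_t h;
+
+  if(gcry_md_open(&h, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC) != GPG_ERR_NO_ERROR)
+    return -1;
+  if(gcry_md_setkey(h, key, key_len) != GPG_ERR_NO_ERROR ||
+     gcry_md_write(h, msg, msg_len) != GPG_ERR_NO_ERROR) {
+    gcry_md_close(h);
+    return -1;
+  }
+  memcpy(tag, gcry_md_read(h, 0), gcry_md_get_algo_dlen(GCRY_MD_SHA256));
+  gcry_md_close(h);
+  return 0;
+}
+#endif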
+
+/**********************************************************/
+
+static int check_valid_algo_mode(gcry_cipher_hd_t h) {
+ if(!h) return 1;
+ if(h->algo == GCRY_CIPHER_AES128 &&
+ (h->mode == GCRY_CIPHER_MODE_ECB || h->mode == GCRY_CIPHER_MODE_GCM)) return 0;
+ return 1;
+}
+
+#define ROUND_SIZE8(a) (((a)+7UL) & ~7UL)
+
+gcry_error_t gcry_cipher_open (gcry_cipher_hd_t *handle,
+ int algo, int mode, unsigned int flags) {
+
+  struct gcry_cipher_hd *r = 0;
+  size_t s_len = ROUND_SIZE8(sizeof(struct gcry_cipher_hd));
+
+ if(flags || algo != GCRY_CIPHER_AES128 || !( mode == GCRY_CIPHER_MODE_ECB || mode == GCRY_CIPHER_MODE_GCM)) return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+
+ switch(mode) {
+ case GCRY_CIPHER_MODE_ECB:
+ r = ndpi_calloc(1,s_len + sizeof(mbedtls_aes_context));
+ if(!r) return MBEDTLS_ERR_CIPHER_ALLOC_FAILED;
+ r->ctx.ecb = (mbedtls_aes_context *)(r+1);
+ mbedtls_aes_init(r->ctx.ecb);
+ break;
+ case GCRY_CIPHER_MODE_GCM:
+ {
+ size_t aes_ctx_size = ROUND_SIZE8(sizeof( mbedtls_aes_context ));
+ size_t gcm_ctx_size = ROUND_SIZE8(sizeof( mbedtls_gcm_context ));
+
+ r = ndpi_calloc(1,s_len + gcm_ctx_size + aes_ctx_size);
+ if(!r) return MBEDTLS_ERR_CIPHER_ALLOC_FAILED;
+ r->ctx.gcm = (mbedtls_gcm_context *)(r+1);
+ mbedtls_gcm_init(r->ctx.gcm,(void *)(((char *)(r+1)) + gcm_ctx_size));
+ }
+ break;
+ default:
+ return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+ }
+ r->algo = algo;
+ r->mode = mode;
+ *handle = r;
+ return GPG_ERR_NO_ERROR;
+}
+
+void gcry_cipher_close (gcry_cipher_hd_t h) {
+ if(h && !check_valid_algo_mode(h)) {
+ switch(h->mode) {
+ case GCRY_CIPHER_MODE_ECB:
+ mbedtls_aes_free(h->ctx.ecb);
+ break;
+ case GCRY_CIPHER_MODE_GCM:
+ mbedtls_gcm_free(h->ctx.gcm);
+ break;
+ }
+ ndpi_free(h);
+ }
+}
+
+gcry_error_t gcry_cipher_ctl (gcry_cipher_hd_t h, int cmd, void *data, size_t len) {
+ return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+}
+
+gcry_error_t gcry_cipher_reset (gcry_cipher_hd_t h) {
+
+ gcry_error_t err = MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+ if(check_valid_algo_mode(h)) return err;
+ h->authlen = 0; h->taglen = 0; h->ivlen = 0;
+ h->s_auth = 0; h->s_iv = 0; h->s_crypt_ok = 0;
+ memset((char *)h->iv,0,sizeof(h->iv));
+ memset((char *)h->auth,0,sizeof(h->auth));
+ memset((char *)h->tag,0,sizeof(h->tag));
+ switch(h->mode) {
+ case GCRY_CIPHER_MODE_ECB:
+ break;
+ case GCRY_CIPHER_MODE_GCM:
+ mbedtls_cipher_reset(&h->ctx.gcm->cipher_ctx);
+ break;
+ default:
+ return err;
+ }
+ return 0;
+}
+
+
+gcry_error_t gcry_cipher_setkey (gcry_cipher_hd_t h, const void *key, size_t keylen) {
+ gcry_error_t r = MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA;
+ if(check_valid_algo_mode(h)) return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+ if( h->s_key ) return MBEDTLS_ERR_CIPHER_BAD_KEY;
+ if( keylen != gcry_cipher_get_algo_keylen(h->algo)) return MBEDTLS_ERR_CIPHER_BAD_KEY;
+ switch(h->mode) {
+ case GCRY_CIPHER_MODE_ECB:
+ r = mbedtls_aes_setkey_enc( h->ctx.ecb, key, keylen*8 );
+ break;
+ case GCRY_CIPHER_MODE_GCM:
+ r = mbedtls_gcm_setkey( h->ctx.gcm, MBEDTLS_CIPHER_ID_AES, key, keylen*8 );
+ break;
+ }
+ if(!r) {
+ h->s_key = 1;
+ h->keylen = keylen;
+ }
+ return r;
+}
+
+gcry_error_t gcry_cipher_setiv (gcry_cipher_hd_t h, const void *iv, size_t ivlen) {
+ if(check_valid_algo_mode(h)) return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+ if(h->s_iv) return MBEDTLS_ERR_CIPHER_BAD_KEY;
+ switch(h->mode) {
+ case GCRY_CIPHER_MODE_GCM:
+ if(ivlen != 12) return MBEDTLS_ERR_CIPHER_BAD_KEY;
+ h->s_iv = 1;
+ h->ivlen = ivlen;
+ memcpy( h->iv, iv, ivlen );
+ return 0;
+ }
+ return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+}
+
+gcry_error_t gcry_cipher_authenticate (gcry_cipher_hd_t h, const void *abuf, size_t abuflen) {
+ if(check_valid_algo_mode(h)) return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+ if(h->s_auth) return MBEDTLS_ERR_CIPHER_BAD_KEY;
+ switch(h->mode) {
+ case GCRY_CIPHER_MODE_GCM:
+ if(abuflen > sizeof(h->auth)) return MBEDTLS_ERR_CIPHER_BAD_KEY;
+ h->s_auth = 1;
+ h->authlen = abuflen;
+ memcpy(h->auth,abuf,abuflen);
+ return 0;
+ }
+ return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+}
+
+gcry_error_t gcry_cipher_checktag (gcry_cipher_hd_t h, const void *intag, size_t taglen) {
+ if(check_valid_algo_mode(h)) return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+ switch(h->mode) {
+ case GCRY_CIPHER_MODE_GCM:
+ if(h->s_crypt_ok && h->taglen == taglen) {
+ size_t i;
+ int diff;
+ const uint8_t *ctag = intag;
+ for( diff = 0, i = 0; i < taglen; i++ )
+ diff |= ctag[i] ^ h->tag[i];
+ if(!diff) return 0;
+ }
+ return MBEDTLS_ERR_GCM_AUTH_FAILED;
+ }
+ return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+}
+
+size_t gcry_cipher_get_algo_keylen (int algo) {
+ switch(algo) {
+ case GCRY_CIPHER_AES128: return 16;
+ default: return 0;
+ }
+ return 0;
+}
+
+static gcry_error_t _gcry_cipher_crypt (gcry_cipher_hd_t h,
+ void *out, size_t outsize,
+ const void *in, size_t inlen,int encrypt) {
+ uint8_t *src = NULL;
+ size_t srclen = 0;
+ gcry_error_t rv = MBEDTLS_ERR_GCM_BAD_INPUT;
+
+ if(check_valid_algo_mode(h)) return MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE;
+ if(!inlen && !outsize) return MBEDTLS_ERR_GCM_BAD_INPUT;
+ if(!in && !inlen) {
+ src = ndpi_malloc(outsize);
+ if(!src) return MBEDTLS_ERR_GCM_ALLOC_FAILED;
+ srclen = outsize;
+ memcpy(src,out,outsize);
+ } else {
+ if(inlen != outsize) return MBEDTLS_ERR_GCM_BAD_INPUT;
+ }
+  /* Do not return from inside the switch: "src" (if allocated above for the
+   * in-place case) must be freed below. */
+  switch(h->mode) {
+    case GCRY_CIPHER_MODE_ECB:
+        if(!encrypt) { rv = MBEDTLS_ERR_GCM_NOT_SUPPORT; break; }
+        if(!( h->s_key && !h->s_crypt_ok)) { rv = MBEDTLS_ERR_AES_MISSING_KEY; break; }
+        rv = mbedtls_aes_crypt_ecb(h->ctx.ecb, MBEDTLS_AES_ENCRYPT,
+                            src ? src:in, out);
+        break;
+    case GCRY_CIPHER_MODE_GCM:
+        if(encrypt) { rv = MBEDTLS_ERR_GCM_NOT_SUPPORT; break; }
+        if(!( h->s_key && h->s_auth && h->s_iv && !h->s_crypt_ok)) { rv = MBEDTLS_ERR_GCM_MISSING_KEY; break; }
+        h->taglen = 16;
+        rv = mbedtls_gcm_crypt_and_tag(h->ctx.gcm,
+                        MBEDTLS_GCM_DECRYPT,
+                        src ? srclen:outsize,
+                        h->iv,h->ivlen,
+                        h->auth,h->authlen,
+                        src ? src:in,out,
+                        h->taglen, h->tag);
+        break;
+  }
+ if(!rv) h->s_crypt_ok = 1;
+
+ if(src) ndpi_free(src);
+ return rv;
+}
+
+
+gcry_error_t gcry_cipher_encrypt (gcry_cipher_hd_t h,
+ void *out, size_t outsize,
+ const void *in, size_t inlen) {
+ return _gcry_cipher_crypt(h,out,outsize,in,inlen,1);
+}
+
+gcry_error_t gcry_cipher_decrypt (gcry_cipher_hd_t h,
+ void *out, size_t outsize,
+ const void *in, size_t inlen) {
+ return _gcry_cipher_crypt(h,out,outsize,in,inlen,0);
+}
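+
+/*
+ * Minimal usage sketch (illustration only, hence not compiled): AES-128-GCM
+ * decryption through the wrapper above. Only decryption is supported in GCM
+ * mode (see _gcry_cipher_crypt()); gcry_cipher_checktag() compares against
+ * the tag computed during decryption. Buffer names are hypothetical and the
+ * AAD must fit the handle's internal buffer.
+ */
+#if 0
+static int gcm_wrapper_example(const uint8_t key[16], const uint8_t iv[12],
+                               const uint8_t *aad, size_t aad_len,
+                               const uint8_t *ct, size_t ct_len,
+                               const uint8_t tag[16], uint8_t *pt) {
+  gcry_cipher_hd_t h;
+  gcry_error_t rc;
+
+  rc = gcry_cipher_open(&h, GCRY_CIPHER_AES128, GCRY_CIPHER_MODE_GCM, 0);
+  if(rc) return -1;
+  if(!(rc = gcry_cipher_setkey(h, key, 16)) &&
+     !(rc = gcry_cipher_setiv(h, iv, 12)) &&
+     !(rc = gcry_cipher_authenticate(h, aad, aad_len)) &&
+     !(rc = gcry_cipher_decrypt(h, pt, ct_len, ct, ct_len)))
+    rc = gcry_cipher_checktag(h, tag, 16);
+  gcry_cipher_close(h);
+  return rc ? -1 : 0;
+}
+#endif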
+
+#endif /* HAVE_LIBGCRYPT */
+
+/* vim: set ts=4 sw=4 et: */