From ad03518aa8179e8e115345af0590ac5e7d4af8a6 Mon Sep 17 00:00:00 2001 From: JacobBarthelmeh Date: Mon, 9 Dec 2024 09:50:44 -0700 Subject: [PATCH] armasm with opensslcoexist build --- wolfcrypt/src/port/arm/armv8-aes.c | 794 ++++++++++++++--------------- 1 file changed, 397 insertions(+), 397 deletions(-) diff --git a/wolfcrypt/src/port/arm/armv8-aes.c b/wolfcrypt/src/port/arm/armv8-aes.c index 9ae90e8cfa..03a7daaf1b 100644 --- a/wolfcrypt/src/port/arm/armv8-aes.c +++ b/wolfcrypt/src/port/arm/armv8-aes.c @@ -148,7 +148,7 @@ static WC_INLINE void IncrementGcmCounter(byte* inOutCtr) int i; /* in network byte order so start at end and work back */ - for (i = AES_BLOCK_SIZE - 1; i >= AES_BLOCK_SIZE - CTR_SZ; i--) { + for (i = WC_AES_BLOCK_SIZE - 1; i >= WC_AES_BLOCK_SIZE - CTR_SZ; i--) { if (++inOutCtr[i]) /* we're done unless we overflow */ return; } @@ -296,9 +296,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv) return BAD_FUNC_ARG; if (iv) - XMEMCPY(aes->reg, iv, AES_BLOCK_SIZE); + XMEMCPY(aes->reg, iv, WC_AES_BLOCK_SIZE); else - XMEMSET(aes->reg, 0, AES_BLOCK_SIZE); + XMEMSET(aes->reg, 0, WC_AES_BLOCK_SIZE); return 0; } @@ -452,7 +452,7 @@ void AES_decrypt_AARCH64(const byte* inBlock, byte* outBlock, byte* key, int nr) void AES_CBC_encrypt_AARCH64(const byte* in, byte* out, word32 sz, byte* reg, byte* key, int rounds) { - word32 numBlocks = sz / AES_BLOCK_SIZE; + word32 numBlocks = sz / WC_AES_BLOCK_SIZE; /* AESE exor's input with round key @@ -638,7 +638,7 @@ void AES_CBC_encrypt_AARCH64(const byte* in, byte* out, word32 sz, byte* reg, void AES_CBC_decrypt_AARCH64(const byte* in, byte* out, word32 sz, byte* reg, byte* key, int rounds) { - word32 numBlocks = sz / AES_BLOCK_SIZE; + word32 numBlocks = sz / WC_AES_BLOCK_SIZE; switch (rounds) { #ifdef WOLFSSL_AES_128 @@ -1330,22 +1330,22 @@ void AES_CTR_encrypt_AARCH64(Aes* aes, byte* out, const byte* in, word32 sz) word32 numBlocks; /* do as many block size ops as possible */ - numBlocks = sz / AES_BLOCK_SIZE; + numBlocks = sz / WC_AES_BLOCK_SIZE; if (numBlocks > 0) { wc_aes_ctr_encrypt_asm(aes, out, in, (byte*)aes->key, numBlocks); - sz -= numBlocks * AES_BLOCK_SIZE; - out += numBlocks * AES_BLOCK_SIZE; - in += numBlocks * AES_BLOCK_SIZE; + sz -= numBlocks * WC_AES_BLOCK_SIZE; + out += numBlocks * WC_AES_BLOCK_SIZE; + in += numBlocks * WC_AES_BLOCK_SIZE; } /* handle non block size remaining */ if (sz) { - byte zeros[AES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, + byte zeros[WC_AES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; wc_aes_ctr_encrypt_asm(aes, (byte*)aes->tmp, zeros, (byte*)aes->key, 1); - aes->left = AES_BLOCK_SIZE; + aes->left = WC_AES_BLOCK_SIZE; tmp = (byte*)aes->tmp; while (sz--) { @@ -1402,7 +1402,7 @@ void GMULT_AARCH64(byte* X, byte* Y) static void GHASH_AARCH64(Gcm* gcm, const byte* a, word32 aSz, const byte* c, word32 cSz, byte* s, word32 sSz) { - byte scratch[AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; __asm__ __volatile__ ( "LD1 {v3.16b}, %[h] \n" @@ -1771,15 +1771,15 @@ static void GHASH_AARCH64(Gcm* gcm, const byte* a, word32 aSz, const byte* c, #ifdef WOLFSSL_AESGCM_STREAM /* Access initialization counter data. */ - #define AES_INITCTR(aes) ((aes)->streamData + 0 * AES_BLOCK_SIZE) + #define AES_INITCTR(aes) ((aes)->streamData + 0 * WC_AES_BLOCK_SIZE) /* Access counter data. */ - #define AES_COUNTER(aes) ((aes)->streamData + 1 * AES_BLOCK_SIZE) + #define AES_COUNTER(aes) ((aes)->streamData + 1 * WC_AES_BLOCK_SIZE) /* Access tag data. 
*/ - #define AES_TAG(aes) ((aes)->streamData + 2 * AES_BLOCK_SIZE) + #define AES_TAG(aes) ((aes)->streamData + 2 * WC_AES_BLOCK_SIZE) /* Access last GHASH block. */ - #define AES_LASTGBLOCK(aes) ((aes)->streamData + 3 * AES_BLOCK_SIZE) + #define AES_LASTGBLOCK(aes) ((aes)->streamData + 3 * WC_AES_BLOCK_SIZE) /* Access last encrypted block. */ - #define AES_LASTBLOCK(aes) ((aes)->streamData + 4 * AES_BLOCK_SIZE) + #define AES_LASTBLOCK(aes) ((aes)->streamData + 4 * WC_AES_BLOCK_SIZE) /* GHASH one block of data. * @@ -1790,7 +1790,7 @@ static void GHASH_AARCH64(Gcm* gcm, const byte* a, word32 aSz, const byte* c, */ #define GHASH_ONE_BLOCK_AARCH64(aes, block) \ do { \ - xorbuf(AES_TAG(aes), block, AES_BLOCK_SIZE); \ + xorbuf(AES_TAG(aes), block, WC_AES_BLOCK_SIZE); \ GMULT_AARCH64(AES_TAG(aes), aes->gcm.H); \ } \ while (0) @@ -1803,7 +1803,7 @@ static void GHASH_AARCH64(Gcm* gcm, const byte* a, word32 aSz, const byte* c, */ #define GHASH_LEN_BLOCK_AARCH64(aes) \ do { \ - byte scratch[AES_BLOCK_SIZE]; \ + byte scratch[WC_AES_BLOCK_SIZE]; \ FlattenSzInBits(&scratch[0], aes->aSz); \ FlattenSzInBits(&scratch[8], aes->cSz); \ GHASH_ONE_BLOCK_AARCH64(aes, scratch); \ @@ -1831,14 +1831,14 @@ void GHASH_UPDATE_AARCH64(Aes* aes, const byte* a, word32 aSz, const byte* c, /* Check if we have unprocessed data. */ if (aes->aOver > 0) { /* Calculate amount we can use - fill up the block. */ - byte sz = AES_BLOCK_SIZE - aes->aOver; + byte sz = WC_AES_BLOCK_SIZE - aes->aOver; if (sz > aSz) { sz = aSz; } /* Copy extra into last GHASH block array and update count. */ XMEMCPY(AES_LASTGBLOCK(aes) + aes->aOver, a, sz); aes->aOver += sz; - if (aes->aOver == AES_BLOCK_SIZE) { + if (aes->aOver == WC_AES_BLOCK_SIZE) { /* We have filled up the block and can process. */ GHASH_ONE_BLOCK_AARCH64(aes, AES_LASTGBLOCK(aes)); /* Reset count. */ @@ -1850,12 +1850,12 @@ void GHASH_UPDATE_AARCH64(Aes* aes, const byte* a, word32 aSz, const byte* c, } /* Calculate number of blocks of AAD and the leftover. */ - blocks = aSz / AES_BLOCK_SIZE; - partial = aSz % AES_BLOCK_SIZE; + blocks = aSz / WC_AES_BLOCK_SIZE; + partial = aSz % WC_AES_BLOCK_SIZE; /* GHASH full blocks now. */ while (blocks--) { GHASH_ONE_BLOCK_AARCH64(aes, a); - a += AES_BLOCK_SIZE; + a += WC_AES_BLOCK_SIZE; } if (partial != 0) { /* Cache the partial block. */ @@ -1866,7 +1866,7 @@ void GHASH_UPDATE_AARCH64(Aes* aes, const byte* a, word32 aSz, const byte* c, if (aes->aOver > 0 && cSz > 0 && c != NULL) { /* No more AAD coming and we have a partial block. */ /* Fill the rest of the block with zeros. */ - byte sz = AES_BLOCK_SIZE - aes->aOver; + byte sz = WC_AES_BLOCK_SIZE - aes->aOver; XMEMSET(AES_LASTGBLOCK(aes) + aes->aOver, 0, sz); /* GHASH last AAD block. */ GHASH_ONE_BLOCK_AARCH64(aes, AES_LASTGBLOCK(aes)); @@ -1880,14 +1880,14 @@ void GHASH_UPDATE_AARCH64(Aes* aes, const byte* a, word32 aSz, const byte* c, aes->cSz += cSz; if (aes->cOver > 0) { /* Calculate amount we can use - fill up the block. */ - byte sz = AES_BLOCK_SIZE - aes->cOver; + byte sz = WC_AES_BLOCK_SIZE - aes->cOver; if (sz > cSz) { sz = cSz; } XMEMCPY(AES_LASTGBLOCK(aes) + aes->cOver, c, sz); /* Update count of unused encrypted counter. */ aes->cOver += sz; - if (aes->cOver == AES_BLOCK_SIZE) { + if (aes->cOver == WC_AES_BLOCK_SIZE) { /* We have filled up the block and can process. */ GHASH_ONE_BLOCK_AARCH64(aes, AES_LASTGBLOCK(aes)); /* Reset count. 
*/ @@ -1899,12 +1899,12 @@ void GHASH_UPDATE_AARCH64(Aes* aes, const byte* a, word32 aSz, const byte* c, } /* Calculate number of blocks of cipher text and the leftover. */ - blocks = cSz / AES_BLOCK_SIZE; - partial = cSz % AES_BLOCK_SIZE; + blocks = cSz / WC_AES_BLOCK_SIZE; + partial = cSz % WC_AES_BLOCK_SIZE; /* GHASH full blocks now. */ while (blocks--) { GHASH_ONE_BLOCK_AARCH64(aes, c); - c += AES_BLOCK_SIZE; + c += WC_AES_BLOCK_SIZE; } if (partial != 0) { /* Cache the partial block. */ @@ -1933,7 +1933,7 @@ static void GHASH_FINAL_AARCH64(Aes* aes, byte* s, word32 sSz) } if (over > 0) { /* Zeroize the unused part of the block. */ - XMEMSET(AES_LASTGBLOCK(aes) + over, 0, AES_BLOCK_SIZE - over); + XMEMSET(AES_LASTGBLOCK(aes) + over, 0, WC_AES_BLOCK_SIZE - over); /* Hash the last block of cipher text. */ GHASH_ONE_BLOCK_AARCH64(aes, AES_LASTGBLOCK(aes)); } @@ -1945,14 +1945,14 @@ static void GHASH_FINAL_AARCH64(Aes* aes, byte* s, word32 sSz) void AES_GCM_init_AARCH64(Aes* aes, const byte* iv, word32 ivSz) { - ALIGN32 byte counter[AES_BLOCK_SIZE]; + ALIGN32 byte counter[WC_AES_BLOCK_SIZE]; if (ivSz == GCM_NONCE_MID_SZ) { /* Counter is IV with bottom 4 bytes set to: 0x00,0x00,0x00,0x01. */ XMEMCPY(counter, iv, ivSz); XMEMSET(counter + GCM_NONCE_MID_SZ, 0, - AES_BLOCK_SIZE - GCM_NONCE_MID_SZ - 1); - counter[AES_BLOCK_SIZE - 1] = 1; + WC_AES_BLOCK_SIZE - GCM_NONCE_MID_SZ - 1); + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { /* Counter is GHASH of IV. */ @@ -1960,7 +1960,7 @@ void AES_GCM_init_AARCH64(Aes* aes, const byte* iv, word32 ivSz) word32 aadTemp = aes->gcm.aadLen; aes->gcm.aadLen = 0; #endif - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); #ifdef OPENSSL_EXTRA aes->gcm.aadLen = aadTemp; @@ -1968,7 +1968,7 @@ void AES_GCM_init_AARCH64(Aes* aes, const byte* iv, word32 ivSz) } /* Copy in the counter for use with cipher. */ - XMEMCPY(AES_COUNTER(aes), counter, AES_BLOCK_SIZE); + XMEMCPY(AES_COUNTER(aes), counter, WC_AES_BLOCK_SIZE); /* Encrypt initial counter into a buffer for GCM. */ AES_encrypt_AARCH64(counter, AES_INITCTR(aes), (byte*)aes->key, (int)aes->rounds); @@ -1982,12 +1982,12 @@ void AES_GCM_crypt_update_AARCH64(Aes* aes, byte* out, const byte* in, /* Check if previous encrypted block was not used up. */ if (aes->over > 0) { - byte pSz = AES_BLOCK_SIZE - aes->over; + byte pSz = WC_AES_BLOCK_SIZE - aes->over; if (pSz > sz) pSz = sz; /* Use some/all of last encrypted block. */ xorbufout(out, AES_LASTBLOCK(aes) + aes->over, in, pSz); - aes->over = (aes->over + pSz) & (AES_BLOCK_SIZE - 1); + aes->over = (aes->over + pSz) & (WC_AES_BLOCK_SIZE - 1); /* Some data used. */ sz -= pSz; @@ -1997,21 +1997,21 @@ void AES_GCM_crypt_update_AARCH64(Aes* aes, byte* out, const byte* in, /* Calculate the number of blocks needing to be encrypted and any leftover. */ - blocks = sz / AES_BLOCK_SIZE; - partial = sz & (AES_BLOCK_SIZE - 1); + blocks = sz / WC_AES_BLOCK_SIZE; + partial = sz & (WC_AES_BLOCK_SIZE - 1); /* Encrypt block by block. */ while (blocks--) { - ALIGN32 byte scratch[AES_BLOCK_SIZE]; + ALIGN32 byte scratch[WC_AES_BLOCK_SIZE]; IncrementGcmCounter(AES_COUNTER(aes)); /* Encrypt counter into a buffer. */ AES_encrypt_AARCH64(AES_COUNTER(aes), scratch, (byte*)aes->key, (int)aes->rounds); /* XOR plain text into encrypted counter into cipher text buffer. 
*/ - xorbufout(out, scratch, in, AES_BLOCK_SIZE); + xorbufout(out, scratch, in, WC_AES_BLOCK_SIZE); /* Data complete. */ - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; + in += WC_AES_BLOCK_SIZE; + out += WC_AES_BLOCK_SIZE; } if (partial != 0) { @@ -2044,7 +2044,7 @@ void AES_GCM_final_AARCH64(Aes* aes, byte* authTag, word32 authTagSz) aes->gcm.aadLen = aes->aSz; #endif /* Zeroize last block to protect sensitive data. */ - ForceZero(AES_LASTBLOCK(aes), AES_BLOCK_SIZE); + ForceZero(AES_LASTBLOCK(aes), WC_AES_BLOCK_SIZE); } #endif /* WOLFSSL_AESGCM_STREAM */ @@ -2054,8 +2054,8 @@ static void Aes128GcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, const byte* iv, word32 ivSz, byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte counter[AES_BLOCK_SIZE]; - byte scratch[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; /* Noticed different optimization levels treated head of array different. * Some cases was stack pointer plus offset others was a register containing * address. To make uniform for passing in to inline assembly code am using @@ -2064,13 +2064,13 @@ static void Aes128GcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, byte* ctr = counter; byte* keyPt = (byte*)aes->key; - XMEMSET(counter, 0, AES_BLOCK_SIZE); + XMEMSET(counter, 0, WC_AES_BLOCK_SIZE); if (ivSz == GCM_NONCE_MID_SZ) { XMEMCPY(counter, iv, GCM_NONCE_MID_SZ); - counter[AES_BLOCK_SIZE - 1] = 1; + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); } @@ -3697,8 +3697,8 @@ static void Aes192GcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, const byte* iv, word32 ivSz, byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte counter[AES_BLOCK_SIZE]; - byte scratch[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; /* Noticed different optimization levels treated head of array different. * Some cases was stack pointer plus offset others was a register containing * address. To make uniform for passing in to inline assembly code am using @@ -3707,13 +3707,13 @@ static void Aes192GcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, byte* ctr = counter; byte* keyPt = (byte*)aes->key; - XMEMSET(counter, 0, AES_BLOCK_SIZE); + XMEMSET(counter, 0, WC_AES_BLOCK_SIZE); if (ivSz == GCM_NONCE_MID_SZ) { XMEMCPY(counter, iv, GCM_NONCE_MID_SZ); - counter[AES_BLOCK_SIZE - 1] = 1; + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); } @@ -5457,8 +5457,8 @@ static void Aes256GcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, const byte* iv, word32 ivSz, byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte counter[AES_BLOCK_SIZE]; - byte scratch[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; /* Noticed different optimization levels treated head of array different. * Some cases was stack pointer plus offset others was a register containing * address. 
To make uniform for passing in to inline assembly code am using @@ -5467,13 +5467,13 @@ static void Aes256GcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, byte* ctr = counter; byte* keyPt = (byte*)aes->key; - XMEMSET(counter, 0, AES_BLOCK_SIZE); + XMEMSET(counter, 0, WC_AES_BLOCK_SIZE); if (ivSz == GCM_NONCE_MID_SZ) { XMEMCPY(counter, iv, GCM_NONCE_MID_SZ); - counter[AES_BLOCK_SIZE - 1] = 1; + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); } @@ -7397,19 +7397,19 @@ static int Aes128GcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, const byte* iv, word32 ivSz, const byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte counter[AES_BLOCK_SIZE]; - byte scratch[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; byte *ctr = counter; byte* keyPt = (byte*)aes->key; int ret = 0; - XMEMSET(counter, 0, AES_BLOCK_SIZE); + XMEMSET(counter, 0, WC_AES_BLOCK_SIZE); if (ivSz == GCM_NONCE_MID_SZ) { XMEMCPY(counter, iv, GCM_NONCE_MID_SZ); - counter[AES_BLOCK_SIZE - 1] = 1; + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); } @@ -9048,19 +9048,19 @@ static int Aes192GcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, const byte* iv, word32 ivSz, const byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte counter[AES_BLOCK_SIZE]; - byte scratch[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; byte *ctr = counter; byte* keyPt = (byte*)aes->key; int ret = 0; - XMEMSET(counter, 0, AES_BLOCK_SIZE); + XMEMSET(counter, 0, WC_AES_BLOCK_SIZE); if (ivSz == GCM_NONCE_MID_SZ) { XMEMCPY(counter, iv, GCM_NONCE_MID_SZ); - counter[AES_BLOCK_SIZE - 1] = 1; + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); } @@ -10816,19 +10816,19 @@ static int Aes256GcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, const byte* iv, word32 ivSz, const byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte counter[AES_BLOCK_SIZE]; - byte scratch[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; byte *ctr = counter; byte* keyPt = (byte*)aes->key; int ret = 0; - XMEMSET(counter, 0, AES_BLOCK_SIZE); + XMEMSET(counter, 0, WC_AES_BLOCK_SIZE); if (ivSz == GCM_NONCE_MID_SZ) { XMEMCPY(counter, iv, GCM_NONCE_MID_SZ); - counter[AES_BLOCK_SIZE - 1] = 1; + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); } @@ -12909,7 +12909,7 @@ int AES_GCM_decrypt_AARCH64(Aes* aes, byte* out, const byte* in, word32 sz, #ifdef HAVE_AES_CBC int wc_AesCbcEncrypt(Aes* aes, byte* out, const byte* in, word32 sz) { - word32 numBlocks = sz / AES_BLOCK_SIZE; + word32 numBlocks = sz / WC_AES_BLOCK_SIZE; if (aes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; @@ -12920,7 +12920,7 @@ int AES_GCM_decrypt_AARCH64(Aes* aes, byte* out, const byte* in, 
word32 sz, } #ifdef WOLFSSL_AES_CBC_LENGTH_CHECKS - if (sz % AES_BLOCK_SIZE) { + if (sz % WC_AES_BLOCK_SIZE) { return BAD_LENGTH_E; } #endif @@ -13151,7 +13151,7 @@ int AES_GCM_decrypt_AARCH64(Aes* aes, byte* out, const byte* in, word32 sz, #ifdef HAVE_AES_DECRYPT int wc_AesCbcDecrypt(Aes* aes, byte* out, const byte* in, word32 sz) { - word32 numBlocks = sz / AES_BLOCK_SIZE; + word32 numBlocks = sz / WC_AES_BLOCK_SIZE; if (aes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; @@ -13161,7 +13161,7 @@ int AES_GCM_decrypt_AARCH64(Aes* aes, byte* out, const byte* in, word32 sz, return 0; } - if (sz % AES_BLOCK_SIZE) { + if (sz % WC_AES_BLOCK_SIZE) { #ifdef WOLFSSL_AES_CBC_LENGTH_CHECKS return BAD_LENGTH_E; #else @@ -13984,7 +13984,7 @@ int wc_AesCtrEncrypt(Aes* aes, byte* out, const byte* in, word32 sz) } - tmp = (byte*)aes->tmp + AES_BLOCK_SIZE - aes->left; + tmp = (byte*)aes->tmp + WC_AES_BLOCK_SIZE - aes->left; /* consume any unused bytes left in aes->tmp */ while ((aes->left != 0) && (sz != 0)) { @@ -13994,22 +13994,22 @@ int wc_AesCtrEncrypt(Aes* aes, byte* out, const byte* in, word32 sz) } /* do as many block size ops as possible */ - numBlocks = sz / AES_BLOCK_SIZE; + numBlocks = sz / WC_AES_BLOCK_SIZE; if (numBlocks > 0) { wc_aes_ctr_encrypt_asm(aes, out, in, numBlocks); - sz -= numBlocks * AES_BLOCK_SIZE; - out += numBlocks * AES_BLOCK_SIZE; - in += numBlocks * AES_BLOCK_SIZE; + sz -= numBlocks * WC_AES_BLOCK_SIZE; + out += numBlocks * WC_AES_BLOCK_SIZE; + in += numBlocks * WC_AES_BLOCK_SIZE; } /* handle non block size remaining */ if (sz) { - byte zeros[AES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, + byte zeros[WC_AES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; wc_aes_ctr_encrypt_asm(aes, (byte*)aes->tmp, zeros, 1); - aes->left = AES_BLOCK_SIZE; + aes->left = WC_AES_BLOCK_SIZE; tmp = (byte*)aes->tmp; while (sz--) { @@ -14097,43 +14097,43 @@ void GMULT(byte* X, byte* Y) void GHASH(Gcm* gcm, const byte* a, word32 aSz, const byte* c, word32 cSz, byte* s, word32 sSz) { - byte x[AES_BLOCK_SIZE]; - byte scratch[AES_BLOCK_SIZE]; + byte x[WC_AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; word32 blocks, partial; byte* h = gcm->H; - XMEMSET(x, 0, AES_BLOCK_SIZE); + XMEMSET(x, 0, WC_AES_BLOCK_SIZE); /* Hash in A, the Additional Authentication Data */ if (aSz != 0 && a != NULL) { - blocks = aSz / AES_BLOCK_SIZE; - partial = aSz % AES_BLOCK_SIZE; + blocks = aSz / WC_AES_BLOCK_SIZE; + partial = aSz % WC_AES_BLOCK_SIZE; while (blocks--) { - xorbuf(x, a, AES_BLOCK_SIZE); + xorbuf(x, a, WC_AES_BLOCK_SIZE); GMULT(x, h); - a += AES_BLOCK_SIZE; + a += WC_AES_BLOCK_SIZE; } if (partial != 0) { - XMEMSET(scratch, 0, AES_BLOCK_SIZE); + XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE); XMEMCPY(scratch, a, partial); - xorbuf(x, scratch, AES_BLOCK_SIZE); + xorbuf(x, scratch, WC_AES_BLOCK_SIZE); GMULT(x, h); } } /* Hash in C, the Ciphertext */ if (cSz != 0 && c != NULL) { - blocks = cSz / AES_BLOCK_SIZE; - partial = cSz % AES_BLOCK_SIZE; + blocks = cSz / WC_AES_BLOCK_SIZE; + partial = cSz % WC_AES_BLOCK_SIZE; while (blocks--) { - xorbuf(x, c, AES_BLOCK_SIZE); + xorbuf(x, c, WC_AES_BLOCK_SIZE); GMULT(x, h); - c += AES_BLOCK_SIZE; + c += WC_AES_BLOCK_SIZE; } if (partial != 0) { - XMEMSET(scratch, 0, AES_BLOCK_SIZE); + XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE); XMEMCPY(scratch, c, partial); - xorbuf(x, scratch, AES_BLOCK_SIZE); + xorbuf(x, scratch, WC_AES_BLOCK_SIZE); GMULT(x, h); } } @@ -14141,7 +14141,7 @@ void GHASH(Gcm* gcm, const byte* a, word32 aSz, const byte* c, word32 cSz, /* Hash 
in the lengths of A and C in bits */ FlattenSzInBits(&scratch[0], aSz); FlattenSzInBits(&scratch[8], cSz); - xorbuf(x, scratch, AES_BLOCK_SIZE); + xorbuf(x, scratch, WC_AES_BLOCK_SIZE); GMULT(x, h); /* Copy the result into s. */ @@ -14167,14 +14167,14 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - word32 blocks = sz / AES_BLOCK_SIZE; - word32 partial = sz % AES_BLOCK_SIZE; + word32 blocks = sz / WC_AES_BLOCK_SIZE; + word32 partial = sz % WC_AES_BLOCK_SIZE; const byte* p = in; byte* c = out; - byte counter[AES_BLOCK_SIZE]; - byte initialCounter[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte initialCounter[WC_AES_BLOCK_SIZE]; byte *ctr ; - byte scratch[AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; ctr = counter ; /* sanity checks */ @@ -14186,28 +14186,28 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, return BAD_FUNC_ARG; } - if (authTagSz < WOLFSSL_MIN_AUTH_TAG_SZ || authTagSz > AES_BLOCK_SIZE) { + if (authTagSz < WOLFSSL_MIN_AUTH_TAG_SZ || authTagSz > WC_AES_BLOCK_SIZE) { WOLFSSL_MSG("GcmEncrypt authTagSz error"); return BAD_FUNC_ARG; } - XMEMSET(initialCounter, 0, AES_BLOCK_SIZE); + XMEMSET(initialCounter, 0, WC_AES_BLOCK_SIZE); if (ivSz == GCM_NONCE_MID_SZ) { XMEMCPY(initialCounter, iv, ivSz); - initialCounter[AES_BLOCK_SIZE - 1] = 1; + initialCounter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH(&aes->gcm, NULL, 0, iv, ivSz, initialCounter, AES_BLOCK_SIZE); + GHASH(&aes->gcm, NULL, 0, iv, ivSz, initialCounter, WC_AES_BLOCK_SIZE); } - XMEMCPY(ctr, initialCounter, AES_BLOCK_SIZE); + XMEMCPY(ctr, initialCounter, WC_AES_BLOCK_SIZE); while (blocks--) { IncrementGcmCounter(ctr); wc_AesEncrypt(aes, ctr, scratch); - xorbuf(scratch, p, AES_BLOCK_SIZE); - XMEMCPY(c, scratch, AES_BLOCK_SIZE); - p += AES_BLOCK_SIZE; - c += AES_BLOCK_SIZE; + xorbuf(scratch, p, WC_AES_BLOCK_SIZE); + XMEMCPY(c, scratch, WC_AES_BLOCK_SIZE); + p += WC_AES_BLOCK_SIZE; + c += WC_AES_BLOCK_SIZE; } if (partial != 0) { @@ -14220,8 +14220,8 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, GHASH(&aes->gcm, authIn, authInSz, out, sz, authTag, authTagSz); wc_AesEncrypt(aes, initialCounter, scratch); - if (authTagSz > AES_BLOCK_SIZE) { - xorbuf(authTag, scratch, AES_BLOCK_SIZE); + if (authTagSz > WC_AES_BLOCK_SIZE) { + xorbuf(authTag, scratch, WC_AES_BLOCK_SIZE); } else { xorbuf(authTag, scratch, authTagSz); @@ -14250,39 +14250,39 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, const byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - word32 blocks = sz / AES_BLOCK_SIZE; - word32 partial = sz % AES_BLOCK_SIZE; + word32 blocks = sz / WC_AES_BLOCK_SIZE; + word32 partial = sz % WC_AES_BLOCK_SIZE; const byte* c = in; byte* p = out; - byte counter[AES_BLOCK_SIZE]; - byte initialCounter[AES_BLOCK_SIZE]; + byte counter[WC_AES_BLOCK_SIZE]; + byte initialCounter[WC_AES_BLOCK_SIZE]; byte *ctr ; - byte scratch[AES_BLOCK_SIZE]; + byte scratch[WC_AES_BLOCK_SIZE]; ctr = counter ; /* sanity checks */ if (aes == NULL || iv == NULL || (sz != 0 && (in == NULL || out == NULL)) || - authTag == NULL || authTagSz > AES_BLOCK_SIZE || authTagSz == 0 || + authTag == NULL || authTagSz > WC_AES_BLOCK_SIZE || authTagSz == 0 || ivSz == 0) { WOLFSSL_MSG("a NULL parameter passed in when size is larger than 0"); return BAD_FUNC_ARG; } - XMEMSET(initialCounter, 0, AES_BLOCK_SIZE); + XMEMSET(initialCounter, 0, WC_AES_BLOCK_SIZE); if (ivSz == 
GCM_NONCE_MID_SZ) { XMEMCPY(initialCounter, iv, ivSz); - initialCounter[AES_BLOCK_SIZE - 1] = 1; + initialCounter[WC_AES_BLOCK_SIZE - 1] = 1; } else { - GHASH(&aes->gcm, NULL, 0, iv, ivSz, initialCounter, AES_BLOCK_SIZE); + GHASH(&aes->gcm, NULL, 0, iv, ivSz, initialCounter, WC_AES_BLOCK_SIZE); } - XMEMCPY(ctr, initialCounter, AES_BLOCK_SIZE); + XMEMCPY(ctr, initialCounter, WC_AES_BLOCK_SIZE); /* Calculate the authTag again using the received auth data and the * cipher text. */ { - byte Tprime[AES_BLOCK_SIZE]; - byte EKY0[AES_BLOCK_SIZE]; + byte Tprime[WC_AES_BLOCK_SIZE]; + byte EKY0[WC_AES_BLOCK_SIZE]; GHASH(&aes->gcm, authIn, authInSz, in, sz, Tprime, sizeof(Tprime)); wc_AesEncrypt(aes, ctr, EKY0); @@ -14297,10 +14297,10 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, IncrementGcmCounter(ctr); wc_AesEncrypt(aes, ctr, scratch); #endif - xorbuf(scratch, c, AES_BLOCK_SIZE); - XMEMCPY(p, scratch, AES_BLOCK_SIZE); - p += AES_BLOCK_SIZE; - c += AES_BLOCK_SIZE; + xorbuf(scratch, c, WC_AES_BLOCK_SIZE); + XMEMCPY(p, scratch, WC_AES_BLOCK_SIZE); + p += WC_AES_BLOCK_SIZE; + c += WC_AES_BLOCK_SIZE; } if (partial != 0) { IncrementGcmCounter(ctr); @@ -14323,15 +14323,15 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, #ifdef WOLFSSL_AESGCM_STREAM #ifndef __aarch64__ /* Access initialization counter data. */ - #define AES_INITCTR(aes) ((aes)->streamData + 0 * AES_BLOCK_SIZE) + #define AES_INITCTR(aes) ((aes)->streamData + 0 * WC_AES_BLOCK_SIZE) /* Access counter data. */ - #define AES_COUNTER(aes) ((aes)->streamData + 1 * AES_BLOCK_SIZE) + #define AES_COUNTER(aes) ((aes)->streamData + 1 * WC_AES_BLOCK_SIZE) /* Access tag data. */ - #define AES_TAG(aes) ((aes)->streamData + 2 * AES_BLOCK_SIZE) + #define AES_TAG(aes) ((aes)->streamData + 2 * WC_AES_BLOCK_SIZE) /* Access last GHASH block. */ - #define AES_LASTGBLOCK(aes) ((aes)->streamData + 3 * AES_BLOCK_SIZE) + #define AES_LASTGBLOCK(aes) ((aes)->streamData + 3 * WC_AES_BLOCK_SIZE) /* Access last encrypted block. */ - #define AES_LASTBLOCK(aes) ((aes)->streamData + 4 * AES_BLOCK_SIZE) + #define AES_LASTBLOCK(aes) ((aes)->streamData + 4 * WC_AES_BLOCK_SIZE) /* GHASH one block of data. * @@ -14342,7 +14342,7 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, */ #define GHASH_ONE_BLOCK(aes, block) \ do { \ - xorbuf(AES_TAG(aes), block, AES_BLOCK_SIZE); \ + xorbuf(AES_TAG(aes), block, WC_AES_BLOCK_SIZE); \ GMULT(AES_TAG(aes), aes->gcm.H); \ } \ while (0) @@ -14355,7 +14355,7 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, */ #define GHASH_LEN_BLOCK(aes) \ do { \ - byte scratch[AES_BLOCK_SIZE]; \ + byte scratch[WC_AES_BLOCK_SIZE]; \ FlattenSzInBits(&scratch[0], aes->aSz); \ FlattenSzInBits(&scratch[8], aes->cSz); \ GHASH_ONE_BLOCK(aes, scratch); \ @@ -14377,7 +14377,7 @@ static WC_INLINE void IncCtr(byte* ctr, word32 ctrSz) */ static void GHASH_INIT(Aes* aes) { /* Set tag to all zeros as initial value. */ - XMEMSET(AES_TAG(aes), 0, AES_BLOCK_SIZE); + XMEMSET(AES_TAG(aes), 0, WC_AES_BLOCK_SIZE); /* Reset counts of AAD and cipher text. */ aes->aOver = 0; aes->cOver = 0; @@ -14404,14 +14404,14 @@ static void GHASH_UPDATE(Aes* aes, const byte* a, word32 aSz, const byte* c, /* Check if we have unprocessed data. */ if (aes->aOver > 0) { /* Calculate amount we can use - fill up the block. */ - byte sz = AES_BLOCK_SIZE - aes->aOver; + byte sz = WC_AES_BLOCK_SIZE - aes->aOver; if (sz > aSz) { sz = aSz; } /* Copy extra into last GHASH block array and update count. 
*/ XMEMCPY(AES_LASTGBLOCK(aes) + aes->aOver, a, sz); aes->aOver += sz; - if (aes->aOver == AES_BLOCK_SIZE) { + if (aes->aOver == WC_AES_BLOCK_SIZE) { /* We have filled up the block and can process. */ GHASH_ONE_BLOCK(aes, AES_LASTGBLOCK(aes)); /* Reset count. */ @@ -14423,12 +14423,12 @@ static void GHASH_UPDATE(Aes* aes, const byte* a, word32 aSz, const byte* c, } /* Calculate number of blocks of AAD and the leftover. */ - blocks = aSz / AES_BLOCK_SIZE; - partial = aSz % AES_BLOCK_SIZE; + blocks = aSz / WC_AES_BLOCK_SIZE; + partial = aSz % WC_AES_BLOCK_SIZE; /* GHASH full blocks now. */ while (blocks--) { GHASH_ONE_BLOCK(aes, a); - a += AES_BLOCK_SIZE; + a += WC_AES_BLOCK_SIZE; } if (partial != 0) { /* Cache the partial block. */ @@ -14439,7 +14439,7 @@ static void GHASH_UPDATE(Aes* aes, const byte* a, word32 aSz, const byte* c, if (aes->aOver > 0 && cSz > 0 && c != NULL) { /* No more AAD coming and we have a partial block. */ /* Fill the rest of the block with zeros. */ - byte sz = AES_BLOCK_SIZE - aes->aOver; + byte sz = WC_AES_BLOCK_SIZE - aes->aOver; XMEMSET(AES_LASTGBLOCK(aes) + aes->aOver, 0, sz); /* GHASH last AAD block. */ GHASH_ONE_BLOCK(aes, AES_LASTGBLOCK(aes)); @@ -14453,14 +14453,14 @@ static void GHASH_UPDATE(Aes* aes, const byte* a, word32 aSz, const byte* c, aes->cSz += cSz; if (aes->cOver > 0) { /* Calculate amount we can use - fill up the block. */ - byte sz = AES_BLOCK_SIZE - aes->cOver; + byte sz = WC_AES_BLOCK_SIZE - aes->cOver; if (sz > cSz) { sz = cSz; } XMEMCPY(AES_LASTGBLOCK(aes) + aes->cOver, c, sz); /* Update count of unused encrypted counter. */ aes->cOver += sz; - if (aes->cOver == AES_BLOCK_SIZE) { + if (aes->cOver == WC_AES_BLOCK_SIZE) { /* We have filled up the block and can process. */ GHASH_ONE_BLOCK(aes, AES_LASTGBLOCK(aes)); /* Reset count. */ @@ -14472,12 +14472,12 @@ static void GHASH_UPDATE(Aes* aes, const byte* a, word32 aSz, const byte* c, } /* Calculate number of blocks of cipher text and the leftover. */ - blocks = cSz / AES_BLOCK_SIZE; - partial = cSz % AES_BLOCK_SIZE; + blocks = cSz / WC_AES_BLOCK_SIZE; + partial = cSz % WC_AES_BLOCK_SIZE; /* GHASH full blocks now. */ while (blocks--) { GHASH_ONE_BLOCK(aes, c); - c += AES_BLOCK_SIZE; + c += WC_AES_BLOCK_SIZE; } if (partial != 0) { /* Cache the partial block. */ @@ -14506,7 +14506,7 @@ static void GHASH_FINAL(Aes* aes, byte* s, word32 sSz) } if (over > 0) { /* Zeroize the unused part of the block. */ - XMEMSET(AES_LASTGBLOCK(aes) + over, 0, AES_BLOCK_SIZE - over); + XMEMSET(AES_LASTGBLOCK(aes) + over, 0, WC_AES_BLOCK_SIZE - over); /* Hash the last block of cipher text. */ GHASH_ONE_BLOCK(aes, AES_LASTGBLOCK(aes)); } @@ -14524,14 +14524,14 @@ static void GHASH_FINAL(Aes* aes, byte* s, word32 sSz) */ static void AesGcmInit_C(Aes* aes, const byte* iv, word32 ivSz) { - ALIGN32 byte counter[AES_BLOCK_SIZE]; + ALIGN32 byte counter[WC_AES_BLOCK_SIZE]; if (ivSz == GCM_NONCE_MID_SZ) { /* Counter is IV with bottom 4 bytes set to: 0x00,0x00,0x00,0x01. */ XMEMCPY(counter, iv, ivSz); XMEMSET(counter + GCM_NONCE_MID_SZ, 0, - AES_BLOCK_SIZE - GCM_NONCE_MID_SZ - 1); - counter[AES_BLOCK_SIZE - 1] = 1; + WC_AES_BLOCK_SIZE - GCM_NONCE_MID_SZ - 1); + counter[WC_AES_BLOCK_SIZE - 1] = 1; } else { /* Counter is GHASH of IV. 
*/ @@ -14540,10 +14540,10 @@ static void AesGcmInit_C(Aes* aes, const byte* iv, word32 ivSz) aes->gcm.aadLen = 0; #endif #ifdef __aarch64__ - GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH_AARCH64(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT_AARCH64(counter, aes->gcm.H); #else - GHASH(&aes->gcm, NULL, 0, iv, ivSz, counter, AES_BLOCK_SIZE); + GHASH(&aes->gcm, NULL, 0, iv, ivSz, counter, WC_AES_BLOCK_SIZE); GMULT(counter, aes->gcm.H); #endif #ifdef OPENSSL_EXTRA @@ -14552,7 +14552,7 @@ static void AesGcmInit_C(Aes* aes, const byte* iv, word32 ivSz) } /* Copy in the counter for use with cipher. */ - XMEMCPY(AES_COUNTER(aes), counter, AES_BLOCK_SIZE); + XMEMCPY(AES_COUNTER(aes), counter, WC_AES_BLOCK_SIZE); /* Encrypt initial counter into a buffer for GCM. */ AES_encrypt_AARCH64(counter, AES_INITCTR(aes), (byte*)aes->key, aes->rounds); @@ -14580,12 +14580,12 @@ static void AesGcmCryptUpdate_C(Aes* aes, byte* out, const byte* in, word32 sz) /* Check if previous encrypted block was not used up. */ if (aes->over > 0) { - byte pSz = AES_BLOCK_SIZE - aes->over; + byte pSz = WC_AES_BLOCK_SIZE - aes->over; if (pSz > sz) pSz = sz; /* Use some/all of last encrypted block. */ xorbufout(out, AES_LASTBLOCK(aes) + aes->over, in, pSz); - aes->over = (aes->over + pSz) & (AES_BLOCK_SIZE - 1); + aes->over = (aes->over + pSz) & (WC_AES_BLOCK_SIZE - 1); /* Some data used. */ sz -= pSz; @@ -14595,21 +14595,21 @@ static void AesGcmCryptUpdate_C(Aes* aes, byte* out, const byte* in, word32 sz) /* Calculate the number of blocks needing to be encrypted and any leftover. */ - blocks = sz / AES_BLOCK_SIZE; - partial = sz & (AES_BLOCK_SIZE - 1); + blocks = sz / WC_AES_BLOCK_SIZE; + partial = sz & (WC_AES_BLOCK_SIZE - 1); /* Encrypt block by block. */ while (blocks--) { - ALIGN32 byte scratch[AES_BLOCK_SIZE]; + ALIGN32 byte scratch[WC_AES_BLOCK_SIZE]; IncrementGcmCounter(AES_COUNTER(aes)); /* Encrypt counter into a buffer. */ AES_encrypt_AARCH64(AES_COUNTER(aes), scratch, (byte*)aes->key, aes->rounds); /* XOR plain text into encrypted counter into cipher text buffer. */ - xorbufout(out, scratch, in, AES_BLOCK_SIZE); + xorbufout(out, scratch, in, WC_AES_BLOCK_SIZE); /* Data complete. */ - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; + in += WC_AES_BLOCK_SIZE; + out += WC_AES_BLOCK_SIZE; } if (partial != 0) { @@ -14642,7 +14642,7 @@ static void AesGcmFinal_C(Aes* aes, byte* authTag, word32 authTagSz) aes->gcm.aadLen = aes->aSz; #endif /* Zeroize last block to protect sensitive data. */ - ForceZero(AES_LASTBLOCK(aes), AES_BLOCK_SIZE); + ForceZero(AES_LASTBLOCK(aes), WC_AES_BLOCK_SIZE); } /* Initialize an AES GCM cipher for encryption or decryption. @@ -14673,7 +14673,7 @@ int wc_AesGcmInit(Aes* aes, const byte* key, word32 len, const byte* iv, #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI) if ((ret == 0) && (aes->streamData == NULL)) { /* Allocate buffers for streaming. */ - aes->streamData = (byte*)XMALLOC(5 * AES_BLOCK_SIZE, aes->heap, + aes->streamData = (byte*)XMALLOC(5 * WC_AES_BLOCK_SIZE, aes->heap, DYNAMIC_TYPE_AES); if (aes->streamData == NULL) { ret = MEMORY_E; @@ -14688,7 +14688,7 @@ int wc_AesGcmInit(Aes* aes, const byte* key, word32 len, const byte* iv, if (ret == 0) { /* Set the IV passed in if it is smaller than a block. 
*/ - if ((iv != NULL) && (ivSz <= AES_BLOCK_SIZE)) { + if ((iv != NULL) && (ivSz <= WC_AES_BLOCK_SIZE)) { XMEMMOVE((byte*)aes->reg, iv, ivSz); aes->nonceSz = ivSz; } @@ -14822,7 +14822,7 @@ int wc_AesGcmEncryptFinal(Aes* aes, byte* authTag, word32 authTagSz) int ret = 0; /* Check validity of parameters. */ - if ((aes == NULL) || (authTag == NULL) || (authTagSz > AES_BLOCK_SIZE) || + if ((aes == NULL) || (authTag == NULL) || (authTagSz > WC_AES_BLOCK_SIZE) || (authTagSz == 0)) { ret = BAD_FUNC_ARG; } @@ -14933,7 +14933,7 @@ int wc_AesGcmDecryptFinal(Aes* aes, const byte* authTag, word32 authTagSz) int ret = 0; /* Check validity of parameters. */ - if ((aes == NULL) || (authTag == NULL) || (authTagSz > AES_BLOCK_SIZE) || + if ((aes == NULL) || (authTag == NULL) || (authTagSz > WC_AES_BLOCK_SIZE) || (authTagSz == 0)) { ret = BAD_FUNC_ARG; } @@ -14949,7 +14949,7 @@ int wc_AesGcmDecryptFinal(Aes* aes, const byte* authTag, word32 authTagSz) if (ret == 0) { /* Calculate authentication tag and compare with one passed in.. */ - ALIGN32 byte calcTag[AES_BLOCK_SIZE]; + ALIGN32 byte calcTag[WC_AES_BLOCK_SIZE]; /* Calculate authentication tag. */ AesGcmFinal_C(aes, calcTag, authTagSz); /* Check calculated tag matches the one passed in. */ @@ -14974,10 +14974,10 @@ int wc_AesGcmDecryptFinal(Aes* aes, const byte* authTag, word32 authTagSz) static void roll_x(Aes* aes, const byte* in, word32 inSz, byte* out) { /* process the bulk of the data */ - while (inSz >= AES_BLOCK_SIZE) { - xorbuf(out, in, AES_BLOCK_SIZE); - in += AES_BLOCK_SIZE; - inSz -= AES_BLOCK_SIZE; + while (inSz >= WC_AES_BLOCK_SIZE) { + xorbuf(out, in, WC_AES_BLOCK_SIZE); + in += WC_AES_BLOCK_SIZE; + inSz -= WC_AES_BLOCK_SIZE; wc_AesEncrypt(aes, out, out); } @@ -15016,7 +15016,7 @@ static void roll_auth(Aes* aes, const byte* in, word32 inSz, byte* out) return; /* start fill out the rest of the first block */ - remainder = AES_BLOCK_SIZE - authLenSz; + remainder = WC_AES_BLOCK_SIZE - authLenSz; if (inSz >= remainder) { /* plenty of bulk data to fill the remainder of this block */ xorbuf(out + authLenSz, in, remainder); @@ -15040,7 +15040,7 @@ static WC_INLINE void AesCcmCtrInc(byte* B, word32 lenSz) word32 i; for (i = 0; i < lenSz; i++) { - if (++B[AES_BLOCK_SIZE - 1 - i] != 0) return; + if (++B[WC_AES_BLOCK_SIZE - 1 - i] != 0) return; } } @@ -15051,8 +15051,8 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte A[AES_BLOCK_SIZE]; - byte B[AES_BLOCK_SIZE]; + byte A[WC_AES_BLOCK_SIZE]; + byte B[WC_AES_BLOCK_SIZE]; byte lenSz; word32 i; byte mask = 0xFF; @@ -15082,14 +15082,14 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, #endif XMEMCPY(B+1, nonce, nonceSz); - lenSz = AES_BLOCK_SIZE - 1 - (byte)nonceSz; + lenSz = WC_AES_BLOCK_SIZE - 1 - (byte)nonceSz; B[0] = (authInSz > 0 ? 
64 : 0) + (8 * (((byte)authTagSz - 2) / 2)) + (lenSz - 1); for (i = 0; i < lenSz; i++) { if (mask && i >= wordSz) mask = 0x00; - B[AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; + B[WC_AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; } wc_AesEncrypt(aes, B, A); @@ -15102,20 +15102,20 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, B[0] = lenSz - 1; for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; wc_AesEncrypt(aes, B, A); xorbuf(authTag, A, authTagSz); B[15] = 1; - while (inSz >= AES_BLOCK_SIZE) { + while (inSz >= WC_AES_BLOCK_SIZE) { wc_AesEncrypt(aes, B, A); - xorbuf(A, in, AES_BLOCK_SIZE); - XMEMCPY(out, A, AES_BLOCK_SIZE); + xorbuf(A, in, WC_AES_BLOCK_SIZE); + XMEMCPY(out, A, WC_AES_BLOCK_SIZE); AesCcmCtrInc(B, lenSz); - inSz -= AES_BLOCK_SIZE; - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; + inSz -= WC_AES_BLOCK_SIZE; + in += WC_AES_BLOCK_SIZE; + out += WC_AES_BLOCK_SIZE; } if (inSz > 0) { wc_AesEncrypt(aes, B, A); @@ -15123,8 +15123,8 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, XMEMCPY(out, A, inSz); } - ForceZero(A, AES_BLOCK_SIZE); - ForceZero(B, AES_BLOCK_SIZE); + ForceZero(A, WC_AES_BLOCK_SIZE); + ForceZero(B, WC_AES_BLOCK_SIZE); return 0; } @@ -15135,8 +15135,8 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, const byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte A[AES_BLOCK_SIZE]; - byte B[AES_BLOCK_SIZE]; + byte A[WC_AES_BLOCK_SIZE]; + byte B[WC_AES_BLOCK_SIZE]; byte* o; byte lenSz; word32 i, oSz; @@ -15170,22 +15170,22 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, o = out; oSz = inSz; XMEMCPY(B+1, nonce, nonceSz); - lenSz = AES_BLOCK_SIZE - 1 - (byte)nonceSz; + lenSz = WC_AES_BLOCK_SIZE - 1 - (byte)nonceSz; B[0] = lenSz - 1; for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; B[15] = 1; - while (oSz >= AES_BLOCK_SIZE) { + while (oSz >= WC_AES_BLOCK_SIZE) { wc_AesEncrypt(aes, B, A); - xorbuf(A, in, AES_BLOCK_SIZE); - XMEMCPY(o, A, AES_BLOCK_SIZE); + xorbuf(A, in, WC_AES_BLOCK_SIZE); + XMEMCPY(o, A, WC_AES_BLOCK_SIZE); AesCcmCtrInc(B, lenSz); - oSz -= AES_BLOCK_SIZE; - in += AES_BLOCK_SIZE; - o += AES_BLOCK_SIZE; + oSz -= WC_AES_BLOCK_SIZE; + in += WC_AES_BLOCK_SIZE; + o += WC_AES_BLOCK_SIZE; } if (inSz > 0) { wc_AesEncrypt(aes, B, A); @@ -15194,7 +15194,7 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, } for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; wc_AesEncrypt(aes, B, A); o = out; @@ -15206,7 +15206,7 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, for (i = 0; i < lenSz; i++) { if (mask && i >= wordSz) mask = 0x00; - B[AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; + B[WC_AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; } wc_AesEncrypt(aes, B, A); @@ -15218,7 +15218,7 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, B[0] = lenSz - 1; for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; wc_AesEncrypt(aes, B, B); xorbuf(A, B, authTagSz); @@ -15230,8 +15230,8 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, result = AES_CCM_AUTH_E; } - ForceZero(A, AES_BLOCK_SIZE); - ForceZero(B, AES_BLOCK_SIZE); + ForceZero(A, WC_AES_BLOCK_SIZE); + ForceZero(B, WC_AES_BLOCK_SIZE); o = NULL; return result; @@ -15264,12 
+15264,12 @@ void AES_GCM_set_key_AARCH64(Aes* aes, byte* iv) int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len) { int ret; - byte iv[AES_BLOCK_SIZE]; + byte iv[WC_AES_BLOCK_SIZE]; if (!((len == 16) || (len == 24) || (len == 32))) return BAD_FUNC_ARG; - XMEMSET(iv, 0, AES_BLOCK_SIZE); + XMEMSET(iv, 0, WC_AES_BLOCK_SIZE); ret = wc_AesSetKey(aes, key, len, iv, AES_ENCRYPTION); if (ret == 0) { @@ -15501,7 +15501,7 @@ int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len) * in input plain text buffer to encrypt * sz size of both out and in buffers * i value to use for tweak - * iSz size of i buffer, should always be AES_BLOCK_SIZE but having this input + * iSz size of i buffer, should always be WC_AES_BLOCK_SIZE but having this input * adds a sanity check on how the user calls the function. * * returns 0 on success @@ -15509,8 +15509,8 @@ int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len) void AES_XTS_encrypt_AARCH64(XtsAes* xaes, byte* out, const byte* in, word32 sz, const byte* i) { - word32 blocks = (sz / AES_BLOCK_SIZE); - byte tmp[AES_BLOCK_SIZE]; + word32 blocks = (sz / WC_AES_BLOCK_SIZE); + byte tmp[WC_AES_BLOCK_SIZE]; __asm__ __volatile__ ( "MOV x19, 0x87 \n" @@ -15821,7 +15821,7 @@ void AES_XTS_encrypt_AARCH64(XtsAes* xaes, byte* out, const byte* in, word32 sz, * in input cipher text buffer to decrypt * sz size of both out and in buffers * i value to use for tweak - * iSz size of i buffer, should always be AES_BLOCK_SIZE but having this input + * iSz size of i buffer, should always be WC_AES_BLOCK_SIZE but having this input * adds a sanity check on how the user calls the function. * * returns 0 on success @@ -15829,9 +15829,9 @@ void AES_XTS_encrypt_AARCH64(XtsAes* xaes, byte* out, const byte* in, word32 sz, void AES_XTS_decrypt_AARCH64(XtsAes* xaes, byte* out, const byte* in, word32 sz, const byte* i) { - word32 blocks = (sz / AES_BLOCK_SIZE); - byte tmp[AES_BLOCK_SIZE]; - byte stl = (sz % AES_BLOCK_SIZE); + word32 blocks = (sz / WC_AES_BLOCK_SIZE); + byte tmp[WC_AES_BLOCK_SIZE]; + byte stl = (sz % WC_AES_BLOCK_SIZE); /* if Stealing then break out of loop one block early to handle special * case */ @@ -16342,7 +16342,7 @@ void AES_XTS_decrypt_AARCH64(XtsAes* xaes, byte* out, const byte* in, word32 sz, * in input plain text buffer to encrypt * sz size of both out and in buffers * i value to use for tweak - * iSz size of i buffer, should always be AES_BLOCK_SIZE but having this input + * iSz size of i buffer, should always be WC_AES_BLOCK_SIZE but having this input * adds a sanity check on how the user calls the function. * * returns 0 on success @@ -16351,15 +16351,15 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, const byte* i, word32 iSz) { int ret = 0; - word32 blocks = (sz / AES_BLOCK_SIZE); - byte tmp[AES_BLOCK_SIZE]; + word32 blocks = (sz / WC_AES_BLOCK_SIZE); + byte tmp[WC_AES_BLOCK_SIZE]; word32* key2 = xaes->tweak.key; if (xaes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; } - if (iSz < AES_BLOCK_SIZE) { + if (iSz < WC_AES_BLOCK_SIZE) { return BAD_FUNC_ARG; } @@ -16476,7 +16476,7 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, * in input cipher text buffer to decrypt * sz size of both out and in buffers * i value to use for tweak - * iSz size of i buffer, should always be AES_BLOCK_SIZE but having this input + * iSz size of i buffer, should always be WC_AES_BLOCK_SIZE but having this input * adds a sanity check on how the user calls the function. 
* * returns 0 on success @@ -16485,16 +16485,16 @@ int wc_AesXtsDecrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, const byte* i, word32 iSz) { int ret = 0; - word32 blocks = (sz / AES_BLOCK_SIZE); - byte tmp[AES_BLOCK_SIZE]; - byte stl = (sz % AES_BLOCK_SIZE); + word32 blocks = (sz / WC_AES_BLOCK_SIZE); + byte tmp[WC_AES_BLOCK_SIZE]; + byte stl = (sz % WC_AES_BLOCK_SIZE); word32* key2 = xaes->tweak.key; if (xaes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; } - if (iSz < AES_BLOCK_SIZE) { + if (iSz < WC_AES_BLOCK_SIZE) { return BAD_FUNC_ARG; } @@ -16653,7 +16653,7 @@ extern void AES_CTR_encrypt(const unsigned char* in, unsigned char* out, unsigned long len, const unsigned char* ks, int nr, unsigned char* ctr); #if defined(GCM_TABLE) || defined(GCM_TABLE_4BIT) /* in pre-C2x C, constness conflicts for dimensioned arrays can't be resolved. */ -extern void GCM_gmult_len(byte* x, /* const */ byte m[32][AES_BLOCK_SIZE], +extern void GCM_gmult_len(byte* x, /* const */ byte m[32][WC_AES_BLOCK_SIZE], const unsigned char* data, unsigned long len); #endif extern void AES_GCM_encrypt(const unsigned char* in, unsigned char* out, @@ -16740,9 +16740,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv) return BAD_FUNC_ARG; if (iv) - XMEMCPY(aes->reg, iv, AES_BLOCK_SIZE); + XMEMCPY(aes->reg, iv, WC_AES_BLOCK_SIZE); else - XMEMSET(aes->reg, 0, AES_BLOCK_SIZE); + XMEMSET(aes->reg, 0, WC_AES_BLOCK_SIZE); return 0; } @@ -16761,7 +16761,7 @@ static int wc_AesEncrypt(Aes* aes, const byte* inBlock, byte* outBlock) #endif { int ret_cb = wc_CryptoCb_AesEcbEncrypt(aes, outBlock, inBlock, - AES_BLOCK_SIZE); + WC_AES_BLOCK_SIZE); if (ret_cb != WC_NO_ERR_TRACE(CRYPTOCB_UNAVAILABLE)) { return ret_cb; } @@ -16769,7 +16769,7 @@ static int wc_AesEncrypt(Aes* aes, const byte* inBlock, byte* outBlock) } #endif - AES_ECB_encrypt(inBlock, outBlock, AES_BLOCK_SIZE, + AES_ECB_encrypt(inBlock, outBlock, WC_AES_BLOCK_SIZE, (const unsigned char*)aes->key, aes->rounds); return 0; } @@ -16789,14 +16789,14 @@ static int wc_AesDecrypt(Aes* aes, const byte* inBlock, byte* outBlock) #endif { int ret_cb = wc_CryptoCb_AesEcbDecrypt(aes, outBlock, inBlock, - AES_BLOCK_SIZE); + WC_AES_BLOCK_SIZE); if (ret_cb != WC_NO_ERR_TRACE(CRYPTOCB_UNAVAILABLE)) return ret_cb; /* fall-through when unavailable */ } #endif - AES_ECB_decrypt(inBlock, outBlock, AES_BLOCK_SIZE, + AES_ECB_decrypt(inBlock, outBlock, WC_AES_BLOCK_SIZE, (const unsigned char*)aes->key, aes->rounds); return 0; } @@ -16842,7 +16842,7 @@ int wc_AesCbcEncrypt(Aes* aes, byte* out, const byte* in, word32 sz) if (sz == 0) { return 0; } - if (sz % AES_BLOCK_SIZE) { + if (sz % WC_AES_BLOCK_SIZE) { #ifdef WOLFSSL_AES_CBC_LENGTH_CHECKS return BAD_LENGTH_E; #else @@ -16883,7 +16883,7 @@ int wc_AesCbcDecrypt(Aes* aes, byte* out, const byte* in, word32 sz) if (sz == 0) { return 0; } - if (sz % AES_BLOCK_SIZE) { + if (sz % WC_AES_BLOCK_SIZE) { #ifdef WOLFSSL_AES_CBC_LENGTH_CHECKS return BAD_LENGTH_E; #else @@ -16938,7 +16938,7 @@ int wc_AesCtrEncrypt(Aes* aes, byte* out, const byte* in, word32 sz) #endif - tmp = (byte*)aes->tmp + AES_BLOCK_SIZE - aes->left; + tmp = (byte*)aes->tmp + WC_AES_BLOCK_SIZE - aes->left; /* consume any unused bytes left in aes->tmp */ while ((aes->left != 0) && (sz != 0)) { *(out++) = *(in++) ^ *(tmp++); @@ -16947,25 +16947,25 @@ int wc_AesCtrEncrypt(Aes* aes, byte* out, const byte* in, word32 sz) } /* do as many block size ops as possible */ - numBlocks = sz / AES_BLOCK_SIZE; + numBlocks = sz / WC_AES_BLOCK_SIZE; if (numBlocks > 0) { - 
AES_CTR_encrypt(in, out, numBlocks * AES_BLOCK_SIZE, (byte*)aes->key, + AES_CTR_encrypt(in, out, numBlocks * WC_AES_BLOCK_SIZE, (byte*)aes->key, aes->rounds, (byte*)aes->reg); - sz -= numBlocks * AES_BLOCK_SIZE; - out += numBlocks * AES_BLOCK_SIZE; - in += numBlocks * AES_BLOCK_SIZE; + sz -= numBlocks * WC_AES_BLOCK_SIZE; + out += numBlocks * WC_AES_BLOCK_SIZE; + in += numBlocks * WC_AES_BLOCK_SIZE; } /* handle non block size remaining */ if (sz) { - byte zeros[AES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, + byte zeros[WC_AES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - AES_CTR_encrypt(zeros, (byte*)aes->tmp, AES_BLOCK_SIZE, (byte*)aes->key, + AES_CTR_encrypt(zeros, (byte*)aes->tmp, WC_AES_BLOCK_SIZE, (byte*)aes->key, aes->rounds, (byte*)aes->reg); - aes->left = AES_BLOCK_SIZE; + aes->left = WC_AES_BLOCK_SIZE; tmp = (byte*)aes->tmp; while (sz--) { @@ -16991,10 +16991,10 @@ int wc_AesCtrSetKey(Aes* aes, const byte* key, word32 len, static void roll_x(Aes* aes, const byte* in, word32 inSz, byte* out) { /* process the bulk of the data */ - while (inSz >= AES_BLOCK_SIZE) { - xorbuf(out, in, AES_BLOCK_SIZE); - in += AES_BLOCK_SIZE; - inSz -= AES_BLOCK_SIZE; + while (inSz >= WC_AES_BLOCK_SIZE) { + xorbuf(out, in, WC_AES_BLOCK_SIZE); + in += WC_AES_BLOCK_SIZE; + inSz -= WC_AES_BLOCK_SIZE; wc_AesEncrypt(aes, out, out); } @@ -17033,7 +17033,7 @@ static void roll_auth(Aes* aes, const byte* in, word32 inSz, byte* out) return; /* start fill out the rest of the first block */ - remainder = AES_BLOCK_SIZE - authLenSz; + remainder = WC_AES_BLOCK_SIZE - authLenSz; if (inSz >= remainder) { /* plenty of bulk data to fill the remainder of this block */ xorbuf(out + authLenSz, in, remainder); @@ -17057,7 +17057,7 @@ static WC_INLINE void AesCcmCtrInc(byte* B, word32 lenSz) word32 i; for (i = 0; i < lenSz; i++) { - if (++B[AES_BLOCK_SIZE - 1 - i] != 0) return; + if (++B[WC_AES_BLOCK_SIZE - 1 - i] != 0) return; } } @@ -17068,8 +17068,8 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte A[AES_BLOCK_SIZE]; - byte B[AES_BLOCK_SIZE]; + byte A[WC_AES_BLOCK_SIZE]; + byte B[WC_AES_BLOCK_SIZE]; byte lenSz; word32 i; byte mask = 0xFF; @@ -17087,14 +17087,14 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, } XMEMCPY(B+1, nonce, nonceSz); - lenSz = AES_BLOCK_SIZE - 1 - (byte)nonceSz; + lenSz = WC_AES_BLOCK_SIZE - 1 - (byte)nonceSz; B[0] = (authInSz > 0 ? 
64 : 0) + (8 * (((byte)authTagSz - 2) / 2)) + (lenSz - 1); for (i = 0; i < lenSz; i++) { if (mask && i >= wordSz) mask = 0x00; - B[AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; + B[WC_AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; } wc_AesEncrypt(aes, B, A); @@ -17107,20 +17107,20 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, B[0] = lenSz - 1; for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; wc_AesEncrypt(aes, B, A); xorbuf(authTag, A, authTagSz); B[15] = 1; - while (inSz >= AES_BLOCK_SIZE) { + while (inSz >= WC_AES_BLOCK_SIZE) { wc_AesEncrypt(aes, B, A); - xorbuf(A, in, AES_BLOCK_SIZE); - XMEMCPY(out, A, AES_BLOCK_SIZE); + xorbuf(A, in, WC_AES_BLOCK_SIZE); + XMEMCPY(out, A, WC_AES_BLOCK_SIZE); AesCcmCtrInc(B, lenSz); - inSz -= AES_BLOCK_SIZE; - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; + inSz -= WC_AES_BLOCK_SIZE; + in += WC_AES_BLOCK_SIZE; + out += WC_AES_BLOCK_SIZE; } if (inSz > 0) { wc_AesEncrypt(aes, B, A); @@ -17128,8 +17128,8 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, XMEMCPY(out, A, inSz); } - ForceZero(A, AES_BLOCK_SIZE); - ForceZero(B, AES_BLOCK_SIZE); + ForceZero(A, WC_AES_BLOCK_SIZE); + ForceZero(B, WC_AES_BLOCK_SIZE); return 0; } @@ -17140,8 +17140,8 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, const byte* authTag, word32 authTagSz, const byte* authIn, word32 authInSz) { - byte A[AES_BLOCK_SIZE]; - byte B[AES_BLOCK_SIZE]; + byte A[WC_AES_BLOCK_SIZE]; + byte B[WC_AES_BLOCK_SIZE]; byte* o; byte lenSz; word32 i, oSz; @@ -17163,22 +17163,22 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, o = out; oSz = inSz; XMEMCPY(B+1, nonce, nonceSz); - lenSz = AES_BLOCK_SIZE - 1 - (byte)nonceSz; + lenSz = WC_AES_BLOCK_SIZE - 1 - (byte)nonceSz; B[0] = lenSz - 1; for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; B[15] = 1; - while (oSz >= AES_BLOCK_SIZE) { + while (oSz >= WC_AES_BLOCK_SIZE) { wc_AesEncrypt(aes, B, A); - xorbuf(A, in, AES_BLOCK_SIZE); - XMEMCPY(o, A, AES_BLOCK_SIZE); + xorbuf(A, in, WC_AES_BLOCK_SIZE); + XMEMCPY(o, A, WC_AES_BLOCK_SIZE); AesCcmCtrInc(B, lenSz); - oSz -= AES_BLOCK_SIZE; - in += AES_BLOCK_SIZE; - o += AES_BLOCK_SIZE; + oSz -= WC_AES_BLOCK_SIZE; + in += WC_AES_BLOCK_SIZE; + o += WC_AES_BLOCK_SIZE; } if (inSz > 0) { wc_AesEncrypt(aes, B, A); @@ -17187,7 +17187,7 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, } for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; wc_AesEncrypt(aes, B, A); o = out; @@ -17199,7 +17199,7 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, for (i = 0; i < lenSz; i++) { if (mask && i >= wordSz) mask = 0x00; - B[AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; + B[WC_AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask; } wc_AesEncrypt(aes, B, A); @@ -17211,7 +17211,7 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, B[0] = lenSz - 1; for (i = 0; i < lenSz; i++) - B[AES_BLOCK_SIZE - 1 - i] = 0; + B[WC_AES_BLOCK_SIZE - 1 - i] = 0; wc_AesEncrypt(aes, B, B); xorbuf(A, B, authTagSz); @@ -17223,8 +17223,8 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, result = AES_CCM_AUTH_E; } - ForceZero(A, AES_BLOCK_SIZE); - ForceZero(B, AES_BLOCK_SIZE); + ForceZero(A, WC_AES_BLOCK_SIZE); + ForceZero(B, WC_AES_BLOCK_SIZE); o = NULL; return result; @@ -17239,7 
+17239,7 @@ static WC_INLINE void RIGHTSHIFTX(byte* x) int carryIn = 0; byte borrow = (0x00 - (x[15] & 0x01)) & 0xE1; - for (i = 0; i < AES_BLOCK_SIZE; i++) { + for (i = 0; i < WC_AES_BLOCK_SIZE; i++) { int carryOut = (x[i] & 0x01) << 7; x[i] = (byte) ((x[i] >> 1) | carryIn); carryIn = carryOut; @@ -17264,49 +17264,49 @@ void GenerateM0(Gcm* gcm) #if !defined(__aarch64__) || !defined(BIG_ENDIAN_ORDER) int i; #endif - byte (*m)[AES_BLOCK_SIZE] = gcm->M0; + byte (*m)[WC_AES_BLOCK_SIZE] = gcm->M0; /* 0 times -> 0x0 */ - XMEMSET(m[0x0], 0, AES_BLOCK_SIZE); + XMEMSET(m[0x0], 0, WC_AES_BLOCK_SIZE); /* 1 times -> 0x8 */ - XMEMCPY(m[0x8], gcm->H, AES_BLOCK_SIZE); + XMEMCPY(m[0x8], gcm->H, WC_AES_BLOCK_SIZE); /* 2 times -> 0x4 */ - XMEMCPY(m[0x4], m[0x8], AES_BLOCK_SIZE); + XMEMCPY(m[0x4], m[0x8], WC_AES_BLOCK_SIZE); RIGHTSHIFTX(m[0x4]); /* 4 times -> 0x2 */ - XMEMCPY(m[0x2], m[0x4], AES_BLOCK_SIZE); + XMEMCPY(m[0x2], m[0x4], WC_AES_BLOCK_SIZE); RIGHTSHIFTX(m[0x2]); /* 8 times -> 0x1 */ - XMEMCPY(m[0x1], m[0x2], AES_BLOCK_SIZE); + XMEMCPY(m[0x1], m[0x2], WC_AES_BLOCK_SIZE); RIGHTSHIFTX(m[0x1]); /* 0x3 */ - XMEMCPY(m[0x3], m[0x2], AES_BLOCK_SIZE); - xorbuf (m[0x3], m[0x1], AES_BLOCK_SIZE); + XMEMCPY(m[0x3], m[0x2], WC_AES_BLOCK_SIZE); + xorbuf (m[0x3], m[0x1], WC_AES_BLOCK_SIZE); /* 0x5 -> 0x7 */ - XMEMCPY(m[0x5], m[0x4], AES_BLOCK_SIZE); - xorbuf (m[0x5], m[0x1], AES_BLOCK_SIZE); - XMEMCPY(m[0x6], m[0x4], AES_BLOCK_SIZE); - xorbuf (m[0x6], m[0x2], AES_BLOCK_SIZE); - XMEMCPY(m[0x7], m[0x4], AES_BLOCK_SIZE); - xorbuf (m[0x7], m[0x3], AES_BLOCK_SIZE); + XMEMCPY(m[0x5], m[0x4], WC_AES_BLOCK_SIZE); + xorbuf (m[0x5], m[0x1], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0x6], m[0x4], WC_AES_BLOCK_SIZE); + xorbuf (m[0x6], m[0x2], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0x7], m[0x4], WC_AES_BLOCK_SIZE); + xorbuf (m[0x7], m[0x3], WC_AES_BLOCK_SIZE); /* 0x9 -> 0xf */ - XMEMCPY(m[0x9], m[0x8], AES_BLOCK_SIZE); - xorbuf (m[0x9], m[0x1], AES_BLOCK_SIZE); - XMEMCPY(m[0xa], m[0x8], AES_BLOCK_SIZE); - xorbuf (m[0xa], m[0x2], AES_BLOCK_SIZE); - XMEMCPY(m[0xb], m[0x8], AES_BLOCK_SIZE); - xorbuf (m[0xb], m[0x3], AES_BLOCK_SIZE); - XMEMCPY(m[0xc], m[0x8], AES_BLOCK_SIZE); - xorbuf (m[0xc], m[0x4], AES_BLOCK_SIZE); - XMEMCPY(m[0xd], m[0x8], AES_BLOCK_SIZE); - xorbuf (m[0xd], m[0x5], AES_BLOCK_SIZE); - XMEMCPY(m[0xe], m[0x8], AES_BLOCK_SIZE); - xorbuf (m[0xe], m[0x6], AES_BLOCK_SIZE); - XMEMCPY(m[0xf], m[0x8], AES_BLOCK_SIZE); - xorbuf (m[0xf], m[0x7], AES_BLOCK_SIZE); + XMEMCPY(m[0x9], m[0x8], WC_AES_BLOCK_SIZE); + xorbuf (m[0x9], m[0x1], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0xa], m[0x8], WC_AES_BLOCK_SIZE); + xorbuf (m[0xa], m[0x2], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0xb], m[0x8], WC_AES_BLOCK_SIZE); + xorbuf (m[0xb], m[0x3], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0xc], m[0x8], WC_AES_BLOCK_SIZE); + xorbuf (m[0xc], m[0x4], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0xd], m[0x8], WC_AES_BLOCK_SIZE); + xorbuf (m[0xd], m[0x5], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0xe], m[0x8], WC_AES_BLOCK_SIZE); + xorbuf (m[0xe], m[0x6], WC_AES_BLOCK_SIZE); + XMEMCPY(m[0xf], m[0x8], WC_AES_BLOCK_SIZE); + xorbuf (m[0xf], m[0x7], WC_AES_BLOCK_SIZE); #ifndef __aarch64__ for (i = 0; i < 16; i++) { @@ -17327,7 +17327,7 @@ void GenerateM0(Gcm* gcm) int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len) { int ret; - byte iv[AES_BLOCK_SIZE]; + byte iv[WC_AES_BLOCK_SIZE]; if (aes == NULL) { return BAD_FUNC_ARG; @@ -17344,11 +17344,11 @@ int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len) } #endif - XMEMSET(iv, 0, AES_BLOCK_SIZE); + XMEMSET(iv, 0, WC_AES_BLOCK_SIZE); ret = 
@@ -17344,11 +17344,11 @@ int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len)
     }
 #endif
 
-    XMEMSET(iv, 0, AES_BLOCK_SIZE);
+    XMEMSET(iv, 0, WC_AES_BLOCK_SIZE);
 
     ret = wc_AesSetKey(aes, key, len, iv, AES_ENCRYPTION);
     if (ret == 0) {
-        AES_ECB_encrypt(iv, aes->gcm.H, AES_BLOCK_SIZE,
+        AES_ECB_encrypt(iv, aes->gcm.H, WC_AES_BLOCK_SIZE,
             (const unsigned char*)aes->key, aes->rounds);
 #if defined(GCM_TABLE) || defined(GCM_TABLE_4BIT)
         GenerateM0(&aes->gcm);
@@ -17364,7 +17364,7 @@ static WC_INLINE void IncrementGcmCounter(byte* inOutCtr)
     int i;
 
     /* in network byte order so start at end and work back */
-    for (i = AES_BLOCK_SIZE - 1; i >= AES_BLOCK_SIZE - CTR_SZ; i--) {
+    for (i = WC_AES_BLOCK_SIZE - 1; i >= WC_AES_BLOCK_SIZE - CTR_SZ; i--) {
         if (++inOutCtr[i])  /* we're done unless we overflow */
             return;
     }
@@ -17395,30 +17395,30 @@ static WC_INLINE void FlattenSzInBits(byte* buf, word32 sz)
 static void GCM_gmult_len(byte* x, const byte* h,
     const unsigned char* a, unsigned long len)
 {
-    byte Z[AES_BLOCK_SIZE];
-    byte V[AES_BLOCK_SIZE];
+    byte Z[WC_AES_BLOCK_SIZE];
+    byte V[WC_AES_BLOCK_SIZE];
     int i, j;
 
-    while (len >= AES_BLOCK_SIZE) {
-        xorbuf(x, a, AES_BLOCK_SIZE);
+    while (len >= WC_AES_BLOCK_SIZE) {
+        xorbuf(x, a, WC_AES_BLOCK_SIZE);
 
-        XMEMSET(Z, 0, AES_BLOCK_SIZE);
-        XMEMCPY(V, x, AES_BLOCK_SIZE);
-        for (i = 0; i < AES_BLOCK_SIZE; i++) {
+        XMEMSET(Z, 0, WC_AES_BLOCK_SIZE);
+        XMEMCPY(V, x, WC_AES_BLOCK_SIZE);
+        for (i = 0; i < WC_AES_BLOCK_SIZE; i++) {
             byte y = h[i];
             for (j = 0; j < 8; j++) {
                 if (y & 0x80) {
-                    xorbuf(Z, V, AES_BLOCK_SIZE);
+                    xorbuf(Z, V, WC_AES_BLOCK_SIZE);
                 }
 
                 RIGHTSHIFTX(V);
                 y = y << 1;
             }
         }
-        XMEMCPY(x, Z, AES_BLOCK_SIZE);
+        XMEMCPY(x, Z, WC_AES_BLOCK_SIZE);
 
-        len -= AES_BLOCK_SIZE;
-        a += AES_BLOCK_SIZE;
+        len -= WC_AES_BLOCK_SIZE;
+        a += WC_AES_BLOCK_SIZE;
     }
 }
 #define GCM_GMULT_LEN(aes, x, a, len) GCM_gmult_len(x, aes->gcm.H, a, len)
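GCM_gmult_len above is the bit-serial GHASH core: for each 16-byte block it folds the block into the running state and multiplies by H in GF(2^128), i.e. x = (x XOR block) * H, with RIGHTSHIFTX supplying the field reduction. Expressed as a one-block wrapper over the function defined above (sketch only; ghash_one_block is not a name from the patch):

    /* Sketch: one GHASH absorption step, x = (x ^ block) * H, in terms
     * of the GCM_gmult_len defined above with len equal to one block. */
    static void ghash_one_block(byte x[WC_AES_BLOCK_SIZE],
                                const byte h[WC_AES_BLOCK_SIZE],
                                const byte block[WC_AES_BLOCK_SIZE])
    {
        GCM_gmult_len(x, h, block, WC_AES_BLOCK_SIZE);
    }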
@@ -17429,50 +17429,50 @@ static WC_INLINE void FlattenSzInBits(byte* buf, word32 sz)
 static void gcm_ghash_arm32(Aes* aes, const byte* a, word32 aSz, const byte* c,
     word32 cSz, byte* s, word32 sSz)
 {
-    byte x[AES_BLOCK_SIZE];
-    byte scratch[AES_BLOCK_SIZE];
+    byte x[WC_AES_BLOCK_SIZE];
+    byte scratch[WC_AES_BLOCK_SIZE];
     word32 blocks, partial;
 
     if (aes == NULL) {
         return;
     }
 
-    XMEMSET(x, 0, AES_BLOCK_SIZE);
+    XMEMSET(x, 0, WC_AES_BLOCK_SIZE);
 
     /* Hash in A, the Additional Authentication Data */
     if (aSz != 0 && a != NULL) {
-        blocks = aSz / AES_BLOCK_SIZE;
-        partial = aSz % AES_BLOCK_SIZE;
+        blocks = aSz / WC_AES_BLOCK_SIZE;
+        partial = aSz % WC_AES_BLOCK_SIZE;
         if (blocks > 0) {
-            GCM_GMULT_LEN(aes, x, a, blocks * AES_BLOCK_SIZE);
-            a += blocks * AES_BLOCK_SIZE;
+            GCM_GMULT_LEN(aes, x, a, blocks * WC_AES_BLOCK_SIZE);
+            a += blocks * WC_AES_BLOCK_SIZE;
         }
         if (partial != 0) {
-            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+            XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
             XMEMCPY(scratch, a, partial);
-            GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
+            GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
         }
     }
 
     /* Hash in C, the Ciphertext */
     if (cSz != 0 && c != NULL) {
-        blocks = cSz / AES_BLOCK_SIZE;
-        partial = cSz % AES_BLOCK_SIZE;
+        blocks = cSz / WC_AES_BLOCK_SIZE;
+        partial = cSz % WC_AES_BLOCK_SIZE;
         if (blocks > 0) {
-            GCM_GMULT_LEN(aes, x, c, blocks * AES_BLOCK_SIZE);
-            c += blocks * AES_BLOCK_SIZE;
+            GCM_GMULT_LEN(aes, x, c, blocks * WC_AES_BLOCK_SIZE);
+            c += blocks * WC_AES_BLOCK_SIZE;
         }
         if (partial != 0) {
-            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+            XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
             XMEMCPY(scratch, c, partial);
-            GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
+            GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
         }
     }
 
     /* Hash in the lengths of A and C in bits */
     FlattenSzInBits(&scratch[0], aSz);
     FlattenSzInBits(&scratch[8], cSz);
-    GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
+    GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
 
     /* Copy the result into s. */
     XMEMCPY(s, x, sSz);
@@ -17485,10 +17485,10 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
 {
     word32 blocks;
     word32 partial;
-    byte counter[AES_BLOCK_SIZE];
-    byte initialCounter[AES_BLOCK_SIZE];
-    byte x[AES_BLOCK_SIZE];
-    byte scratch[AES_BLOCK_SIZE];
+    byte counter[WC_AES_BLOCK_SIZE];
+    byte initialCounter[WC_AES_BLOCK_SIZE];
+    byte x[WC_AES_BLOCK_SIZE];
+    byte scratch[WC_AES_BLOCK_SIZE];
 
     /* sanity checks */
     if (aes == NULL || (iv == NULL && ivSz > 0) || (authTag == NULL) ||
@@ -17497,7 +17497,7 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
         return BAD_FUNC_ARG;
     }
 
-    if (authTagSz < WOLFSSL_MIN_AUTH_TAG_SZ || authTagSz > AES_BLOCK_SIZE) {
+    if (authTagSz < WOLFSSL_MIN_AUTH_TAG_SZ || authTagSz > WC_AES_BLOCK_SIZE) {
         WOLFSSL_MSG("GcmEncrypt authTagSz error");
         return BAD_FUNC_ARG;
     }
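The next hunk builds GCM's initial counter block J0: for the common 12-byte nonce it is IV || 0x00000001, and for any other length it is GHASH run over the IV. Condensed into a single helper for reference (sketch only; gcm_init_counter is an illustrative name, using the functions visible in these hunks):

    /* Sketch of the J0 derivation performed in the hunk below. */
    static void gcm_init_counter(Aes* aes, const byte* iv, word32 ivSz,
                                 byte J0[WC_AES_BLOCK_SIZE])
    {
        XMEMSET(J0, 0, WC_AES_BLOCK_SIZE);
        if (ivSz == GCM_NONCE_MID_SZ) {
            /* 96-bit IV: J0 = IV || 0^31 || 1 */
            XMEMCPY(J0, iv, ivSz);
            J0[WC_AES_BLOCK_SIZE - 1] = 1;
        }
        else {
            /* any other IV length: J0 = GHASH(IV) */
            gcm_ghash_arm32(aes, NULL, 0, iv, ivSz, J0, WC_AES_BLOCK_SIZE);
        }
    }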
@@ -17521,69 +17521,69 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
     }
 #endif
 
-    XMEMSET(initialCounter, 0, AES_BLOCK_SIZE);
+    XMEMSET(initialCounter, 0, WC_AES_BLOCK_SIZE);
     if (ivSz == GCM_NONCE_MID_SZ) {
         XMEMCPY(initialCounter, iv, ivSz);
-        initialCounter[AES_BLOCK_SIZE - 1] = 1;
+        initialCounter[WC_AES_BLOCK_SIZE - 1] = 1;
     }
     else {
-        gcm_ghash_arm32(aes, NULL, 0, iv, ivSz, initialCounter, AES_BLOCK_SIZE);
+        gcm_ghash_arm32(aes, NULL, 0, iv, ivSz, initialCounter, WC_AES_BLOCK_SIZE);
     }
-    XMEMCPY(counter, initialCounter, AES_BLOCK_SIZE);
+    XMEMCPY(counter, initialCounter, WC_AES_BLOCK_SIZE);
 
     /* Hash in the Additional Authentication Data */
-    XMEMSET(x, 0, AES_BLOCK_SIZE);
+    XMEMSET(x, 0, WC_AES_BLOCK_SIZE);
     if (authInSz != 0 && authIn != NULL) {
-        blocks = authInSz / AES_BLOCK_SIZE;
-        partial = authInSz % AES_BLOCK_SIZE;
+        blocks = authInSz / WC_AES_BLOCK_SIZE;
+        partial = authInSz % WC_AES_BLOCK_SIZE;
         if (blocks > 0) {
-            GCM_GMULT_LEN(aes, x, authIn, blocks * AES_BLOCK_SIZE);
-            authIn += blocks * AES_BLOCK_SIZE;
+            GCM_GMULT_LEN(aes, x, authIn, blocks * WC_AES_BLOCK_SIZE);
+            authIn += blocks * WC_AES_BLOCK_SIZE;
         }
         if (partial != 0) {
-            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+            XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
             XMEMCPY(scratch, authIn, partial);
-            GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
+            GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
         }
     }
 
     /* do as many blocks as possible */
-    blocks = sz / AES_BLOCK_SIZE;
-    partial = sz % AES_BLOCK_SIZE;
+    blocks = sz / WC_AES_BLOCK_SIZE;
+    partial = sz % WC_AES_BLOCK_SIZE;
     if (blocks > 0) {
-        AES_GCM_encrypt(in, out, blocks * AES_BLOCK_SIZE,
+        AES_GCM_encrypt(in, out, blocks * WC_AES_BLOCK_SIZE,
             (const unsigned char*)aes->key, aes->rounds, counter);
-        GCM_GMULT_LEN(aes, x, out, blocks * AES_BLOCK_SIZE);
-        in += blocks * AES_BLOCK_SIZE;
-        out += blocks * AES_BLOCK_SIZE;
+        GCM_GMULT_LEN(aes, x, out, blocks * WC_AES_BLOCK_SIZE);
+        in += blocks * WC_AES_BLOCK_SIZE;
+        out += blocks * WC_AES_BLOCK_SIZE;
     }
 
     /* take care of partial block sizes leftover */
     if (partial != 0) {
-        AES_GCM_encrypt(in, scratch, AES_BLOCK_SIZE,
+        AES_GCM_encrypt(in, scratch, WC_AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds, counter);
         XMEMCPY(out, scratch, partial);
 
-        XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+        XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
         XMEMCPY(scratch, out, partial);
-        GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
+        GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
     }
 
     /* Hash in the lengths of A and C in bits */
-    XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+    XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
     FlattenSzInBits(&scratch[0], authInSz);
     FlattenSzInBits(&scratch[8], sz);
-    GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
-    if (authTagSz > AES_BLOCK_SIZE) {
-        XMEMCPY(authTag, x, AES_BLOCK_SIZE);
+    GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
+    if (authTagSz > WC_AES_BLOCK_SIZE) {
+        XMEMCPY(authTag, x, WC_AES_BLOCK_SIZE);
     }
     else {
-        /* authTagSz can be smaller than AES_BLOCK_SIZE */
+        /* authTagSz can be smaller than WC_AES_BLOCK_SIZE */
         XMEMCPY(authTag, x, authTagSz);
     }
 
     /* Auth tag calculation. */
-    AES_ECB_encrypt(initialCounter, scratch, AES_BLOCK_SIZE,
+    AES_ECB_encrypt(initialCounter, scratch, WC_AES_BLOCK_SIZE,
         (const unsigned char*)aes->key, aes->rounds);
     xorbuf(authTag, scratch, authTagSz);
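Both the encrypt path above and the decrypt path below finish the same way: the tag is the first authTagSz bytes of GHASH XOR E_k(J0). A sketch using the helpers visible in these hunks (gcm_final_tag is an illustrative name; the decrypt path XORs into its GHASH state instead and then compares with the received tag):

    /* Sketch: tag = MSB_authTagSz(GHASH ^ E_k(J0)). */
    static void gcm_final_tag(Aes* aes, const byte* J0, const byte* x,
                              byte* tag, word32 tagSz)
    {
        byte ekJ0[WC_AES_BLOCK_SIZE];

        /* E_k(J0): one block through the cipher, as in the hunks above */
        AES_ECB_encrypt(J0, ekJ0, WC_AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds);
        /* truncate the GHASH state and mask it with E_k(J0) */
        XMEMCPY(tag, x, tagSz);
        xorbuf(tag, ekJ0, tagSz);
    }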
@@ -17596,14 +17596,14 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
 {
     word32 blocks;
     word32 partial;
-    byte counter[AES_BLOCK_SIZE];
-    byte initialCounter[AES_BLOCK_SIZE];
-    byte scratch[AES_BLOCK_SIZE];
-    byte x[AES_BLOCK_SIZE];
+    byte counter[WC_AES_BLOCK_SIZE];
+    byte initialCounter[WC_AES_BLOCK_SIZE];
+    byte scratch[WC_AES_BLOCK_SIZE];
+    byte x[WC_AES_BLOCK_SIZE];
 
     /* sanity checks */
     if (aes == NULL || iv == NULL || (sz != 0 && (in == NULL || out == NULL)) ||
-        authTag == NULL || authTagSz > AES_BLOCK_SIZE || authTagSz == 0 ||
+        authTag == NULL || authTagSz > WC_AES_BLOCK_SIZE || authTagSz == 0 ||
         ivSz == 0) {
         WOLFSSL_MSG("a NULL parameter passed in when size is larger than 0");
         return BAD_FUNC_ARG;
@@ -17623,58 +17623,58 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
     }
 #endif
 
-    XMEMSET(initialCounter, 0, AES_BLOCK_SIZE);
+    XMEMSET(initialCounter, 0, WC_AES_BLOCK_SIZE);
     if (ivSz == GCM_NONCE_MID_SZ) {
         XMEMCPY(initialCounter, iv, ivSz);
-        initialCounter[AES_BLOCK_SIZE - 1] = 1;
+        initialCounter[WC_AES_BLOCK_SIZE - 1] = 1;
     }
     else {
-        gcm_ghash_arm32(aes, NULL, 0, iv, ivSz, initialCounter, AES_BLOCK_SIZE);
+        gcm_ghash_arm32(aes, NULL, 0, iv, ivSz, initialCounter, WC_AES_BLOCK_SIZE);
     }
-    XMEMCPY(counter, initialCounter, AES_BLOCK_SIZE);
+    XMEMCPY(counter, initialCounter, WC_AES_BLOCK_SIZE);
 
-    XMEMSET(x, 0, AES_BLOCK_SIZE);
+    XMEMSET(x, 0, WC_AES_BLOCK_SIZE);
     /* Hash in the Additional Authentication Data */
     if (authInSz != 0 && authIn != NULL) {
-        blocks = authInSz / AES_BLOCK_SIZE;
-        partial = authInSz % AES_BLOCK_SIZE;
+        blocks = authInSz / WC_AES_BLOCK_SIZE;
+        partial = authInSz % WC_AES_BLOCK_SIZE;
         if (blocks > 0) {
-            GCM_GMULT_LEN(aes, x, authIn, blocks * AES_BLOCK_SIZE);
-            authIn += blocks * AES_BLOCK_SIZE;
+            GCM_GMULT_LEN(aes, x, authIn, blocks * WC_AES_BLOCK_SIZE);
+            authIn += blocks * WC_AES_BLOCK_SIZE;
        }
        if (partial != 0) {
-            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+            XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
             XMEMCPY(scratch, authIn, partial);
-            GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
+            GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
         }
     }
 
-    blocks = sz / AES_BLOCK_SIZE;
-    partial = sz % AES_BLOCK_SIZE;
+    blocks = sz / WC_AES_BLOCK_SIZE;
+    partial = sz % WC_AES_BLOCK_SIZE;
     /* do as many blocks as possible */
     if (blocks > 0) {
-        GCM_GMULT_LEN(aes, x, in, blocks * AES_BLOCK_SIZE);
+        GCM_GMULT_LEN(aes, x, in, blocks * WC_AES_BLOCK_SIZE);
 
-        AES_GCM_encrypt(in, out, blocks * AES_BLOCK_SIZE,
+        AES_GCM_encrypt(in, out, blocks * WC_AES_BLOCK_SIZE,
             (const unsigned char*)aes->key, aes->rounds, counter);
-        in += blocks * AES_BLOCK_SIZE;
-        out += blocks * AES_BLOCK_SIZE;
+        in += blocks * WC_AES_BLOCK_SIZE;
+        out += blocks * WC_AES_BLOCK_SIZE;
     }
     if (partial != 0) {
-        XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+        XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
         XMEMCPY(scratch, in, partial);
-        GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
+        GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
 
-        AES_GCM_encrypt(in, scratch, AES_BLOCK_SIZE,
+        AES_GCM_encrypt(in, scratch, WC_AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds, counter);
        XMEMCPY(out, scratch, partial);
    }
 
-    XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+    XMEMSET(scratch, 0, WC_AES_BLOCK_SIZE);
     FlattenSzInBits(&scratch[0], authInSz);
     FlattenSzInBits(&scratch[8], sz);
-    GCM_GMULT_LEN(aes, x, scratch, AES_BLOCK_SIZE);
-    AES_ECB_encrypt(initialCounter, scratch, AES_BLOCK_SIZE,
+    GCM_GMULT_LEN(aes, x, scratch, WC_AES_BLOCK_SIZE);
+    AES_ECB_encrypt(initialCounter, scratch, WC_AES_BLOCK_SIZE,
        (const unsigned char*)aes->key, aes->rounds);
    xorbuf(x, scratch, authTagSz);
    if (authTag != NULL) {