 src/crypto/fipsmodule/modes/gcm.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++---------------------------------
 1 file changed, 42 insertions(+), 33 deletions(-)
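
This change moves the long-lived, per-key state (the GHASH subkey, its precomputed table, the gmult/ghash function pointers, and the block cipher) out of GCM128_CONTEXT into a new GCM128_KEY structure, reached below as ctx->gcm_key, so a key's derived state can be set up once and reused. The struct definition lives in a header outside this diff; a minimal sketch consistent with the accesses below, with field types inferred from the signatures in this file (treat the exact layout as an assumption):

    typedef struct gcm128_key_st {
      u128 H;                   /* GHASH subkey: the block cipher applied to the zero block */
      u128 Htable[16];          /* table precomputed from H for the GHASH routines */
      gmult_func gmult;         /* single-block GHASH multiply */
      ghash_func ghash;         /* multi-block GHASH */
      block128_f block;         /* underlying block cipher, e.g. AES */
      int use_aesni_gcm_crypt;  /* whether the fused AES-NI AES-GCM assembly is usable */
    } GCM128_KEY;
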
diff --git a/src/crypto/fipsmodule/modes/gcm.c b/src/crypto/fipsmodule/modes/gcm.c
index 99d0e15e..6eff4792 100644
--- a/src/crypto/fipsmodule/modes/gcm.c
+++ b/src/crypto/fipsmodule/modes/gcm.c
@@ -243,9 +243,10 @@ void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
size_t len);
#endif
-#define GCM_MUL(ctx, Xi) gcm_gmult_4bit((ctx)->Xi.u, (ctx)->Htable)
+#define GCM_MUL(ctx, Xi) gcm_gmult_4bit((ctx)->Xi.u, (ctx)->gcm_key.Htable)
#if defined(GHASH_ASM)
-#define GHASH(ctx, in, len) gcm_ghash_4bit((ctx)->Xi.u, (ctx)->Htable, in, len)
+#define GHASH(ctx, in, len) \
+ gcm_ghash_4bit((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
// GHASH_CHUNK is a "stride parameter" meant to mitigate the cache
// thrashing effect. In other words, the idea is to hash data while it
// is still in the L1 cache after the encryption pass...
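
The chunked encrypt-then-hash pattern that comment describes looks roughly like this (a sketch of this file's bulk loop only; ctr32_encrypt_blocks is a hypothetical stand-in for the inline CTR code, and the real loop also handles the trailing partial chunk):

    while (len >= GHASH_CHUNK) {
      ctr32_encrypt_blocks(ctx, out, in, GHASH_CHUNK, key);  /* hypothetical helper */
      GHASH(ctx, out, GHASH_CHUNK);  /* hash the ciphertext while it is still hot in L1 */
      in += GHASH_CHUNK;
      out += GHASH_CHUNK;
      len -= GHASH_CHUNK;
    }
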
@@ -337,10 +338,11 @@ void gcm_ghash_p8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
#ifdef GCM_FUNCREF_4BIT
#undef GCM_MUL
-#define GCM_MUL(ctx, Xi) (*gcm_gmult_p)((ctx)->Xi.u, (ctx)->Htable)
+#define GCM_MUL(ctx, Xi) (*gcm_gmult_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable)
#ifdef GHASH
#undef GHASH
-#define GHASH(ctx, in, len) (*gcm_ghash_p)((ctx)->Xi.u, (ctx)->Htable, in, len)
+#define GHASH(ctx, in, len) \
+ (*gcm_ghash_p)((ctx)->Xi.u, (ctx)->gcm_key.Htable, in, len)
#endif
#endif
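
Under GCM_FUNCREF_4BIT the GHASH implementation is a per-key function pointer chosen at init time, so each routine below hoists it into a local once rather than re-reading it from the context inside the hot loop:

    void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
        ctx->gcm_key.gmult;
    /* ... */
    (*gcm_gmult_p)(ctx->Xi.u, ctx->gcm_key.Htable);  /* what GCM_MUL now expands to */
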
@@ -417,27 +419,28 @@ void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
#endif
}
-void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *aes_key,
- block128_f block, int block_is_hwaes) {
- OPENSSL_memset(ctx, 0, sizeof(*ctx));
- ctx->block = block;
+void CRYPTO_gcm128_init_key(GCM128_KEY *gcm_key, const void *aes_key,
+ block128_f block, int block_is_hwaes) {
+ OPENSSL_memset(gcm_key, 0, sizeof(*gcm_key));
+ gcm_key->block = block;
- uint8_t gcm_key[16];
- OPENSSL_memset(gcm_key, 0, sizeof(gcm_key));
- (*block)(gcm_key, gcm_key, aes_key);
+ uint8_t ghash_key[16];
+ OPENSSL_memset(ghash_key, 0, sizeof(ghash_key));
+ (*block)(ghash_key, ghash_key, aes_key);
int is_avx;
- CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, &ctx->H, ctx->Htable, &is_avx,
- gcm_key);
+ CRYPTO_ghash_init(&gcm_key->gmult, &gcm_key->ghash, &gcm_key->H,
+ gcm_key->Htable, &is_avx, ghash_key);
- ctx->use_aesni_gcm_crypt = (is_avx && block_is_hwaes) ? 1 : 0;
+ gcm_key->use_aesni_gcm_crypt = (is_avx && block_is_hwaes) ? 1 : 0;
}
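
CRYPTO_gcm128_init_key derives the GHASH subkey by encrypting the all-zero block (H = E_K(0^128), the ghash_key buffer above) and precomputes Htable from it. A hedged usage sketch; the AES_* calls are the standard AES API rather than anything this diff shows, and the cast follows the usual block128_f idiom:

    AES_KEY aes;
    AES_set_encrypt_key(key_bytes, /*bits=*/128, &aes);

    GCM128_KEY gcm_key;
    CRYPTO_gcm128_init_key(&gcm_key, &aes, (block128_f)AES_encrypt,
                           /*block_is_hwaes=*/0);
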
void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key,
const uint8_t *iv, size_t len) {
unsigned int ctr;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
+ ctx->gcm_key.gmult;
#endif
ctx->Yi.u[0] = 0;
@@ -477,7 +480,7 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key,
ctr = CRYPTO_bswap4(ctx->Yi.d[3]);
}
- (*ctx->block)(ctx->Yi.c, ctx->EK0.c, key);
+ (*ctx->gcm_key.block)(ctx->Yi.c, ctx->EK0.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
}
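
The IV handling above follows the GCM spec: for the common 96-bit IV, Y0 is IV || 0x00000001; otherwise Y0 = GHASH_H over the zero-padded IV followed by a block encoding the IV's bit length. EK0 = E_K(Y0) is saved to mask the tag in CRYPTO_gcm128_finish, and incrementing ctr advances the counter to Y1 for the first block of data.
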
@@ -486,10 +489,11 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) {
unsigned int n;
uint64_t alen = ctx->len.u[0];
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
+ ctx->gcm_key.gmult;
#ifdef GHASH
void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
- size_t len) = ctx->ghash;
+ size_t len) = ctx->gcm_key.ghash;
#endif
#endif
@@ -553,12 +557,13 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key,
const uint8_t *in, uint8_t *out, size_t len) {
unsigned int n, ctr;
uint64_t mlen = ctx->len.u[1];
- block128_f block = ctx->block;
+ block128_f block = ctx->gcm_key.block;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
+ ctx->gcm_key.gmult;
#ifdef GHASH
void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
- size_t len) = ctx->ghash;
+ size_t len) = ctx->gcm_key.ghash;
#endif
#endif
@@ -679,12 +684,13 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key,
size_t len) {
unsigned int n, ctr;
uint64_t mlen = ctx->len.u[1];
- block128_f block = ctx->block;
+ block128_f block = ctx->gcm_key.block;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
+ ctx->gcm_key.gmult;
#ifdef GHASH
void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
- size_t len) = ctx->ghash;
+ size_t len) = ctx->gcm_key.ghash;
#endif
#endif
@@ -813,10 +819,11 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
unsigned int n, ctr;
uint64_t mlen = ctx->len.u[1];
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
+ ctx->gcm_key.gmult;
#ifdef GHASH
void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
- size_t len) = ctx->ghash;
+ size_t len) = ctx->gcm_key.ghash;
#endif
#endif
@@ -849,7 +856,7 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
}
#if defined(AESNI_GCM)
- if (ctx->use_aesni_gcm_crypt) {
+ if (ctx->gcm_key.use_aesni_gcm_crypt) {
// |aesni_gcm_encrypt| may not process all the input given to it. It may
// not process *any* of its input if it is deemed too small.
size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
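
The remainder handling after that call follows the usual bulk-then-scalar pattern (a sketch; the function's scalar CTR+GHASH loop below picks up whatever is left):

    size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
    in += bulk;
    out += bulk;
    len -= bulk;  /* may be unchanged if the input was deemed too small */
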
@@ -895,7 +902,7 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
#endif
}
if (len) {
- (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
+ (*ctx->gcm_key.block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
while (len--) {
@@ -914,10 +921,11 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
unsigned int n, ctr;
uint64_t mlen = ctx->len.u[1];
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
+ ctx->gcm_key.gmult;
#ifdef GHASH
void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
- size_t len) = ctx->ghash;
+ size_t len) = ctx->gcm_key.ghash;
#endif
#endif
@@ -952,7 +960,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
}
#if defined(AESNI_GCM)
- if (ctx->use_aesni_gcm_crypt) {
+ if (ctx->gcm_key.use_aesni_gcm_crypt) {
// |aesni_gcm_decrypt| may not process all the input given to it. It may
// not process *any* of its input if it is deemed too small.
size_t bulk = aesni_gcm_decrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
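
The decrypt side consumes |bulk| the same way as the encrypt-side sketch above; anything aesni_gcm_decrypt leaves behind falls through to the scalar loop.
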
@@ -1001,7 +1009,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
len -= i;
}
if (len) {
- (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
+ (*ctx->gcm_key.block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
while (len--) {
@@ -1020,7 +1028,8 @@ int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len) {
uint64_t alen = ctx->len.u[0] << 3;
uint64_t clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT
- void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) =
+ ctx->gcm_key.gmult;
#endif
if (ctx->mres || ctx->ares) {