diff --git a/core/arch/arm/crypto/aes-gcm-ce.c b/core/arch/arm/crypto/aes-gcm-ce.c
index c42489b87d7..47e3c0bea41 100644
--- a/core/arch/arm/crypto/aes-gcm-ce.c
+++ b/core/arch/arm/crypto/aes-gcm-ce.c
@@ -29,41 +29,37 @@ static void put_be_block(void *dst, const void *src)
 	put_be64((uint8_t *)dst + 8, s[0]);
 }
 
-TEE_Result internal_aes_gcm_set_key(struct internal_aes_gcm_ctx *ctx,
-				    const void *key, size_t key_len)
+void internal_aes_gcm_set_key(struct internal_aes_gcm_state *state,
+			      const struct internal_aes_gcm_key *enc_key)
 {
 	uint64_t k[2];
 	uint64_t a;
 	uint64_t b;
 
-	if (aes_setup(key, key_len, 0, &ctx->skey))
-		return TEE_ERROR_BAD_PARAMETERS;
-
-	internal_aes_gcm_encrypt_block(ctx, ctx->ctr, ctx->hash_subkey);
+	internal_aes_gcm_encrypt_block(enc_key, state->ctr, state->hash_subkey);
 
 	/* Store hash key in little endian and multiply by 'x' */
-	b = get_be64(ctx->hash_subkey);
-	a = get_be64(ctx->hash_subkey + 8);
+	b = get_be64(state->hash_subkey);
+	a = get_be64(state->hash_subkey + 8);
 	k[0] = (a << 1) | (b >> 63);
 	k[1] = (b << 1) | (a >> 63);
 	if (b >> 63)
 		k[1] ^= 0xc200000000000000UL;
 
-	memcpy(ctx->hash_subkey, k, TEE_AES_BLOCK_SIZE);
-	return TEE_SUCCESS;
+	memcpy(state->hash_subkey, k, TEE_AES_BLOCK_SIZE);
 }
 
-void internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx,
+void internal_aes_gcm_ghash_update(struct internal_aes_gcm_state *state,
 				   const void *head, const void *data,
-				   size_t num_blocks)
+				   size_t num_blocks)
 {
 	uint32_t vfp_state;
 	uint64_t dg[2];
 	uint64_t *k;
 
-	get_be_block(dg, ctx->hash_state);
+	get_be_block(dg, state->hash_state);
 
-	k = (void *)ctx->hash_subkey;
+	k = (void *)state->hash_subkey;
 
 	vfp_state = thread_kernel_enable_vfp();
 
@@ -74,56 +70,113 @@ void internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx,
 #endif
 
 	thread_kernel_disable_vfp(vfp_state);
 
-	put_be_block(ctx->hash_state, dg);
+	put_be_block(state->hash_state, dg);
 }
 
 #ifdef ARM64
-void internal_aes_gcm_encrypt_block(struct internal_aes_gcm_ctx *ctx,
+static uint32_t ror32(uint32_t word, unsigned int shift)
+{
+	return (word >> shift) | (word << (32 - shift));
+}
+
+TEE_Result internal_aes_gcm_expand_enc_key(const void *key, size_t key_len,
+					   struct internal_aes_gcm_key *enc_key)
+{
+	/* The AES key schedule round constants */
+	static uint8_t const rcon[] = {
+		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
+	};
+	uint32_t vfp_state;
+	uint32_t kwords = key_len / sizeof(uint32_t);
+	void *p = enc_key->data;
+	uint32_t *k = p;
+	unsigned int i;
+
+	if (key_len != 16 && key_len != 24 && key_len != 32)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	memcpy(k, key, key_len);
+	/*
+	 * # of rounds specified by AES:
+	 * 128 bit key	10 rounds
+	 * 192 bit key	12 rounds
+	 * 256 bit key	14 rounds
+	 * => n byte key => 6 + (n/4) rounds
+	 */
+	enc_key->rounds = 6 + key_len / 4;
+
+	vfp_state = thread_kernel_enable_vfp();
+	for (i = 0; i < sizeof(rcon); i++) {
+		uint32_t *rki = k + (i * kwords);
+		uint32_t *rko = rki + kwords;
+
+		rko[0] = ror32(pmull_gcm_aes_sub(rki[kwords - 1]), 8) ^
+			 rcon[i] ^ rki[0];
+		rko[1] = rko[0] ^ rki[1];
+		rko[2] = rko[1] ^ rki[2];
+		rko[3] = rko[2] ^ rki[3];
+
+		if (key_len == 24) {
+			if (i >= 7)
+				break;
+			rko[4] = rko[3] ^ rki[4];
+			rko[5] = rko[4] ^ rki[5];
+		} else if (key_len == 32) {
+			if (i >= 6)
+				break;
+			rko[4] = pmull_gcm_aes_sub(rko[3]) ^ rki[4];
+			rko[5] = rko[4] ^ rki[5];
+			rko[6] = rko[5] ^ rki[6];
+			rko[7] = rko[6] ^ rki[7];
+		}
+	}
+
+	thread_kernel_disable_vfp(vfp_state);
+	return TEE_SUCCESS;
+}
+
+void internal_aes_gcm_encrypt_block(const struct internal_aes_gcm_key *ek,
 				    const void *src, void *dst)
 {
 	uint32_t vfp_state;
-	void *enc_key = ctx->skey.rijndael.eK;
-	size_t rounds = ctx->skey.rijndael.Nr;
 
 	vfp_state = thread_kernel_enable_vfp();
 
-	pmull_gcm_load_round_keys(enc_key, rounds);
-	pmull_gcm_encrypt_block(dst, src, rounds);
+	pmull_gcm_load_round_keys(ek->data, ek->rounds);
+	pmull_gcm_encrypt_block(dst, src, ek->rounds);
 
 	thread_kernel_disable_vfp(vfp_state);
 }
 
-void
-internal_aes_gcm_update_payload_block_aligned(struct internal_aes_gcm_ctx *ctx,
-					      TEE_OperationMode m,
-					      const void *src,
-					      size_t num_blocks, void *dst)
+void internal_aes_gcm_update_payload_block_aligned(
+				struct internal_aes_gcm_state *state,
+				const struct internal_aes_gcm_key *ek,
+				TEE_OperationMode mode, const void *src,
+				size_t num_blocks, void *dst)
 {
 	uint32_t vfp_state;
 	uint64_t dg[2];
 	uint64_t ctr[2];
 	uint64_t *k;
-	void *enc_key = ctx->skey.rijndael.eK;
-	size_t rounds = ctx->skey.rijndael.Nr;
 
-	get_be_block(dg, ctx->hash_state);
-	get_be_block(ctr, ctx->ctr);
+	get_be_block(dg, state->hash_state);
+	get_be_block(ctr, state->ctr);
 
-	k = (void *)ctx->hash_subkey;
+	k = (void *)state->hash_subkey;
 
 	vfp_state = thread_kernel_enable_vfp();
 
-	pmull_gcm_load_round_keys(enc_key, rounds);
+	pmull_gcm_load_round_keys(ek->data, ek->rounds);
 
-	if (m == TEE_MODE_ENCRYPT)
-		pmull_gcm_encrypt(num_blocks, dg, dst, src, k, ctr, rounds,
-				  ctx->buf_cryp);
+	if (mode == TEE_MODE_ENCRYPT)
+		pmull_gcm_encrypt(num_blocks, dg, dst, src, k, ctr, ek->rounds,
+				  state->buf_cryp);
 	else
-		pmull_gcm_decrypt(num_blocks, dg, dst, src, k, ctr, rounds);
+		pmull_gcm_decrypt(num_blocks, dg, dst, src, k, ctr, ek->rounds);
 
 	thread_kernel_disable_vfp(vfp_state);
 
-	put_be_block(ctx->ctr, ctr);
-	put_be_block(ctx->hash_state, dg);
+	put_be_block(state->ctr, ctr);
+	put_be_block(state->hash_state, dg);
 }
 #endif /*ARM64*/
diff --git a/core/arch/arm/crypto/ghash-ce-core_a64.S b/core/arch/arm/crypto/ghash-ce-core_a64.S
index af40c556352..af9803018e0 100644
--- a/core/arch/arm/crypto/ghash-ce-core_a64.S
+++ b/core/arch/arm/crypto/ghash-ce-core_a64.S
@@ -464,3 +464,17 @@ ENTRY(pmull_gcm_load_round_keys)
 	load_round_keys	w1, x0
 	ret
ENDPROC(pmull_gcm_load_round_keys)
+
+	/*
+	 * uint32_t pmull_gcm_aes_sub(uint32_t input)
+	 *
+	 * use the aese instruction to perform the AES sbox substitution
+	 * on each byte in 'input'
+	 */
+ENTRY(pmull_gcm_aes_sub)
+	dup	v1.4s, w0
+	movi	v0.16b, #0
+	aese	v0.16b, v1.16b
+	umov	w0, v0.4s[0]
+	ret
+ENDPROC(pmull_gcm_aes_sub)
diff --git a/core/arch/arm/include/crypto/ghash-ce-core.h b/core/arch/arm/include/crypto/ghash-ce-core.h
index 657910e4863..c6acb395ac9 100644
--- a/core/arch/arm/include/crypto/ghash-ce-core.h
+++ b/core/arch/arm/include/crypto/ghash-ce-core.h
@@ -15,7 +15,7 @@ void pmull_ghash_update_p64(int blocks, uint64_t dg[2], const uint8_t *src,
 void pmull_ghash_update_p8(int blocks, uint64_t dg[2], const uint8_t *src,
 			   const uint64_t k[2], const uint8_t *head);
 
-void pmull_gcm_load_round_keys(uint64_t rk[30], int rounds);
+void pmull_gcm_load_round_keys(const uint64_t rk[30], int rounds);
 
 void pmull_gcm_encrypt(int blocks, uint64_t dg[2], uint8_t dst[],
 		       const uint8_t src[], const uint64_t k[2],
@@ -26,6 +26,8 @@ void pmull_gcm_decrypt(int blocks, uint64_t dg[2], uint8_t dst[],
 		       const uint8_t src[], const uint64_t k[2],
 		       uint64_t ctr[], int rounds);
 
+uint32_t pmull_gcm_aes_sub(uint32_t input);
+
 void pmull_gcm_encrypt_block(uint8_t dst[], const uint8_t src[], int rounds);
 
 #endif /*__GHASH_CE_CORE_H*/
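A note on the sizing of the new key container: internal_aes_gcm_expand_enc_key() writes the expanded schedule into uint64_t data[30] (declared in core/include/crypto/internal_aes-gcm.h further down). AES-256 is the worst case: 14 rounds, so the schedule holds 4 * (14 + 1) = 60 32-bit words, which is exactly 30 64-bit words. A minimal compile-time sketch of that arithmetic, assuming only <stdint.h> (the macro names are illustrative, not from the patch):

	#include <stdint.h>

	/* 6 + key_len / 4, as computed in internal_aes_gcm_expand_enc_key() */
	#define AES256_ROUNDS		(6 + 32 / 4)			/* 14 */
	#define AES256_KSCHED_WORDS	(4 * (AES256_ROUNDS + 1))	/* 60 */

	/* Fails to compile if the schedule would not fit uint64_t data[30] */
	typedef char aes_ksched_fits[(AES256_KSCHED_WORDS * sizeof(uint32_t) <=
				      30 * sizeof(uint64_t)) ? 1 : -1];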
diff --git a/core/arch/arm/mm/pager_aes_gcm.c b/core/arch/arm/mm/pager_aes_gcm.c
deleted file mode 100644
index 871c2005ffc..00000000000
--- a/core/arch/arm/mm/pager_aes_gcm.c
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Galois/Counter Mode (GCM) and GMAC with AES
- *
- * Copyright (c) 2016, Linaro Limited
- * Copyright (c) 2012, Jouni Malinen
- *
- * This software may be distributed under the terms of the BSD license.
- * See README for more details.
- *
- * The license part of what was the "README" above:
- * License
- * -------
- *
- * This software may be distributed, used, and modified under the terms of
- * BSD license:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name(s) of the above-listed copyright holder(s) nor the
- *    names of its contributors may be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include "pager_private.h"
-#include
-#include
-#include
-#include
-
-/*
- * Source copied from git://w1.fi/srv/git/hostap.git files
- * src/utils/common.h and src/crypto/aes-gcm.c
- *
- * The source has been modified for the pager use case.
- */
-
-#define BLOCK_ALIGNMENT	sizeof(uint64_t)
-
-static void aes_encrypt(symmetric_key *skey, const uint8_t *plain,
-			uint8_t *crypt)
-{
-	aes_ecb_encrypt(plain, crypt, skey);
-}
-
-static void inc32(uint8_t *block)
-{
-	uint32_t val;
-
-	val = get_be32(block + TEE_AES_BLOCK_SIZE - 4);
-	val++;
-	put_be32(block + TEE_AES_BLOCK_SIZE - 4, val);
-}
-
-static void xor_block(void *dst, const void *src)
-{
-	uint64_t *d = dst;
-	const uint64_t *s = src;
-
-	*d++ ^= *s++;
-	*d++ ^= *s++;
-}
-
-static void shift_right_block(uint8_t *v)
-{
-	uint32_t next_val;
-	uint32_t val;
-
-	val = get_be32(v + 12);
-	next_val = get_be32(v + 8);
-	val >>= 1;
-	val |= next_val << 31;
-	put_be32(v + 12, val);
-
-	val = next_val;
-	next_val = get_be32(v + 4);
-	val >>= 1;
-	val |= next_val << 31;
-	put_be32(v + 8, val);
-
-	val = next_val;
-	next_val = get_be32(v);
-	val >>= 1;
-	val |= next_val << 31;
-	put_be32(v + 4, val);
-
-	val = next_val;
-	val >>= 1;
-	put_be32(v, val);
-}
-
-/* Multiplication in GF(2^128) */
-static void gf_mult(const uint8_t *x, const uint8_t *y, uint8_t *z)
-{
-	uint8_t v[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	unsigned i;
-	unsigned j;
-
-	memset(z, 0, TEE_AES_BLOCK_SIZE); /* Z_0 = 0^128 */
-	memcpy(v, y, TEE_AES_BLOCK_SIZE); /* V_0 = Y */
-
-	for (i = 0; i < TEE_AES_BLOCK_SIZE; i++) {
-		for (j = 0; j < 8; j++) {
-			if (x[i] & BIT(7 - j)) {
-				/* Z_(i + 1) = Z_i XOR V_i */
-				xor_block(z, v);
-			} else {
-				/* Z_(i + 1) = Z_i */
-			}
-
-			if (v[15] & 0x01) {
-				/* V_(i + 1) = (V_i >> 1) XOR R */
-				shift_right_block(v);
-				/* R = 11100001 || 0^120 */
-				v[0] ^= 0xe1;
-			} else {
-				/* V_(i + 1) = V_i >> 1 */
-				shift_right_block(v);
-			}
-		}
-	}
-}
-
-static void ghash_start(uint8_t *y)
-{
-	/* Y_0 = 0^128 */
-	memset(y, 0, TEE_AES_BLOCK_SIZE);
-}
-
-
-static void ghash(const uint8_t *h, const uint8_t *in, size_t len, uint8_t *out)
-{
-	size_t n;
-	uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-
-	/* We're only dealing with complete blocks */
-	assert(!(len % TEE_AES_BLOCK_SIZE));
-
-	for (n = 0; n < len; n += TEE_AES_BLOCK_SIZE) {
-		/* Y_i = (Y^(i-1) XOR X_i) dot H */
-		xor_block(out, in + n);
-
-		/* dot operation:
-		 * multiplication operation for binary Galois (finite) field of
-		 * 2^128 elements */
-		gf_mult(out, h, tmp);
-		memcpy(out, tmp, TEE_AES_BLOCK_SIZE);
-	}
-	/* Return Y_m */
-}
-
-static bool aes_gcm_init_hash_subkey(symmetric_key *skey, const uint8_t *key,
-				     size_t key_len, uint8_t *H)
-{
-	if (aes_setup(key, key_len, 0, skey) != CRYPT_OK)
-		return false;
-
-	/* Generate hash subkey H = AES_K(0^128) */
-	memset(H, 0, TEE_AES_BLOCK_SIZE);
-	aes_encrypt(skey, H, H);
-	return true;
-}
-
-
-static void aes_gcm_prepare_j0(const struct pager_aes_gcm_iv *iv, uint8_t *J0)
-{
-	/* Prepare block J_0 = IV || 0^31 || 1 [len(IV) = 96] */
-	memcpy(J0, iv, sizeof(*iv));
-	memset(J0 + sizeof(*iv), 0, TEE_AES_BLOCK_SIZE - sizeof(*iv));
-	J0[TEE_AES_BLOCK_SIZE - 1] = 0x01;
-}
-
-static void aes_gcm_core(symmetric_key *skey, bool enc, const uint8_t *J0,
-			 const uint8_t *H, const uint8_t *in, size_t len,
-			 uint8_t *out, uint8_t *tmp, uint8_t *S)
-{
-	uint8_t J0inc[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	size_t n;
-
-	/* We're only dealing with complete blocks */
-	assert(len && !(len % TEE_AES_BLOCK_SIZE));
-
-	/*
-	 * Below in the loop we're doing the encryption and hashing
-	 * on each block interleaved since the encrypted data is stored
-	 * in less secure memory.
-	 */
-
-	/*
-	 * u = 128 * ceil[len(C)/128] - len(C)
-	 * v = 128 * ceil[len(A)/128] - len(A)
-	 * S = GHASH_H(A || 0^v || C || 0^u || [len(A)]64 || [len(C)]64)
-	 * (i.e., zero padded to block size A || C and lengths of each in bits)
-	 */
-	ghash_start(S);
-
-
-	memcpy(J0inc, J0, TEE_AES_BLOCK_SIZE);
-	inc32(J0inc);
-
-	/* Full blocks */
-	for (n = 0; n < len; n += TEE_AES_BLOCK_SIZE) {
-		aes_encrypt(skey, J0inc, tmp);
-		xor_block(tmp, in + n);
-		memcpy(out + n, tmp, TEE_AES_BLOCK_SIZE);
-		inc32(J0inc);
-
-		/* Hash */
-		if (enc)
-			xor_block(S, tmp);
-		else
-			xor_block(S, in + n);
-		gf_mult(S, H, tmp);
-		memcpy(S, tmp, TEE_AES_BLOCK_SIZE);
-	}
-
-	put_be64(tmp, 0); /* no aad */
-	put_be64(tmp + 8, len * 8);
-	ghash(H, tmp, TEE_AES_BLOCK_SIZE, S);
-}
-
-/**
- * aes_gcm_ae - GCM-AE_K(IV, P, A)
- */
-static bool aes_gcm_ae(const uint8_t *key, size_t key_len,
-		       const struct pager_aes_gcm_iv *iv,
-		       const uint8_t *plain, size_t plain_len,
-		       uint8_t *crypt, uint8_t *tag)
-{
-	symmetric_key skey;
-	uint8_t H[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	uint8_t J0[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	uint8_t S[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-
-	if (!aes_gcm_init_hash_subkey(&skey, key, key_len, H))
-		return false;
-
-	aes_gcm_prepare_j0(iv, J0);
-
-	/* C = GCTR_K(inc_32(J_0), P) */
-	aes_gcm_core(&skey, true, J0, H, plain, plain_len, crypt, tmp, S);
-
-	/* T = MSB_t(GCTR_K(J_0, S)) */
-	aes_encrypt(&skey, J0, tag);
-	xor_block(tag, S);
-
-	/* Return (C, T) */
-
-	aes_done(&skey);
-
-	return true;
-}
-
-/**
- * aes_gcm_ad - GCM-AD_K(IV, C, A, T)
- */
-static bool aes_gcm_ad(const uint8_t *key, size_t key_len,
-		       const struct pager_aes_gcm_iv *iv,
-		       const uint8_t *crypt, size_t crypt_len,
-		       const uint8_t *tag, uint8_t *plain)
-{
-	symmetric_key skey;
-	uint8_t H[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	uint8_t J0[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	uint8_t S[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-	uint8_t tmp[TEE_AES_BLOCK_SIZE] __aligned(BLOCK_ALIGNMENT);
-
-	if (!aes_gcm_init_hash_subkey(&skey, key, key_len, H))
-		return false;
-
-	aes_gcm_prepare_j0(iv, J0);
-
-	/* P = GCTR_K(inc_32(J_0), C) */
-	aes_gcm_core(&skey, false, J0, H, crypt, crypt_len, plain, tmp, S);
-
-	/* T' = MSB_t(GCTR_K(J_0, S)) */
-	aes_encrypt(&skey, J0, tmp);
-	xor_block(tmp, S);
-
-	aes_done(&skey);
-
-	return !buf_compare_ct(tag, tmp, TEE_AES_BLOCK_SIZE);
-}
-
-static bool check_block_alignment(const void *p)
-{
-	return !((vaddr_t)p % BLOCK_ALIGNMENT);
-}
-
-bool pager_aes_gcm_decrypt(const void *key, size_t keylen,
-			   const struct pager_aes_gcm_iv *iv,
-			   const uint8_t tag[PAGER_AES_GCM_TAG_LEN],
-			   const void *src, void *dst, size_t datalen)
-{
-	if (!datalen || (datalen % TEE_AES_BLOCK_SIZE) ||
-	    !check_block_alignment(src) || !check_block_alignment(dst))
-		return false;
-	return aes_gcm_ad(key, keylen, iv, src, datalen, tag, dst);
-}
-
-bool pager_aes_gcm_encrypt(const void *key, size_t keylen,
-			   const struct pager_aes_gcm_iv *iv,
-			   uint8_t tag[PAGER_AES_GCM_TAG_LEN],
-			   const void *src, void *dst, size_t datalen)
-{
-	if (!datalen || (datalen % TEE_AES_BLOCK_SIZE) ||
-	    !check_block_alignment(src) || !check_block_alignment(dst))
-		return false;
-	return aes_gcm_ae(key, keylen, iv, src, datalen, dst, tag);
-}
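The gf_mult() deleted above (and the gfmul() retained in core/crypto/aes-gcm-ghash.c) both implement the bit-serial multiplication from NIST SP 800-38D: for each bit of X, conditionally add V into Z, then halve V, folding the bit that falls off the end back in with R = 0xe1 || 0^120. For reference, the same algorithm written compactly over two 64-bit halves, assuming the get_be64()/put_be64() helpers used elsewhere in this patch:

	/* Z = X . Y in GF(2^128), GHASH bit order, blocks in big endian */
	static void gf128_mul(const uint8_t x[16], const uint8_t y[16],
			      uint8_t z[16])
	{
		uint64_t zh = 0, zl = 0;
		uint64_t vh = get_be64(y);
		uint64_t vl = get_be64(y + 8);
		size_t n;

		for (n = 0; n < 128; n++) {
			if (x[n / 8] & (0x80 >> (n % 8))) {
				zh ^= vh;
				zl ^= vl;
			}
			/* V >>= 1; reduce dropped bit with R = 0xe1 || 0^120 */
			if (vl & 1) {
				vl = (vh << 63) | (vl >> 1);
				vh = (vh >> 1) ^ 0xe100000000000000ULL;
			} else {
				vl = (vh << 63) | (vl >> 1);
				vh >>= 1;
			}
		}
		put_be64(z, zh);
		put_be64(z + 8, zl);
	}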
diff --git a/core/arch/arm/mm/pager_private.h b/core/arch/arm/mm/pager_private.h
deleted file mode 100644
index e7acf954a06..00000000000
--- a/core/arch/arm/mm/pager_private.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include
-
-struct pager_aes_gcm_iv {
-	uint32_t iv[3];
-};
-
-#define PAGER_AES_GCM_TAG_LEN		16
-
-bool pager_aes_gcm_decrypt(const void *key, size_t keylen,
-			   const struct pager_aes_gcm_iv *iv,
-			   const uint8_t tag[PAGER_AES_GCM_TAG_LEN],
-			   const void *src, void *dst, size_t datalen);
-
-bool pager_aes_gcm_encrypt(const void *key, size_t keylen,
-			   const struct pager_aes_gcm_iv *iv,
-			   uint8_t tag[PAGER_AES_GCM_TAG_LEN],
-			   const void *src, void *dst, size_t datalen);
-
diff --git a/core/arch/arm/mm/sub.mk b/core/arch/arm/mm/sub.mk
index 293957afc59..388535213d8 100644
--- a/core/arch/arm/mm/sub.mk
+++ b/core/arch/arm/mm/sub.mk
@@ -1,6 +1,5 @@
 srcs-y += core_mmu.c
 srcs-$(CFG_WITH_PAGER) += tee_pager.c
-srcs-$(CFG_WITH_PAGER) += pager_aes_gcm.c
 srcs-y += tee_mmu.c
 ifeq ($(CFG_WITH_LPAE),y)
 srcs-y += core_mmu_lpae.c
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index 3f69e0eeb7a..a06c8ecff78 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <crypto/internal_aes-gcm.h>
 #include
 #include
 #include
@@ -50,10 +51,14 @@
 #include
 #include
 
-#include "pager_private.h"
-
 #define PAGER_AE_KEY_BITS	256
 
+struct pager_aes_gcm_iv {
+	uint32_t iv[3];
+};
+
+#define PAGER_AES_GCM_TAG_LEN	16
+
 struct pager_rw_pstate {
 	uint64_t iv;
 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
@@ -111,7 +116,7 @@ static struct tee_pager_pmem_head tee_pager_pmem_head =
 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
 
-static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
+static struct internal_aes_gcm_key pager_ae_key;
 
 /* number of pages hidden */
 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
@@ -386,8 +391,13 @@ static void set_alias_area(tee_mm_entry_t *mm)
 
 static void generate_ae_key(void)
 {
-	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
+	uint8_t key[PAGER_AE_KEY_BITS / 8];
+
+	if (rng_generate(key, sizeof(key)) != TEE_SUCCESS)
 		panic("failed to generate random");
+	if (internal_aes_gcm_expand_enc_key(key, sizeof(key),
+					    &pager_ae_key))
+		panic("failed to expand key");
 }
 
 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
@@ -641,14 +651,17 @@ static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
 			 void *dst)
 {
 	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
	};
+	size_t tag_len = sizeof(rwp->tag);
 
-	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
-				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
+	return !internal_aes_gcm_dec(&pager_ae_key, &iv, sizeof(iv),
+				     NULL, 0, src, SMALL_PAGE_SIZE, dst,
+				     rwp->tag, tag_len);
 }
 
 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
 {
 	struct pager_aes_gcm_iv iv;
+	size_t tag_len = sizeof(rwp->tag);
 
 	assert((rwp->iv + 1) > rwp->iv);
 	rwp->iv++;
@@ -662,9 +675,8 @@ static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
 	iv.iv[1] = rwp->iv >> 32;
 	iv.iv[2] = rwp->iv;
 
-	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
-				   &iv, rwp->tag,
-				   src, dst, SMALL_PAGE_SIZE))
+	if (internal_aes_gcm_enc(&pager_ae_key, &iv, sizeof(iv), NULL, 0,
+				 src, SMALL_PAGE_SIZE, dst, rwp->tag, &tag_len))
 		panic("gcm failed");
 }
diff --git a/core/crypto.mk b/core/crypto.mk
index 2dc24d9ec17..5268cf0532a 100644
--- a/core/crypto.mk
+++ b/core/crypto.mk
@@ -49,17 +49,27 @@ endif
 endif
 
 ifeq ($(CFG_CRYPTO_WITH_CE),y)
+
+$(call force,CFG_AES_GCM_TABLE_BASED,n,conflicts with CFG_CRYPTO_WITH_CE)
+
 ifeq ($(CFG_ARM32_core),y)
 CFG_CRYPTO_AES_ARM32_CE ?= $(CFG_CRYPTO_AES)
 CFG_CRYPTO_SHA1_ARM32_CE ?= $(CFG_CRYPTO_SHA1)
 CFG_CRYPTO_SHA256_ARM32_CE ?= $(CFG_CRYPTO_SHA256)
 endif
+
 ifeq ($(CFG_ARM64_core),y)
 CFG_CRYPTO_AES_ARM64_CE ?= $(CFG_CRYPTO_AES)
 CFG_CRYPTO_SHA1_ARM64_CE ?= $(CFG_CRYPTO_SHA1)
 CFG_CRYPTO_SHA256_ARM64_CE ?= $(CFG_CRYPTO_SHA256)
 endif
-endif
+
+else #CFG_CRYPTO_WITH_CE
+
+CFG_AES_GCM_TABLE_BASED ?= y
+
+endif #!CFG_CRYPTO_WITH_CE
+
 # Cryptographic extensions can only be used safely when OP-TEE knows how to
 # preserve the VFP context
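tee_pager.c builds its 96-bit GCM IV from the address of the per-page pager_rw_pstate plus a 64-bit counter that is bumped on every encryption; the assert() in encrypt_page() guards against the counter wrapping, since GCM's security collapses if a (key, IV) pair is ever reused. The construction, pulled out as a standalone sketch for clarity (purely illustrative, not a function in the patch):

	/* Mirrors the IV layout used by encrypt_page()/decrypt_page() */
	static void pager_build_iv(struct pager_aes_gcm_iv *iv,
				   const struct pager_rw_pstate *rwp)
	{
		iv->iv[0] = (vaddr_t)rwp;	/* unique per page state */
		iv->iv[1] = rwp->iv >> 32;	/* counter, high word */
		iv->iv[2] = rwp->iv;		/* counter, low word */
	}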
The result is stored in the same way, that + * is the high-order bit of HH corresponds to P^0 and the low-order bit of HL + * corresponds to P^127. + */ +void internal_aes_gcm_ghash_gen_tbl(struct internal_aes_gcm_state *state, + const struct internal_aes_gcm_key *ek) +{ + int i, j; + uint64_t vl, vh; + unsigned char h[16]; + + memset(h, 0, 16); + internal_aes_gcm_encrypt_block(ek, h, h); + + vh = get_be64(h); + vl = get_be64(h + 8); + + /* 8 = 1000 corresponds to 1 in GF(2^128) */ + state->HL[8] = vl; + state->HH[8] = vh; + + /* 0 corresponds to 0 in GF(2^128) */ + state->HH[0] = 0; + state->HL[0] = 0; + + for (i = 4; i > 0; i >>= 1) { + uint32_t T = (vl & 1) * 0xe1000000U; + + vl = (vh << 63) | (vl >> 1); + vh = (vh >> 1) ^ ((uint64_t)T << 32); + + state->HL[i] = vl; + state->HH[i] = vh; + } + + for (i = 2; i <= 8; i *= 2) { + uint64_t *HiL = state->HL + i, *HiH = state->HH + i; + + vh = *HiH; + vl = *HiL; + for (j = 1; j < i; j++) { + HiH[j] = vh ^ state->HH[j]; + HiL[j] = vl ^ state->HL[j]; + } + } +} + +/* + * Shoup's method for multiplication use this table with + * last4[x] = x times P^128 + * where x and last4[x] are seen as elements of GF(2^128) as in [MGV] + */ +static const uint64_t last4[16] = { + 0x0000, 0x1c20, 0x3840, 0x2460, + 0x7080, 0x6ca0, 0x48c0, 0x54e0, + 0xe100, 0xfd20, 0xd940, 0xc560, + 0x9180, 0x8da0, 0xa9c0, 0xb5e0 +}; + +/* + * Sets output to x times H using the precomputed tables. + * x and output are seen as elements of GF(2^128) as in [MGV]. + */ +static void gcm_mult(struct internal_aes_gcm_state *state, + const unsigned char x[16], unsigned char output[16]) +{ + int i = 0; + unsigned char lo, hi, rem; + uint64_t zh, zl; + + lo = x[15] & 0xf; + + zh = state->HH[lo]; + zl = state->HL[lo]; + + for (i = 15; i >= 0; i--) { + lo = x[i] & 0xf; + hi = x[i] >> 4; + + if (i != 15) { + rem = (unsigned char)zl & 0xf; + zl = (zh << 60) | (zl >> 4); + zh = (zh >> 4); + zh ^= (uint64_t)last4[rem] << 48; + zh ^= state->HH[lo]; + zl ^= state->HL[lo]; + } + + rem = (unsigned char)zl & 0xf; + zl = (zh << 60) | (zl >> 4); + zh = (zh >> 4); + zh ^= (uint64_t)last4[rem] << 48; + zh ^= state->HH[hi]; + zl ^= state->HL[hi]; + } + + put_be64(output, zh); + put_be64(output + 8, zl); +} + +void internal_aes_gcm_ghash_update_block(struct internal_aes_gcm_state *state, + const void *data) +{ + void *y = state->hash_state; + + internal_aes_gcm_xor_block(y, data); + gcm_mult(state, y, y); +} diff --git a/core/crypto/aes-gcm-ghash.c b/core/crypto/aes-gcm-ghash.c index f6dae768f61..dcbdcfb58b4 100644 --- a/core/crypto/aes-gcm-ghash.c +++ b/core/crypto/aes-gcm-ghash.c @@ -21,14 +21,7 @@ #include #include -static void xor_block(void *dst, const void *src) -{ - uint64_t *d = dst; - const uint64_t *s = src; - - d[0] ^= s[0]; - d[1] ^= s[1]; -} +#include "aes-gcm-private.h" /* * gfmul() is based on ghash_gfmul() from @@ -48,7 +41,7 @@ static void gfmul(const uint64_t X[2], const uint64_t Y[2], uint64_t product[2]) for (n = 0; n < TEE_AES_BLOCK_SIZE * 8; n++) { /* update Z */ if (x[n >> 3] & (1 << (~n & 7))) - xor_block(z, y); + internal_aes_gcm_xor_block(z, y); /* update Y */ mul = y[1] & 1; @@ -60,20 +53,11 @@ static void gfmul(const uint64_t X[2], const uint64_t Y[2], uint64_t product[2]) product[1] = TEE_U64_TO_BIG_ENDIAN(z[1]); } -void __weak internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx, - const void *head, const void *data, - size_t num_blocks) +void internal_aes_gcm_ghash_update_block(struct internal_aes_gcm_state *state, + const void *data) { - const uint64_t *x = (const 
diff --git a/core/crypto/aes-gcm-ghash.c b/core/crypto/aes-gcm-ghash.c
index f6dae768f61..dcbdcfb58b4 100644
--- a/core/crypto/aes-gcm-ghash.c
+++ b/core/crypto/aes-gcm-ghash.c
@@ -21,14 +21,7 @@
 #include
 #include
 
-static void xor_block(void *dst, const void *src)
-{
-	uint64_t *d = dst;
-	const uint64_t *s = src;
-
-	d[0] ^= s[0];
-	d[1] ^= s[1];
-}
+#include "aes-gcm-private.h"
 
 /*
  * gfmul() is based on ghash_gfmul() from
@@ -48,7 +41,7 @@ static void gfmul(const uint64_t X[2], const uint64_t Y[2], uint64_t product[2])
 	for (n = 0; n < TEE_AES_BLOCK_SIZE * 8; n++) {
 		/* update Z */
 		if (x[n >> 3] & (1 << (~n & 7)))
-			xor_block(z, y);
+			internal_aes_gcm_xor_block(z, y);
 
 		/* update Y */
 		mul = y[1] & 1;
@@ -60,20 +53,11 @@ static void gfmul(const uint64_t X[2], const uint64_t Y[2], uint64_t product[2])
 	product[1] = TEE_U64_TO_BIG_ENDIAN(z[1]);
 }
 
-void __weak internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx,
-					  const void *head, const void *data,
-					  size_t num_blocks)
+void internal_aes_gcm_ghash_update_block(struct internal_aes_gcm_state *state,
+					 const void *data)
 {
-	const uint64_t *x = (const void *)data;
-	void *y = ctx->hash_state;
-	size_t n;
+	void *y = state->hash_state;
 
-	if (head) {
-		xor_block(y, head);
-		gfmul((void *)ctx->hash_subkey, y, y);
-	}
-	for (n = 0; n < num_blocks; n++) {
-		xor_block(y, x + n * 2);
-		gfmul((void *)ctx->hash_subkey, y, y);
-	}
+	internal_aes_gcm_xor_block(y, data);
+	gfmul((void *)state->hash_subkey, y, y);
 }
diff --git a/core/crypto/aes-gcm-private.h b/core/crypto/aes-gcm-private.h
new file mode 100644
index 00000000000..a4903005e0b
--- /dev/null
+++ b/core/crypto/aes-gcm-private.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include
+#include
+#include
+
+static inline void internal_aes_gcm_xor_block(void *dst, const void *src)
+{
+	uint64_t *d = dst;
+	const uint64_t *s = src;
+
+	d[0] ^= s[0];
+	d[1] ^= s[1];
+}
+
+static inline bool internal_aes_gcm_ptr_is_block_aligned(const void *p)
+{
+	return !((vaddr_t)p & (TEE_AES_BLOCK_SIZE - 1));
+}
+
+void internal_aes_gcm_ghash_gen_tbl(struct internal_aes_gcm_state *state,
+				    const struct internal_aes_gcm_key *enc_key);
+void internal_aes_gcm_ghash_update_block(struct internal_aes_gcm_state *state,
+					 const void *data);
diff --git a/core/crypto/aes-gcm-sw.c b/core/crypto/aes-gcm-sw.c
index 5c05e53b8f5..8df52d6464f 100644
--- a/core/crypto/aes-gcm-sw.c
+++ b/core/crypto/aes-gcm-sw.c
@@ -5,98 +5,88 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
-/*
- * gfmul() is based on ghash_gfmul from
- * https://github.com/openbsd/src/blob/master/sys/crypto/gmac.c
- * Which is:
- * Copyright (c) 2010 Mike Belopuhov
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
+#include
+#include
 #include
-#include
 #include
 #include
-#include
 #include
 
-static bool __maybe_unused ptr_is_block_aligned(const void *p)
-{
-	return !((vaddr_t)p & (TEE_AES_BLOCK_SIZE - 1));
-}
+#include "aes-gcm-private.h"
 
-static void xor_block(void *dst, const void *src)
+void __weak internal_aes_gcm_set_key(struct internal_aes_gcm_state *state,
+				     const struct internal_aes_gcm_key *ek)
 {
-	uint64_t *d = dst;
-	const uint64_t *s = src;
-
-	d[0] ^= s[0];
-	d[1] ^= s[1];
+#ifdef CFG_AES_GCM_TABLE_BASED
+	internal_aes_gcm_ghash_gen_tbl(state, ek);
+#else
+	internal_aes_gcm_encrypt_block(ek, state->ctr, state->hash_subkey);
+#endif
 }
 
-TEE_Result __weak internal_aes_gcm_set_key(struct internal_aes_gcm_ctx *ctx,
-					   const void *key, size_t key_len)
+void __weak internal_aes_gcm_ghash_update(struct internal_aes_gcm_state *state,
+					  const void *head, const void *data,
+					  size_t num_blocks)
 {
-	if (aes_setup(key, key_len, 0, &ctx->skey))
-		return TEE_ERROR_BAD_PARAMETERS;
+	size_t n;
 
-	if (aes_ecb_encrypt((void *)ctx->ctr, ctx->hash_subkey, &ctx->skey))
-		panic();
+	if (head)
+		internal_aes_gcm_ghash_update_block(state, head);
 
-	return TEE_SUCCESS;
+	for (n = 0; n < num_blocks; n++)
+		internal_aes_gcm_ghash_update_block(state, (uint8_t *)data +
+						    n * TEE_AES_BLOCK_SIZE);
 }
 
 void __weak
-internal_aes_gcm_update_payload_block_aligned(struct internal_aes_gcm_ctx *ctx,
-					      TEE_OperationMode m,
-					      const void *src,
-					      size_t num_blocks, void *dst)
+internal_aes_gcm_update_payload_block_aligned(
+				struct internal_aes_gcm_state *state,
+				const struct internal_aes_gcm_key *ek,
+				TEE_OperationMode m, const void *src,
+				size_t num_blocks, void *dst)
 {
 	size_t n;
 	const uint8_t *s = src;
 	uint8_t *d = dst;
+	void *ctr = state->ctr;
+	void *buf_cryp = state->buf_cryp;
 
-	assert(!ctx->buf_pos && num_blocks &&
-	       ptr_is_block_aligned(s) && ptr_is_block_aligned(d));
+	assert(!state->buf_pos && num_blocks &&
+	       internal_aes_gcm_ptr_is_block_aligned(s) &&
+	       internal_aes_gcm_ptr_is_block_aligned(d));
 
 	for (n = 0; n < num_blocks; n++) {
 		if (m == TEE_MODE_ENCRYPT) {
-			xor_block(ctx->buf_cryp, s);
-			internal_aes_gcm_ghash_update(ctx, ctx->buf_cryp,
-						      NULL, 0);
-			memcpy(d, ctx->buf_cryp, sizeof(ctx->buf_cryp));
-			internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
-						       ctx->buf_cryp);
-			internal_aes_gcm_inc_ctr(ctx);
+			internal_aes_gcm_xor_block(buf_cryp, s);
+			internal_aes_gcm_ghash_update(state, buf_cryp, NULL, 0);
+			memcpy(d, buf_cryp, sizeof(state->buf_cryp));
+
+			internal_aes_gcm_encrypt_block(ek, ctr, buf_cryp);
+			internal_aes_gcm_inc_ctr(state);
 		} else {
-			internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
-						       ctx->buf_cryp);
+			internal_aes_gcm_encrypt_block(ek, ctr, buf_cryp);
 
-			xor_block(ctx->buf_cryp, s);
-			internal_aes_gcm_ghash_update(ctx, s, NULL, 0);
-			memcpy(d, ctx->buf_cryp, sizeof(ctx->buf_cryp));
+			internal_aes_gcm_xor_block(buf_cryp, s);
+			internal_aes_gcm_ghash_update(state, s, NULL, 0);
+			memcpy(d, buf_cryp, sizeof(state->buf_cryp));
 
-			internal_aes_gcm_inc_ctr(ctx);
+			internal_aes_gcm_inc_ctr(state);
 		}
 		s += TEE_AES_BLOCK_SIZE;
 		d += TEE_AES_BLOCK_SIZE;
 	}
 }
 
-void __weak internal_aes_gcm_encrypt_block(struct internal_aes_gcm_ctx *ctx,
-					   const void *src, void *dst)
+void __weak
+internal_aes_gcm_encrypt_block(const struct internal_aes_gcm_key *ek,
+			       const void *src, void *dst)
+{
+	crypto_aes_enc_block(ek->data, ek->rounds, src, dst);
+}
+
+TEE_Result __weak
+internal_aes_gcm_expand_enc_key(const void *key, size_t key_len,
+				struct internal_aes_gcm_key *ek)
 {
-	if (aes_ecb_encrypt(src, dst, &ctx->skey))
-		panic();
+	return crypto_aes_expand_enc_key(key, key_len, ek->data, &ek->rounds);
}
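Every function in aes-gcm-sw.c is declared __weak, so a platform (or, as in aes-gcm-ce.c above, the Crypto Extensions code) can replace any of them at link time simply by providing a strong definition with the same signature. A sketch of such an override; my_hw_aes_ecb_encrypt() is a made-up placeholder for a platform driver call:

	/* Strong definition shadowing the __weak default in aes-gcm-sw.c */
	void internal_aes_gcm_encrypt_block(const struct internal_aes_gcm_key *ek,
					    const void *src, void *dst)
	{
		/* hypothetical hardware AES-ECB single-block primitive */
		my_hw_aes_ecb_encrypt(ek->data, ek->rounds, src, dst);
	}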
diff --git a/core/crypto/aes-gcm.c b/core/crypto/aes-gcm.c
index 4fb1ff3f180..c60a35cca7b 100644
--- a/core/crypto/aes-gcm.c
+++ b/core/crypto/aes-gcm.c
@@ -5,6 +5,7 @@
+#include
 #include
 #include
 #include
@@ -14,6 +15,8 @@
 #include
 #include
 
+#include "aes-gcm-private.h"
+
 static void xor_buf(uint8_t *dst, const uint8_t *src, size_t len)
 {
 	size_t n;
@@ -22,20 +25,16 @@ static void xor_buf(uint8_t *dst, const uint8_t *src, size_t len)
 		dst[n] ^= src[n];
 }
 
-static bool ptr_is_block_aligned(const void *p)
-{
-	return !((vaddr_t)p & (TEE_AES_BLOCK_SIZE - 1));
-}
-
-static void ghash_update_pad_zero(struct internal_aes_gcm_ctx *ctx,
+static void ghash_update_pad_zero(struct internal_aes_gcm_state *state,
 				  const uint8_t *data, size_t len)
 {
 	size_t n = len / TEE_AES_BLOCK_SIZE;
 	uint64_t block[2];
 
 	if (n) {
-		if (ptr_is_block_aligned(data)) {
-			internal_aes_gcm_ghash_update(ctx, NULL, data, n);
+		if (internal_aes_gcm_ptr_is_block_aligned(data)) {
+			internal_aes_gcm_ghash_update(state, NULL, data, n);
 		} else {
 			size_t m;
 
@@ -43,7 +42,7 @@ static void ghash_update_pad_zero(struct internal_aes_gcm_ctx *ctx,
 				memcpy(block, data + m * sizeof(block),
 				       sizeof(block));
 
-				internal_aes_gcm_ghash_update(ctx, NULL,
+				internal_aes_gcm_ghash_update(state, NULL,
 							      (void *)block, 1);
 			}
 		}
@@ -53,12 +52,12 @@ static void ghash_update_pad_zero(struct internal_aes_gcm_ctx *ctx,
 		memset(block, 0, sizeof(block));
 		memcpy(block, data + n * TEE_AES_BLOCK_SIZE,
 		       len - n * TEE_AES_BLOCK_SIZE);
-		internal_aes_gcm_ghash_update(ctx, block, NULL, 0);
+		internal_aes_gcm_ghash_update(state, block, NULL, 0);
 	}
 }
 
-static void ghash_update_lengths(struct internal_aes_gcm_ctx *ctx, uint32_t l1,
-				 uint32_t l2)
+static void ghash_update_lengths(struct internal_aes_gcm_state *state,
+				 uint32_t l1, uint32_t l2)
 {
 	uint64_t len_fields[2] = {
 		TEE_U64_TO_BIG_ENDIAN(l1 * 8),
@@ -66,41 +65,37 @@ static void ghash_update_lengths(struct internal_aes_gcm_ctx *ctx, uint32_t l1,
 	};
 
 	COMPILE_TIME_ASSERT(sizeof(len_fields) == TEE_AES_BLOCK_SIZE);
-	internal_aes_gcm_ghash_update(ctx, (uint8_t *)len_fields, NULL, 0);
+	internal_aes_gcm_ghash_update(state, (uint8_t *)len_fields, NULL, 0);
 }
 
-TEE_Result internal_aes_gcm_init(struct internal_aes_gcm_ctx *ctx,
-				 TEE_OperationMode mode, const void *key,
-				 size_t key_len, const void *nonce,
-				 size_t nonce_len, size_t tag_len)
+static TEE_Result __gcm_init(struct internal_aes_gcm_state *state,
+			     const struct internal_aes_gcm_key *ek,
+			     TEE_OperationMode mode, const void *nonce,
+			     size_t nonce_len, size_t tag_len)
 {
-	TEE_Result res;
+	COMPILE_TIME_ASSERT(sizeof(state->ctr) == TEE_AES_BLOCK_SIZE);
 
-	COMPILE_TIME_ASSERT(sizeof(ctx->ctr) == TEE_AES_BLOCK_SIZE);
-
-	if (tag_len > sizeof(ctx->buf_tag))
+	if (tag_len > sizeof(state->buf_tag))
 		return TEE_ERROR_BAD_PARAMETERS;
 
-	memset(ctx, 0, sizeof(*ctx));
+	memset(state, 0, sizeof(*state));
 
-	ctx->tag_len = tag_len;
-	res = internal_aes_gcm_set_key(ctx, key, key_len);
-	if (res)
-		return res;
+	state->tag_len = tag_len;
+	internal_aes_gcm_set_key(state, ek);
 
 	if (nonce_len == (96 / 8)) {
-		memcpy(ctx->ctr, nonce, nonce_len);
-		internal_aes_gcm_inc_ctr(ctx);
+		memcpy(state->ctr, nonce, nonce_len);
+		internal_aes_gcm_inc_ctr(state);
 	} else {
-		ghash_update_pad_zero(ctx, nonce, nonce_len);
-		ghash_update_lengths(ctx, 0, nonce_len);
+		ghash_update_pad_zero(state, nonce, nonce_len);
+		ghash_update_lengths(state, 0, nonce_len);
 
-		memcpy(ctx->ctr, ctx->hash_state, sizeof(ctx->ctr));
-		memset(ctx->hash_state, 0, sizeof(ctx->hash_state));
+		memcpy(state->ctr, state->hash_state, sizeof(state->ctr));
+		memset(state->hash_state, 0, sizeof(state->hash_state));
 	}
 
-	internal_aes_gcm_encrypt_block(ctx, ctx->ctr, ctx->buf_tag);
-	internal_aes_gcm_inc_ctr(ctx);
+	internal_aes_gcm_encrypt_block(ek, state->ctr, state->buf_tag);
+	internal_aes_gcm_inc_ctr(state);
 	if (mode == TEE_MODE_ENCRYPT) {
 		/*
 		 * Encryption uses the pre-encrypted xor-buffer to encrypt
@@ -119,48 +114,63 @@ TEE_Result internal_aes_gcm_init(struct internal_aes_gcm_ctx *ctx,
 		 * accelerated routines it's more convenient to always have
 		 * this optimization activated.
 		 */
-		internal_aes_gcm_encrypt_block(ctx, ctx->ctr, ctx->buf_cryp);
-		internal_aes_gcm_inc_ctr(ctx);
+		internal_aes_gcm_encrypt_block(ek, state->ctr, state->buf_cryp);
+		internal_aes_gcm_inc_ctr(state);
 	}
 
 	return TEE_SUCCESS;
 }
 
-TEE_Result internal_aes_gcm_update_aad(struct internal_aes_gcm_ctx *ctx,
-				       const void *data, size_t len)
+TEE_Result internal_aes_gcm_init(struct internal_aes_gcm_ctx *ctx,
+				 TEE_OperationMode mode, const void *key,
+				 size_t key_len, const void *nonce,
+				 size_t nonce_len, size_t tag_len)
+{
+	TEE_Result res = internal_aes_gcm_expand_enc_key(key, key_len,
+							 &ctx->key);
+	if (res)
+		return res;
+
+	return __gcm_init(&ctx->state, &ctx->key, mode, nonce, nonce_len,
+			  tag_len);
+}
+
+static TEE_Result __gcm_update_aad(struct internal_aes_gcm_state *state,
+				   const void *data, size_t len)
 {
 	const uint8_t *d = data;
 	size_t l = len;
 	const uint8_t *head = NULL;
 	size_t n;
 
-	if (ctx->payload_bytes)
+	if (state->payload_bytes)
 		return TEE_ERROR_BAD_PARAMETERS;
 
-	ctx->aad_bytes += len;
+	state->aad_bytes += len;
 
 	while (l) {
-		if (ctx->buf_pos || !ptr_is_block_aligned(d) ||
+		if (state->buf_pos ||
+		    !internal_aes_gcm_ptr_is_block_aligned(d) ||
 		    l < TEE_AES_BLOCK_SIZE) {
-			n = MIN(TEE_AES_BLOCK_SIZE - ctx->buf_pos, l);
-			memcpy(ctx->buf_hash + ctx->buf_pos, d, n);
-			ctx->buf_pos += n;
+			n = MIN(TEE_AES_BLOCK_SIZE - state->buf_pos, l);
+			memcpy(state->buf_hash + state->buf_pos, d, n);
+			state->buf_pos += n;
 
-			if (ctx->buf_pos != TEE_AES_BLOCK_SIZE)
+			if (state->buf_pos != TEE_AES_BLOCK_SIZE)
 				return TEE_SUCCESS;
 
-			ctx->buf_pos = 0;
-			head = ctx->buf_hash;
+			state->buf_pos = 0;
+			head = state->buf_hash;
 			d += n;
 			l -= n;
 		}
 
-		if (ptr_is_block_aligned(d))
+		if (internal_aes_gcm_ptr_is_block_aligned(d))
 			n = l / TEE_AES_BLOCK_SIZE;
 		else
 			n = 0;
 
-		internal_aes_gcm_ghash_update(ctx, head, d, n);
+		internal_aes_gcm_ghash_update(state, head, d, n);
 		l -= n * TEE_AES_BLOCK_SIZE;
 		d += n * TEE_AES_BLOCK_SIZE;
 	}
 
 	return TEE_SUCCESS;
 }
 
-TEE_Result internal_aes_gcm_update_payload(struct internal_aes_gcm_ctx *ctx,
-					   TEE_OperationMode mode,
-					   const void *src, size_t len,
-					   void *dst)
+TEE_Result internal_aes_gcm_update_aad(struct internal_aes_gcm_ctx *ctx,
+				       const void *data, size_t len)
+{
+	return __gcm_update_aad(&ctx->state, data, len);
+}
+
+static TEE_Result
+__gcm_update_payload(struct internal_aes_gcm_state *state,
+		     const struct internal_aes_gcm_key *ek,
+		     TEE_OperationMode mode, const void *src,
+		     size_t len, void *dst)
 {
 	size_t n;
 	const uint8_t *s = src;
 	uint8_t *d = dst;
 	size_t l = len;
 
-	if (!ctx->payload_bytes && ctx->buf_pos) {
+	if (!state->payload_bytes && state->buf_pos) {
 		/* AAD part done, finish up the last bits. */
-		memset(ctx->buf_hash + ctx->buf_pos, 0,
-		       TEE_AES_BLOCK_SIZE - ctx->buf_pos);
-		internal_aes_gcm_ghash_update(ctx, ctx->buf_hash, NULL, 0);
-		ctx->buf_pos = 0;
+		memset(state->buf_hash + state->buf_pos, 0,
+		       TEE_AES_BLOCK_SIZE - state->buf_pos);
+		internal_aes_gcm_ghash_update(state, state->buf_hash, NULL, 0);
+		state->buf_pos = 0;
 	}
 
-	ctx->payload_bytes += len;
+	state->payload_bytes += len;
 
 	while (l) {
-		if (ctx->buf_pos || !ptr_is_block_aligned(s) ||
-		    !ptr_is_block_aligned(d) || l < TEE_AES_BLOCK_SIZE) {
-			n = MIN(TEE_AES_BLOCK_SIZE - ctx->buf_pos, l);
+		if (state->buf_pos ||
+		    !internal_aes_gcm_ptr_is_block_aligned(s) ||
+		    !internal_aes_gcm_ptr_is_block_aligned(d) ||
+		    l < TEE_AES_BLOCK_SIZE) {
+			n = MIN(TEE_AES_BLOCK_SIZE - state->buf_pos, l);
 
-			if (!ctx->buf_pos && mode == TEE_MODE_DECRYPT) {
-				internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
-							       ctx->buf_cryp);
+			if (!state->buf_pos && mode == TEE_MODE_DECRYPT) {
+				internal_aes_gcm_encrypt_block(ek, state->ctr,
+							       state->buf_cryp);
 			}
 
-			xor_buf(ctx->buf_cryp + ctx->buf_pos, s, n);
-			memcpy(d, ctx->buf_cryp + ctx->buf_pos, n);
+			xor_buf(state->buf_cryp + state->buf_pos, s, n);
+			memcpy(d, state->buf_cryp + state->buf_pos, n);
 			if (mode == TEE_MODE_ENCRYPT)
-				memcpy(ctx->buf_hash + ctx->buf_pos,
-				       ctx->buf_cryp + ctx->buf_pos, n);
+				memcpy(state->buf_hash + state->buf_pos,
+				       state->buf_cryp + state->buf_pos, n);
 			else
-				memcpy(ctx->buf_hash + ctx->buf_pos, s, n);
+				memcpy(state->buf_hash + state->buf_pos, s, n);
 
-			ctx->buf_pos += n;
+			state->buf_pos += n;
 
-			if (ctx->buf_pos != TEE_AES_BLOCK_SIZE)
+			if (state->buf_pos != TEE_AES_BLOCK_SIZE)
 				return TEE_SUCCESS;
 
-			internal_aes_gcm_ghash_update(ctx, ctx->buf_hash,
+			internal_aes_gcm_ghash_update(state, state->buf_hash,
 						      NULL, 0);
-			ctx->buf_pos = 0;
+			state->buf_pos = 0;
 			d += n;
 			s += n;
 			l -= n;
 
 			if (mode == TEE_MODE_ENCRYPT)
-				internal_aes_gcm_encrypt_block(ctx, ctx->ctr,
-							       ctx->buf_cryp);
-			internal_aes_gcm_inc_ctr(ctx);
+				internal_aes_gcm_encrypt_block(ek, state->ctr,
+							       state->buf_cryp);
+			internal_aes_gcm_inc_ctr(state);
 		} else {
 			n = l / TEE_AES_BLOCK_SIZE;
-			internal_aes_gcm_update_payload_block_aligned(ctx, mode,
+			internal_aes_gcm_update_payload_block_aligned(state, ek,
+								      mode,
 								      s, n, d);
 			s += n * TEE_AES_BLOCK_SIZE;
 			d += n * TEE_AES_BLOCK_SIZE;
@@ -235,79 +255,154 @@ TEE_Result internal_aes_gcm_update_payload(struct internal_aes_gcm_ctx *ctx,
 	return TEE_SUCCESS;
 }
 
-static TEE_Result operation_final(struct internal_aes_gcm_ctx *ctx,
+TEE_Result internal_aes_gcm_update_payload(struct internal_aes_gcm_ctx *ctx,
+					   TEE_OperationMode mode,
+					   const void *src, size_t len,
+					   void *dst)
+{
+	return __gcm_update_payload(&ctx->state, &ctx->key, mode, src, len,
+				    dst);
+}
+
+static TEE_Result operation_final(struct internal_aes_gcm_state *state,
+				  const struct internal_aes_gcm_key *enc_key,
 				  TEE_OperationMode m, const uint8_t *src,
 				  size_t len, uint8_t *dst)
 {
 	TEE_Result res;
 
-	res = internal_aes_gcm_update_payload(ctx, m, src, len, dst);
+	res = __gcm_update_payload(state, enc_key, m, src, len, dst);
 	if (res)
 		return res;
 
-	if (ctx->buf_pos) {
-		memset(ctx->buf_hash + ctx->buf_pos, 0,
-		       sizeof(ctx->buf_hash) - ctx->buf_pos);
-		internal_aes_gcm_ghash_update(ctx, ctx->buf_hash, NULL, 0);
+	if (state->buf_pos) {
+		memset(state->buf_hash + state->buf_pos, 0,
+		       sizeof(state->buf_hash) - state->buf_pos);
+		internal_aes_gcm_ghash_update(state, state->buf_hash, NULL, 0);
 	}
 
-	ghash_update_lengths(ctx, ctx->aad_bytes, ctx->payload_bytes);
+	ghash_update_lengths(state, state->aad_bytes, state->payload_bytes);
 	/* buf_tag was filled in with the first counter block aes_gcm_init() */
-	xor_buf(ctx->buf_tag, ctx->hash_state, ctx->tag_len);
+	xor_buf(state->buf_tag, state->hash_state, state->tag_len);
 
 	return TEE_SUCCESS;
 }
 
-TEE_Result internal_aes_gcm_enc_final(struct internal_aes_gcm_ctx *ctx,
-				      const void *src, size_t len, void *dst,
-				      void *tag, size_t *tag_len)
+static TEE_Result __gcm_enc_final(struct internal_aes_gcm_state *state,
+				  const struct internal_aes_gcm_key *enc_key,
+				  const void *src, size_t len, void *dst,
+				  void *tag, size_t *tag_len)
 {
 	TEE_Result res;
 
-	if (*tag_len < ctx->tag_len)
+	if (*tag_len < state->tag_len)
 		return TEE_ERROR_SHORT_BUFFER;
 
-	res = operation_final(ctx, TEE_MODE_ENCRYPT, src, len, dst);
+	res = operation_final(state, enc_key, TEE_MODE_ENCRYPT, src, len, dst);
 	if (res)
 		return res;
 
-	memcpy(tag, ctx->buf_tag, ctx->tag_len);
-	*tag_len = ctx->tag_len;
+	memcpy(tag, state->buf_tag, state->tag_len);
+	*tag_len = state->tag_len;
 
 	return TEE_SUCCESS;
 }
 
-TEE_Result internal_aes_gcm_dec_final(struct internal_aes_gcm_ctx *ctx,
+TEE_Result internal_aes_gcm_enc_final(struct internal_aes_gcm_ctx *ctx,
 				      const void *src, size_t len, void *dst,
-				      const void *tag, size_t tag_len)
+				      void *tag, size_t *tag_len)
+{
+	return __gcm_enc_final(&ctx->state, &ctx->key, src, len, dst, tag,
+			       tag_len);
+}
+
+static TEE_Result __gcm_dec_final(struct internal_aes_gcm_state *state,
+				  const struct internal_aes_gcm_key *enc_key,
+				  const void *src, size_t len, void *dst,
+				  const void *tag, size_t tag_len)
 {
 	TEE_Result res;
 
-	if (tag_len != ctx->tag_len)
+	if (tag_len != state->tag_len)
 		return TEE_ERROR_MAC_INVALID;
 
-	res = operation_final(ctx, TEE_MODE_DECRYPT, src, len, dst);
+	res = operation_final(state, enc_key, TEE_MODE_DECRYPT, src, len, dst);
 	if (res)
 		return res;
 
-	if (buf_compare_ct(ctx->buf_tag, tag, tag_len))
+	if (buf_compare_ct(state->buf_tag, tag, tag_len))
 		return TEE_ERROR_MAC_INVALID;
 
 	return TEE_SUCCESS;
 }
 
-void internal_aes_gcm_inc_ctr(struct internal_aes_gcm_ctx *ctx)
+TEE_Result internal_aes_gcm_dec_final(struct internal_aes_gcm_ctx *ctx,
+				      const void *src, size_t len, void *dst,
+				      const void *tag, size_t tag_len)
+{
+	return __gcm_dec_final(&ctx->state, &ctx->key, src, len, dst, tag,
+			       tag_len);
+}
+
+void internal_aes_gcm_inc_ctr(struct internal_aes_gcm_state *state)
 {
 	uint64_t c;
 
-	c = TEE_U64_FROM_BIG_ENDIAN(ctx->ctr[1]) + 1;
-	ctx->ctr[1] = TEE_U64_TO_BIG_ENDIAN(c);
+	c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[1]) + 1;
+	state->ctr[1] = TEE_U64_TO_BIG_ENDIAN(c);
 	if (!c) {
-		c = TEE_U64_FROM_BIG_ENDIAN(ctx->ctr[0]) + 1;
-		ctx->ctr[0] = TEE_U64_TO_BIG_ENDIAN(c);
+		c = TEE_U64_FROM_BIG_ENDIAN(state->ctr[0]) + 1;
+		state->ctr[0] = TEE_U64_TO_BIG_ENDIAN(c);
+	}
+}
+
+TEE_Result internal_aes_gcm_enc(const struct internal_aes_gcm_key *enc_key,
+				const void *nonce, size_t nonce_len,
+				const void *aad, size_t aad_len,
+				const void *src, size_t len, void *dst,
+				void *tag, size_t *tag_len)
+{
+	TEE_Result res;
+	struct internal_aes_gcm_state state;
+
+	res = __gcm_init(&state, enc_key, TEE_MODE_ENCRYPT, nonce, nonce_len,
+			 *tag_len);
+	if (res)
+		return res;
+
+	if (aad) {
+		res = __gcm_update_aad(&state, aad, aad_len);
+		if (res)
+			return res;
 	}
+
+	return __gcm_enc_final(&state, enc_key, src, len, dst, tag, tag_len);
 }
 
+TEE_Result internal_aes_gcm_dec(const struct internal_aes_gcm_key *enc_key,
+				const void *nonce, size_t nonce_len,
+				const void *aad, size_t aad_len,
+				const void *src, size_t len, void *dst,
+				const void *tag, size_t tag_len)
+{
+	TEE_Result res;
+	struct internal_aes_gcm_state state;
+
+	res = __gcm_init(&state, enc_key, TEE_MODE_DECRYPT, nonce, nonce_len,
+			 tag_len);
+	if (res)
+		return res;
+
+	if (aad) {
+		res = __gcm_update_aad(&state, aad, aad_len);
+		if (res)
+			return res;
+	}
+
+	return __gcm_dec_final(&state, enc_key, src, len, dst, tag, tag_len);
+}
+
 #ifndef CFG_CRYPTO_AES_GCM_FROM_CRYPTOLIB
 #include
diff --git a/core/crypto/sub.mk b/core/crypto/sub.mk
index 99df944dd5b..3e79b3bfe98 100644
--- a/core/crypto/sub.mk
+++ b/core/crypto/sub.mk
@@ -1,4 +1,8 @@
 srcs-y += crypto.c
 srcs-y += aes-gcm.c
 srcs-y += aes-gcm-sw.c
+ifeq ($(CFG_AES_GCM_TABLE_BASED),y)
+srcs-y += aes-gcm-ghash-tbl.c
+else
 srcs-y += aes-gcm-ghash.c
+endif
diff --git a/core/include/crypto/crypto.h b/core/include/crypto/crypto.h
index 914bbd74b7a..9264f801867 100644
--- a/core/include/crypto/crypto.h
+++ b/core/include/crypto/crypto.h
@@ -278,4 +278,9 @@ TEE_Result crypto_rng_read(void *buf, size_t blen);
 TEE_Result rng_generate(void *buffer, size_t len);
 
+TEE_Result crypto_aes_expand_enc_key(const void *key, size_t key_len,
+				     void *enc_key, unsigned int *rounds);
+void crypto_aes_enc_block(const void *enc_key, unsigned int rounds,
+			  const void *src, void *dst);
+
 #endif /* __CRYPTO_CRYPTO_H */
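The two new crypto_aes_*() hooks give the generic GCM code a provider-neutral way to expand a key once and then encrypt single blocks with it. A usage sketch under the same assumptions the GCM code makes (the 30-element uint64_t buffer mirrors internal_aes_gcm_key::data and is large enough for an AES-256 schedule):

	static TEE_Result enc_one_block(const void *key, size_t key_len,
					const void *in, void *out)
	{
		uint64_t ek[30];	/* fits the largest (AES-256) schedule */
		unsigned int rounds = 0;
		TEE_Result res;

		res = crypto_aes_expand_enc_key(key, key_len, ek, &rounds);
		if (res)
			return res;
		crypto_aes_enc_block(ek, rounds, in, out);
		return TEE_SUCCESS;
	}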
diff --git a/core/include/crypto/internal_aes-gcm.h b/core/include/crypto/internal_aes-gcm.h
index 5fe3a3f86e7..a74c67b5207 100644
--- a/core/include/crypto/internal_aes-gcm.h
+++ b/core/include/crypto/internal_aes-gcm.h
@@ -8,29 +8,41 @@
 #ifndef __CRYPTO_INTERNAL_AES_GCM_H
 #define __CRYPTO_INTERNAL_AES_GCM_H
 
-#include
 #include
 #include
-#include
 
-struct internal_aes_gcm_ctx {
+struct internal_aes_gcm_key {
+	/* AES (CTR) encryption key and number of rounds */
+	uint64_t data[30];
+	unsigned int rounds;
+};
+
+struct internal_aes_gcm_state {
 	uint64_t ctr[2];
 
+#ifdef CFG_AES_GCM_TABLE_BASED
+	uint64_t HL[16];
+	uint64_t HH[16];
+#else
 	uint8_t hash_subkey[TEE_AES_BLOCK_SIZE];
+#endif
 	uint8_t hash_state[TEE_AES_BLOCK_SIZE];
 
 	uint8_t buf_tag[TEE_AES_BLOCK_SIZE];
 	uint8_t buf_hash[TEE_AES_BLOCK_SIZE];
 	uint8_t buf_cryp[TEE_AES_BLOCK_SIZE];
 
-	symmetric_key skey;
-
 	unsigned int tag_len;
 	unsigned int aad_bytes;
 	unsigned int payload_bytes;
 	unsigned int buf_pos;
 };
 
+struct internal_aes_gcm_ctx {
+	struct internal_aes_gcm_state state;
+	struct internal_aes_gcm_key key;
+};
+
 TEE_Result internal_aes_gcm_init(struct internal_aes_gcm_ctx *ctx,
 				 TEE_OperationMode mode, const void *key,
 				 size_t key_len, const void *nonce,
@@ -48,25 +60,43 @@ TEE_Result internal_aes_gcm_dec_final(struct internal_aes_gcm_ctx *ctx,
 				      const void *src, size_t len, void *dst,
 				      const void *tag, size_t tag_len);
 
-void internal_aes_gcm_inc_ctr(struct internal_aes_gcm_ctx *ctx);
+void internal_aes_gcm_inc_ctr(struct internal_aes_gcm_state *state);
+
+TEE_Result internal_aes_gcm_enc(const struct internal_aes_gcm_key *enc_key,
+				const void *nonce, size_t nonce_len,
+				const void *aad, size_t aad_len,
+				const void *src, size_t len, void *dst,
+				void *tag, size_t *tag_len);
+
+TEE_Result internal_aes_gcm_dec(const struct internal_aes_gcm_key *enc_key,
+				const void *nonce, size_t nonce_len,
+				const void *aad, size_t aad_len,
+				const void *src, size_t len, void *dst,
+				const void *tag, size_t tag_len);
+
+TEE_Result
+internal_aes_gcm_expand_enc_key(const void *key, size_t key_len,
+				struct internal_aes_gcm_key *enc_key);
 
 /*
  * Internal weak functions that can be overridden with hardware specific
  * implementations.
  */
-void internal_aes_gcm_encrypt_block(struct internal_aes_gcm_ctx *ctx,
-				    const void *src, void *dst);
-
-TEE_Result internal_aes_gcm_set_key(struct internal_aes_gcm_ctx *ctx,
-				    const void *key, size_t key_len);
+void internal_aes_gcm_set_key(struct internal_aes_gcm_state *state,
+			      const struct internal_aes_gcm_key *enc_key);
 
-void internal_aes_gcm_ghash_update(struct internal_aes_gcm_ctx *ctx,
+void internal_aes_gcm_ghash_update(struct internal_aes_gcm_state *state,
 				   const void *head, const void *data,
 				   size_t num_blocks);
 
-void
-internal_aes_gcm_update_payload_block_aligned(struct internal_aes_gcm_ctx *ctx,
-					      TEE_OperationMode mode,
-					      const void *src,
-					      size_t num_blocks, void *dst);
+void internal_aes_gcm_update_payload_block_aligned(
+				struct internal_aes_gcm_state *state,
+				const struct internal_aes_gcm_key *enc_key,
+				TEE_OperationMode mode, const void *src,
+				size_t num_blocks, void *dst);
+
+void internal_aes_gcm_encrypt_block(const struct internal_aes_gcm_key *enc_key,
+				    const void *src, void *dst);
 
 #endif /*__CRYPTO_INTERNAL_AES_GCM_H*/
diff --git a/core/lib/libtomcrypt/src/tee_ltc_provider.c b/core/lib/libtomcrypt/src/tee_ltc_provider.c
index c37cb12d9c4..16006bd94dc 100644
--- a/core/lib/libtomcrypt/src/tee_ltc_provider.c
+++ b/core/lib/libtomcrypt/src/tee_ltc_provider.c
@@ -2983,3 +2983,27 @@ TEE_Result rng_generate(void *buffer, size_t len)
 	return get_rng_array(buffer, len);
 #endif
 }
+
+TEE_Result crypto_aes_expand_enc_key(const void *key, size_t key_len,
+				     void *enc_key, unsigned int *rounds)
+{
+	symmetric_key skey;
+
+	if (aes_setup(key, key_len, 0, &skey))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	memcpy(enc_key, skey.rijndael.eK, sizeof(skey.rijndael.eK));
+	*rounds = skey.rijndael.Nr;
+	return TEE_SUCCESS;
+}
+
+void crypto_aes_enc_block(const void *enc_key, unsigned int rounds,
+			  const void *src, void *dst)
+{
+	symmetric_key skey;
+
+	memcpy(skey.rijndael.eK, enc_key, sizeof(skey.rijndael.eK));
+	skey.rijndael.Nr = rounds;
+	if (aes_ecb_encrypt(src, dst, &skey))
+		panic();
+}
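Taken together, the new one-shot API makes a complete seal/unseal cycle straightforward. A sketch using only functions added by this patch (96-bit nonce, full 16-byte tag, no AAD; buffer management elided):

	static TEE_Result seal_unseal_roundtrip(const uint8_t key[32],
						const uint8_t nonce[12],
						const uint8_t *pt, size_t len,
						uint8_t *ct, uint8_t *pt2)
	{
		struct internal_aes_gcm_key ek;
		uint8_t tag[16];
		size_t tag_len = sizeof(tag);
		TEE_Result res;

		/* Expand the key once, reuse it for both directions */
		res = internal_aes_gcm_expand_enc_key(key, 32, &ek);
		if (res)
			return res;

		res = internal_aes_gcm_enc(&ek, nonce, 12, NULL, 0, pt, len,
					   ct, tag, &tag_len);
		if (res)
			return res;

		/* Returns TEE_ERROR_MAC_INVALID if ct or tag was tampered */
		return internal_aes_gcm_dec(&ek, nonce, 12, NULL, 0, ct, len,
					    pt2, tag, tag_len);
	}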