Skip to content

Commit

Permalink
address comments
Browse files Browse the repository at this point in the history
  • Loading branch information
DmitriyMusatkin committed Jun 20, 2024
1 parent ffacd09 commit 73b14fb
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 6 deletions.
3 changes: 1 addition & 2 deletions source/darwin/commoncrypto_aes.c
Original file line number Diff line number Diff line change
Expand Up @@ -511,8 +511,6 @@ static int s_initialize_gcm_cipher_materials(
}
}

aws_byte_buf_clean_up_secure(&cc_cipher->cipher_base.tag);

status = CCCryptorCreateWithMode(
kCCDecrypt,
kCCModeGCM,
Expand Down Expand Up @@ -553,6 +551,7 @@ static int s_gcm_reset(struct aws_symmetric_cipher *cipher) {
struct cc_aes_cipher *cc_cipher = cipher->impl;

int ret_val = s_reset(cipher);
aws_byte_buf_clean_up_secure(&cc_cipher->cipher_base.tag);

if (ret_val == AWS_OP_SUCCESS) {
ret_val = s_initialize_gcm_cipher_materials(cc_cipher, NULL, NULL, NULL);
Expand Down
2 changes: 1 addition & 1 deletion source/unix/openssl_aes.c
Original file line number Diff line number Diff line change
Expand Up @@ -335,7 +335,7 @@ static int s_gcm_decrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cu
static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
struct openssl_aes_cipher *openssl_cipher = cipher->impl;

if (!cipher->tag.len) {
if (cipher->tag.buffer == NULL) {
aws_byte_buf_init(&cipher->tag, cipher->allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
}

Expand Down
7 changes: 4 additions & 3 deletions source/windows/bcrypt_aes.c
Original file line number Diff line number Diff line change
Expand Up @@ -537,7 +537,7 @@ struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl(
}

/*
* The buffer management for this mode is a good deal easier because we don't care about padding.
* The buffer management for gcm mode is a good deal easier than ctr and cbc modes because we don't care about padding.
 * In chained mode, BCrypt expects the data to be passed in multiples of the block size,
 * followed by a finalize call that turns off chaining and provides any remaining data.
* This function takes care of managing this state - you give it data to work and cipher state and
Expand All @@ -559,9 +559,10 @@ static struct aws_byte_cursor s_gcm_get_working_slice(
AWS_ZERO_STRUCT(working_cur);
/* If there's overflow, prepend it to the working buffer, then append the data */
if (cipher_impl->overflow.len) {
aws_byte_buf_init_copy(scratch, cipher_impl->cipher.allocator, &cipher_impl->overflow);
aws_byte_buf_init(scratch, cipher_impl->cipher.allocator, cipher_impl->overflow.len + data.len);
aws_byte_buf_append(scratch, &cipher_impl->overflow);
aws_byte_buf_reset(&cipher_impl->overflow, true);
aws_byte_buf_append_dynamic(scratch, &data);
aws_byte_buf_append(scratch, &data);
working_cur = aws_byte_cursor_from_buf(scratch);
} else {
working_cur = data;
Expand Down

0 comments on commit 73b14fb

Please sign in to comment.