From e94d1f275c2b6c86dd2dae98dfbe42bef56dfa72 Mon Sep 17 00:00:00 2001 From: "harshal.patil" Date: Mon, 5 Aug 2024 17:49:14 +0530 Subject: [PATCH] fix(mbedtls/aes): Avoid extra C2M sync of memory --- components/mbedtls/port/aes/dma/esp_aes_dma_core.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/components/mbedtls/port/aes/dma/esp_aes_dma_core.c b/components/mbedtls/port/aes/dma/esp_aes_dma_core.c index 4bc6999eb3bb..c5231d5a7a75 100644 --- a/components/mbedtls/port/aes/dma/esp_aes_dma_core.c +++ b/components/mbedtls/port/aes/dma/esp_aes_dma_core.c @@ -325,7 +325,7 @@ static inline void *aes_dma_calloc(size_t num, size_t size, uint32_t caps, size_ return heap_caps_aligned_calloc(DMA_DESC_MEM_ALIGN_SIZE, num, size, caps | MALLOC_CAP_DMA | MALLOC_CAP_8BIT); } -static inline esp_err_t dma_desc_link(crypto_dma_desc_t *dmadesc, size_t crypto_dma_desc_num, size_t cache_line_size) +static inline esp_err_t dma_desc_link(crypto_dma_desc_t *dmadesc, size_t crypto_dma_desc_num) { esp_err_t ret = ESP_OK; for (int i = 0; i < crypto_dma_desc_num; i++) { @@ -334,7 +334,8 @@ static inline esp_err_t dma_desc_link(crypto_dma_desc_t *dmadesc, size_t crypto_ #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE /* Write back both input buffers and output buffers to clear any cache dirty bit if set If we want to remove `ESP_CACHE_MSYNC_FLAG_UNALIGNED` aligned flag then we need to pass - cache msync size = ALIGN_UP(dma_desc.size, cache_line_size), instead of dma_desc.size + cache msync size = ALIGN_UP(dma_desc.size, cache_line_size), where cache_line_size is the + cache line size corresponding to the buffer that is being synced, instead of dma_desc.size Keeping the `ESP_CACHE_MSYNC_FLAG_UNALIGNED` flag just because it should not look like we are syncing extra bytes due to ALIGN_UP'ed size but just the number of bytes that are needed in the operation. 
*/ @@ -343,7 +344,7 @@ static inline esp_err_t dma_desc_link(crypto_dma_desc_t *dmadesc, size_t crypto_ return ret; } } - ret = esp_cache_msync(dmadesc, ALIGN_UP(crypto_dma_desc_num * sizeof(crypto_dma_desc_t), cache_line_size), ESP_CACHE_MSYNC_FLAG_DIR_C2M); + ret = esp_cache_msync(dmadesc, crypto_dma_desc_num * sizeof(crypto_dma_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED); #else } #endif @@ -470,7 +471,7 @@ static esp_err_t generate_descriptor_list(const uint8_t *buffer, const size_t le populated_dma_descs += (unaligned_end_bytes ? 1 : 0); } - if (dma_desc_link(dma_descriptors, dma_descs_needed, cache_line_size) != ESP_OK) { + if (dma_desc_link(dma_descriptors, dma_descs_needed) != ESP_OK) { ESP_LOGE(TAG, "DMA descriptors cache sync C2M failed"); return ESP_FAIL; }