
Commit f786ecc

dcpleung authored and kartben committed
tests: mem_protect/mem_map: add data cache manipulations
This adds data cache manipulations, flushing and invalidation, to the tests where buffer contents are written and compared. These tests map different virtual pages to the same physical pages, and write through one of the mapped virtual addresses. Some SoCs may cache each virtual address separately, so a write through one virtual address is not reflected at another, thus failing the comparison. So we need to manually flush the cache after writing to the buffer, and invalidate the cache before reading.

Note that not all reads and writes need this treatment, as some of them only test for access permissions, not memory content.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
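For context, here is a minimal sketch of the flush-after-write / invalidate-before-read pattern the commit message describes, assuming two hypothetical virtual aliases (alias_a, alias_b) of the same physical page. This is illustrative only and is not taken from the diff:

#include <zephyr/cache.h>
#include <zephyr/kernel.h>
#include <string.h>

/* Write through one alias, then make the data visible through the other.
 * alias_a and alias_b are hypothetical mappings of the same physical page.
 */
static void write_then_verify(uint8_t *alias_a, uint8_t *alias_b, size_t size)
{
	memset(alias_a, 0xAA, size);

	if (IS_ENABLED(CONFIG_DCACHE)) {
		/* Push the dirty lines for the written alias out to memory... */
		sys_cache_data_flush_range(alias_a, size);
		/* ...and drop any stale lines cached for the other alias. */
		sys_cache_data_invd_range(alias_b, size);
	}

	/* A read through the other alias now sees the new data. */
	__ASSERT(alias_b[0] == 0xAA, "aliased write not visible");
}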
1 parent 352fed6 commit f786ecc

File tree

  • tests/kernel/mem_protect/mem_map/src

1 file changed: +31 −0 lines changed

tests/kernel/mem_protect/mem_map/src/main.c

@@ -8,6 +8,7 @@
 #include <zephyr/toolchain.h>
 #include <mmu.h>
 #include <zephyr/linker/sections.h>
+#include <zephyr/cache.h>
 
 #ifdef CONFIG_DEMAND_PAGING
 #include <zephyr/kernel/mm/demand_paging.h>
@@ -56,9 +57,19 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
 {
 	uint8_t *mapped_rw, *mapped_ro;
 	uint8_t *buf = test_page + BUF_OFFSET;
+	uintptr_t aligned_addr;
+	size_t aligned_size;
+	size_t aligned_offset;
 
 	expect_fault = false;
 
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		/* Flush everything and invalidate all addresses to
+		 * prepare for the comparison test below.
+		 */
+		sys_cache_data_flush_and_invd_all();
+	}
+
 	/* Map in a page that allows writes */
 	k_mem_map_phys_bare(&mapped_rw, k_mem_phys_addr(buf),
 			    BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
@@ -72,6 +83,17 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
 		mapped_rw[i] = (uint8_t)(i % 256);
 	}
 
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		/* Flush the data to memory after write. */
+		aligned_offset =
+			k_mem_region_align(&aligned_addr, &aligned_size, (uintptr_t)mapped_rw,
+					   BUF_SIZE, CONFIG_MMU_PAGE_SIZE);
+		zassert_equal(aligned_offset, BUF_OFFSET,
+			      "unexpected mapped_rw aligned offset: %u != %u", aligned_offset,
+			      BUF_OFFSET);
+		sys_cache_data_flush_and_invd_range((void *)aligned_addr, aligned_size);
+	}
+
 	/* Check that the backing buffer contains the expected data. */
 	for (int i = 0; i < BUF_SIZE; i++) {
 		uint8_t expected_val = (uint8_t)(i % 256);
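The k_mem_region_align() call in the hunk above expands the buffer, which starts at BUF_OFFSET within a page, out to page boundaries before the range-based cache operation, likely because cache maintenance on some SoCs wants aligned ranges. A sketch of that helper pattern, where the function name flush_invd_unaligned is hypothetical:

#include <zephyr/cache.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>

/* Hypothetical helper: flush+invalidate a buffer that may start at an
 * arbitrary offset within a page, mirroring what the test does above.
 */
static void flush_invd_unaligned(void *buf, size_t len)
{
	uintptr_t aligned_addr;
	size_t aligned_size;

	/* Expand (buf, len) outward to CONFIG_MMU_PAGE_SIZE boundaries;
	 * the return value is the offset of buf within the aligned region.
	 */
	(void)k_mem_region_align(&aligned_addr, &aligned_size,
				 (uintptr_t)buf, len, CONFIG_MMU_PAGE_SIZE);

	sys_cache_data_flush_and_invd_range((void *)aligned_addr, aligned_size);
}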
@@ -288,6 +310,10 @@ ZTEST(mem_map_api, test_k_mem_map_unmap)
 	}
 	last_mapped = mapped;
 
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		sys_cache_data_flush_and_invd_range((void *)mapped, CONFIG_MMU_PAGE_SIZE);
+	}
+
 	/* Page should be zeroed */
 	for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
 		zassert_equal(mapped[i], '\x00', "page not zeroed");
@@ -300,6 +326,11 @@ ZTEST(mem_map_api, test_k_mem_map_unmap)
 
 	/* Show we can write to page without exploding */
 	(void)memset(mapped, '\xFF', CONFIG_MMU_PAGE_SIZE);
+
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		sys_cache_data_flush_and_invd_range((void *)mapped, CONFIG_MMU_PAGE_SIZE);
+	}
+
 	for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
 		zassert_true(mapped[i] == '\xFF',
 			     "incorrect value 0x%hhx read at index %d",
