From 7fbd29bb4dfdde8e4f42708f36e11daf16e811e7 Mon Sep 17 00:00:00 2001 From: Kendall Willis Date: Mon, 7 Aug 2023 14:11:11 -0500 Subject: [PATCH 1/3] CMakeLists: Add support for CMake to enable ASM Enable the ASM language in the project to allow for board-specific functions and interrupts in the TI K3 family devices. Signed-off-by: Kendall Willis --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8d65132de..5fe3fb7a6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,7 +16,7 @@ list (APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/platforms") include (syscheck) -project (open_amp C) +project (open_amp C ASM) include (CheckIncludeFiles) include (CheckCSourceCompiles) From ad93d42566e7730861e7e9586dbeb6188a5195f9 Mon Sep 17 00:00:00 2001 From: Kendall Willis Date: Mon, 7 Aug 2023 10:36:10 -0500 Subject: [PATCH 2/3] apps: machine: Add support for R5 in TI K3 family devices Add bare-metal support for the R5 to use RPMsg on devices in the TI K3 family, specifically the SK-AM64. Users should be able to replace the R5 memory addresses in the linker_remote.ld and platform_info.h files to make RPMsg compatible with the R5 on their chosen board. A CMake file was added to aid in building for the R5 in TI K3 family devices. A mailbox driver was added to allow RPMsg to work with the TI K3 family devices. Other API files were added to support interrupts and other board-specific functions. Both IPI and no-IPI modes are supported in the platform_info files to allow the mailbox to kick RPMsg. Signed-off-by: Kendall Willis --- apps/machine/ti_k3_r5/CMakeLists.txt | 20 + apps/machine/ti_k3_r5/helper.c | 170 +++++ apps/machine/ti_k3_r5/helper.h | 30 + apps/machine/ti_k3_r5/linker_remote.ld | 177 +++++ apps/machine/ti_k3_r5/mailbox/cslr.h | 559 ++++++++++++++ apps/machine/ti_k3_r5/mailbox/cslr_mailbox.h | 691 ++++++++++++++++++ apps/machine/ti_k3_r5/mailbox/hw_mailbox.h | 333 +++++++++ apps/machine/ti_k3_r5/mailbox/hw_types.h | 85 +++ apps/machine/ti_k3_r5/mailbox/mailbox.c | 247 +++++++ apps/machine/ti_k3_r5/mailbox/mailbox.h | 367 ++++++++++ apps/machine/ti_k3_r5/platform_info.c | 390 ++++++++++ apps/machine/ti_k3_r5/platform_info.h | 135 ++++ apps/machine/ti_k3_r5/r5/CacheP_armv7r.c | 134 ++++ apps/machine/ti_k3_r5/r5/CacheP_armv7r_asm.S | 480 ++++++++++++ apps/machine/ti_k3_r5/r5/HwiP_armv7r_asm.S | 129 ++++ .../ti_k3_r5/r5/HwiP_armv7r_handlers_nortos.c | 197 +++++ .../r5/HwiP_armv7r_handlers_nortos_asm.S | 143 ++++ apps/machine/ti_k3_r5/r5/HwiP_armv7r_vim.c | 289 ++++++++ apps/machine/ti_k3_r5/r5/MpuP_armv7r.c | 210 ++++++ apps/machine/ti_k3_r5/r5/MpuP_armv7r_asm.S | 123 ++++ apps/machine/ti_k3_r5/r5/kernel/dpl/CacheP.h | 161 ++++ apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h | 244 +++++++ .../ti_k3_r5/r5/kernel/dpl/HwiP_armv7r_vim.h | 242 ++++++ .../ti_k3_r5/r5/kernel/dpl/MpuP_armv7.h | 191 +++++ apps/machine/ti_k3_r5/r5/kernel/dpl/SystemP.h | 126 ++++ apps/machine/ti_k3_r5/rsc_table.c | 61 ++ apps/machine/ti_k3_r5/rsc_table.h | 56 ++ cmake/platforms/ti_k3_r5.cmake | 7 + 28 files changed, 5997 insertions(+) create mode 100644 apps/machine/ti_k3_r5/CMakeLists.txt create mode 100644 apps/machine/ti_k3_r5/helper.c create mode 100644 apps/machine/ti_k3_r5/helper.h create mode 100644 apps/machine/ti_k3_r5/linker_remote.ld create mode 100644 apps/machine/ti_k3_r5/mailbox/cslr.h create mode 100644 apps/machine/ti_k3_r5/mailbox/cslr_mailbox.h create mode 100644 apps/machine/ti_k3_r5/mailbox/hw_mailbox.h create mode 100644
apps/machine/ti_k3_r5/mailbox/hw_types.h create mode 100644 apps/machine/ti_k3_r5/mailbox/mailbox.c create mode 100644 apps/machine/ti_k3_r5/mailbox/mailbox.h create mode 100644 apps/machine/ti_k3_r5/platform_info.c create mode 100644 apps/machine/ti_k3_r5/platform_info.h create mode 100755 apps/machine/ti_k3_r5/r5/CacheP_armv7r.c create mode 100755 apps/machine/ti_k3_r5/r5/CacheP_armv7r_asm.S create mode 100755 apps/machine/ti_k3_r5/r5/HwiP_armv7r_asm.S create mode 100755 apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos.c create mode 100644 apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos_asm.S create mode 100644 apps/machine/ti_k3_r5/r5/HwiP_armv7r_vim.c create mode 100755 apps/machine/ti_k3_r5/r5/MpuP_armv7r.c create mode 100755 apps/machine/ti_k3_r5/r5/MpuP_armv7r_asm.S create mode 100644 apps/machine/ti_k3_r5/r5/kernel/dpl/CacheP.h create mode 100644 apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h create mode 100755 apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP_armv7r_vim.h create mode 100644 apps/machine/ti_k3_r5/r5/kernel/dpl/MpuP_armv7.h create mode 100644 apps/machine/ti_k3_r5/r5/kernel/dpl/SystemP.h create mode 100644 apps/machine/ti_k3_r5/rsc_table.c create mode 100644 apps/machine/ti_k3_r5/rsc_table.h create mode 100644 cmake/platforms/ti_k3_r5.cmake diff --git a/apps/machine/ti_k3_r5/CMakeLists.txt b/apps/machine/ti_k3_r5/CMakeLists.txt new file mode 100644 index 000000000..febc2f7f3 --- /dev/null +++ b/apps/machine/ti_k3_r5/CMakeLists.txt @@ -0,0 +1,20 @@ +collect (APP_COMMON_SOURCES platform_info.c) +collect (APP_COMMON_SOURCES rsc_table.c) +collect (APP_COMMON_SOURCES helper.c) +collect (APP_COMMON_SOURCES mailbox/mailbox.c) +collect (APP_COMMON_SOURCES r5/CacheP_armv7r_asm.S) +collect (APP_COMMON_SOURCES r5/CacheP_armv7r.c) +collect (APP_COMMON_SOURCES r5/HwiP_armv7r_asm.S) +collect (APP_COMMON_SOURCES r5/HwiP_armv7r_handlers_nortos_asm.S) +collect (APP_COMMON_SOURCES r5/HwiP_armv7r_handlers_nortos.c) +collect (APP_COMMON_SOURCES r5/HwiP_armv7r_vim.c) +collect (APP_COMMON_SOURCES r5/MpuP_armv7r_asm.S) +collect (APP_COMMON_SOURCES r5/MpuP_armv7r.c) + +collect (APP_INC_DIRS "${CMAKE_CURRENT_SOURCE_DIR}") +collect (APP_INC_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/r5") +collect (APP_INC_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/r5/kernel/dpl") +collect (APP_INC_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/mailbox") + +set (_linker_script "${CMAKE_CURRENT_SOURCE_DIR}/linker_remote.ld") +set_property (GLOBAL PROPERTY APP_LINKER_OPT "-T\"${_linker_script}\" --specs=nosys.specs --specs=nano.specs") diff --git a/apps/machine/ti_k3_r5/helper.c b/apps/machine/ti_k3_r5/helper.c new file mode 100644 index 000000000..761823649 --- /dev/null +++ b/apps/machine/ti_k3_r5/helper.c @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/ + * Andrew Davis + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "helper.h" +#include "platform_info.h" + +#include "r5/kernel/dpl/HwiP.h" +#include "r5/kernel/dpl/CacheP.h" +#include "r5/kernel/dpl/MpuP_armv7.h" + +/* Place debug trace buffer in special ELF section */ +#define __section_t(S) __attribute__((__section__(#S))) +#define __log_shared __section_t(.log_shared_mem) + +/* ----------- HwiP ----------- */ +#ifndef RPMSG_NO_IPI +HwiP_Config gHwiConfig = { + .intcBaseAddr = INT_BASE_ADDR, +}; +#endif + +// global structures used by MPU and cache init code +CacheP_Config gCacheConfig = { 1, 0 }; // cache on, no forced 
writethrough +MpuP_Config gMpuConfig = { 3, 1, 1 }; // 3 regions, background region on, MPU on +MpuP_RegionConfig gMpuRegionConfig[] = +{ + // DDR region + { + .baseAddr = DDR_BASE_ADDR, + .size = MpuP_RegionSize_2G, + .attrs = { + .isEnable = 1, + .isCacheable = 1, + .isBufferable = 1, + .isSharable = 0, + .isExecuteNever = 0, + .tex = 7, + .accessPerm = MpuP_AP_ALL_RW, + .subregionDisableMask = 0x0u, + }, + }, + // rpmsg region + { + .baseAddr = RPMSG_BASE_ADDR, + .size = MpuP_RegionSize_1M, + .attrs = { + .isEnable = 1, + .isCacheable = 0, + .isBufferable = 0, + .isSharable = 1, + .isExecuteNever = 1, + .tex = 1, + .accessPerm = MpuP_AP_ALL_RW, + .subregionDisableMask = 0x0u, + }, + }, + + // resource table region + { + .baseAddr = RSC_TABLE_BASE_ADDR, + .size = MpuP_RegionSize_4K, + .attrs = { + .isEnable = 1, + .isCacheable = 0, + .isBufferable = 0, + .isSharable = 1, + .isExecuteNever = 1, + .tex = 1, + .accessPerm = MpuP_AP_ALL_RW, + .subregionDisableMask = 0x0u, + }, + }, +}; + +// NOTE: R5FSS defaults to ARM at reset so these must all be ARM instead of Thumb + +void Reset_Handler(void) __attribute__((naked, section(".boot.reset"), target("arm"))); +void Default_Handler(void) __attribute__((naked, section(".boot.handler"), target("arm"))); + +void Undef_Handler(void) __attribute__((weak, alias("Default_Handler"))); +void SVC_Handler(void) __attribute__((weak, alias("Default_Handler"))); +void PAbt_Handler(void) __attribute__((weak, alias("Default_Handler"))); +void DAbt_Handler(void) __attribute__((weak, alias("Default_Handler"))); +void IRQ_Handler(void) __attribute__((weak, alias("Default_Handler"))); +void FIQ_Handler(void) __attribute__((weak, alias("Default_Handler"))); + +__attribute__((naked, section(".isr_vector"), target("arm"))) void vectors() +{ + asm volatile( + "LDR PC, =Reset_Handler \n" + "LDR PC, =Undef_Handler \n" + "LDR PC, =SVC_Handler \n" + "LDR PC, =PAbt_Handler \n" + "LDR PC, =DAbt_Handler \n" + "NOP \n" + "LDR PC, =IRQ_Handler \n" + "LDR PC, =FIQ_Handler \n"); +} + +// newlib startup code +extern void _start(); + +void Reset_Handler() +{ + asm volatile( + // initialize stack + "ldr sp, =__stack \n" + + // disable interrupts + "mrs r0, cpsr \n" + "orr r0, r0, #0xc0 \n" + "msr cpsr_cf, r0 \n"); + + // must initialize MPU if code is on external memory + MpuP_init(); + CacheP_init(); + + _start(); +} + +void Default_Handler() +{ + while (1) + ; +} + +char __log_shared debug_log_memory[DEBUG_LOG_SIZE]; + +extern void CacheP_wb(void *addr, uint32_t size, uint32_t type); + +// retarget stdout to remoteproc trace buffer +int _write(int handle, char *data, int size) +{ + static size_t idx = 0; + int count; + + metal_unused(handle); + + for (count = 0; count < size; count++) { + if (idx >= DEBUG_LOG_SIZE) + idx = 0; + + debug_log_memory[idx++] = data[count]; + } + + /* null terminate end of trace buffer */ + if (idx >= DEBUG_LOG_SIZE) + idx = 0; + debug_log_memory[idx] = '\0'; + + CacheP_wb(debug_log_memory, DEBUG_LOG_SIZE, 0); + + return count; +} diff --git a/apps/machine/ti_k3_r5/helper.h b/apps/machine/ti_k3_r5/helper.h new file mode 100644 index 000000000..4d57d0800 --- /dev/null +++ b/apps/machine/ti_k3_r5/helper.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/ + * Andrew Davis + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef HELPER_H_ +#define HELPER_H_ + +#include +#include +#include "platform_info.h" + +#if defined __cplusplus +extern "C" { +#endif + +/** + * \brief size of memory log for a CPU + */
+#define DEBUG_LOG_SIZE ( 4*1024U ) + +extern char debug_log_memory[DEBUG_LOG_SIZE]; + +#if defined __cplusplus +} +#endif + +#endif /* HELPER_H_ */ \ No newline at end of file diff --git a/apps/machine/ti_k3_r5/linker_remote.ld b/apps/machine/ti_k3_r5/linker_remote.ld new file mode 100644 index 000000000..f6a4e9785 --- /dev/null +++ b/apps/machine/ti_k3_r5/linker_remote.ld @@ -0,0 +1,177 @@ +__DDR_START__ = 0xA2000000; + +MEMORY +{ + TCMA (rwx) : ORIGIN = 0, LENGTH = 0x8000 + DDR_0 (rwx) : ORIGIN = __DDR_START__ + 0x100000, LENGTH = 0x1000 + DDR_1 (rwx) : ORIGIN = __DDR_START__ + 0x101000, LENGTH = 0xEFF000 +} + +SECTIONS +{ + .boot : + { + KEEP(*(.isr_vector)) + *(.boot*) + *(.text.mpu*) + *(.text.cache*) + *(.text.boot*) + *(.text.hwi*) + } > TCMA + + .text : + { + *(.text*) + + KEEP(*(.init)) + KEEP(*(.fini)) + + /* .ctors */ + *crtbegin.o(.ctors) + *crtbegin?.o(.ctors) + *(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors) + *(SORT(.ctors.*)) + *(.ctors) + + /* .dtors */ + *crtbegin.o(.dtors) + *crtbegin?.o(.dtors) + *(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors) + *(SORT(.dtors.*)) + *(.dtors) + + *(.rodata*) + + KEEP(*(.eh_frame*)) + } > DDR_1 + + .ARM.extab : + { + *(.ARM.extab* .gnu.linkonce.armextab.*) + } > DDR_1 + + __exidx_start = .; + .ARM.exidx : + { + *(.ARM.exidx* .gnu.linkonce.armexidx.*) + } > DDR_1 + __exidx_end = .; + + /* To copy multiple ROM to RAM sections, + * uncomment .copy.table section and + * define __STARTUP_COPY_MULTIPLE in startup_ARMCMx.S */ + /* + .copy.table : + { + . = ALIGN(4); + __copy_table_start__ = .; + LONG (__etext) + LONG (__data_start__) + LONG (__data_end__ - __data_start__) + LONG (__etext2) + LONG (__data2_start__) + LONG (__data2_end__ - __data2_start__) + __copy_table_end__ = .; + } > FLASH + */ + + /* To clear multiple BSS sections, + * uncomment .zero.table section and + * define __STARTUP_CLEAR_BSS_MULTIPLE in startup_ARMCMx.S */ + /* + .zero.table : + { + . = ALIGN(4); + __zero_table_start__ = .; + LONG (__bss_start__) + LONG (__bss_end__ - __bss_start__) + LONG (__bss2_start__) + LONG (__bss2_end__ - __bss2_start__) + __zero_table_end__ = .; + } > FLASH + */ + + /* Location counter can end up 2-byte aligned with narrow Thumb code but + __etext is assumed by startup code to be the LMA of a section in RAM + which must be 4-byte aligned */ + __etext = ALIGN (4); + + .data : AT (__etext) + { + __data_start__ = .; + *(vtable) + *(.data*) + + . = ALIGN(4); + /* preinit data */ + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP(*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + + . = ALIGN(4); + /* init data */ + PROVIDE_HIDDEN (__init_array_start = .); + KEEP(*(SORT(.init_array.*))) + KEEP(*(.init_array)) + PROVIDE_HIDDEN (__init_array_end = .); + + + . = ALIGN(4); + /* fini data */ + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP(*(SORT(.fini_array.*))) + KEEP(*(.fini_array)) + PROVIDE_HIDDEN (__fini_array_end = .); + + KEEP(*(.jcr*)) + . = ALIGN(4); + /* All data end */ + __data_end__ = .; + + } > DDR_1 + + .log_shared_mem : + { + *(.log_shared_mem*) + } > DDR_1 + + .resource_table : + { + *(.resource_table*) + } > DDR_0 + + .bss : + { + . = ALIGN(4); + __bss_start__ = .; + *(.bss*) + *(COMMON) + . = ALIGN(4); + __bss_end__ = .; + } > DDR_1 + + .heap (COPY): + { + __end__ = .; + PROVIDE(end = .); + *(.heap*) + __HeapLimit = .; + } > DDR_1 + + /* .stack_dummy section doesn't contain any symbols. It is only + * used for the linker to calculate the size of stack sections, and assign + * values to stack symbols later */ + .stack_dummy (COPY): + { + *(.stack*) + } > DDR_1 + + /* Set stack top to end of RAM, and move stack limit down by + * size of stack_dummy section */ + __StackTop = ORIGIN(DDR_1) + LENGTH(DDR_1); + __StackLimit = __StackTop - SIZEOF(.stack_dummy); + PROVIDE(__stack = __StackTop); + + /* Check if data + heap + stack exceeds RAM limit */ + ASSERT(__StackLimit >= __HeapLimit, "region DDR_1 overflowed with stack") +} \ No newline at end of file diff --git a/apps/machine/ti_k3_r5/mailbox/cslr.h b/apps/machine/ti_k3_r5/mailbox/cslr.h new file mode 100644 index 000000000..baa9b66c5 --- /dev/null +++ b/apps/machine/ti_k3_r5/mailbox/cslr.h @@ -0,0 +1,559 @@ +/* ============================================================================ + * Copyright (c) Texas Instruments Incorporated 2002-2019 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * +*/ +/** ============================================================================ + * \file cslr.h + * + * \brief This file contains the macro definitions for the register layer + * + */ + +/* Register layer central -- contains field-manipulation macro definitions */ + +#ifndef CSLR_H +#define CSLR_H + +//#include +//#include + +//#if defined (SOC_AM65XX) || defined (SOC_J721E) || defined (SOC_J721S2) || defined (SOC_J784S4) || defined (SOC_J7200) || defined (SOC_AM64X) || defined (SOC_J721S2) || defined (SOC_AM62X) || defined (SOC_AM62A) +//#include +//#endif + +/* the "expression" macros */ + +/* the Field MaKe macro */ +#define CSL_FMK(PER_REG_FIELD, val) \ + (((val) << CSL_##PER_REG_FIELD##_SHIFT) & CSL_##PER_REG_FIELD##_MASK) + +/* the Field EXTract macro */ +#define CSL_FEXT(reg, PER_REG_FIELD) \ + (((reg) & CSL_##PER_REG_FIELD##_MASK) >> CSL_##PER_REG_FIELD##_SHIFT) + +/* the Field INSert macro */ +#define CSL_FINS(reg, PER_REG_FIELD, val) \ + ((reg) = ((reg) & ~CSL_##PER_REG_FIELD##_MASK) \ + | (((val) << CSL_##PER_REG_FIELD##_SHIFT) & CSL_##PER_REG_FIELD##_MASK)) + + +/* the "token" macros */ + +/* the Field MaKe (Token) macro */ +#define CSL_FMKT(PER_REG_FIELD, TOKEN) \ + (((CSL_##PER_REG_FIELD##_##TOKEN) << CSL_##PER_REG_FIELD##_SHIFT) & CSL_##PER_REG_FIELD##_MASK) + +/* the Field INSert (Token) macro */ +#define CSL_FINST(reg, PER_REG_FIELD, TOKEN) \ + ((reg) = ((reg) & ~CSL_##PER_REG_FIELD##_MASK) \ + | (((CSL_##PER_REG_FIELD##_##TOKEN) << CSL_##PER_REG_FIELD##_SHIFT) & CSL_##PER_REG_FIELD##_MASK)) + + +/* the "raw" macros */ +#define kw_val (1u) \ + +/* the Field MaKe (Raw) macro */ +#define CSL_FMKR(msb, lsb, val) \ + (((val) & ((((uint32_t)1U) << ((msb) - (lsb) + ((uint32_t)1U))) - ((uint32_t)1U))) << (lsb)) + +/* the Field EXTract (Raw) macro */ +#define CSL_FEXTR(reg, msb, lsb) \ + (((reg) >> (lsb)) & ((((uint32_t)1U) << ((msb) - (lsb) + ((uint32_t)1U))) - ((uint32_t)1U))) + +/* the Field INSert (Raw) macro */ +#define CSL_FINSR(reg, msb, lsb, val) \ + ((reg) = ((reg) & (~(((uint32_t)((((uint32_t)1U) << ((msb) - (lsb) + ((uint32_t)1U))) - ((uint32_t)1U))) << (lsb)))) \ + | CSL_FMKR((msb), (lsb), (val))) + +/** + * \brief This macro reads a value through the provided pointer + * and returns it. The access type is implicit based on + * the pointer declaration. + * + * \param p Pointer to the memory or hardware register. + * + * \return Value read from the pointer. + */ +#define CSL_REG_RD(p) (*(p)) + +/** + * \brief This macro writes a value to the pointer. The access type is + * implicit based on the pointer declaration. + * + * \param p Pointer to the memory or hardware register. + * \param v Value which has to be written to the pointer. + */ +#define CSL_REG_WR(p, v) (*(p) = (v)) + +/** + * \brief This macro reads a 32-bit value from a hardware register + * and returns the value. + * + * \param p Address of the memory mapped hardware register. + * + * \return Unsigned 32-bit value read from a register. + */ +#define CSL_REG32_RD(p) (CSL_REG32_RD_RAW((volatile uint32_t *) (p))) + +/** + * \brief This macro writes a 32-bit value to a hardware register. + * + * \param p Address of the memory mapped hardware register. + * \param v Unsigned 32-bit value which has to be written to the + * register. + */ +#define CSL_REG32_WR(p, v) (CSL_REG32_WR_RAW( \ + (volatile uint32_t *) (p), (uint32_t) (v))) + +/** + * \brief This macro reads a 16-bit value from a hardware register + * and returns the value. + * + * \param p Address of the memory mapped hardware register.
+ * + * \return Unsigned 16-bit value read from a register. + */ +#define CSL_REG16_RD(p) (CSL_REG16_RD_RAW((volatile uint16_t *) (p))) + +/** + * \brief This macro writes a 16-bit value to a hardware register. + * + * \param p Address of the memory mapped hardware register. + * \param v Unsigned 16-bit value which has to be written to the + * register. + */ +#define CSL_REG16_WR(p, v) (CSL_REG16_WR_RAW( \ + (volatile uint16_t *) (p), (uint16_t) (v))) + +/** + * \brief This macro reads an 8-bit value from a hardware register + * and returns the value. + * + * \param p Address of the memory mapped hardware register. + * + * \return Unsigned 8-bit value read from a register. + */ +#define CSL_REG8_RD(p) (CSL_REG8_RD_RAW((volatile uint8_t *) (p))) + +/** + * \brief This macro writes an 8-bit value to a hardware register. + * + * \param p Address of the memory mapped hardware register. + * \param v Unsigned 8-bit value which has to be written to the + * register. + */ +#define CSL_REG8_WR(p, v) (CSL_REG8_WR_RAW( \ + (volatile uint8_t *) (p), (uint8_t) (v))) + +/** + * \brief This macro reads a 32-bit value from a hardware register + * with an offset and returns the value. + * + * \param p Address of the memory mapped hardware register. + * \param off Offset in bytes. + * + * \return Unsigned 32-bit value read from a register. + */ +#define CSL_REG32_RD_OFF(p, off) (CSL_REG32_RD_OFF_RAW( \ + (volatile uint32_t *) (p), \ + (uint32_t) (off))) + +/** + * \brief This macro writes a 32-bit value to a hardware register with + * an offset. + * + * \param p Address of the memory mapped hardware register. + * \param off Offset in bytes. + * \param v Unsigned 32-bit value which has to be written to the + * register. + */ +#define CSL_REG32_WR_OFF(p, off, v) (CSL_REG32_WR_OFF_RAW( \ + (volatile uint32_t *) (p), \ + (uint32_t) (off), \ + (uint32_t) (v))) + +/** + * \brief This macro calls the read field API for a 32-bit register. It also + * frames the mask and shift from the register field macro. + * + * \param p Address of the memory mapped hardware register. + * \param fld Peripheral register bit field name, from which + * specified bit-field value has to be read. + * + * \return Value of the bit-field (absolute value - shifted to LSB position) + */ +#define CSL_REG32_FEXT(p, fld) (CSL_REG32_FEXT_RAW( \ + ((volatile uint32_t *) (p)), \ + ((uint32_t) CSL_##fld##_MASK), \ + ((uint32_t) CSL_##fld##_SHIFT))) + +/** + * \brief This macro calls the read field API for a 16-bit register. It also + * frames the mask and shift from the register field macro. + * + * \param p Address of the memory mapped hardware register. + * \param fld Peripheral register bit field name, from which + * specified bit-field value has to be read. + * + * \return Value of the bit-field (absolute value - shifted to LSB position) + */ +#define CSL_REG16_FEXT(p, fld) (CSL_REG16_FEXT_RAW( \ + ((volatile uint16_t *) (p)), \ + ((uint16_t) CSL_##fld##_MASK), \ + ((uint32_t) CSL_##fld##_SHIFT))) + +/** + * \brief This macro calls the read field API for an 8-bit register. It also + * frames the mask and shift from the register field macro. + * + * \param p Address of the memory mapped hardware register. + * \param fld Peripheral register bit field name, from which + * specified bit-field value has to be read.
+ * + * \return Value of the bit-field (absolute value - shifted to LSB position) + */ +#define CSL_REG8_FEXT(p, fld) (CSL_REG8_FEXT_RAW( \ + ((volatile uint8_t *) (p)), \ + ((uint8_t) CSL_##fld##_MASK), \ + ((uint32_t) CSL_##fld##_SHIFT))) + +/** + * \brief This macro calls the read-modify-write API for a 32-bit register. It also + * frames the mask and shift from the register field macro. + * + * \param p Address of the memory mapped hardware register. + * \param fld Peripheral register bit field name, from which + * specified bit-field value has to be set. + * \param v Value of the field which has to be set. + */ +#define CSL_REG32_FINS(p, fld, v) (CSL_REG32_FINS_RAW( \ + ((volatile uint32_t *) (p)), \ + ((uint32_t) CSL_##fld##_MASK), \ + ((uint32_t) CSL_##fld##_SHIFT), \ + ((uint32_t) v))) + +/** + * \brief This macro calls the read-modify-write API for a 16-bit register. It also + * frames the mask and shift from the register field macro. + * + * \param p Address of the memory mapped hardware register. + * \param fld Peripheral register bit field name, from which + * specified bit-field value has to be set. + * \param v Value of the field which has to be set. + */ +#define CSL_REG16_FINS(p, fld, v) (CSL_REG16_FINS_RAW( \ + ((volatile uint16_t *) (p)), \ + ((uint16_t) CSL_##fld##_MASK), \ + ((uint32_t) CSL_##fld##_SHIFT), \ + ((uint16_t) v))) + +/** + * \brief This macro calls the read-modify-write API for an 8-bit register. It also + * frames the mask and shift from the register field macro. + * + * \param p Address of the memory mapped hardware register. + * \param fld Peripheral register bit field name, from which + * specified bit-field value has to be set. + * \param v Value of the field which has to be set. + */ +#define CSL_REG8_FINS(p, fld, v) (CSL_REG8_FINS_RAW( \ + ((volatile uint8_t *) (p)), \ + ((uint8_t) CSL_##fld##_MASK), \ + ((uint32_t) CSL_##fld##_SHIFT), \ + ((uint8_t) v))) + +/* ========================================================================== */ +/* Function Declarations */ +/* ========================================================================== */ + +/** + * \brief This function reads a 32-bit value from a hardware register + * and returns the value. + * + * \param p Address of the memory mapped hardware register. + * + * \return Unsigned 32-bit value read from a register. + */ +static inline uint32_t CSL_REG32_RD_RAW(volatile const uint32_t * const p); +static inline uint32_t CSL_REG32_RD_RAW(volatile const uint32_t * const p) +{ + return (*p); +} + +/** + * \brief This function writes a 32-bit value to a hardware register. + * + * \param p Address of the memory mapped hardware register. + * \param v Unsigned 32-bit value which has to be written to the + * register. + */ +static inline void CSL_REG32_WR_RAW(volatile uint32_t * const p, uint32_t v); +static inline void CSL_REG32_WR_RAW(volatile uint32_t * const p, uint32_t v) +{ + *p = v; + return; +} + +/** + * \brief This function reads a 16-bit value from a hardware register + * and returns the value. + * + * \param p Address of the memory mapped hardware register. + * + * \return Unsigned 16-bit value read from a register. + */ +static inline uint16_t CSL_REG16_RD_RAW(volatile const uint16_t * const p); +static inline uint16_t CSL_REG16_RD_RAW(volatile const uint16_t * const p) +{ + return (*p); +} + +/** + * \brief This function writes a 16-bit value to a hardware register. + * + * \param p Address of the memory mapped hardware register. + * \param v Unsigned 16-bit value which has to be written to the + * register.
+ */ +static inline void CSL_REG16_WR_RAW(volatile uint16_t * const p, uint16_t v); +static inline void CSL_REG16_WR_RAW(volatile uint16_t * const p, uint16_t v) +{ + *p = v; + return; +} + +/** + * \brief This function reads an 8-bit value from a hardware register + * and returns the value. + * + * \param p Address of the memory mapped hardware register. + * + * \return Unsigned 8-bit value read from a register. + */ +static inline uint8_t CSL_REG8_RD_RAW(volatile const uint8_t * const p); +static inline uint8_t CSL_REG8_RD_RAW(volatile const uint8_t * const p) +{ + return (*p); +} + +/** + * \brief This function writes an 8-bit value to a hardware register. + * + * \param p Address of the memory mapped hardware register. + * \param v Unsigned 8-bit value which has to be written to the + * register. + */ +static inline void CSL_REG8_WR_RAW(volatile uint8_t * const p, uint8_t v); +static inline void CSL_REG8_WR_RAW(volatile uint8_t * const p, uint8_t v) +{ + *p = v; + return; +} + +/** + * \brief This function reads a 32-bit value from a hardware register + * with an offset and returns the value. + * + * \param p Address of the memory mapped hardware register. + * \param off Offset in bytes. + * + * \return Unsigned 32-bit value read from a register. + */ +static inline uint32_t CSL_REG32_RD_OFF_RAW(volatile const uint32_t *p, uint32_t off); +static inline uint32_t CSL_REG32_RD_OFF_RAW(volatile const uint32_t *p, uint32_t off) +{ + uintptr_t pOff = ((uintptr_t) p) + off; + return (*(volatile const uint32_t *)(pOff)); +} + +/** + * \brief This function writes a 32-bit value to a hardware register with + * an offset. + * + * \param p Address of the memory mapped hardware register. + * \param off Offset in bytes. + * \param v Unsigned 32-bit value which has to be written to the + * register. + */ +static inline void CSL_REG32_WR_OFF_RAW(volatile uint32_t *const p, + uint32_t off, + uint32_t v); +static inline void CSL_REG32_WR_OFF_RAW(volatile uint32_t *const p, + uint32_t off, + uint32_t v) +{ + uintptr_t pOff = ((uintptr_t) p) + off; + (*(volatile uint32_t *)(pOff)) = (v); + return; +} + +/** + * \brief This function reads a 32-bit register, masks a specific set of bits + * and returns the right-shifted value. + * + * \param p Address of the memory mapped hardware register. + * \param mask Mask for the bit field. + * \param shift Bit field shift from LSB. + * + * \return Bit-field value (absolute value - shifted to LSB position) + */ +static inline uint32_t CSL_REG32_FEXT_RAW(volatile const uint32_t * const p, + uint32_t mask, + uint32_t shift); +static inline uint32_t CSL_REG32_FEXT_RAW(volatile const uint32_t * const p, + uint32_t mask, + uint32_t shift) +{ + uint32_t regVal = CSL_REG32_RD_RAW(p); + regVal = (regVal & mask) >> shift; + return (regVal); +} + +/** + * \brief This function reads a 16-bit register, masks a specific set of bits + * and returns the right-shifted value. + * + * \param p Address of the memory mapped hardware register. + * \param mask Mask for the bit field. + * \param shift Bit field shift from LSB.
+ * + * \return Bit-field value (absolute value - shifted to LSB position) + */ +static inline uint16_t CSL_REG16_FEXT_RAW(volatile const uint16_t * const p, + uint16_t mask, + uint32_t shift); +static inline uint16_t CSL_REG16_FEXT_RAW(volatile const uint16_t * const p, + uint16_t mask, + uint32_t shift) +{ + uint16_t regVal = CSL_REG16_RD_RAW(p); + regVal = (regVal & mask) >> shift; + return (regVal); +} + +/** + * \brief This function reads an 8-bit register, masks a specific set of bits + * and returns the right-shifted value. + * + * \param p Address of the memory mapped hardware register. + * \param mask Mask for the bit field. + * \param shift Bit field shift from LSB. + * + * \return Bit-field value (absolute value - shifted to LSB position) + */ +static inline uint8_t CSL_REG8_FEXT_RAW(volatile const uint8_t * const p, + uint8_t mask, + uint32_t shift); +static inline uint8_t CSL_REG8_FEXT_RAW(volatile const uint8_t * const p, + uint8_t mask, + uint32_t shift) +{ + uint8_t regVal = CSL_REG8_RD_RAW(p); + regVal = (regVal & mask) >> shift; + return (regVal); +} + +/** + * \brief This function reads a 32-bit register, modifies a specific set of + * bits and writes back to the register. + * + * \param p Address of the memory mapped hardware register. + * \param mask Mask for the bit field. + * \param shift Bit field shift from LSB. + * \param v Value to be written to bit-field. + */ +static inline void CSL_REG32_FINS_RAW(volatile uint32_t * const p, + uint32_t mask, + uint32_t shift, + uint32_t v); +static inline void CSL_REG32_FINS_RAW(volatile uint32_t * const p, + uint32_t mask, + uint32_t shift, + uint32_t v) +{ + uint32_t regVal = CSL_REG32_RD_RAW(p); + regVal = (regVal & ~(mask)); + regVal |= (v << shift) & mask; + CSL_REG32_WR_RAW(p, regVal); + return; +} + +/** + * \brief This function reads a 16-bit register, modifies a specific set of + * bits and writes back to the register. + * + * \param p Address of the memory mapped hardware register. + * \param mask Mask for the bit field. + * \param shift Bit field shift from LSB. + * \param v Value to be written to bit-field. + */ +static inline void CSL_REG16_FINS_RAW(volatile uint16_t * const p, + uint16_t mask, + uint32_t shift, + uint16_t v); +static inline void CSL_REG16_FINS_RAW(volatile uint16_t * const p, + uint16_t mask, + uint32_t shift, + uint16_t v) +{ + uint16_t regVal = CSL_REG16_RD_RAW(p); + regVal = (regVal & ~(mask)); + regVal |= (v << shift) & mask; + CSL_REG16_WR_RAW(p, regVal); + return; +} + +/** + * \brief This function reads an 8-bit register, modifies a specific set of + * bits and writes back to the register. + * + * \param p Address of the memory mapped hardware register. + * \param mask Mask for the bit field. + * \param shift Bit field shift from LSB. + * \param v Value to be written to bit-field.
+ */ +static inline void CSL_REG8_FINS_RAW(volatile uint8_t * const p, + uint8_t mask, + uint32_t shift, + uint8_t v); +static inline void CSL_REG8_FINS_RAW(volatile uint8_t * const p, + uint8_t mask, + uint32_t shift, + uint8_t v) +{ + uint8_t regVal = CSL_REG8_RD_RAW(p); + regVal = (regVal & ~(mask)); + regVal |= (v << shift) & mask; + CSL_REG8_WR_RAW(p, regVal); + return; +} + +#endif /* CSLR_H_ */ diff --git a/apps/machine/ti_k3_r5/mailbox/cslr_mailbox.h b/apps/machine/ti_k3_r5/mailbox/cslr_mailbox.h new file mode 100644 index 000000000..3268a3a95 --- /dev/null +++ b/apps/machine/ti_k3_r5/mailbox/cslr_mailbox.h @@ -0,0 +1,691 @@ +/******************************************************************** + * Copyright (C) 2017 Texas Instruments Incorporated. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Name : cslr_mailbox.h +*/ +#ifndef CSLR_MAILBOX_H_ +#define CSLR_MAILBOX_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +#include + +#include "cslr.h" + +/************************************************************************** +* Hardware Region : +**************************************************************************/ + + +/************************************************************************** +* Register Overlay Structure +**************************************************************************/ + +typedef struct { + volatile uint32_t IRQ_STATUS_RAW; /* Raw status for each event for user [a] */ + volatile uint32_t IRQ_STATUS_CLR; /* Masked status for each event for user [a] */ + volatile uint32_t IRQ_ENABLE_SET; /* Set interrupt enables. */ + volatile uint32_t IRQ_ENABLE_CLR; /* Clear interrupt enables. 
*/ +} CSL_mailboxRegs_user; + + +typedef struct { + volatile uint32_t REVISION; /* Peripheral ID register */ + volatile uint8_t Resv_16[12]; + volatile uint32_t SYSCONFIG; /* Mailbox control register */ + volatile uint8_t Resv_64[44]; + volatile uint32_t MESSAGE[16]; /* Message Mailbox */ + volatile uint32_t FIFO_STATUS[16]; /* FIFO status for Mailbox[a] */ + volatile uint32_t MSG_STATUS[16]; /* Number of messages in Mailbox[a] */ + CSL_mailboxRegs_user USER[4]; + volatile uint32_t IRQ_EOI; /* EOI register */ +} CSL_mailboxRegs; + + +/************************************************************************** +* Register Macros +**************************************************************************/ + +#define CSL_MAILBOX_REVISION (0x00000000U) +#define CSL_MAILBOX_SYSCONFIG (0x00000010U) +#define CSL_MAILBOX_MESSAGE(MESSAGE) (0x00000040U+((uintptr_t)(MESSAGE)*0x4U)) +#define CSL_MAILBOX_FIFO_STATUS(FIFO_STATUS) (0x00000080U+((uintptr_t)(FIFO_STATUS)*0x4U)) +#define CSL_MAILBOX_MSG_STATUS(MSG_STATUS) (0x000000C0U+((uintptr_t)(MSG_STATUS)*0x4U)) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW(USER) (0x00000100U+((uintptr_t)(USER)*0x10U)) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR(USER) (0x00000104U+((uintptr_t)(USER)*0x10U)) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET(USER) (0x00000108U+((uintptr_t)(USER)*0x10U)) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR(USER) (0x0000010CU+((uintptr_t)(USER)*0x10U)) +#define CSL_MAILBOX_IRQ_EOI (0x00000140U) + +/************************************************************************** +* Field Definition Macros +**************************************************************************/ + + +/* IRQ_STATUS_RAW */ + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB0_MASK (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB0_SHIFT (0x00000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB0_MASK (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB0_SHIFT (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB1_MASK (0x00000004U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB1_SHIFT (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB1_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB1_MASK (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB1_SHIFT (0x00000003U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB1_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB2_MASK (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB2_SHIFT (0x00000004U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB2_MASK (0x00000020U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB2_SHIFT (0x00000005U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB3_MASK (0x00000040U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB3_SHIFT (0x00000006U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB3_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB3_MASK (0x00000080U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB3_SHIFT (0x00000007U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB3_MAX (0x00000001U) + +#define 
CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB4_MASK (0x00000100U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB4_SHIFT (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB4_MASK (0x00000200U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB4_SHIFT (0x00000009U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB5_MASK (0x00000400U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB5_SHIFT (0x0000000AU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB5_MASK (0x00000800U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB5_SHIFT (0x0000000BU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB6_MASK (0x00001000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB6_SHIFT (0x0000000CU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB6_MASK (0x00002000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB6_SHIFT (0x0000000DU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB7_MASK (0x00004000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB7_SHIFT (0x0000000EU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB7_MASK (0x00008000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB7_SHIFT (0x0000000FU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB8_MASK (0x00010000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB8_SHIFT (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB8_MASK (0x00020000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB8_SHIFT (0x00000011U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB9_MASK (0x00040000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB9_SHIFT (0x00000012U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB9_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB9_MASK (0x00080000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB9_SHIFT (0x00000013U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB9_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB10_MASK (0x00100000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB10_SHIFT (0x00000014U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB10_MASK (0x00200000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB10_SHIFT (0x00000015U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB11_MASK (0x00400000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB11_SHIFT (0x00000016U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB11_MASK (0x00800000U) 
+#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB11_SHIFT (0x00000017U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB12_MASK (0x01000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB12_SHIFT (0x00000018U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB12_MASK (0x02000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB12_SHIFT (0x00000019U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB13_MASK (0x04000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB13_SHIFT (0x0000001AU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB13_MASK (0x08000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB13_SHIFT (0x0000001BU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB14_MASK (0x10000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB14_SHIFT (0x0000001CU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB14_MASK (0x20000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB14_SHIFT (0x0000001DU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB15_MASK (0x40000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB15_SHIFT (0x0000001EU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NEWMSGSTATUSMB15_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB15_MASK (0x80000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB15_SHIFT (0x0000001FU) +#define CSL_MAILBOX_USER_IRQ_STATUS_RAW_NOTFULLSTATUSMB15_MAX (0x00000001U) + +/* IRQ_STATUS_CLR */ + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB0_MASK (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB0_SHIFT (0x00000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB0_MASK (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB0_SHIFT (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB1_MASK (0x00000004U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB1_SHIFT (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB1_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB1_MASK (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB1_SHIFT (0x00000003U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB1_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB2_MASK (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB2_SHIFT (0x00000004U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB2_MASK (0x00000020U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB2_SHIFT (0x00000005U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB3_MASK (0x00000040U) +#define 
CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB3_SHIFT (0x00000006U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB3_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB3_MASK (0x00000080U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB3_SHIFT (0x00000007U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB3_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB4_MASK (0x00000100U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB4_SHIFT (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB4_MASK (0x00000200U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB4_SHIFT (0x00000009U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB5_MASK (0x00000400U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB5_SHIFT (0x0000000AU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB5_MASK (0x00000800U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB5_SHIFT (0x0000000BU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB6_MASK (0x00001000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB6_SHIFT (0x0000000CU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB6_MASK (0x00002000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB6_SHIFT (0x0000000DU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB7_MASK (0x00004000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB7_SHIFT (0x0000000EU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB7_MASK (0x00008000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB7_SHIFT (0x0000000FU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB8_MASK (0x00010000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB8_SHIFT (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB8_MASK (0x00020000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB8_SHIFT (0x00000011U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB9_MASK (0x00040000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB9_SHIFT (0x00000012U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB9_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB9_MASK (0x00080000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB9_SHIFT (0x00000013U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB9_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB10_MASK (0x00100000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB10_SHIFT (0x00000014U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB10_MASK (0x00200000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB10_SHIFT (0x00000015U) +#define 
CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB11_MASK (0x00400000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB11_SHIFT (0x00000016U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB11_MASK (0x00800000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB11_SHIFT (0x00000017U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB12_MASK (0x01000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB12_SHIFT (0x00000018U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB12_MASK (0x02000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB12_SHIFT (0x00000019U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB13_MASK (0x04000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB13_SHIFT (0x0000001AU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB13_MASK (0x08000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB13_SHIFT (0x0000001BU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB14_MASK (0x10000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB14_SHIFT (0x0000001CU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB14_MASK (0x20000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB14_SHIFT (0x0000001DU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB15_MASK (0x40000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB15_SHIFT (0x0000001EU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NEWMSGSTATUSMB15_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB15_MASK (0x80000000U) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB15_SHIFT (0x0000001FU) +#define CSL_MAILBOX_USER_IRQ_STATUS_CLR_NOTFULLSTATUSMB15_MAX (0x00000001U) + +/* IRQ_ENABLE_SET */ + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB0_MASK (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB0_SHIFT (0x00000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB0_MASK (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB0_SHIFT (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB1_MASK (0x00000004U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB1_SHIFT (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB1_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB1_MASK (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB1_SHIFT (0x00000003U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB1_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB2_MASK (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB2_SHIFT (0x00000004U) +#define 
CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB2_MASK (0x00000020U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB2_SHIFT (0x00000005U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB3_MASK (0x00000040U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB3_SHIFT (0x00000006U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB3_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB3_MASK (0x00000080U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB3_SHIFT (0x00000007U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB3_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB4_MASK (0x00000100U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB4_SHIFT (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB4_MASK (0x00000200U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB4_SHIFT (0x00000009U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB5_MASK (0x00000400U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB5_SHIFT (0x0000000AU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB5_MASK (0x00000800U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB5_SHIFT (0x0000000BU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB6_MASK (0x00001000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB6_SHIFT (0x0000000CU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB6_MASK (0x00002000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB6_SHIFT (0x0000000DU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB7_MASK (0x00004000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB7_SHIFT (0x0000000EU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB7_MASK (0x00008000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB7_SHIFT (0x0000000FU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB8_MASK (0x00010000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB8_SHIFT (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB8_MASK (0x00020000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB8_SHIFT (0x00000011U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB9_MASK (0x00040000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB9_SHIFT (0x00000012U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB9_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB9_MASK (0x00080000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB9_SHIFT (0x00000013U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB9_MAX (0x00000001U) + +#define 
CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB10_MASK (0x00100000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB10_SHIFT (0x00000014U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB10_MASK (0x00200000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB10_SHIFT (0x00000015U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB11_MASK (0x00400000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB11_SHIFT (0x00000016U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB11_MASK (0x00800000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB11_SHIFT (0x00000017U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB12_MASK (0x01000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB12_SHIFT (0x00000018U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB12_MASK (0x02000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB12_SHIFT (0x00000019U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB13_MASK (0x04000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB13_SHIFT (0x0000001AU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB13_MASK (0x08000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB13_SHIFT (0x0000001BU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB14_MASK (0x10000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB14_SHIFT (0x0000001CU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB14_MASK (0x20000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB14_SHIFT (0x0000001DU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB15_MASK (0x40000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB15_SHIFT (0x0000001EU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NEWMSGENABLEMB15_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB15_MASK (0x80000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB15_SHIFT (0x0000001FU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_SET_NOTFULLENABLEMB15_MAX (0x00000001U) + +/* IRQ_ENABLE_CLR */ + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB0_MASK (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB0_SHIFT (0x00000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB0_MASK (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB0_SHIFT (0x00000001U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB0_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB1_MASK (0x00000004U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB1_SHIFT (0x00000002U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB1_MAX (0x00000001U) + +#define 
CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB1_MASK (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB1_SHIFT (0x00000003U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB1_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB2_MASK (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB2_SHIFT (0x00000004U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB2_MASK (0x00000020U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB2_SHIFT (0x00000005U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB2_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB3_MASK (0x00000040U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB3_SHIFT (0x00000006U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB3_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB3_MASK (0x00000080U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB3_SHIFT (0x00000007U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB3_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB4_MASK (0x00000100U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB4_SHIFT (0x00000008U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB4_MASK (0x00000200U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB4_SHIFT (0x00000009U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB4_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB5_MASK (0x00000400U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB5_SHIFT (0x0000000AU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB5_MASK (0x00000800U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB5_SHIFT (0x0000000BU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB5_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB6_MASK (0x00001000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB6_SHIFT (0x0000000CU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB6_MASK (0x00002000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB6_SHIFT (0x0000000DU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB6_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB7_MASK (0x00004000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB7_SHIFT (0x0000000EU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB7_MASK (0x00008000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB7_SHIFT (0x0000000FU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB7_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB8_MASK (0x00010000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB8_SHIFT (0x00000010U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB8_MASK (0x00020000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB8_SHIFT (0x00000011U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB8_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB9_MASK (0x00040000U) +#define 
CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB9_SHIFT (0x00000012U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB9_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB9_MASK (0x00080000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB9_SHIFT (0x00000013U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB9_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB10_MASK (0x00100000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB10_SHIFT (0x00000014U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB10_MASK (0x00200000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB10_SHIFT (0x00000015U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB10_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB11_MASK (0x00400000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB11_SHIFT (0x00000016U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB11_MASK (0x00800000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB11_SHIFT (0x00000017U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB11_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB12_MASK (0x01000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB12_SHIFT (0x00000018U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB12_MASK (0x02000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB12_SHIFT (0x00000019U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB12_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB13_MASK (0x04000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB13_SHIFT (0x0000001AU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB13_MASK (0x08000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB13_SHIFT (0x0000001BU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB13_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB14_MASK (0x10000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB14_SHIFT (0x0000001CU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB14_MASK (0x20000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB14_SHIFT (0x0000001DU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB14_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB15_MASK (0x40000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB15_SHIFT (0x0000001EU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NEWMSGENABLEMB15_MAX (0x00000001U) + +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB15_MASK (0x80000000U) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB15_SHIFT (0x0000001FU) +#define CSL_MAILBOX_USER_IRQ_ENABLE_CLR_NOTFULLENABLEMB15_MAX (0x00000001U) + +/* REVISION */ + +#define CSL_MAILBOX_REVISION_SCHEME_MASK (0xC0000000U) +#define CSL_MAILBOX_REVISION_SCHEME_SHIFT (0x0000001EU) +#define CSL_MAILBOX_REVISION_SCHEME_MAX (0x00000003U) + +#define CSL_MAILBOX_REVISION_BU_MASK (0x30000000U) +#define CSL_MAILBOX_REVISION_BU_SHIFT (0x0000001CU) +#define CSL_MAILBOX_REVISION_BU_MAX (0x00000003U) + +#define 
CSL_MAILBOX_REVISION_FUNCTION_MASK (0x0FFF0000U) +#define CSL_MAILBOX_REVISION_FUNCTION_SHIFT (0x00000010U) +#define CSL_MAILBOX_REVISION_FUNCTION_MAX (0x00000FFFU) + +#define CSL_MAILBOX_REVISION_RTL_VER_MASK (0x0000F800U) +#define CSL_MAILBOX_REVISION_RTL_VER_SHIFT (0x0000000BU) +#define CSL_MAILBOX_REVISION_RTL_VER_MAX (0x0000001FU) + +#define CSL_MAILBOX_REVISION_MAJOR_REV_MASK (0x00000700U) +#define CSL_MAILBOX_REVISION_MAJOR_REV_SHIFT (0x00000008U) +#define CSL_MAILBOX_REVISION_MAJOR_REV_MAX (0x00000007U) + +#define CSL_MAILBOX_REVISION_CUSTOM_MASK (0x000000C0U) +#define CSL_MAILBOX_REVISION_CUSTOM_SHIFT (0x00000006U) +#define CSL_MAILBOX_REVISION_CUSTOM_MAX (0x00000003U) + +#define CSL_MAILBOX_REVISION_MINOR_REV_MASK (0x0000003FU) +#define CSL_MAILBOX_REVISION_MINOR_REV_SHIFT (0x00000000U) +#define CSL_MAILBOX_REVISION_MINOR_REV_MAX (0x0000003FU) + +/* SYSCONFIG */ + +#define CSL_MAILBOX_SYSCONFIG_SOFT_RESET_MASK (0x00000001U) +#define CSL_MAILBOX_SYSCONFIG_SOFT_RESET_SHIFT (0x00000000U) +#define CSL_MAILBOX_SYSCONFIG_SOFT_RESET_MAX (0x00000001U) + +/* MESSAGE */ + +#define CSL_MAILBOX_MESSAGE_VALUE_MASK (0xFFFFFFFFU) +#define CSL_MAILBOX_MESSAGE_VALUE_SHIFT (0x00000000U) +#define CSL_MAILBOX_MESSAGE_VALUE_MAX (0xFFFFFFFFU) + +/* FIFO_STATUS */ + +#define CSL_MAILBOX_FIFO_STATUS_FULL_MASK (0x00000001U) +#define CSL_MAILBOX_FIFO_STATUS_FULL_SHIFT (0x00000000U) +#define CSL_MAILBOX_FIFO_STATUS_FULL_MAX (0x00000001U) + +/* MSG_STATUS */ + +#define CSL_MAILBOX_MSG_STATUS_NUM_MESSAGES_MASK (0x0000003FU) +#define CSL_MAILBOX_MSG_STATUS_NUM_MESSAGES_SHIFT (0x00000000U) +#define CSL_MAILBOX_MSG_STATUS_NUM_MESSAGES_MAX (0x0000003FU) + +/* IRQ_EOI */ + +#define CSL_MAILBOX_IRQ_EOI_EOI0_MASK (0x00000001U) +#define CSL_MAILBOX_IRQ_EOI_EOI0_SHIFT (0x00000000U) +#define CSL_MAILBOX_IRQ_EOI_EOI0_MAX (0x00000001U) + +#define CSL_MAILBOX_IRQ_EOI_EOI1_MASK (0x00000002U) +#define CSL_MAILBOX_IRQ_EOI_EOI1_SHIFT (0x00000001U) +#define CSL_MAILBOX_IRQ_EOI_EOI1_MAX (0x00000001U) + +#define CSL_MAILBOX_IRQ_EOI_EOI2_MASK (0x00000004U) +#define CSL_MAILBOX_IRQ_EOI_EOI2_SHIFT (0x00000002U) +#define CSL_MAILBOX_IRQ_EOI_EOI2_MAX (0x00000001U) + +#define CSL_MAILBOX_IRQ_EOI_EOI3_MASK (0x00000008U) +#define CSL_MAILBOX_IRQ_EOI_EOI3_SHIFT (0x00000003U) +#define CSL_MAILBOX_IRQ_EOI_EOI3_MAX (0x00000001U) + +#ifdef __cplusplus +} +#endif +#endif diff --git a/apps/machine/ti_k3_r5/mailbox/hw_mailbox.h b/apps/machine/ti_k3_r5/mailbox/hw_mailbox.h new file mode 100644 index 000000000..bd30d5f13 --- /dev/null +++ b/apps/machine/ti_k3_r5/mailbox/hw_mailbox.h @@ -0,0 +1,333 @@ +/* ============================================================================= + * Copyright (c) Texas Instruments Incorporated 2014 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/** +* +* \file hw_mailbox.h +* +* \brief register-level header file for MAILBOX +* +**/ + +#ifndef HW_MAILBOX_H_ +#define HW_MAILBOX_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +/**************************************************************************************************** +* Register Definitions +****************************************************************************************************/ +#define MAILBOX_REVISION (0x0U) +#define MAILBOX_SYSCONFIG (0x10U) +#define MAILBOX_MESSAGE(n) ((uintptr_t)0x40U + ((n) * 4U)) +#define MAILBOX_FIFOSTATUS(n) ((uintptr_t)0x80U + ((n) * 4U)) +#define MAILBOX_MSGSTATUS(n) ((uintptr_t)0xc0U + ((n) * 4U)) +#define MAILBOX_IRQSTATUS_RAW(n) ((uintptr_t)0x100U + ((n) * 16U)) +#define MAILBOX_IRQSTATUS_CLR(n) ((uintptr_t)0x104U + ((n) * 16U)) +#define MAILBOX_IRQENABLE_CLR(n) ((uintptr_t)0x10cU + ((n) * 16U)) +#define MAILBOX_IRQENABLE_SET(n) ((uintptr_t)0x108U + ((n) * 16U)) +#define MAILBOX_IRQ_EOI (0x140U) + +/**************************************************************************************************** +* Field Definition Macros +****************************************************************************************************/ + +/* REVISION */ + +#define MAILBOX_REVISION_SCHEME_SHIFT (0x1EU) +#define MAILBOX_REVISION_SCHEME_MASK (0xC0000000U) + +#define MAILBOX_REVISION_BU_SHIFT (0x1CU) +#define MAILBOX_REVISION_BU_MASK (0x30000000U) + +#define MAILBOX_REVISION_FUNCTION_SHIFT (0x10U) +#define MAILBOX_REVISION_FUNCTION_MASK (0x0FFF0000U) + +#define MAILBOX_REVISION_RTL_VER_SHIFT (0xBU) +#define MAILBOX_REVISION_RTL_VER_MASK (0x0000F800U) + +#define MAILBOX_REVISION_MAJOR_REV_SHIFT (0x8U) +#define MAILBOX_REVISION_MAJOR_REV_MASK (0x00000700U) + +#define MAILBOX_REVISION_CUSTOM_SHIFT (0x6U) +#define MAILBOX_REVISION_CUSTOM_MASK (0x000000C0U) + +#define MAILBOX_REVISION_MINOR_REV_SHIFT (0x0U) +#define MAILBOX_REVISION_MINOR_REV_MASK (0x0000003FU) + +#define MAILBOX_SYSCONFIG_SOFTRESET_SHIFT (0U) +#define MAILBOX_SYSCONFIG_SOFTRESET_MASK (0x00000001U) +#define MAILBOX_SYSCONFIG_SOFTRESET_B0 (0U) +#define MAILBOX_SYSCONFIG_SOFTRESET_B1 (1U) + +#define MAILBOX_IRQ_EOI_EOI0_SHIFT (0U) +#define MAILBOX_IRQ_EOI_EOI0_MASK (0x00000001U) + +#define MAILBOX_IRQ_EOI_EOI1_SHIFT (0x1U) +#define MAILBOX_IRQ_EOI_EOI1_MASK (0x00000002U) + +#define MAILBOX_IRQ_EOI_EOI2_SHIFT (0x2U) +#define MAILBOX_IRQ_EOI_EOI2_MASK (0x00000004U) + +#define MAILBOX_IRQ_EOI_EOI3_SHIFT (0x3U) +#define MAILBOX_IRQ_EOI_EOI3_MASK (0x00000008U) + +#define MAILBOX_MESSAGE_MESSAGEVALUEMBM_SHIFT (0U) +#define MAILBOX_MESSAGE_MESSAGEVALUEMBM_MASK (0xffffffffU) + +#define MAILBOX_FIFOSTATUS_FIFOFULLMBM_SHIFT (0U) +#define MAILBOX_FIFOSTATUS_FIFOFULLMBM_MASK (0x00000001U) + 
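These legacy (non-CSL) SHIFT/MASK pairs are meant to be combined with the HWREG() accessor defined in hw_types.h below: read the register, mask the field, then shift it down. A minimal sketch of decoding the REVISION register that way (illustrative only, not part of the driver itself):

/* Hypothetical helper: extract the major revision from a raw REVISION read. */
static inline unsigned int mailbox_major_rev(uintptr_t base)
{
	unsigned int rev = HWREG(base + MAILBOX_REVISION);

	return (rev & MAILBOX_REVISION_MAJOR_REV_MASK) >>
	       MAILBOX_REVISION_MAJOR_REV_SHIFT;
}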
+#define MAILBOX_FIFOSTATUS_RESERVED_0_SHIFT (1U) +#define MAILBOX_FIFOSTATUS_RESERVED_0_MASK (0xfffffffeU) + +#define MAILBOX_MSGSTATUS_NBOFMSGMBM_SHIFT (0U) +#define MAILBOX_MSGSTATUS_NBOFMSGMBM_MASK (0x0000003fU) + +#define MAILBOX_MSGSTATUS_RESERVED_SHIFT (7U) +#define MAILBOX_MSGSTATUS_RESERVED_MASK (0xffffff80U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB0_SHIFT (0U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB0_MASK (0x00000001U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB1_SHIFT (2U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB1_MASK (0x00000004U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB1_SHIFT (3U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB1_MASK (0x00000008U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB0_SHIFT (1U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB0_MASK (0x00000002U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB2_SHIFT (4U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB2_MASK (0x00000010U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB2_SHIFT (5U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB2_MASK (0x00000020U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB3_SHIFT (6U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB3_MASK (0x00000040U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB3_SHIFT (7U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB3_MASK (0x00000080U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB4_SHIFT (8U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB4_MASK (0x00000100U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB4_SHIFT (9U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB4_MASK (0x00000200U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB5_SHIFT (10U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB5_MASK (0x00000400U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB5_SHIFT (11U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB5_MASK (0x00000800U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB6_SHIFT (13U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB6_MASK (0x00002000U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB6_SHIFT (12U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB6_MASK (0x00001000U) + +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB7_SHIFT (14U) +#define MAILBOX_IRQSTATUS_RAW_NEWMSGSTATUSUUMB7_MASK (0x00004000U) + +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB7_SHIFT (15U) +#define MAILBOX_IRQSTATUS_RAW_NOTFULLSTATUSUUMB7_MASK (0x00008000U) + +#define MAILBOX_IRQSTATUS_RAW_RESERVED_SHIFT (16U) +#define MAILBOX_IRQSTATUS_RAW_RESERVED_MASK (0xffff0000U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB0_SHIFT (1U) +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB0_MASK (0x00000002U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB1_SHIFT (3U) +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB1_MASK (0x00000008U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB2_SHIFT (5U) +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB2_MASK (0x00000020U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB3_SHIFT (7U) +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB3_MASK (0x00000080U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB4_SHIFT (9U) +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB4_MASK (0x00000200U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB5_SHIFT (11U) +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB5_MASK (0x00000800U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB6_SHIFT (13U) +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB6_MASK (0x00002000U) + +#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB7_SHIFT (15U) 
+#define MAILBOX_IRQSTATUS_CLR_NOTFULLSTATUSENUUMB7_MASK (0x00008000U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB0_SHIFT (0U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB0_MASK (0x00000001U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB1_SHIFT (2U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB1_MASK (0x00000004U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB2_SHIFT (4U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB2_MASK (0x00000010U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB3_SHIFT (6U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB3_MASK (0x00000040U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB4_SHIFT (8U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB4_MASK (0x00000100U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB5_SHIFT (10U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB5_MASK (0x00000400U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB6_SHIFT (12U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB6_MASK (0x00001000U) + +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB7_SHIFT (14U) +#define MAILBOX_IRQSTATUS_CLR_NEWMSGSTATUSENUUMB7_MASK (0x00004000U) + +#define MAILBOX_IRQSTATUS_CLR_RESERVED_SHIFT (16U) +#define MAILBOX_IRQSTATUS_CLR_RESERVED_MASK (0xffff0000U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB0_SHIFT (0U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB0_MASK (0x00000001U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB1_SHIFT (2U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB1_MASK (0x00000004U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB5_SHIFT (10U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB5_MASK (0x00000400U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB3_SHIFT (6U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB3_MASK (0x00000040U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB2_SHIFT (4U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB2_MASK (0x00000010U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB6_SHIFT (12U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB6_MASK (0x00001000U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB7_SHIFT (14U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB7_MASK (0x00004000U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB6_SHIFT (13U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB6_MASK (0x00002000U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB2_SHIFT (5U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB2_MASK (0x00000020U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB7_SHIFT (15U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB7_MASK (0x00008000U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB5_SHIFT (11U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB5_MASK (0x00000800U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB1_SHIFT (3U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB1_MASK (0x00000008U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB3_SHIFT (7U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB3_MASK (0x00000080U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB8_SHIFT (17U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB8_MASK (0x00020000U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB4_SHIFT (9U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB4_MASK (0x00000200U) + +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB0_SHIFT (1U) +#define MAILBOX_IRQENABLE_CLR_NOTFULLENABLEUUMB0_MASK (0x00000002U) + +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB4_SHIFT (8U) +#define MAILBOX_IRQENABLE_CLR_NEWMSGENABLEUUMB4_MASK (0x00000100U) + +#define MAILBOX_IRQENABLE_CLR_RESERVED_SHIFT (16U) 
+#define MAILBOX_IRQENABLE_CLR_RESERVED_MASK (0xffff0000U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB0_SHIFT (1U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB0_MASK (0x00000002U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB4_SHIFT (9U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB4_MASK (0x00000200U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB3_SHIFT (7U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB3_MASK (0x00000080U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB1_SHIFT (3U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB1_MASK (0x00000008U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB5_SHIFT (11U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB5_MASK (0x00000800U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB6_SHIFT (13U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB6_MASK (0x00002000U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB7_SHIFT (15U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB7_MASK (0x00008000U) + +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB2_SHIFT (5U) +#define MAILBOX_IRQENABLE_SET_NOTFULLENABLEUUMB2_MASK (0x00000020U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB7_SHIFT (14U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB7_MASK (0x00004000U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB2_SHIFT (4U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB2_MASK (0x00000010U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB6_SHIFT (12U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB6_MASK (0x00001000U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB5_SHIFT (10U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB5_MASK (0x00000400U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB1_SHIFT (2U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB1_MASK (0x00000004U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB3_SHIFT (6U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB3_MASK (0x00000040U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB4_SHIFT (8U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB4_MASK (0x00000100U) + +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB0_SHIFT (0U) +#define MAILBOX_IRQENABLE_SET_NEWMSGENABLEUUMB0_MASK (0x00000001U) + +#define MAILBOX_IRQENABLE_SET_RESERVED_SHIFT (16U) +#define MAILBOX_IRQENABLE_SET_RESERVED_MASK (0xffff0000U) + +#ifdef __cplusplus +} +#endif +#endif /* _HW_MAILBOX_H_ */ diff --git a/apps/machine/ti_k3_r5/mailbox/hw_types.h b/apps/machine/ti_k3_r5/mailbox/hw_types.h new file mode 100644 index 000000000..651b3a516 --- /dev/null +++ b/apps/machine/ti_k3_r5/mailbox/hw_types.h @@ -0,0 +1,85 @@ +/* + * hw_types.h + */ + +/* +* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ +*/ +/* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* +* Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the +* distribution. +* +* Neither the name of Texas Instruments Incorporated nor the names of +* its contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. 
+* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +*/ + + +#ifndef _HW_TYPES_H_ +#define _HW_TYPES_H_ + +//***************************************************************************** +// +// Define a boolean type, and values for true and false. +// +//***************************************************************************** +typedef unsigned char tBoolean; + +#ifndef true +#define true 1 +#endif + +#ifndef false +#define false 0 +#endif + +#ifndef NULL +#define NULL ((void*) 0) +#endif +//***************************************************************************** +// +// Macros for hardware access, both direct and via the bit-band region. +// +//***************************************************************************** +#define HWREG(x) \ + (*((volatile unsigned int *)(x))) +#define HWREGH(x) \ + (*((volatile unsigned short *)(x))) +#define HWREGB(x) \ + (*((volatile unsigned char *)(x))) +#define HWREGBITW(x, b) \ + HWREG(((unsigned int)(x) & 0xF0000000) | 0x02000000 | \ + (((unsigned int)(x) & 0x000FFFFF) << 5) | ((b) << 2)) +#define HWREGBITH(x, b) \ + HWREGH(((unsigned int)(x) & 0xF0000000) | 0x02000000 | \ + (((unsigned int)(x) & 0x000FFFFF) << 5) | ((b) << 2)) +#define HWREGBITB(x, b) \ + HWREGB(((unsigned int)(x) & 0xF0000000) | 0x02000000 | \ + (((unsigned int)(x) & 0x000FFFFF) << 5) | ((b) << 2)) + +#define TRUE 1 +#define FALSE 0 + +#endif // __HW_TYPES_H__ diff --git a/apps/machine/ti_k3_r5/mailbox/mailbox.c b/apps/machine/ti_k3_r5/mailbox/mailbox.c new file mode 100644 index 000000000..3460cefe3 --- /dev/null +++ b/apps/machine/ti_k3_r5/mailbox/mailbox.c @@ -0,0 +1,247 @@ +/* + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/**
+ * \file mailbox.c
+ *
+ * \brief Mailbox Device Abstraction Layer APIs
+ *
+ * This file contains the device abstraction layer APIs for the
+ * mailbox module. These are used for IPC communication.
+ */
+
+/* ========================================================================== */
+/* Include Files */
+/* ========================================================================== */
+/** \brief This is to disable HW_SYNC_BARRIER for register access */
+#define MEM_BARRIER_DISABLE
+
+#include "stdint.h"
+#include "mailbox.h"
+
+/* ========================================================================== */
+/* Function Definitions */
+/* ========================================================================== */
+void MailboxReset(uintptr_t baseAddr)
+{
+ /* Start the soft reset sequence */
+ /* write SOFTRESET field */
+ CSL_REG32_FINS(baseAddr + CSL_MAILBOX_SYSCONFIG, MAILBOX_SYSCONFIG_SOFT_RESET,
+ MAILBOX_SYSCONFIG_SOFTRESET_B1);
+
+ /* Wait till the reset is complete */
+ while (MAILBOX_SYSCONFIG_SOFTRESET_B1 ==
+ CSL_REG32_FEXT(baseAddr + MAILBOX_SYSCONFIG,
+ MAILBOX_SYSCONFIG_SOFT_RESET))
+ {
+ /* Do nothing - Busy wait */
+ }
+
+ return;
+}
+
+void MailboxConfigIdleMode(uintptr_t baseAddr, uint32_t idleMode)
+{
+#if defined(SOC_AM572x)|| defined(SOC_AM571x) || defined(SOC_TDA2XX) || \
+ defined(SOC_DRA7XX) || defined(SOC_TDA2EX) || defined(SOC_TDA3XX)
+ /* Configure idle mode */
+ HW_WR_FIELD32(baseAddr + MAILBOX_SYSCONFIG, MAILBOX_SYSCONFIG_SIDLEMODE,
+ idleMode);
+#else
+ (void) baseAddr;
+ (void) idleMode;
+#endif
+}
+
+uint32_t MailboxGetMessage(uintptr_t baseAddr, uint32_t queueId,
+ uint32_t *msgPtr)
+{
+ uint32_t msgCount;
+ uint32_t retval;
+
+ msgCount = MailboxGetMessageCount(baseAddr, queueId);
+
+ if (msgCount > 0U)
+ {
+ /* Read message */
+ *msgPtr = CSL_REG32_RD(baseAddr + CSL_MAILBOX_MESSAGE(queueId));
+ retval = MESSAGE_VALID;
+ }
+ else
+ {
+ /* Queue empty */
+ retval = MESSAGE_INVALID;
+ }
+
+ return retval;
+}
+
+void MailboxReadMessage(uintptr_t baseAddr, uint32_t queueId,
+ uint32_t *msgPtr)
+{
+ /* Read message */
+ *msgPtr = CSL_REG32_RD(baseAddr + CSL_MAILBOX_MESSAGE(queueId));
+}
+
+uint32_t MailboxSendMessage(uintptr_t baseAddr, uint32_t queueId, uint32_t msg)
+{
+ uint32_t fifoFull;
+ uint32_t retval;
+
+ /* Read the FIFO Status */
+ fifoFull = CSL_REG32_RD(baseAddr + CSL_MAILBOX_FIFO_STATUS(queueId));
+ if (fifoFull == 0U)
+ {
+ /* FIFO not full, write msg */
+ MailboxWriteMessage(baseAddr, queueId, msg);
+ retval = MESSAGE_VALID;
+ }
+ else
+ {
+ retval = MESSAGE_INVALID;
+ }
+
+ return retval;
+}
+
+void MailboxEnableNewMsgInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId)
+{
+ /* Set the NewMsgEnable field - Writing zero has no effect */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_USER_IRQ_ENABLE_SET(userId), (uint32_t) 0x1 <<
+ (queueId * 2U));
+}
+
+void MailboxEnableQueueNotFullInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId)
+{
+ /* Set the NotFullEnable field - Writing zero has no effect */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_USER_IRQ_ENABLE_SET(userId), (uint32_t) 0x2 <<
+ (queueId * 2U));
+}
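The per-user IRQ registers pack two event bits per queue: bit 2*q signals "new message" and bit 2*q+1 signals "queue not full", which is why the helpers above shift 0x1 or 0x2 by (queueId * 2U). As an illustration only (this wrapper is not part of the driver), both events for one queue could be enabled in a single write:

/* Hypothetical example: enable both per-queue events for one user at once. */
static void MailboxEnableAllInts(uintptr_t baseAddr, uint32_t userId,
                                 uint32_t queueId)
{
	/* new message = bit 2*q, queue not full = bit 2*q + 1 */
	CSL_REG32_WR(baseAddr + CSL_MAILBOX_USER_IRQ_ENABLE_SET(userId),
	             (uint32_t) 0x3 << (queueId * 2U));
}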
+
+void MailboxDisableNewMsgInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId)
+{
+ /* Clear the NewMsgEnable field - Writing zero has no effect */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_USER_IRQ_ENABLE_CLR(userId), (uint32_t) 0x1 <<
+ (queueId * 2U));
+}
+
+void MailboxDisableQueueNotFullInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId)
+{
+ /* Clear the NotFullEnable field - Writing zero has no effect */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_USER_IRQ_ENABLE_CLR(userId), (uint32_t) 0x2 <<
+ (queueId * 2U));
+}
+
+void MailboxClrNewMsgStatus(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId)
+{
+ /* Clear the NewMsgStatus field - Writing zero has no effect */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_USER_IRQ_STATUS_CLR(userId), (uint32_t) 0x1 <<
+ (queueId * 2U));
+}
+
+void MailboxClrQueueNotFullStatus(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId)
+{
+ /* Clear the NotFullStatus field - Writing zero has no effect */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_USER_IRQ_STATUS_CLR(userId), (uint32_t) 0x2 <<
+ (queueId * 2U));
+}
+
+uint32_t MailboxGetRawNewMsgStatus(uintptr_t baseAddr,
+ uint32_t userId,
+ uint32_t queueId)
+{
+ uint32_t regVal;
+
+ /* Read the IRQSTATUSRAW */
+ regVal = CSL_REG32_RD(baseAddr + CSL_MAILBOX_USER_IRQ_STATUS_RAW(userId));
+
+ /* Mask & read the NewMsgStatus for given queueId */
+ regVal &= (uint32_t) 0x1 << (queueId * 2U);
+
+ return (regVal >> (queueId * 2U));
+}
+
+uint32_t MailboxGetRawQueueNotFullStatus(uintptr_t baseAddr,
+ uint32_t userId,
+ uint32_t queueId)
+{
+ uint32_t regVal;
+
+ /* Read the IRQSTATUSRAW */
+ regVal = CSL_REG32_RD(baseAddr + CSL_MAILBOX_USER_IRQ_STATUS_RAW(userId));
+
+ /* Mask & read the FIFO Not full for given queueId */
+ regVal &= (uint32_t) 0x2 << (queueId * 2U);
+
+ return (regVal >> (queueId * 2U));
+}
+
+uint32_t MailboxGetIrqEnableStatus(uintptr_t baseAddr,
+ uint32_t userId,
+ uint32_t queueId)
+{
+ uint32_t regVal;
+
+ /* Read the IRQENABLESET */
+ regVal = CSL_REG32_RD(baseAddr + CSL_MAILBOX_USER_IRQ_ENABLE_SET(userId));
+
+ /* Mask & read the new message IRQ enable for the given queueId */
+ regVal &= (uint32_t) 0x1 << (queueId * 2U);
+
+ return (regVal);
+}
+
+uint32_t MailboxGetMessageCount(uintptr_t baseAddr,
+ uint32_t queueId)
+{
+ /* Return message count */
+ return (CSL_REG32_RD(baseAddr + CSL_MAILBOX_MSG_STATUS(queueId)));
+}
+
+void MailboxWriteMessage(uintptr_t baseAddr, uint32_t queueId, uint32_t msg)
+{
+ /* Write mailbox message */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_MESSAGE(queueId), msg);
+}
+
+void MailboxWriteEOI(uintptr_t baseAddr, uint32_t value)
+{
+ /* Acknowledge the interrupt by writing the EOI register */
+ CSL_REG32_WR(baseAddr + CSL_MAILBOX_IRQ_EOI, value);
+}
+/********************************* End of file ******************************/
diff --git a/apps/machine/ti_k3_r5/mailbox/mailbox.h b/apps/machine/ti_k3_r5/mailbox/mailbox.h
new file mode 100644
index 000000000..40817913c
--- /dev/null
+++ b/apps/machine/ti_k3_r5/mailbox/mailbox.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +/** + * \ingroup CSL_IP_MODULE + * \defgroup CSL_MAILBOX Mailbox + * + * @{ + */ +/** + * \file mailbox.h + * + * \brief This file contains the function prototypes for Mailbox access. + */ + +#ifndef MAILBOX_H_ +#define MAILBOX_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* ========================================================================== */ +/* Include Files */ +/* ========================================================================== */ +#include "hw_types.h" +#include "cslr_mailbox.h" +#include "hw_mailbox.h" + +/* ========================================================================== */ +/* Macros */ +/* ========================================================================== */ +/* + * \name User Id's Corresponds to the Core using the mailbox + * Mailbox Queue's Corresponds to the Mailbox Queue Number + * @{ + */ +/** + * \brief MAILBOX_QUEUE_0 - Mailbox message queue 0 + */ +#define MAILBOX_QUEUE_0 0U +/** + * \brief MAILBOX_QUEUE_1 - Mailbox message queue 1 + */ +#define MAILBOX_QUEUE_1 1U +/** + * \brief MAILBOX_QUEUE_2 - Mailbox message queue 2 + */ +#define MAILBOX_QUEUE_2 2U +/** + * \brief MAILBOX_QUEUE_3 - Mailbox message queue 3 + */ +#define MAILBOX_QUEUE_3 3U +/** + * \brief MAILBOX_QUEUE_4 - Mailbox message queue 4 + */ +#define MAILBOX_QUEUE_4 4U +/** + * \brief MAILBOX_QUEUE_5 - Mailbox message queue 5 + */ +#define MAILBOX_QUEUE_5 5U +/** + * \brief MAILBOX_QUEUE_6 - Mailbox message queue 6 + */ +#define MAILBOX_QUEUE_6 6U +/** + * \brief MAILBOX_QUEUE_7 - Mailbox message queue 7 + */ +#define MAILBOX_QUEUE_7 7U +/** + * \brief MAILBOX_QUEUE_8 - Mailbox message queue 8 + */ +#define MAILBOX_QUEUE_8 8U +/** + * \brief MAILBOX_QUEUE_9 - Mailbox message queue 9 + */ +#define MAILBOX_QUEUE_9 9U +/** + * \brief MAILBOX_QUEUE_10 - Mailbox message queue 10 + */ +#define MAILBOX_QUEUE_10 10U +/** + * \brief MAILBOX_QUEUE_11 - Mailbox message queue 11 + */ +#define MAILBOX_QUEUE_11 11U +/** + * \brief MAILBOX_QUEUE_12 - Mailbox message queue 12 + */ +#define MAILBOX_QUEUE_12 12U +/** + * \brief MAILBOX_QUEUE_13 - Mailbox message queue 13 + */ +#define MAILBOX_QUEUE_13 13U +/** + * \brief MAILBOX_QUEUE_14 - Mailbox message queue 14 + */ +#define MAILBOX_QUEUE_14 14U +/** + * \brief MAILBOX_QUEUE_15 - Mailbox message queue 15 + */ 
+#define MAILBOX_QUEUE_15 15U
+/* In case of SOC_AM65XX, there are 64 queues; macros are not provided, for simplicity */
+/* @} */
+
+/*
+ * \name Values that can be returned by MailboxGetMessage/MailboxSendMessage
+ * to notify whether the message received/sent is valid
+ * @{
+ */
+/**
+ * \brief MESSAGE_VALID - Valid message
+ */
+#define MESSAGE_VALID 0U
+/**
+ * \brief MESSAGE_INVALID - Invalid message
+ */
+#define MESSAGE_INVALID 1U
+/* @} */
+
+/* ========================================================================== */
+/* Function Declarations */
+/* ========================================================================== */
+
+/* Queue Access APIs */
+/**
+ * \brief This function resets the mailbox
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * MAILBOXn (n = 1 to 10) Ex MAILBOX1, MAILBOX2, etc
+ * EVEn_MLBm (n = 1 to 3, m = 1 to 4)
+ *
+ * @return None
+ */
+void MailboxReset(uintptr_t baseAddr);
+
+/**
+ * \brief This function configures the idle mode of the mailbox
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param idleMode Idle mode to be configured. Possible values are
+ * 0x0: Force-idle. An idle request is acknowledged
+ * unconditionally
+ * 0x1: No-idle. An idle request is never acknowledged
+ * 0x2: Smart-idle. Acknowledgement to an idle request is
+ * given based on the internal activity of the module
+ *
+ * @return None
+ */
+void MailboxConfigIdleMode(uintptr_t baseAddr, uint32_t idleMode);
+
+/**
+ * \brief This function gets the first message in the queue
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param queueId Queue to be read
+ * @param *msgPtr Message pointer in which the message will be returned
+ *
+ * @return Validity The return value indicates whether the message is valid
+ */
+uint32_t MailboxGetMessage(uintptr_t baseAddr, uint32_t queueId,
+ uint32_t *msgPtr);
+
+/**
+ * \brief This function writes message in the queue
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param queueId Queue to be written
+ * @param msg Message to be sent
+ *
+ * @return status The return value indicates whether the message is
+ * written to the queue. Possible values are:
+ * MESSAGE_VALID (0) - Written successfully
+ * MESSAGE_INVALID (1) - Queue full, message not written
+ */
+uint32_t MailboxSendMessage(uintptr_t baseAddr, uint32_t queueId, uint32_t msg);
+
+/* Mailbox user (HW using mailbox) access APIs */
+/**
+ * \brief This function enables the new message interrupt for a user for a
+ * given queue
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the new message should be intimated
+ * @param queueId Queue to be monitored for new message
+ *
+ * @return None
+ */
+void MailboxEnableNewMsgInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+
+/**
+ * \brief This function enables the queue not full interrupt for a user for a
+ * given queue
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the event should be intimated
+ * @param queueId Queue to be monitored for non-full condition
+ *
+ * @return None
+ */
+void MailboxEnableQueueNotFullInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+
+/**
+ * \brief This function disables the new message interrupt for a user for a
+ * given queue
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the new message event should be disabled
+ * @param queueId Queue to be monitored for new message
+ *
+ * @return None
+ */
+void MailboxDisableNewMsgInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
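Because MailboxSendMessage() returns MESSAGE_INVALID only when the FIFO is full, a sender that must not drop a kick can simply retry. A minimal caller sketch (the queue number is an illustrative placeholder, not mandated by this API):

/* Hypothetical example: block until the message is accepted by the FIFO. */
static void mailbox_send_blocking(uintptr_t baseAddr, uint32_t msg)
{
	while (MailboxSendMessage(baseAddr, MAILBOX_QUEUE_0, msg) !=
	       MESSAGE_VALID) {
		/* FIFO full - wait for the receiver to drain it */
	}
}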
+
+/**
+ * \brief This function disables the queue not full interrupt for a user for a
+ * given queue
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the event should be disabled
+ * @param queueId Queue for which the non-full event is to be disabled
+ *
+ * @return None
+ */
+void MailboxDisableQueueNotFullInt(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+
+/**
+ * \brief This function clears the new message status
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the event should be cleared
+ * @param queueId Queue for which the event should be cleared
+ *
+ * @return None
+ */
+void MailboxClrNewMsgStatus(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+
+/**
+ * \brief This function clears the queue not-full status
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the event should be cleared
+ * @param queueId Queue for which the event should be cleared
+ *
+ * @return None
+ */
+void MailboxClrQueueNotFullStatus(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+
+/**
+ * \brief This function gets the raw new message status
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the event should be checked
+ * @param queueId Queue for which the event should be checked
+ *
+ * @return status Status of new message
+ */
+uint32_t MailboxGetRawNewMsgStatus(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+
+/**
+ * \brief This function gets the raw queue not-full status
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the event should be checked
+ * @param queueId Queue for which the event should be checked
+ *
+ * @return status Queue not full status
+ */
+uint32_t MailboxGetRawQueueNotFullStatus(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+
+/**
+ * \brief This function gets IRQ enable status
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param userId User for whom the event should be checked
+ * @param queueId Queue for which the event should be checked
+ *
+ * @return status New message IRQ enable status for the given queue
+ */
+uint32_t MailboxGetIrqEnableStatus(uintptr_t baseAddr, uint32_t userId,
+ uint32_t queueId);
+/**
+ * \brief This function gets message count in the mailbox
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param queueId Mailbox FIFO id
+ *
+ * @return count Number of messages in the queue
+ */
+uint32_t MailboxGetMessageCount(uintptr_t baseAddr, uint32_t queueId);
+
+/**
+ * \brief This function writes the mailbox message register
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param queueId Mailbox FIFO id
+ * @param msg value to be written to mailbox
+ *
+ * @return None
+ */
+void MailboxWriteMessage(uintptr_t baseAddr, uint32_t queueId, uint32_t msg);
+
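On the receive side, a handler typically drains the queue, clears the new-message status, and finally writes EOI so the user interrupt line can fire again. A sketch under those assumptions (user and queue ids are illustrative placeholders):

/* Hypothetical example ISR body: drain queue 1 as user 0, then re-arm. */
static void mailbox_drain(uintptr_t baseAddr)
{
	uint32_t msg;

	while (MailboxGetMessage(baseAddr, MAILBOX_QUEUE_1, &msg) ==
	       MESSAGE_VALID) {
		/* ... handle msg ... */
	}
	MailboxClrNewMsgStatus(baseAddr, 0U, MAILBOX_QUEUE_1);
	MailboxWriteEOI(baseAddr, 0U);
}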
+/**
+ * \brief This function writes EOI register
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param value EOI value, identifying the user interrupt line to re-arm
+ *
+ * @return None
+ */
+void MailboxWriteEOI(uintptr_t baseAddr, uint32_t value);
+
+/**
+ * \brief This function reads mailbox register
+ *
+ * @param baseAddr It is the Memory address of the Mailbox instance.
+ * @param queueId Mailbox FIFO id
+ * @param msgPtr Message pointer in which the message will be returned
+ *
+ * @return None
+ */
+void MailboxReadMessage(uintptr_t baseAddr, uint32_t queueId,
+ uint32_t *msgPtr);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+ /** @} */
+/********************************* End of file ******************************/
diff --git a/apps/machine/ti_k3_r5/platform_info.c b/apps/machine/ti_k3_r5/platform_info.c
new file mode 100644
index 000000000..64e057443
--- /dev/null
+++ b/apps/machine/ti_k3_r5/platform_info.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2014, Mentor Graphics Corporation
+ * All rights reserved.
+ * Copyright (c) 2017 Xilinx, Inc.
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "r5/kernel/dpl/HwiP.h"
+#include "platform_info.h"
+#include "rsc_table.h"
+#include "mailbox.h"
+
+/* Polling information used by remoteproc operations */
+static metal_phys_addr_t poll_phys_addr = MAILBOX_BASE_ADDR;
+struct metal_device mailbox_device = {
+ .name = KICK_DEV_NAME,
+ .bus = NULL,
+ .num_regions = 1,
+ .regions = {
+ {
+ .virt = (void *)MAILBOX_BASE_ADDR,
+ .physmap = &poll_phys_addr,
+ .size = 0x1000,
+ .page_shift = -1UL,
+ .page_mask = -1UL,
+ .mem_flags = DEVICE_NONSHARED | PRIV_RW_USER_RW,
+ .ops = {NULL},
+ }
+ },
+ .node = {NULL},
+
+ .irq_num = 1,
+ .irq_info = (void *)MAILBOX_NEW_MSG_INT,
+};
+
+static struct remoteproc_priv rproc_priv = {
+ .kick_dev_name = KICK_DEV_NAME,
+ .kick_dev_bus_name = KICK_BUS_NAME,
+};
+
+static struct remoteproc rproc_inst;
+uint32_t virtqueue_id = 0;
+
+#ifndef RPMSG_NO_IPI
+void am64_r5_a53_proc_irq_handler(void *args)
+{
+ uint32_t msg;
+ struct remoteproc *rproc = args;
+ struct remoteproc_priv *prproc;
+
+ if (!rproc)
+ return;
+
+ prproc = rproc->priv;
+
+ // get virtqueue number to clear interrupt
+ MailboxGetMessage(MAILBOX_BASE_ADDR, 1, &msg);
+
+ if (msg < INT32_MAX) {
+ virtqueue_id |= 1 << msg;
+ atomic_flag_clear(&prproc->ipi_nokick);
+ }
+
+ HwiP_clearInt(MAILBOX_NEW_MSG_INT);
+}
+#endif
+
+/* processor operations from r5 to a53 */
+static struct remoteproc * am64_r5_a53_proc_init(struct remoteproc *rproc,
+ const struct remoteproc_ops *ops,
+ void *arg)
+{
+ struct remoteproc_priv *prproc = arg;
+ if (!rproc || !prproc || !ops)
+ return NULL;
+
+ rproc->ops = ops;
+
+ #ifndef RPMSG_NO_IPI
+ // enable new message interrupt from the mailbox
+ MailboxEnableNewMsgInt(MAILBOX_BASE_ADDR, 0, 1);
+
+ // enable mailbox interrupt for r5f
+ HwiP_Params hwiParams;
+ HwiP_Object hwiObj;
+
+ HwiP_Params_init(&hwiParams);
+ hwiParams.intNum = MAILBOX_NEW_MSG_INT;
+ hwiParams.callback = am64_r5_a53_proc_irq_handler;
+ hwiParams.args = rproc;
+
+ HwiP_init();
+ HwiP_construct(&hwiObj, &hwiParams);
+ HwiP_enable();
+ #endif
+
+ return rproc;
+}
+
+static void am64_r5_a53_proc_remove(struct remoteproc *rproc)
+{
+ struct remoteproc_priv *prproc;
+
+ if (!rproc)
+ return;
+ prproc = rproc->priv;
+
+ metal_device_close(prproc->kick_dev);
+}
+
+static void *
+am64_r5_a53_proc_mmap(struct remoteproc *rproc, metal_phys_addr_t *pa,
+ metal_phys_addr_t *da, size_t size,
+ unsigned int attribute, struct
metal_io_region **io)
+{
+ struct remoteproc_mem *mem;
+ metal_phys_addr_t lpa, lda;
+ struct metal_io_region *tmpio;
+
+ lpa = *pa;
+ lda = *da;
+
+ if (lpa == METAL_BAD_PHYS && lda == METAL_BAD_PHYS)
+ return NULL;
+ if (lpa == METAL_BAD_PHYS)
+ lpa = lda;
+ if (lda == METAL_BAD_PHYS)
+ lda = lpa;
+
+ if (!attribute)
+ attribute = NORM_SHARED_NCACHE | PRIV_RW_USER_RW;
+ mem = metal_allocate_memory(sizeof(*mem));
+ if (!mem)
+ return NULL;
+ tmpio = metal_allocate_memory(sizeof(*tmpio));
+ if (!tmpio) {
+ metal_free_memory(mem);
+ return NULL;
+ }
+ remoteproc_init_mem(mem, NULL, lpa, lda, size, tmpio);
+ /* va is the same as pa in this platform */
+ metal_io_init(tmpio, (void *)lpa, &mem->pa, size,
+ sizeof(metal_phys_addr_t) << 3, attribute, NULL);
+ remoteproc_add_mem(rproc, mem);
+ *pa = lpa;
+ *da = lda;
+ if (io)
+ *io = tmpio;
+ return metal_io_phys_to_virt(tmpio, mem->pa);
+}
+
+static int am64_r5_a53_proc_notify(struct remoteproc *rproc, uint32_t id)
+{
+ (void)rproc;
+
+ // Put message in mailbox
+ if (MailboxSendMessage(MAILBOX_BASE_ADDR, 0, id) == MESSAGE_VALID)
+ printf("Sent on queue 0: %lu\n", id);
+
+ return 0;
+}
+
+/* processor operations from r5 to a53. It defines the
+ * notification operation and remote processor management operations. */
+const struct remoteproc_ops am64_r5_a53_proc_ops = {
+ .init = am64_r5_a53_proc_init,
+ .remove = am64_r5_a53_proc_remove,
+ .mmap = am64_r5_a53_proc_mmap,
+ .notify = am64_r5_a53_proc_notify,
+ .start = NULL,
+ .stop = NULL,
+ .shutdown = NULL,
+};
+
+/* RPMsg virtio shared buffer pool */
+static struct rpmsg_virtio_shm_pool shpool;
+
+static struct remoteproc *
+platform_create_proc(int proc_index, int rsc_index)
+{
+ void *rsc_table;
+ int rsc_size;
+ int ret;
+ metal_phys_addr_t pa;
+
+ (void) proc_index;
+ rsc_table = get_resource_table(rsc_index, &rsc_size);
+
+ /* Register IPI device */
+ if (metal_register_generic_device(&mailbox_device))
+ return NULL;
+
+ /* Initialize remoteproc instance */
+ if (!remoteproc_init(&rproc_inst, &am64_r5_a53_proc_ops, &rproc_priv))
+ return NULL;
+
+ /*
+ * Mmap shared memories
+ * Or shall we constrain that they will be set as carved out
+ * in the resource table?
+ */
+ /* mmap resource table */
+ pa = (metal_phys_addr_t)rsc_table;
+ (void *)remoteproc_mmap(&rproc_inst, &pa,
+ NULL, rsc_size,
+ NORM_NSHARED_NCACHE|PRIV_RW_USER_RW,
+ &rproc_inst.rsc_io);
+ /* mmap shared memory */
+ pa = SHARED_MEM_PA;
+ (void *)remoteproc_mmap(&rproc_inst, &pa,
+ NULL, SHARED_MEM_SIZE,
+ NORM_NSHARED_NCACHE|PRIV_RW_USER_RW,
+ NULL);
+
+ /* parse resource table to remoteproc */
+ ret = remoteproc_set_rsc_table(&rproc_inst, rsc_table, rsc_size);
+ if (ret) {
+ printf("Failed to initialize remoteproc (%d)\n", ret);
+ remoteproc_remove(&rproc_inst);
+ return NULL;
+ }
+ printf("Initialized remoteproc successfully.\r\n");
+
+ return &rproc_inst;
+}
+
+int platform_init(int argc, char *argv[], void **platform)
+{
+ unsigned long proc_id = 0;
+ unsigned long rsc_id = 0;
+ struct remoteproc *rproc;
+
+ if (!platform) {
+ printf("Failed to initialize platform, "
+ "NULL pointer to store platform data.\r\n");
+ return -EINVAL;
+ }
+ /* Initialize HW system components */
+
+ /* Low level abstraction layer for openamp initialization */
+ struct metal_init_params init_param = METAL_INIT_DEFAULTS;
+ metal_init(&init_param);
+
+ if (argc >= 2) {
+ proc_id = strtoul(argv[1], NULL, 0);
+ }
+
+ if (argc >= 3) {
+ rsc_id = strtoul(argv[2], NULL, 0);
+ }
+
+ rproc = platform_create_proc(proc_id, rsc_id);
+ if (!rproc) {
+ printf("Failed to create remoteproc device.\r\n");
+ return -EINVAL;
+ }
+ *platform = rproc;
+ return 0;
+}
+
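For orientation, a remote application drives these entry points in an init/create/poll/cleanup sequence. The sketch below assumes open-amp's VIRTIO_DEV_DEVICE role constant and an illustrative app_done flag, neither of which is defined by this patch:

/* Hypothetical example: minimal application flow on top of this file. */
static volatile int app_done; /* illustrative completion flag */

int main(int argc, char *argv[])
{
	void *platform;
	struct rpmsg_device *rpdev;

	if (platform_init(argc, argv, &platform))
		return -1;

	/* the remote side acts as the virtio device */
	rpdev = platform_create_rpmsg_vdev(platform, 0, VIRTIO_DEV_DEVICE,
					   NULL, NULL);
	if (!rpdev) {
		platform_cleanup(platform);
		return -1;
	}

	/* ... create rpmsg endpoint(s) here ... */

	while (!app_done)
		platform_poll(platform);

	platform_release_rpmsg_vdev(rpdev, platform);
	platform_cleanup(platform);
	return 0;
}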
+	 */
+	vdev = remoteproc_create_virtio(rproc, vdev_index, role, rst_cb);
+	if (!vdev) {
+		printf("failed remoteproc_create_virtio\r\n");
+		goto err1;
+	}
+
+	printf("initializing rpmsg shared buffer pool\r\n");
+	/* Only RPMsg virtio driver needs to initialize the shared buffers pool */
+	rpmsg_virtio_init_shm_pool(&shpool, shbuf,
+				   (SHARED_MEM_SIZE - SHARED_BUF_OFFSET));
+
+	printf("initializing rpmsg vdev\r\n");
+	/* RPMsg virtio device can set shared buffers pool argument to NULL */
+	ret = rpmsg_init_vdev(rpmsg_vdev, vdev, ns_bind_cb,
+			      shbuf_io,
+			      &shpool);
+	if (ret) {
+		printf("failed rpmsg_init_vdev\r\n");
+		goto err2;
+	}
+	printf("rpmsg vdev initialized\r\n");
+	return rpmsg_virtio_get_rpmsg_device(rpmsg_vdev);
+err2:
+	remoteproc_remove_virtio(rproc, vdev);
+err1:
+	metal_free_memory(rpmsg_vdev);
+	return NULL;
+}
+
+int platform_poll(void *priv)
+{
+	struct remoteproc *rproc = priv;
+	struct remoteproc_priv *prproc;
+	uintptr_t oldIntState;
+	unsigned int flags;
+	uint32_t msg;
+	int ret;
+
+	prproc = rproc->priv;
+
+	while(1) {
+	#ifdef RPMSG_NO_IPI
+		if (MailboxGetMessage(MAILBOX_BASE_ADDR, 1, &msg) == MESSAGE_VALID) {
+			ret = remoteproc_get_notification(rproc, msg);
+			if (ret)
+				return ret;
+			break;
+		}
+	#else /* interrupts enabled */
+		oldIntState = HwiP_disable();
+		flags = metal_irq_save_disable();
+		if (!(atomic_flag_test_and_set(&prproc->ipi_nokick))) {
+			metal_irq_restore_enable(flags);
+
+			// find which virtqueue has a message by checking the virtqueue_id bitmask
+			for (uint32_t i = 0; i < 32; i++) {
+				if ((virtqueue_id & (1U << i)) > 0) {
+					virtqueue_id &= ~(1U << i); // clear the serviced virtqueue bit
+					ret = remoteproc_get_notification(rproc, i);
+
+					if (ret)
+						return ret;
+					break;
+				}
+			}
+			HwiP_restore(oldIntState);
+			break; // notification handled, exit the poll loop
+		}
+		_rproc_wait();
+		metal_irq_restore_enable(flags);
+		HwiP_restore(oldIntState);
+	#endif
+	}
+	return 0;
+}
+
+void platform_release_rpmsg_vdev(struct rpmsg_device *rpdev, void *platform)
+{
+	struct rpmsg_virtio_device *rpvdev;
+	struct remoteproc *rproc;
+
+	rpvdev = metal_container_of(rpdev, struct rpmsg_virtio_device, rdev);
+	rproc = platform;
+
+	rpmsg_deinit_vdev(rpvdev);
+	remoteproc_remove_virtio(rproc, rpvdev->vdev);
+}
+
+void platform_cleanup(void *platform)
+{
+	struct remoteproc *rproc = platform;
+
+	if (rproc)
+		remoteproc_remove(rproc);
+//	cleanup_system();
+
+	metal_finish();
+}
diff --git a/apps/machine/ti_k3_r5/platform_info.h b/apps/machine/ti_k3_r5/platform_info.h
new file mode 100644
index 000000000..e737dd66d
--- /dev/null
+++ b/apps/machine/ti_k3_r5/platform_info.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2014, Mentor Graphics Corporation
+ * All rights reserved.
+ * Copyright (c) 2017 Xilinx, Inc.
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_INFO_H_
+#define PLATFORM_INFO_H_
+
+#include <openamp/remoteproc.h>
+#include <openamp/virtio.h>
+#include <openamp/rpmsg.h>
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+/* Cortex R5 memory attributes */
+#define DEVICE_SHARED		0x00000001U /* device, shareable */
+#define DEVICE_NONSHARED	0x00000010U /* device, non shareable */
+#define NORM_NSHARED_NCACHE	0x00000008U /* Non cacheable non shareable */
+#define NORM_SHARED_NCACHE	0x0000000CU /* Non cacheable shareable */
+#define PRIV_RW_USER_RW		(0x00000003U<<8U) /* Full Access */
+#define DDR_BASE_ADDR		0x80000000u
+
+#define AM64_R5FSS1_0_MAILBOX	0x29040000U
+#define RPMSG_BASE_ADDR		0xA2000000u
+#define RSC_TABLE_BASE_ADDR	0xA2100000u
+
+#define MAILBOX_BASE_ADDR	AM64_R5FSS1_0_MAILBOX
+#define MAILBOX_NEW_MSG_INT	98
+
+#ifndef RPMSG_NO_IPI
+#define INT_BASE_ADDR		0x2FFF0000u
+#endif
+
+#define KICK_DEV_NAME		"mailbox"
+#define KICK_BUS_NAME		"generic"
+
+#ifndef SHARED_MEM_PA
+#define SHARED_MEM_PA		RPMSG_BASE_ADDR
+#endif /* !SHARED_MEM_PA */
+
+#ifndef SHARED_MEM_SIZE
+#define SHARED_MEM_SIZE		0x100000UL
+#endif /* !SHARED_MEM_SIZE */
+
+#ifndef SHARED_BUF_OFFSET
+#define SHARED_BUF_OFFSET	0x8000UL
+#endif /* !SHARED_BUF_OFFSET */
+
+#ifndef RPMSG_NO_IPI
+#define _rproc_wait()		asm volatile("wfi")
+#endif /* !RPMSG_NO_IPI */
+
+extern uint32_t virtqueue_id;
+
+struct remoteproc_priv {
+	const char *kick_dev_name;
+	const char *kick_dev_bus_name;
+	struct metal_device *kick_dev;
+	struct metal_io_region *kick_io;
+
+	#ifndef RPMSG_NO_IPI
+	atomic_int ipi_nokick;
+	#endif
+};
+
+/**
+ * platform_init - initialize the platform
+ *
+ * It will initialize the platform.
+ *
+ * @argc: number of arguments
+ * @argv: array of the input arguments
+ * @platform: pointer to store the platform data pointer
+ *
+ * return 0 for success or negative value for failure
+ */
+int platform_init(int argc, char *argv[], void **platform);
+
+/**
+ * platform_create_rpmsg_vdev - create rpmsg vdev
+ *
+ * It will create the rpmsg virtio device and return the rpmsg virtio
+ * device pointer.
+ *
+ * @platform: pointer to the private data
+ * @vdev_index: index of the virtio device, there can be more than one vdev
+ *              on the platform.
+ * @role: virtio driver or virtio device of the vdev
+ * @rst_cb: virtio device reset callback
+ * @ns_bind_cb: rpmsg name service bind callback
+ *
+ * return pointer to the rpmsg virtio device
+ */
+struct rpmsg_device *
+platform_create_rpmsg_vdev(void *platform, unsigned int vdev_index,
+			   unsigned int role,
+			   void (*rst_cb)(struct virtio_device *vdev),
+			   rpmsg_ns_bind_cb ns_bind_cb);
+
+/**
+ * platform_poll - platform poll function
+ *
+ * @platform: pointer to the platform
+ *
+ * return negative value for errors, otherwise 0.
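+ *
+ * Illustrative caller loop (a sketch; shutdown_req is an app-side flag,
+ * not part of this API):
+ *
+ *	do {
+ *		ret = platform_poll(platform);
+ *	} while (!ret && !shutdown_req);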
+ */ +int platform_poll(void *platform); + +/** + * platform_release_rpmsg_vdev - release rpmsg virtio device + * + * @rpdev: pointer to the rpmsg device + */ +void platform_release_rpmsg_vdev(struct rpmsg_device *rpdev, void *platform); + +/** + * platform_cleanup - clean up the platform resource + * + * @platform: pointer to the platform + */ +void platform_cleanup(void *platform); + +#if defined __cplusplus +} +#endif + +#endif /* PLATFORM_INFO_H_ */ diff --git a/apps/machine/ti_k3_r5/r5/CacheP_armv7r.c b/apps/machine/ti_k3_r5/r5/CacheP_armv7r.c new file mode 100755 index 000000000..002c7c012 --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/CacheP_armv7r.c @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2018-2021 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include + +#define CACHE_SECTION __attribute__((section(".text.cache"))) + +/* APIs defined in CacheP_armv7r_asm.S */ +uint32_t CacheP_getCacheLevelInfo(uint32_t level); +uint32_t CacheP_getEnabled(); +void CacheP_configForceWrThru(uint32_t enable); +void CacheP_disableL1d(); +void CacheP_disableL1p(); +void CacheP_enableL1d(); +void CacheP_enableL1p(); +void CacheP_invL1p(uint32_t blockPtr, uint32_t byteCnt); +void CacheP_invL1d(uint32_t blockPtr, uint32_t byteCnt); +void CacheP_setDLFO(); + +uint32_t gCacheL1dCacheLineSize = 32; +uint32_t gCacheL1pCacheLineSize = 32; + +/* these are defined as part of SysConfig */ +extern CacheP_Config gCacheConfig; + +void CACHE_SECTION CacheP_init() +{ + uint32_t info, enabled; + + /* Read L1D cache info registers */ + info = CacheP_getCacheLevelInfo(0); + gCacheL1dCacheLineSize = 4 << ((info & 0x7) + 2); + + /* Read L1P cache info registers for ROV */ + info = CacheP_getCacheLevelInfo(1); + gCacheL1pCacheLineSize = 4 << ((info & 0x7) + 2); + + enabled = CacheP_getEnabled(); + + /* disable the caches if anything is currently enabled */ + if (enabled) { + CacheP_disable(CacheP_TYPE_ALL); + } + + /* set DLFO, this is not needed on SOC AM64x and later SOCs */ + /* CacheP_setDLFO(); */ + + if (gCacheConfig.enable) { + CacheP_configForceWrThru(gCacheConfig.enableForceWrThru); + + /* + * CacheP_enable() code will invalidate the L1D and L1P caches. + * Therefore, no need to explicitly invalidate the cache here. + */ + CacheP_enable(CacheP_TYPE_ALL); + } +} + +void CACHE_SECTION CacheP_disable(uint32_t type) +{ + uint32_t enabled; + uintptr_t key; + + /* only disable caches that are currently enabled */ + enabled = CacheP_getEnabled(); + + if (enabled & (type & CacheP_TYPE_L1D)) { + key = HwiP_disable(); + CacheP_disableL1d(); /* Disable L1D Cache */ + HwiP_restore(key); + } + if (enabled & (type & CacheP_TYPE_L1P)) { + key = HwiP_disable(); + CacheP_disableL1p(); /* Disable L1P Cache */ + HwiP_restore(key); + } + +} + +void CACHE_SECTION CacheP_enable(uint32_t type) +{ + uint32_t disabled; + + /* only enable caches that are currently disabled */ + disabled = ~(CacheP_getEnabled()); + + if (disabled & (type & CacheP_TYPE_L1D)) { + CacheP_enableL1d(); /* Enable L1D Cache */ + } + if (disabled & (type & CacheP_TYPE_L1P)) { + CacheP_enableL1p(); /* Enable L1P Cache */ + } +} + +void CACHE_SECTION CacheP_inv(void *blockPtr, uint32_t byteCnt, uint32_t type) +{ + if (type & CacheP_TYPE_L1P) { + CacheP_invL1p((uint32_t)blockPtr, byteCnt); + } + if (type & CacheP_TYPE_L1D) { + CacheP_invL1d((uint32_t)blockPtr, byteCnt); + } +} diff --git a/apps/machine/ti_k3_r5/r5/CacheP_armv7r_asm.S b/apps/machine/ti_k3_r5/r5/CacheP_armv7r_asm.S new file mode 100755 index 000000000..0e0d80aaf --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/CacheP_armv7r_asm.S @@ -0,0 +1,480 @@ +/* + * Copyright (C) 2018-2021 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + .text + +/* FUNCTION DEF: void CacheP_disableL1d(void) */ + .global CacheP_disableL1d + .type CacheP_disableL1d,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_disableL1d: + push {r0-r7, r9-r11, lr} + mrc p15, #0, r0, c1, c0, #0 // read SCR register + bic r0, r0, #0x0004 // clear C bit + dsb + mcr p15, #0, r0, c1, c0, #0 // L1D cache disabled + // clean entire L1D cache + movw r0, :lower16:CacheP_wbInvAllAsm + movt r0, :upper16:CacheP_wbInvAllAsm + blx r0 + pop {r0-r7, r9-r11, lr} + bx lr + +/* FUNCTION DEF: void CacheP_disableL1p(void) */ + .global CacheP_disableL1p + .type CacheP_disableL1p,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_disableL1p: + mrc p15, #0, r0, c1, c0, #0 // read SCR register + bic r0, r0, #0x1000 // clear I bit + mcr p15, #0, r0, c1, c0, #0 // L1P cache disabled + mcr p15, #0, r1, c7, c5, #0 // Invalidate entire instruction cache + isb + bx lr + +/* FUNCTION DEF: void CacheP_enableL1d(void) */ + .global CacheP_enableL1d + .type CacheP_enableL1d,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_enableL1d: + mrc p15, #0, r0, c1, c0, #0 // read SCR register + orr r0, r0, #0x0004 // set C bit (bit 2) to 1 + dsb + mcr p15, #0, r1, c15, c5, #0 // Invalidate entire data cache + mcr p15, #0, r0, c1, c0, #0 // L1D cache enabled + bx lr + +/* FUNCTION DEF: void CacheP_enableL1p(void) */ + .global CacheP_enableL1p + .type CacheP_enableL1p,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_enableL1p: + mrc p15, #0, r0, c1, c0, #0 // read SCR register + orr r0, r0, #0x1000 // set I bit (bit 12) to 1 + mcr p15, #0, r1, c7, c5, #0 // Invalidate entire instruction cache + mcr p15, #0, r0, c1, c0, #0 // ICache enabled + isb + bx lr + +/* FUNCTION DEF: void CacheP_invL1d(uint32_t blockPtr, uint32_t byteCnt) + * + * r0 - contains blockPtr + * r1 - contains byteCnt + */ + .global CacheP_invL1d + .type CacheP_invL1d,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_invL1d: + push {r4} + add r1, r0, r1 // calculate last address + ldr r3, l1dCacheLineSizeInvL1dAddr + ldr r3, [r3] + sub r4, r3, #1 + bic r0, r0, r4 // align blockPtr to cache line +invL1dCache_loop: + mcr p15, #0, r0, c7, c6, #1 // invalidate single entry in L1D cache + add r0, r0, r3 // increment address by cache line size + cmp r0, r1 // compare to last address + blo invL1dCache_loop // loop if > 0 + dsb // drain write buffer + pop {r4} + bx lr // return + 
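+/* Literal pool: the invalidate loop above loads the current L1D cache line
+ * size through this pc-relative address instead of a hard-coded constant. */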
+l1dCacheLineSizeInvL1dAddr: .long gCacheL1dCacheLineSize + + +/* FUNCTION DEF: void CacheP_invL1p(uint32_t blockPtr, uint32_t byteCnt) + * + * r0 - contains blockPtr + * r1 - contains byteCnt + */ + .global CacheP_invL1p + .type CacheP_invL1p,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_invL1p: + push {r4} + add r1, r0, r1 // calculate last address + ldr r3, l1pCacheLineSizeAddr + ldr r3, [r3] + sub r4, r3, #1 + bic r0, r0, r4 // align blockPtr to cache line +invL1pCache_loop: + mcr p15, #0, r0, c7, c5, #1 // invalidate single entry in ICache + add r0, r0, r3 // increment address by cache line size + cmp r0, r1 // compare to last address + blo invL1pCache_loop // loop if > 0 + dsb // drain write buffer + isb // flush instruction pipeline + pop {r4} + bx lr + +l1pCacheLineSizeAddr: .word gCacheL1pCacheLineSize + +/* FUNCTION DEF: void CacheP_invL1dAll() + * + * Invalidates all in data cache. Note: This is risky since data cache may + * contain some stack variable or valid data that should not be invalidated. + * Only use this function if you know for sure the data cache contains unwanted + * information. + */ + .global CacheP_invL1dAll + .type CacheP_invL1dAll,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_invL1dAll: + mcr p15, #0, r0, c15, c5, #0 // Invalidate entire data cache + bx lr // return + + +/* FUNCTION DEF: void CacheP_invL1pAll() */ + .global CacheP_invL1pAll + .type CacheP_invL1pAll,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_invL1pAll: + mcr p15, #0, r0, c7, c5, #0 // invalidate all entries in ICache + bx lr // return + + +/* FUNCTION DEF: void CacheP_wb(void *addr, uint32_t size, uint32_t type) + * Writes back the range of MVA in data cache. First, wait on any previous cache + * operation. + * + * r0 - contains blockPtr + * r1 - contains byteCnt + * r2 - contains bit mask of cache type (unused) + */ + .global CacheP_wb + .type CacheP_wb,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_wb: + push {r4, r5} + dmb // Ensure all previous memory accesses + // complete + add r1, r0, r1 // calculate last address + ldr r4, l1dCacheLineSizeWbAddr + ldr r4, [r4] + sub r5, r4, #1 + bic r0, r0, r5 // align address to cache line +writeback: + mcr p15, #0, r0, c7, c10, #1 // write back a cache line + add r0, r0, r4 // increment address by cache line size + cmp r0, r1 // compare to last address + blo writeback // loop if count > 0 + dsb // drain write buffer + pop {r4, r5} + bx lr + +l1dCacheLineSizeWbAddr: .long gCacheL1dCacheLineSize + + +/* FUNCTION DEF: void CacheP_wbInv(void *addr, uint32_t size, uint32_t type) + * + * Writes back and invalidates the range of MVA in data cache. + * First, wait on any previous cache operation. 
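+ *
+ * C-level sketch of what the loop below does (illustrative only, not the
+ * actual implementation):
+ *
+ *   for (mva = addr & ~(lineSize - 1); mva < addr + size; mva += lineSize)
+ *       dccimvac(mva);  // clean+invalidate data cache line by MVA
+ *   dsb();              // then drain the write buffer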
+ * + * r0 - contains blockPtr + * r1 - contains byteCnt + * r2 - contains bitmask of cache type (unused) + */ + .global CacheP_wbInv + .type CacheP_wbInv,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_wbInv: + push {r4, r5} + dmb // Ensure all previous memory accesses + // complete + add r1, r0, r1 // calculate last address + ldr r4, l1dCacheLineSizeWbInvAddr + ldr r4, [r4] + sub r5, r4, #1 + bic r0, r0, r5 // align blockPtr to cache line +writebackInv: + mcr p15, #0, r0, c7, c14, #1 // writeback inv a cache line + add r0, r0, r4 // increment address by cache line size + cmp r0, r1 // compare to last address + blo writebackInv // loop if count > 0 + dsb // drain write buffer + pop {r4, r5} + bx lr + +l1dCacheLineSizeWbInvAddr: .long gCacheL1dCacheLineSize + + +/* FUNCTION DEF: void CacheP_wbAll() + * + * Write back all of L1 data cache + */ + .global CacheP_wbAll + .type CacheP_wbAll,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_wbAll: + stmfd sp!, {r0-r7, r9-r11, lr} + dmb // Ensure all previous memory accesses + // complete + mrc p15, #1, r0, c0, c0, #1 // read clidr + ands r3, r0, #0x7000000 // extract loc from clidr + mov r3, r3, lsr #23 // left align loc bit field + beq wbafinished // if loc is 0, then no need to clean + + mov r10, #0 // start clean at cache level 0 + +wbaloop1: + add r2, r10, r10, lsr #1 // work out 3x current cache level + mov r1, r0, lsr r2 // extract cache type bits from clidr + and r1, r1, #7 // mask of bits for current cache only + cmp r1, #2 // see what cache we have at this level + blt wbaskip // skip if no cache, or just i-cache + + mrs r6, cpsr + cpsid i // disable interrupts + mcr p15, #2, r10, c0, c0, #0// select current cache level in cssr + isb // flush prefetch buffer + mrc p15, #1, r1, c0, c0, #0 // read the new csidr + msr cpsr_c, r6 // restore interrupts + + and r2, r1, #7 // extract the length of the cache lines + add r2, r2, #4 // add 4 (line length offset) + mov r4, #0x3ff + ands r4, r4, r1, lsr #3 // find maximum number on the way size + clz r5, r4 // find bit position of way size inc. 
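+    // the clz result left-justifies the way index for the set/way register
+    // format used below: way in the top bits, set index in the middle,
+    // cache level in bits [3:1] (ARMv7-R DCCSW encoding)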
+ mov r7, #0x7fff + ands r7, r7, r1, lsr #13 // extract max number of the index size +wbaloop2: + mov r9, r4 // create working copy of max way size +wbaloop3: + orr r11, r10, r9, lsl r5 // factor way and cache number into r11 + orr r11, r11, r7, lsl r2 // factor index number into r11 + mcr p15, #0, r11, c7, c10, #2 // clean line by set/way + subs r9, r9, #1 // decrement the way + bge wbaloop3 + subs r7, r7, #1 // decrement the index + bge wbaloop2 +wbaskip: + add r10, r10, #2 // increment cache number + cmp r3, r10 + bgt wbaloop1 + +wbafinished: + mov r10, #0 // switch back to cache level 0 + mcr p15, #2, r10, c0, c0, #0// select current cache level in cssr + dsb + isb // flush prefetch buffer + ldmfd sp!, {r0-r7, r9-r11, lr} + bx lr + + + +/* FUNCTION DEF: void CacheP_wbInvAll() + * + * Write back and invalidate entire data cache + */ + .global CacheP_wbInvAll + .type CacheP_wbInvAll,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_wbInvAll: + push {r0-r7, r9-r11, lr} + movw r0, :lower16:CacheP_wbInvAllAsm + movt r0, :upper16:CacheP_wbInvAllAsm + blx r0 + pop {r0-r7, r9-r11, lr} + bx lr + +/* FUNCTION DEF: void CacheP_wbInvAllAsm() + * + * Write back and invalidate entire data cache + */ + .global CacheP_wbInvAllAsm + .type CacheP_wbInvAllAsm,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_wbInvAllAsm: + dmb // Ensure all previous memory accesses + // complete + mrc p15, #1, r0, c0, c0, #1 // read clidr + ands r3, r0, #0x7000000 // extract loc from clidr + mov r3, r3, lsr #23 // left align loc bit field + beq finished // if loc is 0, then no need to clean + + mov r10, #0 // start clean at cache level 0 + +loop1: + add r2, r10, r10, lsr #1 // work out 3x current cache level + mov r1, r0, lsr r2 // extract cache type bits from clidr + and r1, r1, #7 // mask of bits for current cache only + cmp r1, #2 // see what cache we have at this level + blt skip // skip if no cache, or just i-cache + + mrs r6, cpsr + cpsid i // disable interrupts + mcr p15, #2, r10, c0, c0, #0// select current cache level in cssr + isb // flush prefetch buffer + mrc p15, #1, r1, c0, c0, #0 // read the new csidr + msr cpsr_c, r6 // restore interrupts + + and r2, r1, #7 // extract the length of the cache lines + add r2, r2, #4 // add 4 (line length offset) + mov r4, #0x3ff + ands r4, r4, r1, lsr #3 // find maximum number on the way size + clz r5, r4 // find bit position of way size inc. 
+ mov r7, #0x7fff + ands r7, r7, r1, lsr #13 // extract max number of the index size +loop2: + mov r9, r4 // create working copy of max way size +loop3: + orr r11, r10, r9, lsl r5 // factor way and cache number into r11 + orr r11, r11, r7, lsl r2 // factor index number into r11 + mcr p15, #0, r11, c7, c14, #2 // clean & invalidate by set/way + subs r9, r9, #1 // decrement the way + bge loop3 + subs r7, r7, #1 // decrement the index + bge loop2 +skip: + add r10, r10, #2 // increment cache number + cmp r3, r10 + bgt loop1 +finished: + mov r10, #0 // swith back to cache level 0 + mcr p15, #2, r10, c0, c0, #0// select current cache level in cssr + dsb + isb // flush prefetch buffer + bx lr + + +/* FUNCTION DEF: uint32_t CacheP_getEnabled() + * + * Determine the mask of enabled caches + */ + .global CacheP_getEnabled + .type CacheP_getEnabled,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_getEnabled: + mov r0, #0 + // Do L1 first + mrc p15, #0, r1, c1, c0, #0 // fetch Control Register into r1 + + tst r1, #0x1000 // test I bit (bit 12) for L1P + addne r0, r0, #1 // if I is true, L1P is enabled + + tst r1, #0x0004 // test C bit (bit 2) for L1D + addne r0, r0, #2 // if C bit is true, L1D is enabled + + // Do L2 next + mrc p15, #0, r1, c1, c0, #1 // fetch Auxiliary Ctrl Register into r1 + + tst r1, #0x0002 // test L2EN bit (bit 1) for L2EN + beq getEnabledDone + + tst r0, #0x0001 + addne r0, r0, #4 // If L2EN and L1P then L2P + + tst r0, #0x0002 + addne r0, r0, #8 // If L2EN and L1D then L2D + +getEnabledDone: + bx lr + + +/* FUNCTION DEF: uint32_t CacheP_getCacheLevelInfo(uint32_t level) */ + .global CacheP_getCacheLevelInfo + .type CacheP_getCacheLevelInfo,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_getCacheLevelInfo: + mcr p15, #2, r0, c0, c0, #0 // write to Cache Size Selection Reg + mrc p15, #1, r0, c0, c0, #0 // read Cache Size Id Reg + bx lr + + +/* FUNCTION DEF: void CacheP_configForceWrThru(uint32_t enable) */ + .global CacheP_configForceWrThru + .type CacheP_configForceWrThru,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_configForceWrThru: + mrc p15, #0, r1, c1, c0, #1 // fetch Auxiliary Ctrl Register into r1 + cmp r0, #0 + beq FWT_disable + orr r1, r1, #(1 << 9) // set (enable) force write-thru bit + b FWT_exit + +FWT_disable: + bic r1, r1, #(1 << 9) // clear (disable) force write-thru bit + +FWT_exit: + mcr p15, #0, r1, c1, c0, #1 // write Auxiliary Ctrl Register + + bx lr + + +/* FUNCTION DEF: void CacheP_setDLFO() */ + .global CacheP_setDLFO + .type CacheP_setDLFO,%function + .section ".text.cache","ax",%progbits + .arm + .align 2 +CacheP_setDLFO: + mrc p15, #0, r1, c1, c0, #1 // fetch Auxiliary Ctrl Register into r1 + orr r1, r1, #(1 << 13) // set DLFO to disable LF optimization + mcr p15, #0, r1, c1, c0, #1 // write Auxiliary Ctrl Register + + bx lr + + .end + diff --git a/apps/machine/ti_k3_r5/r5/HwiP_armv7r_asm.S b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_asm.S new file mode 100755 index 000000000..390253382 --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_asm.S @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2018-2021 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + + .text + +/* FUNCTION DEF: uint32_t HwiP_disable(void) */ + .global HwiP_disable + .type HwiP_disable,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_disable: + mrs r0, cpsr + orr r12, r0, #0x80 + msr cpsr_cf, r12 + bx LR + +/* FUNCTION DEF: uint32_t HwiP_disableFIQ(void) */ + .global HwiP_disableFIQ + .type HwiP_disableFIQ,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_disableFIQ: + mrs r0, cpsr + orr r12, r0, #0x40 + msr cpsr_cf, r12 + bx LR + +/* FUNCTION DEF: void HwiP_enable() */ + .global HwiP_enable + .type HwiP_enable,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_enable: + mrs r0, cpsr + bic r12, r0, #0x80 + msr cpsr_cf, r12 + bx LR + +/* FUNCTION DEF: void HwiP_enableFIQ() */ + .global HwiP_enableFIQ + .type HwiP_enableFIQ,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_enableFIQ: + mrs r0, cpsr + bic r12, r0, #0x40 + msr cpsr_cf, r12 + bx LR + +/* FUNCTION DEF: void HwiP_restore(uint32_t oldIntState) */ + .global HwiP_restore + .type HwiP_restore,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_restore: + msr cpsr_cf, r0 + bx LR + +/* FUNCTION DEF: void HwiP_enableVIC() */ + .global HwiP_enableVIC + .type HwiP_enableVIC,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_enableVIC: + MRC p15, #0, r0, c1, c0, #0 /* Read SCTLR */ + ORR r0, r0, #(1<<24) /* Set VE mask (VIC controller provides handler address for IRQ) */ + MCR p15, #0, r0, c1, c0, #0 /* Write modified SCTLR */ + bx LR + +/* FUNCTION DEF: void HwiP_disableVIC() */ + .global HwiP_disableVIC + .type HwiP_disableVIC,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_disableVIC: + MRC p15, #0, r0, c1, c0, #0 /* Read SCTLR */ + BIC r0, r0, #(1<<24) /* Clear VE mask (VIC controller provides handler address for IRQ) */ + MCR p15, #0, r0, c1, c0, #0 /* Write modified SCTLR */ + bx LR + +/* FUNCTION DEF: void HwiP_getCPSR() */ + .global HwiP_getCPSR + .type HwiP_getCPSR,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_getCPSR: + mrs r0, cpsr + bx LR + + .end + diff --git a/apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos.c b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos.c new file mode 100755 index 
000000000..a5aa190c4 --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos.c @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2018-2023 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include +#include + +void __attribute__((interrupt("SWI"), section(".text.hwi"))) HwiP_svc_handler(void); + +/* compile flag to enable or disable interrupt nesting */ +#define HWIP_NESTED_INTERRUPTS_IRQ_ENABLE + +/* IRQ handler starts execution in HwiP_irq_handler, defined in HwiP_armv7r_handlers_nortos_asm.S + * After some initial assembly logic it then branches to this function. 
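+ * (the assembly prologue saves the interrupted context and switches the CPU
+ * from IRQ mode to SVC mode, so that this handler can safely re-enable
+ * interrupts for nesting)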
+ * After exiting this function it does some more assembly before exiting + */ +void __attribute__((section(".text.hwi"))) HwiP_irq_handler_c(void) +{ + int32_t status; + uint32_t intNum; + + #ifndef HWIP_VIM_VIC_ENABLE + volatile uint32_t dummy; + + /* Read to force prioritization logic to take effect, in non-VIC mode */ + dummy = HwiP_getIRQVecAddr(); + #endif + + status = HwiP_getIRQ(&intNum); + if(status==SystemP_SUCCESS) + { + uint32_t isPulse = HwiP_isPulse(intNum); + HwiP_FxnCallback isr; + void *args; + + if(isPulse!=0U) + { + HwiP_clearInt(intNum); + } + + isr = gHwiCtrl.isr[intNum]; + args = gHwiCtrl.isrArgs[intNum]; + + #ifdef HWIP_NESTED_INTERRUPTS_IRQ_ENABLE + /* allow nesting of interrupts */ + HwiP_enable(); + #endif + + if(isr!=NULL) + { + isr(args); + } + + /* disallow nesting of interrupts */ + (void) HwiP_disable(); + + if(isPulse==0U) + { + HwiP_clearInt(intNum); + } + HwiP_ackIRQ(intNum); + } + else + { + /* spurious interrupt */ + gHwiCtrl.spuriousIRQCount++; + HwiP_ackIRQ(0); + } +} + +void __attribute__((interrupt("FIQ"), section(".text.hwi"))) HwiP_fiq_handler(void) +{ + int32_t status; + uint32_t intNum; + volatile uint32_t dummy; + + /* Read to force prioritization logic to take effect */ + dummy = HwiP_getFIQVecAddr(); + (void)dummy; + + status = HwiP_getFIQ(&intNum); + if(status==SystemP_SUCCESS) + { + uint32_t isPulse = HwiP_isPulse(intNum); + HwiP_FxnCallback isr; + void *args; + + if(isPulse!=0U) + { + HwiP_clearInt(intNum); + } + + isr = gHwiCtrl.isr[intNum]; + args = gHwiCtrl.isrArgs[intNum]; + + #if 0 /* FIQ interrupt nesting not supported */ + /* allow nesting of interrupts */ + HwiP_enableFIQ(); + #endif + + if(isr!=NULL) + { + isr(args); + } + + /* disallow nesting of interrupts */ + (void) HwiP_disableFIQ(); + + if(isPulse==0U) + { + HwiP_clearInt(intNum); + } + HwiP_ackFIQ(intNum); + } + else + { + /* spurious interrupt */ + gHwiCtrl.spuriousFIQCount++; + HwiP_ackFIQ(0); + } +} + +void __attribute__((interrupt("UNDEF"), section(".text.hwi"))) HwiP_reserved_handler(void) +{ + volatile uint32_t loop = 1; + while(loop!=0U) + { + ; + } +} + +void __attribute__((interrupt("UNDEF"), section(".text.hwi"))) HwiP_undefined_handler(void) +{ + volatile uint32_t loop = 1; + while(loop!=0U) + { + ; + } +} + +void __attribute__((interrupt("SWI"), section(".text.hwi"))) HwiP_svc_handler(void) +{ + volatile uint32_t loop = 1; + while(loop!=0U) + { + ; + } + +} + +void __attribute__((interrupt("ABORT"), section(".text.hwi"))) HwiP_prefetch_abort_handler(void) +{ + volatile uint32_t loop = 1; + while(loop!=0U) + { + ; + } +} + +void __attribute__((interrupt("ABORT"), section(".text.hwi"),weak)) HwiP_data_abort_handler_c(void) +{ + volatile uint32_t loop = 1; + while(loop!=0U) + { + ; + } +} diff --git a/apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos_asm.S b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos_asm.S new file mode 100644 index 000000000..516bf4377 --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_handlers_nortos_asm.S @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2018-2021 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* make this 0 to disable FPU context save/restore */ +#define ENABLE_FPU_SAVE_RESTORE (0) + + .text + .arm + .align 2 + + .set SVC_MODE, 0x13 + .set IRQ_MODE, 0x12 + +/* FUNCTION DEF: void HwiP_irq_handler(void) */ + .global HwiP_irq_handler + .type HwiP_irq_handler,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_irq_handler: + /* Return to the interrupted instruction. */ + SUB lr, lr, #4 + + /* Push the return address and SPSR. */ + PUSH {lr} + MRS lr, SPSR + PUSH {lr} + + /* Change to supervisor mode to allow reentry. */ + CPS #SVC_MODE + + /* Push used registers. */ + PUSH {r0-r4, r12} + + #if ENABLE_FPU_SAVE_RESTORE + FMRX R0, FPSCR + VPUSH {D0-D15} + /* VPUSH {D16-D31} */ + PUSH {R0} + #endif + + /* Ensure bit 2 of the stack pointer is clear. r2 holds the bit 2 value for + * future use. _RB_ Is this ever needed provided the start of the stack is + * alligned on an 8-byte boundary? + */ + MOV r2, sp + AND r2, r2, #4 + SUB sp, sp, r2 + + /* Call the interrupt handler. */ + PUSH {r0-r4, lr} + LDR r1, HwiP_irq_handler_const + BLX r1 + POP {r0-r4, lr} + ADD sp, sp, r2 + + /* disable IRQ */ + CPSID i + DSB + ISB + + #if ENABLE_FPU_SAVE_RESTORE + POP {R0} + /* VPOP {D16-D31} */ + VPOP {D0-D15} + VMSR FPSCR, R0 + #endif + + POP {r0-r4, r12} + CPS #IRQ_MODE + POP {LR} + MSR SPSR_cxsf, LR + POP {LR} + MOVS PC, LR + +/* FUNCTION DEF: void HwiP_data_abort_handler(void) */ + .global HwiP_data_abort_handler + .type HwiP_data_abort_handler,%function + .section ".text.hwi","ax",%progbits + .arm + .align 2 +HwiP_data_abort_handler: + /* Return to the instruction following the interrupted. + * The instruction that caused the data abort is at R14_ABT – 8 which means + * that the pointer points two instructions beyond the instruction that caused + * the abort. (pc - 4) in arm mode and (pc - 6) in thumb mode (2 bytes aligned + * instruction set) points to the second instruction beyond the address where + * the exception was generated. + */ + SUB lr, lr, #6 + + /* Push the return address and SPSR. */ + PUSH {lr} + MRS lr, SPSR + PUSH {lr} + + /* Push used registers. */ + PUSH {r0-r4, r12} + + /* Call the interrupt handler. */ + LDR r1, HwiP_data_abort_handler_const + BLX r1 + + /* Restore used registers, LR and SPSR before returning. 
*/ + POP {r0-r4, r12} + POP {LR} + MSR SPSR_cxsf, LR + POP {LR} + MOVS PC, LR + +HwiP_irq_handler_const: .word HwiP_irq_handler_c +HwiP_data_abort_handler_const: .word HwiP_data_abort_handler_c + + .end diff --git a/apps/machine/ti_k3_r5/r5/HwiP_armv7r_vim.c b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_vim.c new file mode 100644 index 000000000..ba4eb06b0 --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/HwiP_armv7r_vim.c @@ -0,0 +1,289 @@ +/* + * Copyright (C) 2018-2023 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include + +#include +#include +//#include + +static volatile uint32_t gdummy; + +static void assembly(void) +{ + __asm__ __volatile__ (" isb" "\n\t": : : "memory"); + __asm__ __volatile__ (" dsb" "\n\t": : : "memory"); +} + +typedef struct HwiP_Struct_s { + + uint32_t intNum; + +} HwiP_Struct; + +HwiP_Ctrl gHwiCtrl; + +void HWI_SECTION HwiP_enableInt(uint32_t intNum) +{ + volatile uint32_t *addr; + uint32_t bitPos; + + assembly(); + + addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_INT_EN(intNum)); + bitPos = VIM_BIT_POS(intNum); + + *addr = ((uint32_t)0x1 << bitPos); +} + +uint32_t HWI_SECTION HwiP_disableInt(uint32_t intNum) +{ + volatile uint32_t *addr; + uint32_t bitPos; + uint32_t isEnable = 0; + + addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_INT_DIS(intNum)); + bitPos = VIM_BIT_POS(intNum); + + if( (*addr & ((uint32_t)0x1 << bitPos))!=0U) + { + isEnable = 1; + } + *addr = ((uint32_t)0x1 << bitPos); + + + assembly(); + + + return isEnable; +} + +void HWI_SECTION HwiP_restoreInt(uint32_t intNum, uint32_t oldIntState) +{ + if(oldIntState!=0U) + { + HwiP_enableInt(intNum); + } + else + { + (void) HwiP_disableInt(intNum); + } +} + +void HWI_SECTION HwiP_clearInt(uint32_t intNum) +{ + volatile uint32_t *addr; + uint32_t bitPos; + + addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_STS(intNum)); + bitPos = VIM_BIT_POS(intNum); + + *addr = ((uint32_t)0x1 << bitPos); +} + +void HWI_SECTION HwiP_post(uint32_t intNum) +{ + volatile uint32_t *addr; + uint32_t bitPos; + + addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_RAW(intNum)); + bitPos = VIM_BIT_POS(intNum); + + *addr = ((uint32_t)0x1 << bitPos); + + /* + * Add delay to insure posted interrupt are triggered before function + * returns. + */ + + assembly(); + + +} + +void HWI_SECTION HwiP_Params_init(HwiP_Params *params) +{ + params->intNum = 0; + params->callback = NULL; + params->args = NULL; + params->eventId = 0; /* NOT USED */ + params->priority = (HwiP_MAX_PRIORITY-1U); + params->isFIQ = 0; + params->isPulse = 0; +} + +int32_t HWI_SECTION HwiP_construct(HwiP_Object *handle, HwiP_Params *params) +{ + HwiP_Struct *obj = (HwiP_Struct *)handle; + +// DebugP_assertNoLog( sizeof(HwiP_Struct) <= sizeof(HwiP_Object) ); +// DebugP_assertNoLog( params->callback != NULL ); +// DebugP_assertNoLog( params->intNum < HwiP_MAX_INTERRUPTS ); +// DebugP_assertNoLog( params->priority < HwiP_MAX_PRIORITY ); + + (void) HwiP_disableInt(params->intNum); + HwiP_clearInt(params->intNum); + + HwiP_setAsFIQ(params->intNum, params->isFIQ); + HwiP_setPri(params->intNum, params->priority); + HwiP_setAsPulse(params->intNum, params->isPulse); + if(params->isFIQ != 0U) + { + + HwiP_setVecAddr(params->intNum, (uintptr_t)HwiP_fiq_handler); + } + else + { + HwiP_setVecAddr(params->intNum, (uintptr_t)HwiP_irq_handler); + } + + gHwiCtrl.isr[params->intNum] = params->callback; + gHwiCtrl.isrArgs[params->intNum] = params->args; + + obj->intNum = params->intNum; + + HwiP_enableInt(params->intNum); + + return SystemP_SUCCESS; +} + +int32_t HwiP_setArgs(HwiP_Object *handle, void *args) +{ + HwiP_Struct *obj = (HwiP_Struct *)handle; + +// DebugP_assertNoLog( obj->intNum < HwiP_MAX_INTERRUPTS ); + + gHwiCtrl.isrArgs[obj->intNum] = args; + + return SystemP_SUCCESS; +} + +void HWI_SECTION HwiP_destruct(HwiP_Object *handle) +{ + HwiP_Struct *obj = (HwiP_Struct *)handle; + + /* disable interrupt, clear pending if any, make as pulse, ISR, lowest priority + * set valid default vector address + */ + (void) 
HwiP_disableInt(obj->intNum); + HwiP_clearInt(obj->intNum); + HwiP_setAsFIQ(obj->intNum, 0); + HwiP_setPri(obj->intNum, HwiP_MAX_PRIORITY-1U); + HwiP_setAsPulse(obj->intNum, 0); + HwiP_setVecAddr(obj->intNum, (uintptr_t)HwiP_irq_handler); + + /* clear interrupt data structure */ + gHwiCtrl.isr[obj->intNum] = NULL; + gHwiCtrl.isrArgs[obj->intNum] = NULL; +} + +void HWI_SECTION HwiP_init(void) +{ + uint32_t i; + + /* disable IRQ */ + (void) HwiP_disable(); + /* disable FIQ */ + (void) HwiP_disableFIQ(); + +// DebugP_assertNoLog(gHwiConfig.intcBaseAddr != 0U); + + gHwiCtrl.spuriousIRQCount = 0; + gHwiCtrl.spuriousFIQCount = 0; + + /* initalize local data structure, and set all interrupts to lowest priority + * and set ISR address as IRQ handler + */ + for(i=0; i +#include +#include + +#define MPU_SECTION __attribute__((section(".text.mpu"))) + +/* Max possible regions in ARMv7-R CPU */ +#define MpuP_MAX_REGIONS (16u) + +/* APIs defined in MpuP_armv7r_asm.s */ +void MpuP_disableAsm(void); +void MpuP_enableAsm(void); +uint32_t MpuP_isEnableAsm(void); +void MpuP_disableBRAsm(void); +void MpuP_enableBRAsm(void); +void MpuP_setRegionAsm(uint32_t regionId, uint32_t regionBaseAddr, + uint32_t sizeAndEnble, uint32_t regionAttrs); + +/* these are defined as part of SysConfig */ +extern MpuP_Config gMpuConfig; +extern MpuP_RegionConfig gMpuRegionConfig[]; + + +static uint32_t MPU_SECTION MpuP_getAttrs(MpuP_RegionAttrs *region) +{ + uint32_t regionAttrs = + ((uint32_t)(region->isExecuteNever & 0x1) << 12) + | ((uint32_t)(region->accessPerm & 0x7) << 8) + | ((uint32_t)(region->tex & 0x7) << 3) + | ((uint32_t)(region->isSharable & 0x1) << 2) + | ((uint32_t)(region->isCacheable & 0x1) << 1) + | ((uint32_t)(region->isBufferable & 0x1) << 0); + + return regionAttrs; +} + +void MPU_SECTION MpuP_RegionAttrs_init(MpuP_RegionAttrs *region) +{ + region->isExecuteNever = 0; + region->accessPerm = MpuP_AP_S_RW_U_R; + region->tex = 0; + region->isSharable = 1; + region->isCacheable = 0; + region->isBufferable = 0; + region->isEnable = 0; + region->subregionDisableMask = 0; +} + +void MPU_SECTION MpuP_setRegion(uint32_t regionNum, void * addr, uint32_t size, MpuP_RegionAttrs *attrs) +{ + uint32_t baseAddress, sizeAndEnable, regionAttrs; + uint32_t enabled; + uintptr_t key; + + // DebugP_assertNoLog( regionNum < MpuP_MAX_REGIONS); + + /* size 5b field */ + size = (size & 0x1F); + + /* If N is the value in size field, the region size is 2N+1 bytes. 
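+	 * (that is, 2^(N+1) bytes: size = 16 encodes a 128 KB region and
+	 * size = 31 encodes the full 4 GB address space)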
*/ + sizeAndEnable = ((uint32_t)(attrs->subregionDisableMask & 0xFF) << 8) + | ((uint32_t)(size & 0x1F) << 1) + | ((uint32_t)(attrs->isEnable & 0x1) << 0); + + /* align base address to region size */ + baseAddress = ((uint32_t)addr & ~( (1<<((uint64_t)size+1))-1 )); + + /* get region attribute mask */ + regionAttrs = MpuP_getAttrs(attrs); + + enabled = MpuP_isEnable(); + + /* disable the MPU (if already disabled, does nothing) */ + MpuP_disable(); + + key = HwiP_disable(); + + MpuP_setRegionAsm(regionNum, baseAddress, sizeAndEnable, regionAttrs); + + HwiP_restore(key); + + if (enabled) { + MpuP_enable(); + } +} + +void MPU_SECTION MpuP_enable() +{ + if(!MpuP_isEnable()) + { + uint32_t type; + uintptr_t key; + + key = HwiP_disable(); + + /* get the current enabled bits */ + type = CacheP_getEnabled(); + + if (type & CacheP_TYPE_ALLP) { + CacheP_disable(CacheP_TYPE_ALLP); + } + + MpuP_enableAsm(); + + /* set cache back to initial settings */ + CacheP_enable(type); + + __asm__ (" dsb"); + __asm__ (" isb"); + + HwiP_restore(key); + } +} + +void MPU_SECTION MpuP_disable() +{ + if(MpuP_isEnable()) + { + uint32_t type; + uintptr_t key; + + key = HwiP_disable(); + + /* get the current enabled bits */ + type = CacheP_getEnabled(); + + /* disable all enabled caches */ + CacheP_disable(type); + + __asm__ (" dsb"); + + MpuP_disableAsm(); + + /* set cache back to initial settings */ + CacheP_enable(type); + + HwiP_restore(key); + } +} + +uint32_t MPU_SECTION MpuP_isEnable() +{ + return MpuP_isEnableAsm(); +} + +void MPU_SECTION MpuP_init() +{ + uint32_t i; + + if (MpuP_isEnable()) { + MpuP_disable(); + } + + MpuP_disableBRAsm(); + + // DebugP_assertNoLog( gMpuConfig.numRegions < MpuP_MAX_REGIONS); + + /* + * Initialize MPU regions + */ + for (i = 0; i < gMpuConfig.numRegions; i++) + { + MpuP_setRegion(i, + (void*)gMpuRegionConfig[i].baseAddr, + gMpuRegionConfig[i].size, + &gMpuRegionConfig[i].attrs + ); + } + + if (gMpuConfig.enableBackgroundRegion) { + MpuP_enableBRAsm(); + } + + if (gMpuConfig.enableMpu) { + MpuP_enable(); + } +} diff --git a/apps/machine/ti_k3_r5/r5/MpuP_armv7r_asm.S b/apps/machine/ti_k3_r5/r5/MpuP_armv7r_asm.S new file mode 100755 index 000000000..3faec93c0 --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/MpuP_armv7r_asm.S @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2018-2021 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + + .text + +/* FUNCTION DEF: void MpuP_disableAsm(void) */ + .global MpuP_disableAsm + .type MpuP_disableAsm,%function + .section ".text.mpu","ax",%progbits + .arm + .align 2 +MpuP_disableAsm: + mrc p15, #0, r0, c1, c0, #0 // read SCTLR register + bic r0, r0, #0x1 // clear bit 0 in r0 + dsb + mcr p15, #0, r0, c1, c0, #0 // MPU disabled (bit 0 = 0) + isb // flush instruction pipeline + bx LR + +/* FUNCTION DEF: void MpuP_disableBRAsm(void) */ + .global MpuP_disableBRAsm + .type MpuP_disableBRAsm,%function + .section ".text.mpu","ax",%progbits + .arm + .align 2 +MpuP_disableBRAsm: + mrc p15, #0, r0, c1, c0, #0 // read SCTLR register + bic r0, r0, #0x20000 // clear bit 17 in r0 + mcr p15, #0, r0, c1, c0, #0 // disable background region + bx LR + +/* FUNCTION DEF: void MpuP_enableAsm(void) */ + .global MpuP_enableAsm + .type MpuP_enableAsm,%function + .section ".text.mpu","ax",%progbits + .arm + .align 2 +MpuP_enableAsm: + mrc p15, #0, r0, c1, c0, #0 // read SCTLR register + orr r0, r0, #0x1 // set bit 0 in r0 + dsb + mcr p15, #0, r0, c1, c0, #0 // MPU enabled (bit 0 = 1) + isb // flush instruction pipeline + bx LR + +/* FUNCTION DEF: void MpuP_enableBRAsm(void) */ + .global MpuP_enableBRAsm + .type MpuP_enableBRAsm,%function + .section ".text.mpu","ax",%progbits + .arm + .align 2 +MpuP_enableBRAsm: + mrc p15, #0, r0, c1, c0, #0 // read SCTLR register + orr r0, r0, #0x20000 // set bit 17 in r0 + mcr p15, #0, r0, c1, c0, #0 // background region enabled + bx LR + +/* FUNCTION DEF: uint32_t MpuP_isEnableAsm(void) */ + .global MpuP_isEnableAsm + .type MpuP_isEnableAsm,%function + .section ".text.mpu","ax",%progbits + .arm + .align 2 +MpuP_isEnableAsm: + mov r0, #0 + mrc p15, #0, r1, c1, c0, #0 // read SCTLR register to r1 + tst r1, #0x1 // test bit 0 + movne r0, #1 // if not 0, MPU is enabled + bx LR + +/* FUNCTION DEF: + * void MpuP_setRegionAsm( + * uint32_t regionId, uint32_t regionBaseAddr, uint32_t sizeAndEnble, uint32_t regionAttrs) + * + * r0 = regionId + * r1 = regionBaseAddr + * r2 = sizeAndEnable + * r3 = regionAttrs + */ + .global MpuP_setRegionAsm + .type MpuP_setRegionAsm,%function + .section ".text.mpu","ax",%progbits + .arm + .align 2 +MpuP_setRegionAsm: + mcr p15, #0, r0, c6, c2, #0 // select MPU region + mcr p15, #0, r1, c6, c1, #0 // set region base address + mcr p15, #0, r2, c6, c1, #2 // set region size and enable it + mcr p15, #0, r3, c6, c1, #4 // set protection attributes + bx LR + + .end + diff --git a/apps/machine/ti_k3_r5/r5/kernel/dpl/CacheP.h b/apps/machine/ti_k3_r5/r5/kernel/dpl/CacheP.h new file mode 100644 index 000000000..d1b84e66a --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/kernel/dpl/CacheP.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2018-2021 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of 
conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CACHEP_H +#define CACHEP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * \defgroup KERNEL_DPL_CACHE APIs for Cache + * \ingroup KERNEL_DPL + * + * For more details and example usage, see \ref KERNEL_DPL_CACHE_PAGE + * + * @{ + */ + +/** + * \brief Cache line size for alignment of buffers. + * Actual CPU defined cache line can be smaller that this value, this define + * is a utility macro to keep application portable across different CPU's. + */ +#define CacheP_CACHELINE_ALIGNMENT (128U) + +/** + * \brief Cache type + */ +typedef enum CacheP_Type_ { + CacheP_TYPE_L1P = (0x0001u), /**< L1 program cache */ + CacheP_TYPE_L1D = (0x0002u), /**< L1 data cache */ + CacheP_TYPE_L2P = (0x0004u), /**< L2 program cache */ + CacheP_TYPE_L2D = (0x0008u), /**< L2 data cache */ + CacheP_TYPE_L1 = (CacheP_TYPE_L1P|CacheP_TYPE_L1D), /**< All L1 cache's */ + CacheP_TYPE_L2 = (CacheP_TYPE_L2P|CacheP_TYPE_L2D), /**< All L2 cache's */ + CacheP_TYPE_ALLP = (CacheP_TYPE_L1P|CacheP_TYPE_L2P), /**< All program cache's */ + CacheP_TYPE_ALLD = (CacheP_TYPE_L1D|CacheP_TYPE_L2D), /**< All data cache's */ + CacheP_TYPE_ALL = (CacheP_TYPE_L1|CacheP_TYPE_L2) /**< All cache's */ +} CacheP_Type; + +/** + * \brief Cache config structure, this used by SysConfig and not to be used by end-users directly + */ +typedef struct CacheP_Config_ { + + uint32_t enable; /**< 0: cache disabled, 1: cache enabled */ + uint32_t enableForceWrThru; /**< 0: force write through disabled, 1: force write through enabled */ + +} CacheP_Config; + +/** \brief Externally defined Cache configuration */ +extern CacheP_Config gCacheConfig; + +/** + * \brief Cache enable + * + * \param type [in] cache type's to enable + */ +void CacheP_enable(uint32_t type); + +/** + * \brief Cache disable + * + * \param type [in] cache type's to disable + */ +void CacheP_disable(uint32_t type); + +/** + * \brief Get cache enabled bits + * + * \return cache type's that are enabled + */ +uint32_t CacheP_getEnabled(); + +/** + * \brief Cache writeback for full cache + * + * \param type [in] cache type's to writeback + */ +void CacheP_wbAll(uint32_t type); + +/** + * \brief Cache writeback and invalidate for full cache + * + * \param type [in] cache type's to writeback and 
invalidate + */ +void CacheP_wbInvAll(uint32_t type); + +/** + * \brief Cache writeback for a specified region + * + * \param addr [in] region address. Recommend to specify address that is cache line aligned + * \param size [in] region size in bytes. Recommend to specify size that is multiple of cache line size + * \param type [in] cache type's to writeback + */ +void CacheP_wb(void *addr, uint32_t size, uint32_t type); + +/** + * \brief Cache invalidate for a specified region + * + * \param addr [in] region address. Recommend to specify address that is cache line aligned + * \param size [in] region size in bytes. Recommend to specify size that is multiple of cache line size + * \param type [in] cache type's to invalidate + */ +void CacheP_inv(void *addr, uint32_t size, uint32_t type); + +/** + * \brief Cache writeback and invalidate for a specified region + * + * \param addr [in] region address. Recommend to specify address that is cache line aligned + * \param size [in] region size in bytes. Recommend to specify size that is multiple of cache line size + * \param type [in] cache type's to writeback and invalidate + */ +void CacheP_wbInv(void *addr, uint32_t size, uint32_t type); + +/** + * \brief Initialize Cache sub-system, called by SysConfig, not to be called by end users + * + */ +void CacheP_init(); + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* CACHEP_H */ + diff --git a/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h b/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h new file mode 100644 index 000000000..9c83ca36f --- /dev/null +++ b/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h @@ -0,0 +1,244 @@ +/* + * Copyright (C) 2018-2021 Texas Instruments Incorporated + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h b/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h
new file mode 100644
index 000000000..9c83ca36f
--- /dev/null
+++ b/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HWIP_H
+#define HWIP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * \defgroup KERNEL_DPL_HWI APIs for HW Interrupts
+ * \ingroup KERNEL_DPL
+ *
+ * For more details and example usage, see \ref KERNEL_DPL_HWI_PAGE
+ *
+ * @{
+ */
+
+/**
+ * \brief Callback that is called when a HW interrupt is received
+ *
+ * \param args [in] user argument passed during \ref HwiP_construct
+ */
+typedef void (*HwiP_FxnCallback)(void *args);
+
+
+/**
+ * \brief HwiP config parameters, set up as part of SysConfig, not to be set by end-users directly
+ */
+typedef struct HwiP_Config_
+{
+    uint32_t intcBaseAddr; /**< For R5F, this is the VIM base addr */
+
+} HwiP_Config;
+
+/**
+ * \brief Parameters passed during \ref HwiP_construct
+ */
+typedef struct HwiP_Params_ {
+
+    uint32_t intNum; /**< CPU interrupt number. */
+    HwiP_FxnCallback callback; /**< Callback to call when the interrupt is received */
+    void *args; /**< Arguments to pass to the callback */
+    uint16_t eventId; /**< Event ID to register against, only used with C6x with event combiner */
+    uint8_t priority; /**< Interrupt priority, only used with ARM R5, ARM M4 */
+    uint8_t isFIQ; /**< 0: Map interrupt as IRQ, 1: map interrupt as FIQ, only used with ARM R5 */
+    uint8_t isPulse; /**< 0: Map interrupt as a level interrupt, 1: Map interrupt as a pulse interrupt, only used with ARM R5, ARM M4 */
+
+} HwiP_Params;
+
+/**
+ * \brief Max size of a Hwi object across no-RTOS and all OSes
+ */
+#define HwiP_OBJECT_SIZE_MAX (32u)
+/**
+ * \brief Opaque Hwi object used with the Hwi APIs
+ */
+typedef struct HwiP_Object_ {
+
+    uint32_t rsv[HwiP_OBJECT_SIZE_MAX/sizeof(uint32_t)]; /**< reserved, should NOT be modified by end users */
+
+} HwiP_Object;
+
+/**
+ * \brief Set default values to HwiP_Params
+ *
+ * Strongly recommended to be called before setting values in HwiP_Params
+ *
+ * \param params [out] parameter structure to set to default
+ */
+void HwiP_Params_init(HwiP_Params *params);
+
+/**
+ * \brief Create a Hwi object
+ *
+ * \param obj [out] created object
+ * \param params [in] parameter structure
+ *
+ * \return \ref SystemP_SUCCESS on success, \ref SystemP_FAILURE on error
+ */
+int32_t HwiP_construct(HwiP_Object *obj, HwiP_Params *params);
+
+
+/**
+ * \brief Set argument to pass to the ISR
+ *
+ * \param obj [out] created object
+ * \param args [in] argument to pass to the ISR
+ *
+ * \return \ref SystemP_SUCCESS on success, \ref SystemP_FAILURE on error
+ */
+int32_t HwiP_setArgs(HwiP_Object *obj, void *args);
+
+/**
+ * \brief Cleanup, delete, destruct a Hwi object
+ *
+ * \param obj [in] Hwi object
+ */
+void HwiP_destruct(HwiP_Object *obj);
+
+/**
+ * \brief Enable a specific interrupt
+ *
+ * \param intNum [in] Interrupt number
+ */
+void HwiP_enableInt(uint32_t intNum);
+
+/**
+ * \brief Disable a specific interrupt
+ *
+ * The return value is typically used with \ref HwiP_restoreInt to restore the interrupt state
+ * to the old value.
+ *
+ * \param intNum [in] Interrupt number
+ *
+ * \return old interrupt state, \n 0: interrupt was disabled previously, \n 1: interrupt was enabled previously
+ */
+uint32_t HwiP_disableInt(uint32_t intNum);
+
+/**
+ * \brief Restore a specific interrupt
+ *
+ * The oldIntState value typically returned by \ref HwiP_disableInt is used to restore the interrupt state
+ * to the old value.
+ *
+ * \param intNum [in] Interrupt number
+ * \param oldIntState [in] 0: disable interrupt, 1: enable interrupt
+ */
+void HwiP_restoreInt(uint32_t intNum, uint32_t oldIntState);
+
+/**
+ * \brief Clear a pending specific interrupt
+ *
+ * \param intNum [in] Interrupt number
+ */
+void HwiP_clearInt(uint32_t intNum);
+
+
+
+/**
+ * \brief Force trigger a specific interrupt
+ *
+ * \param intNum [in] Interrupt number
+ */
+void HwiP_post(uint32_t intNum);
+
+/**
+ * \brief Disable all interrupts
+ *
+ * \note In case of ARM R5F, ARM M4F, this only disables IRQ. \n
+ *       FIQ is not disabled.
+ *
+ * \return interrupt state before disable, typically used by \ref HwiP_restore later
+ */
+uintptr_t HwiP_disable();
+
+/**
+ * \brief Enable all interrupts
+ *
+ * \note In case of ARM R5F, ARM M4F, this only enables IRQ. \n
+ *       FIQ is not enabled.
+ */
+void HwiP_enable();
+
+/**
+ * \brief Restores all interrupts to a given state
+ *
+ * \note In case of ARM R5F, ARM M4F, this only restores IRQ state. \n
+ *       FIQ state is not changed.
+ *
+ * \param oldIntState [in] interrupt state, typically returned by \ref HwiP_disable earlier
+ */
+void HwiP_restore(uintptr_t oldIntState);
+
+
+/**
+ * \brief Check if the caller of this function is inside an ISR or not
+ *
+ * In some cases, like with FreeRTOS, some functions cannot be called from within the OS
+ * ISR handler; this API allows the user and some driver porting layer (DPL) APIs
+ * to check and call the appropriate ISR-safe API when in ISR handler mode.
+ *
+ * To get the exact CPU mode of the executing CPU, use the low-level CPU-specific system
+ * calls/registers.
+ *
+ * \note In case of ARM R5F, this only checks if the caller is inside IRQ or not.
+ *       This means when HwiP_inISR returns 1, the CPU is in IRQ mode,
+ *       and when HwiP_inISR returns 0, the CPU could be in system mode or FIQ or abort mode and so on.
+ *
+ * \return 0 not in interrupt mode, 1 in interrupt mode
+ */
+uint32_t HwiP_inISR(void);
+
+/**
+ * \brief Initialize Hwi module
+ *
+ * - Disables all individual interrupts
+ * - Clears all individual interrupts
+ * - Enables global interrupts
+ *
+ * \note MUST be called during system initialization before any \ref HwiP_construct API calls.
+ * \note In case of ARM R5F, ARM M4F, this initializes and enables both FIQ and IRQ
+ */
+void HwiP_init();
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HWIP_H */
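To make the HwiP flow above concrete, a minimal sketch follows. The interrupt number is an illustrative placeholder (a real value would come from platform_info.h for the chosen board), and gHwiConfig is assumed to be set up as in helper.c earlier in this patch.

    #include "kernel/dpl/HwiP.h"
    #include "kernel/dpl/SystemP.h"

    #define APP_MBOX_INT_NUM (64u) /* placeholder, board-specific */

    static void app_mbox_isr(void *args)
    {
        (void)args;
        /* acknowledge and handle the mailbox event here */
    }

    int32_t app_register_irq(void)
    {
        static HwiP_Object hwiObj;
        HwiP_Params params;

        HwiP_init(); /* must run before any HwiP_construct() call */

        HwiP_Params_init(&params);
        params.intNum = APP_MBOX_INT_NUM;
        params.callback = app_mbox_isr;
        params.args = NULL;

        /* returns SystemP_SUCCESS on success */
        return HwiP_construct(&hwiObj, &params);
    }
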
diff --git a/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP_armv7r_vim.h b/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP_armv7r_vim.h
new file mode 100755
index 000000000..e750e98ce
--- /dev/null
+++ b/apps/machine/ti_k3_r5/r5/kernel/dpl/HwiP_armv7r_vim.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2018-2023 Texas Instruments Incorporated
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HWIP_ARMV7R_VIM_H
+#define HWIP_ARMV7R_VIM_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+#include <kernel/dpl/HwiP.h>
+#include <kernel/dpl/SystemP.h>
+
+/* compile flag to enable VIC mode of operation, undef this to use non-VIC mode */
+#define HWIP_VIM_VIC_ENABLE
+
+
+#define HWI_SECTION __attribute__((section(".text.hwi")))
+
+#define HwiP_MAX_INTERRUPTS  (512u)
+#define HwiP_MAX_PRIORITY    (16u)
+
+#define VIM_BIT_POS(j)       ( (j) & 0x1Fu )
+#define VIM_IRQVEC           (0x18u)
+#define VIM_FIQVEC           (0x1Cu)
+#define VIM_ACTIRQ           (0x20u)
+#define VIM_ACTFIQ           (0x24u)
+#define VIM_RAW(j)           (0x400U + ((((j) >> 5) & 0xFU) * 0x20U))
+#define VIM_STS(j)           (0x404U + ((((j) >> 5) & 0xFU) * 0x20U))
+#define VIM_INT_EN(j)        (0x408U + ((((j) >> 5) & 0xFU) * 0x20U))
+#define VIM_INT_DIS(j)       (0x40CU + ((((j) >> 5) & 0xFU) * 0x20U))
+#define VIM_INT_MAP(j)       (0x418U + ((((j) >> 5) & 0xFU) * 0x20U))
+#define VIM_INT_TYPE(j)      (0x41CU + ((((j) >> 5) & 0xFU) * 0x20U))
+#define VIM_INT_PRI(j)       (0x1000u + ((j) * 0x4u))
+#define VIM_INT_VEC(j)       (0x2000u + ((j) * 0x4u))
+
+#define ARMV7R_FIQ_MODE      (0x11u)
+#define ARMV7R_IRQ_MODE      (0x12u)
+#define ARMV7R_SVC_MODE      (0x13u)
+#define ARMV7R_SYSTEM_MODE   (0x1Fu)
+
+#define INTERRUPT_VALUE      (32U)
+
+typedef struct HwiP_Ctrl_s {
+
+    HwiP_FxnCallback isr[HwiP_MAX_INTERRUPTS];
+    void *isrArgs[HwiP_MAX_INTERRUPTS];
+
+    uint32_t spuriousIRQCount;
+    uint32_t spuriousFIQCount;
+} HwiP_Ctrl;
+
+extern HwiP_Ctrl gHwiCtrl;
+extern HwiP_Config gHwiConfig;
+/* APIs defined in HwiP_armv7r_asm.S */
+uint32_t HwiP_disableFIQ(void);
+void HwiP_enableFIQ(void);
+void HwiP_enableVIC(void);
+
+void HwiP_fiq_handler(void);
+void HwiP_irq_handler(void);
+void HwiP_reserved_handler(void);
+void HwiP_undefined_handler(void);
+void HwiP_prefetch_abort_handler(void);
+void HwiP_data_abort_handler(void);
+void HwiP_irq_handler_c(void);
+
+static inline void HWI_SECTION HwiP_setAsFIQ(uint32_t intNum, uint32_t isFIQ)
+{
+    volatile uint32_t *addr;
+    uint32_t bitPos;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_INT_MAP(intNum));
+    bitPos = VIM_BIT_POS(intNum);
+
+    if(isFIQ != 0U)
+    {
+        *addr |= (0x1u << bitPos);
+    }
+    else
+    {
+        *addr &= ~(0x1u << bitPos);
+    }
+}
+
+static inline uint32_t HWI_SECTION HwiP_isPulse(uint32_t intNum)
+{
+    volatile uint32_t *addr;
+    uint32_t bitPos;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_INT_TYPE(intNum));
+    bitPos = VIM_BIT_POS(intNum);
+
+    return ((*addr >> bitPos) & 0x1u );
+}
+
+
+static inline void HWI_SECTION HwiP_setAsPulse(uint32_t intNum, uint32_t isPulse)
+{
+    volatile uint32_t *addr;
+    uint32_t bitPos;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_INT_TYPE(intNum));
+    bitPos = VIM_BIT_POS(intNum);
+
+    if(isPulse != 0U)
+    {
+        *addr |= (0x1u << bitPos);
+    }
+    else
+    {
+        *addr &= ~(0x1u << bitPos);
+    }
+}
+
+static inline void HWI_SECTION HwiP_setPri(uint32_t intNum, uint32_t priority)
+{
+    volatile uint32_t *addr;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_INT_PRI(intNum));
+
+    *addr = (priority & 0xFu);
+}
+
+static inline void HWI_SECTION HwiP_setVecAddr(uint32_t intNum, uintptr_t vecAddr)
+{
+    volatile uint32_t *addr;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_INT_VEC(intNum));
+
+    *addr = ((uint32_t)vecAddr & 0xFFFFFFFCU);
+}
+
+static inline uint32_t HWI_SECTION HwiP_getIRQVecAddr(void)
+{
+    volatile uint32_t *addr;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_IRQVEC);
+
+    return *addr;
+}
+
+static inline uint32_t HWI_SECTION HwiP_getFIQVecAddr(void)
+{
+    volatile uint32_t *addr;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_FIQVEC);
+
+    return *addr;
+}
+
+static inline int32_t HWI_SECTION HwiP_getIRQ(uint32_t *intNum)
+{
+    volatile uint32_t *addr;
+    int32_t status = SystemP_FAILURE;
+    uint32_t value;
+
+    *intNum = 0;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_ACTIRQ);
+    value = *addr;
+
+    if((value & 0x80000000U) != 0U)
+    {
+        *intNum = (value & (HwiP_MAX_INTERRUPTS-1U));
+        status = SystemP_SUCCESS;
+    }
+    return status;
+}
+
+static inline int32_t HWI_SECTION HwiP_getFIQ(uint32_t *intNum)
+{
+    volatile uint32_t *addr;
+    int32_t status = SystemP_FAILURE;
+    uint32_t value;
+
+    *intNum = 0;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_ACTFIQ);
+    value = *addr;
+
+    if((value & 0x80000000U) != 0U)
+    {
+        *intNum = (value & 0x3FFU);
+        status = SystemP_SUCCESS;
+    }
+    return status;
+}
+
+static inline void HWI_SECTION HwiP_ackIRQ(uint32_t intNum)
+{
+    volatile uint32_t *addr;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_IRQVEC);
+    *addr = intNum;
+}
+
+static inline void HWI_SECTION HwiP_ackFIQ(uint32_t intNum)
+{
+    volatile uint32_t *addr;
+
+    addr = (volatile uint32_t *)(gHwiConfig.intcBaseAddr + VIM_FIQVEC);
+    *addr = intNum;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HWIP_ARMV7R_VIM_H */
diff --git a/apps/machine/ti_k3_r5/r5/kernel/dpl/MpuP_armv7.h b/apps/machine/ti_k3_r5/r5/kernel/dpl/MpuP_armv7.h
new file mode 100644
index 000000000..0b4aad78f
--- /dev/null
+++ b/apps/machine/ti_k3_r5/r5/kernel/dpl/MpuP_armv7.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MPUP_ARM_V7_H
+#define MPUP_ARM_V7_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * \defgroup KERNEL_DPL_MPU_ARMV7 APIs for MPU for ARMv7 (ARM R5, ARM M4)
+ * \ingroup KERNEL_DPL
+ *
+ * For more details and example usage, see \ref KERNEL_DPL_MPU_ARMV7_PAGE
+ *
+ * @{
+ */
+
+/**
+ * \brief Enums to represent different types of access permissions that are possible for a given MPU region
+ */
+typedef enum MpuP_AccessPerm_
+{
+    MpuP_AP_ALL_BLOCK = (0x0u), /**< All accesses are blocked */
+    MpuP_AP_S_RW      = (0x1u), /**< Only RD+WR supervisor mode accesses are allowed */
+    MpuP_AP_S_RW_U_R  = (0x2u), /**< RD+WR supervisor and RD only user mode accesses are allowed */
+    MpuP_AP_ALL_RW    = (0x3u), /**< All RD+WR accesses are allowed */
+    MpuP_AP_S_R       = (0x5u), /**< Only RD supervisor mode accesses are allowed */
+    MpuP_AP_ALL_R     = (0x6u)  /**< All RD accesses are allowed */
+} MpuP_AccessPerm;
+
+/**
+ * \brief Enums to represent different possible MPU region sizes
+ */
+typedef enum MpuP_RegionSize_ {
+    MpuP_RegionSize_32 = 0x4,
+    MpuP_RegionSize_64,
+    MpuP_RegionSize_128,
+    MpuP_RegionSize_256,
+    MpuP_RegionSize_512,
+    MpuP_RegionSize_1K,
+    MpuP_RegionSize_2K,
+    MpuP_RegionSize_4K,
+    MpuP_RegionSize_8K,
+    MpuP_RegionSize_16K,
+    MpuP_RegionSize_32K,
+    MpuP_RegionSize_64K,
+    MpuP_RegionSize_128K,
+    MpuP_RegionSize_256K,
+    MpuP_RegionSize_512K,
+    MpuP_RegionSize_1M,
+    MpuP_RegionSize_2M,
+    MpuP_RegionSize_4M,
+    MpuP_RegionSize_8M,
+    MpuP_RegionSize_16M,
+    MpuP_RegionSize_32M,
+    MpuP_RegionSize_64M,
+    MpuP_RegionSize_128M,
+    MpuP_RegionSize_256M,
+    MpuP_RegionSize_512M,
+    MpuP_RegionSize_1G,
+    MpuP_RegionSize_2G,
+    MpuP_RegionSize_4G
+} MpuP_RegionSize;
+
+/**
+ * \brief Attributes to apply for an MPU region
+ *
+ * \note Refer to the ARMv7-R or ARMv7-M architecture manual for more details
+ *
+ * \note The C, B, S, TEX[0:2] bits
+ *       together control whether a region should be fully cached or non-cached or marked as device memory
+ */
+typedef struct MpuP_RegionAttrs_ {
+
+    uint8_t isEnable; /**< 1: enable this region, 0: disable this region */
+    uint8_t isCacheable; /**< 1: set C bit, 0: clear C bit */
+    uint8_t isBufferable; /**< 1: set B bit, 0: clear B bit */
+    uint8_t isSharable; /**< 1: set S bit, 0: clear S bit */
+    uint8_t isExecuteNever; /**< 1: set XN bit, 0: clear XN bit */
+    uint8_t tex; /**< set TEX[0:2] bits */
+    uint8_t accessPerm; /**< set AP[0:2] bits, see \ref MpuP_AccessPerm */
+    uint8_t subregionDisableMask; /**< subregion disable mask, bitN = 1 means disable that subregion */
+} MpuP_RegionAttrs;
+
+/**
+ * \brief Region config structure; this is used by SysConfig and is not to be used by end-users directly
+ */
+typedef struct MpuP_RegionConfig_ {
+
+    uint32_t baseAddr; /**< region start address, MUST be aligned to region size */
+    uint32_t size; /**< region size, see \ref MpuP_RegionSize */
+    MpuP_RegionAttrs attrs; /**< region attributes, see \ref MpuP_RegionAttrs */
+
+} MpuP_RegionConfig;
+
+/**
+ * \brief MPU config structure; this is used by SysConfig and is not to be used by end-users directly
+ */
+typedef struct MpuP_Config_ {
+
+    uint32_t numRegions; /**< Number of regions to configure */
+    uint32_t enableBackgroundRegion; /**< 0: disable background region, 1: enable background region */
+    uint32_t enableMpu; /**< 0: keep MPU disabled, 1: enable MPU */
+
+} MpuP_Config;
+
+/**
+ * \brief Set default values to MpuP_RegionAttrs
+ *
+ * Strongly recommended to be called before setting values in MpuP_RegionAttrs
+ *
+ * \param region [out] parameter structure to set to default
+ */
+void MpuP_RegionAttrs_init(MpuP_RegionAttrs *region);
+
+/**
+ * \brief Setup a region in the MPU
+ *
+ * \note Refer to the ARMv7-R or ARMv7-M architecture manual for more details
+ * \note Recommended to disable the MPU and disable the cache before setting up MPU regions
+ *
+ * \param regionNum [in] region to setup
+ * \param addr [in] region start address, MUST be aligned to region size
+ * \param size [in] region size, see \ref MpuP_RegionSize
+ * \param attrs [in] region attrs, see \ref MpuP_RegionAttrs
+ */
+void MpuP_setRegion(uint32_t regionNum, void * addr, uint32_t size, MpuP_RegionAttrs *attrs);
+
+/**
+ * \brief Enable the MPU sub-system using the regions that were set up using \ref MpuP_setRegion
+ */
+void MpuP_enable();
+
+/**
+ * \brief Disable the MPU sub-system
+ */
+void MpuP_disable();
+
+/**
+ * \brief Check if the MPU sub-system is enabled
+ *
+ * \return 0: MPU sub-system is disabled, 1: MPU sub-system is enabled
+ */
+uint32_t MpuP_isEnable();
+
+/**
+ * \brief Initialize MPU sub-system, called by SysConfig, not to be called by end users
+ *
+ */
+void MpuP_init();
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MPUP_ARM_V7_H */
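And a corresponding sketch for the MpuP API. The region number and base address here are invented purely for illustration; the real mappings for this port live in helper.c's MPU configuration.

    #include "kernel/dpl/MpuP_armv7.h"

    void app_mpu_setup(void)
    {
        MpuP_RegionAttrs attrs;

        MpuP_disable(); /* recommended before changing regions */

        MpuP_RegionAttrs_init(&attrs);
        attrs.isEnable = 1;
        attrs.isCacheable = 1;
        attrs.isBufferable = 1;
        attrs.accessPerm = MpuP_AP_ALL_RW;

        /* Region 2 and 0xA0000000 are placeholders, not values from this
         * patch; the base address MUST be aligned to the region size. */
        MpuP_setRegion(2, (void *)0xA0000000, MpuP_RegionSize_1M, &attrs);

        MpuP_enable();
    }
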
diff --git a/apps/machine/ti_k3_r5/r5/kernel/dpl/SystemP.h b/apps/machine/ti_k3_r5/r5/kernel/dpl/SystemP.h
new file mode 100644
index 000000000..e8d9af2a6
--- /dev/null
+++ b/apps/machine/ti_k3_r5/r5/kernel/dpl/SystemP.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SYSTEMP_H
+#define SYSTEMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+//#include
+
+/**
+ * \defgroup KERNEL_DPL_SYSTEM APIs for system-level defines and functions
+ * \ingroup KERNEL_DPL
+ *
+ * @{
+ */
+
+/** @name Return status
+ */
+/**@{*/
+/**
+ * \brief Return status when the API execution was successful
+ */
+#define SystemP_SUCCESS   ((int32_t)0)
+
+/**
+ * \brief Return status when the API execution was not successful due to a failure
+ */
+#define SystemP_FAILURE   ((int32_t)-1)
+
+/**
+ * \brief Return status when the API execution was not successful due to a time out
+ */
+#define SystemP_TIMEOUT   ((int32_t)-2)
+
+/**@}*/
+
+/** @name Timeout values
+ * @anchor SystemP_Timeout
+ */
+/**@{*/
+
+/**
+ * \brief Value to use when needing a timeout of zero or NO timeout; return immediately when the resource is not available.
+ */
+#define SystemP_NO_WAIT        ((uint32_t)0)
+
+/**
+ * \brief Value to use when needing a timeout of infinity, i.e. wait forever until the resource is available
+ */
+#define SystemP_WAIT_FOREVER   ((uint32_t)-1)
+/**@}*/
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SYSTEMP_H */
+
+/**
+ * \defgroup KERNEL_DPL APIs for Driver Porting Layer
+ *
+ * This module contains APIs which are used by the drivers to make them agnostic of the underlying OS and CPU architecture.
+ */
+
+/**
+ * \defgroup DRV_MODULE APIs for SOC Specific Device Drivers
+ *
+ * This module contains APIs for device drivers for various peripherals supported in this SDK
+ */
+
+
+/**
+ * \cond !SOC_AM62X
+ */
+/**
+ * \defgroup BOARD_MODULE APIs for Board Specific Device Drivers
+ *
+ * This module contains APIs for device drivers for various peripherals supported on the EVM or board supported by this SOC
+ */
+
+/**
+ * \defgroup SECURITY_MODULE APIs for Security Drivers
+ *
+ * This module contains APIs which are used by the Security drivers.
+ */
+
+
+/**
+ * \endcond
+ */
+
diff --git a/apps/machine/ti_k3_r5/rsc_table.c b/apps/machine/ti_k3_r5/rsc_table.c
new file mode 100644
index 000000000..bf971ab96
--- /dev/null
+++ b/apps/machine/ti_k3_r5/rsc_table.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ * Andrew Davis
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file populates the resource table for the remote core
+ * for use by the Linux host
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "rsc_table.h"
+#include "helper.h"
+
+/* Place the resource table in a special ELF section */
+#define __section_t(S) __attribute__((__section__(#S)))
+#define __resource __section_t(.resource_table)
+
+struct remote_resource_table __resource resource_table =
+{
+    /* table header information */
+    1U,                  /* we're the first version that implements this */
+    NO_RESOURCE_ENTRIES, /* number of entries */
+    { 0U, 0U, },         /* reserved, must be zero */
+
+    /* offsets to the entries */
+    {
+     offsetof(struct remote_resource_table, rpmsg_vdev),
+     offsetof(struct remote_resource_table, trace),
+    },
+
+    /* vdev entry */
+    { RSC_VDEV, VIRTIO_ID_RPMSG_, 31U, RPMSG_VDEV_DFEATURES, 0U, 0U, 0U, NUM_VRINGS, {0U, 0U}, },
+    /* the two vrings */
+    {RING_TX, VRING_ALIGN, VRING_SIZE, 1U, 0U},
+    {RING_RX, VRING_ALIGN, VRING_SIZE, 2U, 0U},
+
+    /* trace buffer entry */
+    { RSC_TRACE, (uint32_t)debug_log_memory, DEBUG_LOG_SIZE, 0, "trace:r5fss0_0", },
+};
+
+void *get_resource_table (int rsc_id, int *len)
+{
+    (void) rsc_id;
+    *len = sizeof(resource_table);
+    return &resource_table;
+}
+
diff --git a/apps/machine/ti_k3_r5/rsc_table.h b/apps/machine/ti_k3_r5/rsc_table.h
new file mode 100644
index 000000000..65199d5ef
--- /dev/null
+++ b/apps/machine/ti_k3_r5/rsc_table.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ * Andrew Davis
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RSC_TABLE_H_
+#define RSC_TABLE_H_
+
+#include
+#include
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#define NO_RESOURCE_ENTRIES 2
+
+#define RPMSG_VDEV_DFEATURES (1 << VIRTIO_RPMSG_F_NS)
+
+/* VirtIO rpmsg device id */
+#define VIRTIO_ID_RPMSG_ 7
+
+#define NUM_VRINGS 0x02
+#define VRING_ALIGN 0x1000
+#ifndef RING_TX
+#define RING_TX FW_RSC_U32_ADDR_ANY
+#endif /* !RING_TX */
+#ifndef RING_RX
+#define RING_RX FW_RSC_U32_ADDR_ANY
+#endif /* !RING_RX */
+#define VRING_SIZE 256U
+
+/* Resource table for the given remote */
+struct remote_resource_table {
+    unsigned int version;
+    unsigned int num;
+    unsigned int reserved[2];
+    unsigned int offset[NO_RESOURCE_ENTRIES];
+    /* rpmsg vdev entry */
+    struct fw_rsc_vdev rpmsg_vdev;
+    struct fw_rsc_vdev_vring rpmsg_vring0;
+    struct fw_rsc_vdev_vring rpmsg_vring1;
+    /* trace buffer entry */
+    struct fw_rsc_trace trace;
+} __attribute__((packed, aligned(0x100)));
+
+void *get_resource_table (int rsc_id, int *len);
+
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* RSC_TABLE_H_ */
\ No newline at end of file
diff --git a/cmake/platforms/ti_k3_r5.cmake b/cmake/platforms/ti_k3_r5.cmake
new file mode 100644
index 000000000..57d68d271
--- /dev/null
+++ b/cmake/platforms/ti_k3_r5.cmake
@@ -0,0 +1,7 @@
+set (CMAKE_SYSTEM_PROCESSOR "arm" CACHE STRING "")
+set (MACHINE "ti_k3_r5" CACHE STRING "")
+set (CROSS_PREFIX "arm-none-eabi-" CACHE STRING "")
+
+set (CMAKE_C_FLAGS "-mcpu=cortex-r5 -g -O0" CACHE STRING "")
+
+include (cross_generic_gcc)

From 13035180d7e51c9516b115d654935ef191b9f0b1 Mon Sep 17 00:00:00 2001
From: Kendall Willis
Date: Tue, 15 Aug 2023 09:34:06 -0500
Subject: [PATCH 3/3] cmake: platforms: Add libmetal library and include path
 to ti_k3_r5.cmake

Allow users to run the CMake build without having to specify where
libmetal is when it is in the same directory as OpenAMP. A
LIBMETAL_PATH variable can be set if the libmetal directory is not in
the same directory as OpenAMP.

Signed-off-by: Kendall Willis
---
 cmake/platforms/ti_k3_r5.cmake | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/cmake/platforms/ti_k3_r5.cmake b/cmake/platforms/ti_k3_r5.cmake
index 57d68d271..0c5100f78 100644
--- a/cmake/platforms/ti_k3_r5.cmake
+++ b/cmake/platforms/ti_k3_r5.cmake
@@ -2,6 +2,13 @@ set (CMAKE_SYSTEM_PROCESSOR "arm" CACHE STRING "")
 set (MACHINE "ti_k3_r5" CACHE STRING "")
 set (CROSS_PREFIX "arm-none-eabi-" CACHE STRING "")
 
+if (NOT LIBMETAL_PATH)
+  set (LIBMETAL_PATH "../libmetal")
+endif (NOT LIBMETAL_PATH)
+
+set (CMAKE_INCLUDE_PATH "${LIBMETAL_PATH}/build-libmetal/usr/local/include")
+set (CMAKE_LIBRARY_PATH "${LIBMETAL_PATH}/build-libmetal/usr/local/lib")
+
 set (CMAKE_C_FLAGS "-mcpu=cortex-r5 -g -O0" CACHE STRING "")
 
 include (cross_generic_gcc)
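With patch 3 applied, a libmetal checkout built under a sibling directory ("../libmetal/build-libmetal") is picked up automatically, and any other layout can be handled at configure time. The exact invocation depends on the local setup, but presumably it would look something like:

    cmake <openamp-src> -DCMAKE_TOOLCHAIN_FILE=ti_k3_r5 \
          -DLIBMETAL_PATH=/path/to/libmetal

where the toolchain file name resolves against the cmake/platforms directory, as with the other OpenAMP cross builds.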