diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 09c54114a8..2af156e195 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -77,6 +77,8 @@ DEF_HELPER_3(lr_c_cap, void, env, i32, i32)
 DEF_HELPER_3(sc_c_modedep, tl, env, i32, i32)
 DEF_HELPER_3(sc_c_ddc, tl, env, i32, i32)
 DEF_HELPER_3(sc_c_cap, tl, env, i32, i32)
+/* experimental instruction helpers */
+DEF_HELPER_2(ctestdereferenceable, tl, env, i32)
 #endif

 #ifdef CONFIG_TCG_LOG_INSTR
@@ -271,30 +273,30 @@ DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32)
 #ifdef TARGET_RISCV64
 DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoaddd_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoxorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoxord_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoandw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoandd_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoord_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamominw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomind_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxord_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoord_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomind_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxd_v_d, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32)
 #endif
 DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoandw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamoorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamominw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vamomaxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32)

diff --git a/target/riscv/insn32-cheri.decode b/target/riscv/insn32-cheri.decode
index 79bbc61aa6..d7cb3bacca 100644
--- a/target/riscv/insn32-cheri.decode
+++ b/target/riscv/insn32-cheri.decode
@@ -57,6 +57,8 @@ ccopytype 0011110 ..... ..... 000 ..... 1011011 @r
 ccseal 0011111 ..... ..... 000 ..... 1011011 @r
 ctestsubset 0100000 ..... ..... 000 ..... 1011011 @r
 cseqx 0100001 ..... ..... 000 ..... 1011011 @r
+candaddr 0100010 ..... ..... 000 ..... 1011011 @r
+
 # 1111011 unused
 # 1111100 Used for Stores (see below)
 # 1111101 Used for Loads (see below)
@@ -85,9 +87,9 @@ cgetaddr 1111111 01111 ..... 000 ..... 1011011 @r2
 # fpclear 1111111 10000 ..... 000 ..... 1011011 @r2
 csealentry 1111111 10001 ..... 000 ..... 1011011 @r2
 cloadtags 1111111 10010 ..... 000 ..... 1011011 @r2
+ctestdereferenceable 1111111 10011 ..... 000 ..... 1011011 @r2
 jalr_pcc 1111111 10100 ..... 000 ..... 1011011 @r2
-
-
+candpermuserdata 1111111 10101 ..... 000 ..... 1011011 @r2

 # There is an existing @sfence_vma format with rs1+rs2 fields, but let's define a new name
 @r_2source ....... ..... ..... ... ..... ....... %rs2 %rs1
diff --git a/target/riscv/insn_trans/trans_cheri.c.inc b/target/riscv/insn_trans/trans_cheri.c.inc
index dae1d785f2..10846cd5c9 100644
--- a/target/riscv/insn_trans/trans_cheri.c.inc
+++ b/target/riscv/insn_trans/trans_cheri.c.inc
@@ -41,7 +41,7 @@ typedef void(cheri_int_cap_helper)(TCGv, TCGv_env, TCGv_i32);

 static inline bool gen_cheri_int_cap(DisasContext *ctx, int rd, int cs,
-                                     cheri_int_cap_helper *gen_func)
+                                     cheri_int_cap_helper *gen_func)
 {
     TCGv_i32 source_regnum = tcg_const_i32(cs);
     TCGv result = tcg_temp_new();
@@ -166,6 +166,9 @@ TRANSLATE_INT_CAP(cgetsealed)

 TRANSLATE_INT_CAP(cloadtags)

+// experimental ctestdereferenceable
+TRANSLATE_INT_CAP(ctestdereferenceable)
+
 // Two operand (int int)
 static inline bool trans_crrl(DisasContext *ctx, arg_crrl *a)
 {
@@ -181,6 +184,40 @@ TRANSLATE_CAP_CAP(ccleartag)
 TRANSLATE_CAP_CAP(cmove)
 TRANSLATE_CAP_CAP(csealentry)

+// experimental candpermuserdata
+
+/* User-defined permission bits. */
+#define CAP_PERM_SW0 (1 << 15) /* 0x00008000 */
+#define CAP_PERM_SW1 (1 << 16) /* 0x00010000 */
+#define CAP_PERM_SW2 (1 << 17) /* 0x00020000 */
+#define CAP_PERM_SW3 (1 << 18) /* 0x00040000 */
+#define CAP_PERM_SW_VMEM CAP_PERM_SW1
+
+#define CAP_PERMS_SWALL \
+    (CAP_PERM_SW0 | CAP_PERM_SW1 | CAP_PERM_SW2 | CAP_PERM_SW3)
+
+#define CAP_PERMS_USERSPACE \
+    (CAP_PERM_GLOBAL | CAP_PERM_LOAD | CAP_PERM_LOAD_CAP | CAP_PERM_CINVOKE | \
+     (CAP_PERMS_SWALL & ~CAP_PERM_SW_VMEM))
+
+#define CAP_PERMS_USERSPACE_DATA \
+    (CAP_PERMS_USERSPACE | CAP_PERM_STORE | CAP_PERM_STORE_CAP | \
+     CAP_PERM_STORE_LOCAL)
+
+static inline bool trans_candpermuserdata(DisasContext *ctx,
+                                          arg_candpermuserdata *a)
+{
+    TCGv_i32 source_regnum = tcg_const_i32(a->rs1);
+    TCGv_i32 dest_regnum = tcg_const_i32(a->rd);
+    TCGv gpr = tcg_temp_new();
+    tcg_gen_movi_tl(gpr, CAP_PERMS_USERSPACE_DATA & ~CAP_PERM_SW_VMEM);
+    gen_helper_candperm(cpu_env, dest_regnum, source_regnum, gpr);
+    tcg_temp_free(gpr);
+    tcg_temp_free_i32(dest_regnum);
+    tcg_temp_free_i32(source_regnum);
+    return true;
+}
+
 // Three operand (cap cap cap)
 TRANSLATE_CAP_CAP_CAP(cbuildcap)
 TRANSLATE_CAP_CAP_CAP(ccopytype)
@@ -215,6 +252,9 @@ TRANSLATE_CAP_CAP_INT(csetboundsexact)
 TRANSLATE_CAP_CAP_INT(csetflags)
 TRANSLATE_CAP_CAP_INT(csetoffset)

+// Add experimental CAndAddr instruction
+TRANSLATE_CAP_CAP_INT(candaddr)
+
 // Three operand (int cap cap)
 TRANSLATE_INT_CAP_CAP(csub)
 TRANSLATE_INT_CAP_CAP(ctestsubset)
@@ -268,7 +308,8 @@ static void gen_cjal(DisasContext *ctx, int rd, target_ulong imm)

     tcg_temp_free(new_cursor);
     tcg_temp_free_i32(dst);
-    gen_goto_tb(ctx, 0, ctx->base.pc_next + imm, /*bounds_check=*/true); /* must use this for safety */
+    gen_goto_tb(ctx, 0, ctx->base.pc_next + imm,
+                /*bounds_check=*/true); /* must use this for safety */
     ctx->base.is_jmp = DISAS_NORETURN;
 }

@@ -345,7 +386,7 @@ static inline bool gen_cap_load_mem_idx(DisasContext *ctx, int32_t rd,
 }

 static inline bool gen_cap_load(DisasContext *ctx, int32_t rd, int32_t cs,
-                                target_long offset, MemOp op)
+                                target_long offset, MemOp op)
 {
     return gen_cap_load_mem_idx(ctx, rd, cs, offset, ctx->mem_idx, op);
 }
diff --git a/target/riscv/op_helper_cheri.c b/target/riscv/op_helper_cheri.c
index 8588b5415e..850fd32600 100644
--- a/target/riscv/op_helper_cheri.c
+++ b/target/riscv/op_helper_cheri.c
@@ -74,71 +74,116 @@ struct SCRInfo {
     bool w;
     enum SCRAccessMode access; /* Default = Invalid */
     const char *name;
-    //#define PRV_U 0
-    //#define PRV_S 1
-    //#define PRV_H 2 /* Reserved */
-    //#define PRV_M 3
+    // #define PRV_U 0
+    // #define PRV_S 1
+    // #define PRV_H 2 /* Reserved */
+    // #define PRV_M 3
 } scr_info[CheriSCR_MAX] = {
-    [CheriSCR_PCC] = {.r = true, .w = false, .access = U_Always, .name = "PCC"},
-    [CheriSCR_DDC] = {.r = true, .w = true, .access = U_Always, .name = "DDC"},
-
-    [CheriSCR_UTCC] = {.r = true, .w = true, .access = U_ASR, .name = "UTCC"},
-    [CheriSCR_UTDC] = {.r = true, .w = true, .access = U_ASR, .name = "UTDC"},
-    [CheriSCR_UScratchC] = {.r = true,
-                            .w = true,
-                            .access = U_ASR,
-                            .name = "UScratchC"},
-    [CheriSCR_UEPCC] = {.r = true, .w = true, .access = U_ASR, .name = "UEPCC"},
-
-    [CheriSCR_STCC] = {.r = true, .w = true, .access = S_ASR, .name = "STCC"},
-    [CheriSCR_STDC] = {.r = true, .w = true, .access = S_ASR, .name = "STDC"},
-    [CheriSCR_SScratchC] = {.r = true,
-                            .w = true,
-                            .access = S_ASR,
-                            .name = "SScratchC"},
-    [CheriSCR_SEPCC] = {.r = true, .w = true, .access = S_ASR, .name = "SEPCC"},
-
= "MTCC"}, - [CheriSCR_MTDC] = {.r = true, .w = true, .access = M_ASR, .name = "MTDC"}, - [CheriSCR_MScratchC] = {.r = true, - .w = true, - .access = M_ASR, - .name = "MScratchC"}, - [CheriSCR_MEPCC] = {.r = true, .w = true, .access = M_ASR, .name = "MEPCC"}, - - [CheriSCR_BSTCC] = {.r = true, .w = true, .access = H_ASR, .name= "BSTCC"}, - [CheriSCR_BSTDC] = {.r = true, .w = true, .access = H_ASR, .name= "BSTCC"}, - [CheriSCR_BSScratchC] = {.r = true, .w = true, .access = H_ASR, - .name= "BSTCC"}, - [CheriSCR_BSEPCC] = {.r = true, .w = true, .access = H_ASR, .name= "BSTCC"}, + [CheriSCR_PCC] = { .r = true, + .w = false, + .access = U_Always, + .name = "PCC" }, + [CheriSCR_DDC] = { .r = true, + .w = true, + .access = U_Always, + .name = "DDC" }, + + [CheriSCR_UTCC] = { .r = true, .w = true, .access = U_ASR, .name = "UTCC" }, + [CheriSCR_UTDC] = { .r = true, .w = true, .access = U_ASR, .name = "UTDC" }, + [CheriSCR_UScratchC] = { .r = true, + .w = true, + .access = U_ASR, + .name = "UScratchC" }, + [CheriSCR_UEPCC] = { .r = true, + .w = true, + .access = U_ASR, + .name = "UEPCC" }, + + [CheriSCR_STCC] = { .r = true, .w = true, .access = S_ASR, .name = "STCC" }, + [CheriSCR_STDC] = { .r = true, .w = true, .access = S_ASR, .name = "STDC" }, + [CheriSCR_SScratchC] = { .r = true, + .w = true, + .access = S_ASR, + .name = "SScratchC" }, + [CheriSCR_SEPCC] = { .r = true, + .w = true, + .access = S_ASR, + .name = "SEPCC" }, + + [CheriSCR_MTCC] = { .r = true, .w = true, .access = M_ASR, .name = "MTCC" }, + [CheriSCR_MTDC] = { .r = true, .w = true, .access = M_ASR, .name = "MTDC" }, + [CheriSCR_MScratchC] = { .r = true, + .w = true, + .access = M_ASR, + .name = "MScratchC" }, + [CheriSCR_MEPCC] = { .r = true, + .w = true, + .access = M_ASR, + .name = "MEPCC" }, + + [CheriSCR_BSTCC] = { .r = true, + .w = true, + .access = H_ASR, + .name = "BSTCC" }, + [CheriSCR_BSTDC] = { .r = true, + .w = true, + .access = H_ASR, + .name = "BSTCC" }, + [CheriSCR_BSScratchC] = { .r = true, + .w = true, + .access = H_ASR, + .name = "BSTCC" }, + [CheriSCR_BSEPCC] = { .r = true, + .w = true, + .access = H_ASR, + .name = "BSTCC" }, }; static inline cap_register_t *get_scr(CPUArchState *env, uint32_t index) { switch (index) { - case CheriSCR_PCC: return &env->PCC; - case CheriSCR_DDC: return &env->DDC; - - case CheriSCR_UTCC: return &env->UTCC; - case CheriSCR_UTDC: return &env->UTDC; - case CheriSCR_UScratchC: return &env->UScratchC; - case CheriSCR_UEPCC: return &env->UEPCC; - - case CheriSCR_STCC: return &env->STCC; - case CheriSCR_STDC: return &env->STDC; - case CheriSCR_SScratchC: return &env->SScratchC; - case CheriSCR_SEPCC: return &env->SEPCC; - - case CheriSCR_MTCC: return &env->MTCC; - case CheriSCR_MTDC: return &env->MTDC; - case CheriSCR_MScratchC: return &env->MScratchC; - case CheriSCR_MEPCC: return &env->MEPCC; - - case CheriSCR_BSTCC: return &env->VSTCC; - case CheriSCR_BSTDC: return &env->VSTDC; - case CheriSCR_BSScratchC: return &env->VSScratchC; - case CheriSCR_BSEPCC: return &env->VSEPCC; - default: assert(false && "Should have raised an invalid inst trap!"); + case CheriSCR_PCC: + return &env->PCC; + case CheriSCR_DDC: + return &env->DDC; + + case CheriSCR_UTCC: + return &env->UTCC; + case CheriSCR_UTDC: + return &env->UTDC; + case CheriSCR_UScratchC: + return &env->UScratchC; + case CheriSCR_UEPCC: + return &env->UEPCC; + + case CheriSCR_STCC: + return &env->STCC; + case CheriSCR_STDC: + return &env->STDC; + case CheriSCR_SScratchC: + return &env->SScratchC; + case CheriSCR_SEPCC: + return 
+        return &env->SEPCC;
+
+    case CheriSCR_MTCC:
+        return &env->MTCC;
+    case CheriSCR_MTDC:
+        return &env->MTDC;
+    case CheriSCR_MScratchC:
+        return &env->MScratchC;
+    case CheriSCR_MEPCC:
+        return &env->MEPCC;
+
+    case CheriSCR_BSTCC:
+        return &env->VSTCC;
+    case CheriSCR_BSTDC:
+        return &env->VSTDC;
+    case CheriSCR_BSScratchC:
+        return &env->VSScratchC;
+    case CheriSCR_BSEPCC:
+        return &env->VSEPCC;
+    default:
+        assert(false && "Should have raised an invalid inst trap!");
     }
 }

@@ -187,7 +232,7 @@ void HELPER(cspecialrw)(CPUArchState *env, uint32_t cd, uint32_t cs,
         assert(scr_info[index].r && "Bug? Should be readable");
         // For xEPCC we clear the low address bit(s) when reading to match xEPC.
         // See helper_sret/helper_mret for more context.
-        switch(index) {
+        switch (index) {
         case CheriSCR_UEPCC:
         case CheriSCR_SEPCC:
         case CheriSCR_MEPCC: {
@@ -200,9 +245,10 @@ void HELPER(cspecialrw)(CPUArchState *env, uint32_t cd, uint32_t cs,
                 scr_info[index].name, PRINT_CAP_ARGS(scr));
             legalized._cr_cursor = addr;
             if (!cap_is_unsealed(scr)) {
-                warn_report("Invalidating sealed %s (contained an unaligned "
-                            "capability): " PRINT_CAP_FMTSTR,
-                            scr_info[index].name, PRINT_CAP_ARGS(scr));
+                warn_report(
+                    "Invalidating sealed %s (contained an unaligned "
+                    "capability): " PRINT_CAP_FMTSTR,
+                    scr_info[index].name, PRINT_CAP_ARGS(scr));
                 legalized.cr_tag = false;
             }
         }
@@ -219,11 +265,13 @@ void HELPER(cspecialrw)(CPUArchState *env, uint32_t cd, uint32_t cs,
 #ifdef CONFIG_TCG_LOG_INSTR
     if (qemu_log_instr_enabled(env)) {
         qemu_log_instr_extra(env, " %s <- " PRINT_CAP_FMTSTR "\n",
-                             scr_info[index].name, PRINT_CAP_ARGS(&new_val));
+                             scr_info[index].name,
+                             PRINT_CAP_ARGS(&new_val));
     }
 #endif
     if (index == CheriSCR_DDC && !new_val.cr_tag)
-        qemu_log_instr_or_mask_msg(env, CPU_LOG_INT,
+        qemu_log_instr_or_mask_msg(
+            env, CPU_LOG_INT,
             " Note: Installed untagged $ddc at " TARGET_FMT_lx "\n",
             cpu_get_recent_pc(env));
     *scr = new_val;
@@ -270,13 +318,15 @@ void HELPER(amoswap_cap)(CPUArchState *env, uint32_t dest_reg,
         raise_cheri_exception(env, CapEx_PermitStoreLocalCapViolation, val_reg);
     }

-    target_ulong addr = (target_ulong)(cap_get_cursor(cbp) + (target_long)offset);
+    target_ulong addr =
+        (target_ulong)(cap_get_cursor(cbp) + (target_long)offset);
     if (!cap_is_in_bounds(cbp, addr, CHERI_CAP_SIZE)) {
         qemu_log_instr_or_mask_msg(env, CPU_LOG_INT,
-                                   "Failed capability bounds check:"
-                                   "offset=" TARGET_FMT_ld " cursor=" TARGET_FMT_lx
-                                   " addr=" TARGET_FMT_lx "\n",
-                                   offset, cap_get_cursor(cbp), addr);
+                                   "Failed capability bounds check:"
+                                   "offset=" TARGET_FMT_ld
+                                   " cursor=" TARGET_FMT_lx
+                                   " addr=" TARGET_FMT_lx "\n",
+                                   offset, cap_get_cursor(cbp), addr);
         raise_cheri_exception(env, CapEx_LengthViolation, addr_reg);
     } else if (!QEMU_IS_ALIGNED(addr, CHERI_CAP_SIZE)) {
         raise_unaligned_store_exception(env, addr, _host_return_address);
@@ -314,13 +364,15 @@ static void lr_c_impl(CPUArchState *env, uint32_t dest_reg, uint32_t addr_reg,
         raise_cheri_exception(env, CapEx_PermitLoadViolation, addr_reg);
     }

-    target_ulong addr = (target_ulong)(cap_get_cursor(cbp) + (target_long)offset);
+    target_ulong addr =
+        (target_ulong)(cap_get_cursor(cbp) + (target_long)offset);
     if (!cap_is_in_bounds(cbp, addr, CHERI_CAP_SIZE)) {
         qemu_log_instr_or_mask_msg(env, CPU_LOG_INT,
-                                   "Failed capability bounds check:"
-                                   "offset=" TARGET_FMT_ld " cursor=" TARGET_FMT_lx
-                                   " addr=" TARGET_FMT_lx "\n",
-                                   offset, cap_get_cursor(cbp), addr);
+                                   "Failed capability bounds check:"
+                                   "offset=" TARGET_FMT_ld
+                                   " cursor=" TARGET_FMT_lx
+                                   " addr=" TARGET_FMT_lx "\n",
+                                   offset, cap_get_cursor(cbp), addr);
         raise_cheri_exception(env, CapEx_LengthViolation, addr_reg);
     } else if (!QEMU_IS_ALIGNED(addr, CHERI_CAP_SIZE)) {
         raise_unaligned_store_exception(env, addr, _host_return_address);
@@ -341,7 +393,8 @@ static void lr_c_impl(CPUArchState *env, uint32_t dest_reg, uint32_t addr_reg,
     update_compressed_capreg(env, dest_reg, pesbt, tag, cursor);
 }

-void HELPER(lr_c_modedep)(CPUArchState *env, uint32_t dest_reg, uint32_t addr_reg)
+void HELPER(lr_c_modedep)(CPUArchState *env, uint32_t dest_reg,
+                          uint32_t addr_reg)
 {
     target_long offset = 0;
     if (!cheri_in_capmode(env)) {
@@ -368,7 +421,7 @@ static target_ulong sc_c_impl(CPUArchState *env, uint32_t addr_reg,
                               uintptr_t _host_return_address)
 {
     assert(!qemu_tcg_mttcg_enabled() ||
-           (cpu_in_exclusive_context(env_cpu(env)) &&
+           (cpu_in_exclusive_context(env_cpu(env)) &&
             "Should have raised EXCP_ATOMIC"));

     const cap_register_t *cbp = get_load_store_base_cap(env, addr_reg);
@@ -386,13 +439,15 @@ static target_ulong sc_c_impl(CPUArchState *env, uint32_t addr_reg,
         raise_cheri_exception(env, CapEx_PermitStoreLocalCapViolation, val_reg);
     }

-    target_ulong addr = (target_ulong)(cap_get_cursor(cbp) + (target_long)offset);
+    target_ulong addr =
+        (target_ulong)(cap_get_cursor(cbp) + (target_long)offset);
     if (!cap_is_in_bounds(cbp, addr, CHERI_CAP_SIZE)) {
         qemu_log_instr_or_mask_msg(env, CPU_LOG_INT,
-                                   "Failed capability bounds check:"
-                                   "offset=" TARGET_FMT_ld " cursor=" TARGET_FMT_lx
-                                   " addr=" TARGET_FMT_lx "\n",
-                                   offset, cap_get_cursor(cbp), addr);
+                                   "Failed capability bounds check:"
+                                   "offset=" TARGET_FMT_ld
+                                   " cursor=" TARGET_FMT_lx
+                                   " addr=" TARGET_FMT_lx "\n",
+                                   offset, cap_get_cursor(cbp), addr);
         raise_cheri_exception(env, CapEx_LengthViolation, addr_reg);
     } else if (!QEMU_IS_ALIGNED(addr, CHERI_CAP_SIZE)) {
         raise_unaligned_store_exception(env, addr, _host_return_address);
@@ -433,7 +488,8 @@ static target_ulong sc_c_impl(CPUArchState *env, uint32_t addr_reg,
     return 1; // failure
 }

-target_ulong HELPER(sc_c_modedep)(CPUArchState *env, uint32_t addr_reg, uint32_t val_reg)
+target_ulong HELPER(sc_c_modedep)(CPUArchState *env, uint32_t addr_reg,
+                                  uint32_t val_reg)
 {
     target_long offset = 0;
     if (!cheri_in_capmode(env)) {
@@ -443,13 +499,23 @@ target_ulong HELPER(sc_c_modedep)(CPUArchState *env, uint32_t addr_reg, uint32_t
     return sc_c_impl(env, addr_reg, val_reg, offset, GETPC());
 }

-target_ulong HELPER(sc_c_ddc)(CPUArchState *env, uint32_t addr_reg, uint32_t val_reg)
+target_ulong HELPER(sc_c_ddc)(CPUArchState *env, uint32_t addr_reg,
+                              uint32_t val_reg)
 {
     target_long offset = get_capreg_cursor(env, addr_reg);
     return sc_c_impl(env, CHERI_EXC_REGNUM_DDC, val_reg, offset, GETPC());
 }

-target_ulong HELPER(sc_c_cap)(CPUArchState *env, uint32_t addr_reg, uint32_t val_reg)
+target_ulong HELPER(sc_c_cap)(CPUArchState *env, uint32_t addr_reg,
+                              uint32_t val_reg)
 {
     return sc_c_impl(env, addr_reg, val_reg, /*offset=*/0, GETPC());
 }
+
+/* Helpers for experimental instructions. */
+target_ulong HELPER(ctestdereferenceable)(CPUArchState *env, uint32_t addr_reg)
+{
+    const cap_register_t *cbp = get_readonly_capreg(env, addr_reg);
+
+    return cbp->cr_tag && cap_is_unsealed(cbp) && cap_cursor_in_bounds(cbp);
+}