From 03d66a85110ed85906b68bb8cfd3a42ec0f76cb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20M=C3=A4rkl?= Date: Wed, 9 Mar 2022 16:50:10 +0100 Subject: [PATCH] adds tracing for AArch64 Some parts of this code are shared with the 32-bit arm, specifically tracing of flags and conditions. For AArch64, we store operands only for the flags that are actually used, as opposed to existing arm code, which always dumps all of them unconditionally. To account for this change, loaded_cpsr and store_cpsr have been refactored to now be bitfields indicating the individual flags, rather than being just single booleans. Old code just sets them to TRACE_CPSR_ALL and can be made more fine-grained successively in the future if desired. --- include/tracewrap.h | 8 +- linux-user/aarch64/trace_info.h | 6 + target/arm/helper.h | 19 ++- target/arm/trace_helper.c | 247 +++++++++++++++----------------- target/arm/translate-a64.c | 212 ++++++++++++++++++++++++++- target/arm/translate-vfp.c | 16 +-- target/arm/translate.c | 180 +++++++++++------------ target/arm/translate.h | 44 ++++++ tracewrap.c | 2 +- 9 files changed, 491 insertions(+), 243 deletions(-) create mode 100644 linux-user/aarch64/trace_info.h diff --git a/include/tracewrap.h b/include/tracewrap.h index c23eb22b9330..54055e38679c 100644 --- a/include/tracewrap.h +++ b/include/tracewrap.h @@ -42,9 +42,9 @@ void qemu_trace_set_mode(const char *mode_str); void qemu_trace_endframe(CPUArchState *env, target_ulong pc, target_ulong size); void qemu_trace_finish(uint32_t exit_code); -OperandInfo * load_store_reg(target_ulong reg, target_ulong val, int ls); -OperandInfo * load_store_reg64(target_ulong reg, uint64_t val, int ls); -OperandInfo * load_store_mem(uint32_t addr, int ls, const void *data, size_t data_size); +OperandInfo * load_store_reg(uint32_t reg, uint32_t val, int ls); +OperandInfo * load_store_reg64(uint32_t reg, uint64_t val, int ls); +OperandInfo * load_store_mem(uint64_t addr, int ls, const void *data, size_t 
data_size); #define REG_EFLAGS 66 #define REG_LO 33 @@ -65,4 +65,6 @@ OperandInfo * load_store_mem(uint32_t addr, int ls, const void *data, size_t dat #define REG_S0 100 // s1 is REG_S0 + 1 and so on +#define REG64_D0 100 // d1 is REG_D0 + 1 and so on + #define SEG_BIT 8 diff --git a/linux-user/aarch64/trace_info.h b/linux-user/aarch64/trace_info.h new file mode 100644 index 000000000000..1c309edc5b53 --- /dev/null +++ b/linux-user/aarch64/trace_info.h @@ -0,0 +1,6 @@ +#pragma once + +#include "frame_arch.h" + +const uint64_t frame_arch = frame_arch_aarch64; +const uint64_t frame_mach = frame_mach_aarch64; diff --git a/target/arm/helper.h b/target/arm/helper.h index fa039c2f6368..919f755d4156 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -58,6 +58,13 @@ DEF_HELPER_3(cpsr_write, void, env, i32, i32) DEF_HELPER_2(cpsr_write_eret, void, env, i32) DEF_HELPER_1(cpsr_read, i32, env) +#define TRACE_CPSR_NF (1 << 0) +#define TRACE_CPSR_ZF (1 << 1) +#define TRACE_CPSR_CF (1 << 2) +#define TRACE_CPSR_VF (1 << 3) +#define TRACE_CPSR_QF (1 << 4) +#define TRACE_CPSR_GE (1 << 5) +#define TRACE_CPSR_ALL 0xffffffff #ifdef HAS_TRACEWRAP DEF_HELPER_1(trace_newframe, void, i32) DEF_HELPER_3(trace_endframe, void, env, i32, i32) @@ -69,11 +76,15 @@ DEF_HELPER_2(trace_load_reg, void, i32, i32) DEF_HELPER_2(trace_store_reg, void, i32, i32) DEF_HELPER_2(trace_load_reg64, void, i32, i64) DEF_HELPER_2(trace_store_reg64, void, i32, i64) -DEF_HELPER_3(trace_cpsr_write, void, env, i32, i32) -DEF_HELPER_1(trace_cpsr_read, i32, env) -DEF_HELPER_1(log_read_cpsr, void, env) -DEF_HELPER_1(log_store_cpsr, void, env) DEF_HELPER_1(trace_mode, void, ptr) +DEF_HELPER_2(trace_read_cpsr, void, env, i32) +DEF_HELPER_2(trace_store_cpsr, void, env, i32) +#ifdef TARGET_AARCH64 +DEF_HELPER_1(trace_newframe_64, void, i64) +DEF_HELPER_2(trace_endframe_64, void, env, i64) +DEF_HELPER_4(trace_ld64_64, void, env, i64, i64, i32) +DEF_HELPER_4(trace_st64_64, void, env, i64, i64, i32) +#endif #endif 
//HAS_TRACEWRAP DEF_HELPER_3(v7m_msr, void, env, i32, i32) diff --git a/target/arm/trace_helper.c b/target/arm/trace_helper.c index 5b73895ed4a3..17235626dfe4 100644 --- a/target/arm/trace_helper.c +++ b/target/arm/trace_helper.c @@ -7,27 +7,15 @@ #include "exec/helper-proto.h" #include "tcg/tcg.h" -uint32_t HELPER(trace_cpsr_read)(CPUARMState *env) { - uint32_t res = cpsr_read(env) & ~CPSR_EXEC; - OperandInfo * oi = load_store_reg(REG_CPSR, res, 0); - qemu_trace_add_operand(oi, 0x1); - return res; -} - -void HELPER(trace_cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask) { - OperandInfo * oi = load_store_reg(REG_CPSR, val, 1); - qemu_trace_add_operand(oi, 0x2); -} - -void HELPER(trace_newframe)(target_ulong pc) { +void HELPER(trace_newframe)(uint32_t pc) { qemu_trace_newframe(pc, 0); } -void HELPER(trace_endframe)(CPUARMState *env, target_ulong old_pc, uint32_t size) { +void HELPER(trace_endframe)(CPUARMState *env, uint32_t old_pc, uint32_t size) { qemu_trace_endframe(env, old_pc, size); } -OperandInfo *load_store_mem(uint32_t addr, int ls, const void *data, size_t data_size) { +OperandInfo *load_store_mem(uint64_t addr, int ls, const void *data, size_t data_size) { MemOperand * mo = g_new(MemOperand, 1); mem_operand__init(mo); @@ -58,7 +46,7 @@ OperandInfo *load_store_mem(uint32_t addr, int ls, const void *data, size_t data static OperandInfo *build_load_store_reg_op(char *name, int ls, const void *data, size_t data_size) { RegOperand * ro = g_new(RegOperand, 1); reg_operand__init(ro); - ro->name = name; + ro->name = name; OperandInfoSpecific *ois = g_new(OperandInfoSpecific, 1); operand_info_specific__init(ois); @@ -84,76 +72,68 @@ static OperandInfo *build_load_store_reg_op(char *name, int ls, const void *data } OperandInfo * load_store_reg(uint32_t reg, uint32_t val, int ls) { - char *name; - if (reg >= REG_S0) { - name = g_strdup_printf("S%u", (unsigned int)(reg - REG_S0)); - } else { - switch (reg) { - case REG_SP: name = g_strdup("SP"); break; - 
case REG_LR: name = g_strdup("LR"); break; - case REG_PC: name = g_strdup("PC"); break; - case REG_NF: name = g_strdup("NF"); break; - case REG_ZF: name = g_strdup("ZF"); break; - case REG_CF: name = g_strdup("CF"); break; - case REG_VF: name = g_strdup("VF"); break; - case REG_QF: name = g_strdup("QF"); break; - case REG_GE: name = g_strdup("GE"); break; - default: name = g_strdup_printf("R%u", (unsigned int)reg); break; - } - } - return build_load_store_reg_op(name, ls, &val, sizeof(val)); + char *name; + if (reg >= REG_S0) { + name = g_strdup_printf("S%u", (unsigned int)(reg - REG_S0)); + } else { + switch (reg) { + case REG_SP: name = g_strdup("SP"); break; + case REG_LR: name = g_strdup("LR"); break; + case REG_PC: name = g_strdup("PC"); break; + case REG_NF: name = g_strdup("NF"); break; + case REG_ZF: name = g_strdup("ZF"); break; + case REG_CF: name = g_strdup("CF"); break; + case REG_VF: name = g_strdup("VF"); break; + case REG_QF: name = g_strdup("QF"); break; + case REG_GE: name = g_strdup("GE"); break; + default: name = g_strdup_printf("R%u", (unsigned int)reg); break; + } + } + return build_load_store_reg_op(name, ls, &val, sizeof(val)); } -OperandInfo * load_store_reg64(uint32_t reg, uint64_t val, int ls) { - return build_load_store_reg_op(g_strdup_printf("D%u", (unsigned int)reg), ls, &val, sizeof(val)); +OperandInfo *load_store_reg64(uint32_t reg, uint64_t val, int ls) { + char *name; + if (reg >= REG64_D0) { + name = g_strdup_printf("D%u", (unsigned int)reg - REG64_D0); + } else { + name = g_strdup_printf("R%u", (unsigned int)reg); + } + return build_load_store_reg_op(name, ls, &val, sizeof(val)); } -void HELPER(log_store_cpsr)(CPUARMState *env) +static void trace_cpsr(CPUARMState *env, uint32_t mask, int ls) { - OperandInfo *oi; uint32_t val = cpsr_read(env); - - oi = load_store_reg(REG_NF, (val >> 31) & 0x1, 1); - qemu_trace_add_operand(oi, 0x2); - - oi = load_store_reg(REG_ZF, (val >> 30) & 0x1, 1); - qemu_trace_add_operand(oi, 0x2); - - oi = 
load_store_reg(REG_CF, (val >> 29) & 0x1, 1); - qemu_trace_add_operand(oi, 0x2); - - oi = load_store_reg(REG_VF, (val >> 28) & 0x1, 1); - qemu_trace_add_operand(oi, 0x2); - - oi = load_store_reg(REG_QF, (val >> 27) & 0x1, 1); - qemu_trace_add_operand(oi, 0x2); - - oi = load_store_reg(REG_GE, (val >> 16) & 0xF, 1); - qemu_trace_add_operand(oi, 0x2); + int inout = ls ? 2 : 1; + if (mask & TRACE_CPSR_NF) { + qemu_trace_add_operand(load_store_reg(REG_NF, (val >> 31) & 0x1, ls), inout); + } + if (mask & TRACE_CPSR_ZF) { + qemu_trace_add_operand(load_store_reg(REG_ZF, (val >> 30) & 0x1, ls), inout); + } + if (mask & TRACE_CPSR_CF) { + qemu_trace_add_operand(load_store_reg(REG_CF, (val >> 29) & 0x1, ls), inout); + } + if (mask & TRACE_CPSR_VF) { + qemu_trace_add_operand(load_store_reg(REG_VF, (val >> 28) & 0x1, ls), inout); + } + if (mask & TRACE_CPSR_QF) { + qemu_trace_add_operand(load_store_reg(REG_QF, (val >> 27) & 0x1, ls), inout); + } + if (mask & TRACE_CPSR_GE) { + qemu_trace_add_operand(load_store_reg(REG_GE, (val >> 16) & 0xF, ls), inout); + } } -void HELPER(log_read_cpsr)(CPUARMState *env) +void HELPER(trace_store_cpsr)(CPUARMState *env, uint32_t mask) { - OperandInfo *oi; - uint32_t val = cpsr_read(env); - - oi = load_store_reg(REG_NF, (val >> 31) & 0x1, 0); - qemu_trace_add_operand(oi, 0x1); - - oi = load_store_reg(REG_ZF, (val >> 30) & 0x1, 0); - qemu_trace_add_operand(oi, 0x1); - - oi = load_store_reg(REG_CF, (val >> 29) & 0x1, 0); - qemu_trace_add_operand(oi, 0x1); - - oi = load_store_reg(REG_VF, (val >> 28) & 0x1, 0); - qemu_trace_add_operand(oi, 0x1); - - oi = load_store_reg(REG_QF, (val >> 27) & 0x1, 0); - qemu_trace_add_operand(oi, 0x1); + trace_cpsr(env, mask, 1); +} - oi = load_store_reg(REG_GE, (val >> 16) & 0xF, 0); - qemu_trace_add_operand(oi, 0x1); +void HELPER(trace_read_cpsr)(CPUARMState *env, uint32_t mask) +{ + trace_cpsr(env, mask, 0); } void HELPER(trace_load_reg)(uint32_t reg, uint32_t val) @@ -172,95 +152,100 @@ void 
HELPER(trace_store_reg)(uint32_t reg, uint32_t val) void HELPER(trace_load_reg64)(uint32_t reg, uint64_t val) { - qemu_log("This 64-bit register (d%d) was read. Value 0x%llx\n", reg, (unsigned long long)val); + qemu_log("This 64-bit register (%d) was read. Value 0x%llx\n", reg, (unsigned long long)val); OperandInfo *oi = load_store_reg64(reg, val, 0); qemu_trace_add_operand(oi, 0x1); } void HELPER(trace_store_reg64)(uint32_t reg, uint64_t val) { - qemu_log("This 64-bit register (d%d) was written. Value: 0x%llx\n", reg, (unsigned long long)val); + qemu_log("This 64-bit register (%d) was written. Value: 0x%llx\n", reg, (unsigned long long)val); OperandInfo *oi = load_store_reg64(reg, val, 1); qemu_trace_add_operand(oi, 0x2); } void HELPER(trace_ld)(CPUARMState *env, uint32_t val, uint32_t addr, uint32_t opc) { - int len; qemu_log("This was a read 0x%x addr:0x%x value:0x%x\n", env->regs[15], addr, val); - switch (opc & MO_SIZE) { - case MO_8: - len = 1; - break; - case MO_16: - len = 2; - break; - case MO_32: - len = 4; - break; - default: - qemu_log("Do not reach\n"); - return; - } - OperandInfo *oi = load_store_mem(addr, 0, &val, len); + size_t sz = memop_size(opc); + if (!sz || sz > 4) { + qemu_log("Invalid memop\n"); + return; + } + OperandInfo *oi = load_store_mem(addr, 0, &val, sz); qemu_trace_add_operand(oi, 0x1); } void HELPER(trace_st)(CPUARMState *env, uint32_t val, uint32_t addr, uint32_t opc) { - int len; qemu_log("This was a store 0x%x addr:0x%x value:0x%x\n", env->regs[15], addr, val); - - switch (opc & MO_SIZE) { - case MO_8: - len = 1; - break; - case MO_16: - len = 2; - break; - case MO_32: - len = 4; - break; - default: - qemu_log("Do not reach\n"); - return; - } - OperandInfo *oi = load_store_mem(addr, 1, &val, len); + size_t sz = memop_size(opc); + if (!sz || sz > 4) { + qemu_log("Invalid memop\n"); + return; + } + OperandInfo *oi = load_store_mem(addr, 1, &val, sz); qemu_trace_add_operand(oi, 0x2); } void HELPER(trace_ld64)(CPUARMState *env, 
uint64_t val, uint32_t addr, uint32_t opc) { - int len; qemu_log("This was a 64-bit read 0x%x addr:0x%x value:0x%llx\n", env->regs[15], addr, (unsigned long long)val); - switch (opc & MO_SIZE) { - case MO_64: - len = 8; - break; - default: - qemu_log("Do not reach\n"); - return; - } - OperandInfo *oi = load_store_mem(addr, 0, &val, len); + size_t sz = memop_size(opc); + if (!sz || sz > 8) { + qemu_log("Invalid memop\n"); + return; + } + OperandInfo *oi = load_store_mem(addr, 0, &val, sz); qemu_trace_add_operand(oi, 0x1); } void HELPER(trace_st64)(CPUARMState *env, uint64_t val, uint32_t addr, uint32_t opc) { - int len; qemu_log("This was a 64-bit store 0x%x addr:0x%x value:0x%llx\n", env->regs[15], addr, (unsigned long long)val); - switch (opc & MO_SIZE) { - case MO_64: - len = 8; - break; - default: - qemu_log("Do not reach\n"); - return; - } - OperandInfo *oi = load_store_mem(addr, 1, &val, len); + size_t sz = memop_size(opc); + if (!sz || sz > 8) { + qemu_log("Invalid memop\n"); + return; + } + OperandInfo *oi = load_store_mem(addr, 1, &val, sz); qemu_trace_add_operand(oi, 0x2); } void HELPER(trace_mode)(void *mode) { qemu_trace_set_mode(mode); } + +#ifdef TARGET_AARCH64 +void HELPER(trace_newframe_64)(uint64_t pc) { + qemu_trace_newframe(pc, 0); +} + +void HELPER(trace_endframe_64)(CPUARMState *env, uint64_t old_pc) { + qemu_trace_endframe(env, old_pc, 4); +} + +void HELPER(trace_ld64_64)(CPUARMState *env, uint64_t val, uint64_t addr, uint32_t opc) +{ + qemu_log("This was a 64-bit read 0x%x addr:0x%llx value:0x%llx\n", env->regs[15], (unsigned long long)addr, (unsigned long long)val); + size_t sz = memop_size(opc); + if (!sz || sz > 8) { + qemu_log("Invalid memop\n"); + return; + } + OperandInfo *oi = load_store_mem(addr, 0, &val, sz); + qemu_trace_add_operand(oi, 0x1); +} + +void HELPER(trace_st64_64)(CPUARMState *env, uint64_t val, uint64_t addr, uint32_t opc) +{ + qemu_log("This was a 64-bit store 0x%x addr:0x%llx value:0x%llx\n", env->regs[15], (unsigned 
long long)addr, (unsigned long long)val); + size_t sz = memop_size(opc); + if (!sz || sz > 8) { + qemu_log("Invalid memop\n"); + return; + } + OperandInfo *oi = load_store_mem(addr, 1, &val, sz); + qemu_trace_add_operand(oi, 0x2); +} +#endif diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index cec672f2296c..9d487c68c720 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -385,6 +385,84 @@ static void gen_step_complete_exception(DisasContext *s) s->base.is_jmp = DISAS_NORETURN; } +#ifdef HAS_TRACEWRAP + +static void gen_trace_load_reg_var(int reg, TCGv_i64 var) +{ + TCGv_i32 t = tcg_const_i32(reg); + gen_helper_trace_load_reg64(t, var); + tcg_temp_free_i32(t); +} + +static void gen_trace_store_reg_var(int reg, TCGv_i64 var) +{ + TCGv_i32 t = tcg_const_i32(reg); + gen_helper_trace_store_reg64(t, var); + tcg_temp_free_i32(t); +} + +/* + * sp indicates whether reg == 31 means sp (cpu_reg_sp() was used) + * instead of zr (cpu_reg() was used) + */ +static void gen_trace_load_reg(int reg, bool sp) +{ + if (!sp && reg == 31) { + return; + } + gen_trace_load_reg_var(reg, cpu_X[reg]); +} + +/* + * sp has same meaning as above + */ +static void gen_trace_store_reg(int reg, bool sp) +{ + if (!sp && reg == 31) { + return; + } + gen_trace_store_reg_var(reg, cpu_X[reg]); +} + +/* + * This is to record memory accesses for atomic instructions. Because those are actually implemented + * in helpers, we don't have access to the actual values being read or written (unless we modify the helpers + * in the future) and instead we pre- or re-read from memory here. + * Thus, this can only work reliably without concurrency at the moment, hence the warning. 
+ */ +static void gen_trace_mem_access_atomic(TCGv_i64 addr, TCGArg memidx, MemOp mop, bool write) +{ + qemu_log("Warning: using non-atomic memory access for trace operands of atomic instruction!\n"); + TCGv_i64 valt = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(valt, addr, memidx, mop); + TCGv_i32 mopt = tcg_const_i32(mop); + (write ? gen_helper_trace_st64_64 : gen_helper_trace_ld64_64)(cpu_env, valt, addr, mopt); + tcg_temp_free_i32(mopt); + tcg_temp_free_i64(valt); +} + +static inline void gen_trace_newframe(DisasContext *s) +{ + TCGv_i64 t = tcg_const_i64(s->pc_curr); + gen_helper_trace_newframe_64(t); + tcg_temp_free_i64(t); + trace_cpsr_reset(); +} + +static inline void gen_trace_endframe(DisasContext *s) +{ + gen_trace_flush_cpsr(); + TCGv_i64 tmp0 = tcg_temp_new_i64(); + tcg_gen_movi_i64(tmp0, s->pc_curr); + gen_helper_trace_endframe_64(cpu_env, tmp0); + tcg_temp_free_i64(tmp0); +} +#else //HAS_TRACEWRAP +static inline void gen_trace_load_reg(int reg, bool sp) {} +static inline void gen_trace_store_reg(int reg, bool sp) {} +static inline void gen_trace_mem_access_atomic(TCGv_i64 addr, TCGArg memidx, MemOp mop, bool write) {} +#endif //HAS_TRACEWRAP + static inline bool use_goto_tb(DisasContext *s, uint64_t dest) { if (s->ss_active) { @@ -395,6 +473,9 @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest) static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) { +#ifdef HAS_TRACEWRAP + gen_trace_endframe(s); +#endif if (use_goto_tb(s, dest)) { tcg_gen_goto_tb(n); gen_a64_set_pc_im(dest); @@ -490,6 +571,7 @@ TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) } else { tcg_gen_ext32u_i64(v, cpu_X[reg]); } + gen_trace_load_reg(reg, false); } else { tcg_gen_movi_i64(v, 0); } @@ -504,6 +586,7 @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) } else { tcg_gen_ext32u_i64(v, cpu_X[reg]); } + gen_trace_load_reg(reg, true); return v; } @@ -700,6 +783,7 @@ static inline void gen_set_NZ64(TCGv_i64 result) { 
tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result); tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF); + trace_store_cpsr(TRACE_CPSR_ZF | TRACE_CPSR_NF); } /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */ @@ -710,9 +794,11 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result) } else { tcg_gen_extrl_i64_i32(cpu_ZF, result); tcg_gen_mov_i32(cpu_NF, cpu_ZF); + trace_store_cpsr(TRACE_CPSR_ZF | TRACE_CPSR_NF); } tcg_gen_movi_i32(cpu_CF, 0); tcg_gen_movi_i32(cpu_VF, 0); + trace_store_cpsr(TRACE_CPSR_CF | TRACE_CPSR_VF); } /* dest = T0 + T1; compute C, N, V and Z flags */ @@ -740,6 +826,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_mov_i64(dest, result); tcg_temp_free_i64(result); tcg_temp_free_i64(flag); + trace_store_cpsr(TRACE_CPSR_CF | TRACE_CPSR_VF); } else { /* 32 bit arithmetic */ TCGv_i32 t0_32 = tcg_temp_new_i32(); @@ -759,6 +846,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_temp_free_i32(tmp); tcg_temp_free_i32(t0_32); tcg_temp_free_i32(t1_32); + trace_store_cpsr(TRACE_CPSR_ZF | TRACE_CPSR_NF | TRACE_CPSR_CF | TRACE_CPSR_VF); } } @@ -787,6 +875,7 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_mov_i64(dest, result); tcg_temp_free_i64(flag); tcg_temp_free_i64(result); + trace_store_cpsr(TRACE_CPSR_CF | TRACE_CPSR_VF); } else { /* 32 bit arithmetic */ TCGv_i32 t0_32 = tcg_temp_new_i32(); @@ -806,6 +895,7 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_and_i32(cpu_VF, cpu_VF, tmp); tcg_temp_free_i32(tmp); tcg_gen_extu_i32_i64(dest, cpu_NF); + trace_store_cpsr(TRACE_CPSR_ZF | TRACE_CPSR_NF | TRACE_CPSR_CF | TRACE_CPSR_VF); } } @@ -888,6 +978,11 @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, { memop = finalize_memop(s, memop); tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop); +#ifdef HAS_TRACEWRAP + TCGv_i32 t = tcg_const_i32(memop); + gen_helper_trace_st64_64(cpu_env, source, tcg_addr, t); 
+ tcg_temp_free_i32(t); +#endif if (iss_valid) { uint32_t syn; @@ -923,6 +1018,11 @@ static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, { memop = finalize_memop(s, memop); tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop); +#ifdef HAS_TRACEWRAP + TCGv_i32 t = tcg_const_i32(memop); + gen_helper_trace_ld64_64(cpu_env, dest, tcg_addr, t); + tcg_temp_free_i32(t); +#endif if (extend && (memop & MO_SIGN)) { g_assert((memop & MO_SIZE) <= MO_32); @@ -1308,6 +1408,7 @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn) if (insn & (1U << 31)) { /* BL Branch with link */ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); + gen_trace_store_reg(30, false); } /* B Branch / BL Branch with link */ @@ -1363,6 +1464,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn) addr = s->pc_curr + sextract32(insn, 5, 14) * 4; rt = extract32(insn, 0, 5); + gen_trace_load_reg(rt, false); tcg_cmp = tcg_temp_new_i64(); tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos)); label_match = gen_new_label(); @@ -1872,9 +1974,13 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, case ARM_CP_NZCV: tcg_rt = cpu_reg(s, rt); if (isread) { + trace_read_cpsr(TRACE_CPSR_NF | TRACE_CPSR_ZF | TRACE_CPSR_CF | TRACE_CPSR_VF); gen_get_nzcv(tcg_rt); + gen_trace_store_reg(rt, false); } else { + gen_trace_load_reg(rt, false); gen_set_nzcv(tcg_rt); + trace_store_cpsr(TRACE_CPSR_NF | TRACE_CPSR_ZF | TRACE_CPSR_CF | TRACE_CPSR_VF); } return; case ARM_CP_CURRENTEL: @@ -2058,6 +2164,9 @@ static void disas_exc(DisasContext *s, uint32_t insn) int imm16 = extract32(insn, 5, 16); TCGv_i32 tmp; +#ifdef HAS_TRACEWRAP + gen_trace_endframe(s); +#endif switch (opc) { case 0: /* For SVC, HVC and SMC we advance the single-step state @@ -2180,6 +2289,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) case 1: /* BLR */ case 2: /* RET */ btype_mod = opc; + gen_trace_load_reg(rn, false); switch (op3) { case 0: /* BR, BLR, RET */ @@ -2227,6 
+2337,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) /* BLR also needs to load return address */ if (opc == 1) { tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); + gen_trace_store_reg(30, false); } break; @@ -2239,6 +2350,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) goto do_unallocated; } btype_mod = opc & 1; + gen_trace_load_reg(rn, false); if (s->pauth_active) { dst = new_tmp_a64(s); modifier = cpu_reg_sp(s, op4); @@ -2254,6 +2366,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) /* BLRAA also needs to load return address */ if (opc == 9) { tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); + gen_trace_store_reg(30, false); } break; @@ -2513,12 +2626,19 @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt, int memidx = get_mem_index(s); TCGv_i64 clean_addr; + gen_trace_load_reg(rn, true); if (rn == 31) { gen_check_sp_alignment(s); } clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size); + MemOp mop = size | MO_ALIGN | s->be_data; + gen_trace_mem_access_atomic(clean_addr, memidx, mop, false); + gen_trace_load_reg(rs, false); + gen_trace_load_reg(rt, false); tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx, - size | MO_ALIGN | s->be_data); + mop); + gen_trace_mem_access_atomic(clean_addr, memidx, mop, true); + gen_trace_store_reg(rs, true); } static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, @@ -2710,12 +2830,14 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) if (rn == 31) { gen_check_sp_alignment(s); } + gen_trace_load_reg(rn, true); clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size); /* TODO: ARMv8.4-LSE SCTLR.nAA */ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true, rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); + gen_trace_store_reg(rt, false); return; case 0x2: case 0x3: /* CASP / STXP */ @@ -2830,6 +2952,7 @@ static 
void disas_ld_lit(DisasContext *s, uint32_t insn) false, true, rt, iss_sf, false); } tcg_temp_free_i64(clean_addr); + gen_trace_store_reg(rt, false); } /* @@ -2993,7 +3116,13 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) tcg_gen_mov_i64(tcg_rt, tmp); tcg_temp_free_i64(tmp); + + gen_trace_store_reg(rt, false); + gen_trace_store_reg(rt2, false); } else { + gen_trace_load_reg(rt, false); + gen_trace_load_reg(rt2, false); + do_gpr_st(s, tcg_rt, clean_addr, size, false, 0, false, false); tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size); @@ -3007,6 +3136,7 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); } tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr); + gen_trace_store_reg(rn, true); } } @@ -3117,12 +3247,14 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { + gen_trace_load_reg(rt, false); do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx, iss_valid, rt, iss_sf, false); } else { do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, is_extended, memidx, iss_valid, rt, iss_sf, false); + gen_trace_store_reg(rt, false); } } @@ -3132,6 +3264,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9); } tcg_gen_mov_i64(tcg_rn, dirty_addr); + gen_trace_store_reg(rn, true); } } @@ -3222,11 +3355,13 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, TCGv_i64 tcg_rt = cpu_reg(s, rt); bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { + gen_trace_load_reg(rt, false); do_gpr_st(s, tcg_rt, clean_addr, size, true, rt, iss_sf, false); } else { do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, is_extended, true, rt, iss_sf, false); + gen_trace_store_reg(rt, false); } } } @@ -3306,11 +3441,13 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, TCGv_i64 
tcg_rt = cpu_reg(s, rt); bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { + gen_trace_load_reg(rt, false); do_gpr_st(s, tcg_rt, clean_addr, size, true, rt, iss_sf, false); } else { do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN, is_extended, true, rt, iss_sf, false); + gen_trace_store_reg(rt, false); } } } @@ -3390,6 +3527,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, if (rn == 31) { gen_check_sp_alignment(s); } + gen_trace_load_reg(rn, true); clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size); if (o3_opc == 014) { @@ -3403,6 +3541,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt, disas_ldst_compute_iss_sf(size, false, 0), true); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); + gen_trace_store_reg(rt, false); return; } @@ -3416,11 +3555,14 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, /* The tcg atomic primitives are all full barriers. Therefore we * can ignore the Acquire and Release bits of this instruction. */ + gen_trace_mem_access_atomic(clean_addr, get_mem_index(s), mop, false); fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop); + gen_trace_mem_access_atomic(clean_addr, get_mem_index(s), mop, true); if ((mop & MO_SIGN) && size != MO_64) { tcg_gen_ext32u_i64(tcg_rt, tcg_rt); } + gen_trace_store_reg(rt, false); } /* @@ -4182,6 +4324,7 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) } tcg_gen_movi_i64(cpu_reg(s, rd), base + offset); + gen_trace_store_reg(rd, false); } /* @@ -4208,6 +4351,7 @@ static void disas_add_sub_imm(DisasContext *s, uint32_t insn) bool is_64bit = extract32(insn, 31, 1); TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); + gen_trace_load_reg(rn, true); TCGv_i64 tcg_rd = setflags ? 
cpu_reg(s, rd) : cpu_reg_sp(s, rd); TCGv_i64 tcg_result; @@ -4238,6 +4382,8 @@ static void disas_add_sub_imm(DisasContext *s, uint32_t insn) tcg_gen_ext32u_i64(tcg_rd, tcg_result); } + gen_trace_store_reg(rd, !setflags); + tcg_temp_free_i64(tcg_result); } @@ -4408,6 +4554,7 @@ static void disas_logic_imm(DisasContext *s, uint32_t insn) tcg_rd = cpu_reg_sp(s, rd); } tcg_rn = cpu_reg(s, rn); + gen_trace_load_reg(rn, false); if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) { /* some immediate field values are reserved */ @@ -4443,6 +4590,8 @@ static void disas_logic_imm(DisasContext *s, uint32_t insn) tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + gen_trace_store_reg(rd, opc != 0x3); + if (opc == 3) { /* ANDS */ gen_logic_CC(sf, tcg_rd); } @@ -4489,6 +4638,7 @@ static void disas_movw_imm(DisasContext *s, uint32_t insn) break; case 3: /* MOVK */ tcg_imm = tcg_const_i64(imm); + gen_trace_load_reg(rd, false); tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16); tcg_temp_free_i64(tcg_imm); if (!sf) { @@ -4497,8 +4647,9 @@ static void disas_movw_imm(DisasContext *s, uint32_t insn) break; default: unallocated_encoding(s); - break; + return; } + gen_trace_store_reg(rd, false); } /* Bitfield @@ -4542,6 +4693,7 @@ static void disas_bitfield(DisasContext *s, uint32_t insn) goto done; } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */ tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len); + gen_trace_store_reg(rd, false); return; } /* opc == 1, BFXIL fall through to deposit */ @@ -4563,6 +4715,7 @@ static void disas_bitfield(DisasContext *s, uint32_t insn) len = ri; } + gen_trace_load_reg(rd, false); if (opc == 1) { /* BFM, BFXIL */ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len); } else { @@ -4570,6 +4723,7 @@ static void disas_bitfield(DisasContext *s, uint32_t insn) any bits outside bitsize, therefore the zero-extension below is unneeded. 
*/ tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len); + gen_trace_store_reg(rd, false); return; } @@ -4577,6 +4731,7 @@ static void disas_bitfield(DisasContext *s, uint32_t insn) if (!sf) { /* zero extend final result */ tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + gen_trace_store_reg(rd, false); } /* Extract @@ -4610,6 +4765,7 @@ static void disas_extract(DisasContext *s, uint32_t insn) /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, * so an extract from bit 0 is a special case. */ + gen_trace_load_reg(rm, false); if (sf) { tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm)); } else { @@ -4619,6 +4775,11 @@ static void disas_extract(DisasContext *s, uint32_t insn) tcg_rm = cpu_reg(s, rm); tcg_rn = cpu_reg(s, rn); + gen_trace_load_reg(rm, false); + if (rn != rm) { + gen_trace_load_reg(rn, false); + } + if (sf) { /* Specialization to ROR happens in EXTRACT2. */ tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm); @@ -4638,6 +4799,7 @@ static void disas_extract(DisasContext *s, uint32_t insn) tcg_temp_free_i32(t0); } } + gen_trace_store_reg(rd, false); } } @@ -4758,6 +4920,8 @@ static void disas_logic_reg(DisasContext *s, uint32_t insn) rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); + gen_trace_load_reg(rn, false); + if (!sf && (shift_amount & (1 << 5))) { unallocated_encoding(s); return; @@ -4770,6 +4934,7 @@ static void disas_logic_reg(DisasContext *s, uint32_t insn) * register-register MOV and MVN, so it is worth special casing. 
*/ tcg_rm = cpu_reg(s, rm); + gen_trace_load_reg(rm, false); if (invert) { tcg_gen_not_i64(tcg_rd, tcg_rm); if (!sf) { @@ -4782,6 +4947,7 @@ static void disas_logic_reg(DisasContext *s, uint32_t insn) tcg_gen_ext32u_i64(tcg_rd, tcg_rm); } } + gen_trace_store_reg(rd, false); return; } @@ -4823,6 +4989,8 @@ static void disas_logic_reg(DisasContext *s, uint32_t insn) tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + gen_trace_store_reg(rd, false); + if (opc == 3) { gen_logic_CC(sf, tcg_rd); } @@ -4900,6 +5068,8 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) } tcg_temp_free_i64(tcg_result); + + gen_trace_store_reg(rd, !setflags); } /* @@ -4964,6 +5134,8 @@ static void disas_add_sub_reg(DisasContext *s, uint32_t insn) } tcg_temp_free_i64(tcg_result); + + gen_trace_store_reg(rd, false); } /* Data-processing (3 source) @@ -5016,11 +5188,14 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) TCGv_i64 tcg_rn = cpu_reg(s, rn); TCGv_i64 tcg_rm = cpu_reg(s, rm); + gen_trace_load_reg(rn, false); + gen_trace_load_reg(rm, false); if (is_signed) { tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); } else { tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); } + gen_trace_store_reg(rd, false); tcg_temp_free_i64(low_bits); return; @@ -5030,6 +5205,8 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) tcg_op2 = tcg_temp_new_i64(); tcg_tmp = tcg_temp_new_i64(); + gen_trace_load_reg(rn, false); + gen_trace_load_reg(rm, false); if (op_id < 0x42) { tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn)); tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm)); @@ -5048,6 +5225,7 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2); } else { tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2); + gen_trace_load_reg(ra, false); if (is_sub) { tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); } else { @@ -5062,6 +5240,8 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) 
tcg_temp_free_i64(tcg_op1); tcg_temp_free_i64(tcg_op2); tcg_temp_free_i64(tcg_tmp); + + gen_trace_store_reg(rd, false); } /* Add/subtract (with carry) @@ -5220,8 +5400,10 @@ static void disas_cc(DisasContext *s, uint32_t insn) tcg_gen_movi_i64(tcg_y, y); } else { tcg_y = cpu_reg(s, y); + gen_trace_load_reg(y, false); } tcg_rn = cpu_reg(s, rn); + gen_trace_load_reg(rn, false); /* Set the flags for the new comparison. */ tcg_tmp = tcg_temp_new_i64(); @@ -5329,6 +5511,7 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) } else if (else_inc) { tcg_gen_addi_i64(t_false, t_false, 1); } + gen_trace_load_reg(rn, false); tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false); } @@ -5338,6 +5521,7 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) if (!sf) { tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + gen_trace_store_reg(rd, false); } static void handle_clz(DisasContext *s, unsigned int sf, @@ -5347,6 +5531,7 @@ static void handle_clz(DisasContext *s, unsigned int sf, tcg_rd = cpu_reg(s, rd); tcg_rn = cpu_reg(s, rn); + gen_trace_load_reg(rn, false); if (sf) { tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64); } else { @@ -5356,6 +5541,7 @@ static void handle_clz(DisasContext *s, unsigned int sf, tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_tmp32); } + gen_trace_store_reg(rd, false); } static void handle_cls(DisasContext *s, unsigned int sf, @@ -5365,6 +5551,7 @@ static void handle_cls(DisasContext *s, unsigned int sf, tcg_rd = cpu_reg(s, rd); tcg_rn = cpu_reg(s, rn); + gen_trace_load_reg(rn, false); if (sf) { tcg_gen_clrsb_i64(tcg_rd, tcg_rn); } else { @@ -5374,6 +5561,7 @@ static void handle_cls(DisasContext *s, unsigned int sf, tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_tmp32); } + gen_trace_store_reg(rd, false); } static void handle_rbit(DisasContext *s, unsigned int sf, @@ -5383,6 +5571,7 @@ static void handle_rbit(DisasContext *s, unsigned int sf, tcg_rd = cpu_reg(s, rd); tcg_rn = cpu_reg(s, rn); + 
gen_trace_load_reg(rn, false); if (sf) { gen_helper_rbit64(tcg_rd, tcg_rn); } else { @@ -5392,6 +5581,7 @@ static void handle_rbit(DisasContext *s, unsigned int sf, tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_tmp32); } + gen_trace_store_reg(rd, false); } /* REV with sf==1, opcode==3 ("REV64") */ @@ -5402,7 +5592,9 @@ static void handle_rev64(DisasContext *s, unsigned int sf, unallocated_encoding(s); return; } + gen_trace_load_reg(rn, false); tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn)); + gen_trace_store_reg(rd, false); } /* REV with sf==0, opcode==2 @@ -5414,12 +5606,14 @@ static void handle_rev32(DisasContext *s, unsigned int sf, TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_rn = cpu_reg(s, rn); + gen_trace_load_reg(rn, false); if (sf) { tcg_gen_bswap64_i64(tcg_rd, tcg_rn); tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32); } else { tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ); } + gen_trace_store_reg(rd, false); } /* REV16 (opcode==1) */ @@ -5439,6 +5633,7 @@ static void handle_rev16(DisasContext *s, unsigned int sf, tcg_temp_free_i64(mask); tcg_temp_free_i64(tcg_tmp); + gen_trace_store_reg(rd, false); } /* Data-processing (1 source) @@ -5649,6 +5844,8 @@ static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, tcg_rd = cpu_reg(s, rd); if (!sf && is_signed) { + gen_trace_load_reg(rn, false); + gen_trace_load_reg(rm, false); tcg_n = new_tmp_a64(s); tcg_m = new_tmp_a64(s); tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn)); @@ -5667,6 +5864,7 @@ static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, if (!sf) { /* zero extend final result */ tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + gen_trace_store_reg(rd, false); } /* LSLV, LSRV, ASRV, RORV */ @@ -5678,9 +5876,11 @@ static void handle_shift_reg(DisasContext *s, TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); + gen_trace_load_reg(rm, false); tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 
63 : 31); shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift); tcg_temp_free_i64(tcg_shift); + gen_trace_store_reg(rd, false); } /* CRC32[BHWX], CRC32C[BHWX] */ @@ -14825,6 +15025,10 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) } } +#ifdef HAS_TRACEWRAP + gen_trace_newframe(s); +#endif + switch (extract32(insn, 25, 4)) { case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ unallocated_encoding(s); @@ -14870,6 +15074,10 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) reset_btype(s); } +#ifdef HAS_TRACEWRAP + gen_trace_endframe(s); +#endif + translator_loop_temp_check(&s->base); } diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c index 7486756c7a26..00d81f1b61ae 100644 --- a/target/arm/translate-vfp.c +++ b/target/arm/translate-vfp.c @@ -40,9 +40,9 @@ static inline void vfp_load_reg64(TCGv_i64 var, int reg) { tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(reg); + TCGv_i32 t = tcg_const_i32(REG64_D0 + reg); gen_helper_trace_load_reg64(t, var); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } @@ -50,9 +50,9 @@ static inline void vfp_store_reg64(TCGv_i64 var, int reg) { tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(reg); + TCGv_i32 t = tcg_const_i32(REG64_D0 + reg); gen_helper_trace_store_reg64(t, var); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } @@ -60,9 +60,9 @@ static inline void vfp_load_reg32(TCGv_i32 var, int reg) { tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(REG_S0 + reg); + TCGv_i32 t = tcg_const_i32(REG_S0 + reg); gen_helper_trace_load_reg(t, var); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } @@ -70,9 +70,9 @@ static inline void vfp_store_reg32(TCGv_i32 var, int reg) { tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(REG_S0 + reg); 
+ TCGv_i32 t = tcg_const_i32(REG_S0 + reg); gen_helper_trace_store_reg(t, var); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } diff --git a/target/arm/translate.c b/target/arm/translate.c index 611383bedeed..5b6020e19fab 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -68,10 +68,8 @@ static const char * const regnames[] = #ifdef HAS_TRACEWRAP #include -/* Set to 1 if cpsr contents have already been written for the current instruction. */ -static int loaded_cpsr = 0; -/* Set to 1 if an instruction affects cpsr. */ -static int store_cpsr = 0; +uint32_t loaded_cpsr = 0; +uint32_t store_cpsr = 0; #endif //HAS_TRACEWRAP /* initialize TCG globals. */ @@ -263,36 +261,28 @@ static uint32_t read_pc(DisasContext *s) #ifdef HAS_TRACEWRAP static void gen_trace_load_reg(int reg, TCGv_i32 var) { - TCGv t = tcg_const_i32(reg); + TCGv_i32 t = tcg_const_i32(reg); gen_helper_trace_load_reg(t, var); - tcg_temp_free(t); + tcg_temp_free_i32(t); } static void gen_trace_store_reg(int reg, TCGv_i32 var) { - TCGv t = tcg_const_i32(reg); + TCGv_i32 t = tcg_const_i32(reg); gen_helper_trace_store_reg(t, var); - tcg_temp_free(t); -} - -static void trace_read_cpsr(void) -{ - if (loaded_cpsr) { - return; - } - gen_helper_log_read_cpsr(cpu_env); - loaded_cpsr = 1; + tcg_temp_free_i32(t); } -static void trace_store_cpsr(void) +// Deprecated, use trace_read_cpsr() with a fine-grained mask +static void trace_read_cpsr_all(void) { - store_cpsr = 1; + trace_read_cpsr(TRACE_CPSR_ALL); } -static void trace_instr_state_reset(void) +// Deprecated, use trace_store_cpsr() with a fine-grained mask +static void trace_store_cpsr_all(void) { - loaded_cpsr = 0; - store_cpsr = 0; + trace_store_cpsr(TRACE_CPSR_ALL); } #endif //HAS_TRACEWRAP @@ -321,7 +311,7 @@ TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs) if (reg == 15) { tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs); #ifdef HAS_TRACEWRAP - TCGv pc_tmp = tcg_const_i32(read_pc(s)); + TCGv_i32 pc_tmp = 
tcg_const_i32(read_pc(s)); gen_trace_load_reg(reg, pc_tmp); tcg_temp_free_i32(pc_tmp); #endif //HAS_TRACEWRAP @@ -484,8 +474,8 @@ static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) static inline void gen_logic_CC(TCGv_i32 var) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif //HAS_TRACEWRAP tcg_gen_mov_i32(cpu_NF, var); tcg_gen_mov_i32(cpu_ZF, var); @@ -495,7 +485,7 @@ static inline void gen_logic_CC(TCGv_i32 var) static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); + trace_read_cpsr_all(); #endif //HAS_TRACEWRAP tcg_gen_add_i32(dest, t0, t1); tcg_gen_add_i32(dest, dest, cpu_CF); @@ -505,7 +495,7 @@ static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); + trace_read_cpsr_all(); #endif //HAS_TRACEWRAP tcg_gen_sub_i32(dest, t0, t1); tcg_gen_add_i32(dest, dest, cpu_CF); @@ -516,8 +506,8 @@ static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif //HAS_TRACEWRAP TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); @@ -534,8 +524,8 @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif //HAS_TRACEWRAP TCGv_i32 tmp = tcg_temp_new_i32(); if (TCG_TARGET_HAS_add2_i32) { @@ -566,8 +556,8 @@ static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + 
trace_store_cpsr_all(); #endif //HAS_TRACEWRAP TCGv_i32 tmp; tcg_gen_sub_i32(cpu_NF, t0, t1); @@ -631,8 +621,8 @@ static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop, int shift, int flags) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif //HAS_TRACEWRAP switch (shiftop) { case 0: /* LSL */ @@ -718,29 +708,34 @@ void arm_test_cc(DisasCompare *cmp, int cc) TCGCond cond; bool global = true; + uint32_t cpsr_mask = 0; switch (cc) { case 0: /* eq: Z */ case 1: /* ne: !Z */ cond = TCG_COND_EQ; value = cpu_ZF; + cpsr_mask = TRACE_CPSR_ZF; break; case 2: /* cs: C */ case 3: /* cc: !C */ cond = TCG_COND_NE; value = cpu_CF; + cpsr_mask = TRACE_CPSR_CF; break; case 4: /* mi: N */ case 5: /* pl: !N */ cond = TCG_COND_LT; value = cpu_NF; + cpsr_mask = TRACE_CPSR_NF; break; case 6: /* vs: V */ case 7: /* vc: !V */ cond = TCG_COND_LT; value = cpu_VF; + cpsr_mask = TRACE_CPSR_VF; break; case 8: /* hi: C && !Z */ @@ -752,6 +747,7 @@ void arm_test_cc(DisasCompare *cmp, int cc) ZF is non-zero for !Z; so AND the two subexpressions. 
*/ tcg_gen_neg_i32(value, cpu_CF); tcg_gen_and_i32(value, value, cpu_ZF); + cpsr_mask = TRACE_CPSR_CF | TRACE_CPSR_ZF; break; case 10: /* ge: N == V -> N ^ V == 0 */ @@ -761,6 +757,7 @@ void arm_test_cc(DisasCompare *cmp, int cc) value = tcg_temp_new_i32(); global = false; tcg_gen_xor_i32(value, cpu_VF, cpu_NF); + cpsr_mask = TRACE_CPSR_VF | TRACE_CPSR_NF; break; case 12: /* gt: !Z && N == V */ @@ -773,6 +770,7 @@ void arm_test_cc(DisasCompare *cmp, int cc) tcg_gen_xor_i32(value, cpu_VF, cpu_NF); tcg_gen_sari_i32(value, value, 31); tcg_gen_andc_i32(value, cpu_ZF, value); + cpsr_mask = TRACE_CPSR_VF | TRACE_CPSR_NF | TRACE_CPSR_ZF; break; case 14: /* always */ @@ -793,7 +791,9 @@ void arm_test_cc(DisasCompare *cmp, int cc) } #ifdef HAS_TRACEWRAP - trace_read_cpsr(); + trace_read_cpsr(cpsr_mask); +#else + (void)cpsr_mask; #endif no_invert: @@ -1075,9 +1075,9 @@ void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, { gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(opc); + TCGv_i32 t = tcg_const_i32(opc); gen_helper_trace_ld(cpu_env, val, a32, t); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } @@ -1086,9 +1086,9 @@ void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, { gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(opc); + TCGv_i32 t = tcg_const_i32(opc); gen_helper_trace_st(cpu_env, val, a32, t); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } @@ -1097,9 +1097,9 @@ void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, { gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(opc); + TCGv_i32 t = tcg_const_i32(opc); gen_helper_trace_ld64(cpu_env, val, a32, t); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } @@ -1108,9 +1108,9 @@ void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, { gen_aa32_st_internal_i64(s, 
val, a32, index, finalize_memop(s, opc)); #ifdef HAS_TRACEWRAP - TCGv t = tcg_const_i32(opc); + TCGv_i32 t = tcg_const_i32(opc); gen_helper_trace_st64(cpu_env, val, a32, t); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif } @@ -2670,21 +2670,13 @@ static void gen_goto_ptr(void) #ifdef HAS_TRACEWRAP static inline void gen_trace_newframe(DisasContext *s) { - TCGv t = tcg_const_i32(s->pc_curr); + TCGv_i32 t = tcg_const_i32(s->pc_curr); gen_helper_trace_newframe(t); - tcg_temp_free(t); + tcg_temp_free_i32(t); TCGv_ptr mt = tcg_const_ptr(s->thumb ? FRAME_MODE_ARM_T32 : FRAME_MODE_ARM_A32); gen_helper_trace_mode(mt); tcg_temp_free_ptr(mt); - trace_instr_state_reset(); -} - -static inline void gen_trace_store_cpsr(void) -{ - if (!store_cpsr) { - return; - } - gen_helper_log_store_cpsr(cpu_env); + trace_cpsr_reset(); } static inline void gen_trace_endframe(DisasContext *s) @@ -5072,7 +5064,7 @@ static void disas_xscale_insn(DisasContext *s, uint32_t insn) } } #ifdef HAS_TRACEWRAP - trace_store_cpsr(); + trace_store_cpsr_all(); #endif //HAS_TRACEWRAP } @@ -5110,8 +5102,8 @@ static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi) { #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif //HAS_TRACEWRAP tcg_gen_mov_i32(cpu_NF, hi); tcg_gen_or_i32(cpu_ZF, lo, hi); @@ -5221,13 +5213,13 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64, get_mem_index(s), opc); #ifdef HAS_TRACEWRAP - TCGv mot = tcg_const_i32(opc); - gen_helper_trace_st(cpu_env, t1, taddr, mot); - TCGv taddr2 = tcg_temp_new_i32(); - tcg_gen_addi_i32(taddr2, taddr, 4); - gen_helper_trace_st(cpu_env, t2, taddr2, mot); - tcg_temp_free(taddr2); - tcg_temp_free(mot); + TCGv_i32 mot = tcg_const_i32(opc); + gen_helper_trace_st(cpu_env, t1, addr, mot); + TCGv_i32 addr2 = tcg_temp_new_i32(); + 
tcg_gen_addi_i32(addr2, addr, 4); + gen_helper_trace_st(cpu_env, t2, addr2, mot); + tcg_temp_free_i32(addr2); + tcg_temp_free_i32(mot); #endif //HAS_TRACEWRAP tcg_temp_free_i64(n64); @@ -5240,9 +5232,9 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val); tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc); #ifdef HAS_TRACEWRAP - TCGv mot = tcg_const_i32(opc); - gen_helper_trace_st(cpu_env, t1, taddr, mot); - tcg_temp_free(mot); + TCGv_i32 mot = tcg_const_i32(opc); + gen_helper_trace_st(cpu_env, t1, addr, mot); + tcg_temp_free_i32(mot); #endif //HAS_TRACEWRAP tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2); tcg_temp_free_i32(t2); @@ -6273,8 +6265,8 @@ static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub) gen_helper_sub_saturate(t0, cpu_env, t0, t1); } #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); @@ -6321,8 +6313,8 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, case 1: t1 = load_reg(s, a->ra); #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif gen_helper_add_setq(t0, cpu_env, t0, t1); tcg_temp_free_i32(t1); @@ -6393,8 +6385,8 @@ static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt) if (add) { t0 = load_reg(s, a->ra); #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif gen_helper_add_setq(t1, cpu_env, t1, t0); tcg_temp_free_i32(t0); @@ -6565,7 +6557,7 @@ static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a) tmp = tcg_temp_new_i32(); gen_helper_cpsr_read(tmp, cpu_env); #ifdef HAS_TRACEWRAP - trace_read_cpsr(); + trace_read_cpsr_all(); #endif } store_reg(s, a->rd, tmp); @@ -6585,8 +6577,8 @@ static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a) unallocated_encoding(s); } 
#ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif return true; } @@ -6663,9 +6655,9 @@ static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a) tmp = load_reg(s, a->rm); target_ulong lr = s->base.pc_next | s->thumb; #ifdef HAS_TRACEWRAP - TCGv t = tcg_constant_i32(lr); + TCGv_i32 t = tcg_constant_i32(lr); gen_trace_store_reg(14, t); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif tcg_gen_movi_i32(cpu_R[14], lr); gen_bx(s, tmp); @@ -7815,8 +7807,8 @@ static bool op_sat(DisasContext *s, arg_sat *a, } #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif satimm = tcg_const_i32(a->satimm); gen(tmp, cpu_env, tmp, satimm); @@ -7926,7 +7918,7 @@ static bool trans_SEL(DisasContext *s, arg_rrr *a) } #ifdef HAS_TRACEWRAP - trace_read_cpsr(); + trace_read_cpsr_all(); #endif t1 = load_reg(s, a->rn); @@ -8014,8 +8006,8 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) if (a->ra != 15) { t2 = load_reg(s, a->ra); #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif gen_helper_add_setq(t1, cpu_env, t1, t2); tcg_temp_free_i32(t2); @@ -8023,8 +8015,8 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) } else if (a->ra == 15) { /* Single saturation-checking addition */ #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif gen_helper_add_setq(t1, cpu_env, t1, t2); tcg_temp_free_i32(t2); @@ -8059,8 +8051,8 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) t3 = tcg_temp_new_i32(); tcg_gen_sari_i32(t3, t1, 31); #ifdef HAS_TRACEWRAP - trace_read_cpsr(); - trace_store_cpsr(); + trace_read_cpsr_all(); + trace_store_cpsr_all(); #endif qf = load_cpu_field(QF); one = tcg_constant_i32(1); @@ -8572,9 +8564,9 @@ static bool trans_BL(DisasContext 
*s, arg_i *a) int32_t lr = s->base.pc_next | s->thumb; tcg_gen_movi_i32(cpu_R[14], lr); #ifdef HAS_TRACEWRAP - TCGv t = tcg_constant_i32(lr); + TCGv_i32 t = tcg_constant_i32(lr); gen_trace_store_reg(14, t); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif //HAS_TRACEWRAP gen_jmp(s, read_pc(s) + a->imm); return true; @@ -8596,9 +8588,9 @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a) } int32_t lr = s->base.pc_next | s->thumb; #ifdef HAS_TRACEWRAP - TCGv t = tcg_constant_i32(lr); + TCGv_i32 t = tcg_constant_i32(lr); gen_trace_store_reg(14, t); - tcg_temp_free(t); + tcg_temp_free_i32(t); #endif tcg_gen_movi_i32(cpu_R[14], lr); store_cpu_field_constant(!s->thumb, thumb); @@ -9800,7 +9792,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) #endif //HAS_TRACEWRAP disas_arm_insn(dc, insn); #ifdef HAS_TRACEWRAP - gen_trace_store_cpsr(); + gen_trace_flush_cpsr(); #endif //HAS_TRACEWRAP arm_post_translate_insn(dc); @@ -9957,7 +9949,7 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) } #ifdef HAS_TRACEWRAP - gen_trace_store_cpsr(); + gen_trace_flush_cpsr(); #endif //HAS_TRACEWRAP if (dc->eci && !dc->eci_handled) { diff --git a/target/arm/translate.h b/target/arm/translate.h index ea54a7d3b762..9d38d4709371 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -584,4 +584,48 @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc) */ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op); +#ifdef HAS_TRACEWRAP +// Mask of TRACE_CPSR_* bits of which flags have already been created as read operands +extern uint32_t loaded_cpsr; +// Mask of TRACE_CPSR_* bits of which flags should be created as write operands when ending the frame +extern uint32_t store_cpsr; + +static inline void trace_cpsr_reset(void) +{ + loaded_cpsr = 0; + store_cpsr = 0; +} + +static inline void trace_read_cpsr(uint32_t mask) +{ + uint32_t new_flags = mask & ~loaded_cpsr; + if (!new_flags) { + return; + } + TCGv_i32 
t = tcg_const_i32(new_flags); + gen_helper_trace_read_cpsr(cpu_env, t); + tcg_temp_free_i32(t); + loaded_cpsr |= mask; +} + +static inline void trace_store_cpsr(uint32_t mask) +{ + store_cpsr |= mask; +} + +static inline void gen_trace_flush_cpsr(void) +{ + if (!store_cpsr) { + return; + } + TCGv_i32 t = tcg_const_i32(store_cpsr); + gen_helper_trace_store_cpsr(cpu_env, t); + tcg_temp_free_i32(t); + store_cpsr = 0; +} +#else //HAS_TRACEWRAP +static inline void trace_read_cpsr(uint32_t mask) {} +static inline void trace_store_cpsr(uint32_t mask) {} +#endif //HAS_TRACEWRAP + #endif /* TARGET_ARM_TRANSLATE_H */ diff --git a/tracewrap.c b/tracewrap.c index a6d96a0e1326..32ca3a07d2d1 100644 --- a/tracewrap.c +++ b/tracewrap.c @@ -249,7 +249,7 @@ void qemu_trace_init(const char *filename, void qemu_trace_newframe(target_ulong addr, int __unused/*thread_id*/ ) { int thread_id = 1; if (open_frame) { - qemu_log("frame is still open"); + qemu_log("Error: frame is still open when reaching pc 0x%llx!\n", (unsigned long long)addr); qemu_trace_endframe(NULL, 0, 0); }