From 49ab1ace75aeb1d0dd6940cc91c29fd6db4a02dc Mon Sep 17 00:00:00 2001 From: Yen-Fu Chen Date: Tue, 13 Feb 2024 12:17:43 +0800 Subject: [PATCH] Introducing local register allocation for the tier-1 JIT compiler Local register allocation effectively reuses the host register value within a basic block scope, thereby reducing the number of load and store instructions. Take continuous addi instructions as an example: addi t0, t0, 1 addi t0, t0, 1 addi t0, t0, 1 * The generated machine code without register allocation load t0, t0_addr add t0, 1 sw t0, t0_addr load t0, t0_addr add t0, 1 sw t0, t0_addr load t0, t0_addr add t0, 1 sw t0, t0_addr * The generated machine code with register allocation load t0, t0_addr add t0, 1 add t0, 1 add t0, 1 sw t0, t0_addr As shown in the above example, register allocation reuses the host register and reduces the number of load and store instructions. * x86-64(i7-11700) | Metric | W/O RA | W/ RA | SpeedUp | |----------+----------+----------+---------| | dhrystone| 0.342 s | 0.328 s | +4.27% | | miniz | 1.243 s | 1.185 s | +4.89% | | primes | 1.716 s | 1.689 s | +1.60% | | sha512 | 2.063 s | 1.880 s | +9.73% | | stream |11.619 s |11.419 s | +1.75% | * Aarch64 (eMag) | Metric | W/O RA | W/ RA | SpeedUp | |----------+----------+----------+---------| | dhrystone| 1.935 s | 1.301 s | +48.73% | | miniz | 7.706 s | 4.362 s | +76.66% | | primes |10.513 s | 9.633 s | +9.14% | | sha512 | 6.508 s | 6.119 s | +6.36% | | stream |45.174 s |38.037 s | +18.76% | As demonstrated in the performance analysis, the register allocation improves the overall performance for the T1C generated machine code. Without RA, the generated machine code needs to store back the register value at the end of each instruction. With RA, we only need to store back the register value at the end of a basic block or when host registers are fully occupied. 
The performance enhancement is particularly pronounced on Aarch64 due to its increased availability of registers, providing a more extensive mapping capability for VM registers. --- src/jit.c | 318 ++++++++++----- src/riscv.c | 2 +- src/rv32_template.c | 789 ++++++++++++++++++++------------------ tools/gen-jit-template.py | 40 +- 4 files changed, 670 insertions(+), 479 deletions(-) diff --git a/src/jit.c b/src/jit.c index 5af3155f..dea92c1e 100644 --- a/src/jit.c +++ b/src/jit.c @@ -145,6 +145,7 @@ typedef enum { LS_LDRSHW = 0x40c00000U, // 0100_0000_1100_0000_0000_0000_0000_0000 LS_STRW = 0x80000000U, // 1000_0000_0000_0000_0000_0000_0000_0000 LS_LDRW = 0x80400000U, // 1000_0000_0100_0000_0000_0000_0000_0000 + LS_LDRSW = 0x80800000U, // 1000_0000_1000_0000_0000_0000_0000_0000 LS_STRX = 0xc0000000U, // 1100_0000_0000_0000_0000_0000_0000_0000 LS_LDRX = 0xc0400000U, // 1100_0000_0100_0000_0000_0000_0000_0000 /* LoadStorePairOpcode */ @@ -187,21 +188,6 @@ enum { }; #endif -enum vm_reg { - VM_REG_0 = 0, - VM_REG_1, - VM_REG_2, - VM_REG_3, - VM_REG_4, - VM_REG_5, - VM_REG_6, - VM_REG_7, - VM_REG_8, - VM_REG_9, - VM_REG_10, - N_VM_REGS, -}; - enum operand_size { S8, S16, @@ -219,18 +205,17 @@ enum operand_size { #if defined(_WIN32) static const int nonvolatile_reg[] = {RBP, RBX, RDI, RSI, R13, R14, R15}; static const int parameter_reg[] = {RCX, RDX, R8, R9}; -#define RCX_ALT R10 static const int register_map[] = { RAX, R10, RDX, R8, R9, R14, R15, RDI, RSI, RBX, RBP, }; +static int temp_reg = RCX; #else -#define RCX_ALT R9 static const int nonvolatile_reg[] = {RBP, RBX, R13, R14, R15}; static const int parameter_reg[] = {RDI, RSI, RDX, RCX, R8, R9}; -static const int temp_reg[] = {RAX, RBX, RCX}; static const int register_map[] = { - RAX, RDI, RSI, RDX, R9, R8, RBX, R13, R14, R15, RBP, + RAX, RBX, RDX, R8, R9, R10, R11, R13, R14, R15, }; +static int temp_reg = RCX; #endif #elif defined(__aarch64__) /* callee_reg - this must be a multiple of two because of how we save the 
stack @@ -238,7 +223,7 @@ static const int register_map[] = { static const int callee_reg[] = {R19, R20, R21, R22, R23, R24, R25, R26}; /* parameter_reg (Caller saved registers) */ static const int parameter_reg[] = {R0, R1, R2, R3, R4}; -static const int temp_reg[] = {R6, R7, R8}; +static int temp_reg = R8; /* Register assignments: * Arm64 Usage @@ -250,20 +235,11 @@ static const int temp_reg[] = {R6, R7, R8}; */ static const int register_map[] = { - R5, /* result */ - R0, R1, R2, R3, R4, /* parameters */ - R19, R20, R21, R22, R23, /* callee-saved */ + R5, R6, R7, R9, R11, R12, R13, R14, R15, R16, R17, R18, R26, }; static inline void emit_load_imm(struct jit_state *state, int dst, int64_t imm); #endif -/* Return the register for the given JIT register */ -static int map_register(int r) -{ - assert(r < N_VM_REGS); - return register_map[r % N_VM_REGS]; -} - static inline void offset_map_insert(struct jit_state *state, int32_t target_pc) { struct offset_map *map_entry = &state->offset_map[state->n_insn++]; @@ -365,13 +341,15 @@ static inline void emit_basic_rex(struct jit_state *state, static inline void emit_push(struct jit_state *state, int r) { - emit_basic_rex(state, 0, 0, r); + if (r & 8) + emit_basic_rex(state, 0, 0, r); emit1(state, 0x50 | (r & 7)); } static inline void emit_pop(struct jit_state *state, int r) { - emit_basic_rex(state, 0, 0, r); + if (r & 8) + emit_basic_rex(state, 0, 0, r); emit1(state, 0x58 | (r & 7)); } @@ -591,7 +569,8 @@ static inline void emit_alu32(struct jit_state *state, int op, int src, int dst) * The MR encoding is utilized when a choice is available. The 'src' is * often used as an opcode extension. 
*/ - emit_basic_rex(state, 0, src, dst); + if (src & 8 || dst & 8) + emit_basic_rex(state, 0, src, dst); emit1(state, op); emit_modrm_reg2reg(state, src, dst); #elif defined(__aarch64__) @@ -613,11 +592,11 @@ static inline void emit_alu32(struct jit_state *state, int op, int src, int dst) break; case 0xd3: if (src == 4) /* SLL */ - emit_dataproc_2source(state, false, DP2_LSLV, dst, dst, R8); + emit_dataproc_2source(state, false, DP2_LSLV, dst, dst, temp_reg); else if (src == 5) /* SRL */ - emit_dataproc_2source(state, false, DP2_LSRV, dst, dst, R8); + emit_dataproc_2source(state, false, DP2_LSRV, dst, dst, temp_reg); else if (src == 7) /* SRA */ - emit_dataproc_2source(state, false, DP2_ASRV, dst, dst, R8); + emit_dataproc_2source(state, false, DP2_ASRV, dst, dst, temp_reg); break; default: __UNREACHABLE; @@ -652,7 +631,7 @@ static inline void emit_alu32_imm32(struct jit_state *state, break; case 6: emit_load_imm(state, R10, imm); - emit_logical_register(state, false, LOG_EOR, dst, src, R10); + emit_logical_register(state, false, LOG_EOR, dst, dst, R10); break; default: __UNREACHABLE; @@ -726,13 +705,19 @@ static inline void emit_alu64_imm8(struct jit_state *state, #endif } -#if defined(__x86_64__) /* Register to register mov */ static inline void emit_mov(struct jit_state *state, int src, int dst) { +#if defined(__x86_64__) emit_alu64(state, 0x89, src, dst); +#elif defined(__aarch64__) + emit_load_imm(state, R10, 0); + emit_addsub_register(state, false, AS_ADD, dst, src, R10); +#endif } + +#if defined(__x86_64__) /* REX.W prefix, ModRM byte, and 32-bit immediate */ static inline void emit_alu64_imm32(struct jit_state *state, int op, @@ -825,6 +810,8 @@ static inline void emit_load(struct jit_state *state, int32_t offset) { #if defined(__x86_64__) + if (src & 8 || dst & 8) + emit_basic_rex(state, 0, dst, src); if (size == S8 || size == S16) { /* movzx */ emit1(state, 0x0f); @@ -861,6 +848,8 @@ static inline void emit_load_sext(struct jit_state *state, { #if 
defined(__x86_64__) if (size == S8 || size == S16) { + if (src & 8 || dst & 8) + emit_basic_rex(state, 0, dst, src); /* movsx */ emit1(state, 0x0f); emit1(state, size == S8 ? 0xbe : 0xbf); @@ -878,6 +867,9 @@ static inline void emit_load_sext(struct jit_state *state, case S16: emit_loadstore_imm(state, LS_LDRSHW, dst, src, offset); break; + case S32: + emit_loadstore_imm(state, LS_LDRSW, dst, src, offset); + break; default: __UNREACHABLE; break; @@ -915,6 +907,8 @@ static inline void emit_store(struct jit_state *state, #if defined(__x86_64__) if (size == S16) emit1(state, 0x66); /* 16-bit override */ + if (src & 8 || dst & 8 || size == S8) + emit_rex(state, 0, !!(src & 8), 0, !!(dst & 8)); emit1(state, size == S8 ? 0x88 : 0x89); emit_modrm_and_displacement(state, src, dst, offset); #elif defined(__aarch64__) @@ -996,10 +990,7 @@ static inline void emit_call(struct jit_state *state, intptr_t target) emit_movewide_imm(state, true, temp_imm_reg, target); emit_uncond_branch_reg(state, BR_BLR, temp_imm_reg); - int dest = map_register(0); - if (dest != R0) { - emit_logical_register(state, true, LOG_ORR, dest, RZ, R0); - } + emit_logical_register(state, true, LOG_ORR, R5, RZ, R0); emit_loadstore_imm(state, LS_LDRX, R30, SP, 0); emit_addsub_imm(state, true, AS_ADD, SP, SP, stack_movement); @@ -1182,7 +1173,7 @@ static void prepare_translate(struct jit_state *state) emit_alu64_imm32(state, 0x81, 5, RSP, 0x8); /* Set JIT R10 (the way to access the frame in JIT) to match RSP. */ - emit_mov(state, RSP, map_register(VM_REG_10)); + emit_mov(state, RSP, RBP); /* Allocate stack space */ emit_alu64_imm32(state, 0x81, 5, RSP, STACK_SIZE); @@ -1200,12 +1191,8 @@ static void prepare_translate(struct jit_state *state) /* Epilogue */ state->exit_loc = state->offset; - /* Move register 0 into rax */ - if (map_register(VM_REG_0) != RAX) - emit_mov(state, map_register(VM_REG_0), RAX); - /* Deallocate stack space by restoring RSP from JIT R10. 
*/ - emit_mov(state, map_register(VM_REG_10), RSP); + emit_mov(state, RBP, RSP); if (!(ARRAYS_SIZE(nonvolatile_reg) % 2)) emit_alu64_imm32(state, 0x81, 0, RSP, 0x8); @@ -1236,11 +1223,6 @@ static void prepare_translate(struct jit_state *state) /* Epilogue */ state->exit_loc = state->offset; - /* Move register 0 into R0 */ - if (map_register(0) != R0) { - emit_logical_register(state, true, LOG_ORR, R0, RZ, map_register(0)); - } - /* Restore callee-saved registers). */ for (size_t i = 0; i < ARRAYS_SIZE(callee_reg); i += 2) { emit_loadstorepair_imm(state, LSP_LDPX, callee_reg[i], @@ -1252,6 +1234,146 @@ static void prepare_translate(struct jit_state *state) #endif } + +static int n_reg = + ARRAYS_SIZE(register_map); /* the number of avavliable host register */ +static int count = 0; +static int reg_table[32]; +static int vm_reg[3] = {0}; + +static void reset_reg() +{ + count = 0; + for (int i = 0; i < 32; i++) { + reg_table[i] = -1; + } +} + +static void store_back(struct jit_state *state) +{ + for (int i = 0; i < 32; i++) { + if (reg_table[i] != -1) { + emit_store(state, S32, reg_table[i], parameter_reg[0], + offsetof(riscv_t, X) + 4 * i); + } + } +} + +FORCE_INLINE void store_back_target(struct jit_state *state, int target_reg) +{ + for (int i = 0; i < 32; i++) { + if (reg_table[i] == target_reg) { + reg_table[i] = -1; + emit_store(state, S32, target_reg, parameter_reg[0], + offsetof(riscv_t, X) + 4 * i); + return; + } + } +} + +static int map_reg(struct jit_state *state, int reg_number) +{ + int target_reg = -1; + if (reg_table[reg_number] != -1) + return reg_table[reg_number]; + count = (count + 1) % n_reg; + target_reg = register_map[count]; + store_back_target(state, target_reg); + reg_table[reg_number] = target_reg; + return target_reg; +} + +static int ra_load(struct jit_state *state, int reg_number) +{ + if (reg_table[reg_number] != -1) + return reg_table[reg_number]; + count = (count + 1) % n_reg; + int target_reg = register_map[count]; + 
store_back_target(state, target_reg); + reg_table[reg_number] = target_reg; + emit_load(state, S32, parameter_reg[0], reg_table[reg_number], + offsetof(riscv_t, X) + 4 * reg_number); + return target_reg; +} + +static void ra_load2(struct jit_state *state, int reg_number1, int reg_number2) +{ + if (reg_number1 == reg_number2) { + vm_reg[1] = vm_reg[0] = ra_load(state, reg_number1); + return; + } + vm_reg[0] = reg_table[reg_number1]; + vm_reg[1] = reg_table[reg_number2]; + if (vm_reg[0] == -1) { + while (vm_reg[0] == -1 || vm_reg[0] == vm_reg[1]) { + count = (count + 1) % n_reg; + vm_reg[0] = register_map[count]; + } + store_back_target(state, vm_reg[0]); + reg_table[reg_number1] = vm_reg[0]; + emit_load(state, S32, parameter_reg[0], reg_table[reg_number1], + offsetof(riscv_t, X) + 4 * reg_number1); + } + if (vm_reg[1] == -1) { + while (vm_reg[1] == -1 || vm_reg[0] == vm_reg[1]) { + count = (count + 1) % n_reg; + vm_reg[1] = register_map[count]; + } + store_back_target(state, vm_reg[1]); + reg_table[reg_number2] = vm_reg[1]; + emit_load(state, S32, parameter_reg[0], reg_table[reg_number2], + offsetof(riscv_t, X) + 4 * reg_number2); + } +} + +static void ra_load2_sext(struct jit_state *state, + int reg_number1, + int reg_number2, + bool sext1, + bool sext2) +{ + vm_reg[0] = reg_table[reg_number1]; + vm_reg[1] = reg_table[reg_number2]; + if (vm_reg[0] == -1) { + while (vm_reg[0] == -1 || vm_reg[0] == vm_reg[1]) { + count = (count + 1) % n_reg; + vm_reg[0] = register_map[count]; + } + store_back_target(state, vm_reg[0]); + reg_table[reg_number1] = vm_reg[0]; + if (sext1) + emit_load_sext(state, S32, parameter_reg[0], reg_table[reg_number1], + offsetof(riscv_t, X) + 4 * reg_number1); + else + emit_load(state, S32, parameter_reg[0], reg_table[reg_number1], + offsetof(riscv_t, X) + 4 * reg_number1); + } else if (sext1) { + emit_store(state, S32, reg_table[reg_number1], parameter_reg[0], + offsetof(riscv_t, X) + 4 * reg_number1); + emit_load_sext(state, S32, 
parameter_reg[0], reg_table[reg_number1], + offsetof(riscv_t, X) + 4 * reg_number1); + } + if (vm_reg[1] == -1) { + while (vm_reg[1] == -1 || vm_reg[1] == vm_reg[0]) { + count = (count + 1) % n_reg; + vm_reg[1] = register_map[count]; + } + store_back_target(state, vm_reg[1]); + reg_table[reg_number2] = vm_reg[1]; + if (sext2) + emit_load_sext(state, S32, parameter_reg[0], reg_table[reg_number2], + offsetof(riscv_t, X) + 4 * reg_number2); + else + emit_load(state, S32, parameter_reg[0], reg_table[reg_number2], + offsetof(riscv_t, X) + 4 * reg_number2); + } else if (sext2) { + emit_store(state, S32, reg_table[reg_number2], parameter_reg[0], + offsetof(riscv_t, X) + 4 * reg_number2); + emit_load_sext(state, S32, parameter_reg[0], reg_table[reg_number2], + offsetof(riscv_t, X) + 4 * reg_number2); + } +} + #define GEN(inst, code) \ static void do_##inst(struct jit_state *state UNUSED, riscv_t *rv UNUSED, \ rv_insn_t *ir UNUSED) \ @@ -1265,22 +1387,20 @@ static void do_fuse1(struct jit_state *state, riscv_t *rv UNUSED, rv_insn_t *ir) { opcode_fuse_t *fuse = ir->fuse; for (int i = 0; i < ir->imm2; i++) { - emit_load_imm(state, temp_reg[0], fuse[i].imm); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rd); + vm_reg[0] = map_reg(state, fuse[i].rd); + emit_load_imm(state, vm_reg[0], fuse[i].imm); } } static void do_fuse2(struct jit_state *state, riscv_t *rv UNUSED, rv_insn_t *ir) { - emit_load_imm(state, temp_reg[0], ir->imm); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, X) + 4 * ir->rd); - emit_load(state, S32, parameter_reg[0], temp_reg[1], - offsetof(riscv_t, X) + 4 * ir->rs1); - emit_alu32(state, 0x01, temp_reg[1], temp_reg[0]); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, X) + 4 * ir->rs2); + vm_reg[0] = map_reg(state, ir->rd); + emit_load_imm(state, vm_reg[0], ir->imm); + emit_mov(state, vm_reg[0], temp_reg); + vm_reg[1] = ra_load(state, ir->rs1); + vm_reg[2] = 
map_reg(state, ir->rs2); + emit_mov(state, vm_reg[1], vm_reg[2]); + emit_alu32(state, 0x01, temp_reg, vm_reg[2]); } static void do_fuse3(struct jit_state *state, riscv_t *rv, rv_insn_t *ir) @@ -1288,14 +1408,11 @@ static void do_fuse3(struct jit_state *state, riscv_t *rv, rv_insn_t *ir) memory_t *m = PRIV(rv)->mem; opcode_fuse_t *fuse = ir->fuse; for (int i = 0; i < ir->imm2; i++) { - emit_load(state, S32, parameter_reg[0], temp_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rs1); - emit_load_imm(state, temp_reg[1], - (intptr_t) (m->mem_base + fuse[i].imm)); - emit_alu64(state, 0x01, temp_reg[1], temp_reg[0]); - emit_load(state, S32, parameter_reg[0], temp_reg[1], - offsetof(riscv_t, X) + 4 * fuse[i].rs2); - emit_store(state, S32, temp_reg[1], temp_reg[0], 0); + vm_reg[0] = ra_load(state, fuse[i].rs1); + emit_load_imm(state, temp_reg, (intptr_t) (m->mem_base + fuse[i].imm)); + emit_alu64(state, 0x01, vm_reg[0], temp_reg); + vm_reg[1] = ra_load(state, fuse[i].rs2); + emit_store(state, S32, vm_reg[1], temp_reg, 0); } } @@ -1304,31 +1421,28 @@ static void do_fuse4(struct jit_state *state, riscv_t *rv, rv_insn_t *ir) memory_t *m = PRIV(rv)->mem; opcode_fuse_t *fuse = ir->fuse; for (int i = 0; i < ir->imm2; i++) { - emit_load(state, S32, parameter_reg[0], temp_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rs1); - emit_load_imm(state, temp_reg[1], - (intptr_t) (m->mem_base + fuse[i].imm)); - emit_alu64(state, 0x01, temp_reg[1], temp_reg[0]); - emit_load(state, S32, temp_reg[0], temp_reg[1], 0); - emit_store(state, S32, temp_reg[1], parameter_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rd); + vm_reg[0] = ra_load(state, fuse[i].rs1); + emit_load_imm(state, temp_reg, (intptr_t) (m->mem_base + fuse[i].imm)); + emit_alu64(state, 0x01, vm_reg[0], temp_reg); + vm_reg[1] = map_reg(state, fuse[i].rd); + emit_load(state, S32, temp_reg, vm_reg[1], 0); } } static void do_fuse5(struct jit_state *state, riscv_t *rv UNUSED, rv_insn_t *ir) { - emit_load_imm(state, temp_reg[0], ir->pc + 
4); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, PC)); + store_back(state); + emit_load_imm(state, temp_reg, ir->pc + 4); + emit_store(state, S32, temp_reg, parameter_reg[0], offsetof(riscv_t, PC)); emit_call(state, (intptr_t) rv->io.on_memset); emit_exit(&(*state)); } static void do_fuse6(struct jit_state *state, riscv_t *rv UNUSED, rv_insn_t *ir) { - emit_load_imm(state, temp_reg[0], ir->pc + 4); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, PC)); + store_back(state); + emit_load_imm(state, temp_reg, ir->pc + 4); + emit_store(state, S32, temp_reg, parameter_reg[0], offsetof(riscv_t, PC)); emit_call(state, (intptr_t) rv->io.on_memcpy); emit_exit(&(*state)); } @@ -1339,25 +1453,25 @@ static void do_fuse7(struct jit_state *state, riscv_t *rv UNUSED, rv_insn_t *ir) for (int i = 0; i < ir->imm2; i++) { switch (fuse[i].opcode) { case rv_insn_slli: - emit_load(state, S32, parameter_reg[0], temp_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rs1); - emit_alu32_imm8(state, 0xc1, 4, temp_reg[0], fuse[i].imm & 0x1f); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rd); + vm_reg[0] = ra_load(state, fuse[i].rs1); + vm_reg[1] = map_reg(state, fuse[i].rd); + if (vm_reg[0] != vm_reg[1]) + emit_mov(state, vm_reg[0], vm_reg[1]); + emit_alu32_imm8(state, 0xc1, 4, vm_reg[1], fuse[i].imm & 0x1f); break; case rv_insn_srli: - emit_load(state, S32, parameter_reg[0], temp_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rs1); - emit_alu32_imm8(state, 0xc1, 5, temp_reg[0], fuse[i].imm & 0x1f); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rd); + vm_reg[0] = ra_load(state, fuse[i].rs1); + vm_reg[1] = map_reg(state, fuse[i].rd); + if (vm_reg[0] != vm_reg[1]) + emit_mov(state, vm_reg[0], vm_reg[1]); + emit_alu32_imm8(state, 0xc1, 5, vm_reg[1], fuse[i].imm & 0x1f); break; case rv_insn_srai: - emit_load(state, S32, parameter_reg[0], temp_reg[0], - 
offsetof(riscv_t, X) + 4 * fuse[i].rs1); - emit_alu32_imm8(state, 0xc1, 7, temp_reg[0], fuse[i].imm & 0x1f); - emit_store(state, S32, temp_reg[0], parameter_reg[0], - offsetof(riscv_t, X) + 4 * fuse[i].rd); + vm_reg[0] = ra_load(state, fuse[i].rs1); + vm_reg[1] = map_reg(state, fuse[i].rd); + if (vm_reg[0] != vm_reg[1]) + emit_mov(state, vm_reg[0], vm_reg[1]); + emit_alu32_imm8(state, 0xc1, 7, vm_reg[1], fuse[i].imm & 0x1f); break; default: __UNREACHABLE; @@ -1387,6 +1501,7 @@ static void translate(struct jit_state *state, riscv_t *rv, block_t *block) { uint32_t idx; rv_insn_t *ir, *next; + reset_reg(); for (idx = 0, ir = block->ir_head; idx < block->n_insn; idx++, ir = next) { next = ir->next; ((codegen_block_func_t) dispatch_table[ir->opcode])(state, rv, ir); @@ -1468,7 +1583,6 @@ uint32_t jit_translate(riscv_t *rv, block_t *block) set_t set; set_reset(&set); translate_chained_block(&(*state), rv, block, &set); - if (state->offset == state->size) { printf("Target buffer too small\n"); goto out; diff --git a/src/riscv.c b/src/riscv.c index e68a2648..7dc65325 100644 --- a/src/riscv.c +++ b/src/riscv.c @@ -26,7 +26,7 @@ #if RV32_HAS(JIT) #include "cache.h" #include "jit.h" -#define CODE_CACHE_SIZE (1024 * 1024) +#define CODE_CACHE_SIZE (4 * 1024 * 1024) #endif #define BLOCK_IR_MAP_CAPACITY_BITS 10 diff --git a/src/rv32_template.c b/src/rv32_template.c index 32a39e41..c568bd6c 100644 --- a/src/rv32_template.c +++ b/src/rv32_template.c @@ -26,27 +26,32 @@ * addi, * { rv->X[ir->rd] = (int32_t) (rv->X[ir->rs1]) + ir->imm; }, * GEN({ - * ld, S32, TMP0, X, rs1; - * alu32_imm, 32, 0x81, 0, TMP0, imm; - * st, S32, TMP0, X, rd; + * rald, VR0, rs1; + * map, VR1, rd; + * cond, regneq; + * mov, VR0, VR1; + * end; + * alu32imm, 32, 0x81, 0, VR1, imm; * })) * - * TMP0, TMP1, TMP2 are host registers for storing calculated value during - * execution. The block defined as 'GEN' is mapped to the generic C code used in - * the interpreter. 
The following instructions will be generated by JIT - * compiler: - * - Load X->rs1 (target field) from the rv data structure to TMP0 (destination - * register). - * - Add imm to TMP0 (source register) and store the result into TMP0 - * (destination register). - * - Store TMP0 (source register) value to the X->rd (target field) of the rv - * data structure. + * VR0, VR1, VR2 are host registers for storing calculated value + * during execution. TMP are host registers for storing temporary calculated + * value or memory address during execution. The block defined as 'GEN' is + * mapped to the generic C code used in the interpreter. The following + * instructions will be generated by JIT compiler: + * - Load X->rs1 (target field) from the rv data structure to VR0 + * (destination register), if X->rs1 has been loaded to the host register, the + * host register number would be assigned to VR0. + * - Map the host register to VM register X->rd. + * - Move the register value of VR0 (X->rs1) into VR1 (X->rd) if the + * VR0 (X->rs1) is not equal to VR1 (X->rd). + * - Add imm to VR1 (X->rd) * * The sequence of host instructions generated during dynamic binary translation * for the addi instruction: - * mov TMP0, [memory address of (rv->X + rs1)] - * add TMP0, imm - * mov [memory address of (rv->X + rd)], TMP0 + * mov VR0, [memory address of (rv->X + rs1)] + * mov VR1, VR0 + * add VR1, imm * * The parameter of x64 or arm64 instruction API * - size: size of data @@ -59,29 +64,37 @@ * * | Mnemonic | Meaning | * |--------------------------------+----------------------------------------| - * | alu[32|64]_imm, size, op, | Do ALU operation on src and imm and | + * | alu[32|64]imm, size, op, | Do ALU operation on src and imm and | * | src, dst, imm; | store the result into dst. | * | alu[32|64], op, src, dst; | Do ALU operation on src and dst and | * | | store the result into dst. | - * | ld_imm, dst, imm; | Load immediate into dst. 
| - * | ld_sext, size, src, dst, | Load data of a specified size from | + * | ldimm, dst, imm; | Load immediate into dst. | + * | lds, size, src, dst, | Load data of a specified size from | * | offset; | memory and sign-extend it into the dst,| * | | using the memory address calculated as | * | | the sum of the src and the specified | * | | offset. | + * | rald, dst, field | Map VM register to host register, and | + * | | load the target field from rv data | + * | | if needed. | + * | rald2, field1, field2 | Map 2 VM register to 2 host register, | + * | | and load the target fields from rv data| + * | | respectively if needed. | + * | rald2s, field1, field2 | Map 2 VM register to 2 host register, | + * | | and load the target fields from rv data| + * | | and sign-extend it respectively. | + * | map, dst, field | Map VM register to host register. | * | ld, size, dst, member, field; | load the target field from rv data | * | | structure to dst. | - * | st_imm, size, field, imm; | store immediate to the target field of | - * | | rv data structure. | * | st, size, src, member, field; | store src value to the target field of | * | | rv data structure. | * | cmp, src, dst; | compare the value between src and dst. | - * | cmp_imm, src, imm; | compare the value of src and imm. | + * | cmpimm, src, imm; | compare the value of src and imm. | * | jmp, pc, imm; | jump to the program counter of pc + imm| * | jcc, op; | jump with condition. | - * | set_jmp_off; | set the location of jump with condition| + * | setjmpoff; | set the location of jump with condition| * | | instruction. | - * | jmp_off; | set the jump target of jump with | + * | jmpoff; | set the jump target of jump with | * | | condition instruction. | * | mem; | get memory base. | * | call, handler; | call function handler stored in rv->io | @@ -93,7 +106,11 @@ * | mod, op, src, dst, imm; | Do mod operation on src and dst and | * | | store the result into dst. 
| * | cond, src; | set condition if (src) | - * | end; | set the end of condition if (src) | + * | end; | set the end of condition if (src) | + * | break; | In the end of a basic block, we need | + * | | to store all VM register value to rv | + * | | data, becasue the register allocation | + * | | is only applied on a basic block. | */ /* Internal */ @@ -111,8 +128,8 @@ RVOP( lui, { rv->X[ir->rd] = ir->imm; }, GEN({ - ld_imm, TMP0, imm; - st, S32, TMP0, X, rd; + map, VR0, rd; + ldimm, VR0, imm; })) /* AUIPC is used to build pc-relative addresses and uses the U-type format. @@ -124,8 +141,8 @@ RVOP( auipc, { rv->X[ir->rd] = ir->imm + PC; }, GEN({ - ld_imm, TMP0, pc, imm; - st, S32, TMP0, X, rd; + map, VR0, rd; + ldimm, VR0, pc, imm; })) /* JAL: Jump and Link @@ -164,12 +181,13 @@ RVOP( }, GEN({ cond, rd; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, X, rd; + map, VR0, rd; + ldimm, VR0, pc, 4; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + break; jmp, pc, imm; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -225,13 +243,15 @@ RVOP( }, GEN({ cond, rd; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, X, rd; + map, VR0, rd; + ldimm, VR0, pc, 4; end; - ld, S32, TMP0, X, rs1; - alu32_imm, 32, 0x81, 0, TMP0, imm; - alu32_imm, 32, 0x81, 4, TMP0, ~1U; - st, S32, TMP0, PC; + rald, VR1, rs1; + mov, VR1, TMP; + alu32imm, 32, 0x81, 0, TMP, imm; + alu32imm, 32, 0x81, 4, TMP, ~1U; + st, S32, TMP, PC; + break; exit; })) @@ -303,23 +323,23 @@ RVOP( beq, { BRANCH_FUNC(uint32_t, !=); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - set_jmp_off; + rald2, rs1, rs2; + cmp, VR1, VR0; + break; + setjmpoff; jcc, 0x84; cond, branch_untaken; jmp, pc, 4; end; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, PC; + ldimm, TMP, pc, 4; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -328,23 +348,23 @@ RVOP( bne, { BRANCH_FUNC(uint32_t, 
==); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - set_jmp_off; + rald2, rs1, rs2; + cmp, VR1, VR0; + break; + setjmpoff; jcc, 0x85; cond, branch_untaken; jmp, pc, 4; end; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, PC; + ldimm, TMP, pc, 4; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -353,23 +373,23 @@ RVOP( blt, { BRANCH_FUNC(int32_t, >=); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - set_jmp_off; + rald2, rs1, rs2; + cmp, VR1, VR0; + break; + setjmpoff; jcc, 0x8c; cond, branch_untaken; jmp, pc, 4; end; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, PC; + ldimm, TMP, pc, 4; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -378,23 +398,23 @@ RVOP( bge, { BRANCH_FUNC(int32_t, <); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - set_jmp_off; + rald2, rs1, rs2; + cmp, VR1, VR0; + break; + setjmpoff; jcc, 0x8d; cond, branch_untaken; jmp, pc, 4; end; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, PC; + ldimm, TMP, pc, 4; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -403,23 +423,23 @@ RVOP( bltu, { BRANCH_FUNC(uint32_t, >=); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - set_jmp_off; + rald2, rs1, rs2; + cmp, VR1, VR0; + break; + setjmpoff; jcc, 0x82; cond, branch_untaken; jmp, pc, 4; end; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, PC; + ldimm, TMP, pc, 4; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ 
-428,23 +448,23 @@ RVOP( bgeu, { BRANCH_FUNC(uint32_t, <); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - set_jmp_off; + rald2, rs1, rs2; + cmp, VR1, VR0; + break; + setjmpoff; jcc, 0x83; cond, branch_untaken; jmp, pc, 4; end; - ld_imm, TMP0, pc, 4; - st, S32, TMP0, PC; + ldimm, TMP, pc, 4; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -464,11 +484,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld_sext, S8, TMP0, TMP1, 0; - st, S32, TMP1, X, rd; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + map, VR1, rd; + lds, S8, TMP, VR1, 0; })) /* LH: Load Halfword */ @@ -481,11 +501,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld_sext, S16, TMP0, TMP1, 0; - st, S32, TMP1, X, rd; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + map, VR1, rd; + lds, S16, TMP, VR1, 0; })) /* LW: Load Word */ @@ -498,11 +518,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S32, TMP0, TMP1, 0; - st, S32, TMP1, X, rd; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + map, VR1, rd; + ld, S32, TMP, VR1, 0; })) /* LBU: Load Byte Unsigned */ @@ -511,11 +531,11 @@ RVOP( { rv->X[ir->rd] = rv->io.mem_read_b(rv->X[ir->rs1] + ir->imm); }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S8, TMP0, TMP1, 0; - st, S32, TMP1, X, rd; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + map, VR1, rd; + ld, S8, TMP, VR1, 0; })) /* LHU: Load Halfword Unsigned */ @@ -528,11 +548,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S16, TMP0, TMP1, 0; - st, S32, TMP1, X, rd; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; 
+ map, VR1, rd; + ld, S16, TMP, VR1, 0; })) /* There are 3 types of stores: byte, halfword, and word-sized. Unlike loads, @@ -547,11 +567,11 @@ RVOP( { rv->io.mem_write_b(rv->X[ir->rs1] + ir->imm, rv->X[ir->rs2]); }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S8, TMP1, X, rs2; - st, S8, TMP1, TMP0, 0; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + rald, VR1, rs2; + st, S8, VR1, TMP, 0; })) /* SH: Store Halfword */ @@ -564,11 +584,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S16, TMP1, X, rs2; - st, S16, TMP1, TMP0, 0; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + rald, VR1, rs2; + st, S16, VR1, TMP, 0; })) /* SW: Store Word */ @@ -581,11 +601,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S32, TMP1, X, rs2; - st, S32, TMP1, TMP0, 0; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + rald, VR1, rs2; + st, S32, VR1, TMP, 0; })) /* ADDI adds the sign-extended 12-bit immediate to register rs1. Arithmetic @@ -597,9 +617,12 @@ RVOP( addi, { rv->X[ir->rd] = rv->X[ir->rs1] + ir->imm; }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 32, 0x81, 0, TMP0, imm; - st, S32, TMP0, X, rd; + rald, VR0, rs1; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 32, 0x81, 0, VR1, imm; })) /* SLTI place the value 1 in register rd if register rs1 is less than the @@ -610,13 +633,14 @@ RVOP( slti, { rv->X[ir->rd] = ((int32_t) (rv->X[ir->rs1]) < ir->imm) ? 
1 : 0; }, GEN({ - ld, S32, TMP0, X, rs1; - cmp_imm, TMP0, imm; - st_imm, S32, rd, 1; - set_jmp_off; + rald, VR0, rs1; + cmpimm, VR0, imm; + map, VR1, rd; + ldimm, VR1, 1; + setjmpoff; jcc, 0x8c; - st_imm, S32, rd, 0; - jmp_off; + ldimm, VR1, 0; + jmpoff; })) /* SLTIU places the value 1 in register rd if register rs1 is less than the @@ -626,13 +650,14 @@ RVOP( sltiu, { rv->X[ir->rd] = (rv->X[ir->rs1] < (uint32_t) ir->imm) ? 1 : 0; }, GEN({ - ld, S32, TMP0, X, rs1; - cmp_imm, TMP0, imm; - st_imm, S32, rd, 1; - set_jmp_off; + rald, VR0, rs1; + cmpimm, VR0, imm; + map, VR1, rd; + ldimm, VR1, 1; + setjmpoff; jcc, 0x82; - st_imm, S32, rd, 0; - jmp_off; + ldimm, VR1, 0; + jmpoff; })) /* XORI: Exclusive OR Immediate */ @@ -640,9 +665,12 @@ RVOP( xori, { rv->X[ir->rd] = rv->X[ir->rs1] ^ ir->imm; }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 32, 0x81, 6, TMP0, imm; - st, S32, TMP0, X, rd; + rald, VR0, rs1; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 32, 0x81, 6, VR1, imm; })) /* ORI: OR Immediate */ @@ -650,9 +678,12 @@ RVOP( ori, { rv->X[ir->rd] = rv->X[ir->rs1] | ir->imm; }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 32, 0x81, 1, TMP0, imm; - st, S32, TMP0, X, rd; + rald, VR0, rs1; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 32, 0x81, 1, VR1, imm; })) /* ANDI performs bitwise AND on register rs1 and the sign-extended 12-bit @@ -662,9 +693,12 @@ RVOP( andi, { rv->X[ir->rd] = rv->X[ir->rs1] & ir->imm; }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 32, 0x81, 4, TMP0, imm; - st, S32, TMP0, X, rd; + rald, VR0, rs1; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 32, 0x81, 4, VR1, imm; })) FORCE_INLINE void shift_func(riscv_t *rv, const rv_insn_t *ir) @@ -692,9 +726,12 @@ RVOP( slli, { shift_func(rv, ir); }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 8, 0xc1, 4, TMP0, imm, 0x1f; - st, S32, TMP0, X, rd; + rald, VR0, rs1; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 8, 0xc1, 4, VR1, imm, 
0x1f; })) /* SRLI performs logical right shift on the value in register rs1 by the shift @@ -704,9 +741,12 @@ RVOP( srli, { shift_func(rv, ir); }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 8, 0xc1, 5, TMP0, imm, 0x1f; - st, S32, TMP0, X, rd; + rald, VR0, rs1; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 8, 0xc1, 5, VR1, imm, 0x1f; })) /* SRAI performs arithmetic right shift on the value in register rs1 by the @@ -716,9 +756,12 @@ RVOP( srai, { shift_func(rv, ir); }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 8, 0xc1, 7, TMP0, imm, 0x1f; - st, S32, TMP0, X, rd; + rald, VR0, rs1; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 8, 0xc1, 7, VR1, imm, 0x1f; })) /* ADD */ @@ -726,10 +769,11 @@ RVOP( add, { rv->X[ir->rd] = rv->X[ir->rs1] + rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x01, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x01, TMP, VR2; })) /* SUB: Substract */ @@ -737,10 +781,11 @@ RVOP( sub, { rv->X[ir->rd] = rv->X[ir->rs1] - rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x29, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x29, TMP, VR2; })) /* SLL: Shift Left Logical */ @@ -748,11 +793,12 @@ RVOP( sll, { rv->X[ir->rd] = rv->X[ir->rs1] << (rv->X[ir->rs2] & 0x1f); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP2, X, rs2; - alu32_imm, 32, 0x81, 4, TMP2, 0x1f; - alu32, 0xd3, 4, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32imm, 32, 0x81, 4, TMP, 0x1f; + alu32, 0xd3, 4, VR2; })) /* SLT: Set on Less Than */ @@ -763,14 +809,14 @@ RVOP( ((int32_t) (rv->X[ir->rs1]) < (int32_t) (rv->X[ir->rs2])) ? 
1 : 0; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - st_imm, S32, rd, 1; - set_jmp_off; + rald2, rs1, rs2; + map, VR2, rd; + cmp, VR1, VR0; + ldimm, VR2, 1; + setjmpoff; jcc, 0x8c; - st_imm, S32, rd, 0; - jmp_off; + ldimm, VR2, 0; + jmpoff; })) /* SLTU: Set on Less Than Unsigned */ @@ -778,14 +824,14 @@ RVOP( sltu, { rv->X[ir->rd] = (rv->X[ir->rs1] < rv->X[ir->rs2]) ? 1 : 0; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - cmp, TMP1, TMP0; - st_imm, S32, rd, 1; - set_jmp_off; + rald2, rs1, rs2; + map, VR2, rd; + cmp, VR1, VR0; + ldimm, VR2, 1; + setjmpoff; jcc, 0x82; - st_imm, S32, rd, 0; - jmp_off; + ldimm, VR2, 0; + jmpoff; })) /* XOR: Exclusive OR */ @@ -795,10 +841,11 @@ RVOP( rv->X[ir->rd] = rv->X[ir->rs1] ^ rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x31, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x31, TMP, VR2; })) /* SRL: Shift Right Logical */ @@ -806,11 +853,12 @@ RVOP( srl, { rv->X[ir->rd] = rv->X[ir->rs1] >> (rv->X[ir->rs2] & 0x1f); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP2, X, rs2; - alu32_imm, 32, 0x81, 4, TMP2, 0x1f; - alu32, 0xd3, 5, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32imm, 32, 0x81, 4, TMP, 0x1f; + alu32, 0xd3, 5, VR2; })) /* SRA: Shift Right Arithmetic */ @@ -818,11 +866,12 @@ RVOP( sra, { rv->X[ir->rd] = ((int32_t) rv->X[ir->rs1]) >> (rv->X[ir->rs2] & 0x1f); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP2, X, rs2; - alu32_imm, 32, 0x81, 4, TMP2, 0x1f; - alu32, 0xd3, 7, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32imm, 32, 0x81, 4, TMP, 0x1f; + alu32, 0xd3, 7, VR2; })) /* OR */ @@ -831,10 +880,11 @@ RVOP( , { rv->X[ir->rd] = rv->X[ir->rs1] | rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x09, TMP1, TMP0; - st, S32, 
TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x09, TMP, VR2; })) /* AND */ @@ -843,10 +893,11 @@ RVOP( and, { rv->X[ir->rd] = rv->X[ir->rs1] & rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x21, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x21, TMP, VR2; })) /* clang-format on */ @@ -861,8 +912,9 @@ RVOP( return true; }, GEN({ - ld_imm, TMP0, pc; - st, S32, TMP0, PC; + break; + ldimm, TMP, pc; + st, S32, TMP, PC; call, ecall; exit; })) @@ -878,8 +930,9 @@ RVOP( return true; }, GEN({ - ld_imm, TMP0, pc; - st, S32, TMP0, PC; + break; + ldimm, TMP, pc; + st, S32, TMP, PC; call, ebreak; exit; })) @@ -1046,10 +1099,11 @@ RVOP( ((uint64_t) (multiplicand * multiplier)) & ((1ULL << 32) - 1); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - mul, 0x28, TMP1, TMP0, 0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + mul, 0x28, TMP, VR2, 0; })) /* MULH: Multiply High Signed Signed */ @@ -1064,11 +1118,12 @@ RVOP( rv->X[ir->rd] = ((uint64_t) (multiplicand * multiplier)) >> 32; }, GEN({ - ld_sext, S32, TMP0, X, rs1; - ld_sext, S32, TMP1, X, rs2; - mul, 0x2f, TMP1, TMP0, 0; - alu64_imm, 8, 0xc1, 5, TMP0, 32; - st, S32, TMP0, X, rd; + rald2s, rs1, rs2, true, true; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + mul, 0x2f, TMP, VR2, 0; + alu64imm, 8, 0xc1, 5, VR2, 32; })) /* MULHSU: Multiply High Signed Unsigned */ @@ -1084,11 +1139,12 @@ RVOP( rv->X[ir->rd] = ((uint64_t) (multiplicand * umultiplier)) >> 32; }, GEN({ - ld_sext, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - mul, 0x2f, TMP1, TMP0, 0; - alu64_imm, 8, 0xc1, 5, TMP0, 32; - st, S32, TMP0, X, rd; + rald2s, rs1, rs2, true, false; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + mul, 0x2f, TMP, VR2, 0; + alu64imm, 8, 0xc1, 5, VR2, 32; })) /* MULHU: Multiply High Unsigned Unsigned */ @@ -1099,11 +1155,12 @@ 
RVOP( ((uint64_t) rv->X[ir->rs1] * (uint64_t) rv->X[ir->rs2]) >> 32; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - mul, 0x2f, TMP1, TMP0, 0; - alu64_imm, 8, 0xc1, 5, TMP0, 32; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + mul, 0x2f, TMP, VR2, 0; + alu64imm, 8, 0xc1, 5, VR2, 32; })) /* DIV: Divide Signed */ @@ -1125,15 +1182,11 @@ RVOP( : (unsigned int) (dividend / divisor); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - div, 0x38, TMP1, TMP0, 0; - cmp_imm, TMP1, 0; - set_jmp_off; - jcc, 0x85; - ld_imm, TMP0, -1; - jmp_off; - st, S32, TMP0, X, rd; + rald2s, rs1, rs2, true, true; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + div, 0x38, TMP, VR2, 0; /* FIXME: handle overflow */ })) @@ -1152,15 +1205,11 @@ RVOP( rv->X[ir->rd] = !udivisor ? ~0U : udividend / udivisor; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - div, 0x38, TMP1, TMP0, 0; - cmp_imm, TMP1, 0; - set_jmp_off; - jcc, 0x85; - ld_imm, TMP0, ~0U; - jmp_off; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + div, 0x38, TMP, VR2, 0; })) /* clang-format off */ @@ -1181,10 +1230,11 @@ RVOP(rem, { % divisor); }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - mod, 0x98, TMP1, TMP0, 0; - st, S32, TMP0, X, rd; + rald2s, rs1, rs2, true, true; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + mod, 0x98, TMP, VR2, 0; /* FIXME: handle overflow */ })) @@ -1202,10 +1252,11 @@ RVOP(remu, { % udivisor; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - mod, 0x98, TMP1, TMP0, 0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + mod, 0x98, TMP, VR2, 0; })) /* clang-format on */ #endif @@ -1782,9 +1833,12 @@ RVOP( caddi4spn, { rv->X[ir->rd] = rv->X[rv_reg_sp] + (uint16_t) ir->imm; }, GEN({ - ld, S32, TMP0, X, rv_reg_sp; - alu32_imm, 32, 0x81, 0, TMP0, uint, 16, imm; - st, S32, TMP0, X, rd; + rald, VR0, rv_reg_sp; + map, 
VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; + alu32imm, 32, 0x81, 0, VR1, uint, 16, imm; })) /* C.LW loads a 32-bit value from memory into register rd'. It computes an @@ -1800,11 +1854,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S32, TMP0, TMP1, 0; - st, S32, TMP1, X, rd; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + map, VR1, rd; + ld, S32, TMP, VR1, 0; })) /* C.SW stores a 32-bit value in register rs2' to memory. It computes an @@ -1821,11 +1875,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rs1; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S32, TMP1, X, rs2; - st, S32, TMP1, TMP0, 0; + rald, VR0, rs1; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + rald, VR1, rs2; + st, S32, VR1, TMP, 0; })) /* C.NOP */ @@ -1841,9 +1895,8 @@ RVOP( caddi, { rv->X[ir->rd] += (int16_t) ir->imm; }, GEN({ - ld, S32, TMP0, X, rd; - alu32_imm, 32, 0x81, 0, TMP0, int, 16, imm; - st, S32, TMP0, X, rd; + rald, VR0, rd; + alu32imm, 32, 0x81, 0, VR0, int, 16, imm; })) /* C.JAL */ @@ -1870,11 +1923,12 @@ RVOP( return true; }, GEN({ - ld_imm, TMP0, pc, 2; - st, S32, TMP0, X, rv_reg_ra; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + map, VR0, rv_reg_ra; + ldimm, VR0, pc, 2; + break; jmp, pc, imm; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -1886,8 +1940,8 @@ RVOP( cli, { rv->X[ir->rd] = ir->imm; }, GEN({ - ld_imm, TMP0, imm; - st, S32, TMP0, X, rd; + map, VR0, rd; + ldimm, VR0, imm; })) /* C.ADDI16SP is used to adjust the stack pointer in procedure prologues @@ -1899,9 +1953,8 @@ RVOP( caddi16sp, { rv->X[ir->rd] += ir->imm; }, GEN({ - ld, S32, TMP0, X, rd; - alu32_imm, 32, 0x81, 0, TMP0, imm; - st, S32, TMP0, X, rd; + rald, VR0, rd; + alu32imm, 32, 0x81, 0, VR0, imm; })) /* C.LUI loads the non-zero 6-bit immediate field into bits 17–12 of the @@ -1915,8 +1968,8 @@ RVOP( clui, { rv->X[ir->rd] = ir->imm; }, GEN({ - ld_imm, TMP0, imm; - st, S32, TMP0, X, rd; + map, VR0, rd; + ldimm, VR0, imm; 
})) /* C.SRLI is a CB-format instruction that performs a logical right shift @@ -1928,9 +1981,8 @@ RVOP( csrli, { rv->X[ir->rs1] >>= ir->shamt; }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 8, 0xc1, 5, TMP0, shamt; - st, S32, TMP0, X, rs1; + rald, VR0, rs1; + alu32imm, 8, 0xc1, 5, VR0, shamt; })) /* C.SRAI is defined analogously to C.SRLI, but instead performs an @@ -1945,9 +1997,8 @@ RVOP( rv->X[ir->rs1] |= mask >> i; }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 8, 0xc1, 7, TMP0, shamt; - st, S32, TMP0, X, rs1; + rald, VR0, rs1; + alu32imm, 8, 0xc1, 7, VR0, shamt; /* FIXME: Incomplete */ })) @@ -1959,9 +2010,8 @@ RVOP( candi, { rv->X[ir->rs1] &= ir->imm; }, GEN({ - ld, S32, TMP0, X, rs1; - alu32_imm, 32, 0x81, 4, TMP0, imm; - st, S32, TMP0, X, rs1; + rald, VR0, rs1; + alu32imm, 32, 0x81, 4, VR0, imm; })) /* C.SUB */ @@ -1969,10 +2019,11 @@ RVOP( csub, { rv->X[ir->rd] = rv->X[ir->rs1] - rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x29, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x29, TMP, VR2; })) /* C.XOR */ @@ -1980,30 +2031,33 @@ RVOP( cxor, { rv->X[ir->rd] = rv->X[ir->rs1] ^ rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x31, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x31, TMP, VR2; })) RVOP( cor, { rv->X[ir->rd] = rv->X[ir->rs1] | rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x09, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x09, TMP, VR2; })) RVOP( cand, { rv->X[ir->rd] = rv->X[ir->rs1] & rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x21, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x21, TMP, VR2; })) /* C.J performs an 
unconditional control transfer. The offset is sign-extended @@ -2033,9 +2087,10 @@ RVOP( return true; }, GEN({ - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + break; jmp, pc, imm; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -2083,22 +2138,23 @@ RVOP( return true; }, GEN({ - ld, S32, TMP0, X, rs1; - cmp_imm, TMP0, 0; - set_jmp_off; + rald, VR0, rs1; + cmpimm, VR0, 0; + break; + setjmpoff; jcc, 0x84; cond, branch_untaken; jmp, pc, 2; end; - ld_imm, TMP0, pc, 2; - st, S32, TMP0, PC; + ldimm, TMP, pc, 2; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -2142,22 +2198,23 @@ RVOP( return true; }, GEN({ - ld, S32, TMP0, X, rs1; - cmp_imm, TMP0, 0; - set_jmp_off; + rald, VR0, rs1; + cmpimm, VR0, 0; + break; + setjmpoff; jcc, 0x85; cond, branch_untaken; jmp, pc, 2; end; - ld_imm, TMP0, pc, 2; - st, S32, TMP0, PC; + ldimm, TMP, pc, 2; + st, S32, TMP, PC; exit; - jmp_off; + jmpoff; cond, branch_taken; jmp, pc, imm; end; - ld_imm, TMP0, pc, imm; - st, S32, TMP0, PC; + ldimm, TMP, pc, imm; + st, S32, TMP, PC; exit; })) @@ -2169,9 +2226,8 @@ RVOP( cslli, { rv->X[ir->rd] <<= (uint8_t) ir->imm; }, GEN({ - ld, S32, TMP0, X, rd; - alu32_imm, 8, 0xc1, 4, TMP0, uint, 8, imm; - st, S32, TMP0, X, rd; + rald, VR0, rd; + alu32imm, 8, 0xc1, 4, VR0, uint, 8, imm; })) /* C.LWSP */ @@ -2184,11 +2240,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rv_reg_sp; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S32, TMP0, TMP1, 0; - st, S32, TMP1, X, rd; + rald, VR0, rv_reg_sp; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + map, VR1, rd; + ld, S32, TMP, VR1, 0; })) /* C.JR */ @@ -2204,8 +2260,10 @@ RVOP( return true; }, GEN({ - ld, S32, TMP0, X, rs1; - st, S32, TMP0, PC; + rald, VR0, rs1; + mov, VR0, TMP; + st, S32, TMP, PC; + break; exit; })) @@ -2214,8 +2272,11 @@ RVOP( cmv, { rv->X[ir->rd] = rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs2; - 
st, S32, TMP0, X, rd; + rald, VR0, rs2; + map, VR1, rd; + cond, regneq; + mov, VR0, VR1; + end; })) /* C.EBREAK */ @@ -2229,10 +2290,9 @@ RVOP( return true; }, GEN({ - ld_imm, TMP0, pc; - st, S32, TMP0, PC; - ld_imm, TMP0, 1; - st, S32, TMP0, compressed; + break; + ldimm, TMP, pc; + st, S32, TMP, PC; call, ebreak; exit; })) @@ -2253,10 +2313,12 @@ RVOP( return true; }, GEN({ - ld_imm, TMP0, pc, 2; - st, S32, TMP0, X, rv_reg_ra; - ld, S32, TMP0, X, rs1; - st, S32, TMP0, PC; + map, VR0, rv_reg_ra; + ldimm, VR0, pc, 2; + rald, VR1, rs1; + mov, VR1, TMP; + st, S32, TMP, PC; + break; exit; })) @@ -2271,10 +2333,11 @@ RVOP( cadd, { rv->X[ir->rd] = rv->X[ir->rs1] + rv->X[ir->rs2]; }, GEN({ - ld, S32, TMP0, X, rs1; - ld, S32, TMP1, X, rs2; - alu32, 0x01, TMP1, TMP0; - st, S32, TMP0, X, rd; + rald2, rs1, rs2; + map, VR2, rd; + mov, VR1, TMP; + mov, VR0, VR2; + alu32, 0x01, TMP, VR2; })) /* C.SWSP */ @@ -2287,11 +2350,11 @@ RVOP( }, GEN({ mem; - ld, S32, TMP0, X, rv_reg_sp; - ld_imm, TMP1, mem; - alu64, 0x01, TMP1, TMP0; - ld, S32, TMP1, X, rs2; - st, S32, TMP1, TMP0, 0; + rald, VR0, rv_reg_sp; + ldimm, TMP, mem; + alu64, 0x01, VR0, TMP; + rald, VR1, rs2; + st, S32, VR1, TMP, 0; })) #endif diff --git a/tools/gen-jit-template.py b/tools/gen-jit-template.py index 5199525b..389fcf59 100755 --- a/tools/gen-jit-template.py +++ b/tools/gen-jit-template.py @@ -131,7 +131,7 @@ def parse_argv(EXT_LIST, SKIP_LIST): f.close() fields = {"imm", "pc", "rs1", "rs2", "rd", "shamt", "branch_taken", "branch_untaken"} -temp_regs = {"TMP0", "TMP1", "TMP2"} +virt_regs = {"VR0", "VR1", "VR2"} # generate jit template for i in range(len(op)): if (not SKIP_LIST.count(op[i])): @@ -145,9 +145,11 @@ def parse_argv(EXT_LIST, SKIP_LIST): for i in range(len(items)): if items[i] in fields: items[i] = "ir->" + items[i] - if items[i] in temp_regs: - items[i] = "temp_reg[" + items[i][-1] + "]" - if items[0] == "alu32_imm": + if items[i] in virt_regs: + items[i] = "vm_reg[" + items[i][-1] + "]" + if items[i] 
== "TMP": + items[i] = "temp_reg" + if items[0] == "alu32imm": if len(items) == 8: asm = "emit_alu32_imm{}(state, {}, {}, {}, ({}{}_t) {});".format( items[1], items[2], items[3], items[4], items[5], items[6], items[7]) @@ -157,7 +159,7 @@ def parse_argv(EXT_LIST, SKIP_LIST): else: asm = "emit_alu32_imm{}(state, {}, {}, {}, {});".format( items[1], items[2], items[3], items[4], items[5]) - elif items[0] == "alu64_imm": + elif items[0] == "alu64imm": asm = "emit_alu64_imm{}(state, {}, {}, {}, {});".format( items[1], items[2], items[3], items[4], items[5]) elif items[0] == "alu64": @@ -166,7 +168,7 @@ def parse_argv(EXT_LIST, SKIP_LIST): elif items[0] == "alu32": asm = "emit_alu32(state, {}, {}, {});".format( items[1], items[2], items[3]) - elif items[0] == "ld_imm": + elif items[0] == "ldimm": if items[2] == "mem": asm = "emit_load_imm(state, {}, (intptr_t) (m->mem_base + ir->imm));".format( items[1]) @@ -176,13 +178,21 @@ def parse_argv(EXT_LIST, SKIP_LIST): else: asm = "emit_load_imm(state, {}, {});".format( items[1], items[2]) - elif items[0] == "ld_sext": + elif items[0] == "lds": if (items[3] == "X"): asm = "emit_load_sext(state, {}, parameter_reg[0], {}, offsetof(riscv_t, X) + 4 * {});".format( items[1], items[2], items[4]) else: asm = "emit_load_sext(state, {}, {}, {}, {});".format( items[1], items[2], items[3], items[4]) + elif items[0] == "rald": + asm = "{} = ra_load(state, {});".format(items[1], items[2]) + elif items[0] == "rald2": + asm = "ra_load2(state, {}, {});".format(items[1], items[2]) + elif items[0] == "rald2s": + asm = "ra_load2_sext(state, {}, {}, {}, {});".format(items[1], items[2], items[3], items[4]) + elif items[0] == "map": + asm = "{} = map_reg(state, {});".format(items[1], items[2]) elif items[0] == "ld": if (items[3] == "X"): asm = "emit_load(state, {}, parameter_reg[0], {}, offsetof(riscv_t, X) + 4 * {});".format( @@ -190,9 +200,6 @@ def parse_argv(EXT_LIST, SKIP_LIST): else: asm = "emit_load(state, {}, {}, {}, {});".format( items[1], 
items[2], items[3], items[4]) - elif items[0] == "st_imm": - asm = "emit_store_imm32(state, {}, parameter_reg[0], offsetof(riscv_t, X) + 4 * {}, {});".format( - items[1], items[2], items[3]) elif items[0] == "st": if (items[3] == "X"): asm = "emit_store(state, {}, {}, parameter_reg[0], offsetof(riscv_t, X) + 4 * {});".format( @@ -203,10 +210,13 @@ def parse_argv(EXT_LIST, SKIP_LIST): else: asm = "emit_store(state, {}, {}, {}, {});".format( items[1], items[2], items[3], items[4]) + elif items[0] == "mov": + asm = "emit_mov(state, {}, {});".format( + items[1], items[2]) elif items[0] == "cmp": asm = "emit_cmp32(state, {}, {});".format( items[1], items[2]) - elif items[0] == "cmp_imm": + elif items[0] == "cmpimm": asm = "emit_cmp_imm32(state, {}, {});".format( items[1], items[2]) elif items[0] == "jmp": @@ -214,9 +224,9 @@ def parse_argv(EXT_LIST, SKIP_LIST): items[1], items[2]) elif items[0] == "jcc": asm = "emit_jcc_offset(state, {});".format(items[1]) - elif items[0] == "set_jmp_off": + elif items[0] == "setjmpoff": asm = "uint32_t jump_loc = state->offset;" - elif items[0] == "jmp_off": + elif items[0] == "jmpoff": asm = "emit_jump_target_offset(state, JUMP_LOC, state->offset);" elif items[0] == "mem": asm = "memory_t *m = PRIV(rv)->mem;" @@ -235,9 +245,13 @@ def parse_argv(EXT_LIST, SKIP_LIST): asm = "muldivmod(state, {}, {}, {}, {});".format( items[1], items[2], items[3], items[4]) elif items[0] == "cond": + if items[1] == "regneq": + items[1] = "vm_reg[0] != vm_reg[1]" asm = "if({})".format(items[1]) + "{" elif items[0] == "end": asm = "}" + elif items[0] == "break": + asm = "store_back(state);" elif items[0] == "assert": asm = "assert(NULL);" output += asm + "\n"