From cccf448ae0fcddb97090dfeb9c1dde499bb82d13 Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 16:19:36 -0800
Subject: [PATCH 1/8] arm64jit: Allow disabling pointerification.

For platforms where we can't get base aligned.
---
 Core/MIPS/ARM64/Arm64RegCache.cpp | 60 ++++++++++++++++++++++++++++---
 Core/MIPS/ARM64/Arm64RegCache.h   |  2 ++
 Core/MIPS/JitCommon/JitState.cpp  |  6 ++++
 Core/MIPS/JitCommon/JitState.h    |  1 +
 4 files changed, 64 insertions(+), 5 deletions(-)

diff --git a/Core/MIPS/ARM64/Arm64RegCache.cpp b/Core/MIPS/ARM64/Arm64RegCache.cpp
index 53d07ec05f6d..5754d3212bb6 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.cpp
+++ b/Core/MIPS/ARM64/Arm64RegCache.cpp
@@ -150,6 +150,8 @@ bool Arm64RegCache::IsMappedAsPointer(MIPSGPReg mipsReg) {
 		if (ar[mr[mipsReg].reg].pointerified) {
 			ELOG("Really shouldn't be pointerified here");
 		}
+	} else if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+		return true;
 	}
 	return false;
 }
@@ -315,6 +317,21 @@ ARM64Reg Arm64RegCache::MapReg(MIPSGPReg mipsReg, int mapFlags) {
 		}
 		return mr[mipsReg].reg;
+	} else if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+		// Was mapped as pointer, now we want it mapped as a value, presumably to
+		// add or subtract stuff to it.
+		if ((mapFlags & MAP_NOINIT) != MAP_NOINIT) {
+			ARM64Reg loadReg = armReg;
+			if (mipsReg == MIPS_REG_LO) {
+				loadReg = EncodeRegTo64(loadReg);
+			}
+			emit_->LDR(INDEX_UNSIGNED, loadReg, CTXREG, GetMipsRegOffset(mipsReg));
+		}
+		mr[mipsReg].loc = ML_ARMREG;
+		if (mapFlags & MAP_DIRTY) {
+			ar[armReg].isDirty = true;
+		}
+		return (ARM64Reg)mr[mipsReg].reg;
 	}
 
 	// Okay, not mapped, so we need to allocate an ARM register.
@@ -358,6 +375,11 @@ ARM64Reg Arm64RegCache::MapReg(MIPSGPReg mipsReg, int mapFlags) {
 }
 
 Arm64Gen::ARM64Reg Arm64RegCache::MapRegAsPointer(MIPSGPReg reg) {
+	// Already mapped.
+	if (mr[reg].loc == ML_ARMREG_AS_PTR) {
+		return mr[reg].reg;
+	}
+
 	ARM64Reg retval = INVALID_REG;
 	if (mr[reg].loc != ML_ARMREG && mr[reg].loc != ML_ARMREG_IMM) {
 		retval = MapReg(reg);
@@ -368,9 +390,23 @@ Arm64Gen::ARM64Reg Arm64RegCache::MapRegAsPointer(MIPSGPReg reg) {
 	if (mr[reg].loc == ML_ARMREG || mr[reg].loc == ML_ARMREG_IMM) {
 		// If there was an imm attached, discard it.
 		mr[reg].loc = ML_ARMREG;
-		int a = DecodeReg(mr[reg].reg);
-		if (!ar[a].pointerified) {
-			emit_->MOVK(ARM64Reg(X0 + a), ((uint64_t)Memory::base) >> 32, SHIFT_32);
+		ARM64Reg a = DecodeReg(mr[reg].reg);
+		if (!jo_->enablePointerify) {
+			// First, flush the value.
+			if (ar[a].isDirty) {
+				ARM64Reg storeReg = ARM64RegForFlush(ar[a].mipsReg);
+				if (storeReg != INVALID_REG)
+					emit_->STR(INDEX_UNSIGNED, storeReg, CTXREG, GetMipsRegOffset(ar[a].mipsReg));
+				ar[a].isDirty = false;
+			}
+
+			// Convert to a pointer by adding the base and clearing off the top bits.
+			// If SP, we can probably avoid the top bit clear, let's play with that later.
+			emit_->ANDI2R(a, a, 0x3FFFFFFF, INVALID_REG);
+			emit_->ADD(ARM64Reg(X0 + (int)a), ARM64Reg(X0 + (int)a), MEMBASEREG);
+			mr[reg].loc = ML_ARMREG_AS_PTR;
+		} else if (!ar[a].pointerified) {
+			emit_->MOVK(ARM64Reg(X0 + (int)a), ((uint64_t)Memory::base) >> 32, SHIFT_32);
 			ar[a].pointerified = true;
 		}
 	} else {
@@ -450,6 +486,7 @@ void Arm64RegCache::FlushArmReg(ARM64Reg r) {
 		mreg.loc = ML_IMM;
 		mreg.reg = INVALID_REG;
 	} else {
+		_assert_msg_(JIT, mreg.loc != ML_ARMREG_AS_PTR, "Cannot flush reg as pointer");
 		// Note: may be a 64-bit reg.
 		ARM64Reg storeReg = ARM64RegForFlush(ar[r].mipsReg);
 		if (storeReg != INVALID_REG)
@@ -476,7 +513,7 @@ void Arm64RegCache::DiscardR(MIPSGPReg mipsReg) {
 		return;
 	}
 	const RegMIPSLoc prevLoc = mr[mipsReg].loc;
-	if (prevLoc == ML_ARMREG || prevLoc == ML_ARMREG_IMM) {
+	if (prevLoc == ML_ARMREG || prevLoc == ML_ARMREG_IMM || prevLoc == ML_ARMREG_AS_PTR) {
 		ARM64Reg armReg = mr[mipsReg].reg;
 		ar[armReg].isDirty = false;
 		ar[armReg].mipsReg = MIPS_REG_INVALID;
@@ -532,6 +569,9 @@ ARM64Reg Arm64RegCache::ARM64RegForFlush(MIPSGPReg r) {
 		}
 		return mr[r].reg;
 
+	case ML_ARMREG_AS_PTR:
+		return INVALID_REG;
+
 	case ML_MEM:
 		return INVALID_REG;
 
@@ -578,6 +618,14 @@ void Arm64RegCache::FlushR(MIPSGPReg r) {
 		ar[mr[r].reg].pointerified = false;
 		break;
 
+	case ML_ARMREG_AS_PTR:
+		// Never dirty.
+		if (ar[mr[r].reg].isDirty) {
+			ERROR_LOG_REPORT(JIT, "ARMREG_AS_PTR cannot be dirty (yet)");
+		}
+		ar[mr[r].reg].mipsReg = MIPS_REG_INVALID;
+		break;
+
 	case ML_MEM:
 		// Already there, nothing to do.
 		break;
@@ -792,7 +840,9 @@ ARM64Reg Arm64RegCache::R(MIPSGPReg mipsReg) {
 }
 
 ARM64Reg Arm64RegCache::RPtr(MIPSGPReg mipsReg) {
-	if (mr[mipsReg].loc == ML_ARMREG || mr[mipsReg].loc == ML_ARMREG_IMM) {
+	if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+		return (ARM64Reg)mr[mipsReg].reg;
+	} else if (mr[mipsReg].loc == ML_ARMREG || mr[mipsReg].loc == ML_ARMREG_IMM) {
 		int a = mr[mipsReg].reg;
 		if (ar[a].pointerified) {
 			return (ARM64Reg)mr[mipsReg].reg;
diff --git a/Core/MIPS/ARM64/Arm64RegCache.h b/Core/MIPS/ARM64/Arm64RegCache.h
index d58a650688cc..e4bfc2e8cf53 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.h
+++ b/Core/MIPS/ARM64/Arm64RegCache.h
@@ -41,6 +41,8 @@ enum {
 enum RegMIPSLoc {
 	ML_IMM,
 	ML_ARMREG,
+	// In an arm reg, but an adjusted pointer (not pointerified - unaligned.)
+	ML_ARMREG_AS_PTR,
 	// In an arm reg, but also has a known immediate value.
 	ML_ARMREG_IMM,
 	ML_MEM,
diff --git a/Core/MIPS/JitCommon/JitState.cpp b/Core/MIPS/JitCommon/JitState.cpp
index 1d147d4c171b..79a8daa8e736 100644
--- a/Core/MIPS/JitCommon/JitState.cpp
+++ b/Core/MIPS/JitCommon/JitState.cpp
@@ -51,8 +51,14 @@ namespace MIPSComp {
 		continueMaxInstructions = 300;
 
 		useStaticAlloc = false;
+		enablePointerify = false;
 #if PPSSPP_ARCH(ARM64)
 		useStaticAlloc = true;
+		enablePointerify = true;
+#endif
+#if PPSSPP_PLATFORM(IOS)
+		useStaticAlloc = false;
+		enablePointerify = false;
 #endif
 	}
 }
diff --git a/Core/MIPS/JitCommon/JitState.h b/Core/MIPS/JitCommon/JitState.h
index b1fe63892c36..4e1d2c8898f6 100644
--- a/Core/MIPS/JitCommon/JitState.h
+++ b/Core/MIPS/JitCommon/JitState.h
@@ -198,6 +198,7 @@ namespace MIPSComp {
 		// ARM64 only
 		bool useASIMDVFPU;
 		bool useStaticAlloc;
+		bool enablePointerify;
 
 		// Common
 		bool enableBlocklink;
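What patch 1 makes optional, in miniature: pointerification relies on Memory::base having zero low 32 bits, so a single MOVK of base >> 32 into bits 32-63 of a register holding a zero-extended 32-bit guest address yields a valid host pointer while the low half still reads back as the guest value. When the base can't be aligned like that (the iOS case this series targets), the new ML_ARMREG_AS_PTR path masks the address and adds the base instead, after which the register no longer mirrors the guest value. A standalone C++ sketch of the two computations; the function names are illustrative, not PPSSPP API:

    #include <cstdint>

    // MOVK style: valid only when the low 32 bits of base are zero.
    // The register keeps the guest address intact in its low half.
    static uint8_t *PointerifiedAddr(uintptr_t base, uint32_t guestAddr) {
        return (uint8_t *)((base & 0xFFFFFFFF00000000ULL) | guestAddr);
    }

    // ML_ARMREG_AS_PTR style (the ANDI2R + ADD above): works for any base,
    // but the guest value can no longer be read back from the register,
    // which is why later patches reload it from the context instead.
    static uint8_t *PointerAddedAddr(uintptr_t base, uint32_t guestAddr) {
        return (uint8_t *)(base + (guestAddr & 0x3FFFFFFF));
    }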
From 3fae092ecb3e67144e33c0e1c5cdd4b59f5774bc Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 17:02:29 -0800
Subject: [PATCH 2/8] arm64jit: Only adjust pointers if pointerified.

---
 Core/MIPS/ARM64/Arm64CompALU.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Core/MIPS/ARM64/Arm64CompALU.cpp b/Core/MIPS/ARM64/Arm64CompALU.cpp
index 094686050e00..beeeb7e6bbc6 100644
--- a/Core/MIPS/ARM64/Arm64CompALU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompALU.cpp
@@ -86,7 +86,7 @@ void Arm64Jit::Comp_IType(MIPSOpcode op) {
 	case 8:	// same as addiu?
 	case 9:	// R(rt) = R(rs) + simm; break;	//addiu
 		// Special-case for small adjustments of pointerified registers. Commonly for SP but happens for others.
-		if (rs == rt && gpr.IsMappedAsPointer(rs) && IsImmArithmetic(simm < 0 ? -simm : simm, nullptr, nullptr)) {
+		if (rs == rt && jo.enablePointerify && gpr.IsMappedAsPointer(rs) && IsImmArithmetic(simm < 0 ? -simm : simm, nullptr, nullptr)) {
 			ARM64Reg r32 = gpr.R(rs);
 			gpr.MarkDirty(r32);
 			ARM64Reg r = EncodeRegTo64(r32);
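A note on why patch 2's new jo.enablePointerify check is needed rather than cosmetic: after patch 1, IsMappedAsPointer() also answers true for ML_ARMREG_AS_PTR, but such a register holds Memory::base plus the masked address, so its low 32 bits no longer equal the guest register's value. The addiu special case adjusts the register in place and marks the 32-bit view dirty; that is only sound in the MOVK-pointerified form, where the low half still is the guest value. Without the guard, flushing an AS_PTR register adjusted this way would write base-relative pointer bits back into the MIPS context.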
From 9573a791b41297cf6f946f9725d5671fbb9dc4e6 Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 17:15:18 -0800
Subject: [PATCH 3/8] arm64jit: Skip storing spilled but not dirty.

Unless IMM, we don't need to store non-dirty mapped regs.
---
 Core/MIPS/ARM64/Arm64RegCache.cpp | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/Core/MIPS/ARM64/Arm64RegCache.cpp b/Core/MIPS/ARM64/Arm64RegCache.cpp
index 5754d3212bb6..8d375d03f00f 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.cpp
+++ b/Core/MIPS/ARM64/Arm64RegCache.cpp
@@ -486,11 +486,13 @@ void Arm64RegCache::FlushArmReg(ARM64Reg r) {
 		mreg.loc = ML_IMM;
 		mreg.reg = INVALID_REG;
 	} else {
-		_assert_msg_(JIT, mreg.loc != ML_ARMREG_AS_PTR, "Cannot flush reg as pointer");
-		// Note: may be a 64-bit reg.
-		ARM64Reg storeReg = ARM64RegForFlush(ar[r].mipsReg);
-		if (storeReg != INVALID_REG)
-			emit_->STR(INDEX_UNSIGNED, storeReg, CTXREG, GetMipsRegOffset(ar[r].mipsReg));
+		if (mreg.loc == ML_IMM || ar[r].isDirty) {
+			_assert_msg_(JIT, mreg.loc != ML_ARMREG_AS_PTR, "Cannot flush reg as pointer");
+			// Note: may be a 64-bit reg.
+			ARM64Reg storeReg = ARM64RegForFlush(ar[r].mipsReg);
+			if (storeReg != INVALID_REG)
+				emit_->STR(INDEX_UNSIGNED, storeReg, CTXREG, GetMipsRegOffset(ar[r].mipsReg));
+		}
 		mreg.loc = ML_MEM;
 		mreg.reg = INVALID_REG;
 		mreg.imm = 0;
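The spill rule patch 3 tightens, reduced to its decision logic (an illustrative sketch, not the register cache's actual types): a mapped register needs a store on spill only if its cached value has diverged from memory, while a known immediate must always be materialized, since memory may never have held it.

    // Sketch of the patched FlushArmReg() condition.
    struct MappedReg {
        enum Loc { MEM, ARMREG, ARMREG_IMM, IMM } loc;
        bool isDirty;  // host register written since it was loaded
    };

    static bool NeedsStoreOnSpill(const MappedReg &r) {
        // Patch 3: clean, non-immediate registers are simply dropped.
        return r.loc == MappedReg::IMM || r.isDirty;
    }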
From d82efc4b0b43419507f640d8bf15b93a5c97e2c9 Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 17:29:18 -0800
Subject: [PATCH 4/8] arm64jit: Allow static alloc without pointerify.

---
 Core/MIPS/ARM64/Arm64RegCache.cpp | 39 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 34 insertions(+), 5 deletions(-)

diff --git a/Core/MIPS/ARM64/Arm64RegCache.cpp b/Core/MIPS/ARM64/Arm64RegCache.cpp
index 8d375d03f00f..7493a4ad5aff 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.cpp
+++ b/Core/MIPS/ARM64/Arm64RegCache.cpp
@@ -57,7 +57,7 @@ void Arm64RegCache::Start(MIPSAnalyst::AnalysisResults &stats) {
 	const StaticAllocation *statics = GetStaticAllocations(numStatics);
 	for (int i = 0; i < numStatics; i++) {
 		ar[statics[i].ar].mipsReg = statics[i].mr;
-		ar[statics[i].ar].pointerified = statics[i].pointerified;
+		ar[statics[i].ar].pointerified = statics[i].pointerified && jo_->enablePointerify;
 		mr[statics[i].mr].loc = ML_ARMREG;
 		mr[statics[i].mr].reg = statics[i].ar;
 		mr[statics[i].mr].isStatic = true;
@@ -110,7 +110,7 @@ void Arm64RegCache::EmitLoadStaticRegisters() {
 	for (int i = 0; i < count; i++) {
 		int offset = GetMipsRegOffset(allocs[i].mr);
 		emit_->LDR(INDEX_UNSIGNED, allocs[i].ar, CTXREG, offset);
-		if (allocs[i].pointerified) {
+		if (allocs[i].pointerified && jo_->enablePointerify) {
 			emit_->MOVK(EncodeRegTo64(allocs[i].ar), ((uint64_t)Memory::base) >> 32, SHIFT_32);
 		}
 	}
@@ -290,6 +290,17 @@ ARM64Reg Arm64RegCache::MapReg(MIPSGPReg mipsReg, int mapFlags) {
 			mr[mipsReg].loc = ML_ARMREG_IMM;
 			ar[armReg].pointerified = false;
 		}
+	} else if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+		// Was mapped as pointer, now we want it mapped as a value, presumably to
+		// add or subtract stuff to it.
+		if ((mapFlags & MAP_NOINIT) != MAP_NOINIT) {
+			ARM64Reg loadReg = armReg;
+			if (mipsReg == MIPS_REG_LO) {
+				loadReg = EncodeRegTo64(loadReg);
+			}
+			emit_->LDR(INDEX_UNSIGNED, loadReg, CTXREG, GetMipsRegOffset(mipsReg));
+		}
+		mr[mipsReg].loc = ML_ARMREG;
 	}
 	// Erasing the imm on dirty (necessary since otherwise we will still think it's ML_ARMREG_IMM and return
 	// true for IsImm and calculate crazily wrong things).  /unknown
@@ -504,14 +515,24 @@ void Arm64RegCache::FlushArmReg(ARM64Reg r) {
 }
 
 void Arm64RegCache::DiscardR(MIPSGPReg mipsReg) {
 	if (mr[mipsReg].isStatic) {
-		// Simply do nothing unless it's an IMM or ARMREG_IMM, in case we just switch it over to ARMREG, losing the value.
+		// Simply do nothing unless it's an IMM/ARMREG_IMM/ARMREG_AS_PTR, in case we just switch it over to ARMREG, losing the value.
+		ARM64Reg armReg = mr[mipsReg].reg;
 		if (mr[mipsReg].loc == ML_ARMREG_IMM || mr[mipsReg].loc == ML_IMM) {
-			ARM64Reg armReg = mr[mipsReg].reg;
 			// Ignore the imm value, restore sanity
 			mr[mipsReg].loc = ML_ARMREG;
 			ar[armReg].pointerified = false;
 			ar[armReg].isDirty = false;
 		}
+		if (mr[mipsReg].loc == ML_ARMREG_AS_PTR) {
+			ARM64Reg loadReg = armReg;
+			if (mipsReg == MIPS_REG_LO) {
+				loadReg = EncodeRegTo64(loadReg);
+			}
+			emit_->LDR(INDEX_UNSIGNED, loadReg, CTXREG, GetMipsRegOffset(mipsReg));
+			mr[mipsReg].loc = ML_ARMREG;
+			ar[armReg].pointerified = false;
+			ar[armReg].isDirty = false;
+		}
 		return;
 	}
@@ -704,6 +725,14 @@ void Arm64RegCache::FlushAll() {
 				ar[armReg].pointerified = false;
 			}
 			mr[i].loc = ML_ARMREG;
+		} else if (mr[i].loc == ML_ARMREG_AS_PTR) {
+			// Need to reload the register (could also subtract, TODO...)
+			ARM64Reg loadReg = armReg;
+			if (mipsReg == MIPS_REG_LO) {
+				loadReg = EncodeRegTo64(loadReg);
+			}
+			emit_->LDR(INDEX_UNSIGNED, loadReg, CTXREG, GetMipsRegOffset(i));
+			mr[i].loc = ML_ARMREG;
 		}
 		if (i != MIPS_REG_ZERO && mr[i].reg == INVALID_REG) {
 			ELOG("ARM reg of static %i is invalid", i);
@@ -717,7 +746,7 @@ void Arm64RegCache::FlushAll() {
 	int count = 0;
 	const StaticAllocation *allocs = GetStaticAllocations(count);
 	for (int i = 0; i < count; i++) {
-		if (allocs[i].pointerified && !ar[allocs[i].ar].pointerified) {
+		if (allocs[i].pointerified && !ar[allocs[i].ar].pointerified && jo_->enablePointerify) {
 			// Re-pointerify
 			emit_->MOVK(EncodeRegTo64(allocs[i].ar), ((uint64_t)Memory::base) >> 32, SHIFT_32);
 			ar[allocs[i].ar].pointerified = true;
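On the "(could also subtract, TODO...)" comment in the FlushAll() hunk above: subtracting the base looks cheaper than a reload, but it cannot round-trip, because MapRegAsPointer() masked the value with 0x3FFFFFFF before adding the base. A small standalone illustration (hypothetical helper, not PPSSPP code):

    #include <cstdint>

    // What subtraction would recover from an AS_PTR register: only the
    // low 30 bits of the guest value survive the 0x3FFFFFFF mask, so any
    // higher address bits are lost -- hence patch 4 reloads from the
    // context instead of subtracting.
    static uint32_t RecoverBySubtract(uintptr_t ptrReg, uintptr_t memBase) {
        return (uint32_t)(ptrReg - memBase);  // == guestValue & 0x3FFFFFFF
    }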
From 7c2fc90def793a7c37af63368a11aa75491288cf Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 17:54:44 -0800
Subject: [PATCH 5/8] arm64jit: Avoid MOVK elsewhere without pointerify.

---
 Core/MIPS/ARM64/Arm64CompFPU.cpp  | 12 ++++++++++--
 Core/MIPS/ARM64/Arm64CompVFPU.cpp | 24 ++++++++++++++++++++----
 Core/MIPS/ARM64/Arm64RegCache.cpp |  2 +-
 3 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/Core/MIPS/ARM64/Arm64CompFPU.cpp b/Core/MIPS/ARM64/Arm64CompFPU.cpp
index bab68ca30234..f6cc5920f75d 100644
--- a/Core/MIPS/ARM64/Arm64CompFPU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompFPU.cpp
@@ -111,7 +111,11 @@ void Arm64Jit::Comp_FPULS(MIPSOpcode op)
 			} else {
 				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 			}
-			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			if (jo.enablePointerify) {
+				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			} else {
+				ADD(SCRATCH1_64, SCRATCH1_64, MEMBASEREG);
+			}
 		}
 		fp.LDR(32, INDEX_UNSIGNED, fpr.R(ft), SCRATCH1_64, 0);
 		for (auto skip : skips) {
@@ -139,7 +143,11 @@ void Arm64Jit::Comp_FPULS(MIPSOpcode op)
 			} else {
 				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 			}
-			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			if (jo.enablePointerify) {
+				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			} else {
+				ADD(SCRATCH1_64, SCRATCH1_64, MEMBASEREG);
+			}
 		}
 		fp.STR(32, INDEX_UNSIGNED, fpr.R(ft), SCRATCH1_64, 0);
 		for (auto skip : skips) {
diff --git a/Core/MIPS/ARM64/Arm64CompVFPU.cpp b/Core/MIPS/ARM64/Arm64CompVFPU.cpp
index 53105681a227..62f0cc9b5f34 100644
--- a/Core/MIPS/ARM64/Arm64CompVFPU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompVFPU.cpp
@@ -231,7 +231,11 @@ namespace MIPSComp {
 				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 			}
 			// Pointerify
-			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			if (jo.enablePointerify) {
+				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			} else {
+				ADD(SCRATCH1_64, SCRATCH1_64, MEMBASEREG);
+			}
 		}
 		fp.LDR(32, INDEX_UNSIGNED, fpr.V(vt), SCRATCH1_64, 0);
 		for (auto skip : skips) {
@@ -261,7 +265,11 @@ namespace MIPSComp {
 			} else {
 				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
 			}
-			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			if (jo.enablePointerify) {
+				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			} else {
+				ADD(SCRATCH1_64, SCRATCH1_64, MEMBASEREG);
+			}
 		}
 		fp.STR(32, INDEX_UNSIGNED, fpr.V(vt), SCRATCH1_64, 0);
 		for (auto skip : skips) {
@@ -303,7 +311,11 @@ namespace MIPSComp {
 			} else {
 				skips = SetScratch1ForSafeAddress(rs, imm, SCRATCH2);
 			}
-			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			if (jo.enablePointerify) {
+				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			} else {
+				ADD(SCRATCH1_64, SCRATCH1_64, MEMBASEREG);
+			}
 		}
 
 		fp.LDP(32, INDEX_SIGNED, fpr.V(vregs[0]), fpr.V(vregs[1]), SCRATCH1_64, 0);
@@ -332,7 +344,11 @@ namespace MIPSComp {
 			} else {
 				skips = SetScratch1ForSafeAddress(rs, imm, SCRATCH2);
 			}
-			MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			if (jo.enablePointerify) {
+				MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
+			} else {
+				ADD(SCRATCH1_64, SCRATCH1_64, MEMBASEREG);
+			}
 		}
 
 		fp.STP(32, INDEX_SIGNED, fpr.V(vregs[0]), fpr.V(vregs[1]), SCRATCH1_64, 0);
diff --git a/Core/MIPS/ARM64/Arm64RegCache.cpp b/Core/MIPS/ARM64/Arm64RegCache.cpp
index 7493a4ad5aff..b3f31d8a671a 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.cpp
+++ b/Core/MIPS/ARM64/Arm64RegCache.cpp
@@ -731,7 +731,7 @@ void Arm64RegCache::FlushAll() {
 			if (mipsReg == MIPS_REG_LO) {
 				loadReg = EncodeRegTo64(loadReg);
 			}
-			emit_->LDR(INDEX_UNSIGNED, loadReg, CTXREG, GetMipsRegOffset(i));
+			emit_->LDR(INDEX_UNSIGNED, loadReg, CTXREG, GetMipsRegOffset(MIPSGPReg(i)));
 			mr[i].loc = ML_ARMREG;
 		}
 		if (i != MIPS_REG_ZERO && mr[i].reg == INVALID_REG) {
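Patch 5 applies one transformation at six FPU/VFPU call sites: SCRATCH1 already holds a zero-extended 32-bit guest address, and it must become a host pointer before the load or store. Factored out, the repeated branch could look like the hypothetical helper below (it only uses calls that appear in the hunks; upstream keeps the two lines inline at each site):

    // Hypothetical helper for the pattern repeated in patch 5.
    void Arm64Jit::Scratch1ToPointer() {
        if (jo.enablePointerify) {
            // Aligned base: merge its top 32 bits over the address.
            MOVK(SCRATCH1_64, ((uint64_t)Memory::base) >> 32, SHIFT_32);
        } else {
            // Unaligned base: plain pointer arithmetic off MEMBASEREG.
            ADD(SCRATCH1_64, SCRATCH1_64, MEMBASEREG);
        }
    }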
From 4a27e99ee962f82f42127b683c6e9179ba50a97f Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 19:30:10 -0800
Subject: [PATCH 6/8] arm64jit: Remove buggy icache invalidate.

This is always of size 0, and crashes on iOS anyway...
---
 Common/Arm64Emitter.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Common/Arm64Emitter.cpp b/Common/Arm64Emitter.cpp
index ecff815da199..165ab81b0b2c 100644
--- a/Common/Arm64Emitter.cpp
+++ b/Common/Arm64Emitter.cpp
@@ -3906,7 +3906,6 @@ void ARM64CodeBlock::PoisonMemory(int offset) {
 	// AArch64: 0xD4200000 = BRK 0
 	while (ptr < maxptr)
 		*ptr++ = 0xD4200000;
-	FlushIcacheSection((u8 *)ptr, (u8 *)maxptr);
 }
 
 }  // namespace

From 257a4fdd127791805e02f7db60b3672526c8e94c Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 19:33:02 -0800
Subject: [PATCH 7/8] arm64jit: Reprotect fixed code after icache flush.

---
 Common/CodeBlock.h           | 4 ++++
 Core/MIPS/ARM64/Arm64Jit.cpp | 1 +
 2 files changed, 5 insertions(+)

diff --git a/Common/CodeBlock.h b/Common/CodeBlock.h
index 03ae3f860631..a0f5dd55f4c1 100644
--- a/Common/CodeBlock.h
+++ b/Common/CodeBlock.h
@@ -68,6 +68,10 @@ template <class T> class CodeBlock : public CodeBlockCommon, public T {
 		// If not WX Exclusive, no need to call ProtectMemoryPages because we never change the protection from RWX.
 		PoisonMemory(offset);
 		ResetCodePtr(offset);
+		if (PlatformIsWXExclusive()) {
+			// Need to re-protect the part we didn't clear.
+			ProtectMemoryPages(region, offset, MEM_PROT_READ | MEM_PROT_EXEC);
+		}
 	}
 
 	// BeginWrite/EndWrite assume that we keep appending.
diff --git a/Core/MIPS/ARM64/Arm64Jit.cpp b/Core/MIPS/ARM64/Arm64Jit.cpp
index ae9b6b8f4e19..d656c0f9518e 100644
--- a/Core/MIPS/ARM64/Arm64Jit.cpp
+++ b/Core/MIPS/ARM64/Arm64Jit.cpp
@@ -129,6 +129,7 @@ void Arm64Jit::ClearCache() {
 	ILOG("ARM64Jit: Clearing the cache!");
 	blocks.Clear();
 	ClearCodeSpace(jitStartOffset);
+	FlushIcacheSection(region + jitStartOffset, region + region_size - jitStartOffset);
 }
 
 void Arm64Jit::InvalidateCacheAt(u32 em_address, int length) {

From 092f98d313386219e95040432fe91470070b578c Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Wed, 27 Dec 2017 19:34:43 -0800
Subject: [PATCH 8/8] arm64jit: Fix an integer truncation warning.

---
 Core/MIPS/ARM64/Arm64CompFPU.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Core/MIPS/ARM64/Arm64CompFPU.cpp b/Core/MIPS/ARM64/Arm64CompFPU.cpp
index f6cc5920f75d..c9194b8ebea7 100644
--- a/Core/MIPS/ARM64/Arm64CompFPU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompFPU.cpp
@@ -357,7 +357,8 @@ void Arm64Jit::Comp_mxc1(MIPSOpcode op)
 
 	case 4: //FI(fs) = R(rt);	break; //mtc1
 		if (gpr.IsImm(rt)) {
-			uint32_t ival = gpr.GetImm(rt);
+			// This can't be run on LO/HI.
+			uint32_t ival = (uint32_t)gpr.GetImm(rt);
 			float floatval;
 			memcpy(&floatval, &ival, sizeof(floatval));
 			uint8_t imm8;
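A closing note on the icache changes in patches 6 and 7: the FlushIcacheSection() call deleted from PoisonMemory() was dead code because the fill loop has already advanced ptr to maxptr, so the flushed range was empty -- the "always of size 0" in the commit message. The flush added in ClearCache() covers the freshly poisoned JIT space instead, and on WX-exclusive platforms (iOS again) the CodeBlock.h hunk restores read+execute protection after the clear. The removed no-op, annotated:

    // Context from Arm64Emitter.cpp around the line patch 6 deletes:
    while (ptr < maxptr)
        *ptr++ = 0xD4200000;  // AArch64 BRK 0
    // At this point ptr == maxptr, so the old call flushed zero bytes:
    //   FlushIcacheSection((u8 *)ptr, (u8 *)maxptr);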