Skip to content

Commit 9f89682

Browse files
committed
8369506: Bytecode rewriting causes Java heap corruption on AArch64
Reviewed-by: aph, jcking
Backport-of: e6ef74bd722c69f8b0cf144e0b5eba95d30dcd39
1 parent 98532ee commit 9f89682

File tree

3 files changed

+29
-2
lines changed

3 files changed

+29
-2
lines changed

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1850,3 +1850,15 @@ void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register t
     bind(profile_continue);
   }
 }
+
+#ifdef ASSERT
+void InterpreterMacroAssembler::verify_field_offset(Register reg) {
+  // Verify the field offset is not in the header, implicitly checks for 0
+  Label L;
+  subs(zr, reg, static_cast<int>(sizeof(markWord) + (UseCompressedClassPointers ? sizeof(narrowKlass) : sizeof(Klass*))));
+  br(Assembler::GE, L);
+  stop("bad field offset");
+  bind(L);
+}
+#endif

src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -292,6 +292,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
     set_last_Java_frame(esp, rfp, (address) pc(), rscratch1);
     MacroAssembler::_call_Unimplemented(call_site);
   }
+
+  void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
 };

 #endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP

src/hotspot/cpu/aarch64/templateTable_aarch64.cpp

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                    int byte_no)
 {
+  assert_different_registers(bc_reg, temp_reg);
   if (!RewriteBytecodes) return;
   Label L_patch_done;

@@ -222,8 +223,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
   __ bind(L_okay);
 #endif

-  // patch bytecode
-  __ strb(bc_reg, at_bcp(0));
+  // Patch bytecode with release store to coordinate with ConstantPoolCacheEntry loads
+  // in fast bytecode codelets. The fast bytecode codelets have a memory barrier that gains
+  // the needed ordering, together with control dependency on entering the fast codelet
+  // itself.
+  __ lea(temp_reg, at_bcp(0));
+  __ stlrb(bc_reg, temp_reg);
   __ bind(L_patch_done);
 }

@@ -2914,6 +2919,7 @@ void TemplateTable::fast_storefield(TosState state)

   // replace index with field offset from cache entry
   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);

   {
     Label notVolatile;
@@ -3007,6 +3013,8 @@ void TemplateTable::fast_accessfield(TosState state)

   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);
+
   __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset())));

@@ -3074,8 +3082,13 @@ void TemplateTable::fast_xaccess(TosState state)
   __ ldr(r0, aaddress(0));
   // access constant pool cache
   __ get_cache_and_index_at_bcp(r2, r3, 2);
+
+  // Must prevent reordering of the following cp cache loads with bytecode load
+  __ membar(MacroAssembler::LoadLoad);
+
   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);

   // 8179954: We need to make sure that the code generated for
   // volatile accesses forms a sequentially-consistent set of

0 commit comments

Comments
 (0)