Skip to content

Commit e6ef74b

Browse files
jcking authored and TheRealMDoerr committed
8369506: Bytecode rewriting causes Java heap corruption on AArch64
Reviewed-by: mdoerr
Backport-of: 18fd04770294e27011bd576b5ea5fe43fa03e5e3
1 parent c5275d3 commit e6ef74b

File tree

3 files changed

+29
-2
lines changed

3 files changed

+29
-2
lines changed

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1892,3 +1892,15 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe
18921892
add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
18931893
lea(cache, Address(cache, index));
18941894
}
1895+
1896+
#ifdef ASSERT
1897+
void InterpreterMacroAssembler::verify_field_offset(Register reg) {
1898+
// Verify the field offset is not in the header, implicitly checks for 0
1899+
Label L;
1900+
subs(zr, reg, static_cast<int>(sizeof(markWord) + (UseCompressedClassPointers ? sizeof(narrowKlass) : sizeof(Klass*))));
1901+
br(Assembler::GE, L);
1902+
stop("bad field offset");
1903+
bind(L);
1904+
}
1905+
#endif
1906+

src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -321,6 +321,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
321321
}
322322

323323
void load_resolved_indy_entry(Register cache, Register index);
324+
325+
void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
324326
};
325327

326328
#endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP

src/hotspot/cpu/aarch64/templateTable_aarch64.cpp

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -166,6 +166,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
166166
Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
167167
int byte_no)
168168
{
169+
assert_different_registers(bc_reg, temp_reg);
169170
if (!RewriteBytecodes) return;
170171
Label L_patch_done;
171172

@@ -223,8 +224,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
223224
__ bind(L_okay);
224225
#endif
225226

226-
// patch bytecode
227-
__ strb(bc_reg, at_bcp(0));
227+
// Patch bytecode with release store to coordinate with ConstantPoolCacheEntry loads
228+
// in fast bytecode codelets. The fast bytecode codelets have a memory barrier that gains
229+
// the needed ordering, together with control dependency on entering the fast codelet
230+
// itself.
231+
__ lea(temp_reg, at_bcp(0));
232+
__ stlrb(bc_reg, temp_reg);
228233
__ bind(L_patch_done);
229234
}
230235

@@ -2982,6 +2987,7 @@ void TemplateTable::fast_storefield(TosState state)
29822987

29832988
// replace index with field offset from cache entry
29842989
__ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2990+
__ verify_field_offset(r1);
29852991

29862992
{
29872993
Label notVolatile;
@@ -3075,6 +3081,8 @@ void TemplateTable::fast_accessfield(TosState state)
30753081

30763082
__ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
30773083
ConstantPoolCacheEntry::f2_offset())));
3084+
__ verify_field_offset(r1);
3085+
30783086
__ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
30793087
ConstantPoolCacheEntry::flags_offset())));
30803088

@@ -3142,8 +3150,13 @@ void TemplateTable::fast_xaccess(TosState state)
31423150
__ ldr(r0, aaddress(0));
31433151
// access constant pool cache
31443152
__ get_cache_and_index_at_bcp(r2, r3, 2);
3153+
3154+
// Must prevent reordering of the following cp cache loads with bytecode load
3155+
__ membar(MacroAssembler::LoadLoad);
3156+
31453157
__ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
31463158
ConstantPoolCacheEntry::f2_offset())));
3159+
__ verify_field_offset(r1);
31473160

31483161
// 8179954: We need to make sure that the code generated for
31493162
// volatile accesses forms a sequentially-consistent set of

0 commit comments

Comments (0)