Commit 0b0b088

Assert aligned sp offsets in vector spilling
1 parent a2c2c99 commit 0b0b088

2 files changed: +14 -30 lines

src/hotspot/cpu/aarch64/aarch64.ad

Lines changed: 3 additions & 0 deletions
@@ -2007,6 +2007,9 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
   if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
     uint ireg = ideal_reg();
+    DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ireg), (int)Matcher::stack_alignment_in_bytes()));
+    assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
+    assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
     if (ireg == Op_VecA && masm) {
       int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
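For reference, here is a minimal standalone sketch, not HotSpot code, of the predicate the new debug-only asserts evaluate. The 16-byte values and the helper names below are assumptions for illustration; in the real code the inputs come from RegMask::num_registers(ireg) and Matcher::stack_alignment_in_bytes(), and only operands in the rc_stack register class are constrained.

    // Standalone sketch (not HotSpot code) of the alignment predicate.
    #include <algorithm>
    #include <cstdio>

    enum RegClass { rc_stack, rc_vec };  // simplified register classes

    static bool is_aligned(int offset, int alignment) {
      return offset % alignment == 0;
    }

    static bool spill_offset_ok(RegClass rc, int offset, int algm) {
      // Only operands that actually live on the stack are constrained;
      // register operands pass trivially, mirroring the guard in the assert.
      return (rc != rc_stack) || is_aligned(offset, algm);
    }

    int main() {
      const int num_registers   = 16;  // assumption: spill footprint of the vector
      const int stack_alignment = 16;  // assumption: sp alignment in bytes
      // Never demand more alignment than the stack itself guarantees:
      const int algm = std::min(num_registers, stack_alignment);

      std::printf("stack operand at offset 32: %s\n",
                  spill_offset_ok(rc_stack, 32, algm) ? "ok" : "would assert");
      std::printf("stack operand at offset 24: %s\n",
                  spill_offset_ok(rc_stack, 24, algm) ? "ok" : "would assert");
      std::printf("vector operand at offset 24: %s\n",
                  spill_offset_ok(rc_vec, 24, algm) ? "ok" : "would assert");
      return 0;
    }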

src/hotspot/cpu/ppc/ppc.ad

Lines changed: 11 additions & 30 deletions
@@ -1795,10 +1795,13 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
     return size; // Self copy, no move.

   if (bottom_type()->isa_vect() != nullptr && ideal_reg() == Op_VecX) {
+    int src_offset = ra_->reg2offset(src_lo);
+    int dst_offset = ra_->reg2offset(dst_lo);
+    DEBUG_ONLY(int algm = MIN2(RegMask::num_registers(ideal_reg()), (int)Matcher::stack_alignment_in_bytes()));
+    assert((src_lo_rc != rc_stack) || is_aligned(src_offset, algm), "unaligned vector spill sp offset %d (src)", src_offset);
+    assert((dst_lo_rc != rc_stack) || is_aligned(dst_offset, algm), "unaligned vector spill sp offset %d (dst)", dst_offset);
     // Memory->Memory Spill.
     if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
-      int src_offset = ra_->reg2offset(src_lo);
-      int dst_offset = ra_->reg2offset(dst_lo);
       if (masm) {
         __ ld(R0, src_offset, R1_SP);
         __ std(R0, dst_offset, R1_SP);
@@ -1810,23 +1813,11 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
     // VectorRegister->Memory Spill.
     else if (src_lo_rc == rc_vec && dst_lo_rc == rc_stack) {
       VectorSRegister Rsrc = as_VectorRegister(Matcher::_regEncode[src_lo]).to_vsr();
-      int dst_offset = ra_->reg2offset(dst_lo);
       if (PowerArchitecturePPC64 >= 9) {
-        if (is_aligned(dst_offset, 16)) {
-          if (masm) {
-            __ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
-          }
-          size += 4;
-        } else {
-          // Other alignment can be used by Vector API (VectorPayload in rearrangeOp,
-          // observed with VectorRearrangeTest.java on Power9).
-          if (masm) {
-            log_develop_trace(newcode)("unaligned vector spill (sp offset:%d)", dst_offset);
-            __ addi(R0, R1_SP, dst_offset);
-            __ stxvx(Rsrc, R0); // matches storeV16_Power9 (regarding element ordering)
-          }
-          size += 8;
+        if (masm) {
+          __ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
         }
+        size += 4;
       } else {
         if (masm) {
           __ addi(R0, R1_SP, dst_offset);
@@ -1844,21 +1835,11 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r
     // Memory->VectorRegister Spill.
     else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vec) {
       VectorSRegister Rdst = as_VectorRegister(Matcher::_regEncode[dst_lo]).to_vsr();
-      int src_offset = ra_->reg2offset(src_lo);
       if (PowerArchitecturePPC64 >= 9) {
-        if (is_aligned(src_offset, 16)) {
-          if (masm) {
-            __ lxv(Rdst, src_offset, R1_SP);
-          }
-          size += 4;
-        } else {
-          if (masm) {
-            log_develop_trace(newcode)("unaligned vector spill (sp offset:%d)", src_offset);
-            __ addi(R0, R1_SP, src_offset);
-            __ lxvx(Rdst, R0);
-          }
-          size += 8;
+        if (masm) {
+          __ lxv(Rdst, src_offset, R1_SP);
         }
+        size += 4;
       } else {
         if (masm) {
           __ addi(R0, R1_SP, src_offset);
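Design note: on POWER9 the direct-displacement forms stxv/lxv encode a displacement that must be a multiple of 16, which is presumably why the removed fallback materialized the address with addi and used the indexed forms stxvx/lxvx for other offsets. With the new asserts guaranteeing aligned sp offsets, that fallback is dead code; dropping it also removes the log_develop_trace(newcode) tracing and shrinks the emitted spill from 8 bytes (two instructions) to 4, matching the size accounting in the diff.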
