[AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bits=128. #134068

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
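
With the vector length fixed at 128 bits, each SVE Z register holds exactly 16 bytes, so consecutive SVE fills/spills address adjacent 16-byte slots and can be rewritten as a single NEON Q-form pair on little-endian targets. The transformation, as summarized by the comment this patch adds to optimizeBlock:

    ldr z0, [x2]
    ldr z1, [x2, #1, mul vl]
    ; becomes
    ldp q0, q1, [x2]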
15 changes: 15 additions & 0 deletions llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2759,6 +2759,9 @@ bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
   case AArch64::LDRXpre:
   case AArch64::LDURSWi:
   case AArch64::LDRSWpre:
+  // SVE instructions.
+  case AArch64::LDR_ZXI:
+  case AArch64::STR_ZXI:
     return true;
   }
 }
@@ -2911,6 +2914,18 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
     return false;
   }
 
+  // Pairing SVE fills/spills is only valid for little-endian targets that
+  // implement VLS 128.
+  switch (MI.getOpcode()) {
+  default:
+    break;
+  case AArch64::LDR_ZXI:
+  case AArch64::STR_ZXI:
+    if (!Subtarget.isLittleEndian() ||
+        Subtarget.getSVEVectorSizeInBits() != 128)
+      return false;
+  }
+
   // Check if this load/store has a hint to avoid pair formation.
   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
   if (isLdStPairSuppressed(MI))
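
The exact-size query used here yields a nonzero value only when the minimum and maximum SVE vector widths agree. At the llc level that is what the RUN lines in the test below establish, and clang's -msve-vector-bits=128 sets up the same guarantee (the input file name is a placeholder):

    llc -mtriple=aarch64-linux-gnu -mattr=+sve \
        -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 input.ll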
24 changes: 23 additions & 1 deletion llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -300,6 +300,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
   case AArch64::STRXui:
   case AArch64::STRXpre:
   case AArch64::STURXi:
+  case AArch64::STR_ZXI:
   case AArch64::LDRDui:
   case AArch64::LDURDi:
   case AArch64::LDRDpre:
@@ -318,6 +319,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
   case AArch64::LDRSui:
   case AArch64::LDURSi:
   case AArch64::LDRSpre:
+  case AArch64::LDR_ZXI:
     return Opc;
   case AArch64::LDRSWui:
     return AArch64::LDRWui;
@@ -363,6 +365,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
     return AArch64::STPDpre;
   case AArch64::STRQui:
   case AArch64::STURQi:
+  case AArch64::STR_ZXI:
     return AArch64::STPQi;
   case AArch64::STRQpre:
     return AArch64::STPQpre;
@@ -388,6 +391,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
     return AArch64::LDPDpre;
   case AArch64::LDRQui:
   case AArch64::LDURQi:
+  case AArch64::LDR_ZXI:
     return AArch64::LDPQi;
   case AArch64::LDRQpre:
     return AArch64::LDPQpre;
@@ -1227,6 +1231,16 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
     (void)MIBSXTW;
     LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
     LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
+  } else if (Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI) {
+    // We are combining SVE fill/spill to LDP/STP, so we need to use the Q
+    // variant of the registers.
+    MachineOperand &MOp0 = MIB->getOperand(0);
+    MachineOperand &MOp1 = MIB->getOperand(1);
+    assert(AArch64::ZPRRegClass.contains(MOp0.getReg()) &&
+           AArch64::ZPRRegClass.contains(MOp1.getReg()) && "Invalid register.");
+    MOp0.setReg(AArch64::Q0 + (MOp0.getReg() - AArch64::Z0));
+    MOp1.setReg(AArch64::Q0 + (MOp1.getReg() - AArch64::Z0));
Comment on lines +1234 to +1242

Collaborator:

Can this be done earlier, e.g. applied to RegOp0 and RegOp1 just before they are added to MIB?

If not then this block needs

    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));

so the result gets dumped.

Contributor Author:

I think the reason I didn't place this earlier is that the machine verifier segfaults in verifyUseList if I do so, although I believe it should work if I create a copy of the operands (say, with MachineOperand::CreateReg) instead of using setReg.

For now I've just added the missing debug message, but please let me know if you'd like it done differently.
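
For reference, a sketch of the alternative floated above, building the Q-register operands before they are added to the MIB; this is hypothetical and untested, not part of the patch, with IsLoadPair standing in for a suitable load/store flag:

    // Hypothetical: derive the Q aliases from RegOp0/RegOp1 up front so the
    // pair instruction is built directly with the renamed registers.
    Register QReg0 = AArch64::Q0 + (RegOp0.getReg() - AArch64::Z0);
    Register QReg1 = AArch64::Q0 + (RegOp1.getReg() - AArch64::Z0);
    MIB.addReg(QReg0, getDefRegState(IsLoadPair))  // def for ldp, use for stp
       .addReg(QReg1, getDefRegState(IsLoadPair));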

+    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
   } else {
     LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
   }
@@ -2661,7 +2675,8 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   // Get the needed alignments to check them if
   // ldp-aligned-only/stp-aligned-only features are opted.
   uint64_t MemAlignment = MemOp->getAlign().value();
-  uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
+  uint64_t TypeAlignment =
+      Align(MemOp->getSize().getValue().getKnownMinValue()).value();
 
   if (MemAlignment < 2 * TypeAlignment) {
     NumFailedAlignmentCheck++;
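
The switch to getKnownMinValue() is what lets this check handle SVE fill/spill at all: their memory size is scalable, and implicitly converting a scalable TypeSize to uint64_t (as the old Align(...) construction did) asserts. With the vector length pinned to 128 bits, the known minimum of 16 bytes is also the exact size. A standalone illustration of the TypeSize behavior involved (not from the patch):

    #include "llvm/Support/TypeSize.h"
    using namespace llvm;

    TypeSize FixedSz = TypeSize::getFixed(16);       // e.g. a NEON Q access
    TypeSize ScalableSz = TypeSize::getScalable(16); // one SVE vector: vscale x 16 bytes
    uint64_t A = FixedSz;                            // OK: fixed sizes convert freely
    uint64_t B = ScalableSz.getKnownMinValue();      // OK: 16, defined for both kinds
    // uint64_t C = ScalableSz;                      // asserts: scalable -> uint64_t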
@@ -2822,11 +2837,18 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
   }
   // 3) Find loads and stores that can be merged into a single load or store
   //    pair instruction.
+  //    When compiling for SVE 128, also try to combine SVE fill/spill
+  //    instructions into LDP/STP.
   //      e.g.,
   //        ldr x0, [x2]
   //        ldr x1, [x2, #8]
   //        ; becomes
   //        ldp x0, x1, [x2]
+  //      e.g.,
+  //        ldr z0, [x2]
+  //        ldr z1, [x2, #1, mul vl]
+  //        ; becomes
+  //        ldp q0, q1, [x2]
 
   if (MBB.getParent()->getRegInfo().tracksLiveness()) {
     DefinedInBB.clear();
283 changes: 283 additions & 0 deletions llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -0,0 +1,283 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=aarch64_be-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-BE
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF

define void @nxv16i8(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: stp q0, q1, [x1]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #1, mul vl]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #1, mul vl]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%vl = shl nuw nsw i64 %vscale, 4
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
%ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}
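
The offset idiom shared by these tests: shl nuw nsw i64 %vscale, 4 computes vscale * 16, the byte size of one SVE vector, so the second access lands exactly one vector length past the first:

    %vl = shl nuw nsw i64 %vscale, 4   ; vscale * 16 bytes = 1 vector length
    ; with VL = 128: vscale = 1, so %vl = 16, i.e. the #1, mul vl slot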

define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_max_range:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0, #-1024]
; CHECK-NEXT: stp q0, q1, [x1, #1008]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8_max_range:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: rdvl x8, #1
; CHECK-BE-NEXT: mov x9, #-1008 // =0xfffffffffffffc10
; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
; CHECK-BE-NEXT: lsr x8, x8, #4
; CHECK-BE-NEXT: mov w11, #1008 // =0x3f0
; CHECK-BE-NEXT: mov w12, #1024 // =0x400
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: mul x9, x8, x9
; CHECK-BE-NEXT: mul x10, x8, x10
; CHECK-BE-NEXT: mul x11, x8, x11
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x9]
; CHECK-BE-NEXT: mul x8, x8, x12
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x10]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1, #1008]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0, #-1024]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_max_range:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0, #-64, mul vl]
; CHECK-OFF-NEXT: ldr z1, [x0, #-63, mul vl]
; CHECK-OFF-NEXT: str z0, [x1, #63, mul vl]
; CHECK-OFF-NEXT: str z1, [x1, #64, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%ldoff1 = mul i64 %vscale, -1024
%ldoff2 = mul i64 %vscale, -1008
%stoff1 = mul i64 %vscale, 1008
%stoff2 = mul i64 %vscale, 1024
%ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
%stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
%ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}
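
The bounds exercised here follow from the LDP/STP Q-form encoding, whose offset is a signed 7-bit immediate scaled by 16 bytes, so the pair's base offset must lie within:

    -64 * 16 = -1024   ; lowest  encodable pair offset: ldp q0, q1, [x0, #-1024]
     63 * 16 =  1008   ; highest encodable pair offset: stp q0, q1, [x1, #1008]

One mul vl step further out falls outside this range, which is why the next test stays unpaired.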

define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_outside_range:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-NEXT: str z0, [x1, #64, mul vl]
; CHECK-NEXT: str z1, [x1, #65, mul vl]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8_outside_range:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: rdvl x8, #1
; CHECK-BE-NEXT: mov x9, #-1040 // =0xfffffffffffffbf0
; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
; CHECK-BE-NEXT: lsr x8, x8, #4
; CHECK-BE-NEXT: mov w11, #1024 // =0x400
; CHECK-BE-NEXT: mov w12, #1040 // =0x410
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: mul x9, x8, x9
; CHECK-BE-NEXT: mul x10, x8, x10
; CHECK-BE-NEXT: mul x11, x8, x11
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x9]
; CHECK-BE-NEXT: mul x8, x8, x12
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x10]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_outside_range:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_outside_range:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-OFF-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-OFF-NEXT: str z0, [x1, #64, mul vl]
; CHECK-OFF-NEXT: str z1, [x1, #65, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%ldoff1 = mul i64 %vscale, -1040
%ldoff2 = mul i64 %vscale, -1024
%stoff1 = mul i64 %vscale, 1024
%stoff2 = mul i64 %vscale, 1040
%ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
%stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
%ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}

define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_2vl_stride:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: str z1, [x1, #2, mul vl]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8_2vl_stride:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #2, mul vl]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #2, mul vl]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_2vl_stride:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #2, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%vl = shl nuw nsw i64 %vscale, 5
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
%ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
%ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
ret void
}

define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv2f64_32b_aligned:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: stp q0, q1, [x1]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv2f64_32b_aligned:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: ptrue p0.d
; CHECK-BE-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-BE-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-BE-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-BE-NEXT: st1d { z1.d }, p0, [x1, #1, mul vl]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-STPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv2f64_32b_aligned:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
; CHECK-OFF-NEXT: ret
%vscale = tail call i64 @llvm.vscale()
%vl = shl nuw nsw i64 %vscale, 4
%ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
%stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
%ld1 = load <vscale x 2 x double>, ptr %ldptr, align 32
%ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 32
store <vscale x 2 x double> %ld1, ptr %stptr, align 32
store <vscale x 2 x double> %ld2, ptr %stptr2, align 32
ret void
}