From 30309da8ae6dfa387270fe902a9f70c0c55b734d Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Wed, 24 Aug 2022 03:04:59 +0100
Subject: [PATCH] [DAG] MatchRotate - bail if we fail to match a shl/srl pair

extractShiftForRotate may fail to return canonicalized shifts due to
constant folding or other simplification that can occur in getNode()

Fixes Issue #57283

(cherry picked from commit e624f8a3bb88075493dec521408993ea0ef7bde0)
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  4 ++
 llvm/test/CodeGen/X86/pr57283.ll              | 38 +++++++++++++++++++
 2 files changed, 42 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/pr57283.ll

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3db6579fb9bc..42a141e8876b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7574,6 +7574,10 @@ SDValue DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
     std::swap(LHSMask, RHSMask);
   }
 
+  // Something has gone wrong - we've lost the shl/srl pair - bail.
+  if (LHSShift.getOpcode() != ISD::SHL || RHSShift.getOpcode() != ISD::SRL)
+    return SDValue();
+
   unsigned EltSizeInBits = VT.getScalarSizeInBits();
   SDValue LHSShiftArg = LHSShift.getOperand(0);
   SDValue LHSShiftAmt = LHSShift.getOperand(1);
diff --git a/llvm/test/CodeGen/X86/pr57283.ll b/llvm/test/CodeGen/X86/pr57283.ll
new file mode 100644
index 000000000000..58e1432cc329
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr57283.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+
+define void @PR57283() nounwind {
+; X86-LABEL: PR57283:
+; X86:       # %bb.0: # %BB
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, (%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+;
+; X64-LABEL: PR57283:
+; X64:       # %bb.0: # %BB
+; X64-NEXT:    movq $0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movq $0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    retq
+BB:
+  %A6 = alloca i64, align 8
+  %A = alloca i64, align 8
+  %L = load i64, i64* %A, align 4
+  %B3 = sub i64 %L, %L
+  %B2 = mul i64 %B3, 4294967296
+  %B1 = add i64 %B2, %B2
+  %B4 = udiv i64 %B2, -9223372036854775808
+  %B = xor i64 %B1, %B4
+  store i64 %B, i64* %A, align 4
+  %B5 = sdiv i64 %B, -1
+  store i64 %B5, i64* %A6, align 4
+  ret void
+}
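
Note (not part of the patch): MatchRotate folds an OR of complementary
shifts, (or (shl X, C1), (srl X, C2)) with C1 + C2 == bitwidth, into a
single ISD::ROTL/ROTR node. The guard added above re-checks the opcodes
because extractShiftForRotate builds its results through getNode(), which
may constant-fold or otherwise simplify one side so that the expected
shl/srl pair no longer exists. A minimal C++ sketch of the source-level
idiom the combine targets; the name rotl64 is illustrative, not from LLVM:

  #include <cstdint>

  // OR of complementary shifts of the same value - the pattern that
  // MatchRotate turns into a single rotate instruction (e.g. ROL on x86).
  static inline uint64_t rotl64(uint64_t X, unsigned C) {
    C &= 63;                                  // keep the amount in range
    return (X << C) | (X >> ((64 - C) & 63)); // well-defined for C == 0
  }

When both shifts reach MatchRotate intact, the fold fires as intended; in
the PR57283 reproducer, simplification in getNode() left one side as a
non-shift node, which the new early-out now rejects instead of asserting.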