@@ -64,7 +64,7 @@ bool UnrolledInstAnalyzer::simplifyInstWithSCEV(Instruction *I) {
     return false;
   SimplifiedAddress Address;
   Address.Base = Base->getValue();
-  Address.Offset = Offset->getValue();
+  Address.Offset = Offset->getAPInt();
   SimplifiedAddresses[I] = Address;
   return false;
 }
@@ -105,7 +105,7 @@ bool UnrolledInstAnalyzer::visitLoad(LoadInst &I) {
   auto AddressIt = SimplifiedAddresses.find(AddrOp);
   if (AddressIt == SimplifiedAddresses.end())
     return false;
-  ConstantInt *SimplifiedAddrOp = AddressIt->second.Offset;
+  const APInt &SimplifiedAddrOp = AddressIt->second.Offset;
 
   auto *GV = dyn_cast<GlobalVariable>(AddressIt->second.Base);
   // We're only interested in loads that can be completely folded to a
@@ -125,9 +125,9 @@ bool UnrolledInstAnalyzer::visitLoad(LoadInst &I) {
     return false;
 
   unsigned ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U;
-  if (SimplifiedAddrOp->getValue().getActiveBits() > 64)
+  if (SimplifiedAddrOp.getActiveBits() > 64)
     return false;
-  int64_t SimplifiedAddrOpV = SimplifiedAddrOp->getSExtValue();
+  int64_t SimplifiedAddrOpV = SimplifiedAddrOp.getSExtValue();
   if (SimplifiedAddrOpV < 0) {
     // FIXME: For now we conservatively ignore out of bound accesses, but
     // we're allowed to perform the optimization in this case.
@@ -186,10 +186,9 @@ bool UnrolledInstAnalyzer::visitCmpInst(CmpInst &I) {
       if (SimplifiedRHS != SimplifiedAddresses.end()) {
         SimplifiedAddress &LHSAddr = SimplifiedLHS->second;
         SimplifiedAddress &RHSAddr = SimplifiedRHS->second;
-        if (LHSAddr.Base == RHSAddr.Base) {
-          LHS = LHSAddr.Offset;
-          RHS = RHSAddr.Offset;
-        }
+        if (LHSAddr.Base == RHSAddr.Base)
+          return ICmpInst::compare(LHSAddr.Offset, RHSAddr.Offset,
+                                   I.getPredicate());
       }
     }
   }
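
For context, the patch changes SimplifiedAddress::Offset from a ConstantInt* to an APInt, so visitLoad and visitCmpInst work on the raw integer value directly, and a comparison of two addresses with the same base folds via ICmpInst::compare instead of materializing ConstantInt operands. The snippet below is a minimal standalone sketch of that folding step, not code from the patch; AddressSketch and foldSameBaseCompare are hypothetical names introduced here for illustration, and both offsets are assumed to have the same bit width (APInt comparisons require matching widths).

```cpp
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical stand-in for SimplifiedAddress: a base pointer plus a
// constant byte offset stored as an APInt rather than a ConstantInt.
struct AddressSketch {
  const void *Base; // stand-in for the Value* base
  APInt Offset;     // constant offset from the base
};

// Fold `LHS pred RHS` when both operands are constant offsets from the same
// base. Returns true and sets Result if the comparison could be folded.
static bool foldSameBaseCompare(const AddressSketch &LHS,
                                const AddressSketch &RHS,
                                ICmpInst::Predicate Pred, bool &Result) {
  if (LHS.Base != RHS.Base)
    return false;
  // ICmpInst::compare evaluates the predicate on the two APInts directly,
  // matching the new code path in visitCmpInst.
  Result = ICmpInst::compare(LHS.Offset, RHS.Offset, Pred);
  return true;
}
```

For example, ICmpInst::compare(APInt(64, 8), APInt(64, 16), ICmpInst::ICMP_ULT) evaluates to true, since both operands are 64-bit and 8 is less than 16 when compared as unsigned values.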