diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index df818b39cf72d..eaac88aa6df4f 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -5367,7 +5367,7 @@ bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* par
 #ifdef TARGET_ARM64
     // Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store.
     if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) &&
-        index->gtGetOp2()->IsCnsIntOrI() && varTypeIsIntegral(targetType))
+        index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType)))
     {
         // BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT
         GenTreeCast* cast = index->gtGetOp1()->AsCast();
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index ed702b872e8e8..2a8f2c32f0b8c 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -5511,7 +5511,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
     // See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497
 
     // Use 2) form only for primitive types for now - it significantly reduced number of size regressions
-    if (!varTypeIsIntegral(elemTyp))
+    if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp))
     {
         groupArrayRefWithElemOffset = false;
     }
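
Both hunks broaden the same kind of type check from "integral only" to "integral or floating-point". The sketch below is only a standalone illustration of that predicate in isolation, not coreclr code: the `var_types` enum and the two `varTypeIs*` helpers are simplified stand-ins for the JIT's own definitions, and `qualifiesForAddrModeOptimization` is a hypothetical name used here for illustration.

```cpp
#include <cstdio>

// Simplified stand-in for the JIT's var_types enum (assumption, not the real definition).
enum var_types { TYP_INT, TYP_LONG, TYP_FLOAT, TYP_DOUBLE, TYP_STRUCT };

// Simplified stand-ins for the JIT helpers referenced in the diff.
static bool varTypeIsIntegral(var_types t) { return t == TYP_INT || t == TYP_LONG; }
static bool varTypeIsFloating(var_types t) { return t == TYP_FLOAT || t == TYP_DOUBLE; }

// Before this change, only integral types passed the check; after it,
// floating-point types qualify as well (struct/SIMD types still do not).
static bool qualifiesForAddrModeOptimization(var_types t)
{
    return varTypeIsIntegral(t) || varTypeIsFloating(t);
}

int main()
{
    std::printf("TYP_FLOAT qualifies:  %d\n", qualifiesForAddrModeOptimization(TYP_FLOAT));  // 1
    std::printf("TYP_STRUCT qualifies: %d\n", qualifiesForAddrModeOptimization(TYP_STRUCT)); // 0
    return 0;
}
```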