diff --git a/src/jit/CMakeLists.txt b/src/jit/CMakeLists.txt index 38d5e8c93578..ea122ad1c889 100644 --- a/src/jit/CMakeLists.txt +++ b/src/jit/CMakeLists.txt @@ -27,6 +27,7 @@ set( JIT_SOURCES assertionprop.cpp bitset.cpp block.cpp + layout.cpp codegencommon.cpp codegenlinear.cpp compiler.cpp @@ -97,6 +98,7 @@ if (WIN32) alloc.h arraystack.h bitset.h + layout.h bitsetasshortlong.h bitsetasuint64.h bitsetasuint64inclass.h diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp index 93eb16ad7876..3a20924d795e 100644 --- a/src/jit/codegenarm.cpp +++ b/src/jit/codegenarm.cpp @@ -820,7 +820,7 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) // This GenTree node has data about GC pointers, this means we're dealing // with CpObj. - assert(cpObjNode->gtGcPtrCount > 0); + assert(cpObjNode->GetLayout()->HasGCPtr()); #endif // DEBUG // Consume the operands and get them into the right registers. @@ -839,29 +839,16 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) instGen_MemoryBarrier(); } - unsigned slots = cpObjNode->gtSlots; - emitter* emit = getEmitter(); - - BYTE* gcPtrs = cpObjNode->gtGcPtrs; + emitter* emit = getEmitter(); + ClassLayout* layout = cpObjNode->GetLayout(); + unsigned slots = layout->GetSlotCount(); // If we can prove it's on the stack we don't need to use the write barrier. 
- emitAttr attr = EA_PTRSIZE; if (dstOnStack) { for (unsigned i = 0; i < slots; ++i) { - if (gcPtrs[i] == GCT_GCREF) - { - attr = EA_GCREF; - } - else if (gcPtrs[i] == GCT_BYREF) - { - attr = EA_BYREF; - } - else - { - attr = EA_PTRSIZE; - } + emitAttr attr = emitTypeSize(layout->GetGCPtrType(i)); emit->emitIns_R_R_I(INS_ldr, attr, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC); @@ -871,26 +858,22 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) } else { - unsigned gcPtrCount = cpObjNode->gtGcPtrCount; + unsigned gcPtrCount = layout->GetGCPtrCount(); unsigned i = 0; while (i < slots) { - switch (gcPtrs[i]) + if (!layout->IsGCPtr(i)) + { + emit->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, + INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC); + emit->emitIns_R_R_I(INS_str, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE, + INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC); + } + else { - case TYPE_GC_NONE: - emit->emitIns_R_R_I(INS_ldr, attr, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, - INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC); - emit->emitIns_R_R_I(INS_str, attr, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE, - INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC); - break; - - default: - // In the case of a GC-Pointer we'll call the ByRef write barrier helper - genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE); - - gcPtrCount--; - break; + genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE); + gcPtrCount--; } ++i; } diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp index 1d05fde7256d..b7c7c5ee5408 100644 --- a/src/jit/codegenarm64.cpp +++ b/src/jit/codegenarm64.cpp @@ -2647,7 +2647,7 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) // This GenTree node has data about GC pointers, this means we're dealing // with CpObj. 
- assert(cpObjNode->gtGcPtrCount > 0); + assert(cpObjNode->GetLayout()->HasGCPtr()); #endif // DEBUG // Consume the operands and get them into the right registers. @@ -2656,7 +2656,8 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_SRC_BYREF, srcAddrType); gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_DST_BYREF, dstAddr->TypeGet()); - unsigned slots = cpObjNode->gtSlots; + ClassLayout* layout = cpObjNode->GetLayout(); + unsigned slots = layout->GetSlotCount(); // Temp register(s) used to perform the sequence of loads and stores. regNumber tmpReg = cpObjNode->ExtractTempReg(); @@ -2683,8 +2684,6 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) emitter* emit = getEmitter(); - BYTE* gcPtrs = cpObjNode->gtGcPtrs; - // If we can prove it's on the stack we don't need to use the write barrier. if (dstOnStack) { @@ -2692,8 +2691,8 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) // Check if two or more remaining slots and use a ldp/stp sequence while (i < slots - 1) { - emitAttr attr0 = emitTypeSize(compiler->getJitGCType(gcPtrs[i + 0])); - emitAttr attr1 = emitTypeSize(compiler->getJitGCType(gcPtrs[i + 1])); + emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0)); + emitAttr attr1 = emitTypeSize(layout->GetGCPtrType(i + 1)); emit->emitIns_R_R_R_I(INS_ldp, attr0, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF, 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX, attr1); @@ -2705,7 +2704,7 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) // Use a ldr/str sequence for the last remainder if (i < slots) { - emitAttr attr0 = emitTypeSize(compiler->getJitGCType(gcPtrs[i + 0])); + emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0)); emit->emitIns_R_R_I(INS_ldr, attr0, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX); @@ -2715,38 +2714,35 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) } else { - unsigned gcPtrCount = cpObjNode->gtGcPtrCount; + unsigned gcPtrCount = 
cpObjNode->GetLayout()->GetGCPtrCount(); unsigned i = 0; while (i < slots) { - switch (gcPtrs[i]) + if (!layout->IsGCPtr(i)) { - case TYPE_GC_NONE: - // Check if the next slot's type is also TYP_GC_NONE and use ldp/stp - if ((i + 1 < slots) && (gcPtrs[i + 1] == TYPE_GC_NONE)) - { - emit->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF, - 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX); - emit->emitIns_R_R_R_I(INS_stp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF, - 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX); - ++i; // extra increment of i, since we are copying two items - } - else - { - emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, - INS_OPTS_POST_INDEX); - emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE, - INS_OPTS_POST_INDEX); - } - break; - - default: - // In the case of a GC-Pointer we'll call the ByRef write barrier helper - genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE); - - gcPtrCount--; - break; + // Check if the next slot's type is also TYP_GC_NONE and use ldp/stp + if ((i + 1 < slots) && !layout->IsGCPtr(i + 1)) + { + emit->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF, + 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX); + emit->emitIns_R_R_R_I(INS_stp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF, + 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX); + ++i; // extra increment of i, since we are copying two items + } + else + { + emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, + INS_OPTS_POST_INDEX); + emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE, + INS_OPTS_POST_INDEX); + } + } + else + { + // In the case of a GC-Pointer we'll call the ByRef write barrier helper + genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE); + gcPtrCount--; } ++i; } diff --git 
a/src/jit/codegenarmarch.cpp b/src/jit/codegenarmarch.cpp index 21617ae14ee3..cff89664e8f4 100644 --- a/src/jit/codegenarmarch.cpp +++ b/src/jit/codegenarmarch.cpp @@ -849,18 +849,10 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) // the xor ensures that only one of the two is setup, not both assert((varNode != nullptr) ^ (addrNode != nullptr)); - BYTE gcPtrArray[MAX_ARG_REG_COUNT] = {}; // TYPE_GC_NONE = 0 - BYTE* gcPtrs = gcPtrArray; + ClassLayout* layout; + unsigned structSize; + bool isHfa; - unsigned gcPtrCount; // The count of GC pointers in the struct - unsigned structSize; - bool isHfa; - -#ifdef _TARGET_ARM_ - // On ARM32, size of reference map can be larger than MAX_ARG_REG_COUNT - gcPtrs = treeNode->gtGcPtrs; - gcPtrCount = treeNode->gtNumberReferenceSlots; -#endif // Setup the structSize, isHFa, and gcPtrCount if (source->OperGet() == GT_LCL_VAR) { @@ -874,12 +866,8 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) structSize = varDsc->lvSize(); // This yields the roundUp size, but that is fine // as that is how much stack is allocated for this LclVar - isHfa = varDsc->lvIsHfa(); -#ifdef _TARGET_ARM64_ - gcPtrCount = varDsc->lvStructGcCount; - for (unsigned i = 0; i < gcPtrCount; ++i) - gcPtrs[i] = varDsc->lvGcLayout[i]; -#endif // _TARGET_ARM_ + isHfa = varDsc->lvIsHfa(); + layout = varDsc->GetLayout(); } else // we must have a GT_OBJ { @@ -889,9 +877,9 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) // it provides (size and GC layout) even if the node wraps a lclvar. Due // to struct reinterpretation (e.g. Unsafe.As) it is possible that // the OBJ node has a different type than the lclvar. - CORINFO_CLASS_HANDLE objClass = source->gtObj.gtClass; + layout = source->AsObj()->GetLayout(); - structSize = compiler->info.compCompHnd->getClassSize(objClass); + structSize = layout->GetSize(); // The codegen code below doesn't have proper support for struct sizes // that are not multiple of the slot size. 
Call arg morphing handles this @@ -908,22 +896,14 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) assert((structSize % TARGET_POINTER_SIZE) == 0); } - isHfa = compiler->IsHfa(objClass); - -#ifdef _TARGET_ARM64_ - // On ARM32, Lowering places the correct GC layout information in the - // GenTreePutArgStk node and the code above already use that. On ARM64, - // this information is not available (in order to keep GenTreePutArgStk - // nodes small) and we need to retrieve it from the VM here. - gcPtrCount = compiler->info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]); -#endif + isHfa = compiler->IsHfa(layout->GetClassHandle()); } // If we have an HFA we can't have any GC pointers, // if not then the max size for the the struct is 16 bytes if (isHfa) { - noway_assert(gcPtrCount == 0); + noway_assert(!layout->HasGCPtr()); } #ifdef _TARGET_ARM64_ else @@ -945,8 +925,8 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) while (remainingSize >= 2 * TARGET_POINTER_SIZE) { - var_types type0 = compiler->getJitGCType(gcPtrs[nextIndex + 0]); - var_types type1 = compiler->getJitGCType(gcPtrs[nextIndex + 1]); + var_types type0 = layout->GetGCPtrType(nextIndex + 0); + var_types type1 = layout->GetGCPtrType(nextIndex + 1); if (varNode != nullptr) { @@ -981,7 +961,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) // str r2, [sp, #16] while (remainingSize >= TARGET_POINTER_SIZE) { - var_types type = compiler->getJitGCType(gcPtrs[nextIndex]); + var_types type = layout->GetGCPtrType(nextIndex); if (varNode != nullptr) { @@ -1018,7 +998,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) { if (remainingSize >= TARGET_POINTER_SIZE) { - var_types nextType = compiler->getJitGCType(gcPtrs[nextIndex]); + var_types nextType = layout->GetGCPtrType(nextIndex); emitAttr nextAttr = emitTypeSize(nextType); remainingSize -= TARGET_POINTER_SIZE; @@ -1051,7 +1031,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) assert(varNode == nullptr); // the left 
over size is smaller than a pointer and thus can never be a GC type - assert(varTypeIsGC(compiler->getJitGCType(gcPtrs[nextIndex])) == false); + assert(!layout->IsGCPtr(nextIndex)); var_types loadType = TYP_UINT; if (loadSize == 1) @@ -1224,11 +1204,6 @@ void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode) // the xor ensures that only one of the two is setup, not both assert((varNode != nullptr) ^ (addrNode != nullptr)); - // Setup the structSize, isHFa, and gcPtrCount - BYTE* gcPtrs = treeNode->gtGcPtrs; - unsigned gcPtrCount = treeNode->gtNumberReferenceSlots; // The count of GC pointers in the struct - int structSize = treeNode->getArgSize(); - // This is the varNum for our load operations, // only used when we have a struct with a LclVar source unsigned srcVarNum = BAD_VAR_NUM; @@ -1263,9 +1238,12 @@ void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode) assert(baseReg != addrReg); // We don't split HFA struct - assert(!compiler->IsHfa(source->gtObj.gtClass)); + assert(!compiler->IsHfa(source->AsObj()->GetLayout()->GetClassHandle())); } + int structSize = treeNode->getArgSize(); + ClassLayout* layout = source->AsObj()->GetLayout(); + // Put on stack first unsigned nextIndex = treeNode->gtNumRegs; unsigned structOffset = nextIndex * TARGET_POINTER_SIZE; @@ -1275,7 +1253,7 @@ void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode) assert(remainingSize % TARGET_POINTER_SIZE == 0); while (remainingSize > 0) { - var_types type = compiler->getJitGCType(gcPtrs[nextIndex]); + var_types type = layout->GetGCPtrType(nextIndex); if (varNode != nullptr) { @@ -2804,7 +2782,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) // Must be <= 16 bytes or else it wouldn't be passed in registers, except for HFA, // which can be bigger (and is handled above). 
noway_assert(EA_SIZE_IN_BYTES(varDsc->lvSize()) <= 16); - loadType = compiler->getJitGCType(varDsc->lvGcLayout[0]); + loadType = varDsc->GetLayout()->GetGCPtrType(0); } else { @@ -2825,7 +2803,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) // Restore the second register. argRegNext = genRegArgNext(argReg); - loadType = compiler->getJitGCType(varDsc->lvGcLayout[1]); + loadType = varDsc->GetLayout()->GetGCPtrType(1); loadSize = emitActualTypeSize(loadType); getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE); @@ -2918,7 +2896,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) for (unsigned ofs = 0; ofs < maxSize; ofs += REGSIZE_BYTES) { unsigned idx = ofs / REGSIZE_BYTES; - loadType = compiler->getJitGCType(varDsc->lvGcLayout[idx]); + loadType = varDsc->GetLayout()->GetGCPtrType(idx); if (varDsc->lvRegNum != argReg) { @@ -3389,7 +3367,7 @@ void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp) if (blkOp->OperIs(GT_STORE_OBJ) && blkOp->OperIsCopyBlkOp()) { - assert(blkOp->AsObj()->gtGcPtrCount != 0); + assert(blkOp->AsObj()->GetLayout()->HasGCPtr()); genCodeForCpObj(blkOp->AsObj()); return; } diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp index 31037376ba57..e9adda13d7fd 100644 --- a/src/jit/codegencommon.cpp +++ b/src/jit/codegencommon.cpp @@ -1089,8 +1089,8 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignme if (op->gtOper == GT_OBJ) { - CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->gtClass; - opSize = compiler->info.compCompHnd->getClassSize(clsHnd); + CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->GetLayout()->GetClassHandle(); + opSize = op->AsObj()->GetLayout()->GetSize(); alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE); } else if (op->gtOper == GT_LCL_VAR) @@ -4510,8 +4510,7 @@ void CodeGen::genCheckUseBlockInit() continue; } - if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()) || (varDsc->lvStructGcCount > 0) 
|| - varDsc->lvMustInit) + if (compiler->info.compInitMem || varDsc->HasGCPtr() || varDsc->lvMustInit) { if (varDsc->lvTracked) { @@ -4557,7 +4556,7 @@ void CodeGen::genCheckUseBlockInit() CLANG_FORMAT_COMMENT_ANCHOR; if ((!varDsc->lvTracked || (varDsc->lvType == TYP_STRUCT)) && varDsc->lvOnFrame && - (!varDsc->lvIsTemp || varTypeIsGC(varDsc->TypeGet()) || (varDsc->lvStructGcCount > 0))) + (!varDsc->lvIsTemp || varDsc->HasGCPtr())) { varDsc->lvMustInit = true; @@ -4574,7 +4573,7 @@ void CodeGen::genCheckUseBlockInit() /* Ignore if not a pointer variable or value class with a GC field */ - if (!compiler->lvaTypeIsGC(varNum)) + if (!varDsc->HasGCPtr()) { continue; } @@ -4595,13 +4594,18 @@ void CodeGen::genCheckUseBlockInit() /* Is this a 'must-init' stack pointer local? */ - if (varDsc->lvMustInit && varDsc->lvOnFrame) + if (varDsc->lvMustInit && varDsc->lvOnFrame && !counted) { - if (!counted) + if (varDsc->TypeGet() == TYP_STRUCT) { - initStkLclCnt += varDsc->lvStructGcCount; - counted = true; + initStkLclCnt += varDsc->GetLayout()->GetGCPtrCount(); } + else + { + assert(varTypeIsGC(varDsc->TypeGet())); + initStkLclCnt += 1; + } + counted = true; } if ((compiler->lvaLclSize(varNum) > (3 * TARGET_POINTER_SIZE)) && (largeGcStructs <= 4)) @@ -6336,11 +6340,11 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, { // We only initialize the GC variables in the TYP_STRUCT const unsigned slots = (unsigned)compiler->lvaLclSize(varNum) / REGSIZE_BYTES; - const BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum); + ClassLayout* layout = varDsc->GetLayout(); for (unsigned i = 0; i < slots; i++) { - if (gcPtrs[i] != TYPE_GC_NONE) + if (layout->IsGCPtr(i)) { getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, genGetZeroReg(initReg, pInitRegZeroed), varNum, i * REGSIZE_BYTES); @@ -7700,7 +7704,7 @@ void CodeGen::genFnProlog() /* We need to know the offset range of tracked stack GC refs */ /* We assume that the GC reference can be anywhere in 
the TYP_STRUCT */ - if (compiler->lvaTypeIsGC(varNum) && varDsc->lvTrackedNonStruct() && varDsc->lvOnFrame) + if (varDsc->HasGCPtr() && varDsc->lvTrackedNonStruct() && varDsc->lvOnFrame) { // For fields of PROMOTION_TYPE_DEPENDENT type of promotion, they should have been // taken care of by the parent struct. diff --git a/src/jit/codegenlinear.cpp b/src/jit/codegenlinear.cpp index b76aadd15bb6..7d054fb4c48c 100644 --- a/src/jit/codegenlinear.cpp +++ b/src/jit/codegenlinear.cpp @@ -1093,7 +1093,6 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) unsigned flags = splitArg->GetRegSpillFlagByIdx(i); if ((flags & GTF_SPILLED) != 0) { - BYTE* gcPtrs = splitArg->gtGcPtrs; var_types dstType = splitArg->GetRegType(i); regNumber dstReg = splitArg->GetRegNumByIdx(i); diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp index 94ad12309061..1b5e11a50990 100644 --- a/src/jit/codegenxarch.cpp +++ b/src/jit/codegenxarch.cpp @@ -2912,7 +2912,7 @@ void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode) if (storeBlkNode->OperIs(GT_STORE_OBJ) && storeBlkNode->OperIsCopyBlkOp() && !storeBlkNode->gtBlkOpGcUnsafe) { - assert(storeBlkNode->AsObj()->gtGcPtrCount != 0); + assert(storeBlkNode->AsObj()->GetLayout()->HasGCPtr()); genCodeForCpObj(storeBlkNode->AsObj()); return; } @@ -3660,9 +3660,9 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr(); #ifdef DEBUG - - // A CpObj always involves one or more GC pointers. - assert(cpObjNode->gtGcPtrCount > 0); + // If the GenTree node has data about GC pointers, this means we're dealing + // with CpObj, so this requires special logic. + assert(cpObjNode->GetLayout()->HasGCPtr()); // MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields // and it needs src = RSI and dst = RDI. 
@@ -3702,7 +3702,7 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType); gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet()); - unsigned slots = cpObjNode->gtSlots; + unsigned slots = cpObjNode->GetLayout()->GetSlotCount(); // If we can prove it's on the stack we don't need to use the write barrier. if (dstOnStack) @@ -3729,52 +3729,49 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) } else { - BYTE* gcPtrs = cpObjNode->gtGcPtrs; - unsigned gcPtrCount = cpObjNode->gtGcPtrCount; + ClassLayout* layout = cpObjNode->GetLayout(); + unsigned gcPtrCount = layout->GetGCPtrCount(); unsigned i = 0; while (i < slots) { - switch (gcPtrs[i]) + if (!layout->IsGCPtr(i)) { - case TYPE_GC_NONE: - // Let's see if we can use rep movsp instead of a sequence of movsp instructions - // to save cycles and code size. + // Let's see if we can use rep movsp instead of a sequence of movsp instructions + // to save cycles and code size. + unsigned nonGcSlotCount = 0; + + do + { + nonGcSlotCount++; + i++; + } while ((i < slots) && !layout->IsGCPtr(i)); + + // If we have a very small contiguous non-gc region, it's better just to + // emit a sequence of movsp instructions + if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT) + { + while (nonGcSlotCount > 0) { - unsigned nonGcSlotCount = 0; - - do - { - nonGcSlotCount++; - i++; - } while (i < slots && gcPtrs[i] == TYPE_GC_NONE); - - // If we have a very small contiguous non-gc region, it's better just to - // emit a sequence of movsp instructions - if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT) - { - while (nonGcSlotCount > 0) - { - instGen(INS_movsp); - nonGcSlotCount--; - } - } - else - { - // Otherwise, we can save code-size and improve CQ by emitting - // rep movsp (alias for movsd/movsq for x86/x64) - assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0); - - getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount); - instGen(INS_r_movsp); - } + instGen(INS_movsp); + nonGcSlotCount--; } - 
break; - default: - // We have a GC pointer, call the memory barrier. - genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE); - gcPtrCount--; - i++; + } + else + { + // Otherwise, we can save code-size and improve CQ by emitting + // rep movsp (alias for movsd/movsq for x86/x64) + assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0); + + getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount); + instGen(INS_r_movsp); + } + } + else + { + genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE); + gcPtrCount--; + i++; } } @@ -5441,12 +5438,12 @@ void CodeGen::genCallInstruction(GenTreeCall* call) if (source->TypeGet() == TYP_STRUCT) { GenTreeObj* obj = source->AsObj(); - unsigned argBytes = roundUp(obj->gtBlkSize, TARGET_POINTER_SIZE); + unsigned argBytes = roundUp(obj->GetLayout()->GetSize(), TARGET_POINTER_SIZE); #ifdef _TARGET_X86_ // If we have an OBJ, we must have created a copy if the original arg was not a // local and was not a multiple of TARGET_POINTER_SIZE. // Note that on x64/ux this will be handled by unrolling in genStructPutArgUnroll. 
- assert((argBytes == obj->gtBlkSize) || obj->Addr()->IsLocalAddrExpr()); + assert((argBytes == obj->GetLayout()->GetSize()) || obj->Addr()->IsLocalAddrExpr()); #endif // _TARGET_X86_ assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes); } @@ -7645,11 +7642,11 @@ bool CodeGen::genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk) { case GenTreePutArgStk::Kind::RepInstr: case GenTreePutArgStk::Kind::Unroll: - assert((putArgStk->gtNumberReferenceSlots == 0) && (source->OperGet() != GT_FIELD_LIST) && (argSize >= 16)); + assert(!source->AsObj()->GetLayout()->HasGCPtr() && (argSize >= 16)); break; case GenTreePutArgStk::Kind::Push: case GenTreePutArgStk::Kind::PushAllSlots: - assert((putArgStk->gtNumberReferenceSlots != 0) || (source->OperGet() == GT_FIELD_LIST) || (argSize < 16)); + assert(source->OperIs(GT_FIELD_LIST) || source->AsObj()->GetLayout()->HasGCPtr() || (argSize < 16)); break; case GenTreePutArgStk::Kind::Invalid: default: @@ -8193,7 +8190,9 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) assert(targetType == TYP_STRUCT); - if (putArgStk->gtNumberReferenceSlots == 0) + ClassLayout* layout = source->AsObj()->GetLayout(); + + if (!layout->HasGCPtr()) { switch (putArgStk->gtPutArgStkKind) { @@ -8225,7 +8224,6 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) assert(m_pushStkArg); GenTree* srcAddr = source->gtGetOp1(); - BYTE* gcPtrs = putArgStk->gtGcPtrs; const unsigned numSlots = putArgStk->gtNumSlots; regNumber srcRegNum = srcAddr->gtRegNum; @@ -8250,22 +8248,8 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) for (int i = numSlots - 1; i >= 0; --i) { - emitAttr slotAttr; - if (gcPtrs[i] == TYPE_GC_NONE) - { - slotAttr = EA_4BYTE; - } - else if (gcPtrs[i] == TYPE_GC_REF) - { - slotAttr = EA_GCREF; - } - else - { - assert(gcPtrs[i] == TYPE_GC_BYREF); - slotAttr = EA_BYREF; - } - - const unsigned offset = i * TARGET_POINTER_SIZE; + emitAttr slotAttr = emitTypeSize(layout->GetGCPtrType(i)); + 
const unsigned offset = i * TARGET_POINTER_SIZE; if (srcAddrInReg) { getEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcRegNum, offset); @@ -8289,11 +8273,10 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) unsigned numGCSlotsCopied = 0; #endif // DEBUG - BYTE* gcPtrs = putArgStk->gtGcPtrs; const unsigned numSlots = putArgStk->gtNumSlots; for (unsigned i = 0; i < numSlots;) { - if (gcPtrs[i] == TYPE_GC_NONE) + if (!layout->IsGCPtr(i)) { // Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively) // instead of a sequence of movsp instructions to save cycles and code size. @@ -8302,7 +8285,7 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) { adjacentNonGCSlotCount++; i++; - } while ((i < numSlots) && (gcPtrs[i] == TYPE_GC_NONE)); + } while ((i < numSlots) && !layout->IsGCPtr(i)); // If we have a very small contiguous non-ref region, it's better just to // emit a sequence of movsp instructions @@ -8321,15 +8304,13 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) } else { - assert((gcPtrs[i] == TYPE_GC_REF) || (gcPtrs[i] == TYPE_GC_BYREF)); - // We have a GC (byref or ref) pointer // TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction, // but the logic for emitting a GC info record is not available (it is internal for the emitter // only.) See emitGCVarLiveUpd function. If we could call it separately, we could do // instGen(INS_movsp); and emission of gc info. - var_types memType = (gcPtrs[i] == TYPE_GC_REF) ? 
TYP_REF : TYP_BYREF; + var_types memType = layout->GetGCPtrType(i); getEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0); genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE); #ifdef DEBUG @@ -8351,7 +8332,7 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) } } - assert(numGCSlotsCopied == putArgStk->gtNumberReferenceSlots); + assert(numGCSlotsCopied == layout->GetGCPtrCount()); #endif // _TARGET_X86_ } } diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp index f21249d7d01e..aa446a90606c 100644 --- a/src/jit/compiler.cpp +++ b/src/jit/compiler.cpp @@ -469,61 +469,6 @@ var_types Compiler::getJitGCType(BYTE gcType) return result; } -#if FEATURE_MULTIREG_ARGS -//--------------------------------------------------------------------------- -// getStructGcPtrsFromOp: Given a GenTree node of TYP_STRUCT that represents -// a pass by value argument, return the gcPtr layout -// for the pointers sized fields -// Arguments: -// op - the operand of TYP_STRUCT that is passed by value -// gcPtrsOut - an array of BYTES that are written by this method -// they will contain the VM's CorInfoGCType values -// for each pointer sized field -// Return Value: -// Two [or more] values are written into the gcPtrs array -// -// Note that for ARM64 there will always be exactly two pointer sized fields - -void Compiler::getStructGcPtrsFromOp(GenTree* op, BYTE* gcPtrsOut) -{ - assert(op->TypeGet() == TYP_STRUCT); - -#ifdef _TARGET_ARM64_ - if (op->OperGet() == GT_OBJ) - { - CORINFO_CLASS_HANDLE objClass = op->gtObj.gtClass; - - int structSize = info.compCompHnd->getClassSize(objClass); - assert(structSize <= 2 * TARGET_POINTER_SIZE); - - BYTE gcPtrsTmp[2] = {TYPE_GC_NONE, TYPE_GC_NONE}; - - info.compCompHnd->getClassGClayout(objClass, &gcPtrsTmp[0]); - - gcPtrsOut[0] = gcPtrsTmp[0]; - gcPtrsOut[1] = gcPtrsTmp[1]; - } - else if (op->OperGet() == GT_LCL_VAR) - { - GenTreeLclVarCommon* varNode = op->AsLclVarCommon(); - unsigned 
varNum = varNode->gtLclNum; - assert(varNum < lvaCount); - LclVarDsc* varDsc = &lvaTable[varNum]; - - // At this point any TYP_STRUCT LclVar must be a 16-byte pass by value argument - assert(varDsc->lvSize() == 2 * TARGET_POINTER_SIZE); - - gcPtrsOut[0] = varDsc->lvGcLayout[0]; - gcPtrsOut[1] = varDsc->lvGcLayout[1]; - } - else -#endif - { - noway_assert(!"Unsupported Oper for getStructGcPtrsFromOp"); - } -} -#endif // FEATURE_MULTIREG_ARGS - #ifdef ARM_SOFTFP //--------------------------------------------------------------------------- // IsSingleFloat32Struct: @@ -1927,6 +1872,8 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo) // We start with the flow graph in tree-order fgOrder = FGOrderTree; + m_classLayoutTable = nullptr; + #ifdef FEATURE_SIMD m_simdHandleCache = nullptr; #endif // FEATURE_SIMD @@ -4989,28 +4936,12 @@ bool Compiler::compQuirkForPPP() // This fixes the PPP backward compat issue varDscExposedStruct->lvExactSize += 32; - // Update the GC info to indicate that the padding area does - // not contain any GC pointers. - // // The struct is now 64 bytes. - // // We're on x64 so this should be 8 pointer slots. 
assert((varDscExposedStruct->lvExactSize / TARGET_POINTER_SIZE) == 8); - BYTE* oldGCPtrs = varDscExposedStruct->lvGcLayout; - BYTE* newGCPtrs = getAllocator(CMK_LvaTable).allocate(8); - - for (int i = 0; i < 4; i++) - { - newGCPtrs[i] = oldGCPtrs[i]; - } - - for (int i = 4; i < 8; i++) - { - newGCPtrs[i] = TYPE_GC_NONE; - } - - varDscExposedStruct->lvGcLayout = newGCPtrs; + varDscExposedStruct->SetLayout( + varDscExposedStruct->GetLayout()->GetPPPQuirkLayout(getAllocator(CMK_ClassLayout))); return true; } @@ -9240,7 +9171,7 @@ int cTreeFlagsIR(Compiler* comp, GenTree* tree) case GT_OBJ: case GT_STORE_OBJ: - if (tree->AsObj()->HasGCPtr()) + if (tree->AsObj()->GetLayout()->HasGCPtr()) { chars += printf("[BLK_HASGCPTR]"); } diff --git a/src/jit/compiler.h b/src/jit/compiler.h index d9a2936f3a9a..f057acfdc05b 100644 --- a/src/jit/compiler.h +++ b/src/jit/compiler.h @@ -381,12 +381,10 @@ class LclVarDsc unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) - unsigned char lvStructGcCount : 3; // if struct, how many GC pointer (stop counting at 7). The only use of values >1 - // is to help determine whether to use block init in the prolog. - unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame - unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the - // variable is in the same register for the entire function. - unsigned char lvTracked : 1; // is this a tracked variable? + unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame + unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the + // variable is in the same register for the entire function. + unsigned char lvTracked : 1; // is this a tracked variable? 
bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; @@ -853,8 +851,10 @@ class LclVarDsc CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields - BYTE* lvGcLayout; // GC layout info for structs +private: + ClassLayout* m_layout; // layout info for structs +public: #if ASSERTION_PROP BlockSet lvRefBlks; // Set of blocks that contain refs GenTreeStmt* lvDefStmt; // Pointer to the statement with the single definition @@ -911,6 +911,26 @@ class LclVarDsc var_types lvaArgType(); + // Returns true if this variable contains GC pointers (including being a GC pointer itself). + bool HasGCPtr() + { + return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); + } + + // Returns the layout of a struct variable. + ClassLayout* GetLayout() + { + assert(varTypeIsStruct(lvType)); + return m_layout; + } + + // Sets the layout of a struct variable. + void SetLayout(ClassLayout* layout) + { + assert(varTypeIsStruct(lvType)); + m_layout = layout; + } + SsaDefArray lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required @@ -3378,8 +3398,6 @@ class Compiler } #endif // defined(FEATURE_SIMD) - BYTE* lvaGetGcLayout(unsigned varNum); - bool lvaTypeIsGC(unsigned varNum); unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); @@ -3723,10 +3741,7 @@ class Compiler GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); - var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, - BYTE* gcLayout = nullptr, - unsigned* numGCVars = nullptr, - var_types* simdBaseType = nullptr); + var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* simdBaseType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, @@ -8857,12 +8872,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // FEATURE_MULTIREG_RET } -#if 
FEATURE_MULTIREG_ARGS - // Given a GenTree node of TYP_STRUCT that represents a pass by value argument - // return the gcPtr layout for the pointers sized fields - void getStructGcPtrsFromOp(GenTree* op, BYTE* gcPtrsOut); -#endif // FEATURE_MULTIREG_ARGS - // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { @@ -8876,6 +8885,26 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG +private: + class ClassLayoutTable* m_classLayoutTable; + + class ClassLayoutTable* typCreateClassLayoutTable(); + class ClassLayoutTable* typGetClassLayoutTable(); + +public: + // Get the layout having the specified layout number. + ClassLayout* typGetLayoutByNum(unsigned layoutNum); + // Get the layout number of the specified layout. + unsigned typGetLayoutNum(ClassLayout* layout); + // Get the layout having the specified size but no class handle. + ClassLayout* typGetBlkLayout(unsigned blockSize); + // Get the number of a layout having the specified size but no class handle. + unsigned typGetBlkLayoutNum(unsigned blockSize); + // Get the layout for the specified class handle. + ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); + // Get the number of a layout for the specified class handle. + unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); + //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp index 27e73821a198..0ea0a25a9834 100644 --- a/src/jit/compiler.hpp +++ b/src/jit/compiler.hpp @@ -1867,22 +1867,6 @@ inline VARSET_VALRET_TP Compiler::lvaStmtLclMask(GenTreeStmt* stmt) return lclMask; } -/***************************************************************************** - * Returns true if the lvType is a TYP_REF or a TYP_BYREF. - * When the lvType is a TYP_STRUCT it searches the GC layout - * of the struct and returns true iff it contains a GC ref. 
- */ - -inline bool Compiler::lvaTypeIsGC(unsigned varNum) -{ - if (lvaTable[varNum].TypeGet() == TYP_STRUCT) - { - assert(lvaTable[varNum].lvGcLayout != nullptr); // bits are intialized - return (lvaTable[varNum].lvStructGcCount != 0); - } - return (varTypeIsGC(lvaTable[varNum].TypeGet())); -} - /***************************************************************************** Is this a synchronized instance method? If so, we will need to report "this" in the GC information, so that the EE can release the object lock @@ -4141,8 +4125,8 @@ inline void Compiler::CLR_API_Leave(API_ICorJitInfo_Names ename) bool Compiler::fgStructTempNeedsExplicitZeroInit(LclVarDsc* varDsc, BasicBlock* block) { - bool containsGCPtr = (varDsc->lvStructGcCount > 0); - return (!info.compInitMem || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0) || (!containsGCPtr && varDsc->lvIsTemp)); + return !info.compInitMem || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0) || + (!varDsc->HasGCPtr() && varDsc->lvIsTemp); } /*****************************************************************************/ diff --git a/src/jit/compmemkind.h b/src/jit/compmemkind.h index 244979872e8d..03b9d1468ca7 100644 --- a/src/jit/compmemkind.h +++ b/src/jit/compmemkind.h @@ -55,6 +55,7 @@ CompMemKindMacro(CopyProp) CompMemKindMacro(SideEffects) CompMemKindMacro(ObjectAllocator) CompMemKindMacro(VariableLiveRanges) +CompMemKindMacro(ClassLayout) //clang-format on #undef CompMemKindMacro diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp index 7019a7348305..a7b70a041792 100644 --- a/src/jit/gcencode.cpp +++ b/src/jit/gcencode.cpp @@ -2238,43 +2238,45 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un totalSize += sz; } } - - else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE)) + else if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && varDsc->HasGCPtr()) { - // A struct will have gcSlots only if it is at least 
TARGET_POINTER_SIZE. - unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE; - BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum); + ClassLayout* layout = varDsc->GetLayout(); + unsigned slots = layout->GetSlotCount(); - // walk each member of the array for (unsigned i = 0; i < slots; i++) { - if (gcPtrs[i] == TYPE_GC_NONE) // skip non-gc slots - continue; - + if (!layout->IsGCPtr(i)) { + continue; + } - unsigned offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE; + unsigned offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE; #if DOUBLE_ALIGN - // For genDoubleAlign(), locals are addressed relative to ESP and - // arguments are addressed relative to EBP. + // For genDoubleAlign(), locals are addressed relative to ESP and + // arguments are addressed relative to EBP. - if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg) - offset += compiler->codeGen->genTotalFrameSize(); + if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg) + { + offset += compiler->codeGen->genTotalFrameSize(); + } #endif - if (gcPtrs[i] == TYPE_GC_BYREF) - offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer + if (layout->GetGCPtrType(i) == TYP_BYREF) + { + offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer + } - int encodedoffset = lastoffset - offset; - lastoffset = offset; + int encodedoffset = lastoffset - offset; + lastoffset = offset; - if (mask == 0) - totalSize += encodeSigned(NULL, encodedoffset); - else - { - unsigned sz = encodeSigned(dest, encodedoffset); - dest += sz; - totalSize += sz; - } + if (mask == 0) + { + totalSize += encodeSigned(NULL, encodedoffset); + } + else + { + unsigned sz = encodeSigned(dest, encodedoffset); + dest += sz; + totalSize += sz; } } } @@ -4166,16 +4168,15 @@ void GCInfo::gcMakeRegPtrTable( // If this is a TYP_STRUCT, handle its GC pointers. // Note that the enregisterable struct types cannot have GC pointers in them. 
- if ((varDsc->lvType == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE)) + if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE)) { - unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE; - BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum); + ClassLayout* layout = varDsc->GetLayout(); + unsigned slots = layout->GetSlotCount(); - // walk each member of the array for (unsigned i = 0; i < slots; i++) { - if (gcPtrs[i] == TYPE_GC_NONE) - { // skip non-gc slots + if (!layout->IsGCPtr(i)) + { continue; } @@ -4188,7 +4189,7 @@ void GCInfo::gcMakeRegPtrTable( offset += compiler->codeGen->genTotalFrameSize(); #endif GcSlotFlags flags = GC_SLOT_UNTRACKED; - if (gcPtrs[i] == TYPE_GC_BYREF) + if (layout->GetGCPtrType(i) == TYP_BYREF) { flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR); } diff --git a/src/jit/gcinfo.cpp b/src/jit/gcinfo.cpp index cf953ec39116..ced21f59eda6 100644 --- a/src/jit/gcinfo.cpp +++ b/src/jit/gcinfo.cpp @@ -415,20 +415,9 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int* pUntrackedCount, UNALIGNED untrackedCount++; } - else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE)) + else if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame) { - // A struct will have gcSlots only if it is at least TARGET_POINTER_SIZE. 
- unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE; - BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum); - - // walk each member of the array - for (unsigned i = 0; i < slots; i++) - { - if (gcPtrs[i] != TYPE_GC_NONE) - { // count only gc slots - untrackedCount++; - } - } + untrackedCount += varDsc->GetLayout()->GetGCPtrCount(); } } diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp index 91c0dec7c633..854ab8354690 100644 --- a/src/jit/gentree.cpp +++ b/src/jit/gentree.cpp @@ -258,12 +258,10 @@ void GenTree::InitNodeSize() GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE; - GenTree::s_gtNodeSizes[GT_OBJ] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_LEA] = TREE_NODE_SZ_LARGE; - GenTree::s_gtNodeSizes[GT_STORE_OBJ] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_DYN_BLK] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE; GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE; @@ -325,8 +323,9 @@ void GenTree::InitNodeSize() static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL); static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL); static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL); - static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_LARGE); // *** large node + static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL); static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL); + static_assert_no_msg(sizeof(GenTreeDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL); static_assert_no_msg(sizeof(GenTreeStmt) <= 
TREE_NODE_SZ_LARGE); // *** large node @@ -1260,7 +1259,7 @@ bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK) } break; case GT_OBJ: - if (op1->AsObj()->gtClass != op2->AsObj()->gtClass) + if (op1->AsObj()->GetLayout() != op2->AsObj()->GetLayout()) { return false; } @@ -1963,7 +1962,8 @@ unsigned Compiler::gtHashValue(GenTree* tree) case GT_OBJ: hash = - genTreeHashAdd(hash, static_cast(reinterpret_cast(tree->gtObj.gtClass))); + genTreeHashAdd(hash, + static_cast(reinterpret_cast(tree->AsObj()->GetLayout()))); break; // For the ones below no extra argument matters for comparison. case GT_BOX: @@ -2000,12 +2000,12 @@ unsigned Compiler::gtHashValue(GenTree* tree) case GT_BLK: case GT_STORE_BLK: - hash += tree->gtBlk.gtBlkSize; + hash += tree->AsBlk()->GetLayout()->GetSize(); break; case GT_OBJ: case GT_STORE_OBJ: - hash ^= PtrToUlong(tree->AsObj()->gtClass); + hash ^= PtrToUlong(tree->AsObj()->GetLayout()); break; case GT_DYN_BLK: @@ -6223,24 +6223,15 @@ GenTree* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src) // Return Value: // Returns a node representing the struct value at the given address. // -// Assumptions: -// Any entry and exit conditions, such as required preconditions of -// data structures, memory to be freed by caller, etc. -// // Notes: // It will currently return a GT_OBJ node for any struct type, but may // return a GT_IND or a non-indirection for a scalar type. -// The node will not yet have its GC info initialized. This is because -// we may not need this info if this is an r-value. GenTree* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr) { var_types nodeType = impNormStructType(structHnd); assert(varTypeIsStruct(nodeType)); - unsigned size = info.compCompHnd->getClassSize(structHnd); - // It would be convenient to set the GC info at this time, but we don't actually require - // it unless this is going to be a destination. 
if (!varTypeIsStruct(nodeType)) { if ((addr->gtOper == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == nodeType)) @@ -6252,7 +6243,8 @@ GenTree* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr) return gtNewOperNode(GT_IND, nodeType, addr); } } - GenTreeBlk* newBlkOrObjNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, structHnd, size); + + GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd)); // An Obj is not a global reference, if it is known to be a local struct. if ((addr->gtFlags & GTF_GLOB_REF) == 0) @@ -6260,14 +6252,14 @@ GenTree* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr) GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr(); if (lclNode != nullptr) { - newBlkOrObjNode->gtFlags |= GTF_IND_NONFAULTING; + objNode->gtFlags |= GTF_IND_NONFAULTING; if (!lvaIsImplicitByRefLocal(lclNode->gtLclNum)) { - newBlkOrObjNode->gtFlags &= ~GTF_GLOB_REF; + objNode->gtFlags &= ~GTF_GLOB_REF; } } } - return newBlkOrObjNode; + return objNode; } //------------------------------------------------------------------------ @@ -6278,30 +6270,13 @@ GenTree* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr) void Compiler::gtSetObjGcInfo(GenTreeObj* objNode) { - CORINFO_CLASS_HANDLE structHnd = objNode->gtClass; - var_types nodeType = objNode->TypeGet(); - unsigned size = objNode->gtBlkSize; - unsigned slots = 0; - unsigned gcPtrCount = 0; - BYTE* gcPtrs = nullptr; - - assert(varTypeIsStruct(nodeType)); - assert(size == info.compCompHnd->getClassSize(structHnd)); - assert(nodeType == impNormStructType(structHnd)); + assert(varTypeIsStruct(objNode->TypeGet())); + assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle())); - if (nodeType == TYP_STRUCT) + if (!objNode->GetLayout()->HasGCPtr()) { - if (size >= TARGET_POINTER_SIZE) - { - // Get the GC fields info - var_types simdBaseType; // Dummy argument - slots = roundUp(size, TARGET_POINTER_SIZE) / 
TARGET_POINTER_SIZE; - gcPtrs = new (this, CMK_ASTNode) BYTE[slots]; - nodeType = impNormStructType(structHnd, gcPtrs, &gcPtrCount, &simdBaseType); - } + objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK); } - objNode->SetGCInfo(gcPtrs, gcPtrCount, slots); - assert(objNode->gtType == nodeType); } //------------------------------------------------------------------------ @@ -6366,7 +6341,7 @@ GenTree* Compiler::gtNewBlockVal(GenTree* addr, unsigned size) } } } - return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, size); + return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size)); } // Creates a new assignment node for a CpObj. @@ -6386,7 +6361,7 @@ GenTree* Compiler::gtNewCpObjNode(GenTree* dstAddr, GenTree* srcAddr, CORINFO_CL if (lhs->OperIsBlk()) { - size = lhs->AsBlk()->gtBlkSize; + size = lhs->AsBlk()->GetLayout()->GetSize(); if (lhs->OperGet() == GT_OBJ) { gtSetObjGcInfo(lhs->AsObj()); @@ -6481,33 +6456,6 @@ void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVa assert(dst->TypeGet() != TYP_STRUCT); return; } -#ifdef DEBUG - // If the copy involves GC pointers, the caller must have already set - // the node additional members (gtGcPtrs, gtGcPtrCount, gtSlots) on the dst. 
- if ((dst->gtOper == GT_OBJ) && dst->AsBlk()->HasGCPtr()) - { - GenTreeObj* objNode = dst->AsObj(); - assert(objNode->gtGcPtrs != nullptr); - assert(!IsUninitialized(objNode->gtGcPtrs)); - assert(!IsUninitialized(objNode->gtGcPtrCount)); - assert(!IsUninitialized(objNode->gtSlots) && objNode->gtSlots > 0); - - for (unsigned i = 0; i < objNode->gtGcPtrCount; ++i) - { - CorInfoGCType t = (CorInfoGCType)objNode->gtGcPtrs[i]; - switch (t) - { - case TYPE_GC_NONE: - case TYPE_GC_REF: - case TYPE_GC_BYREF: - case TYPE_GC_OTHER: - break; - default: - unreached(); - } - } - } -#endif // DEBUG /* In the case of CpBlk, we want to avoid generating * nodes where the source and destination are the same @@ -7190,14 +7138,14 @@ GenTree* Compiler::gtCloneExpr( break; case GT_OBJ: - copy = new (this, GT_OBJ) - GenTreeObj(tree->TypeGet(), tree->gtOp.gtOp1, tree->AsObj()->gtClass, tree->gtBlk.gtBlkSize); - copy->AsObj()->CopyGCInfo(tree->AsObj()); + copy = + new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout()); copy->gtBlk.gtBlkOpGcUnsafe = tree->gtBlk.gtBlkOpGcUnsafe; break; case GT_BLK: - copy = new (this, GT_BLK) GenTreeBlk(GT_BLK, tree->TypeGet(), tree->gtOp.gtOp1, tree->gtBlk.gtBlkSize); + copy = new (this, GT_BLK) + GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout()); copy->gtBlk.gtBlkOpGcUnsafe = tree->gtBlk.gtBlkOpGcUnsafe; break; @@ -9275,7 +9223,7 @@ void Compiler::gtDispNodeName(GenTree* tree) } else if (tree->OperIsBlk() && !tree->OperIsDynBlk()) { - sprintf_s(bufp, sizeof(buf), " %s(%d)", name, tree->AsBlk()->gtBlkSize); + sprintf_s(bufp, sizeof(buf), " %s(%d)", name, tree->AsBlk()->GetLayout()->GetSize()); } else { @@ -14528,12 +14476,8 @@ GenTree* Compiler::gtNewTempAssign( if (dstTyp == TYP_UNDEF) { varDsc->lvType = dstTyp = genActualType(valTyp); - if (varTypeIsGC(dstTyp)) - { - varDsc->lvStructGcCount = 1; - } #if FEATURE_SIMD - else if (varTypeIsSIMD(dstTyp)) + if 
(varTypeIsSIMD(dstTyp)) { varDsc->lvSIMDType = 1; } @@ -15522,7 +15466,7 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo if (blkNode != nullptr) { GenTree* destAddr = blkNode->Addr(); - unsigned width = blkNode->gtBlkSize; + unsigned width = blkNode->Size(); // Do we care about whether this assigns the entire variable? if (pIsEntire != nullptr && blkNode->OperIs(GT_DYN_BLK)) { @@ -16549,7 +16493,7 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree) structHnd = impGetRefAnyClass(); break; case GT_OBJ: - structHnd = tree->gtObj.gtClass; + structHnd = tree->AsObj()->GetLayout()->GetClassHandle(); break; case GT_CALL: structHnd = tree->gtCall.gtRetClsHnd; diff --git a/src/jit/gentree.h b/src/jit/gentree.h index e52d5d5afd19..ed8b8cdc9870 100644 --- a/src/jit/gentree.h +++ b/src/jit/gentree.h @@ -28,6 +28,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "jithashtable.h" #include "simd.h" #include "namedintrinsiclist.h" +#include "layout.h" // Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type // it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the @@ -4738,7 +4739,21 @@ struct GenTreeIndir : public GenTreeOp struct GenTreeBlk : public GenTreeIndir { +private: + ClassLayout* m_layout; + public: + ClassLayout* GetLayout() const + { + return m_layout; + } + + void SetLayout(ClassLayout* layout) + { + assert((layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK)); + m_layout = layout; + } + // The data to be stored (null for GT_BLK) GenTree*& Data() { @@ -4752,14 +4767,10 @@ struct GenTreeBlk : public GenTreeIndir // The size of the buffer to be copied. unsigned Size() const { - return gtBlkSize; + assert((m_layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK)); + return (m_layout != nullptr) ? 
m_layout->GetSize() : 0; } - unsigned gtBlkSize; - - // Return true iff the object being copied contains one or more GC pointers. - bool HasGCPtr(); - // True if this BlkOpNode is a volatile memory operation. bool IsVolatile() const { @@ -4784,20 +4795,22 @@ struct GenTreeBlk : public GenTreeIndir bool gtBlkOpGcUnsafe; - GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, unsigned size) + GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout) : GenTreeIndir(oper, type, addr, nullptr) - , gtBlkSize(size) + , m_layout(layout) , gtBlkOpKind(BlkOpKindInvalid) , gtBlkOpGcUnsafe(false) { assert(OperIsBlk(oper)); + assert((layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK)); gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); } - GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, unsigned size) - : GenTreeIndir(oper, type, addr, data), gtBlkSize(size), gtBlkOpKind(BlkOpKindInvalid), gtBlkOpGcUnsafe(false) + GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout) + : GenTreeIndir(oper, type, addr, data), m_layout(layout), gtBlkOpKind(BlkOpKindInvalid), gtBlkOpGcUnsafe(false) { assert(OperIsBlk(oper)); + assert((layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK)); gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); gtFlags |= (data->gtFlags & GTF_ALL_EFFECT); } @@ -4817,68 +4830,6 @@ struct GenTreeBlk : public GenTreeIndir struct GenTreeObj : public GenTreeBlk { - CORINFO_CLASS_HANDLE gtClass; // the class of the object - - // If non-null, this array represents the gc-layout of the class. - // This may be simply copied when cloning this node, because it is not changed once computed. - BYTE* gtGcPtrs; - - // If non-zero, this is the number of slots in the class layout that - // contain gc-pointers. 
- __declspec(property(get = GetGcPtrCount)) unsigned gtGcPtrCount; - unsigned GetGcPtrCount() const - { - assert(_gtGcPtrCount != UINT32_MAX); - return _gtGcPtrCount; - } - unsigned _gtGcPtrCount; - - // If non-zero, the number of pointer-sized slots that constitutes the class token. - unsigned gtSlots; - - bool IsGCInfoInitialized() - { - return (_gtGcPtrCount != UINT32_MAX); - } - - void SetGCInfo(BYTE* gcPtrs, unsigned gcPtrCount, unsigned slots) - { - gtGcPtrs = gcPtrs; - _gtGcPtrCount = gcPtrCount; - gtSlots = slots; - if (gtGcPtrCount != 0) - { - // We assume that we cannot have a struct with GC pointers that is not a multiple - // of the register size. - // The EE currently does not allow this, but it could change. - // Let's assert it just to be safe. - noway_assert(roundUp(gtBlkSize, REGSIZE_BYTES) == gtBlkSize); - } - else - { - genTreeOps newOper = GT_BLK; - if (gtOper == GT_STORE_OBJ) - { - newOper = GT_STORE_BLK; - } - else - { - assert(gtOper == GT_OBJ); - } - SetOper(newOper); - } - } - - void CopyGCInfo(GenTreeObj* srcObj) - { - if (srcObj->IsGCInfoInitialized()) - { - gtGcPtrs = srcObj->gtGcPtrs; - _gtGcPtrCount = srcObj->gtGcPtrCount; - gtSlots = srcObj->gtSlots; - } - } - void Init() { // By default, an OBJ is assumed to be a global reference, unless it is local. 
@@ -4887,18 +4838,16 @@ struct GenTreeObj : public GenTreeBlk { gtFlags |= GTF_GLOB_REF; } - noway_assert(gtClass != NO_CLASS_HANDLE); - _gtGcPtrCount = UINT32_MAX; + noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE); } - GenTreeObj(var_types type, GenTree* addr, CORINFO_CLASS_HANDLE cls, unsigned size) - : GenTreeBlk(GT_OBJ, type, addr, size), gtClass(cls) + GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout) { Init(); } - GenTreeObj(var_types type, GenTree* addr, GenTree* data, CORINFO_CLASS_HANDLE cls, unsigned size) - : GenTreeBlk(GT_STORE_OBJ, type, addr, data, size), gtClass(cls) + GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout) + : GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout) { Init(); } @@ -4922,7 +4871,7 @@ struct GenTreeDynBlk : public GenTreeBlk bool gtEvalSizeFirst; GenTreeDynBlk(GenTree* addr, GenTree* dynamicSize) - : GenTreeBlk(GT_DYN_BLK, TYP_STRUCT, addr, 0), gtDynamicSize(dynamicSize), gtEvalSizeFirst(false) + : GenTreeBlk(GT_DYN_BLK, TYP_STRUCT, addr, nullptr), gtDynamicSize(dynamicSize), gtEvalSizeFirst(false) { // Conservatively the 'addr' could be null or point into the global heap. gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; @@ -5238,8 +5187,6 @@ struct GenTreePutArgStk : public GenTreeUnOp #ifdef FEATURE_PUT_STRUCT_ARG_STK , gtPutArgStkKind(Kind::Invalid) , gtNumSlots(numSlots) - , gtNumberReferenceSlots(0) - , gtGcPtrs(nullptr) #endif // FEATURE_PUT_STRUCT_ARG_STK #if defined(DEBUG) || defined(UNIX_X86_ABI) , gtCall(callNode) @@ -5301,28 +5248,6 @@ struct GenTreePutArgStk : public GenTreeUnOp return (varTypeIsSIMD(gtOp1) && (gtNumSlots == 3)); } - //------------------------------------------------------------------------ - // setGcPointers: Sets the number of references and the layout of the struct object returned by the VM. - // - // Arguments: - // numPointers - Number of pointer references. 
- // pointers - layout of the struct (with pointers marked.) - // - // Return Value: - // None - // - // Notes: - // This data is used in the codegen for GT_PUTARG_STK to decide how to copy the struct to the stack by value. - // If no pointer references are used, block copying instructions are used. - // Otherwise the pointer reference slots are copied atomically in a way that gcinfo is emitted. - // Any non pointer references between the pointer reference slots are copied in block fashion. - // - void setGcPointers(unsigned numPointers, BYTE* pointers) - { - gtNumberReferenceSlots = numPointers; - gtGcPtrs = pointers; - } - // Instruction selection: during codegen time, what code sequence we will be using // to encode this operation. // TODO-Throughput: The following information should be obtained from the child @@ -5338,9 +5263,7 @@ struct GenTreePutArgStk : public GenTreeUnOp return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots); } - unsigned gtNumSlots; // Number of slots for the argument to be passed on stack - unsigned gtNumberReferenceSlots; // Number of reference slots. - BYTE* gtGcPtrs; // gcPointers + unsigned gtNumSlots; // Number of slots for the argument to be passed on stack #else // !FEATURE_PUT_STRUCT_ARG_STK unsigned getArgSize(); @@ -6670,27 +6593,6 @@ inline var_types& GenTree::CastToType() return this->gtCast.gtCastType; } -//----------------------------------------------------------------------------------- -// HasGCPtr: determine whether this block op involves GC pointers -// -// Arguments: -// None -// -// Return Value: -// Returns true iff the object being copied contains one or more GC pointers. -// -// Notes: -// Of the block nodes, only GT_OBJ and ST_STORE_OBJ are allowed to have GC pointers. 
-// -inline bool GenTreeBlk::HasGCPtr() -{ - if ((gtOper == GT_OBJ) || (gtOper == GT_STORE_OBJ)) - { - return (AsObj()->gtGcPtrCount != 0); - } - return false; -} - inline bool GenTree::isUsedFromSpillTemp() const { // If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded. diff --git a/src/jit/gschecks.cpp b/src/jit/gschecks.cpp index 92db6d97753a..4a2483abebf3 100644 --- a/src/jit/gschecks.cpp +++ b/src/jit/gschecks.cpp @@ -423,8 +423,11 @@ void Compiler::gsParamsToShadows() lvaTable[shadowVar].lvLclFieldExpr = varDsc->lvLclFieldExpr; lvaTable[shadowVar].lvLiveAcrossUCall = varDsc->lvLiveAcrossUCall; #endif - lvaTable[shadowVar].lvVerTypeInfo = varDsc->lvVerTypeInfo; - lvaTable[shadowVar].lvGcLayout = varDsc->lvGcLayout; + lvaTable[shadowVar].lvVerTypeInfo = varDsc->lvVerTypeInfo; + if (varTypeIsStruct(type)) + { + lvaTable[shadowVar].SetLayout(varDsc->GetLayout()); + } lvaTable[shadowVar].lvIsUnsafeBuffer = varDsc->lvIsUnsafeBuffer; lvaTable[shadowVar].lvIsPtr = varDsc->lvIsPtr; diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp index 467540b6967a..7fff80eef923 100644 --- a/src/jit/importer.cpp +++ b/src/jit/importer.cpp @@ -1323,7 +1323,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { - assert(src->gtObj.gtClass == structHnd); + assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) @@ -1458,7 +1458,7 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal, if (oper == GT_OBJ && willDeref) { - assert(structVal->gtObj.gtClass == structHnd); + assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->gtObj.Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || @@ -1512,99 +1512,58 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal, } 
//------------------------------------------------------------------------ -// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type, -// and optionally determine the GC layout of the struct. +// impNormStructType: Normalize the type of a (known to be) struct class handle. // // Arguments: // structHnd - The class handle for the struct type of interest. -// gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller, -// into which the gcLayout will be written. -// pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned, -// which will be set to the number of GC fields in the struct. // pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD // type, set to the SIMD base type // // Return Value: // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*). -// The gcLayout will be returned using the pointers provided by the caller, if non-null. // It may also modify the compFloatingPointUsed flag if the type is a SIMD type. // -// Assumptions: -// The caller must set gcLayout to nullptr OR ensure that it is large enough -// (see ICorStaticInfo::getClassGClayout in corinfo.h). -// // Notes: // Normalizing the type involves examining the struct type to determine if it should // be modified to one that is handled specially by the JIT, possibly being a candidate // for full enregistration, e.g. TYP_SIMD16. -var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, - BYTE* gcLayout, - unsigned* pNumGCVars, - var_types* pSimdBaseType) +var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* pSimdBaseType) { assert(structHnd != NO_CLASS_HANDLE); - const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); - var_types structType = TYP_STRUCT; - - // On coreclr the check for GC includes a "may" to account for the special - // ByRef like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit. 
- // When this is set the struct will contain a ByRef that could be a GC pointer or a native - // pointer. - const bool mayContainGCPtrs = - ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)); + var_types structType = TYP_STRUCT; #ifdef FEATURE_SIMD - // Check to see if this is a SIMD type. - if (supportSIMDTypes() && !mayContainGCPtrs) + if (supportSIMDTypes()) { - unsigned originalSize = info.compCompHnd->getClassSize(structHnd); + const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); - if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes())) + // Don't bother if the struct contains GC references or byrefs, it can't be a SIMD type. + if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_CONTAINS_STACK_PTR)) == 0) { - unsigned int sizeBytes; - var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes); - if (simdBaseType != TYP_UNKNOWN) + unsigned originalSize = info.compCompHnd->getClassSize(structHnd); + + if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes())) { - assert(sizeBytes == originalSize); - structType = getSIMDTypeForSize(sizeBytes); - if (pSimdBaseType != nullptr) + unsigned int sizeBytes; + var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes); + if (simdBaseType != TYP_UNKNOWN) { - *pSimdBaseType = simdBaseType; + assert(sizeBytes == originalSize); + structType = getSIMDTypeForSize(sizeBytes); + if (pSimdBaseType != nullptr) + { + *pSimdBaseType = simdBaseType; + } + // Also indicate that we use floating point registers. + compFloatingPointUsed = true; } - // Also indicate that we use floating point registers.
- compFloatingPointUsed = true; } } } #endif // FEATURE_SIMD - // Fetch GC layout info if requested - if (gcLayout != nullptr) - { - unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout); - - // Verify that the quick test up above via the class attributes gave a - // safe view of the type's GCness. - // - // Note there are cases where mayContainGCPtrs is true but getClassGClayout - // does not report any gc fields. - - assert(mayContainGCPtrs || (numGCVars == 0)); - - if (pNumGCVars != nullptr) - { - *pNumGCVars = numGCVars; - } - } - else - { - // Can't safely ask for number of GC pointers without also - // asking for layout. - assert(pNumGCVars == nullptr); - } - return structType; } @@ -9117,7 +9076,6 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re op = op1->gtOp.gtOp1; goto REDO_RETURN_NODE; } - op->gtObj.gtClass = NO_CLASS_HANDLE; op->ChangeOperUnchecked(GT_IND); op->gtFlags |= GTF_IND_TGTANYWHERE; } @@ -15657,7 +15615,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); - op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size); + op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); } else { @@ -15681,7 +15639,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); - op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size); + op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); } else { diff --git a/src/jit/layout.cpp b/src/jit/layout.cpp new file mode 100644 index 000000000000..f3f9eee1cd08 --- /dev/null +++ b/src/jit/layout.cpp @@ -0,0 +1,418 @@ +#include "jitpch.h" +#include "layout.h" +#include "compiler.h" + +// Keeps track of layout objects associated to class handles or block sizes. 
A layout is usually +// referenced by a pointer (ClassLayout*) but can also be referenced by a number (unsigned, +// FirstLayoutNum-based), when space constraints or other needs make numbers more appealing. +// Layout objects are immutable and there's always a 1:1 mapping between class handles/block sizes, +// pointers and numbers (e.g. class handle equality implies ClassLayout pointer equality). +class ClassLayoutTable +{ + // Each layout is assigned a number, starting with TYP_UNKNOWN + 1. This way one could use a single + // unsigned value to represent the notion of type - values below TYP_UNKNOWN are var_types and values + // above it are struct layouts. + static constexpr unsigned FirstLayoutNum = TYP_UNKNOWN + 1; + + typedef JitHashTable, unsigned> BlkLayoutIndexMap; + typedef JitHashTable, unsigned> ObjLayoutIndexMap; + + union { + // Up to 3 layouts can be stored "inline" and finding a layout by handle/size can be done using linear search. + // Most methods need no more than 2 layouts. + ClassLayout* m_layoutArray[3]; + // Otherwise a dynamic array is allocated and hashtables are used to map from handle/size to layout array index. + struct + { + ClassLayout** m_layoutLargeArray; + BlkLayoutIndexMap* m_blkLayoutMap; + ObjLayoutIndexMap* m_objLayoutMap; + }; + }; + // The number of layout objects stored in this table. + unsigned m_layoutCount; + // The capacity of m_layoutLargeArray (when more than 3 layouts are stored). + unsigned m_layoutLargeCapacity; + +public: + ClassLayoutTable() : m_layoutCount(0), m_layoutLargeCapacity(0) + { + } + + // Get the layout number (FirstLayoutNum-based) of the specified layout. 
+ unsigned GetLayoutNum(ClassLayout* layout) const + { + return GetLayoutIndex(layout) + FirstLayoutNum; + } + + // Get the layout having the specified layout number (FirstLayoutNum-based) + ClassLayout* GetLayoutByNum(unsigned num) const + { + assert(num >= FirstLayoutNum); + return GetLayoutByIndex(num - FirstLayoutNum); + } + + // Get the layout having the specified size but no class handle. + ClassLayout* GetBlkLayout(Compiler* compiler, unsigned blockSize) + { + return GetLayoutByIndex(GetBlkLayoutIndex(compiler, blockSize)); + } + + // Get the number of a layout having the specified size but no class handle. + unsigned GetBlkLayoutNum(Compiler* compiler, unsigned blockSize) + { + return GetBlkLayoutIndex(compiler, blockSize) + FirstLayoutNum; + } + + // Get the layout for the specified class handle. + ClassLayout* GetObjLayout(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle) + { + return GetLayoutByIndex(GetObjLayoutIndex(compiler, classHandle)); + } + + // Get the number of a layout for the specified class handle. 
+ unsigned GetObjLayoutNum(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle) + { + return GetObjLayoutIndex(compiler, classHandle) + FirstLayoutNum; + } + +private: + bool HasSmallCapacity() const + { + return m_layoutCount <= _countof(m_layoutArray); + } + + ClassLayout* GetLayoutByIndex(unsigned index) const + { + assert(index < m_layoutCount); + + if (HasSmallCapacity()) + { + return m_layoutArray[index]; + } + else + { + return m_layoutLargeArray[index]; + } + } + + unsigned GetLayoutIndex(ClassLayout* layout) const + { + assert(layout != nullptr); + + if (HasSmallCapacity()) + { + for (unsigned i = 0; i < m_layoutCount; i++) + { + if (m_layoutArray[i] == layout) + { + return i; + } + } + } + else + { + unsigned index = 0; + if ((layout->IsBlockLayout() && m_blkLayoutMap->Lookup(layout->GetSize(), &index)) || + m_objLayoutMap->Lookup(layout->GetClassHandle(), &index)) + { + return index; + } + } + + unreached(); + } + + unsigned GetBlkLayoutIndex(Compiler* compiler, unsigned blockSize) + { + if (HasSmallCapacity()) + { + for (unsigned i = 0; i < m_layoutCount; i++) + { + if (m_layoutArray[i]->IsBlockLayout() && (m_layoutArray[i]->GetSize() == blockSize)) + { + return i; + } + } + } + else + { + unsigned index; + if (m_blkLayoutMap->Lookup(blockSize, &index)) + { + return index; + } + } + + return AddBlkLayout(compiler, CreateBlkLayout(compiler, blockSize)); + } + + ClassLayout* CreateBlkLayout(Compiler* compiler, unsigned blockSize) + { + return new (compiler, CMK_ClassLayout) ClassLayout(blockSize); + } + + unsigned AddBlkLayout(Compiler* compiler, ClassLayout* layout) + { + if (m_layoutCount < _countof(m_layoutArray)) + { + m_layoutArray[m_layoutCount] = layout; + return m_layoutCount++; + } + + unsigned index = AddLayoutLarge(compiler, layout); + m_blkLayoutMap->Set(layout->GetSize(), index); + return index; + } + + unsigned GetObjLayoutIndex(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle) + { + assert(classHandle != NO_CLASS_HANDLE); + + if 
(HasSmallCapacity()) + { + for (unsigned i = 0; i < m_layoutCount; i++) + { + if (m_layoutArray[i]->GetClassHandle() == classHandle) + { + return i; + } + } + } + else + { + unsigned index; + if (m_objLayoutMap->Lookup(classHandle, &index)) + { + return index; + } + } + + return AddObjLayout(compiler, CreateObjLayout(compiler, classHandle)); + } + + ClassLayout* CreateObjLayout(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle) + { + return ClassLayout::Create(compiler, classHandle); + } + + unsigned AddObjLayout(Compiler* compiler, ClassLayout* layout) + { + if (m_layoutCount < _countof(m_layoutArray)) + { + m_layoutArray[m_layoutCount] = layout; + return m_layoutCount++; + } + + unsigned index = AddLayoutLarge(compiler, layout); + m_objLayoutMap->Set(layout->GetClassHandle(), index); + return index; + } + + unsigned AddLayoutLarge(Compiler* compiler, ClassLayout* layout) + { + if (m_layoutCount >= m_layoutLargeCapacity) + { + CompAllocator alloc = compiler->getAllocator(CMK_ClassLayout); + unsigned newCapacity = m_layoutCount * 2; + ClassLayout** newArray = alloc.allocate(newCapacity); + + if (m_layoutCount <= _countof(m_layoutArray)) + { + BlkLayoutIndexMap* blkLayoutMap = new (alloc) BlkLayoutIndexMap(alloc); + ObjLayoutIndexMap* objLayoutMap = new (alloc) ObjLayoutIndexMap(alloc); + + for (unsigned i = 0; i < m_layoutCount; i++) + { + ClassLayout* l = m_layoutArray[i]; + newArray[i] = l; + + if (l->IsBlockLayout()) + { + blkLayoutMap->Set(l->GetSize(), i); + } + else + { + objLayoutMap->Set(l->GetClassHandle(), i); + } + } + + m_blkLayoutMap = blkLayoutMap; + m_objLayoutMap = objLayoutMap; + } + else + { + memcpy(newArray, m_layoutLargeArray, m_layoutCount * sizeof(newArray[0])); + } + + m_layoutLargeArray = newArray; + m_layoutLargeCapacity = newCapacity; + } + + m_layoutLargeArray[m_layoutCount] = layout; + return m_layoutCount++; + } +}; + +ClassLayoutTable* Compiler::typCreateClassLayoutTable() +{ + assert(m_classLayoutTable == nullptr); + + if 
(compIsForInlining()) + { + m_classLayoutTable = impInlineInfo->InlinerCompiler->m_classLayoutTable; + + if (m_classLayoutTable == nullptr) + { + m_classLayoutTable = new (this, CMK_ClassLayout) ClassLayoutTable(); + + impInlineInfo->InlinerCompiler->m_classLayoutTable = m_classLayoutTable; + } + } + else + { + m_classLayoutTable = new (this, CMK_ClassLayout) ClassLayoutTable(); + } + + return m_classLayoutTable; +} + +ClassLayoutTable* Compiler::typGetClassLayoutTable() +{ + if (m_classLayoutTable == nullptr) + { + return typCreateClassLayoutTable(); + } + + return m_classLayoutTable; +} + +ClassLayout* Compiler::typGetLayoutByNum(unsigned layoutNum) +{ + return typGetClassLayoutTable()->GetLayoutByNum(layoutNum); +} + +unsigned Compiler::typGetLayoutNum(ClassLayout* layout) +{ + return typGetClassLayoutTable()->GetLayoutNum(layout); +} + +unsigned Compiler::typGetBlkLayoutNum(unsigned blockSize) +{ + return typGetClassLayoutTable()->GetBlkLayoutNum(this, blockSize); +} + +ClassLayout* Compiler::typGetBlkLayout(unsigned blockSize) +{ + return typGetClassLayoutTable()->GetBlkLayout(this, blockSize); +} + +unsigned Compiler::typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle) +{ + return typGetClassLayoutTable()->GetObjLayoutNum(this, classHandle); +} + +ClassLayout* Compiler::typGetObjLayout(CORINFO_CLASS_HANDLE classHandle) +{ + return typGetClassLayoutTable()->GetObjLayout(this, classHandle); +} + +ClassLayout* ClassLayout::Create(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle) +{ + bool isValueClass = compiler->info.compCompHnd->isValueClass(classHandle); + unsigned size; + + if (isValueClass) + { + size = compiler->info.compCompHnd->getClassSize(classHandle); + } + else + { + size = compiler->info.compCompHnd->getHeapClassSize(classHandle); + } + + INDEBUG(const char* className = compiler->info.compCompHnd->getClassName(classHandle);) + + ClassLayout* layout = + new (compiler, CMK_ClassLayout) ClassLayout(classHandle, isValueClass, size 
DEBUGARG(className)); + layout->InitializeGCPtrs(compiler); + return layout; +} + +void ClassLayout::InitializeGCPtrs(Compiler* compiler) +{ + assert(!m_gcPtrsInitialized); + assert(!IsBlockLayout()); + + if (m_size < TARGET_POINTER_SIZE) + { + assert(GetSlotCount() == 1); + assert(m_gcPtrCount == 0); + + m_gcPtrsArray[0] = TYPE_GC_NONE; + } + else + { + BYTE* gcPtrs; + + if (GetSlotCount() > sizeof(m_gcPtrsArray)) + { + gcPtrs = m_gcPtrs = new (compiler, CMK_ClassLayout) BYTE[GetSlotCount()]; + } + else + { + gcPtrs = m_gcPtrsArray; + } + + unsigned gcPtrCount = compiler->info.compCompHnd->getClassGClayout(m_classHandle, gcPtrs); + + assert((gcPtrCount == 0) || ((compiler->info.compCompHnd->getClassAttribs(m_classHandle) & + (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_CONTAINS_STACK_PTR)) != 0)); + + // Since class size is unsigned there's no way we could have more than 2^30 slots + // so it should be safe to fit this into a 30 bits bit field. + assert(gcPtrCount < (1 << 30)); + + // We assume that we cannot have a struct with GC pointers that is not a multiple + // of the register size. + // The EE currently does not allow this, but it could change. + // Let's assert it just to be safe. 
+ noway_assert((gcPtrCount == 0) || (roundUp(m_size, REGSIZE_BYTES) == m_size)); + + m_gcPtrCount = gcPtrCount; + } + + INDEBUG(m_gcPtrsInitialized = true;) +} + +#ifdef _TARGET_AMD64_ +ClassLayout* ClassLayout::GetPPPQuirkLayout(CompAllocator alloc) +{ + assert(m_gcPtrsInitialized); + assert(m_classHandle != NO_CLASS_HANDLE); + assert(m_isValueClass); + assert(m_size == 32); + + if (m_pppQuirkLayout == nullptr) + { + m_pppQuirkLayout = new (alloc) ClassLayout(m_classHandle, m_isValueClass, 64 DEBUGARG(m_className)); + m_pppQuirkLayout->m_gcPtrCount = m_gcPtrCount; + + static_assert_no_msg(_countof(m_gcPtrsArray) == 8); + + for (int i = 0; i < 4; i++) + { + m_pppQuirkLayout->m_gcPtrsArray[i] = m_gcPtrsArray[i]; + } + + for (int i = 4; i < 8; i++) + { + m_pppQuirkLayout->m_gcPtrsArray[i] = TYPE_GC_NONE; + } + + INDEBUG(m_pppQuirkLayout->m_gcPtrsInitialized = true;) + } + + return m_pppQuirkLayout; +} +#endif // _TARGET_AMD64_ diff --git a/src/jit/layout.h b/src/jit/layout.h new file mode 100644 index 000000000000..6eb71e22f881 --- /dev/null +++ b/src/jit/layout.h @@ -0,0 +1,185 @@ +#ifndef LAYOUT_H +#define LAYOUT_H + +#include "jit.h" + +// Encapsulates layout information about a class (typically a value class but this can also be +// be used for reference classes when they are stack allocated). The class handle is optional, +// allowing the creation of "block" layout objects having a specific size but lacking any other +// layout information. The JIT uses such layout objects in cases where a class handle is not +// available (cpblk/initblk operations) or not necessary (classes that do not contain GC pointers). +class ClassLayout +{ + // Class handle or NO_CLASS_HANDLE for "block" layouts. + const CORINFO_CLASS_HANDLE m_classHandle; + + // Size of the layout in bytes (as reported by ICorJitInfo::getClassSize/getHeapClassSize + // for non "block" layouts). For "block" layouts this may be 0 due to 0 being a valid size + // for cpblk/initblk. 
+ const unsigned m_size; + + const unsigned m_isValueClass : 1; + INDEBUG(unsigned m_gcPtrsInitialized : 1;) + // The number of GC pointers in this layout. Since the maximum size is 2^32-1 the count + // can fit in at most 30 bits. + unsigned m_gcPtrCount : 30; + + // Array of CorInfoGCType (as BYTE) that describes the GC layout of the class. + // For small classes the array is stored inline, avoiding an extra allocation + // and the pointer size overhead. + union { + BYTE* m_gcPtrs; + BYTE m_gcPtrsArray[sizeof(BYTE*)]; + }; + +#ifdef _TARGET_AMD64_ + // A layout that has its size artificially inflated to avoid stack corruption due to + // bugs in user code - see Compiler::compQuirkForPPP for details. + ClassLayout* m_pppQuirkLayout; +#endif + + // Class name as reported by ICorJitInfo::getClassName + INDEBUG(const char* m_className;) + + // ClassLayout instances should only be obtained via ClassLayoutTable. + friend class ClassLayoutTable; + + ClassLayout(unsigned size) + : m_classHandle(NO_CLASS_HANDLE) + , m_size(size) + , m_isValueClass(false) +#ifdef DEBUG + , m_gcPtrsInitialized(true) +#endif + , m_gcPtrCount(0) + , m_gcPtrs(nullptr) +#ifdef _TARGET_AMD64_ + , m_pppQuirkLayout(nullptr) +#endif +#ifdef DEBUG + , m_className("block") +#endif + { + } + + static ClassLayout* Create(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle); + + ClassLayout(CORINFO_CLASS_HANDLE classHandle, bool isValueClass, unsigned size DEBUGARG(const char* className)) + : m_classHandle(classHandle) + , m_size(size) + , m_isValueClass(isValueClass) +#ifdef DEBUG + , m_gcPtrsInitialized(false) +#endif + , m_gcPtrCount(0) + , m_gcPtrs(nullptr) +#ifdef _TARGET_AMD64_ + , m_pppQuirkLayout(nullptr) +#endif +#ifdef DEBUG + , m_className(className) +#endif + { + assert(size != 0); + } + + void InitializeGCPtrs(Compiler* compiler); + +public: +#ifdef _TARGET_AMD64_ + // Get the layout for the PPP quirk - see Compiler::compQuirkForPPP for details.
+ ClassLayout* GetPPPQuirkLayout(CompAllocator alloc); +#endif + + CORINFO_CLASS_HANDLE GetClassHandle() const + { + return m_classHandle; + } + + bool IsBlockLayout() const + { + return m_classHandle == NO_CLASS_HANDLE; + } + +#ifdef DEBUG + const char* GetClassName() const + { + return m_className; + } +#endif + + bool IsValueClass() const + { + assert(!IsBlockLayout()); + + return m_isValueClass; + } + + unsigned GetSize() const + { + return m_size; + } + + unsigned GetSlotCount() const + { + return roundUp(m_size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; + } + + unsigned GetGCPtrCount() const + { + assert(m_gcPtrsInitialized); + + return m_gcPtrCount; + } + + bool HasGCPtr() const + { + assert(m_gcPtrsInitialized); + + return m_gcPtrCount != 0; + } + + bool IsGCPtr(unsigned slot) const + { + return GetGCPtr(slot) != TYPE_GC_NONE; + } + + var_types GetGCPtrType(unsigned slot) const + { + switch (GetGCPtr(slot)) + { + case TYPE_GC_NONE: + return TYP_I_IMPL; + case TYPE_GC_REF: + return TYP_REF; + case TYPE_GC_BYREF: + return TYP_BYREF; + default: + unreached(); + } + } + +private: + const BYTE* GetGCPtrs() const + { + assert(m_gcPtrsInitialized); + assert(!IsBlockLayout()); + + return (GetSlotCount() > sizeof(m_gcPtrsArray)) ? 
m_gcPtrs : m_gcPtrsArray; + } + + CorInfoGCType GetGCPtr(unsigned slot) const + { + assert(m_gcPtrsInitialized); + assert(slot < GetSlotCount()); + + if (m_gcPtrCount == 0) + { + return TYPE_GC_NONE; + } + + return static_cast(GetGCPtrs()[slot]); + } +}; + +#endif // LAYOUT_H diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp index b865e94c4b53..249aa93244de 100644 --- a/src/jit/lclvars.cpp +++ b/src/jit/lclvars.cpp @@ -412,7 +412,7 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo) if (supportSIMDTypes()) { var_types simdBaseType = TYP_UNKNOWN; - var_types type = impNormStructType(info.compClassHnd, nullptr, nullptr, &simdBaseType); + var_types type = impNormStructType(info.compClassHnd, &simdBaseType); if (simdBaseType != TYP_UNKNOWN) { assert(varTypeIsSIMD(type)); @@ -1292,11 +1292,6 @@ void Compiler::lvaInitVarDsc(LclVarDsc* varDsc, varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags); } - if (varTypeIsGC(type)) - { - varDsc->lvStructGcCount = 1; - } - #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) varDsc->lvIsImplicitByRef = 0; #endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) @@ -2504,43 +2499,14 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool } if (varDsc->lvExactSize == 0) { - BOOL isValueClass = info.compCompHnd->isValueClass(typeHnd); - - if (isValueClass) - { - varDsc->lvExactSize = info.compCompHnd->getClassSize(typeHnd); - } - else - { - varDsc->lvExactSize = info.compCompHnd->getHeapClassSize(typeHnd); - } - - // Normalize struct types, and fill in GC info for all types - unsigned lvSize = varDsc->lvSize(); - // The struct needs to be a multiple of TARGET_POINTER_SIZE bytes for getClassGClayout() to be valid. 
- assert((lvSize % TARGET_POINTER_SIZE) == 0); - varDsc->lvGcLayout = getAllocator(CMK_LvaTable).allocate(lvSize / TARGET_POINTER_SIZE); - unsigned numGCVars = 0; - var_types simdBaseType = TYP_UNKNOWN; - if (isValueClass) - { - varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType); - } - else - { - numGCVars = info.compCompHnd->getClassGClayout(typeHnd, varDsc->lvGcLayout); - } - - // We only save the count of GC vars in a struct up to 7. - if (numGCVars >= 8) - { - numGCVars = 7; - } + ClassLayout* layout = typGetObjLayout(typeHnd); + varDsc->SetLayout(layout); + varDsc->lvExactSize = layout->GetSize(); - varDsc->lvStructGcCount = numGCVars; - - if (isValueClass) + if (layout->IsValueClass()) { + var_types simdBaseType = TYP_UNKNOWN; + varDsc->lvType = impNormStructType(typeHnd, &simdBaseType); #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) // Mark implicit byref struct parameters @@ -2575,7 +2541,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool varDsc->SetHfaType(hfaType); // hfa variables can never contain GC pointers - assert(varDsc->lvStructGcCount == 0); + assert(!layout->HasGCPtr()); // The size of this struct should be evenly divisible by 4 or 8 assert((varDsc->lvExactSize % genTypeSize(hfaType)) == 0); // The number of elements in the HFA should fit into our MAX_ARG_REG_COUNT limit @@ -2838,17 +2804,6 @@ void Compiler::lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HAND } } -/***************************************************************************** - * Returns the array of BYTEs containing the GC layout information - */ - -BYTE* Compiler::lvaGetGcLayout(unsigned varNum) -{ - assert(varTypeIsStruct(lvaTable[varNum].lvType) && (lvaTable[varNum].lvExactSize >= TARGET_POINTER_SIZE)); - - return lvaTable[varNum].lvGcLayout; -} - //------------------------------------------------------------------------ // lvaLclSize: returns size of a local variable, in bytes // 
@@ -3618,25 +3573,8 @@ var_types LclVarDsc::lvaArgType() type = TYP_INT; break; case 8: - switch (*lvGcLayout) - { - case TYPE_GC_NONE: - type = TYP_I_IMPL; - break; - - case TYPE_GC_REF: - type = TYP_REF; - break; - - case TYPE_GC_BYREF: - type = TYP_BYREF; - break; - - default: - unreached(); - } + type = m_layout->GetGCPtrType(0); break; - default: type = TYP_BYREF; break; diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp index 344d592e65cd..19b4ebc07243 100644 --- a/src/jit/lower.cpp +++ b/src/jit/lower.cpp @@ -1055,33 +1055,12 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf if (arg->OperGet() == GT_OBJ) { - BYTE* gcLayout = nullptr; - unsigned numRefs = 0; - GenTreeObj* argObj = arg->AsObj(); - - if (argObj->IsGCInfoInitialized()) - { - gcLayout = argObj->gtGcPtrs; - numRefs = argObj->GetGcPtrCount(); - } - else - { - // Set GC Pointer info - gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots + info->numRegs]; - numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout); - argSplit->setGcPointers(numRefs, gcLayout); - } + ClassLayout* layout = arg->AsObj()->GetLayout(); // Set type of registers for (unsigned index = 0; index < info->numRegs; index++) { - var_types regType = comp->getJitGCType(gcLayout[index]); - // Account for the possibility that float fields may be passed in integer registers. - if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(index))) - { - regType = (regType == TYP_FLOAT) ? 
TYP_INT : TYP_LONG; - } - argSplit->m_regType[index] = regType; + argSplit->m_regType[index] = layout->GetGCPtrType(index); } } else @@ -1190,11 +1169,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf } else if (arg->OperIs(GT_OBJ)) { - unsigned numRefs = 0; - BYTE* gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots]; assert(!varTypeIsSIMD(arg)); - numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout); - putArg->AsPutArgStk()->setGcPointers(numRefs, gcLayout); #ifdef _TARGET_X86_ // On x86 VM lies about the type of a struct containing a pointer sized diff --git a/src/jit/lowerarmarch.cpp b/src/jit/lowerarmarch.cpp index 36988ebb4397..984be77eddb8 100644 --- a/src/jit/lowerarmarch.cpp +++ b/src/jit/lowerarmarch.cpp @@ -233,7 +233,7 @@ void Lowering::LowerStoreIndir(GenTreeIndir* node) void Lowering::LowerBlockStore(GenTreeBlk* blkNode) { GenTree* dstAddr = blkNode->Addr(); - unsigned size = blkNode->gtBlkSize; + unsigned size = blkNode->Size(); GenTree* source = blkNode->Data(); Compiler* compiler = comp; @@ -244,7 +244,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) if (!isInitBlk) { // CopyObj or CopyBlk - if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe)) + if (blkNode->OperIs(GT_STORE_OBJ) && (!blkNode->AsObj()->GetLayout()->HasGCPtr() || blkNode->gtBlkOpGcUnsafe)) { blkNode->SetOper(GT_STORE_BLK); } @@ -312,17 +312,16 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) // CopyObj GenTreeObj* objNode = blkNode->AsObj(); - unsigned slots = objNode->gtSlots; + unsigned slots = objNode->GetLayout()->GetSlotCount(); #ifdef DEBUG // CpObj must always have at least one GC-Pointer as a member. 
- assert(objNode->gtGcPtrCount > 0); + assert(objNode->GetLayout()->HasGCPtr()); assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL); - CORINFO_CLASS_HANDLE clsHnd = objNode->gtClass; - size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd); - size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE); + size_t classSize = objNode->GetLayout()->GetSize(); + size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE); // Currently, the EE always round up a class data structure so // we are not handling the case where we have a non multiple of pointer sized @@ -331,7 +330,6 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) // handle this case. assert(classSize == blkSize); assert((blkSize / TARGET_POINTER_SIZE) == slots); - assert(objNode->HasGCPtr()); #endif blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll; diff --git a/src/jit/lowerxarch.cpp b/src/jit/lowerxarch.cpp index 613f6cb9f8b7..df5f5833c5d2 100644 --- a/src/jit/lowerxarch.cpp +++ b/src/jit/lowerxarch.cpp @@ -146,7 +146,7 @@ void Lowering::LowerStoreIndir(GenTreeIndir* node) void Lowering::LowerBlockStore(GenTreeBlk* blkNode) { GenTree* dstAddr = blkNode->Addr(); - unsigned size = blkNode->gtBlkSize; + unsigned size = blkNode->Size(); GenTree* source = blkNode->Data(); GenTree* srcAddrOrFill = nullptr; bool isInitBlk = blkNode->OperIsInitBlkOp(); @@ -154,7 +154,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) if (!isInitBlk) { // CopyObj or CopyBlk - if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe)) + if (blkNode->OperIs(GT_STORE_OBJ) && (!blkNode->AsObj()->GetLayout()->HasGCPtr() || blkNode->gtBlkOpGcUnsafe)) { blkNode->SetOper(GT_STORE_BLK); } @@ -247,15 +247,15 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) GenTreeObj* cpObjNode = blkNode->AsObj(); - unsigned slots = cpObjNode->gtSlots; + unsigned slots = cpObjNode->GetLayout()->GetSlotCount(); #ifdef DEBUG // CpObj must always have at least one 
GC-Pointer as a member. - assert(cpObjNode->gtGcPtrCount > 0); + assert(cpObjNode->GetLayout()->HasGCPtr()); assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL); - CORINFO_CLASS_HANDLE clsHnd = cpObjNode->gtClass; + CORINFO_CLASS_HANDLE clsHnd = cpObjNode->GetLayout()->GetClassHandle(); size_t classSize = comp->info.compCompHnd->getClassSize(clsHnd); size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE); @@ -266,7 +266,6 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) // handle this case. assert(classSize == blkSize); assert((blkSize / TARGET_POINTER_SIZE) == slots); - assert(cpObjNode->HasGCPtr()); #endif bool IsRepMovsProfitable = false; @@ -279,20 +278,20 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) // Let's inspect the struct/class layout and determine if it's profitable // to use rep movsq for copying non-gc memory instead of using single movsq // instructions for each memory slot. - unsigned i = 0; - BYTE* gcPtrs = cpObjNode->gtGcPtrs; + unsigned i = 0; + ClassLayout* layout = cpObjNode->GetLayout(); do { unsigned nonGCSlots = 0; // Measure a contiguous non-gc area inside the struct and note the maximum. 
- while (i < slots && gcPtrs[i] == TYPE_GC_NONE) + while ((i < slots) && !layout->IsGCPtr(i)) { nonGCSlots++; i++; } - while (i < slots && gcPtrs[i] != TYPE_GC_NONE) + while ((i < slots) && layout->IsGCPtr(i)) { i++; } @@ -412,8 +411,7 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) #ifdef _TARGET_X86_ if (putArgStk->gtOp1->gtOper == GT_FIELD_LIST) { - putArgStk->gtNumberReferenceSlots = 0; - putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Invalid; + putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Invalid; GenTreeFieldList* fieldList = putArgStk->gtOp1->AsFieldList(); @@ -500,11 +498,6 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) allFieldsAreSlots = false; } - if (varTypeIsGC(fieldType)) - { - putArgStk->gtNumberReferenceSlots++; - } - // For x86 we must mark all integral fields as contained or reg-optional, and handle them // accordingly in code generation, since we may have up to 8 fields, which cannot all be in // registers to be consumed atomically by the call. @@ -606,6 +599,8 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) assert(varTypeIsSIMD(putArgStk)); } + ClassLayout* layout = src->AsObj()->GetLayout(); + // In case of a CpBlk we could use a helper call. In case of putarg_stk we // can't do that since the helper call could kill some already set up outgoing args. // TODO-Amd64-Unix: converge the code for putarg_stk with cpyblk/cpyobj. @@ -623,7 +618,7 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) // If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2. // Structs and buffer with sizes <= CPBLK_UNROLL_LIMIT bytes are occurring in more than 95% of // our framework assemblies, so this is the main code generation scheme we'll use. 
- if (size <= CPBLK_UNROLL_LIMIT && putArgStk->gtNumberReferenceSlots == 0) + if (size <= CPBLK_UNROLL_LIMIT && !layout->HasGCPtr()) { #ifdef _TARGET_X86_ if (size < XMM_REGSIZE_BYTES) @@ -637,7 +632,7 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) } } #ifdef _TARGET_X86_ - else if (putArgStk->gtNumberReferenceSlots != 0) + else if (layout->HasGCPtr()) { // On x86, we must use `push` to store GC references to the stack in order for the emitter to properly update // the function's GC info. These `putargstk` nodes will generate a sequence of `push` instructions. diff --git a/src/jit/lsraarmarch.cpp b/src/jit/lsraarmarch.cpp index e51c73f7b068..eec4e2626e46 100644 --- a/src/jit/lsraarmarch.cpp +++ b/src/jit/lsraarmarch.cpp @@ -568,7 +568,7 @@ int LinearScan::BuildPutArgSplit(GenTreePutArgSplit* argNode) int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) { GenTree* dstAddr = blkNode->Addr(); - unsigned size = blkNode->gtBlkSize; + unsigned size = blkNode->Size(); GenTree* source = blkNode->Data(); int srcCount = 0; diff --git a/src/jit/lsrabuild.cpp b/src/jit/lsrabuild.cpp index 222579ef1a1b..724171b8ebc6 100644 --- a/src/jit/lsrabuild.cpp +++ b/src/jit/lsrabuild.cpp @@ -906,7 +906,7 @@ regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode) if ((blkNode->OperGet() == GT_STORE_OBJ) && blkNode->OperIsCopyBlkOp()) { - assert(blkNode->AsObj()->gtGcPtrCount != 0); + assert(blkNode->AsObj()->GetLayout()->HasGCPtr()); killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF); } else @@ -3236,7 +3236,7 @@ int LinearScan::BuildPutArgReg(GenTreeUnOp* node) { GenTreeObj* obj = op1->AsObj(); GenTree* addr = obj->Addr(); - unsigned size = obj->gtBlkSize; + unsigned size = obj->GetLayout()->GetSize(); assert(size <= MAX_PASS_SINGLEREG_BYTES); if (addr->OperIsLocalAddr()) { diff --git a/src/jit/lsraxarch.cpp b/src/jit/lsraxarch.cpp index 33c077ee932c..a81c29fc29e0 100644 --- a/src/jit/lsraxarch.cpp +++ b/src/jit/lsraxarch.cpp @@ -1269,7 
+1269,7 @@ int LinearScan::BuildCall(GenTreeCall* call) int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) { GenTree* dstAddr = blkNode->Addr(); - unsigned size = blkNode->gtBlkSize; + unsigned size = blkNode->Size(); GenTree* source = blkNode->Data(); int srcCount = 0; @@ -1524,10 +1524,6 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) } #endif // _TARGET_X86_ - if (varTypeIsGC(fieldType)) - { - putArgStk->gtNumberReferenceSlots++; - } prevOffset = fieldOffset; } @@ -1565,8 +1561,7 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) return BuildSimple(putArgStk); } - GenTree* dst = putArgStk; - GenTree* srcAddr = nullptr; + ClassLayout* layout = src->AsObj()->GetLayout(); // If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2. // Structs and buffer with sizes <= CPBLK_UNROLL_LIMIT bytes are occurring in more than 95% of @@ -1582,7 +1577,7 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) // x86 specific note: if the size is odd, the last copy operation would be of size 1 byte. // But on x86 only RBM_BYTE_REGS could be used as byte registers. Therefore, exclude // RBM_NON_BYTE_REGS from internal candidates. 
- if ((putArgStk->gtNumberReferenceSlots == 0) && (size & (XMM_REGSIZE_BYTES - 1)) != 0) + if (!layout->HasGCPtr() && (size & (XMM_REGSIZE_BYTES - 1)) != 0) { regMaskTP regMask = allRegs(TYP_INT); diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp index 2deb038bd2a9..64ab82331e99 100644 --- a/src/jit/morph.cpp +++ b/src/jit/morph.cpp @@ -1628,9 +1628,8 @@ void fgArgInfo::ArgsComplete() // else if (argx->OperGet() == GT_OBJ) { - GenTreeObj* argObj = argx->AsObj(); - CORINFO_CLASS_HANDLE objClass = argObj->gtClass; - unsigned structSize = compiler->info.compCompHnd->getClassSize(objClass); + GenTreeObj* argObj = argx->AsObj(); + unsigned structSize = argObj->GetLayout()->GetSize(); switch (structSize) { case 3: @@ -3193,8 +3192,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) switch (actualArg->OperGet()) { case GT_OBJ: - // Get the size off the OBJ node. - structSize = actualArg->AsObj()->gtBlkSize; + structSize = actualArg->AsObj()->GetLayout()->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); break; case GT_LCL_VAR: @@ -3838,7 +3836,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) if (argObj->OperIs(GT_OBJ)) { // Get the size off the OBJ node. 
- originalSize = argObj->AsObj()->gtBlkSize; + originalSize = argObj->AsObj()->GetLayout()->GetSize(); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } else @@ -4403,7 +4401,7 @@ void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call) unsigned structSize; if (argx->OperIs(GT_OBJ)) { - structSize = argx->AsObj()->gtBlkSize; + structSize = argx->AsObj()->GetLayout()->GetSize(); } else { @@ -4545,7 +4543,7 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry else if (arg->OperGet() == GT_OBJ) { GenTreeObj* argObj = arg->AsObj(); - structSize = argObj->Size(); + structSize = argObj->GetLayout()->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); // If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR. @@ -4730,14 +4728,14 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry for (unsigned inx = 0; inx < elemCount; inx++) { - CorInfoGCType currentGcLayoutType = (CorInfoGCType)varDsc->lvGcLayout[inx]; + var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx); // We setup the type[inx] value above using the GC info from 'objClass' // This GT_LCL_VAR must have the same GC layout info // - if (currentGcLayoutType != TYPE_GC_NONE) + if (varTypeIsGC(currentGcLayoutType)) { - noway_assert(type[inx] == getJitGCType((BYTE)currentGcLayoutType)); + noway_assert(type[inx] == currentGcLayoutType); } else { @@ -4891,21 +4889,21 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry // The allocated size of our LocalVar must be at least as big as lastOffset assert(varDsc->lvSize() >= lastOffset); - if (varDsc->lvStructGcCount > 0) + if (varDsc->HasGCPtr()) { // alignment of the baseOffset is required noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0); #ifndef UNIX_AMD64_ABI noway_assert(elemSize == TARGET_POINTER_SIZE); #endif - unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; - const BYTE* 
gcPtrs = varDsc->lvGcLayout; // Get the GC layout for the local variable + unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; + ClassLayout* layout = varDsc->GetLayout(); for (unsigned inx = 0; (inx < elemCount); inx++) { // The GC information must match what we setup using 'objClass' - if ((gcPtrs[baseIndex + inx] != TYPE_GC_NONE) || varTypeGCtype(type[inx])) + if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx])) { - noway_assert(type[inx] == getJitGCType(gcPtrs[baseIndex + inx])); + noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx)); } } } @@ -8307,7 +8305,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa { var_types lclType = varDsc->TypeGet(); bool isUserLocal = (varNum < info.compLocalsCount); - bool structWithGCFields = ((lclType == TYP_STRUCT) && (varDsc->lvStructGcCount > 0)); + bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr()); if (isUserLocal || structWithGCFields) { GenTree* lcl = gtNewLclvNode(varNum, lclType); @@ -8973,7 +8971,7 @@ GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree) } if (lhsBlk->OperGet() == GT_OBJ) { - clsHnd = lhsBlk->AsObj()->gtClass; + clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle(); } } else @@ -9814,7 +9812,7 @@ GenTree* Compiler::fgMorphBlkNode(GenTree* tree, bool isDest) } else { - tree = new (this, GT_BLK) GenTreeBlk(GT_BLK, structType, addr, genTypeSize(structType)); + tree = new (this, GT_BLK) GenTreeBlk(GT_BLK, structType, addr, typGetBlkLayout(genTypeSize(structType))); } gtUpdateNodeSideEffects(tree); @@ -9839,7 +9837,7 @@ GenTree* Compiler::fgMorphBlkNode(GenTree* tree, bool isDest) { blkNode->AsDynBlk()->gtDynamicSize = nullptr; blkNode->ChangeOper(GT_BLK); - blkNode->gtBlkSize = size; + blkNode->SetLayout(typGetBlkLayout(size)); } else { @@ -9971,7 +9969,7 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne CORINFO_CLASS_HANDLE clsHnd = 
gtGetStructHandleIfPresent(effectiveVal); if (clsHnd == NO_CLASS_HANDLE) { - newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, blockWidth); + newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth)); } else { @@ -10012,8 +10010,8 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne void Compiler::fgMorphUnsafeBlk(GenTreeObj* dest) { #if defined(CPBLK_UNROLL_LIMIT) && !defined(JIT32_GCENCODER) - assert(dest->gtGcPtrCount != 0); - unsigned blockWidth = dest->AsBlk()->gtBlkSize; + assert(dest->GetLayout()->HasGCPtr()); + unsigned blockWidth = dest->GetLayout()->GetSize(); #ifdef DEBUG bool destOnStack = false; GenTree* destAddr = dest->Addr(); @@ -10135,7 +10133,7 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree) { blockWidth = genTypeSize(destLclVar->lvType); } - hasGCPtrs = destLclVar->lvStructGcCount != 0; + hasGCPtrs = destLclVar->HasGCPtr(); } else { @@ -10164,7 +10162,7 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree) assert(effectiveDest->OperIsBlk()); GenTreeBlk* blk = effectiveDest->AsBlk(); - blockWidth = blk->gtBlkSize; + blockWidth = blk->Size(); blockWidthIsConst = (blk->gtOper != GT_DYN_BLK); if ((dest == effectiveDest) && ((dest->gtFlags & GTF_IND_ARR_INDEX) == 0)) { @@ -18554,7 +18552,7 @@ class LocalAddressVisitor final : public GenTreeVisitor m_compiler->info.compCompHnd->getFieldClass(indir->AsField()->gtFldHnd)); case GT_BLK: case GT_OBJ: - return indir->AsBlk()->gtBlkSize; + return indir->AsBlk()->GetLayout()->GetSize(); default: assert(indir->OperIs(GT_IND, GT_DYN_BLK)); return 0; diff --git a/src/jit/rationalize.cpp b/src/jit/rationalize.cpp index 6670f7a8ae7a..288cddab9b8d 100644 --- a/src/jit/rationalize.cpp +++ b/src/jit/rationalize.cpp @@ -373,13 +373,10 @@ void Rationalizer::RewriteAssignment(LIR::Use& use) GenTreeBlk* storeBlk = nullptr; unsigned int size = varDsc->lvExactSize; - if (varDsc->lvStructGcCount != 0) + if (varDsc->HasGCPtr()) { 
CORINFO_CLASS_HANDLE structHnd = varDsc->lvVerTypeInfo.GetClassHandle(); GenTreeObj* objNode = comp->gtNewObjNode(structHnd, location)->AsObj(); - unsigned int slots = roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; - - objNode->SetGCInfo(varDsc->lvGcLayout, varDsc->lvStructGcCount, slots); objNode->ChangeOper(GT_STORE_OBJ); objNode->SetData(value); comp->fgMorphUnsafeBlk(objNode); @@ -387,7 +384,8 @@ void Rationalizer::RewriteAssignment(LIR::Use& use) } else { - storeBlk = new (comp, GT_STORE_BLK) GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, location, value, size); + storeBlk = new (comp, GT_STORE_BLK) + GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, location, value, comp->typGetBlkLayout(size)); } storeBlk->gtFlags |= GTF_ASG; storeBlk->gtFlags |= ((location->gtFlags | value->gtFlags) & GTF_ALL_EFFECT); diff --git a/src/jit/simd.cpp b/src/jit/simd.cpp index 81494b02361c..9bbb2fc9d463 100644 --- a/src/jit/simd.cpp +++ b/src/jit/simd.cpp @@ -1065,10 +1065,10 @@ GenTree* Compiler::impSIMDPopStack(var_types type, bool expectAddr, CORINFO_CLAS // if (tree->OperGet() == GT_OBJ) { - if ((structHandle != NO_CLASS_HANDLE) && (tree->AsObj()->gtClass != structHandle)) + if ((structHandle != NO_CLASS_HANDLE) && (tree->AsObj()->GetLayout()->GetClassHandle() != structHandle)) { // In this case we need to retain the GT_OBJ to retype the value. - tree->AsObj()->gtClass = structHandle; + tree->AsObj()->SetLayout(typGetObjLayout(structHandle)); } else { @@ -3128,17 +3128,19 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, // Widen the lower half and assign it to dstAddrLo. simdTree = gtNewSIMDNode(simdType, op1, nullptr, SIMDIntrinsicWidenLo, baseType, size); - GenTree* loDest = - new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, dstAddrLo, getSIMDTypeSizeInBytes(clsHnd)); + // TODO-1stClassStructs: With the introduction of ClassLayout it would be preferrable to use + // GT_OBJ instead of GT_BLK nodes to avoid losing information about the actual vector type. 
+ GenTree* loDest = new (this, GT_BLK) + GenTreeBlk(GT_BLK, simdType, dstAddrLo, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd))); GenTree* loAsg = gtNewBlkOpNode(loDest, simdTree, getSIMDTypeSizeInBytes(clsHnd), false, // not volatile true); // copyBlock loAsg->gtFlags |= ((simdTree->gtFlags | dstAddrLo->gtFlags) & GTF_ALL_EFFECT); // Widen the upper half and assign it to dstAddrHi. - simdTree = gtNewSIMDNode(simdType, dupOp1, nullptr, SIMDIntrinsicWidenHi, baseType, size); - GenTree* hiDest = - new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, dstAddrHi, getSIMDTypeSizeInBytes(clsHnd)); + simdTree = gtNewSIMDNode(simdType, dupOp1, nullptr, SIMDIntrinsicWidenHi, baseType, size); + GenTree* hiDest = new (this, GT_BLK) + GenTreeBlk(GT_BLK, simdType, dstAddrHi, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd))); GenTree* hiAsg = gtNewBlkOpNode(hiDest, simdTree, getSIMDTypeSizeInBytes(clsHnd), false, // not volatile true); // copyBlock @@ -3176,7 +3178,8 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, // block ops. if (doCopyBlk) { - GenTree* dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, copyBlkDst, getSIMDTypeSizeInBytes(clsHnd)); + GenTree* dest = new (this, GT_BLK) + GenTreeBlk(GT_BLK, simdType, copyBlkDst, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd))); dest->gtFlags |= GTF_GLOB_REF; retVal = gtNewBlkOpNode(dest, simdTree, getSIMDTypeSizeInBytes(clsHnd), false, // not volatile