diff --git a/src/coreclr/jit/layout.cpp b/src/coreclr/jit/layout.cpp
index 2344c301bc9e69..113414ddfd7f7a 100644
--- a/src/coreclr/jit/layout.cpp
+++ b/src/coreclr/jit/layout.cpp
@@ -416,6 +416,25 @@ void ClassLayout::InitializeGCPtrs(Compiler* compiler)
     INDEBUG(m_gcPtrsInitialized = true;)
 }
 
+//------------------------------------------------------------------------
+// HasGCByRef: does the layout contain at least one GC ByRef
+//
+// Return value:
+//    true if at least one GC ByRef, false otherwise.
+bool ClassLayout::HasGCByRef() const
+{
+    unsigned slots = GetSlotCount();
+    for (unsigned i = 0; i < slots; i++)
+    {
+        if (IsGCByRef(i))
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 //------------------------------------------------------------------------
 // AreCompatible: check if 2 layouts are the same for copying.
 //
diff --git a/src/coreclr/jit/layout.h b/src/coreclr/jit/layout.h
index 732b46fdb89570..0e9d6ed65d03d3 100644
--- a/src/coreclr/jit/layout.h
+++ b/src/coreclr/jit/layout.h
@@ -184,11 +184,23 @@ class ClassLayout
         return m_gcPtrCount != 0;
     }
 
+    bool HasGCByRef() const;
+
     bool IsGCPtr(unsigned slot) const
     {
         return GetGCPtr(slot) != TYPE_GC_NONE;
     }
 
+    bool IsGCRef(unsigned slot) const
+    {
+        return GetGCPtr(slot) == TYPE_GC_REF;
+    }
+
+    bool IsGCByRef(unsigned slot) const
+    {
+        return GetGCPtr(slot) == TYPE_GC_BYREF;
+    }
+
     var_types GetGCPtrType(unsigned slot) const
     {
         switch (GetGCPtr(slot))
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 64826c428b77fa..50261d341757be 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -552,8 +552,6 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
     GenTree* src     = blkNode->Data();
     unsigned size    = blkNode->Size();
 
-    const bool isDstAddrLocal = dstAddr->OperIs(GT_LCL_ADDR);
-
     if (blkNode->OperIsInitBlkOp())
     {
         if (src->OperIs(GT_INIT_VAL))
@@ -617,12 +615,22 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
             comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::BlockOp));
         }
 
-        bool     doCpObj              = !blkNode->OperIs(GT_STORE_DYN_BLK) && blkNode->GetLayout()->HasGCPtr();
-        unsigned copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy);
-        if (doCpObj && isDstAddrLocal && (size <= copyBlockUnrollLimit))
+        ClassLayout* layout               = blkNode->GetLayout();
+        bool         doCpObj              = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+        unsigned     copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy);
+
+        if (doCpObj && (size <= copyBlockUnrollLimit))
         {
-            doCpObj                  = false;
-            blkNode->gtBlkOpGcUnsafe = true;
+            // No write barriers are needed on the stack.
+            // If the layout contains a byref, then we know it must live on the stack.
+            if (dstAddr->OperIs(GT_LCL_ADDR) || layout->HasGCByRef())
+            {
+                // If the size is small enough to unroll then we need to mark the block as non-interruptible
+                // to actually allow unrolling. The generated code does not report GC references loaded in the
+                // temporary register(s) used for copying.
+                doCpObj                  = false;
+                blkNode->gtBlkOpGcUnsafe = true;
+            }
         }
 
         if (doCpObj)
diff --git a/src/coreclr/jit/lowerloongarch64.cpp b/src/coreclr/jit/lowerloongarch64.cpp
index 0173f5f42fa42e..d89c8723e80f66 100644
--- a/src/coreclr/jit/lowerloongarch64.cpp
+++ b/src/coreclr/jit/lowerloongarch64.cpp
@@ -343,15 +343,22 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
             comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::BlockOp));
         }
 
-        bool     doCpObj              = !blkNode->OperIs(GT_STORE_DYN_BLK) && blkNode->GetLayout()->HasGCPtr();
-        unsigned copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy);
-        if (doCpObj && dstAddr->OperIs(GT_LCL_ADDR) && (size <= copyBlockUnrollLimit))
+        ClassLayout* layout               = blkNode->GetLayout();
+        bool         doCpObj              = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+        unsigned     copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy);
+
+        if (doCpObj && (size <= copyBlockUnrollLimit))
         {
-            // If the size is small enough to unroll then we need to mark the block as non-interruptible
-            // to actually allow unrolling. The generated code does not report GC references loaded in the
-            // temporary register(s) used for copying.
-            doCpObj                  = false;
-            blkNode->gtBlkOpGcUnsafe = true;
+            // No write barriers are needed on the stack.
+            // If the layout contains a byref, then we know it must live on the stack.
+            if (dstAddr->OperIs(GT_LCL_ADDR) || layout->HasGCByRef())
+            {
+                // If the size is small enough to unroll then we need to mark the block as non-interruptible
+                // to actually allow unrolling. The generated code does not report GC references loaded in the
+                // temporary register(s) used for copying.
+                doCpObj                  = false;
+                blkNode->gtBlkOpGcUnsafe = true;
+            }
         }
 
         // CopyObj or CopyBlk
diff --git a/src/coreclr/jit/lowerriscv64.cpp b/src/coreclr/jit/lowerriscv64.cpp
index 8bd0a8003fe34f..576325ae5ef702 100644
--- a/src/coreclr/jit/lowerriscv64.cpp
+++ b/src/coreclr/jit/lowerriscv64.cpp
@@ -297,14 +297,21 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
             comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::BlockOp));
         }
 
-        bool doCpObj = !blkNode->OperIs(GT_STORE_DYN_BLK) && blkNode->GetLayout()->HasGCPtr();
-        if (doCpObj && dstAddr->OperIs(GT_LCL_ADDR) && (size <= CPBLK_UNROLL_LIMIT))
+        ClassLayout* layout  = blkNode->GetLayout();
+        bool         doCpObj = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+
+        if (doCpObj && (size <= CPBLK_UNROLL_LIMIT))
         {
-            // If the size is small enough to unroll then we need to mark the block as non-interruptible
-            // to actually allow unrolling. The generated code does not report GC references loaded in the
-            // temporary register(s) used for copying.
-            doCpObj                  = false;
-            blkNode->gtBlkOpGcUnsafe = true;
+            // No write barriers are needed on the stack.
+            // If the layout contains a byref, then we know it must live on the stack.
+            if (dstAddr->OperIs(GT_LCL_ADDR) || layout->HasGCByRef())
+            {
+                // If the size is small enough to unroll then we need to mark the block as non-interruptible
+                // to actually allow unrolling. The generated code does not report GC references loaded in the
+                // temporary register(s) used for copying.
+                doCpObj                  = false;
+                blkNode->gtBlkOpGcUnsafe = true;
+            }
         }
 
         // CopyObj or CopyBlk
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index 4b516a5b0ce36a..7ba7df806921f0 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -394,18 +394,23 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
             comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::StoreBlkSrc));
         }
 
-        ClassLayout* layout  = blkNode->GetLayout();
-        bool         doCpObj = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+        ClassLayout* layout               = blkNode->GetLayout();
+        bool         doCpObj              = !blkNode->OperIs(GT_STORE_DYN_BLK) && layout->HasGCPtr();
+        unsigned     copyBlockUnrollLimit = comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy, false);
 
 #ifndef JIT32_GCENCODER
-        if (doCpObj && dstAddr->OperIs(GT_LCL_ADDR) &&
-            (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memcpy, false)))
-        {
-            // If the size is small enough to unroll then we need to mark the block as non-interruptible
-            // to actually allow unrolling. The generated code does not report GC references loaded in the
-            // temporary register(s) used for copying. This is not supported for the JIT32_GCENCODER.
-            doCpObj                  = false;
-            blkNode->gtBlkOpGcUnsafe = true;
+        if (doCpObj && (size <= copyBlockUnrollLimit))
+        {
+            // No write barriers are needed on the stack.
+            // If the layout contains a byref, then we know it must live on the stack.
+            if (dstAddr->OperIs(GT_LCL_ADDR) || layout->HasGCByRef())
+            {
+                // If the size is small enough to unroll then we need to mark the block as non-interruptible
+                // to actually allow unrolling. The generated code does not report GC references loaded in the
+                // temporary register(s) used for copying. This is not supported for the JIT32_GCENCODER.
+                doCpObj                  = false;
+                blkNode->gtBlkOpGcUnsafe = true;
+            }
         }
 #endif
 
diff --git a/src/tests/JIT/opt/Misc/Runtime_80086/Runtime_80086.cs b/src/tests/JIT/opt/Misc/Runtime_80086/Runtime_80086.cs
new file mode 100644
index 00000000000000..754c1014869075
--- /dev/null
+++ b/src/tests/JIT/opt/Misc/Runtime_80086/Runtime_80086.cs
@@ -0,0 +1,70 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using Xunit;
+
+#pragma warning disable CS8500
+
+namespace Runtime_80086
+{
+    public static unsafe class Test
+    {
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        public static Span<int> Marshal1(Span<int> a)
+        {
+            return MemoryMarshal.CreateSpan(ref MemoryMarshal.GetReference(a), a.Length);
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        public static Span<int> Marshal2(Span<int>* a)
+        {
+            return MemoryMarshal.CreateSpan(ref MemoryMarshal.GetReference(*a), a->Length);
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static Span<int> Copy1(Span<int> s) => s;
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static Span<int> Copy2(Span<int> a)
+        {
+            ref Span<int> ra = ref a;
+            return ra;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static Span<int> Copy3(Span<int>* a)
+        {
+            return *a;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static Span<int> Copy4(scoped ref Span<int> a)
+        {
+            return a;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static Span<int> Copy5(ReadOnlySpan<int> a)
+        {
+            // Example is used to check code generation but shouldn't be used elsewhere
+            return *(Span<int>*)&a;
+        }
+
+        [Fact]
+        public static void TestEntryPoint()
+        {
+            Span<int> s = new int[1] { 13 };
+            s = Marshal1(s);
+            s = Marshal2(&s);
+            s = Copy1(s);
+            s = Copy2(s);
+            s = Copy3(&s);
+            s = Copy4(ref s);
+            s = Copy5(s);
+            Assert.Equal(13, s[0]);
+        }
+    }
+}
diff --git a/src/tests/JIT/opt/Misc/Runtime_80086/Runtime_80086.csproj b/src/tests/JIT/opt/Misc/Runtime_80086/Runtime_80086.csproj
new file mode 100644
index 00000000000000..b2ab702ae90e2b
--- /dev/null
+++ b/src/tests/JIT/opt/Misc/Runtime_80086/Runtime_80086.csproj
@@ -0,0 +1,9 @@
+<Project Sdk="Microsoft.NET.Sdk">
+  <PropertyGroup>
+    <AllowUnsafeBlocks>True</AllowUnsafeBlocks>
+    <Optimize>True</Optimize>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="$(MSBuildProjectName).cs" />
+  </ItemGroup>
+</Project>
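Note: the sketch below is a hypothetical illustration, not part of the patch; the names `Demo` and `Store` are invented, and it assumes C# 11 with `AllowUnsafeBlocks` (the same settings the test above uses). It shows the shape of code the old `dstAddr->OperIs(GT_LCL_ADDR)` check missed: the store goes through a pointer rather than a local address, yet because `Span<int>` contains a byref, the destination struct can itself only live on the stack (the GC heap cannot hold byrefs), which is exactly what the new `layout->HasGCByRef()` condition lets the JIT conclude.

```csharp
#pragma warning disable CS8500 // declares a pointer to a managed type

using System;

public static unsafe class Demo
{
    // '*dst = value' is a block store whose ClassLayout reports
    // HasGCByRef() == true. The destination address is an indirection,
    // not a GT_LCL_ADDR, but it must point at stack memory, so the copy
    // needs no write barriers and can now be unrolled.
    static void Store(Span<int>* dst, Span<int> value)
    {
        *dst = value;
    }

    public static void Main()
    {
        Span<int> s = default;
        Store(&s, new int[] { 42 });
        Console.WriteLine(s[0]); // prints 42
    }
}
```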