From bc70bf59d6b5d08c87eb4f370517a0b3cec365b7 Mon Sep 17 00:00:00 2001 From: Noah Falk Date: Tue, 30 Jul 2024 03:13:58 -0700 Subject: [PATCH] PR feedback -update the breaking change number on the debug header -remove unneeded whitespace -adjust combined_limit -> m_CombinedLimit --- src/coreclr/nativeaot/Runtime/AsmOffsets.h | 2 +- src/coreclr/nativeaot/Runtime/DebugHeader.cpp | 2 +- src/coreclr/nativeaot/Runtime/amd64/AllocFast.S | 6 +++--- src/coreclr/nativeaot/Runtime/amd64/AllocFast.asm | 6 +++--- src/coreclr/nativeaot/Runtime/amd64/AsmMacros.inc | 2 +- src/coreclr/nativeaot/Runtime/arm/AllocFast.S | 10 +++++----- src/coreclr/nativeaot/Runtime/arm64/AllocFast.S | 8 ++++---- src/coreclr/nativeaot/Runtime/arm64/AllocFast.asm | 6 +++--- src/coreclr/nativeaot/Runtime/arm64/AsmMacros.h | 2 +- src/coreclr/nativeaot/Runtime/gcenv.ee.cpp | 6 +++--- src/coreclr/nativeaot/Runtime/i386/AllocFast.asm | 6 +++--- src/coreclr/nativeaot/Runtime/i386/AsmMacros.inc | 2 +- .../nativeaot/Runtime/loongarch64/AllocFast.S | 8 ++++---- src/coreclr/nativeaot/Runtime/thread.h | 14 +++++++------- src/coreclr/nativeaot/Runtime/thread.inl | 6 +++--- .../nativeaot/Runtime/unix/unixasmmacrosamd64.inc | 2 +- .../nativeaot/Runtime/unix/unixasmmacrosarm.inc | 2 +- 17 files changed, 45 insertions(+), 45 deletions(-) diff --git a/src/coreclr/nativeaot/Runtime/AsmOffsets.h b/src/coreclr/nativeaot/Runtime/AsmOffsets.h index cb6bf8842e04b9..0c7286cc5c3cf6 100644 --- a/src/coreclr/nativeaot/Runtime/AsmOffsets.h +++ b/src/coreclr/nativeaot/Runtime/AsmOffsets.h @@ -61,7 +61,7 @@ ASM_SIZEOF( 14, 20, EHEnum) ASM_OFFSET( 0, 0, gc_alloc_context, alloc_ptr) ASM_OFFSET( 4, 8, gc_alloc_context, alloc_limit) -ASM_OFFSET( 0, 0, ee_alloc_context, combined_limit) +ASM_OFFSET( 0, 0, ee_alloc_context, m_CombinedLimit) ASM_OFFSET( 4, 8, ee_alloc_context, m_rgbAllocContextBuffer) #ifdef FEATURE_CACHED_INTERFACE_DISPATCH diff --git a/src/coreclr/nativeaot/Runtime/DebugHeader.cpp b/src/coreclr/nativeaot/Runtime/DebugHeader.cpp index e32956dde4ee28..449cba983ffcdd 100644 --- a/src/coreclr/nativeaot/Runtime/DebugHeader.cpp +++ b/src/coreclr/nativeaot/Runtime/DebugHeader.cpp @@ -77,7 +77,7 @@ struct DotNetRuntimeDebugHeader // This counter can be incremented to indicate breaking changes // This field must be encoded little endian, regardless of the typical endianness of // the machine - const uint16_t MajorVersion = 4; + const uint16_t MajorVersion = 5; // This counter can be incremented to indicate back-compatible changes // This field must be encoded little endian, regardless of the typical endianness of diff --git a/src/coreclr/nativeaot/Runtime/amd64/AllocFast.S b/src/coreclr/nativeaot/Runtime/amd64/AllocFast.S index 8923a7a4fbb64b..765e83b91cb4a8 100644 --- a/src/coreclr/nativeaot/Runtime/amd64/AllocFast.S +++ b/src/coreclr/nativeaot/Runtime/amd64/AllocFast.S @@ -28,7 +28,7 @@ NESTED_ENTRY RhpNewFast, _TEXT, NoHandler mov rsi, [rax + OFFSETOF__Thread__m_alloc_context__alloc_ptr] add rdx, rsi - cmp rdx, [rax + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp rdx, [rax + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja LOCAL_LABEL(RhpNewFast_RarePath) // set the new alloc pointer @@ -143,7 +143,7 @@ NESTED_ENTRY RhNewString, _TEXT, NoHandler // rcx == Thread* // rdx == string size // r12 == element count - cmp rax, [rcx + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp rax, [rcx + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja LOCAL_LABEL(RhNewString_RarePath) mov [rcx + 
OFFSETOF__Thread__m_alloc_context__alloc_ptr], rax @@ -226,7 +226,7 @@ NESTED_ENTRY RhpNewArray, _TEXT, NoHandler // rcx == Thread* // rdx == array size // r12 == element count - cmp rax, [rcx + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp rax, [rcx + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja LOCAL_LABEL(RhpNewArray_RarePath) mov [rcx + OFFSETOF__Thread__m_alloc_context__alloc_ptr], rax diff --git a/src/coreclr/nativeaot/Runtime/amd64/AllocFast.asm b/src/coreclr/nativeaot/Runtime/amd64/AllocFast.asm index 6ba69c0c141274..7e08dcdef6e806 100644 --- a/src/coreclr/nativeaot/Runtime/amd64/AllocFast.asm +++ b/src/coreclr/nativeaot/Runtime/amd64/AllocFast.asm @@ -25,7 +25,7 @@ LEAF_ENTRY RhpNewFast, _TEXT mov rax, [rdx + OFFSETOF__Thread__m_alloc_context__alloc_ptr] add r8, rax - cmp r8, [rdx + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp r8, [rdx + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja RhpNewFast_RarePath ;; set the new alloc pointer @@ -118,7 +118,7 @@ LEAF_ENTRY RhNewString, _TEXT ; rdx == element count ; r8 == array size ; r10 == thread - cmp rax, [r10 + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp rax, [r10 + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja RhpNewArrayRare mov [r10 + OFFSETOF__Thread__m_alloc_context__alloc_ptr], rax @@ -179,7 +179,7 @@ LEAF_ENTRY RhpNewArray, _TEXT ; rdx == element count ; r8 == array size ; r10 == thread - cmp rax, [r10 + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp rax, [r10 + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja RhpNewArrayRare mov [r10 + OFFSETOF__Thread__m_alloc_context__alloc_ptr], rax diff --git a/src/coreclr/nativeaot/Runtime/amd64/AsmMacros.inc b/src/coreclr/nativeaot/Runtime/amd64/AsmMacros.inc index 41c43252317d9a..8cac5b016f61b6 100644 --- a/src/coreclr/nativeaot/Runtime/amd64/AsmMacros.inc +++ b/src/coreclr/nativeaot/Runtime/amd64/AsmMacros.inc @@ -337,7 +337,7 @@ TSF_DoNotTriggerGc equ 10h ;; Rename fields of nested structs ;; OFFSETOF__Thread__m_alloc_context__alloc_ptr equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr -OFFSETOF__Thread__m_eeAllocContext__combined_limit equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__combined_limit +OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_CombinedLimit diff --git a/src/coreclr/nativeaot/Runtime/arm/AllocFast.S b/src/coreclr/nativeaot/Runtime/arm/AllocFast.S index 76091303696546..154063d829e021 100644 --- a/src/coreclr/nativeaot/Runtime/arm/AllocFast.S +++ b/src/coreclr/nativeaot/Runtime/arm/AllocFast.S @@ -26,7 +26,7 @@ LEAF_ENTRY RhpNewFast, _TEXT ldr r3, [r0, #OFFSETOF__Thread__m_alloc_context__alloc_ptr] add r2, r3 - ldr r1, [r0, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr r1, [r0, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp r2, r1 bhi LOCAL_LABEL(RhpNewFast_RarePath) @@ -132,7 +132,7 @@ LEAF_ENTRY RhNewString, _TEXT adds r6, r12 bcs LOCAL_LABEL(RhNewString_RarePath) // if we get a carry here, the string is too large to fit below 4 GB - ldr r12, [r0, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr r12, [r0, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp r6, r12 bhi LOCAL_LABEL(RhNewString_RarePath) @@ -213,7 +213,7 @@ LOCAL_LABEL(ArrayAlignSize): adds r6, r12 bcs LOCAL_LABEL(RhpNewArray_RarePath) // if we get a carry here, the array is too large to fit below 4 GB - 
ldr r12, [r0, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr r12, [r0, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp r6, r12 bhi LOCAL_LABEL(RhpNewArray_RarePath) @@ -349,7 +349,7 @@ LEAF_ENTRY RhpNewFastAlign8, _TEXT // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. add r2, r3 - ldr r3, [r0, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr r3, [r0, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp r2, r3 bhi LOCAL_LABEL(Alloc8Failed) @@ -412,7 +412,7 @@ LEAF_ENTRY RhpNewFastMisalign, _TEXT // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. add r2, r3 - ldr r3, [r0, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr r3, [r0, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp r2, r3 bhi LOCAL_LABEL(BoxAlloc8Failed) diff --git a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S index ebe5387d8d9306..90b343f598dd95 100644 --- a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S +++ b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.S @@ -11,7 +11,7 @@ GC_ALLOC_FINALIZE = 1 // Rename fields of nested structs // OFFSETOF__Thread__m_alloc_context__alloc_ptr = OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr -OFFSETOF__Thread__m_eeAllocContext__combined_limit = OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__combined_limit +OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit = OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_CombinedLimit @@ -44,7 +44,7 @@ OFFSETOF__Thread__m_eeAllocContext__combined_limit = OFFSETOF__Thread__m_eeAllo // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. add x2, x2, x12 - ldr x13, [x1, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr x13, [x1, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp x2, x13 bhi LOCAL_LABEL(RhpNewFast_RarePath) @@ -139,7 +139,7 @@ LOCAL_LABEL(NewOutOfMemory): // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. add x2, x2, x12 - ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp x2, x12 bhi LOCAL_LABEL(RhNewString_Rare) @@ -207,7 +207,7 @@ LOCAL_LABEL(RhNewString_Rare): // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. 
add x2, x2, x12 - ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp x2, x12 bhi LOCAL_LABEL(RhpNewArray_Rare) diff --git a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.asm b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.asm index d8e506335d77f2..b898a88abebd03 100644 --- a/src/coreclr/nativeaot/Runtime/arm64/AllocFast.asm +++ b/src/coreclr/nativeaot/Runtime/arm64/AllocFast.asm @@ -30,7 +30,7 @@ ;; Determine whether the end of the object would lie outside of the current allocation context. If so, ;; we abandon the attempt to allocate the object directly and fall back to the slow helper. add x2, x2, x12 - ldr x13, [x1, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr x13, [x1, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp x2, x13 bhi RhpNewFast_RarePath @@ -118,7 +118,7 @@ NewOutOfMemory ;; Determine whether the end of the object would lie outside of the current allocation context. If so, ;; we abandon the attempt to allocate the object directly and fall back to the slow helper. add x2, x2, x12 - ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp x2, x12 bhi RhpNewArrayRare @@ -179,7 +179,7 @@ StringSizeOverflow ;; Determine whether the end of the object would lie outside of the current allocation context. If so, ;; we abandon the attempt to allocate the object directly and fall back to the slow helper. add x2, x2, x12 - ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__combined_limit] + ldr x12, [x3, #OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] cmp x2, x12 bhi RhpNewArrayRare diff --git a/src/coreclr/nativeaot/Runtime/arm64/AsmMacros.h b/src/coreclr/nativeaot/Runtime/arm64/AsmMacros.h index 2f6e83e2cf9b66..f9747917781691 100644 --- a/src/coreclr/nativeaot/Runtime/arm64/AsmMacros.h +++ b/src/coreclr/nativeaot/Runtime/arm64/AsmMacros.h @@ -88,7 +88,7 @@ STATUS_REDHAWK_THREAD_ABORT equ 0x43 ;; Rename fields of nested structs ;; OFFSETOF__Thread__m_alloc_context__alloc_ptr equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr -OFFSETOF__Thread__m_eeAllocContext__combined_limit equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__combined_limit +OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_CombinedLimit ;; ;; IMPORTS diff --git a/src/coreclr/nativeaot/Runtime/gcenv.ee.cpp b/src/coreclr/nativeaot/Runtime/gcenv.ee.cpp index 56defbdd21b9c3..391a93150055ed 100644 --- a/src/coreclr/nativeaot/Runtime/gcenv.ee.cpp +++ b/src/coreclr/nativeaot/Runtime/gcenv.ee.cpp @@ -140,12 +140,12 @@ void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* par gc_alloc_context* ac = palloc_context->GetGCAllocContext(); (*fn) (ac, param); // The GC may zero the alloc_ptr and alloc_limit fields of AC during enumeration and we need to keep - // combined_limit up-to-date. Note that the GC has multiple threads running this enumeration concurrently + // m_CombinedLimit up-to-date. Note that the GC has multiple threads running this enumeration concurrently // with no synchronization. If you need to change this code think carefully about how that concurrency // may affect the results. 
- if (ac->alloc_limit == 0 && palloc_context->combined_limit != 0) + if (ac->alloc_limit == 0 && palloc_context->m_CombinedLimit != 0) { - palloc_context->combined_limit = 0; + palloc_context->m_CombinedLimit = 0; } } END_FOREACH_THREAD diff --git a/src/coreclr/nativeaot/Runtime/i386/AllocFast.asm b/src/coreclr/nativeaot/Runtime/i386/AllocFast.asm index d557f5ec750774..e47684d5784a46 100644 --- a/src/coreclr/nativeaot/Runtime/i386/AllocFast.asm +++ b/src/coreclr/nativeaot/Runtime/i386/AllocFast.asm @@ -29,7 +29,7 @@ FASTCALL_FUNC RhpNewFast, 4 ;; add eax, [edx + OFFSETOF__Thread__m_alloc_context__alloc_ptr] - cmp eax, [edx + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp eax, [edx + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja AllocFailed ;; set the new alloc pointer @@ -165,7 +165,7 @@ FASTCALL_FUNC RhNewString, 8 mov ecx, eax add eax, [edx + OFFSETOF__Thread__m_alloc_context__alloc_ptr] jc StringAllocContextOverflow - cmp eax, [edx + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp eax, [edx + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja StringAllocContextOverflow ; ECX == allocation size @@ -282,7 +282,7 @@ ArrayAlignSize: mov ecx, eax add eax, [edx + OFFSETOF__Thread__m_alloc_context__alloc_ptr] jc ArrayAllocContextOverflow - cmp eax, [edx + OFFSETOF__Thread__m_eeAllocContext__combined_limit] + cmp eax, [edx + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit] ja ArrayAllocContextOverflow ; ECX == array size diff --git a/src/coreclr/nativeaot/Runtime/i386/AsmMacros.inc b/src/coreclr/nativeaot/Runtime/i386/AsmMacros.inc index 9541f73940215a..10ff220912a3b4 100644 --- a/src/coreclr/nativeaot/Runtime/i386/AsmMacros.inc +++ b/src/coreclr/nativeaot/Runtime/i386/AsmMacros.inc @@ -141,7 +141,7 @@ STATUS_REDHAWK_THREAD_ABORT equ 43h ;; Rename fields of nested structs ;; OFFSETOF__Thread__m_alloc_context__alloc_ptr equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr -OFFSETOF__Thread__m_eeAllocContext__combined_limit equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__combined_limit +OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit equ OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_CombinedLimit ;; ;; CONSTANTS -- SYMBOLS diff --git a/src/coreclr/nativeaot/Runtime/loongarch64/AllocFast.S b/src/coreclr/nativeaot/Runtime/loongarch64/AllocFast.S index 5f03faa6938490..d32951f3bf64ba 100644 --- a/src/coreclr/nativeaot/Runtime/loongarch64/AllocFast.S +++ b/src/coreclr/nativeaot/Runtime/loongarch64/AllocFast.S @@ -11,7 +11,7 @@ GC_ALLOC_FINALIZE = 1 // Rename fields of nested structs // OFFSETOF__Thread__m_alloc_context__alloc_ptr = OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr -OFFSETOF__Thread__m_eeAllocContext__combined_limit = OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__combined_limit +OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit = OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_CombinedLimit @@ -44,7 +44,7 @@ OFFSETOF__Thread__m_eeAllocContext__combined_limit = OFFSETOF__Thread__m_eeAllo // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. 
add.d $a2, $a2, $t3 - ld.d $t4, $a1, OFFSETOF__Thread__m_eeAllocContext__combined_limit + ld.d $t4, $a1, OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit bltu $t4, $a2, RhpNewFast_RarePath // Update the alloc pointer to account for the allocation. @@ -137,7 +137,7 @@ NewOutOfMemory: // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. add.d $a2, $a2, $t3 - ld.d $t3, $a3, OFFSETOF__Thread__m_eeAllocContext__combined_limit + ld.d $t3, $a3, OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit bltu $t3, $a2, RhNewString_Rare // Reload new object address into r12. @@ -199,7 +199,7 @@ RhNewString_Rare: // Determine whether the end of the object would lie outside of the current allocation context. If so, // we abandon the attempt to allocate the object directly and fall back to the slow helper. add.d $a2, $a2, $t3 - ld.d $t3, $a3, OFFSETOF__Thread__m_eeAllocContext__combined_limit + ld.d $t3, $a3, OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit bltu $t3, $a2, RhpNewArray_Rare // Reload new object address into t3. diff --git a/src/coreclr/nativeaot/Runtime/thread.h b/src/coreclr/nativeaot/Runtime/thread.h index 70f776de2ee9a1..f6f7f28cb605c1 100644 --- a/src/coreclr/nativeaot/Runtime/thread.h +++ b/src/coreclr/nativeaot/Runtime/thread.h @@ -86,12 +86,12 @@ struct InlinedThreadStaticRoot // This struct allows adding some state that is only visible to the EE onto the standard gc_alloc_context struct ee_alloc_context { - // Any allocation that would overlap combined_limit needs to be handled by the allocation slow path. - // combined_limit is the minimum of: + // Any allocation that would overlap m_CombinedLimit needs to be handled by the allocation slow path. + // m_CombinedLimit is the minimum of: // - gc_alloc_context.alloc_limit (the end of the current AC) // - the sampling_limit // - // In the simple case that randomized sampling is disabled, combined_limit is always equal to alloc_limit. + // In the simple case that randomized sampling is disabled, m_CombinedLimit is always equal to alloc_limit. // // There are two different useful interpretations for the sampling_limit. One is to treat the sampling_limit // as an address and when we allocate an object that overlaps that address we should emit a sampling event. @@ -102,13 +102,13 @@ struct ee_alloc_context // flexible to handle those cases. // // The sampling limit isn't stored in any separate field explicitly, instead it is implied: - // - if combined_limit == alloc_limit there is no sampled byte in the AC. In the budget interpretation + // - if m_CombinedLimit == alloc_limit there is no sampled byte in the AC. In the budget interpretation // we can allocate (alloc_limit - alloc_ptr) unsampled bytes. We'll need a new random number after // that to determine whether future allocated bytes should be sampled. // This occurs either because the sampling feature is disabled, or because the randomized selection // of sampled bytes didn't select a byte in this AC. - // - if combined_limit < alloc_limit there is a sample limit in the AC. sample_limit = combined_limit. - uint8_t* combined_limit; + // - if m_CombinedLimit < alloc_limit there is a sample limit in the AC. sample_limit = m_CombinedLimit. 
+ uint8_t* m_CombinedLimit; uint8_t m_rgbAllocContextBuffer[SIZEOF_ALLOC_CONTEXT]; gc_alloc_context* GetGCAllocContext(); @@ -119,7 +119,7 @@ struct ee_alloc_context struct RuntimeThreadLocals { - ee_alloc_context m_eeAllocContext; + ee_alloc_context m_eeAllocContext; uint32_t volatile m_ThreadStateFlags; // see Thread::ThreadStateFlags enum PInvokeTransitionFrame* m_pTransitionFrame; PInvokeTransitionFrame* m_pDeferredTransitionFrame; // see Thread::EnablePreemptiveMode diff --git a/src/coreclr/nativeaot/Runtime/thread.inl b/src/coreclr/nativeaot/Runtime/thread.inl index 5c17da3e61f3f3..8063264f8467c8 100644 --- a/src/coreclr/nativeaot/Runtime/thread.inl +++ b/src/coreclr/nativeaot/Runtime/thread.inl @@ -12,7 +12,7 @@ inline gc_alloc_context* ee_alloc_context::GetGCAllocContext() inline uint8_t* ee_alloc_context::GetCombinedLimit() { - return combined_limit; + return m_CombinedLimit; } // Workaround for https://github.com/dotnet/runtime/issues/96081 @@ -25,8 +25,8 @@ struct _thread_inl_gc_alloc_context inline void ee_alloc_context::UpdateCombinedLimit() { // The randomized allocation sampling feature is being submitted in stages. For now sampling is never enabled so - // combined_limit is always the same as alloc_limit. - combined_limit = ((_thread_inl_gc_alloc_context*)GetGCAllocContext())->alloc_limit; + // m_CombinedLimit is always the same as alloc_limit. + m_CombinedLimit = ((_thread_inl_gc_alloc_context*)GetGCAllocContext())->alloc_limit; } // Set the m_pDeferredTransitionFrame field for GC allocation helpers that setup transition frame diff --git a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosamd64.inc b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosamd64.inc index b4c0a74d45509b..6cf9c7097a51a5 100644 --- a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosamd64.inc +++ b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosamd64.inc @@ -241,7 +241,7 @@ C_FUNC(\Name): // Rename fields of nested structs // #define OFFSETOF__Thread__m_alloc_context__alloc_ptr OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr -#define OFFSETOF__Thread__m_eeAllocContext__combined_limit OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__combined_limit +#define OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_CombinedLimit // GC type flags #define GC_ALLOC_FINALIZE 1 diff --git a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosarm.inc b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosarm.inc index 4ccd38b19c7bef..18cc3e59fda3c4 100644 --- a/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosarm.inc +++ b/src/coreclr/nativeaot/Runtime/unix/unixasmmacrosarm.inc @@ -29,7 +29,7 @@ // Rename fields of nested structs #define OFFSETOF__Thread__m_alloc_context__alloc_ptr (OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr) -#define OFFSETOF__Thread__m_eeAllocContext__combined_limit (OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__combined_limit) +#define OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit (OFFSETOF__Thread__m_eeAllocContext + OFFSETOF__ee_alloc_context__m_CombinedLimit) // GC minimal sized object. We use this to switch between 4 and 8 byte alignment in the GC heap (see AllocFast.asm). #define SIZEOF__MinObject 12
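
Below is a minimal illustrative sketch (not part of the patch) of how the renamed m_CombinedLimit field is consumed on the allocation fast path that the AllocFast helpers above implement in assembly, and how UpdateCombinedLimit in thread.inl keeps it in sync while sampling is disabled. The struct names with the _sketch suffix, the simplified field layout, and the TryAllocFast helper are assumptions made for illustration only; the field names m_CombinedLimit, alloc_ptr, and alloc_limit and the "compare against the combined limit, fall back to the rare path on overflow" pattern come from the patch itself.

    #include <cstdint>
    #include <cstddef>

    struct gc_alloc_context_sketch
    {
        uint8_t* alloc_ptr;    // next free byte in the thread's allocation context
        uint8_t* alloc_limit;  // end of the thread's allocation context
    };

    struct ee_alloc_context_sketch
    {
        // Minimum of alloc_limit and the (not yet enabled) sampling limit.
        uint8_t* m_CombinedLimit;
        gc_alloc_context_sketch m_gc;

        void UpdateCombinedLimit()
        {
            // Sampling is not enabled in this stage of the feature, so the
            // combined limit always tracks alloc_limit (mirrors thread.inl).
            m_CombinedLimit = m_gc.alloc_limit;
        }

        // Returns the new object's address, or nullptr to signal the slow path.
        // This mirrors the assembly pattern:
        //   cmp <new alloc_ptr>, [thread + OFFSETOF__Thread__m_eeAllocContext__m_CombinedLimit]
        //   ja  RarePath
        uint8_t* TryAllocFast(size_t size)
        {
            uint8_t* result = m_gc.alloc_ptr;
            uint8_t* newPtr = result + size;
            if (newPtr > m_CombinedLimit)
                return nullptr;          // would overlap the combined limit: take the slow path
            m_gc.alloc_ptr = newPtr;     // bump-pointer allocation succeeded
            return result;
        }
    };

Because the helpers only ever compare against m_CombinedLimit (never against alloc_limit directly), a future sampling limit can shrink the fast-path window without changing any of the assembly fast paths touched by this patch.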