// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*
 * GCHELPERS.CPP
 *
 * GC Allocation and Write Barrier Helpers
 */

#include "common.h"
#include "object.h"
#include "threads.h"
#include "eetwain.h"
#include "eeconfig.h"
#include "gcheaputilities.h"
#include "corhost.h"
#include "threads.h"
#include "fieldmarshaler.h"
#include "interoputil.h"
#include "dynamicmethod.h"
#include "stubhelpers.h"
#include "eventtrace.h"
#include "excep.h"
#include "gchelpers.inl"
#include "eeprofinterfaces.inl"
#include "frozenobjectheap.h"
#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#endif // FEATURE_COMINTEROP
//========================================================================
//
//                         ALLOCATION HELPERS
//
//========================================================================

inline ee_alloc_context* GetThreadEEAllocContext()
{
    WRAPPER_NO_CONTRACT;
    assert(GCHeapUtilities::UseThreadAllocationContexts());
    return &t_runtime_thread_locals.alloc_context;
}

// When not using per-thread allocation contexts, we (the EE) need to take care that
// no two threads are concurrently modifying the global allocation context. This lock
// must be acquired before any sort of operation involving the global allocation context
// can occur.
//
// This lock is acquired by all allocations when not using per-thread allocation contexts.
// It is acquired in two kinds of places:
//   1) JIT_TrialAllocFastSP (and related assembly alloc helpers), which attempt to
//      acquire it but move into an alloc slow path if acquiring fails
//      (but do not decrement the lock variable when doing so)
//   2) Alloc in gchelpers.cpp, which acquires the lock using
//      the Acquire and Release methods below.
class GlobalAllocLock {
    friend struct AsmOffsets;
private:
    // The lock variable. This field must always be first.
    LONG m_lock;

public:
    // Creates a new GlobalAllocLock in the unlocked state.
    GlobalAllocLock() : m_lock(-1) {}

    // Copy and copy-assignment operators should never be invoked
    // for this type
    GlobalAllocLock(const GlobalAllocLock&) = delete;
    GlobalAllocLock& operator=(const GlobalAllocLock&) = delete;

    // Acquires the lock, spinning if necessary to do so. When this method
    // returns, m_lock will be zero and the lock will be acquired.
    void Acquire()
    {
        CONTRACTL {
            NOTHROW;
            GC_TRIGGERS; // switch to preemptive mode
            MODE_COOPERATIVE;
        } CONTRACTL_END;

        DWORD spinCount = 0;
        while(InterlockedExchange(&m_lock, 0) != -1)
        {
            GCX_PREEMP();
            __SwitchToThread(0, spinCount++);
        }

        assert(m_lock == 0);
    }

    // Releases the lock.
    void Release()
    {
        LIMITED_METHOD_CONTRACT;

        // the lock may not be exactly 0. This is because the
        // assembly alloc routines increment the lock variable and
        // jump if not zero to the slow alloc path, which eventually
        // will try to acquire the lock again. At that point, it will
        // spin in Acquire (since m_lock is some number that's not zero).
        // When the thread that /does/ hold the lock releases it, the spinning
        // thread will continue.
        MemoryBarrier();
        assert(m_lock >= 0);
        m_lock = -1;
    }

    // Static helper to acquire a lock, for use with the Holder template.
    static void AcquireLock(GlobalAllocLock *lock)
    {
        WRAPPER_NO_CONTRACT;
        lock->Acquire();
    }

    // Static helper to release a lock, for use with the Holder template
    static void ReleaseLock(GlobalAllocLock *lock)
    {
        WRAPPER_NO_CONTRACT;
        lock->Release();
    }

    typedef class Holder<GlobalAllocLock *, GlobalAllocLock::AcquireLock, GlobalAllocLock::ReleaseLock> Holder;
};

typedef GlobalAllocLock::Holder GlobalAllocLockHolder;

struct AsmOffsets {
    static_assert(offsetof(GlobalAllocLock, m_lock) == 0, "ASM code relies on this property");
};

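// Illustrative sketch (editorial addition, not in the original source): the Holder
// typedef above gives RAII-style scoping over this lock, so a hypothetical caller
// would look like:
//
//     {
//         GlobalAllocLockHolder holder(&g_global_alloc_lock);  // calls Acquire()
//         // ... safely read/update the global allocation context here ...
//     }   // holder destructor calls Release()
//
// This is exactly the pattern the non-thread-local branch of Alloc uses below.
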
// For single-proc machines, the global allocation context is protected
// from concurrent modification by this lock.
//
// When not using per-thread allocation contexts, certain methods on IGCHeap
// require that this lock be held before calling. These methods are documented
// on the IGCHeap interface.
extern "C"
{
    GlobalAllocLock g_global_alloc_lock;
}

// Checks to see if the given allocation size exceeds the
// largest object size allowed - if it does, it throws
// an OutOfMemoryException with a message indicating that
// the OOM was not from memory pressure but from an object
// being too large.
inline void CheckObjectSize(size_t alloc_size)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    } CONTRACTL_END;

    size_t max_object_size;
#ifdef HOST_64BIT
    if (g_pConfig->GetGCAllowVeryLargeObjects())
    {
        max_object_size = (INT64_MAX - 7 - min_obj_size);
    }
    else
#endif // HOST_64BIT
    {
        max_object_size = (INT32_MAX - 7 - min_obj_size);
    }

    if (alloc_size >= max_object_size)
    {
        if (g_pConfig->IsGCBreakOnOOMEnabled())
        {
            DebugBreak();
        }

        ThrowOutOfMemoryDimensionsExceeded();
    }
}

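// Worked example (editorial note): on 64-bit, without gcAllowVeryLargeObjects and
// assuming min_obj_size is three pointers (24 bytes), the limit evaluates to
// INT32_MAX - 7 - 24 = 2,147,483,616 bytes, so any single allocation request at or
// above roughly 2GB throws here rather than being handed to the GC.
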
void FireAllocationSampled(GC_ALLOC_FLAGS flags, size_t size, size_t samplingBudgetOffset, Object* orObject)
{
#ifdef FEATURE_EVENT_TRACE
    // Note: this code is duplicated from GCToCLREventSink::FireGCAllocationTick_V4
    void* typeId = nullptr;
    const WCHAR* name = nullptr;
    InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;
    EX_TRY
    {
        TypeHandle th = GetThread()->GetTHAllocContextObj();
        if (th != 0)
        {
            th.GetName(strTypeName);
            name = strTypeName.GetUnicode();
            typeId = th.GetMethodTable();
        }
    }
    EX_CATCH {}
    EX_END_CATCH(SwallowAllExceptions)
    // end of duplication

    if (typeId != nullptr)
    {
        unsigned int allocKind =
            (flags & GC_ALLOC_PINNED_OBJECT_HEAP) ? 2 :
            (flags & GC_ALLOC_LARGE_OBJECT_HEAP)  ? 1 :
            0; // SOH
        FireEtwAllocationSampled(allocKind, GetClrInstanceId(), typeId, name, (BYTE*)orObject, size, samplingBudgetOffset);
    }
#endif //FEATURE_EVENT_TRACE
}

inline Object* Alloc(ee_alloc_context* pEEAllocContext, size_t size, GC_ALLOC_FLAGS flags)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    } CONTRACTL_END;

    Object* retVal = nullptr;
    gc_alloc_context* pAllocContext = &pEEAllocContext->m_GCAllocContext;
    bool isSampled = false;
    size_t availableSpace = 0;
    size_t aligned_size = 0;
    size_t samplingBudget = 0;
    bool isRandomizedSamplingEnabled = ee_alloc_context::IsRandomizedSamplingEnabled();
    if (isRandomizedSamplingEnabled)
    {
        // object allocations are always padded up to pointer size
        aligned_size = AlignUp(size, sizeof(uintptr_t));

        // The number of bytes we can allocate before we need to emit a sampling event.
        // This calculation is only valid if combined_limit < alloc_limit.
        samplingBudget = (size_t)(pEEAllocContext->m_CombinedLimit - pAllocContext->alloc_ptr);

        // The number of bytes available in the current allocation context
        availableSpace = (size_t)(pAllocContext->alloc_limit - pAllocContext->alloc_ptr);

        // Check to see if the allocated object overlaps a sampled byte
        // in this AC. This happens when both:
        //   1) The AC contains a sampled byte (combined_limit < alloc_limit)
        //   2) The object is large enough to overlap it (samplingBudget < aligned_size)
        //
        // Note that the AC could have no remaining space for allocations (alloc_ptr =
        // alloc_limit = combined_limit). When a thread hasn't done any SOH allocations
        // yet it also starts in an empty state where alloc_ptr = alloc_limit =
        // combined_limit = nullptr. The (1) check handles both of these situations
        // properly, as an empty AC cannot have a sampled byte inside of it.
        isSampled =
            (pEEAllocContext->m_CombinedLimit < pAllocContext->alloc_limit) &&
            (samplingBudget < aligned_size);

        // if the object overflows the AC, we need to sample the remaining bytes;
        // the sampling budget only included at most the bytes inside the AC
        if (aligned_size > availableSpace && !isSampled)
        {
            samplingBudget = ee_alloc_context::ComputeGeometricRandom() + availableSpace;
            isSampled = (samplingBudget < aligned_size);
        }
    }

    GCStress<gc_on_alloc>::MaybeTrigger(pAllocContext);

    // for SOH, if there is enough space in the current allocation context, then
    // the allocation will be done in place (as in the fast path);
    // otherwise a new allocation context will be provided
    retVal = GCHeapUtilities::GetGCHeap()->Alloc(pAllocContext, size, flags);
    if (isSampled)
    {
        // At this point the object's MethodTable isn't initialized yet, but that doesn't matter
        // when we are just emitting an ETW/EventPipe event. If we want this event to be more useful
        // from ICorProfiler in the future, we probably want to pass the isSampled flag back to callers
        // so that the event can be raised after the MethodTable is initialized.
        FireAllocationSampled(flags, aligned_size, samplingBudget, retVal);
    }

    // There are a variety of conditions that may have invalidated the previous combined_limit value,
    // such as not allocating the object in the AC memory region (UOH allocations), moving the AC, adding
    // extra alignment padding, allocating a new AC, or allocating an object that consumed the sampling budget.
    // Rather than test for all the different invalidation conditions individually, we conservatively always
    // recompute it. If sampling isn't enabled, this inlined function just trivially sets
    // combined_limit=alloc_limit.
    pEEAllocContext->UpdateCombinedLimit(isRandomizedSamplingEnabled);

    return retVal;
}

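// Worked example (editorial note, hypothetical numbers): suppose an AC has
// alloc_ptr = 0x1000, alloc_limit = 0x2000 and combined_limit = 0x1800, i.e. the
// sampled byte lies 0x800 bytes ahead. Allocating an object with
// aligned_size = 0x900 gives samplingBudget = 0x1800 - 0x1000 = 0x800; since
// combined_limit < alloc_limit and 0x800 < 0x900, the object overlaps the sampled
// byte, so isSampled is true and FireAllocationSampled fires for this allocation.
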
// There are only two ways to allocate an object.
//     * Call optimized helpers that were generated on the fly. This is how JIT compiled code does most
//         allocations, however they fall back to code:Alloc for all but the most common code paths. These
//         helpers are NOT used if the profiler has asked to track GC allocation (see code:TrackAllocations)
//     * Call code:Alloc - When the jit helpers fall back, or we do allocations within the runtime code
//         itself, we ultimately call here.
//
// While this is a choke point into allocating an object, it is primitive (it does not want to know about
// MethodTable and thus does not initialize that pointer). It also does not know if the object is finalizable
// or contains pointers. Thus we quickly wrap this function in more user-friendly ones that know about
// MethodTables etc. (see code:AllocateSzArray code:AllocateArrayEx code:AllocateObject)
//
// You can get an exhaustive list of code sites that allocate GC objects by finding all calls to
// code:ProfilerObjectAllocatedCallback (since the profiler has to hook them all).
inline Object* Alloc(size_t size, GC_ALLOC_FLAGS flags)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    } CONTRACTL_END;

#ifdef _DEBUG
    if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
    {
        char *a = new char;
        delete a;
    }
#endif

    if (flags & GC_ALLOC_CONTAINS_REF)
        flags &= ~GC_ALLOC_ZEROING_OPTIONAL;

    Object *retVal = NULL;
    CheckObjectSize(size);

    if (GCHeapUtilities::UseThreadAllocationContexts())
    {
        ee_alloc_context *threadContext = GetThreadEEAllocContext();
        GCStress<gc_on_alloc>::MaybeTrigger(&threadContext->m_GCAllocContext);
        retVal = Alloc(threadContext, size, flags);
    }
    else
    {
        GlobalAllocLockHolder holder(&g_global_alloc_lock);
        ee_alloc_context *globalContext = &g_global_alloc_context;
        GCStress<gc_on_alloc>::MaybeTrigger(&globalContext->m_GCAllocContext);
        retVal = Alloc(globalContext, size, flags);
    }

    if (!retVal)
    {
        ThrowOutOfMemory();
    }

    return retVal;
}

#ifdef _LOGALLOC
int g_iNumAllocs = 0;

bool ToLogOrNotToLog(size_t size, const char *typeName)
{
    WRAPPER_NO_CONTRACT;

    g_iNumAllocs++;

    if (g_iNumAllocs > g_pConfig->AllocNumThreshold())
        return true;

    if (size > (size_t)g_pConfig->AllocSizeThreshold())
        return true;

    if (g_pConfig->ShouldLogAlloc(typeName))
        return true;

    return false;
}

// READ THIS!!!!!
// this function is called on the managed allocation path with an unprotected Object*;
// as a result LogAlloc cannot call anything that would toggle the GC mode, else
// you'll introduce several GC holes!
inline void LogAlloc(Object* object)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

#ifdef LOGGING
    MethodTable* pMT = object->GetMethodTable();
    size_t size = object->GetSize();

    if (LoggingOn(LF_GCALLOC, LL_INFO10))
    {
        LogSpewAlways("Allocated %5d bytes for %s_TYPE" FMT_ADDR FMT_CLASS "\n",
                      size,
                      pMT->IsValueType() ? "VAL" : "REF",
                      DBG_ADDR(object),
                      DBG_CLASS_NAME_MT(pMT));

        if (LoggingOn(LF_GCALLOC, LL_INFO1000000) ||
            (LoggingOn(LF_GCALLOC, LL_INFO100) &&
             ToLogOrNotToLog(size, DBG_CLASS_NAME_MT(pMT))))
        {
            void LogStackTrace();
            LogStackTrace();
        }
    }
#endif
}
#else
#define LogAlloc(object)
#endif

// signals completion of the object to the GC and sends events if necessary
template <class TObj>
void PublishObjectAndNotify(TObj* &orObject, GC_ALLOC_FLAGS flags)
{
    _ASSERTE(orObject->HasEmptySyncBlockInfo());

    if (flags & GC_ALLOC_USER_OLD_HEAP)
    {
        GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
    }

#ifdef _LOGALLOC
    LogAlloc(orObject);
#endif // _LOGALLOC

    // Notify the profiler of the allocation
    // do this after initializing bounds so the callback has size information
    if (TrackAllocations() ||
        (TrackLargeAllocations() && (flags & GC_ALLOC_LARGE_OBJECT_HEAP)) ||
        (TrackPinnedAllocations() && (flags & GC_ALLOC_PINNED_OBJECT_HEAP)))
    {
        OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
        GCPROTECT_BEGIN(objref);
        ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
        GCPROTECT_END();
        orObject = (TObj*) OBJECTREFToObject(objref);
    }

#ifdef FEATURE_EVENT_TRACE
    // Send ETW event for allocation
    if (ETW::TypeSystemLog::IsHeapAllocEventEnabled())
    {
        ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
    }
#endif // FEATURE_EVENT_TRACE
}

void PublishFrozenObject(Object*& orObject)
{
    PublishObjectAndNotify(orObject, GC_ALLOC_NO_FLAGS);
}

inline SIZE_T MaxArrayLength()
{
    // Impose limits on maximum array length to prevent corner case integer overflow bugs
    // Keep in sync with Array.MaxLength in BCL.
    return 0x7FFFFFC7;
}

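// Editorial note: 0x7FFFFFC7 is 2,147,483,591, the same constant exposed to managed
// code as System.Array.MaxLength; it sits slightly below INT32_MAX so array size
// computations retain some headroom for the object header and bounds.
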
OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 cElements, GC_ALLOC_FLAGS flags)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    } CONTRACTL_END;

    MethodTable* pArrayMT = arrayType.AsMethodTable();

    return AllocateSzArray(pArrayMT, cElements, flags);
}

OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS flags)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    } CONTRACTL_END;

    SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));

    _ASSERTE(pArrayMT->CheckInstanceActivated());
    _ASSERTE(pArrayMT->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);

    if (cElements < 0)
        COMPlusThrow(kOverflowException);

    if ((SIZE_T)cElements > MaxArrayLength())
        ThrowOutOfMemoryDimensionsExceeded();

    // Allocate the space from the GC heap
    SIZE_T componentSize = pArrayMT->GetComponentSize();
#ifdef TARGET_64BIT
    // POSITIVE_INT32 * UINT16 + SMALL_CONST
    // this cannot overflow on 64bit
    size_t totalSize = cElements * componentSize + pArrayMT->GetBaseSize();
#else
    S_SIZE_T safeTotalSize = S_SIZE_T((DWORD)cElements) * S_SIZE_T((DWORD)componentSize) + S_SIZE_T((DWORD)pArrayMT->GetBaseSize());
    if (safeTotalSize.IsOverflow())
        ThrowOutOfMemoryDimensionsExceeded();

    size_t totalSize = safeTotalSize.Value();
#endif
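    // Editorial note on the TARGET_64BIT bound above: cElements <= INT32_MAX < 2^31
    // and componentSize fits in a UINT16 (< 2^16), so their product is < 2^47;
    // adding the small base size keeps totalSize far below the 2^64 range of size_t.
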
#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
    if ((pArrayMT->GetArrayElementTypeHandle() == CoreLibBinder::GetElementType(ELEMENT_TYPE_R8)) &&
        ((DWORD)cElements >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
    {
        STRESS_LOG2(LF_GC, LL_INFO10, "Allocating double MD array of size %d and length %d to large object heap\n", totalSize, cElements);
        flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
    }
#endif

    if (totalSize >= LARGE_OBJECT_SIZE && totalSize >= GCHeapUtilities::GetGCHeap()->GetLOHThreshold())
        flags |= GC_ALLOC_LARGE_OBJECT_HEAP;

    if (pArrayMT->ContainsGCPointers())
        flags |= GC_ALLOC_CONTAINS_REF;

    ArrayBase* orArray = NULL;
    if (flags & GC_ALLOC_USER_OLD_HEAP)
    {
        orArray = (ArrayBase*)Alloc(totalSize, flags);
        orArray->SetMethodTableForUOHObject(pArrayMT);
    }
    else
    {
#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
        if (pArrayMT->GetArrayElementTypeHandle() == CoreLibBinder::GetElementType(ELEMENT_TYPE_R8))
        {
            flags |= GC_ALLOC_ALIGN8;
        }
#endif
#ifdef FEATURE_64BIT_ALIGNMENT
        MethodTable* pElementMT = pArrayMT->GetArrayElementTypeHandle().GetMethodTable();
        if (pElementMT->RequiresAlign8() && pElementMT->IsValueType())
        {
            // This platform requires that certain fields are 8-byte aligned (and the runtime doesn't provide
            // this guarantee implicitly, e.g. on 32-bit platforms). Since it's the array payload, not the
            // header that requires alignment, we need to be careful. However it just so happens that all the
            // cases we care about (single and multi-dim arrays of value types) have an even number of DWORDs
            // in their headers so the alignment requirements for the header and the payload are the same.
            _ASSERTE(((pArrayMT->GetBaseSize() - SIZEOF_OBJHEADER) & 7) == 0);
            flags |= GC_ALLOC_ALIGN8;
        }
#endif
        orArray = (ArrayBase*)Alloc(totalSize, flags);
        orArray->SetMethodTable(pArrayMT);
    }

    // Initialize Object
    orArray->m_NumComponents = cElements;

    PublishObjectAndNotify(orArray, flags);
    return ObjectToOBJECTREF((Object*)orArray);
}

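// Worked size example (editorial note, assuming a 64-bit SZ-array base size of
// 24 bytes): allocating an int[100] here uses componentSize = 4, giving
// totalSize = 100 * 4 + 24 = 424 bytes, which stays well below the default LOH
// threshold and so is served from the small object heap.
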
OBJECTREF TryAllocateFrozenSzArray(MethodTable* pArrayMT, INT32 cElements)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    } CONTRACTL_END;

    SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));

    _ASSERTE(pArrayMT->CheckInstanceActivated());
    _ASSERTE(pArrayMT->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);

    // The initial validation is copied from the AllocateSzArray impl

    if (pArrayMT->ContainsGCPointers() && cElements > 0)
    {
        // For arrays with GC pointers we can only work with empty arrays
        return NULL;
    }

    if (cElements < 0)
        COMPlusThrow(kOverflowException);

    if ((SIZE_T)cElements > MaxArrayLength())
        ThrowOutOfMemoryDimensionsExceeded();

    SIZE_T componentSize = pArrayMT->GetComponentSize();
#ifdef TARGET_64BIT
    // POSITIVE_INT32 * UINT16 + SMALL_CONST
    // this cannot overflow on 64bit
    size_t totalSize = cElements * componentSize + pArrayMT->GetBaseSize();
#else
    S_SIZE_T safeTotalSize = S_SIZE_T((DWORD)cElements) * S_SIZE_T((DWORD)componentSize) + S_SIZE_T((DWORD)pArrayMT->GetBaseSize());
    if (safeTotalSize.IsOverflow())
        ThrowOutOfMemoryDimensionsExceeded();

    size_t totalSize = safeTotalSize.Value();
#endif

    // FrozenObjectHeapManager doesn't yet support objects with a custom alignment,
    // so we give up on arrays of value types requiring 8-byte alignment on 32-bit platforms.
    if ((DATA_ALIGNMENT < sizeof(double)) && (pArrayMT->GetArrayElementTypeHandle() == CoreLibBinder::GetElementType(ELEMENT_TYPE_R8)))
    {
        return NULL;
    }
#ifdef FEATURE_64BIT_ALIGNMENT
    MethodTable* pElementMT = pArrayMT->GetArrayElementTypeHandle().GetMethodTable();
    if (pElementMT->RequiresAlign8() && pElementMT->IsValueType())
    {
        return NULL;
    }
#endif

    FrozenObjectHeapManager* foh = SystemDomain::GetFrozenObjectHeapManager();
    ArrayBase* orArray = static_cast<ArrayBase*>(
        foh->TryAllocateObject(pArrayMT, PtrAlign(totalSize), [](Object* obj, void* elemCntPtr){
            // Initialize the newly allocated object before publish
            static_cast<ArrayBase*>(obj)->m_NumComponents = *static_cast<DWORD*>(elemCntPtr);
        }, &cElements));

    if (orArray == nullptr)
    {
        // We failed to allocate on a frozen segment, fall back to AllocateSzArray,
        // e.g. if the array is too big to fit on a frozen segment
        return NULL;
    }

    return ObjectToOBJECTREF(orArray);
}

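// Usage sketch (editorial addition, hypothetical caller): since this helper returns
// NULL for any shape the frozen heap can't handle rather than throwing, callers
// pair it with the ordinary allocator:
//
//     OBJECTREF arr = TryAllocateFrozenSzArray(pArrayMT, cElements);
//     if (arr == NULL)
//         arr = AllocateSzArray(pArrayMT, cElements);
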
void ThrowOutOfMemoryDimensionsExceeded()
{
    CONTRACTL {
        THROWS;
    } CONTRACTL_END;

#ifdef HOST_64BIT
    EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
#else
    ThrowOutOfMemory();
#endif
}

//
// Handles arrays of arbitrary dimensions
//
// This is a wrapper overload to handle TypeHandle arrayType
//
OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags)
{
    CONTRACTL
    {
        WRAPPER_NO_CONTRACT;
    } CONTRACTL_END;

    MethodTable* pArrayMT = arrayType.AsMethodTable();

    return AllocateArrayEx(pArrayMT, pArgs, dwNumArgs, flags);
}

//
// Handles arrays of arbitrary dimensions
//
// If dwNumArgs is set to greater than 1 for a SZARRAY this function will recursively
// allocate sub-arrays and fill them in.
//
// For arrays with lower bounds, pArgs is <lower bound 1>, <count 1>, <lower bound 2>, ...
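//
// Illustrative sketch (editorial addition, hypothetical values): to allocate a
// rank-2 array with bounds [1..10, 0..19], a caller would pass both lower bounds
// and counts:
//
//     INT32 args[] = { 1, 10, 0, 20 };   // <lb1>, <count1>, <lb2>, <count2>
//     OBJECTREF arr = AllocateArrayEx(pArrayMT, args, 4, GC_ALLOC_NO_FLAGS);
//
// whereas dwNumArgs == rank (here 2) would pass counts only, with all lower
// bounds implicitly 0.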
OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
        PRECONDITION(CheckPointer(pArgs));
        PRECONDITION(dwNumArgs > 0);
    } CONTRACTL_END;

#ifdef _DEBUG
    if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
    {
        char *a = new char;
        delete a;
    }
#endif

    SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));

    // keep the original flags in case the call is recursive (jagged array case);
    // the additional flags that we infer here, such as GC_ALLOC_CONTAINS_REF,
    // may not be applicable to inner arrays
    GC_ALLOC_FLAGS flagsOriginal = flags;

    _ASSERTE(pArrayMT->CheckInstanceActivated());
    PREFIX_ASSUME(pArrayMT != NULL);
    CorElementType kind = pArrayMT->GetInternalCorElementType();
    _ASSERTE(kind == ELEMENT_TYPE_ARRAY || kind == ELEMENT_TYPE_SZARRAY);

    // Calculate the total number of elements in the array
    UINT32 cElements;
    bool maxArrayDimensionLengthOverflow = false;
    bool providedLowerBounds = false;

    if (kind == ELEMENT_TYPE_ARRAY)
    {
        unsigned rank = pArrayMT->GetRank();
        _ASSERTE(dwNumArgs == rank || dwNumArgs == 2*rank);

        // Morph an ARRAY of rank 1 with 0 lower bound into an SZARRAY
        if (rank == 1 && (dwNumArgs == 1 || pArgs[0] == 0))
        {
            TypeHandle szArrayType = ClassLoader::LoadArrayTypeThrowing(pArrayMT->GetArrayElementTypeHandle(), ELEMENT_TYPE_SZARRAY, 1);
            return AllocateSzArray(szArrayType, pArgs[dwNumArgs - 1], flags);
        }

        providedLowerBounds = (dwNumArgs == 2*rank);

        S_UINT32 safeTotalElements = S_UINT32(1);

        for (unsigned i = 0; i < dwNumArgs; i++)
        {
            int lowerBound = 0;
            if (providedLowerBounds)
            {
                lowerBound = pArgs[i];
                i++;
            }
            int length = pArgs[i];
            if (length < 0)
                COMPlusThrow(kOverflowException);
            if ((SIZE_T)length > MaxArrayLength())
                maxArrayDimensionLengthOverflow = true;
            if ((length > 0) && (lowerBound + (length - 1) < lowerBound))
                COMPlusThrow(kArgumentOutOfRangeException, W("ArgumentOutOfRange_ArrayLBAndLength"));
            safeTotalElements = safeTotalElements * S_UINT32(length);
            if (safeTotalElements.IsOverflow())
                ThrowOutOfMemoryDimensionsExceeded();
        }

        cElements = safeTotalElements.Value();
    }
    else
    {
        int length = pArgs[0];
        if (length < 0)
            COMPlusThrow(kOverflowException);
        if ((SIZE_T)length > MaxArrayLength())
            maxArrayDimensionLengthOverflow = true;
        cElements = length;
    }

    // Throw this exception only after everything else was validated for backward compatibility.
    if (maxArrayDimensionLengthOverflow)
        ThrowOutOfMemoryDimensionsExceeded();

    // Allocate the space from the GC heap
    SIZE_T componentSize = pArrayMT->GetComponentSize();
#ifdef TARGET_64BIT
    // POSITIVE_INT32 * UINT16 + SMALL_CONST
    // this cannot overflow on 64bit
    size_t totalSize = cElements * componentSize + pArrayMT->GetBaseSize();
#else
    S_SIZE_T safeTotalSize = S_SIZE_T((DWORD)cElements) * S_SIZE_T((DWORD)componentSize) + S_SIZE_T((DWORD)pArrayMT->GetBaseSize());
    if (safeTotalSize.IsOverflow())
        ThrowOutOfMemoryDimensionsExceeded();

    size_t totalSize = safeTotalSize.Value();
#endif

#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
    if ((pArrayMT->GetArrayElementTypeHandle() == CoreLibBinder::GetElementType(ELEMENT_TYPE_R8)) &&
        (cElements >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
    {
        STRESS_LOG2(LF_GC, LL_INFO10, "Allocating double MD array of size %d and length %d to large object heap\n", totalSize, cElements);
        flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
    }
#endif

    if (totalSize >= LARGE_OBJECT_SIZE && totalSize >= GCHeapUtilities::GetGCHeap()->GetLOHThreshold())
        flags |= GC_ALLOC_LARGE_OBJECT_HEAP;

    if (pArrayMT->ContainsGCPointers())
        flags |= GC_ALLOC_CONTAINS_REF;

    ArrayBase* orArray = NULL;
    if (flags & GC_ALLOC_USER_OLD_HEAP)
    {
        orArray = (ArrayBase*)Alloc(totalSize, flags);
        orArray->SetMethodTableForUOHObject(pArrayMT);
    }
    else
    {
#ifdef FEATURE_64BIT_ALIGNMENT
        MethodTable *pElementMT = pArrayMT->GetArrayElementTypeHandle().GetMethodTable();
        if (pElementMT->RequiresAlign8() && pElementMT->IsValueType())
        {
            // This platform requires that certain fields are 8-byte aligned (and the runtime doesn't provide
            // this guarantee implicitly, e.g. on 32-bit platforms). Since it's the array payload, not the
            // header that requires alignment, we need to be careful. However it just so happens that all the
            // cases we care about (single and multi-dim arrays of value types) have an even number of DWORDs
            // in their headers so the alignment requirements for the header and the payload are the same.
            _ASSERTE(((pArrayMT->GetBaseSize() - SIZEOF_OBJHEADER) & 7) == 0);
            flags |= GC_ALLOC_ALIGN8;
        }
#endif
        orArray = (ArrayBase*)Alloc(totalSize, flags);
        orArray->SetMethodTable(pArrayMT);
    }

    // Initialize Object
    orArray->m_NumComponents = cElements;

    if (kind == ELEMENT_TYPE_ARRAY)
    {
        INT32 *pCountsPtr      = (INT32 *) orArray->GetBoundsPtr();
        INT32 *pLowerBoundsPtr = (INT32 *) orArray->GetLowerBoundsPtr();
        for (unsigned i = 0; i < dwNumArgs; i++)
        {
            if (providedLowerBounds)
                *pLowerBoundsPtr++ = pArgs[i++]; // if not stated, lower bound becomes 0
            *pCountsPtr++ = pArgs[i];
        }
    }

    PublishObjectAndNotify(orArray, flags);

    if (kind != ELEMENT_TYPE_ARRAY)
    {
        // Handle allocating multiple jagged array dimensions at once
        if (dwNumArgs > 1)
        {
            PTRARRAYREF outerArray = (PTRARRAYREF) ObjectToOBJECTREF((Object *) orArray);
            GCPROTECT_BEGIN(outerArray);

            // Turn off GC stress, it is of little value here
            {
                GCStressPolicy::InhibitHolder iholder;

                // Allocate dwProvidedBounds arrays
                if (!pArrayMT->GetArrayElementTypeHandle().IsArray())
                {
                    orArray = NULL;
                }
                else
                {
                    TypeHandle subArrayType = pArrayMT->GetArrayElementTypeHandle();
                    for (UINT32 i = 0; i < cElements; i++)
                    {
                        OBJECTREF obj = AllocateArrayEx(subArrayType, &pArgs[1], dwNumArgs-1, flagsOriginal);
                        outerArray->SetAt(i, obj);
                    }

                    iholder.Release();

                    orArray = (ArrayBase *) OBJECTREFToObject(outerArray);
                }
            } // GcStressPolicy::~InhibitHolder()

            GCPROTECT_END();
        }
    }

    return ObjectToOBJECTREF((Object *) orArray);
}

/*
 * Allocates a single dimensional array of primitive types.
 */
OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        INJECT_FAULT(COMPlusThrowOM());
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    }
    CONTRACTL_END

    // Allocating simple primitive arrays is done in various places as internal storage.
    // Because this is unlikely to result in any bad recursions, we will override the type limit
    // here rather than forever chase down all the callers.
    OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);

    _ASSERTE(CorTypeInfo::IsPrimitiveType(type));

    // Fetch the proper array type
    if (g_pPredefinedArrayTypes[type] == NULL)
    {
        TypeHandle elemType = TypeHandle(CoreLibBinder::GetElementType(type));
        TypeHandle typHnd = ClassLoader::LoadArrayTypeThrowing(elemType, ELEMENT_TYPE_SZARRAY, 0);
        g_pPredefinedArrayTypes[type] = typHnd;
    }
    return AllocateSzArray(g_pPredefinedArrayTypes[type].AsMethodTable(), cElements);
}

//
// Allocate an array which is the same size as pRef. However, do not zero out the array.
//
OBJECTREF DupArrayForCloning(BASEARRAYREF pRef)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    } CONTRACTL_END;

    MethodTable *pArrayMT = pRef->GetMethodTable();

    unsigned rank = pArrayMT->GetRank();

    DWORD numArgs = rank*2;
    INT32* args = (INT32*) _alloca(sizeof(INT32)*numArgs);

    if (pArrayMT->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY)
    {
        const INT32* bounds = pRef->GetBoundsPtr();
        const INT32* lowerBounds = pRef->GetLowerBoundsPtr();
        for(unsigned int i=0; i < rank; i++)
        {
            args[2*i]   = lowerBounds[i];
            args[2*i+1] = bounds[i];
        }
    }
    else
    {
        numArgs = 1;
        args[0] = pRef->GetNumComponents();
    }
    return AllocateArrayEx(pArrayMT, args, numArgs, GC_ALLOC_ZEROING_OPTIONAL);
}

//
// Helper for parts of the EE which are allocating arrays
//
OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle elementType, BOOL bAllocateInPinnedHeap)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    } CONTRACTL_END;

    OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);

    // The object array class is loaded at startup.
    _ASSERTE(g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT] != NULL);

    TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing(elementType);

#ifdef _DEBUG
    _ASSERTE(arrayType.GetRank() == 1);
    _ASSERTE(arrayType.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
#endif //_DEBUG

    GC_ALLOC_FLAGS flags = bAllocateInPinnedHeap ? GC_ALLOC_PINNED_OBJECT_HEAP : GC_ALLOC_NO_FLAGS;
    return AllocateSzArray(arrayType, (INT32) cElements, flags);
}

STRINGREF AllocateString( DWORD cchStringLength )
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
    } CONTRACTL_END;

#ifdef _DEBUG
    if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
    {
        char *a = new char;
        delete a;
    }
#endif

    // Limit the maximum string size to <2GB to mitigate risk of security issues caused by 32-bit integer
    // overflows in buffer size calculations.
    if (cchStringLength > CORINFO_String_MaxLength)
        ThrowOutOfMemory();

    SIZE_T totalSize = PtrAlign(StringObject::GetSize(cchStringLength));
    _ASSERTE(totalSize > cchStringLength);

    SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));

    GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS;
    if (totalSize >= LARGE_OBJECT_SIZE && totalSize >= GCHeapUtilities::GetGCHeap()->GetLOHThreshold())
        flags |= GC_ALLOC_LARGE_OBJECT_HEAP;

    StringObject* orString = (StringObject*)Alloc(totalSize, flags);

    // Initialize Object
    orString->SetMethodTable(g_pStringClass);
    orString->SetStringLength(cchStringLength);