// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*
 * Wraps handle table to implement various handle types (Strong, Weak, etc.)
 */
#include "common.h"
#include "gcenv.h"
#include "gc.h"
#include "gcscan.h"
#include "objecthandle.h"
#include "handletablepriv.h"
#include "gchandletableimpl.h"
HandleTableMap g_HandleTableMap;
// Array of contexts used while scanning dependent handles for promotion. There are as many contexts as GC
// heaps and they're allocated by Ref_Initialize and initialized during each GC by GcDhInitialScan.
DhContext *g_pDependentHandleContexts;
#ifndef DACCESS_COMPILE
//----------------------------------------------------------------------------
/*
* struct VARSCANINFO
*
* used when tracing variable-strength handles.
*/
struct VARSCANINFO
{
uintptr_t lEnableMask; // mask of types to trace
HANDLESCANPROC pfnTrace; // tracing function to use
uintptr_t lp2; // second parameter
};
//----------------------------------------------------------------------------
#ifdef FEATURE_VARIABLE_HANDLES
/*
* Scan callback for tracing variable-strength handles.
*
* This callback is called to trace individual objects referred to by handles
* in the variable-strength table.
*/
void CALLBACK VariableTraceDispatcher(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
// lp2 is a pointer to our VARSCANINFO
struct VARSCANINFO *pInfo = (struct VARSCANINFO *)lp2;
// is the handle's dynamic type one we're currently scanning?
if ((*pExtraInfo & pInfo->lEnableMask) != 0)
{
// yes - call the tracing function for this handle
pInfo->pfnTrace(pObjRef, NULL, lp1, pInfo->lp2);
}
}
#endif // FEATURE_VARIABLE_HANDLES
#ifdef FEATURE_REFCOUNTED_HANDLES
/*
* Scan callback for tracing ref-counted handles.
*
* This callback is called to trace individual objects referred to by handles
* in the refcounted table.
*/
void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
UNREFERENCED_PARAMETER(pExtraInfo);
// there are too many races when asynchronously scanning ref-counted handles so we no longer support it
_ASSERTE(!((ScanContext*)lp1)->concurrent);
LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("", pObjRef, "causes promotion of ", *pObjRef)));
Object *pObj = VolatileLoad((PTR_Object*)pObjRef);
#ifdef _DEBUG
Object *pOldObj = pObj;
#endif
if (!HndIsNullOrDestroyedHandle(pObj) && !g_theGCHeap->IsPromoted(pObj))
{
if (GCToEEInterface::RefCountedHandleCallbacks(pObj))
{
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(&pObj, (ScanContext *)lp1, 0);
}
}
// Assert this object wasn't relocated since we are passing a temporary object's address.
_ASSERTE(pOldObj == pObj);
}
#endif // FEATURE_REFCOUNTED_HANDLES
// Only used by profiling/ETW.
//----------------------------------------------------------------------------
/*
* struct DIAG_DEPSCANINFO
*
* used when tracing dependent handles for profiling/ETW.
*/
struct DIAG_DEPSCANINFO
{
HANDLESCANPROC pfnTrace; // tracing function to use
uintptr_t pfnProfilingOrETW;
};
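/*
 * Scan callback for reporting dependent handles to profiling/ETW.
 *
 * For each dependent handle whose primary and secondary objects are both
 * non-NULL, calls the tracing function stored in the DIAG_DEPSCANINFO passed
 * via lp2, forwarding the profiler/ETW callback as that function's lp2.
 */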
void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
if (pObjRef == NULL || pExtraInfo == NULL)
return;
// At this point, it's possible that either or both of the primary and secondary
// objects are NULL. However, if the secondary object is non-NULL, then the primary
// object should also be non-NULL.
_ASSERTE(*pExtraInfo == 0 || *pObjRef != NULL);
struct DIAG_DEPSCANINFO *pInfo = (struct DIAG_DEPSCANINFO*)lp2;
HANDLESCANPROC pfnTrace = pInfo->pfnTrace;
// is the handle's secondary object non-NULL?
if ((*pObjRef != NULL) && (*pExtraInfo != 0))
{
// yes - call the tracing function for this handle
pfnTrace(pObjRef, NULL, lp1, (uintptr_t)(pInfo->pfnProfilingOrETW));
}
}
void CALLBACK UpdateWeakInteriorHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pExtraInfo);
Object **pPrimaryRef = (Object **)pObjRef;
uintptr_t **ppInteriorPtrRef = (uintptr_t **)pExtraInfo;
LOG((LF_GC, LL_INFO10000, LOG_HANDLE_OBJECT("Querying for new location of ",
pPrimaryRef, "to ", *pPrimaryRef)));
Object *pOldPrimary = *pPrimaryRef;
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(pPrimaryRef, (ScanContext *)lp1, 0);
Object *pNewPrimary = *pPrimaryRef;
if (pNewPrimary != NULL)
{
uintptr_t pOldInterior = **ppInteriorPtrRef;
uintptr_t delta = ((uintptr_t)pNewPrimary) - ((uintptr_t)pOldPrimary);
uintptr_t pNewInterior = pOldInterior + delta;
**ppInteriorPtrRef = pNewInterior;
#ifdef _DEBUG
if (pOldPrimary != *pPrimaryRef)
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
DBG_ADDR(pPrimaryRef), DBG_ADDR(pOldPrimary), DBG_ADDR(*pPrimaryRef)));
else
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
DBG_ADDR(pPrimaryRef), DBG_ADDR(*pPrimaryRef)));
if (pOldInterior != pNewInterior)
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
DBG_ADDR(*ppInteriorPtrRef), DBG_ADDR(pOldInterior), DBG_ADDR(pNewInterior)));
else
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
DBG_ADDR(*ppInteriorPtrRef), DBG_ADDR(pOldInterior)));
#endif
}
}
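/*
 * Scan callback for updating dependent handles.
 *
 * Updates both the primary object reference and the secondary object reference
 * (stored in the handle's extra info) to their new locations after relocation.
 */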
void CALLBACK UpdateDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pExtraInfo);
Object **pPrimaryRef = (Object **)pObjRef;
Object **pSecondaryRef = (Object **)pExtraInfo;
LOG((LF_GC, LL_INFO10000, LOG_HANDLE_OBJECT("Querying for new location of ",
pPrimaryRef, "to ", *pPrimaryRef)));
LOG((LF_GC, LL_INFO10000, LOG_HANDLE_OBJECT(" and ",
pSecondaryRef, "to ", *pSecondaryRef)));
#ifdef _DEBUG
Object *pOldPrimary = *pPrimaryRef;
Object *pOldSecondary = *pSecondaryRef;
#endif
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(pPrimaryRef, (ScanContext *)lp1, 0);
callback(pSecondaryRef, (ScanContext *)lp1, 0);
#ifdef _DEBUG
if (pOldPrimary != *pPrimaryRef)
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
DBG_ADDR(pPrimaryRef), DBG_ADDR(pOldPrimary), DBG_ADDR(*pPrimaryRef)));
else
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
DBG_ADDR(pPrimaryRef), DBG_ADDR(*pPrimaryRef)));
if (pOldSecondary != *pSecondaryRef)
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
DBG_ADDR(pSecondaryRef), DBG_ADDR(pOldSecondary), DBG_ADDR(*pSecondaryRef)));
else
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
DBG_ADDR(pSecondaryRef), DBG_ADDR(*pSecondaryRef)));
#endif
}
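/*
 * Scan callback for promoting dependent handles.
 *
 * If the primary object has been promoted but the secondary has not, promotes
 * the secondary and marks the per-heap DhContext so the scan will be repeated,
 * since a newly promoted secondary may itself be the primary of another
 * dependent handle. If a non-cleared, unpromoted primary is seen, records that
 * fact so later scans know further promotions may still be possible.
 */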
void CALLBACK PromoteDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pExtraInfo);
Object **pPrimaryRef = (Object **)pObjRef;
Object **pSecondaryRef = (Object **)pExtraInfo;
LOG((LF_GC, LL_INFO1000, "Checking promotion of DependentHandle\n"));
LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t", pObjRef, "to ", *pObjRef)));
LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t", pSecondaryRef, "to ", *pSecondaryRef)));
ScanContext *sc = (ScanContext*)lp1;
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
if (*pObjRef && g_theGCHeap->IsPromoted(*pPrimaryRef))
{
if (!g_theGCHeap->IsPromoted(*pSecondaryRef))
{
LOG((LF_GC, LL_INFO10000, "\tPromoting secondary " LOG_OBJECT_CLASS(*pSecondaryRef)));
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(pSecondaryRef, (ScanContext *)lp1, 0);
// need to rescan because we might have promoted an object that itself has added fields and this
// promotion might be all that is pinning that object. If we've already scanned that dependent
// handle relationship, we could lose its secondary object.

pDhContext->m_fPromoted = true;
}
}
else if (*pObjRef)
{
// If we see a non-cleared primary which hasn't been promoted, record the fact. We will only require a
// rescan if this flag has been set (if it's clear then the previous scan found only clear and
// promoted handles, so there's no chance of finding an additional handle being promoted on a
// subsequent scan).
pDhContext->m_fUnpromotedPrimaries = true;
}
}
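/*
 * Scan callback for clearing dead dependent handles.
 *
 * If the primary object was not promoted, clears both the primary and the
 * secondary references; otherwise the secondary must also be reachable.
 */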
void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t /*lp1*/, uintptr_t /*lp2*/)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pExtraInfo);
Object **pPrimaryRef = (Object **)pObjRef;
Object **pSecondaryRef = (Object **)pExtraInfo;
LOG((LF_GC, LL_INFO1000, "Checking referent of DependentHandle"));
LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t", pPrimaryRef, "to ", *pPrimaryRef)));
LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t", pSecondaryRef, "to ", *pSecondaryRef)));
if (!g_theGCHeap->IsPromoted(*pPrimaryRef))
{
LOG((LF_GC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pSecondaryRef)));
*pPrimaryRef = NULL;
*pSecondaryRef = NULL;
}
else
{
_ASSERTE(g_theGCHeap->IsPromoted(*pSecondaryRef));
LOG((LF_GC, LL_INFO10000, "\tPrimary is reachable " LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC, LL_INFO10000, "\tSecondary is reachable " LOG_OBJECT_CLASS(*pSecondaryRef)));
}
}
/*
* Scan callback for pinning handles.
*
* This callback is called to pin individual objects referred to by handles in
* the pinning table.
*/
void CALLBACK PinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_COOPERATIVE;
UNREFERENCED_PARAMETER(pExtraInfo);
// PINNING IS BAD - DON'T DO IT IF YOU CAN AVOID IT
LOG((LF_GC, LL_WARNING, LOG_HANDLE_OBJECT_CLASS("WARNING: ", pObjRef, "causes pinning of ", *pObjRef)));
Object **pRef = (Object **)pObjRef;
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(pRef, (ScanContext *)lp1, GC_CALL_PINNED);
}
#ifdef FEATURE_ASYNC_PINNED_HANDLES
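/*
 * Scan callback for async pinned handles.
 *
 * Promotes the referent and, if it is a live object, asks the EE via
 * GCToEEInterface::WalkAsyncPinnedForPromotion to promote any objects
 * associated with the async-pinned object.
 */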
void CALLBACK AsyncPinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
UNREFERENCED_PARAMETER(pExtraInfo);
LOG((LF_GC, LL_WARNING, LOG_HANDLE_OBJECT_CLASS("WARNING: ", pObjRef, "causes (async) pinning of ", *pObjRef)));
Object **pRef = (Object **)pObjRef;
_ASSERTE(lp2);
promote_func* callback = (promote_func*)lp2;
callback(pRef, (ScanContext *)lp1, 0);
Object* pPinnedObj = *pRef;
if (!HndIsNullOrDestroyedHandle(pPinnedObj))
{
GCToEEInterface::WalkAsyncPinnedForPromotion(pPinnedObj, (ScanContext *)lp1, callback);
}
}
#endif // FEATURE_ASYNC_PINNED_HANDLES
/*
* Scan callback for tracing strong handles.
*
* This callback is called to trace individual objects referred to by handles
* in the strong table.
*/
void CALLBACK PromoteObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
UNREFERENCED_PARAMETER(pExtraInfo);
LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("", pObjRef, "causes promotion of ", *pObjRef)));
Object **ppRef = (Object **)pObjRef;
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(ppRef, (ScanContext *)lp1, 0);
}
/*
* Scan callback for disconnecting dead handles.
*
* This callback is called to check promotion of individual objects referred to by
* handles in the weak tables.
*/
void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
UNREFERENCED_PARAMETER(pExtraInfo);
UNREFERENCED_PARAMETER(lp1);
UNREFERENCED_PARAMETER(lp2);
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **ppRef = (Object **)pObjRef;
if (!g_theGCHeap->IsPromoted(*ppRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
*ppRef = NULL;
}
else
{
LOG((LF_GC, LL_INFO1000000, "reachable " LOG_OBJECT_CLASS(*pObjRef)));
}
}
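/*
 * Scan callback for SizedRef handles.
 *
 * Promotes the referent and records in the handle's extra info how many bytes
 * of promotion the referent was responsible for, measured as the difference in
 * this heap's promoted-byte count before and after the promotion callback.
 */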
void CALLBACK CalculateSizedRefSize(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pExtraInfo);
Object **ppSizedRef = (Object **)pObjRef;
size_t* pSize = (size_t *)pExtraInfo;
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Getting size of referent of SizedRef-", pObjRef, "to ", *pObjRef)));
ScanContext* sc = (ScanContext *)lp1;
promote_func* callback = (promote_func*) lp2;
size_t sizeBegin = g_theGCHeap->GetPromotedBytes(sc->thread_number);
callback(ppSizedRef, (ScanContext *)lp1, 0);
size_t sizeEnd = g_theGCHeap->GetPromotedBytes(sc->thread_number);
*pSize = sizeEnd - sizeBegin;
}
/*
* Scan callback for updating pointers.
*
* This callback is called to update pointers for individual objects referred to by
* handles in the weak and strong tables.
*/
void CALLBACK UpdatePointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
UNREFERENCED_PARAMETER(pExtraInfo);
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT("Querying for new location of ", pObjRef, "to ", *pObjRef)));
Object **ppRef = (Object **)pObjRef;
#ifdef _DEBUG
Object *pOldLocation = *ppRef;
#endif
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(ppRef, (ScanContext *)lp1, 0);
#ifdef _DEBUG
if (pOldLocation != *pObjRef)
LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
DBG_ADDR(pObjRef), DBG_ADDR(pOldLocation), DBG_ADDR(*pObjRef)));
else
LOG((LF_GC, LL_INFO100000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
DBG_ADDR(pObjRef), DBG_ADDR(*pObjRef)));
#endif
}
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
/*
* Scan callback for updating pointers.
*
* This callback is called to update pointers for individual objects referred to by
* handles in the weak and strong tables.
*/
void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
UNREFERENCED_PARAMETER(pExtraInfo);
handle_scan_fn fn = (handle_scan_fn)lp2;
LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of ", pObjRef, "to ", *pObjRef)));
// Get the base object (which can subsequently be cast into an OBJECTREF == ObjectID)
Object **pRef = (Object **)pObjRef;
// Get a hold of the heap ID that's tacked onto the end of the scancontext struct.
ScanContext *pSC = (ScanContext *)lp1;
uint32_t rootFlags = 0;
bool isDependent = false;
OBJECTHANDLE handle = (OBJECTHANDLE)(pRef);
switch (HandleFetchType(handle))
{
case HNDTYPE_DEPENDENT:
isDependent = true;
break;
case HNDTYPE_WEAK_SHORT:
case HNDTYPE_WEAK_LONG:
case HNDTYPE_WEAK_INTERIOR_POINTER:
#ifdef FEATURE_WEAK_NATIVE_COM_HANDLES
case HNDTYPE_WEAK_NATIVE_COM:
#endif // FEATURE_WEAK_NATIVE_COM_HANDLES
rootFlags |= kEtwGCRootFlagsWeakRef;
break;
case HNDTYPE_STRONG:
case HNDTYPE_SIZEDREF:
break;
case HNDTYPE_PINNED:
#ifdef FEATURE_ASYNC_PINNED_HANDLES
case HNDTYPE_ASYNCPINNED:
#endif // FEATURE_ASYNC_PINNED_HANDLES
rootFlags |= kEtwGCRootFlagsPinning;
break;
#ifdef FEATURE_VARIABLE_HANDLES
case HNDTYPE_VARIABLE:
{
// Set the appropriate ETW flags for the current strength of this variable handle
uint32_t nVarHandleType = GetVariableHandleType(handle);
if (((nVarHandleType & VHT_WEAK_SHORT) != 0) ||
((nVarHandleType & VHT_WEAK_LONG) != 0))
{
rootFlags |= kEtwGCRootFlagsWeakRef;
}
if ((nVarHandleType & VHT_PINNED) != 0)
{
rootFlags |= kEtwGCRootFlagsPinning;
}
// No special ETW flag for strong handles (VHT_STRONG)
}
break;
#endif // FEATURE_VARIABLE_HANDLES
#ifdef FEATURE_REFCOUNTED_HANDLES
case HNDTYPE_REFCOUNTED:
rootFlags |= kEtwGCRootFlagsRefCounted;
if (*pRef != NULL)
{
if (!GCToEEInterface::RefCountedHandleCallbacks(*pRef))
rootFlags |= kEtwGCRootFlagsWeakRef;
}
break;
#endif // FEATURE_REFCOUNTED_HANDLES
default:
_ASSERTE(!"Unexpected handle type");
break;
}
_UNCHECKED_OBJECTREF pSec = NULL;
if (isDependent)
{
pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
}
fn(pRef, pSec, rootFlags, pSC, isDependent);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
/*
* Scan callback for updating pointers.
*
* This callback is called to update pointers for individual objects referred to by
* handles in the pinned table.
*/
void CALLBACK UpdatePointerPinned(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
LIMITED_METHOD_CONTRACT;
UNREFERENCED_PARAMETER(pExtraInfo);
Object **ppRef = (Object **)pObjRef;
_ASSERTE(lp2);
promote_func* callback = (promote_func*) lp2;
callback(ppRef, (ScanContext *)lp1, GC_CALL_PINNED);
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT("Updating ", pObjRef, "to pinned ", *pObjRef)));
}
//----------------------------------------------------------------------------
// flags describing the handle types
static const uint32_t s_rgTypeFlags[] =
{
HNDF_NORMAL, // HNDTYPE_WEAK_SHORT
HNDF_NORMAL, // HNDTYPE_WEAK_LONG
HNDF_NORMAL, // HNDTYPE_STRONG
HNDF_NORMAL, // HNDTYPE_PINNED
HNDF_EXTRAINFO, // HNDTYPE_VARIABLE
HNDF_NORMAL, // HNDTYPE_REFCOUNTED
HNDF_EXTRAINFO, // HNDTYPE_DEPENDENT
HNDF_NORMAL, // HNDTYPE_ASYNCPINNED
HNDF_EXTRAINFO, // HNDTYPE_SIZEDREF
HNDF_EXTRAINFO, // HNDTYPE_WEAK_NATIVE_COM
HNDF_EXTRAINFO, // HNDTYPE_WEAK_INTERIOR_POINTER
};
int getNumberOfSlots()
{
WRAPPER_NO_CONTRACT;
// when Ref_Initialize is called, IGCHeap::GetNumberOfHeaps() is still 0, so use #procs as a workaround.
// This is legal because even if #heaps ends up smaller than #procs, handles are created via the thread's
// home heap, and the extra slots in HandleTableBuckets simply go unused, which does not cost much space.
if (!IsServerHeap())
return 1;
return GCToOSInterface::GetTotalProcessorCount();
}
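// Holder that tears down a partially constructed HandleTableBucket (destroying any
// handle tables already created and freeing the table array) unless SuppressRelease
// is called. The bucket itself is not owned here and is never deleted.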
class HandleTableBucketHolder
{
private:
HandleTableBucket* m_bucket;
int m_slots;
BOOL m_SuppressRelease;
public:
HandleTableBucketHolder(HandleTableBucket* bucket, int slots);
~HandleTableBucketHolder();
void SuppressRelease()
{
m_SuppressRelease = TRUE;
}
};
HandleTableBucketHolder::HandleTableBucketHolder(HandleTableBucket* bucket, int slots)
:m_bucket(bucket), m_slots(slots), m_SuppressRelease(FALSE)
{
}
HandleTableBucketHolder::~HandleTableBucketHolder()
{
if (m_SuppressRelease)
{
return;
}
if (m_bucket->pTable)
{
for (int n = 0; n < m_slots; n ++)
{
if (m_bucket->pTable[n])
{
HndDestroyHandleTable(m_bucket->pTable[n]);
}
}
delete [] m_bucket->pTable;
}
// we do not own m_bucket, so we shouldn't delete it here.
}
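// One-time initialization of the handle table subsystem: allocates the initial
// HandleTableBucket array, sets up the global handle store's first bucket with one
// handle table per slot, and allocates the per-heap dependent handle scan contexts.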
bool Ref_Initialize()
{
CONTRACTL
{
NOTHROW;
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return false);
}
CONTRACTL_END;
// sanity
_ASSERTE(g_HandleTableMap.pBuckets == NULL);
// Create an array of INITIAL_HANDLE_TABLE_ARRAY_SIZE HandleTableBuckets to hold the handle table sets
HandleTableBucket** pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ];
if (pBuckets == NULL)
return false;
ZeroMemory(pBuckets, INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
g_gcGlobalHandleStore = new (nothrow) GCHandleStore();
if (g_gcGlobalHandleStore == NULL)
{
delete[] pBuckets;
return false;
}
// Initialize the bucket in the global handle store
HandleTableBucket* pBucket = &g_gcGlobalHandleStore->_underlyingBucket;
pBucket->HandleTableIndex = 0;
int n_slots = getNumberOfSlots();
HandleTableBucketHolder bucketHolder(pBucket, n_slots);
// create the handle table set for the first bucket
pBucket->pTable = new (nothrow) HHANDLETABLE[n_slots];
if (pBucket->pTable == NULL)
goto CleanupAndFail;
ZeroMemory(pBucket->pTable,
n_slots * sizeof(HHANDLETABLE));
for (int uCPUindex = 0; uCPUindex < n_slots; uCPUindex++)
{
pBucket->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, ARRAY_SIZE(s_rgTypeFlags));
if (pBucket->pTable[uCPUindex] == NULL)
goto CleanupAndFail;
HndSetHandleTableIndex(pBucket->pTable[uCPUindex], 0);
}
pBuckets[0] = pBucket;
bucketHolder.SuppressRelease();
g_HandleTableMap.pBuckets = pBuckets;
g_HandleTableMap.dwMaxIndex = INITIAL_HANDLE_TABLE_ARRAY_SIZE;
g_HandleTableMap.pNext = NULL;
// Allocate contexts used during dependent handle promotion scanning. There's one of these for every GC
// heap since they're scanned in parallel.
g_pDependentHandleContexts = new (nothrow) DhContext[n_slots];
if (g_pDependentHandleContexts == NULL)
goto CleanupAndFail;
return true;
CleanupAndFail:
if (pBuckets != NULL)
delete[] pBuckets;
if (g_gcGlobalHandleStore != NULL)
delete g_gcGlobalHandleStore;
return false;
}
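// Tears down state created by Ref_Initialize: frees the dependent handle contexts
// and the bucket arrays. Individual handle tables are expected to have been
// destroyed by their owners before this is called.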
void Ref_Shutdown()
{
WRAPPER_NO_CONTRACT;
if (g_pDependentHandleContexts)
{
delete [] g_pDependentHandleContexts;
g_pDependentHandleContexts = NULL;
}
// are there any handle tables?
if (g_HandleTableMap.pBuckets)
{
// don't destroy any of the indexed handle tables; they should
// be destroyed externally.
// destroy the handle table bucket array
HandleTableMap *walk = &g_HandleTableMap;
while (walk) {
delete [] walk->pBuckets;
walk = walk->pNext;
}
// null out the handle table array
g_HandleTableMap.pNext = NULL;
g_HandleTableMap.dwMaxIndex = 0;
// null out the global table handle
g_HandleTableMap.pBuckets = NULL;
}
}
#ifndef FEATURE_NATIVEAOT
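// Creates the handle table set for a new bucket and publishes it in the first free
// slot of the global HandleTableMap chain, growing the chain with a new node (via
// interlocked compare-exchange) when no free slot exists.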
bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket)
{
CONTRACTL
{
NOTHROW;
WRAPPER(GC_TRIGGERS);
INJECT_FAULT(return false);
}
CONTRACTL_END;
HandleTableBucket *result = bucket;
HandleTableMap *walk = &g_HandleTableMap;
HandleTableMap *last = NULL;
uint32_t offset = 0;
result->pTable = NULL;
// create handle table set for the bucket
int n_slots = getNumberOfSlots();
HandleTableBucketHolder bucketHolder(result, n_slots);
result->pTable = new (nothrow) HHANDLETABLE[n_slots];
if (!result->pTable)
{
return false;
}
ZeroMemory(result->pTable, n_slots * sizeof(HHANDLETABLE));
for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++) {
result->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, ARRAY_SIZE(s_rgTypeFlags));
if (!result->pTable[uCPUindex])
return false;
}
for (;;) {
// Do we have a free slot?
while (walk) {
for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) {
if (walk->pBuckets[i] == 0) {
for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++)
HndSetHandleTableIndex(result->pTable[uCPUindex], i+offset);
result->HandleTableIndex = i+offset;
if (Interlocked::CompareExchangePointer(&walk->pBuckets[i], result, NULL) == 0) {
// Got a free slot.
bucketHolder.SuppressRelease();
return true;
}
}
}
last = walk;
offset = walk->dwMaxIndex;
walk = walk->pNext;
}
// No free slot.
// Let's create a new node
HandleTableMap *newMap = new (nothrow) HandleTableMap;
if (!newMap)
{
return false;
}
newMap->pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ];
if (!newMap->pBuckets)
{
delete newMap;
return false;
}
newMap->dwMaxIndex = last->dwMaxIndex + INITIAL_HANDLE_TABLE_ARRAY_SIZE;
newMap->pNext = NULL;
ZeroMemory(newMap->pBuckets,
INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
if (Interlocked::CompareExchangePointer(&last->pNext, newMap, NULL) != NULL)
{
// This thread loses.
delete [] newMap->pBuckets;
delete newMap;
}
walk = last->pNext;
offset = last->dwMaxIndex;
}
}
#endif // !FEATURE_NATIVEAOT
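// Unpublishes a bucket from the global HandleTableMap so its slot can be reused.
// The bucket's handle tables are not destroyed here.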
void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket)
{
LIMITED_METHOD_CONTRACT;
size_t index = pBucket->HandleTableIndex;
HandleTableMap* walk = &g_HandleTableMap;
size_t offset = 0;
while (walk)
{
if ((index < walk->dwMaxIndex) && (index >= offset))
{
// During AppDomain unloading, we first remove a handle table and then destroy
// the table. As soon as the table is removed, the slot can be reused.
if (walk->pBuckets[index - offset] == pBucket)
{
walk->pBuckets[index - offset] = NULL;
return;
}
}
offset = walk->dwMaxIndex;
walk = walk->pNext;
}
// Didn't find it. This will happen typically from Ref_DestroyHandleTableBucket if
// we explicitly call Ref_RemoveHandleTableBucket first.
}
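// Removes the bucket from the global map, then destroys each of its handle tables
// and frees the table array itself.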
void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket)
{
WRAPPER_NO_CONTRACT;
Ref_RemoveHandleTableBucket(pBucket);
for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++)
{
HndDestroyHandleTable(pBucket->pTable[uCPUindex]);
}
delete [] pBucket->pTable;
}
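// Maps a scan context to the handle table slot to scan: the scanning thread's
// number for server GC, or slot 0 for workstation GC.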
int getSlotNumber(ScanContext* sc)
{
WRAPPER_NO_CONTRACT;
return (IsServerHeap() ? sc->thread_number : 0);
}
int getThreadCount(ScanContext* sc)
{
WRAPPER_NO_CONTRACT;
return sc->thread_count;
}
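/*
 * SetDependentHandleSecondary.
 *
 * Stores the secondary object of a dependent handle in the handle's extra info,
 * invoking the handle write barrier first for non-NULL stores.
 */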
void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF objref)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// sanity
_ASSERTE(handle);
#ifdef _DEBUG
// Make sure the objref is valid before it is assigned to a handle
ValidateAssignObjrefForHandle(objref);
#endif
// unwrap the objectref we were given
_UNCHECKED_OBJECTREF value = OBJECTREF_TO_UNCHECKED_OBJECTREF(objref);
// if we are doing a non-NULL pointer store then invoke the write-barrier
if (value)
HndWriteBarrier(handle, objref);
// store the pointer
HndSetHandleExtraInfo(handle, HNDTYPE_DEPENDENT, (uintptr_t)value);
}
#ifdef FEATURE_VARIABLE_HANDLES
//----------------------------------------------------------------------------
/*
* GetVariableHandleType.
*
* Retrieves the dynamic type of a variable-strength handle.
*/
uint32_t GetVariableHandleType(OBJECTHANDLE handle)
{
WRAPPER_NO_CONTRACT;
return (uint32_t)HndGetHandleExtraInfo(handle);
}
/*
* UpdateVariableHandleType.
*
* Changes the dynamic type of a variable-strength handle.
*
* N.B. This routine is not a macro since we do validation in RETAIL.
* We always validate the type here because it can come from external callers.
*/
void UpdateVariableHandleType(OBJECTHANDLE handle, uint32_t type)
{
WRAPPER_NO_CONTRACT;
// verify that we are being asked to set a valid type
if (!IS_VALID_VHT_VALUE(type))
{
// bogus value passed in
_ASSERTE(FALSE);
return;
}
// <REVISIT_TODO> (francish) CONCURRENT GC NOTE</REVISIT_TODO>
//
// If/when concurrent GC is implemented, we need to make sure variable handles
// DON'T change type during an asynchronous scan, OR that we properly recover
// from the change. Some changes are benign, but for example changing to or
// from a pinning handle in the middle of a scan would not be fun.
//
// store the type in the handle's extra info
HndSetHandleExtraInfo(handle, HNDTYPE_VARIABLE, (uintptr_t)type);
}
/*
* CompareExchangeVariableHandleType.
*
* Changes the dynamic type of a variable-strength handle. Unlike UpdateVariableHandleType we assume that the
* types have already been validated.
*/
uint32_t CompareExchangeVariableHandleType(OBJECTHANDLE handle, uint32_t oldType, uint32_t newType)
{
WRAPPER_NO_CONTRACT;
// verify that we are being asked to get/set valid types
_ASSERTE(IS_VALID_VHT_VALUE(oldType) && IS_VALID_VHT_VALUE(newType));
// attempt to store the type in the handle's extra info
return (uint32_t)HndCompareExchangeHandleExtraInfo(handle, HNDTYPE_VARIABLE, (uintptr_t)oldType, (uintptr_t)newType);
}
/*
* TraceVariableHandles.
*
* Convenience function for tracing variable-strength handles.
* Wraps HndScanHandlesForGC.
*/
void TraceVariableHandles(HANDLESCANPROC pfnTrace, ScanContext *sc, uintptr_t lp2, uint32_t uEnableMask, uint32_t condemned, uint32_t maxgen, uint32_t flags)
{
WRAPPER_NO_CONTRACT;
// set up to scan variable handles with the specified mask and trace function
uint32_t type = HNDTYPE_VARIABLE;
struct VARSCANINFO info = { (uintptr_t)uEnableMask, pfnTrace, lp2 };
HandleTableMap *walk = &g_HandleTableMap;
while (walk) {
for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++)
if (walk->pBuckets[i] != NULL)
{
int uCPUindex = getSlotNumber(sc);
int uCPUlimit = getNumberOfSlots();
assert(uCPUlimit > 0);
int uCPUstep = getThreadCount(sc);