#include "config.h"
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
/*
* Copyright (C) 2013 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef WTF_PartitionAlloc_h
#define WTF_PartitionAlloc_h
// DESCRIPTION
// partitionAlloc() / partitionAllocGeneric() and partitionFree() /
// partitionFreeGeneric() are approximately analogous to malloc() and free().
//
// The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap partition"
// that will be used to satisfy the allocation. Different partitions are
// guaranteed to exist in separate address spaces, including being separate from
// the main system heap. If the contained objects are all freed, physical memory
// is returned to the system but the address space remains reserved.
//
// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
// minimize the instruction count to the fullest extent possible, the
// PartitionRoot is really just a header adjacent to other data areas provided
// by the allocator class.
//
// The partitionAlloc() variant of the API has the following caveats:
// - Allocations and frees against a single partition must be single threaded.
// - Allocations must not exceed a max size, chosen at compile-time via a
// templated parameter to PartitionAllocator.
// - Allocation sizes must be aligned to the system pointer size.
// - Allocations are bucketed exactly according to size.
//
// And for partitionAllocGeneric():
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
// INT_MAX bytes for security reasons).
// - Bucketing is by approximate size, for example an allocation of 4000 bytes
// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
// keep worst-case waste to ~10%.
//
// The allocators are designed to be extremely fast, thanks to the following
// properties and design:
// - Just a single (reasonably predictable) branch in the hot / fast path for
// both allocating and (significantly) freeing.
// - A minimal number of operations in the hot / fast path, with the slow paths
// in separate functions, leading to the possibility of inlining.
// - Each partition page (which is usually multiple physical pages) has a
// metadata structure which allows fast mapping of free() address to an
// underlying bucket.
// - Supports a lock-free API for fast performance in single-threaded cases.
// - The freelist for a given bucket is split across a number of partition
// pages, enabling various simple tricks to try and minimize fragmentation.
// - Fine-grained bucket sizes leading to less waste and better packing.
//
// The following security properties are provided at this time:
// - Linear overflows cannot corrupt into the partition.
// - Linear overflows cannot corrupt out of the partition.
// - Freed pages will only be re-used within the partition.
// (exception: large allocations > ~1MB)
// - Freed pages will only hold same-sized objects when re-used.
// - Dereference of freelist pointer should fault.
// - Out-of-line main metadata: linear over or underflow cannot corrupt it.
// - Partial pointer overwrite of freelist pointer should fault.
// - Rudimentary double-free detection.
// - Large allocations (> ~1MB) are guard-paged at the beginning and end.
//
// The following security properties could be investigated in the future:
// - Per-object bucketing (instead of per-size) is mostly available at the API,
// but not used yet.
// - No randomness of freelist entries or bucket position.
// - Better checking for wild pointers in free().
// - Better freelist masking function to guarantee fault on 32-bit.
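//
// A minimal usage sketch (illustrative; the variable names are made up):
//
//   static PartitionAllocatorGeneric gPartition;
//   gPartition.init();
//   void* p = partitionAllocGeneric(gPartition.root(), 4000);
//   partitionFreeGeneric(gPartition.root(), p);
//   gPartition.shutdown();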
#include "Assertions.h"
#include "BitwiseOperations.h"
#include "ByteSwap.h"
#include "PageAllocator.h"
#include "SpinLock.h"
#include <limits.h>
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
#endif
#if ENABLE(ASSERT)
#include <string.h>
#endif
namespace WTF {
// Allocation granularity of sizeof(void*) bytes.
static const size_t kAllocationGranularity = sizeof(void*);
static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// Underlying partition storage pages are a power-of-two size. It is typical
// for a partition page to be based on multiple system pages. Most references to
// "page" refer to partition pages.
// We also have the concept of "super pages" -- these are the underlying system
// allocations we make. Super pages contain multiple partition pages inside them
// and include space for a small amount of metadata per partition page.
// Inside super pages, we store "slot spans". A slot span is a contiguous range
// of one or more partition pages that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current max slot span size of 64k and other
// constant values, we pack _all_ partitionAllocGeneric() sizes perfectly up
// against the end of a system page.
static const size_t kPartitionPageShift = 14; // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
static const size_t kMaxPartitionPagesPerSlotSpan = 4;
// To avoid fragmentation via never-used freelist entries, we hand out partition
// freelist sections gradually, in units of the dominant system page size.
// What we're actually doing is avoiding filling the full partition page
// (typically 16KB) with freelist pointers right away. Writing freelist
// pointers will fault and dirty a private page, which is very wasteful if we
// never actually store objects there.
static const size_t kNumSystemPagesPerPartitionPage = kPartitionPageSize / kSystemPageSize;
static const size_t kMaxSystemPagesPerSlotSpan = kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
// These chunks are called "super pages". We do this so that we can store
// metadata in the first few pages of each 2MB aligned section. This leads to
// a very fast free(). We specifically choose 2MB because this virtual address
// block represents a full but single PTE allocation on ARM, ia32 and x64.
//
// The layout of the super page is as follows. The sizes below are the same
// for 32 bit and 64 bit.
//
// | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Slot span | Slot span | ... | Slot span | Guard page (4KB) |
//
// - Each slot span is a contiguous range of one or more PartitionPages.
// - The metadata page has the following format. Note that the PartitionPage
// that is not at the head of a slot span is "unused". In other words,
// the metadata for the slot span is stored only in the first PartitionPage
// of the slot span. Metadata accesses to other PartitionPages are
// redirected to the first PartitionPage.
//
// | SuperPageExtentEntry (32B) | PartitionPage of slot span 1 (32B, used) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 1 (32B, unused) | PartitionPage of slot span 2 (32B, used) | PartitionPage of slot span 3 (32B, used) | ... | PartitionPage of slot span N (32B, unused) |
//
// A direct mapped page uses a similar layout so that it appears to be a super page:
//
// | Guard page (4KB) | Metadata page (4KB) | Guard pages (8KB) | Direct mapped object | Guard page (4KB) |
//
// - The metadata page has the following layout:
//
// | SuperPageExtentEntry (32B) | PartitionPage (32B) | PartitionBucket (32B) | PartitionDirectMapExtent (8B) |
static const size_t kSuperPageShift = 21; // 2MB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage = kSuperPageSize / kPartitionPageSize;
static const size_t kPageMetadataShift = 5; // 32 bytes per partition page.
static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
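// Sanity check of the layout above (assuming 4KB system pages): a 2MB super
// page holds 2MB / 16KB = 128 partition pages, whose metadata occupies
// 128 * 32B = 4KB -- exactly the one metadata system page in the diagram.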
// The following kGeneric* constants apply to the generic variants of the API.
// The "order" of an allocation is closely related to the power-of-two size of
// the allocation. More precisely, the order is the bit index of the
// most-significant-bit in the allocation size, where bit numbering starts
// at index 1 for the least-significant-bit.
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
static const size_t kGenericMaxBucketedOrder = 20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
static const size_t kGenericNumBucketedOrders = (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
static const size_t kGenericNumBucketsPerOrderBits = 3; // Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144, 160, ..., 240
static const size_t kGenericNumBucketsPerOrder = 1 << kGenericNumBucketsPerOrderBits;
static const size_t kGenericNumBuckets = kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1);
static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1; // Limit when downsizing a direct mapping using realloc().
static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT;
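// Worked example of the bucketing (illustrative): a request of 4000 bytes has
// its most-significant bit at index 12, so it has order 12, which covers
// sizes 2048..4095 with eight buckets spaced 256 bytes apart (2048, 2304,
// ..., 3840). 4000 exceeds the last order-12 bucket, so it is bumped up to
// the first bucket of order 13: a 4096-byte slot, for ~2.4% waste. Note also
// that kGenericMaxBucketed works out to 983040 bytes (960KB), matching the
// "almost 1MB" comment above.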
// Constants for the memory reclaim logic.
static const size_t kMaxFreeableSpans = 16;
// If the total size in bytes of allocated but not committed pages exceeds this
// value (which probably indicates an "out of virtual address space" condition),
// a special crash stack trace is generated at |partitionOutOfMemory|.
// This is to distinguish "out of virtual address space" from
// "out of physical memory" in crash reports.
static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
#if ENABLE(ASSERT)
// Uninitialized memory
static const unsigned char kUninitializedByte = 0xDE;
#endif
// Freed memory. Used by the delayed free list even when ASSERT is disabled,
// so it must be defined unconditionally.
static const unsigned char kFreedByte = 0xDF;
// Cookie (canary) size. Also referenced outside ASSERT builds, e.g. by
// PartitionRootBase::kCookieValue and the delayed free list.
static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel.
struct PartitionBucket;
struct PartitionRootBase;
struct PartitionFreelistEntry {
PartitionFreelistEntry* next;
};
// Some notes on page states. A page can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active page has available free slots. A full page has no free slots. An
// empty page has no allocated slots, and a decommitted page is an empty page
// that had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because
// full, empty and decommitted pages may briefly be present in the list until
// we next do a scan over it.
// The "empty page" list is an accurate list of pages which are either empty
// or decommitted.
//
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
// return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
// empty list or it _may_ leave it on the active list until a future list scan.
// - malloc() _may_ scan the active page list in order to fulfil the request.
// If it does this, full, empty and decommitted pages encountered will be
// booted out of the active list. If there are no suitable active pages found,
// an empty or decommitted page (if one exists) will be pulled from the empty
// list on to the active list.
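// Illustrative transition sketch (not exhaustive):
//   Active --(last free slot taken)--> Full
//   Full --(a slot freed)--> Active
//   Active --(fully emptied, then scanned)--> Empty --(purge)--> Decommitted
//   Empty / Decommitted --(pulled in by malloc())--> Active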
struct PartitionPage {
PartitionFreelistEntry* freelistHead;
PartitionPage* nextPage;
PartitionBucket* bucket;
int16_t numAllocatedSlots; // Deliberately signed, 0 for empty or decommitted page, -n for full pages.
uint16_t numUnprovisionedSlots;
uint16_t pageOffset;
int16_t emptyCacheIndex; // -1 if not in the empty cache.
};
struct PartitionBucket {
PartitionPage* activePagesHead; // Accessed most in hot path => goes first.
PartitionPage* emptyPagesHead;
PartitionPage* decommittedPagesHead;
uint32_t slotSize;
uint16_t numSystemPagesPerSlotSpan;
uint16_t numFullPages;
};
// An "extent" is a span of consecutive superpages. We link to the partition's
// next extent (if there is one) at the very start of a superpage's metadata
// area.
struct PartitionSuperPageExtentEntry {
PartitionRootBase* root;
char* superPageBase;
char* superPagesEnd;
PartitionSuperPageExtentEntry* next;
};
struct PartitionDirectMapExtent {
PartitionDirectMapExtent* nextExtent;
PartitionDirectMapExtent* prevExtent;
PartitionBucket* bucket;
size_t mapSize; // Mapped size, not including guard pages and meta-data.
};
struct WTF_EXPORT PartitionRootBase {
size_t totalSizeOfCommittedPages;
size_t totalSizeOfSuperPages;
size_t totalSizeOfDirectMappedPages;
// Invariant: totalSizeOfCommittedPages <= totalSizeOfSuperPages + totalSizeOfDirectMappedPages.
unsigned numBuckets;
unsigned maxAllocation;
bool initialized;
char* nextSuperPage;
char* nextPartitionPage;
char* nextPartitionPageEnd;
PartitionSuperPageExtentEntry* currentExtent;
PartitionSuperPageExtentEntry* firstExtent;
PartitionDirectMapExtent* directMapList;
PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
int16_t globalEmptyPageRingIndex;
uintptr_t invertedSelf;
static int gInitializedLock;
static bool gInitialized;
// gSeedPage is used as a sentinel to indicate that there is no page
// in the active page list. We could use nullptr, but in that case we need
// to add a null-check branch to the hot allocation path. We want to avoid
// that.
static PartitionPage gSeedPage;
static PartitionBucket gPagedBucket;
// Maximum number of entries in the delayed free list (16).
size_t delayed_free_list_max_sz;
// Pointers passed to free() whose actual release has been deferred.
std::vector<void *> delayed_free_list;
// User heap allocations have a canary before and after them. The canary
// value is per-partition-root and is XOR'd with the low byte of the
// address at which it is stored.
bool kCookieInitialized;
unsigned char kCookieValue[WTF::kCookieSize];
};
// Never instantiate a PartitionRoot directly, instead use SizeSpecificPartitionAllocator.
struct PartitionRoot : public PartitionRootBase {
// The PartitionAlloc templated class ensures the following is correct.
ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast<PartitionBucket*>(this + 1); }
ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_cast<const PartitionBucket*>(this + 1); }
};
// Never instantiate a PartitionRootGeneric directly, instead use PartitionAllocatorGeneric.
struct PartitionRootGeneric : public PartitionRootBase {
int lock;
// Some pre-computed constants.
size_t orderIndexShifts[kBitsPerSizet + 1];
size_t orderSubIndexMasks[kBitsPerSizet + 1];
// The bucket lookup table lets us map a size_t to a bucket quickly.
// The trailing +1 caters for the overflow case for very large allocation sizes.
// It is one flat array instead of a 2D array because in the 2D world, we'd
// need to index array[blah][max+1] which risks undefined behavior.
PartitionBucket* bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
PartitionBucket buckets[kGenericNumBuckets];
};
// Flags for partitionAllocGenericFlags.
enum PartitionAllocFlags {
PartitionAllocReturnNull = 1 << 0,
};
// Struct used to retrieve total memory usage of a partition. Used by
// PartitionStatsDumper implementation.
struct PartitionMemoryStats {
size_t totalMmappedBytes; // Total bytes mmapped from the system.
size_t totalCommittedBytes; // Total size of committed pages.
size_t totalResidentBytes; // Total bytes provisioned by the partition.
size_t totalActiveBytes; // Total active bytes in the partition.
size_t totalDecommittableBytes; // Total bytes that could be decommitted.
size_t totalDiscardableBytes; // Total bytes that could be discarded.
};
// Struct used to retrieve memory statistics about a partition bucket. Used by
// PartitionStatsDumper implementation.
struct PartitionBucketMemoryStats {
bool isValid; // Used to check whether the stats are valid.
bool isDirectMap; // True if this is a direct mapping; size will not be unique.
uint32_t bucketSlotSize; // The size of the slot in bytes.
uint32_t allocatedPageSize; // Total size the partition page allocated from the system.
uint32_t activeBytes; // Total active bytes used in the bucket.
uint32_t residentBytes; // Total bytes provisioned in the bucket.
uint32_t decommittableBytes; // Total bytes that could be decommitted.
uint32_t discardableBytes; // Total bytes that could be discarded.
uint32_t numFullPages; // Number of pages with all slots allocated.
uint32_t numActivePages; // Number of pages that have at least one provisioned slot.
uint32_t numEmptyPages; // Number of pages that are empty but not decommitted.
uint32_t numDecommittedPages; // Number of pages that are empty and decommitted.
};
// RNG adapter for std::random_shuffle. Static so that including this header
// in multiple translation units does not produce duplicate definitions.
static inline int _rand(int i) { return rand() % i; }
// Interface that is passed to partitionDumpStats and
// partitionDumpStatsGeneric for using the memory statistics.
class WTF_EXPORT PartitionStatsDumper {
public:
// Called to dump total memory used by partition, once per partition.
virtual void partitionDumpTotals(const char* partitionName, const PartitionMemoryStats*) = 0;
// Called to dump stats about buckets, for each bucket.
virtual void partitionsDumpBucketStats(const char* partitionName, const PartitionBucketMemoryStats*) = 0;
};
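// A minimal sketch of a stats dumper implementation (illustrative; the class
// name and printf-based reporting are invented for the example):
//
//   class PrintfStatsDumper : public PartitionStatsDumper {
//   public:
//       virtual void partitionDumpTotals(const char* name, const PartitionMemoryStats* stats)
//       {
//           printf("%s: %zu committed bytes\n", name, stats->totalCommittedBytes);
//       }
//       virtual void partitionsDumpBucketStats(const char* name, const PartitionBucketMemoryStats* stats)
//       {
//           if (stats->isValid)
//               printf("%s: bucket slot size %u, %u active bytes\n", name, stats->bucketSlotSize, stats->activeBytes);
//       }
//   };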
WTF_EXPORT void partitionAllocInit(PartitionRoot*, size_t numBuckets, size_t maxAllocation);
WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*);
WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);
enum PartitionPurgeFlags {
// Decommitting the ring list of empty pages is reasonably fast.
PartitionPurgeDecommitEmptyPages = 1 << 0,
// Discarding unused system pages is slower, because it involves walking all
// freelists in all active partition pages of all buckets >= system page
// size. It often frees a similar amount of memory to decommitting the empty
// pages, though.
PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};
WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int);
WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, int, size_t, PartitionBucket*);
WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*);
WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, void*, size_t);
WTF_EXPORT void partitionDumpStats(PartitionRoot*, const char* partitionName, bool isLightDump, PartitionStatsDumper*);
WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, const char* partitionName, bool isLightDump, PartitionStatsDumper*);
ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(PartitionFreelistEntry* ptr)
{
// We use bswap on little endian as a fast mask for two reasons:
// 1) If an object is freed and its vtable used where the attacker doesn't
// get the chance to run allocations between the free and use, the vtable
// dereference is likely to fault.
// 2) If the attacker has a linear buffer overflow and elects to try and
// corrupt a freelist pointer, partial pointer overwrite attacks are
// thwarted.
// For big endian, similar guarantees are arrived at with a negation.
#if CPU(BIG_ENDIAN)
uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
#endif
return reinterpret_cast<PartitionFreelistEntry*>(masked);
}
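// Worked example (illustrative, little-endian x86-64): a freelist entry at
// 0x00007f1234567890 is stored byte-swapped as 0x90785634127f0000, a
// non-canonical address whose dereference faults. An attacker overwriting
// only the low bytes of the stored value corrupts the high bytes of the
// decoded pointer, so partial overwrites fault too.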
ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size)
{
#if ENABLE(ASSERT)
// Add space for cookies, checking for integer overflow.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size + (2 * kCookieSize) > size);
size += 2 * kCookieSize;
#endif
return size;
}
ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size)
{
#if ENABLE(ASSERT)
// Remove space for cookies.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size >= 2 * kCookieSize);
size -= 2 * kCookieSize;
#endif
return size;
}
ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr)
{
#if ENABLE(ASSERT)
// The value given to the application is actually just after the cookie.
ptr = static_cast<char*>(ptr) - kCookieSize;
#endif
return ptr;
}
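// With ENABLE(ASSERT), a slot is laid out as (sizes per kCookieSize above):
//
//   | cookie (16B) | user data | cookie (16B) |
//
// and the pointer handed to the application points just past the leading
// cookie; the adjust functions above convert between the two views.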
ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr);
ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page)
{
PartitionSuperPageExtentEntry* extentEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
return extentEntry->root;
}
ALWAYS_INLINE void partitionCookieWriteValue(void* ptr, PartitionPage *page)
{
#if ENABLE(ASSERT)
unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
PartitionRootBase* root = partitionPageToRoot(page);
uint8_t x = (uintptr_t) cookiePtr & 0xff;
for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) {
*cookiePtr = root->kCookieValue[i] ^ x;
}
#endif
}
ALWAYS_INLINE void partitionCookieCheckValue(void* ptr, PartitionPage *page)
{
#if ENABLE(ASSERT)
unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
uint8_t x = (uintptr_t) cookiePtr & 0xff;
PartitionRootBase* root = partitionPageToRoot(page);
for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) {
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(*cookiePtr == (root->kCookieValue[i] ^ x));
}
#endif
}
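// Worked example (illustrative values): if root->kCookieValue[0] is 0xAB and
// the cookie starts at an address whose low byte is 0x40, the first stored
// cookie byte is 0xAB ^ 0x40 = 0xEB. The per-address XOR makes a canary
// captured at one location useless for forging a canary at another.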
ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr)
{
uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(pointerAsUint & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
}
ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr)
{
uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
char* superPagePtr = reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
uintptr_t partitionPageIndex = (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
// Index 0 is invalid because it is the metadata and guard area and
// the last index is invalid because it is a guard page.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
PartitionPage* page = reinterpret_cast<PartitionPage*>(partitionSuperPageToMetadataArea(superPagePtr) + (partitionPageIndex << kPageMetadataShift));
// Partition pages in the same slot span can share the same page object. Adjust for that.
size_t delta = page->pageOffset << kPageMetadataShift;
page = reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
return page;
}
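// Worked example (illustrative, assuming 4KB system pages): for a user
// pointer at superPageBase + 0x9000, the partition page index is
// 0x9000 >> 14 = 2, so its metadata entry lives at
// superPageBase + 0x1000 + (2 << 5), i.e. 64 bytes into the metadata page;
// the pageOffset adjustment then redirects to the slot span's head page.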
ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page)
{
uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(superPageOffset > kSystemPageSize);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
uintptr_t partitionPageIndex = (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
// Index 0 is invalid because it is the metadata area and the last index is invalid because it is a guard page.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
void* ret = reinterpret_cast<void*>(superPageBase + (partitionPageIndex << kPartitionPageShift));
return ret;
}
ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr)
{
PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
// Checks that the pointer offset within the slot span is a multiple of the slot size.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!((reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % page->bucket->slotSize));
return page;
}
ALWAYS_INLINE bool partitionBucketIsDirectMapped(const PartitionBucket* bucket)
{
return !bucket->numSystemPagesPerSlotSpan;
}
ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket)
{
return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
}
ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket)
{
return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
}
ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page)
{
// For single-slot buckets which span more than one partition page, we
// have some spare metadata space to store the raw allocation size. We
// can use this to report better statistics.
PartitionBucket* bucket = page->bucket;
if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
return nullptr;
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION((bucket->slotSize % kSystemPageSize) == 0);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionBucketIsDirectMapped(bucket) || partitionBucketSlots(bucket) == 1);
page++;
return reinterpret_cast<size_t*>(&page->freelistHead);
}
ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page)
{
size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
if (UNLIKELY(rawSizePtr != nullptr))
return *rawSizePtr;
return 0;
}
ALWAYS_INLINE bool partitionPointerIsValid(void* ptr)
{
PartitionPage* page = partitionPointerToPage(ptr);
PartitionRootBase* root = partitionPageToRoot(page);
return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
}
ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
{
PartitionPage* page = bucket->activePagesHead;
// Check that this page is neither full nor freed.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots >= 0);
void* ret = page->freelistHead;
if (LIKELY(ret != 0)) {
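// Hardened allocation path: rather than always handing out the freelist
// head, walk the list and stop at a random entry (~10% chance per node),
// validating each visited entry, so heap layout is harder to predict.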
PartitionFreelistEntry* t = page->freelistHead;
PartitionFreelistEntry* z = t;
while(t) {
if((rand() % 10) == 1) {
break;
}
z = t;
t = partitionFreelistMask(t->next);
if(t == NULL) {
break;
}
// Ensure that t and page mask to the same super page base address. This
// catches most freelist corruptions where the overwritten value was not
// informed by a prior memory disclosure.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(((uintptr_t) t & (uintptr_t) kSuperPageBaseMask) == ((uintptr_t) page & (uintptr_t) kSuperPageBaseMask));
// Ensure we can mask from the user pointer back to a valid page pointer,
// and that the root's inverted self-pointer is intact.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(t));
}
if(t) {
ret = t;
} else {
ret = page->freelistHead;
// Apply the same freelist-corruption checks as above to the head pointer
// we fell back to.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(((uintptr_t) ret & (uintptr_t) kSuperPageBaseMask) == ((uintptr_t) page & (uintptr_t) kSuperPageBaseMask));
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ret));
}
// All large allocations must go through the slow path to correctly
// update the size metadata.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageGetRawSize(page) == 0);
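// If the walk settled on the head, pop it; otherwise unlink the randomly
// chosen entry from the middle of the freelist.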
if(ret == page->freelistHead) {
PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
page->freelistHead = newHead;
} else {
z->next = t->next;
}
page->numAllocatedSlots++;
} else {
ret = partitionAllocSlowPath(root, flags, size, bucket);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!ret || partitionPointerIsValid(ret));
}
#if ENABLE(ASSERT)
if (!ret)
return 0;
// Fill the uninitialized pattern, and write the cookies.
page = partitionPointerToPage(ret);
size_t slotSize = page->bucket->slotSize;
size_t rawSize = partitionPageGetRawSize(page);
if (rawSize) {
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(rawSize == size);
slotSize = rawSize;
}
size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
char* charRet = static_cast<char*>(ret);
// The value given to the application is actually just after the cookie.
ret = charRet + kCookieSize;
memset(ret, kUninitializedByte, noCookieSize);
partitionCookieWriteValue(charRet, page);
partitionCookieWriteValue(charRet + kCookieSize + noCookieSize, page);
#endif
return ret;
}
ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
void* result = malloc(size);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(result);
return result;
#else
size = partitionCookieSizeAdjustAdd(size);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized);
size_t index = size >> kBucketShift;
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(index < root->numBuckets);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size == index << kBucketShift);
PartitionBucket* bucket = &root->buckets()[index];
return partitionBucketAlloc(root, 0, size, bucket);
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}
ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page, bool delay = false)
{
if (delay) {
PartitionRootBase *prb = partitionPageToRoot(page);
// Make sure the pointer is not already on our delayed
// free list. Assert if it is
for(auto p : prb->delayed_free_list) {
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(p != ptr);
}
if (prb->delayed_free_list.size() < prb->delayed_free_list_max_sz) {
size_t rawSize = partitionPageGetRawSize(page);
size_t slotSize = page->bucket->slotSize;
if (rawSize)
slotSize = rawSize;
// Destroy the user data before adding the pointer to the delayed free
// list, leaving the cookies intact so they can still be verified when
// the slot is eventually released.
#if ENABLE(ASSERT)
memset(reinterpret_cast<char*>(ptr) + kCookieSize, kFreedByte, slotSize - (kCookieSize * 2));
#else
// Without ASSERT there are no cookies bracketing the slot; clear it all.
memset(ptr, kFreedByte, slotSize);
#endif
prb->delayed_free_list.push_back(ptr);
return;
} else {
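// The delayed list is full: shuffle it, evict one entry at random,
// queue the current pointer in its place, and fall through to free
// the evicted pointer immediately.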
std::random_shuffle(prb->delayed_free_list.begin(), prb->delayed_free_list.end(), _rand);
void *tptr = prb->delayed_free_list[prb->delayed_free_list.size()-1];
prb->delayed_free_list.pop_back();
prb->delayed_free_list.push_back(ptr);
ptr = tptr;
page = partitionPointerToPage(ptr);
}
}
// If these asserts fire, you probably corrupted memory.
#if ENABLE(ASSERT)
size_t rawSize = partitionPageGetRawSize(page);
size_t slotSize = page->bucket->slotSize;
if (rawSize)
slotSize = rawSize;
// Canary values should be intact even though user data
// was previously memset
partitionCookieCheckValue(ptr, page);
partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - kCookieSize, page);
memset(ptr, kFreedByte, slotSize);
#endif
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots);
PartitionFreelistEntry* freelistHead = page->freelistHead;
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || partitionPointerIsValid(freelistHead));
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(ptr != freelistHead); // Catches an immediate double free.
ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug.
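// Hardened check: walk the entire freelist so a double free is caught
// anywhere in the list, not just at the head.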
PartitionFreelistEntry* f = freelistHead;
while(f != NULL) {
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(f != ptr);
f = partitionFreelistMask(f->next);
}
PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
entry->next = partitionFreelistMask(freelistHead);
page->freelistHead = entry;
--page->numAllocatedSlots;
if (UNLIKELY(page->numAllocatedSlots <= 0)) {
partitionFreeSlowPath(page);
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPageGetRawSize(page) == 0);
}
}
ALWAYS_INLINE void partitionFree(void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
free(ptr);
#else
// free(NULL) is a no-op, matching the guarantee made by
// standard heap implementations.
if (!ptr) {
return;
}
ptr = partitionCookieFreePointerAdjust(ptr);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ptr));
PartitionPage* page = partitionPointerToPage(ptr);
partitionFreeWithPage(ptr, page, true);
#endif
}
ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size)
{
size_t order = kBitsPerSizet - countLeadingZerosSizet(size);
// The order index is simply the next few bits after the most significant bit.
size_t orderIndex = (size >> root->orderIndexShifts[order]) & (kGenericNumBucketsPerOrder - 1);
// And if the remaining bits are non-zero we must bump the bucket up.
size_t subOrderIndex = size & root->orderSubIndexMasks[order];
PartitionBucket* bucket = root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + orderIndex + !!subOrderIndex];
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!bucket->slotSize || bucket->slotSize >= size);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(!(bucket->slotSize % kGenericSmallestBucket));
return bucket;
}
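// Worked example (illustrative): for size = 4000, the order is 12. The
// order-index shift isolates the three bits below the most significant bit:
// (4000 >> 8) & 7 == 7, and the remaining low bits (4000 & 0xff == 160) are
// non-zero, so the lookup index is bumped by one -- landing on the first
// bucket of order 13, whose slot size is 4096.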
ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, int flags, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
void* result = malloc(size);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(result);
return result;
#else
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized);
size = partitionCookieSizeAdjustAdd(size);
PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
spinLockLock(&root->lock);
void* ret = partitionBucketAlloc(root, flags, size, bucket);
spinLockUnlock(&root->lock);
return ret;
#endif
}
ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, size_t size)
{
return partitionAllocGenericFlags(root, 0, size);
}
ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
free(ptr);
#else
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized);
if (UNLIKELY(!ptr))
return;
ptr = partitionCookieFreePointerAdjust(ptr);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ptr));
PartitionPage* page = partitionPointerToPage(ptr);
spinLockLock(&root->lock);
partitionFreeWithPage(ptr, page, true);
spinLockUnlock(&root->lock);
#endif
}
ALWAYS_INLINE size_t partitionDirectMapSize(size_t size)
{
// Caller must check that the size is not above the kGenericMaxDirectMapped
// limit before calling. This also guards against integer overflow in the
// calculation here.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(size <= kGenericMaxDirectMapped);
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
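// Worked example (illustrative, assuming 4KB system pages): a 100000-byte
// request is rounded up to 102400 bytes (25 system pages).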
ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, size_t size)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return size;
#else
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(root->initialized);
size = partitionCookieSizeAdjustAdd(size);
PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
size = bucket->slotSize;
} else if (size > kGenericMaxDirectMapped) {
// Too large to allocate => return the size unchanged.
} else {
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(bucket == &PartitionRootBase::gPagedBucket);
size = partitionDirectMapSize(size);
}
return partitionCookieSizeAdjustSubtract(size);
#endif
}
ALWAYS_INLINE bool partitionAllocSupportsGetSize()
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return false;
#else
return true;
#endif
}
ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr)
{
// No need to lock here. Only 'ptr' being freed by another thread could
// cause trouble, and the caller is responsible for that not happening.
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionAllocSupportsGetSize());
ptr = partitionCookieFreePointerAdjust(ptr);
RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(partitionPointerIsValid(ptr));
PartitionPage* page = partitionPointerToPage(ptr);
size_t size = page->bucket->slotSize;
return partitionCookieSizeAdjustSubtract(size);
}
// N (or more accurately, N - sizeof(void*)) represents the largest size in
// bytes that will be handled by a SizeSpecificPartitionAllocator.
// Attempts to partitionAlloc() more than this amount will fail.
template <size_t N>
class SizeSpecificPartitionAllocator {
public:
static const size_t kMaxAllocation = N - kAllocationGranularity;
// Extra buckets cover the space that the two cookies add to each allocation.
static const size_t kNumBuckets = (N + (WTF::kCookieSize * 2)) / kAllocationGranularity;
void init() { partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); }
bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); }
ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; }
private:
PartitionRoot m_partitionRoot;
PartitionBucket m_actualBuckets[kNumBuckets];
};
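// A minimal usage sketch (illustrative; the variable names are made up):
//
//   static SizeSpecificPartitionAllocator<1024> gSized;
//   gSized.init();
//   void* p = partitionAlloc(gSized.root(), 64); // must be pointer-size aligned
//   partitionFree(p);
//   gSized.shutdown();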
class PartitionAllocatorGeneric {
public:
void init() { partitionAllocGenericInit(&m_partitionRoot); }
bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }
private:
PartitionRootGeneric m_partitionRoot;
};
} // namespace WTF
using WTF::SizeSpecificPartitionAllocator;
using WTF::PartitionAllocatorGeneric;
using WTF::PartitionRoot;
using WTF::partitionAllocInit;
using WTF::partitionAllocShutdown;
using WTF::partitionAlloc;
using WTF::partitionFree;
using WTF::partitionAllocGeneric;
using WTF::partitionFreeGeneric;
using WTF::partitionReallocGeneric;
using WTF::partitionAllocActualSize;
using WTF::partitionAllocSupportsGetSize;
using WTF::partitionAllocGetSize;
// Hardened PartitionAlloc C API
extern "C" {
// Size specific partitions/slots for common allocations
// These templates define the maximum allocation size
// that can occur within them.
static SizeSpecificPartitionAllocator<64> _PA;
static SizeSpecificPartitionAllocator<128> __PA;
static SizeSpecificPartitionAllocator<256> ___PA;
static SizeSpecificPartitionAllocator<512> ____PA;
// Generic partition for strings
static PartitionAllocatorGeneric g_string_partition;
// Generic partition for all other objects
static PartitionAllocatorGeneric g_other_partition;
// C wrapper for creating a generic partition
void *new_generic_partition() {
PartitionAllocatorGeneric *np = new PartitionAllocatorGeneric;
np->init();
return (void *)np;
}
// C wrapper for allocating from a generic partition
void *generic_partition_alloc(void *p, size_t sz) {
PartitionAllocatorGeneric *np = reinterpret_cast<PartitionAllocatorGeneric *>(p);
return (void *) partitionAllocGeneric(np->root(), sz);
}
// C wrapper for reallocating from a generic partition
void *generic_partition_realloc(void *p, void *t, size_t sz) {
PartitionAllocatorGeneric *np = reinterpret_cast<PartitionAllocatorGeneric *>(p);
return (void *) partitionReallocGeneric(np->root(), t, sz);
}
// C wrapper for freeing from a generic partition
void generic_partition_free(void *p, void *m) {
PartitionAllocatorGeneric *np = reinterpret_cast<PartitionAllocatorGeneric *>(p);
partitionFreeGeneric(np->root(), m);
}
// C wrapper for deleting a generic partition
void delete_generic_partition(void *p) {
PartitionAllocatorGeneric *np = reinterpret_cast<PartitionAllocatorGeneric *>(p);
np->shutdown();
}
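// A usage sketch of the C API (illustrative):
//
//   partitionalloc_init();
//   void* part = new_generic_partition();
//   void* buf = generic_partition_alloc(part, 128);
//   buf = generic_partition_realloc(part, buf, 256);
//   generic_partition_free(part, buf);
//   delete_generic_partition(part);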
// Initialization function that must be called
// before any other operations are performed
void partitionalloc_init() {
_PA.init();
__PA.init();
___PA.init();
____PA.init();
g_string_partition.init();
g_other_partition.init();
}
} // extern "C"
#endif // WTF_PartitionAlloc_h