/* rpmalloc.c - Memory allocator - Public Domain - 2016-2020 Mattias Jansson
*
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
* The latest source code is always available at
*
* https://github.com/mjansson/rpmalloc
*
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
*
*/
////////////
///
/// Build time configurable limits
///
//////
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wunused-macros"
#pragma clang diagnostic ignored "-Wunused-function"
#if __has_warning("-Wreserved-identifier")
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
#if __has_warning("-Wstatic-in-inline")
#pragma clang diagnostic ignored "-Wstatic-in-inline"
#endif
#elif defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-macros"
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
#if !defined(__has_builtin)
#define __has_builtin(b) 0
#endif
#if defined(__GNUC__) || defined(__clang__)
#if __has_builtin(__builtin_memcpy_inline)
#define _rpmalloc_memcpy_const(x, y, s) __builtin_memcpy_inline(x, y, s)
#else
#define _rpmalloc_memcpy_const(x, y, s) \
do { \
_Static_assert(__builtin_choose_expr(__builtin_constant_p(s), 1, 0), "len must be a constant integer"); \
memcpy(x, y, s); \
} while (0)
#endif
#if __has_builtin(__builtin_memset_inline)
#define _rpmalloc_memset_const(x, y, s) __builtin_memset_inline(x, y, s)
#else
#define _rpmalloc_memset_const(x, y, s) \
do { \
_Static_assert(__builtin_choose_expr(__builtin_constant_p(s), 1, 0), "len must be a constant integer"); \
memset(x, y, s); \
} while (0)
#endif
#else
#define _rpmalloc_memcpy_const(x, y, s) memcpy(x, y, s)
#define _rpmalloc_memset_const(x, y, s) memset(x, y, s)
#endif
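// Illustrative usage of the helpers above: the length argument is expected to be a
// compile-time constant so the copy/fill can be fully inlined, e.g.
//   _rpmalloc_memset_const(span, 0, SPAN_HEADER_SIZE);  // constant length - ok
//   _rpmalloc_memcpy_const(dst, src, runtime_len);      // non-constant - rejected by the fallback's _Static_assert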
#if __has_builtin(__builtin_assume)
#define rpmalloc_assume(cond) __builtin_assume(cond)
#elif defined(__GNUC__) && !defined(__APPLE__)
#define rpmalloc_assume(cond) \
do { \
if (!__builtin_expect(cond, 0)) \
__builtin_unreachable(); \
} while (0)
#elif defined(_MSC_VER)
#define rpmalloc_assume(cond) __assume(cond)
#else
#define rpmalloc_assume(cond) 0
#endif
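// rpmalloc_assume() forwards an invariant to the optimizer without emitting a runtime
// check, e.g.
//   rpmalloc_assume(heap != 0);
// If the condition is ever false the behavior is undefined, so it should only be used
// for conditions that hold by construction.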
#ifndef HEAP_ARRAY_SIZE
//! Size of heap hashmap
#define HEAP_ARRAY_SIZE 47
#endif
#ifndef ENABLE_THREAD_CACHE
//! Enable per-thread cache
#define ENABLE_THREAD_CACHE 1
#endif
#ifndef ENABLE_GLOBAL_CACHE
//! Enable global cache shared between all threads, requires thread cache
#define ENABLE_GLOBAL_CACHE 1
#endif
#ifndef ENABLE_VALIDATE_ARGS
//! Enable validation of args to public entry points
#define ENABLE_VALIDATE_ARGS 0
#endif
#ifndef ENABLE_ASSERTS
//! Enable asserts
#define ENABLE_ASSERTS 0
#endif
#ifndef DISABLE_UNMAP
//! Disable unmapping memory pages (also enables unlimited cache)
#define DISABLE_UNMAP 0
#endif
#ifndef ENABLE_UNLIMITED_CACHE
//! Enable unlimited global cache (no unmapping until finalization)
#define ENABLE_UNLIMITED_CACHE 0
#endif
#ifndef ENABLE_ADAPTIVE_THREAD_CACHE
//! Enable adaptive thread cache size based on use heuristics
#define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif
#ifndef DEFAULT_SPAN_MAP_COUNT
//! Default number of spans to map in each call to map more virtual memory (default values yield 4MiB here)
#define DEFAULT_SPAN_MAP_COUNT 64
#endif
#ifndef GLOBAL_CACHE_MULTIPLIER
//! Multiplier for global cache
#define GLOBAL_CACHE_MULTIPLIER 8
#endif
#if DISABLE_UNMAP && !ENABLE_GLOBAL_CACHE
#error Must use global cache if unmap is disabled
#endif
#if DISABLE_UNMAP
#undef ENABLE_UNLIMITED_CACHE
#define ENABLE_UNLIMITED_CACHE 1
#endif
#if !ENABLE_GLOBAL_CACHE
#undef ENABLE_UNLIMITED_CACHE
#define ENABLE_UNLIMITED_CACHE 0
#endif
#if !ENABLE_THREAD_CACHE
#undef ENABLE_ADAPTIVE_THREAD_CACHE
#define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif
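// All limits above are #ifndef-guarded, so they can be overridden on the compiler
// command line instead of editing this file, e.g. (illustrative):
//   cc -c rpmalloc.c -DENABLE_ASSERTS=1 -DENABLE_THREAD_CACHE=0 -DDEFAULT_SPAN_MAP_COUNT=128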
#if defined(_WIN32) || defined(__WIN32__) || defined(_WIN64)
# define PLATFORM_WINDOWS 1
# define PLATFORM_POSIX 0
#else
# define PLATFORM_WINDOWS 0
# define PLATFORM_POSIX 1
#endif
#if PLATFORM_WINDOWS
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# if ENABLE_VALIDATE_ARGS
# include <intsafe.h>
# endif
#else
# include <unistd.h>
# include <stdio.h>
# include <time.h>
# if defined(__linux__) || defined(__ANDROID__)
# include <sys/prctl.h>
# if !defined(PR_SET_VMA)
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
# endif
# endif
# if defined(__APPLE__)
# include <TargetConditionals.h>
# if !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
# include <mach/mach_vm.h>
# include <mach/vm_statistics.h>
# endif
# endif
#endif
#include "rpmalloc.h"
#include <stdint.h>
#include <string.h>
#include <errno.h>
#if defined(_WIN32) && defined(_MSC_VER)
#include <fibersapi.h>
static tls_t fls_key = 0;
#endif
#if PLATFORM_POSIX
# include <sys/mman.h>
# include <sched.h>
# ifdef __FreeBSD__
# include <sys/sysctl.h>
# define MAP_HUGETLB MAP_ALIGNED_SUPER
# ifndef PROT_MAX
# define PROT_MAX(f) 0
# endif
# else
# define PROT_MAX(f) 0
# endif
# ifdef __sun
extern int madvise(caddr_t, size_t, int);
# endif
# ifndef MAP_UNINITIALIZED
# define MAP_UNINITIALIZED 0
# endif
#endif
#include <errno.h>
#if defined(ENABLE_ASSERTS)
# include <stdio.h>
#endif
#if ENABLE_ASSERTS
# undef NDEBUG
# if defined(_MSC_VER) && !defined(_DEBUG)
# define _DEBUG
# endif
# include <assert.h>
#define RPMALLOC_TOSTRING_M(x) #x
#define RPMALLOC_TOSTRING(x) RPMALLOC_TOSTRING_M(x)
#define rpmalloc_assert(truth, message) \
do { \
if (!(truth)) { \
if (_memory_config.error_callback) { \
_memory_config.error_callback( \
message " (" RPMALLOC_TOSTRING(truth) ") at " __FILE__ ":" RPMALLOC_TOSTRING(__LINE__)); \
} else { \
assert((truth) && message); \
} \
} \
} while (0)
#else
# define rpmalloc_assert(truth, message) do {} while(0)
#endif
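// Example: _rpmalloc_mmap() below uses
//   rpmalloc_assert(!(size % _memory_page_size), "Invalid mmap size");
// With ENABLE_ASSERTS, a failing check is reported through the configured
// _memory_config.error_callback if one is set, otherwise through assert().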
//////
///
/// Atomic access abstraction (since MSVC does not do C11 yet)
///
//////
//! Current thread heap
static tls_t _memory_thread_heap = 0;
//! Initialized flag
static int _rpmalloc_initialized = 0;
static int rpmalloc_atexit_set = 0;
static int _rpmalloc_shuting_down = 0;
#if defined(__TINYC__)
# if defined(_WIN32)
# include <intrin.h>
# endif
# include "stdatomic.h"
#endif
#include "catomic.h"
#ifdef _WIN32
# define EXPECTED(x) (x)
# define UNEXPECTED(x) (x)
#else
# define EXPECTED(x) __builtin_expect((x), 1)
# define UNEXPECTED(x) __builtin_expect((x), 0)
#endif
make_atomic(unsigned int, atomic32_t)
make_atomic(unsigned long long, atomic64_t)
#if defined(__TINYC__) && (defined(__arm__) || defined(__aarch64__) || defined(__riscv))
static FORCEINLINE int32_t atomic_load32(atomic32_t *src) {
return atomic_load_explicit((volatile c89atomic_uint32 *)src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32(atomic32_t *dst, int32_t val) {
atomic_store_explicit((volatile c89atomic_uint32 *)dst, (c89atomic_uint32)val, memory_order_relaxed);
}
static FORCEINLINE int32_t atomic_incr32(atomic32_t *val) {
return atomic_fetch_add_explicit((volatile c89atomic_uint32 *)val, 1, memory_order_relaxed) + 1;
}
static FORCEINLINE int32_t atomic_decr32(atomic32_t *val) {
return atomic_fetch_add_explicit((volatile c89atomic_uint32 *)val, -1, memory_order_relaxed) - 1;
}
static FORCEINLINE int32_t atomic_add32(atomic32_t *val, int32_t add) {
return atomic_fetch_add_explicit((volatile c89atomic_uint32 *)val, (c89atomic_uint32)add, memory_order_relaxed) + add;
}
static FORCEINLINE int atomic_cas32_acquire(atomic32_t *dst, int32_t val, int32_t ref) {
return atomic_compare_exchange_weak_explicit((volatile c89atomic_uint32 *)dst, &ref, (c89atomic_uint32)val, memory_order_acquire, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32_release(atomic32_t *dst, int32_t val) {
atomic_store_explicit((volatile c89atomic_uint32 *)dst, (c89atomic_uint32)val, memory_order_release);
}
#else
static FORCEINLINE int32_t atomic_load32(atomic32_t *src) {
return c89atomic_load_explicit_32(src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32(atomic32_t *dst, int32_t val) {
c89atomic_store_explicit_32(dst, val, memory_order_relaxed);
}
static FORCEINLINE int32_t atomic_incr32(atomic32_t *val) {
return c89atomic_fetch_add_explicit_32(val, 1, memory_order_relaxed) + 1;
}
static FORCEINLINE int32_t atomic_decr32(atomic32_t *val) {
return c89atomic_fetch_add_explicit_32(val, -1, memory_order_relaxed) - 1;
}
static FORCEINLINE int32_t atomic_add32(atomic32_t *val, int32_t add) {
return c89atomic_fetch_add_explicit_32(val, add, memory_order_relaxed) + add;
}
static FORCEINLINE int atomic_cas32_acquire(atomic32_t *dst, int32_t val, int32_t ref) {
return c89atomic_compare_exchange_weak_explicit_32(dst, &ref, val, memory_order_acquire, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32_release(atomic32_t *dst, int32_t val) {
c89atomic_store_explicit_32(dst, val, memory_order_release);
}
#endif
#if defined(__arm__)
static FORCEINLINE void *atomic_load_ptr(atomic_ptr_t *src) {
return (void *)atomic_load_explicit(src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr(atomic_ptr_t *dst, void *val) {
atomic_store_explicit(dst, val, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr_release(atomic_ptr_t *dst, void *val) {
atomic_store_explicit(dst, val, memory_order_release);
}
static FORCEINLINE void *atomic_exchange_ptr_acquire(atomic_ptr_t *dst, void *val) {
return (void *)atomic_exchange_explicit(dst, val, memory_order_acquire);
}
static FORCEINLINE int atomic_cas_ptr(atomic_ptr_t *dst, void *val, void *ref) {
return (int)atomic_swap(dst, &ref, val);
}
#else
static FORCEINLINE void *atomic_load_ptr(atomic_ptr_t *src) {
return (void *)c89atomic_load_explicit_64((volatile c89atomic_uint64 *)src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr(atomic_ptr_t *dst, void *val) {
c89atomic_store_explicit_64((volatile c89atomic_uint64 *)dst, (c89atomic_uint64)val, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr_release(atomic_ptr_t *dst, void *val) {
c89atomic_store_explicit_64((volatile c89atomic_uint64 *)dst, (c89atomic_uint64)val, memory_order_release);
}
static FORCEINLINE void *atomic_exchange_ptr_acquire(atomic_ptr_t *dst, void *val) {
return (void *)c89atomic_exchange_explicit_64((volatile c89atomic_uint64 *)dst, (c89atomic_uint64)val, memory_order_acquire);
}
static FORCEINLINE int atomic_cas_ptr(atomic_ptr_t *dst, void *val, void *ref) {
return (int)atomic_swap((volatile c89atomic_uint64 *)dst, (c89atomic_uint64 *)&ref, (c89atomic_uint64)val);
}
#endif
#if defined(__TINYC__) || !defined(_WIN32)
int rpmalloc_tls_create(tls_t *key, tls_dtor_t dtor) {
if (!key) return -1;
return (pthread_key_create(key, dtor) == 0) ? 0 : -1;
}
FORCEINLINE void rpmalloc_tls_delete(tls_t key) {
void *ptr = rpmalloc_tls_get(key);
if (ptr != NULL)
rpfree(ptr);
pthread_key_delete(key);
}
FORCEINLINE void *rpmalloc_tls_get(tls_t key) {
return pthread_getspecific(key);
}
FORCEINLINE int rpmalloc_tls_set(tls_t key, void *val) {
return (pthread_setspecific(key, val) == 0) ? 0 : -1;
}
#elif defined(_WIN32_PLATFORM_X86)
static struct impl_tls_dtor_entry {
tls_t key;
tls_dtor_t dtor;
} impl_tls_dtor_tbl[EMULATED_THREADS_TSS_DTOR_SLOTNUM];
static int impl_tls_dtor_register(tls_t key, tls_dtor_t dtor) {
int i;
for (i = 0; i < EMULATED_THREADS_TSS_DTOR_SLOTNUM; i++) {
if (!impl_tls_dtor_tbl[i].dtor)
break;
}
if (i == EMULATED_THREADS_TSS_DTOR_SLOTNUM)
return 1;
impl_tls_dtor_tbl[i].key = key;
impl_tls_dtor_tbl[i].dtor = dtor;
return 0;
}
static void impl_tls_dtor_invoke() {
int i;
for (i = 0; i < EMULATED_THREADS_TSS_DTOR_SLOTNUM; i++) {
if (impl_tls_dtor_tbl[i].dtor) {
void *val = rpmalloc_tls_get(impl_tls_dtor_tbl[i].key);
if (val) {
(impl_tls_dtor_tbl[i].dtor)(val);
}
}
}
}
int rpmalloc_tls_create(tls_t *key, tls_dtor_t dtor) {
if (!key) return -1;
*key = TlsAlloc();
if (dtor) {
if (impl_tls_dtor_register(*key, dtor)) {
TlsFree(*key);
return -1;
}
}
return (*key != 0xFFFFFFFF) ? 0 : -1;
}
FORCEINLINE void rpmalloc_tls_delete(tls_t key) {
TlsFree(key);
}
FORCEINLINE void *rpmalloc_tls_get(tls_t key) {
return TlsGetValue(key);
}
FORCEINLINE int rpmalloc_tls_set(tls_t key, void *val) {
return TlsSetValue(key, val) ? 0 : -1;
}
#else
int rpmalloc_tls_create(tls_t *key, tls_dtor_t dtor) {
if (!key) return -1;
*key = FlsAlloc(dtor);
return (*key != 0xFFFFFFFF) ? 0 : -1;
}
FORCEINLINE void rpmalloc_tls_delete(tls_t key) {
tls_t temp = key;
if (key != 0) {
key = 0;
FlsFree(temp);
}
}
FORCEINLINE void *rpmalloc_tls_get(tls_t key) {
return FlsGetValue(key);
}
FORCEINLINE int rpmalloc_tls_set(tls_t key, void *val) {
return FlsSetValue(key, val) ? 0 : -1;
}
#endif
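// These wrappers hold the per-thread heap: set_thread_heap() below stores the heap_t
// pointer under the _memory_thread_heap key, and the key destructor (on Windows,
// _rpmalloc_thread_destructor) gives the allocator a chance to finalize a thread's
// heap when the thread exits.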
////////////
///
/// Statistics related functions (evaluate to nothing when statistics not enabled)
///
//////
# define _rpmalloc_stat_inc(counter) do {} while(0)
# define _rpmalloc_stat_dec(counter) do {} while(0)
# define _rpmalloc_stat_add(counter, value) do {} while(0)
# define _rpmalloc_stat_add64(counter, value) do {} while(0)
# define _rpmalloc_stat_add_peak(counter, value, peak) do {} while (0)
# define _rpmalloc_stat_sub(counter, value) do {} while(0)
# define _rpmalloc_stat_inc_alloc(heap, class_idx) do {} while(0)
# define _rpmalloc_stat_inc_free(heap, class_idx) do {} while(0)
///
/// Preconfigured limits and sizes
///
//! Granularity of a small allocation block (must be power of two)
#define SMALL_GRANULARITY 16
//! Small granularity shift count
#define SMALL_GRANULARITY_SHIFT 4
//! Number of small block size classes
#define SMALL_CLASS_COUNT 65
//! Maximum size of a small block
#define SMALL_SIZE_LIMIT (SMALL_GRANULARITY * (SMALL_CLASS_COUNT - 1))
//! Granularity of a medium allocation block
#define MEDIUM_GRANULARITY 512
//! Medium granularity shift count
#define MEDIUM_GRANULARITY_SHIFT 9
//! Number of medium block size classes
#define MEDIUM_CLASS_COUNT 61
//! Total number of small + medium size classes
#define SIZE_CLASS_COUNT (SMALL_CLASS_COUNT + MEDIUM_CLASS_COUNT)
//! Number of large block size classes
#define LARGE_CLASS_COUNT 63
//! Maximum size of a medium block
#define MEDIUM_SIZE_LIMIT (SMALL_SIZE_LIMIT + (MEDIUM_GRANULARITY * MEDIUM_CLASS_COUNT))
//! Maximum size of a large block
#define LARGE_SIZE_LIMIT ((LARGE_CLASS_COUNT * _memory_span_size) - SPAN_HEADER_SIZE)
//! Size of a span header (must be a multiple of SMALL_GRANULARITY and a power of two)
#define SPAN_HEADER_SIZE 128
//! Number of spans in thread cache
#define MAX_THREAD_SPAN_CACHE 400
//! Number of spans to transfer between thread and global cache
#define THREAD_SPAN_CACHE_TRANSFER 64
//! Number of spans in thread cache for large spans (must be greater than LARGE_CLASS_COUNT / 2)
#define MAX_THREAD_SPAN_LARGE_CACHE 100
//! Number of spans to transfer between thread and global cache for large spans
#define THREAD_SPAN_LARGE_CACHE_TRANSFER 6
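// With the values above and the default 64KiB span size, the class boundaries work out to:
//   SMALL_SIZE_LIMIT  = 16 * 64           = 1024 bytes
//   MEDIUM_SIZE_LIMIT = 1024 + 512 * 61   = 32256 bytes
//   LARGE_SIZE_LIMIT  = 63 * 65536 - 128  = 4128640 bytes
// Requests above LARGE_SIZE_LIMIT are treated as huge allocations (SIZE_CLASS_HUGE).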
#if defined(__GNUC__) || defined(__clang__)
_Static_assert((SMALL_GRANULARITY &(SMALL_GRANULARITY - 1)) == 0, "Small granularity must be power of two");
_Static_assert((SPAN_HEADER_SIZE &(SPAN_HEADER_SIZE - 1)) == 0, "Span header size must be power of two");
#endif
#if ENABLE_VALIDATE_ARGS
//! Maximum allocation size to avoid integer overflow
#undef MAX_ALLOC_SIZE
#define MAX_ALLOC_SIZE (((size_t)-1) - _memory_span_size)
#endif
#define pointer_offset(ptr, ofs) (void*)((char*)(ptr) + (ptrdiff_t)(ofs))
#define pointer_diff(first, second) (ptrdiff_t)((const char*)(first) - (const char*)(second))
#define INVALID_POINTER ((void*)((uintptr_t)-1))
#define SIZE_CLASS_LARGE SIZE_CLASS_COUNT
#define SIZE_CLASS_HUGE ((uint32_t)-1)
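// e.g. pointer_offset(span, SPAN_HEADER_SIZE) is the first block in a span, and
// pointer_diff(block, span) recovers a block's byte offset within its owning span.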
////////////
///
/// Data types
///
//////
//! A memory heap, per thread
typedef struct heap_t heap_t;
//! Span of memory pages
typedef struct span_t span_t;
//! Span list
typedef struct span_list_t span_list_t;
//! Span active data
typedef struct span_active_t span_active_t;
//! Size class definition
typedef struct size_class_t size_class_t;
//! Global cache
typedef struct global_cache_t global_cache_t;
//! Flag indicating span is the first (master) span of a split superspan
#define SPAN_FLAG_MASTER 1U
//! Flag indicating span is a secondary (sub) span of a split superspan
#define SPAN_FLAG_SUBSPAN 2U
//! Flag indicating span has blocks with increased alignment
#define SPAN_FLAG_ALIGNED_BLOCKS 4U
//! Flag indicating an unmapped master span
#define SPAN_FLAG_UNMAPPED_MASTER 8U
#if ENABLE_ADAPTIVE_THREAD_CACHE
struct span_use_t {
//! Current number of spans used (actually used, not in cache)
atomic32_t current;
//! High water mark of spans used
atomic32_t high;
};
typedef struct span_use_t span_use_t;
#endif
// A span can either represent a single span of memory pages, with size declared by the span_map_count configuration
// variable, or a set of spans in a contiguous region, a super span. Any reference to the term "span" usually refers to
// either a single span or a super span. A super span can further be divided into multiple spans (or, again, super spans),
// where the first (super)span is the master and subsequent (super)spans are subspans. The master span keeps track of
// how many subspans are still alive and mapped in virtual memory; once all subspans and the master have been unmapped,
// the entire superspan region is released and unmapped (on Windows, for example, the entire superspan range has to be
// released in the same call that releases the virtual memory range, but individual subranges can be decommitted
// individually to reduce physical memory use).
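// Illustrative example: mapping a superspan of 4 spans with the default 64KiB span size
// reserves one contiguous 256KiB region. The first span becomes the master
// (SPAN_FLAG_MASTER, total_spans = 4, remaining_spans tracking how many spans are still
// mapped), and spans later carved from the remainder become subspans (SPAN_FLAG_SUBSPAN)
// whose offset_from_master records their distance, in span units, from the master.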
struct span_t {
//! Free list
void *free_list;
//! Total block count of size class
uint32_t block_count;
//! Size class
uint32_t size_class;
//! Index of last block initialized in free list
uint32_t free_list_limit;
//! Number of used blocks remaining when in partial state
uint32_t used_count;
//! Deferred free list
atomic_ptr_t free_list_deferred;
//! Size of deferred free list, or list of spans when part of a cache list
uint32_t list_size;
//! Size of a block
uint32_t block_size;
//! Flags and counters
uint32_t flags;
//! Number of spans
uint32_t span_count;
//! Total span counter for master spans
uint32_t total_spans;
//! Offset from master span for subspans
uint32_t offset_from_master;
//! Remaining span counter, for master spans
atomic32_t remaining_spans;
//! Alignment offset
uint32_t align_offset;
//! Owning heap
heap_t *heap;
//! Next span
span_t *next;
//! Previous span
span_t *prev;
};
#if defined(__GNUC__) || defined(__clang__)
_Static_assert(sizeof(span_t) <= SPAN_HEADER_SIZE, "span size mismatch");
#endif
struct span_cache_t {
size_t count;
span_t *span[MAX_THREAD_SPAN_CACHE];
};
typedef struct span_cache_t span_cache_t;
struct span_large_cache_t {
size_t count;
span_t *span[MAX_THREAD_SPAN_LARGE_CACHE];
};
typedef struct span_large_cache_t span_large_cache_t;
struct heap_size_class_t {
//! Free list of active span
void *free_list;
//! Doubly linked list of partially used spans with free blocks.
// Previous span pointer in the list head points to the tail span of the list.
span_t *partial_span;
//! Early level cache of fully free spans
span_t *cache;
};
typedef struct heap_size_class_t heap_size_class_t;
// Control structure for a heap, either a thread heap or a first class heap if enabled
struct heap_t {
//! Owning thread ID
uintptr_t owner_thread;
//! Free lists for each size class
heap_size_class_t size_class[SIZE_CLASS_COUNT];
#if ENABLE_THREAD_CACHE
//! Arrays of fully freed spans, single span
span_cache_t span_cache;
#endif
//! List of deferred free spans (single linked list)
atomic_ptr_t span_free_deferred;
//! Number of full spans
size_t full_span_count;
//! Mapped but unused spans
span_t *span_reserve;
//! Master span for mapped but unused spans
span_t *span_reserve_master;
//! Number of mapped but unused spans
uint32_t spans_reserved;
//! Child count
atomic32_t child_count;
//! Next heap in id list
heap_t *next_heap;
//! Next heap in orphan list
heap_t *next_orphan;
//! Heap ID
int32_t id;
//! Finalization state flag
int finalize;
//! Master heap owning the memory pages
heap_t *master_heap;
#if ENABLE_THREAD_CACHE
//! Arrays of fully freed spans, large spans with > 1 span count
span_large_cache_t span_large_cache[LARGE_CLASS_COUNT - 1];
#endif
#if ENABLE_ADAPTIVE_THREAD_CACHE
//! Current and high water mark of spans used per span count
span_use_t span_use[LARGE_CLASS_COUNT];
#endif
};
// Size class for defining a block size bucket
struct size_class_t {
//! Size of blocks in this class
uint32_t block_size;
//! Number of blocks in each chunk
uint16_t block_count;
//! Class index this class is merged with
uint16_t class_idx;
};
#if defined(__GNUC__) || defined(__clang__)
_Static_assert(sizeof(size_class_t) == 8, "Size class size mismatch");
#endif
struct global_cache_t {
//! Cache lock
atomic32_t lock;
//! Cache count
uint32_t count;
//! Cached spans
span_t *span[GLOBAL_CACHE_MULTIPLIER * MAX_THREAD_SPAN_CACHE];
//! Unlimited cache overflow
span_t *overflow;
};
////////////
///
/// Global data
///
//////
//! Default span size (64KiB)
#define _memory_default_span_size (64 * 1024)
#define _memory_default_span_size_shift 16
#define _memory_default_span_mask (~((uintptr_t)(_memory_span_size - 1)))
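// Spans are always mapped at span-size-aligned addresses, so the mask recovers the base
// of the span owning any block pointer, e.g. (illustrative):
//   span_t *span = (span_t *)((uintptr_t)ptr & _memory_span_mask);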
//! Main thread ID
static uintptr_t _rpmalloc_main_thread_id;
//! Configuration
static rpmalloc_config_t _memory_config;
//! Memory page size
static size_t _memory_page_size;
//! Shift to divide by page size
static size_t _memory_page_size_shift;
//! Granularity at which memory pages are mapped by OS
static size_t _memory_map_granularity;
#if RPMALLOC_CONFIGURABLE
//! Size of a span of memory pages
static size_t _memory_span_size;
//! Shift to divide by span size
static size_t _memory_span_size_shift;
//! Mask to get to start of a memory span
static uintptr_t _memory_span_mask;
#else
//! Hardwired span size
#define _memory_span_size _memory_default_span_size
#define _memory_span_size_shift _memory_default_span_size_shift
#define _memory_span_mask _memory_default_span_mask
#endif
//! Number of spans to map in each map call
static size_t _memory_span_map_count;
//! Number of spans to keep reserved in each heap
static size_t _memory_heap_reserve_count;
//! Global size classes
static size_class_t _memory_size_class[SIZE_CLASS_COUNT];
//! Run-time size limit of medium blocks
static size_t _memory_medium_size_limit;
//! Heap ID counter
static atomic32_t _memory_heap_id;
//! Huge page support
static int _memory_huge_pages;
#if ENABLE_GLOBAL_CACHE
//! Global span cache
static global_cache_t _memory_span_cache[LARGE_CLASS_COUNT];
#endif
//! Global reserved spans
static span_t *_memory_global_reserve;
//! Global reserved count
static size_t _memory_global_reserve_count;
//! Global reserved master
static span_t *_memory_global_reserve_master;
//! All heaps
static heap_t *_memory_heaps[HEAP_ARRAY_SIZE];
//! Used to restrict access to mapping memory for huge pages
static atomic32_t _memory_global_lock;
//! Orphaned heaps
static heap_t *_memory_orphan_heaps;
////////////
///
/// Thread local heap and ID
///
//////
static inline heap_t *
get_thread_heap_raw(void) {
#if defined(__TINYC__) || !defined(_WIN32)
return (heap_t *)pthread_getspecific(_memory_thread_heap);
#else
return (heap_t *)rpmalloc_tls_get(_memory_thread_heap);
#endif
}
//! Get the current thread heap
static inline heap_t *
get_thread_heap(void) {
heap_t *heap = get_thread_heap_raw();
#if ENABLE_PRELOAD
if (EXPECTED(heap != 0))
return heap;
rpmalloc_initialize();
return get_thread_heap_raw();
#else
return heap;
#endif
}
//! Fast thread ID
static inline uintptr_t
get_thread_id(void) {
#if defined(__TINYC__) || !defined(_WIN32)
uintptr_t tid;
# if defined(__i386__) && !defined(_WIN32)
__asm__("movl %%gs:0, %0" : "=r" (tid) : : );
# elif defined(__x86_64__) && !defined(_WIN32)
# if defined(__MACH__)
__asm__("movq %%gs:0, %0" : "=r" (tid) : : );
# elif defined(__TINYC__)
tid = (uintptr_t)pthread_self();
# else
__asm__("movq %%fs:0, %0" : "=r" (tid) : : );
# endif
# elif defined(__arm__) && !defined(_WIN32)
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3" : "=r" (tid));
# elif defined(__aarch64__) && !defined(_WIN32)
# if defined(__MACH__)
// tpidr_el0 is likely unused and always reads 0 on iOS, so use tpidrro_el0 instead
__asm__ volatile ("mrs %0, tpidrro_el0" : "=r" (tid));
# elif defined(__TINYC__)
tid = (uintptr_t)pthread_self();
# else
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tid));
# endif
# elif defined(_WIN32)
tid = (uintptr_t)pthread_self().p;
# else
tid = (uintptr_t)pthread_self();
# endif
return tid;
#else
return (uintptr_t)((void *)NtCurrentTeb());
#endif
}
//! Set the current thread heap
static void
set_thread_heap(heap_t *heap) {
#if defined(__TINYC__) || !defined(_WIN32)
pthread_setspecific(_memory_thread_heap, (void *)heap);
#else
rpmalloc_tls_set(_memory_thread_heap, (void *)heap);
#endif
if (heap)
heap->owner_thread = get_thread_id();
}
//! Set main thread ID
extern void
rpmalloc_set_main_thread(void);
void
rpmalloc_set_main_thread(void) {
_rpmalloc_main_thread_id = get_thread_id();
}
static void
_rpmalloc_spin(void) {
#if defined(_MSC_VER)
#if defined(_M_ARM64)
__yield();
#else
_mm_pause();
#endif
#elif defined(__x86_64__) || defined(__i386__)
__asm__ volatile("pause" ::: "memory");
#elif !defined(__TINYC__) && (defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7))
__asm__ volatile("yield" ::: "memory");
#elif defined(__powerpc__) || defined(__powerpc64__)
// Not known to have been compiled on these architectures, but kept as a precaution
__asm__ volatile("or 27,27,27");
#elif defined(__sparc__)
__asm__ volatile("rd %ccr, %g0 \n\trd %ccr, %g0 \n\trd %ccr, %g0");
#elif defined(__TINYC__)
sched_yield();
#else
struct timespec ts = {0};
nanosleep(&ts, 0);
#endif
}
#if defined(_WIN32)
static void NTAPI
_rpmalloc_thread_destructor(void *value) {
#ifdef _WIN32_PLATFORM_X86
impl_tls_dtor_invoke();
#endif
// If this is called on the main thread, it means rpmalloc_finalize
// has not been called and shutdown is forced (through _exit) or unclean
if (get_thread_id() == _rpmalloc_main_thread_id)
return;
if (value)
rpmalloc_thread_finalize(1);
}
#endif
////////////
///
/// Low level memory map/unmap
///
//////
static void
_rpmalloc_set_name(void *address, size_t size) {
#if defined(__linux__) || defined(__ANDROID__)
const char *name = _memory_huge_pages ? _memory_config.huge_page_name : _memory_config.page_name;
if (address == MAP_FAILED || !name)
return;
// If the kernel does not support CONFIG_ANON_VMA_NAME, or if the call fails
// (e.g. because of an invalid name), this is effectively a no-op.
(void)prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (uintptr_t)address, size, (uintptr_t)name);
#else
(void)sizeof(size);
(void)sizeof(address);
#endif
}
//! Map more virtual memory
// size is number of bytes to map
// offset receives the offset in bytes from start of mapped region
// returns address to start of mapped region to use
static void *
_rpmalloc_mmap(size_t size, size_t *offset) {
rpmalloc_assert(!(size % _memory_page_size), "Invalid mmap size");
rpmalloc_assert(size >= _memory_page_size, "Invalid mmap size");
void *address = _memory_config.memory_map(size, offset);
if (EXPECTED(address != 0)) {
_rpmalloc_stat_add_peak(&_mapped_pages, (size >> _memory_page_size_shift), _mapped_pages_peak);
_rpmalloc_stat_add(&_mapped_total, (size >> _memory_page_size_shift));
}
return address;
}
//! Unmap virtual memory
// address is the memory address to unmap, as returned from _memory_map
// size is the number of bytes to unmap, which might be less than full region for a partial unmap
// offset is the offset in bytes to the actual mapped region, as set by _memory_map
// release is set to 0 for partial unmap, or size of entire range for a full unmap
static void
_rpmalloc_unmap(void *address, size_t size, size_t offset, size_t release) {
rpmalloc_assert(!release || (release >= size), "Invalid unmap size");
rpmalloc_assert(!release || (release >= _memory_page_size), "Invalid unmap size");
if (release) {
rpmalloc_assert(!(release % _memory_page_size), "Invalid unmap size");
_rpmalloc_stat_sub(&_mapped_pages, (release >> _memory_page_size_shift));
_rpmalloc_stat_add(&_unmapped_total, (release >> _memory_page_size_shift));
}
_memory_config.memory_unmap(address, size, offset, release);
}
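// Example of the contract above (illustrative): a 4-span superspan mapped as one 256KiB
// region may have a single 64KiB subspan unmapped with release == 0 (partial unmap, the
// OS mapping stays alive), while the final unmap passes the size of the entire mapped
// range as release so the whole region is returned to the OS.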
//! Default implementation to map new pages to virtual memory
static void *
_rpmalloc_mmap_os(size_t size, size_t *offset) {
// Either size is a heap (a single page) or a (multiple) span - we only need to align spans, and only if larger than map granularity
size_t padding = ((size >= _memory_span_size) && (_memory_span_size > _memory_map_granularity)) ? _memory_span_size : 0;
rpmalloc_assert(size >= _memory_page_size, "Invalid mmap size");
#if PLATFORM_WINDOWS
//Ok to MEM_COMMIT - according to MSDN, "actual physical pages are not allocated unless/until the virtual addresses are actually accessed"
void *ptr = VirtualAlloc(0, size + padding, (_memory_huge_pages ? MEM_LARGE_PAGES : 0) | MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (!ptr) {
if (_memory_config.map_fail_callback) {
if (_memory_config.map_fail_callback(size + padding))
return _rpmalloc_mmap_os(size, offset);
} else {
rpmalloc_assert(ptr, "Failed to map virtual memory block");
}
return 0;
}
#else
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED;
# if defined(__APPLE__) && !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
int fd = (int)VM_MAKE_TAG(240U);
if (_memory_huge_pages)
fd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, fd, 0);
# elif defined(MAP_HUGETLB)
void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE | PROT_MAX(PROT_READ | PROT_WRITE), (_memory_huge_pages ? MAP_HUGETLB : 0) | flags, -1, 0);
# if defined(MADV_HUGEPAGE)
// In some configurations huge page allocation might fail, so fall back to a normal
// allocation and promote the region to a transparent huge page with madvise
if ((ptr == MAP_FAILED || !ptr) && _memory_huge_pages) {
ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
if (ptr && ptr != MAP_FAILED) {
int prm = madvise(ptr, size + padding, MADV_HUGEPAGE);
(void)prm;
rpmalloc_assert((prm == 0), "Failed to promote the page to THP");
}
}
# endif
_rpmalloc_set_name(ptr, size + padding);
# elif defined(MAP_ALIGNED)
const size_t align = (sizeof(size_t) * 8) - (size_t)(__builtin_clzl(size - 1));
void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_ALIGNED(align) : 0) | flags, -1, 0);
# elif defined(MAP_ALIGN)
caddr_t base = (_memory_huge_pages ? (caddr_t)(4 << 20) : 0);
void *ptr = mmap(base, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_ALIGN : 0) | flags, -1, 0);
# else
void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
# endif
if ((ptr == MAP_FAILED) || !ptr) {
if (_memory_config.map_fail_callback) {
if (_memory_config.map_fail_callback(size + padding))
return _rpmalloc_mmap_os(size, offset);
} else if (errno != ENOMEM) {