@@ -181,7 +181,7 @@ typedef struct _pool_t {
#define GC_PAGE_OFFSET (JL_SMALL_BYTE_ALIGNMENT - (sizeof_jl_taggedvalue_t % JL_SMALL_BYTE_ALIGNMENT))

// pool page metadata
-typedef struct _gcpage_t {
+typedef struct {
    struct {
        uint16_t pool_n : 8; // index (into norm_pool) of pool that owns this page
        uint16_t allocd : 1; // true if an allocation happened in this page since last sweep
@@ -195,7 +195,7 @@ typedef struct _gcpage_t {
    uint16_t thread_n; // index (into jl_thread_heap) of heap that owns this page
    char *data;
    uint8_t *ages;
-} gcpage_t;
+} jl_gc_pagemeta_t;

#define PAGE_PFL_BEG(p) ((gcval_t**)((p->data) + (p)->fl_begin_offset))
#define PAGE_PFL_END(p) ((gcval_t**)((p->data) + (p)->fl_end_offset))
@@ -211,15 +211,19 @@ typedef struct _gcpage_t {
#endif
#define REGION_COUNT 8

+typedef struct {
+    char data[GC_PAGE_SZ];
+} jl_gc_page_t;
+
typedef struct {
    // Page layout:
    //  Padding: GC_PAGE_OFFSET
    //  Blocks:  osize * n
    //    Tag:   sizeof_jl_taggedvalue_t
    //    Data:  <= osize - sizeof_jl_taggedvalue_t
-    char pages[REGION_PG_COUNT][GC_PAGE_SZ]; // must be first, to preserve page alignment
+    jl_gc_page_t pages[REGION_PG_COUNT]; // must be first, to preserve page alignment
    uint32_t freemap[REGION_PG_COUNT/32];
-    gcpage_t meta[REGION_PG_COUNT];
+    jl_gc_pagemeta_t meta[REGION_PG_COUNT];
} region_t
#if !defined(_COMPILER_MICROSOFT_) && !(defined(_COMPILER_MINGW_) && defined(_COMPILER_CLANG_))
__attribute__((aligned(GC_PAGE_SZ)))
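
Wrapping the raw page bytes in `jl_gc_page_t` makes `region->pages[i]` name one whole page, so page data and page metadata become two parallel arrays indexed by the same page number. A minimal stand-alone sketch of that layout (the `demo_` names, simplified metadata fields, and small constants are made up for illustration, not the real Julia definitions):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SZ  4096  /* stand-in for GC_PAGE_SZ */
#define DEMO_PG_COUNT 64    /* stand-in for REGION_PG_COUNT */

typedef struct { char data[DEMO_PAGE_SZ]; } demo_page_t;        /* like jl_gc_page_t */
typedef struct { uint16_t osize; uint16_t nfree; } demo_meta_t; /* tiny jl_gc_pagemeta_t stand-in */

typedef struct {
    demo_page_t pages[DEMO_PG_COUNT];    /* page payloads, one element per page */
    uint32_t freemap[DEMO_PG_COUNT/32];  /* 1 bit per page, like region_t.freemap */
    demo_meta_t meta[DEMO_PG_COUNT];     /* out-of-band metadata, same index as pages[] */
} demo_region_t;

int main(void)
{
    static demo_region_t r;
    memset(r.freemap, 0xff, sizeof(r.freemap)); /* all pages start out free */
    r.meta[3].osize = 64;                       /* metadata for page 3 */
    printf("page 3 data at %p, osize %u\n", (void*)r.pages[3].data, (unsigned)r.meta[3].osize);
    return 0;
}
```
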
@@ -336,14 +340,15 @@ static arraylist_t to_finalize;
#define gc_marked(o) (((gcval_t*)(o))->gc_bits & GC_MARKED)
#define _gc_setmark(o, mark_mode) (((gcval_t*)(o))->gc_bits = mark_mode)

-static gcpage_t *page_metadata(void *data);
+static jl_gc_pagemeta_t *page_metadata(void *data);
static void pre_mark(void);
static void post_mark(arraylist_t *list, int dryrun);
static region_t *find_region(void *ptr, int maybe);

-#define PAGE_INDEX(region, data) \
-    ((GC_PAGE_DATA((data) - GC_PAGE_OFFSET) - \
-    &(region)->pages[0][0])/GC_PAGE_SZ)
+STATIC_INLINE int page_index(region_t *region, void *data)
+{
+    return (GC_PAGE_DATA(data) - region->pages->data) / GC_PAGE_SZ;
+}

NOINLINE static uintptr_t gc_get_stack_ptr(void)
{
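
The macro-to-function change also makes the arithmetic explicit: a page index is the distance, in whole pages, between the page-aligned address of `data` and the region's first page. A small sketch of that computation, under the assumption that `GC_PAGE_DATA` masks a pointer down to its `GC_PAGE_SZ` boundary (which is how the code above uses it); the `demo_` names and constants are hypothetical:

```c
#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SZ 4096  /* stand-in for GC_PAGE_SZ */

/* Assumed behavior of GC_PAGE_DATA: round a pointer down to its page boundary. */
static char *demo_page_data(void *p)
{
    return (char*)((uintptr_t)p & ~(uintptr_t)(DEMO_PAGE_SZ - 1));
}

/* Same shape as the new page_index(): distance from the region's first page, in pages. */
static int demo_page_index(char *first_page, void *p)
{
    return (int)((demo_page_data(p) - first_page) / DEMO_PAGE_SZ);
}

int main(void)
{
    /* A page-aligned block standing in for region->pages. */
    static char pages[8 * DEMO_PAGE_SZ] __attribute__((aligned(DEMO_PAGE_SZ)));
    void *obj = pages + 3 * DEMO_PAGE_SZ + 123; /* an address inside page 3 */
    assert(demo_page_index(pages, obj) == 3);
    return 0;
}
```
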
@@ -542,7 +547,7 @@ static region_t *find_region(void *ptr, int maybe)
{
    // on 64bit systems we could probably use a single region and remove this loop
    for (int i = 0; i < REGION_COUNT && regions[i]; i++) {
-        char *begin = &regions[i]->pages[0][0];
+        char *begin = regions[i]->pages->data;
        char *end = begin + sizeof(regions[i]->pages);
        if ((char*)ptr >= begin && (char*)ptr <= end)
            return regions[i];
@@ -552,14 +557,14 @@ static region_t *find_region(void *ptr, int maybe)
    return NULL;
}

-static gcpage_t *page_metadata(void *data)
+static jl_gc_pagemeta_t *page_metadata(void *data)
{
    region_t *r = find_region(data, 0);
-    int pg_idx = PAGE_INDEX(r, (char*)data);
+    int pg_idx = page_index(r, (char*)data - GC_PAGE_OFFSET);
    return &r->meta[pg_idx];
}

-static uint8_t *page_age(gcpage_t *pg)
+static uint8_t *page_age(jl_gc_pagemeta_t *pg)
{
    return pg->ages;
}
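
The call-site change here is the one behavioral subtlety of the refactor: the old `PAGE_INDEX` macro subtracted `GC_PAGE_OFFSET` internally, while the new `page_index` expects the caller to pass an already adjusted pointer, so `page_metadata` now does the subtraction itself (and `free_page` below can pass its raw page pointer unchanged). A compact sketch of that equivalence, with illustrative names and constants rather than the real ones:

```c
#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SZ     4096  /* stand-in for GC_PAGE_SZ */
#define DEMO_PAGE_OFFSET 16    /* stand-in for GC_PAGE_OFFSET */

#define DEMO_PAGE_DATA(x) ((char*)((uintptr_t)(x) & ~(uintptr_t)(DEMO_PAGE_SZ - 1)))

/* Old style: the offset is subtracted inside the macro. */
#define DEMO_PAGE_INDEX(base, data) \
    ((DEMO_PAGE_DATA((data) - DEMO_PAGE_OFFSET) - (base)) / DEMO_PAGE_SZ)

/* New style: the caller adjusts the pointer before calling. */
static int demo_page_index(char *base, char *data)
{
    return (int)((DEMO_PAGE_DATA(data) - base) / DEMO_PAGE_SZ);
}

int main(void)
{
    static char pages[4 * DEMO_PAGE_SZ] __attribute__((aligned(DEMO_PAGE_SZ)));
    char *obj = pages + 2 * DEMO_PAGE_SZ + DEMO_PAGE_OFFSET; /* first object slot of page 2 */
    /* Both conventions land on the same page index. */
    assert(DEMO_PAGE_INDEX(pages, obj) == demo_page_index(pages, obj - DEMO_PAGE_OFFSET));
    assert(demo_page_index(pages, obj - DEMO_PAGE_OFFSET) == 2);
    return 0;
}
```
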
@@ -690,7 +695,7 @@ static inline int gc_setmark_pool(void *o, int mark_mode)
        return mark_mode;
    }
#endif
-    gcpage_t *page = page_metadata(o);
+    jl_gc_pagemeta_t *page = page_metadata(o);
    int bits = gc_bits(o);
    if (bits == GC_QUEUED || bits == GC_MARKED) {
        mark_mode = GC_MARKED;
@@ -774,7 +779,7 @@ static NOINLINE void *malloc_page(void)
    }
#ifdef _OS_WINDOWS_
    VirtualAlloc(region->freemap, REGION_PG_COUNT/8, MEM_COMMIT, PAGE_READWRITE);
-    VirtualAlloc(region->meta, REGION_PG_COUNT*sizeof(gcpage_t), MEM_COMMIT, PAGE_READWRITE);
+    VirtualAlloc(region->meta, REGION_PG_COUNT*sizeof(jl_gc_pagemeta_t), MEM_COMMIT, PAGE_READWRITE);
#endif
    memset(region->freemap, 0xff, REGION_PG_COUNT/8);
    regions[region_i] = region;
@@ -809,7 +814,7 @@ static NOINLINE void *malloc_page(void)
#endif

    region->freemap[i] &= ~(uint32_t)(1 << j);
-    ptr = region->pages[i*32 + j];
+    ptr = region->pages[i*32 + j].data;
#ifdef _OS_WINDOWS_
    VirtualAlloc(ptr, GC_PAGE_SZ, MEM_COMMIT, PAGE_READWRITE);
#endif
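
The allocation path above works off `region->freemap`: one bit per page, with a set bit meaning the page is free (which is why the map is initialized with `memset(..., 0xff, ...)`), and the word/bit pair `(i, j)` mapping to page `i*32 + j` in both `pages[]` and `meta[]`. A self-contained sketch of that bookkeeping, assuming the GCC/Clang `__builtin_ffs` intrinsic to find a set bit; it illustrates the indexing idea, not the real `malloc_page` logic:

```c
#include <stdint.h>
#include <stdio.h>

/* Claim the first free page recorded in a freemap (1 bit per page, 1 = free)
 * and return its page index, or -1 if every page is taken. */
static int demo_claim_page(uint32_t *freemap, int nwords)
{
    for (int i = 0; i < nwords; i++) {
        if (freemap[i] == 0)
            continue;                               /* no free page in this word */
        int j = __builtin_ffs((int)freemap[i]) - 1; /* lowest set bit */
        freemap[i] &= ~(uint32_t)(1 << j);          /* mark the page as used */
        return i * 32 + j;                          /* index into pages[] and meta[] */
    }
    return -1;
}

int main(void)
{
    uint32_t freemap[2] = { 0, 0x00000018 };                  /* pages 35 and 36 are free */
    printf("claimed page %d\n", demo_claim_page(freemap, 2)); /* prints 35 */
    printf("claimed page %d\n", demo_claim_page(freemap, 2)); /* prints 36 */
    printf("claimed page %d\n", demo_claim_page(freemap, 2)); /* prints -1 */
    return 0;
}
```
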
@@ -824,7 +829,7 @@ static void free_page(void *p)
    int pg_idx = -1;
    int i;
    for (i = 0; i < REGION_COUNT && regions[i] != NULL; i++) {
-        pg_idx = PAGE_INDEX(regions[i], (char*)p + GC_PAGE_OFFSET);
+        pg_idx = page_index(regions[i], p);
        if (pg_idx >= 0 && pg_idx < REGION_PG_COUNT) break;
    }
    assert(i < REGION_COUNT && regions[i] != NULL);
@@ -839,8 +844,8 @@ static void free_page(void *p)
        // ensure so we don't release more memory than intended
        size_t n_pages = (GC_PAGE_SZ + jl_page_size - 1) / GC_PAGE_SZ;
        decommit_size = jl_page_size;
-        p = (void*)((uintptr_t)&region->pages[pg_idx][0] & ~(jl_page_size - 1)); // round down to the nearest page
-        pg_idx = PAGE_INDEX(region, (char*)p + GC_PAGE_OFFSET);
+        p = (void*)((uintptr_t)region->pages[pg_idx].data & ~(jl_page_size - 1)); // round down to the nearest page
+        pg_idx = page_index(region, p);
        if (pg_idx + n_pages > REGION_PG_COUNT) goto no_decommit;
        for (; n_pages--; pg_idx++) {
            msk = (uint32_t)(1 << ((pg_idx % 32)));
@@ -1080,7 +1085,7 @@ static void sweep_malloced_arrays(void)
}

// pool allocation
-static inline gcval_t *reset_page(pool_t *p, gcpage_t *pg, gcval_t *fl)
+static inline gcval_t *reset_page(pool_t *p, jl_gc_pagemeta_t *pg, gcval_t *fl)
{
    pg->gc_bits = 0;
    pg->nfree = (GC_PAGE_SZ - GC_PAGE_OFFSET) / p->osize;
@@ -1101,7 +1106,7 @@ static NOINLINE void add_page(pool_t *p)
    char *data = (char*)malloc_page();
    if (data == NULL)
        jl_throw(jl_memory_exception);
-    gcpage_t *pg = page_metadata(data + GC_PAGE_OFFSET);
+    jl_gc_pagemeta_t *pg = page_metadata(data + GC_PAGE_OFFSET);
    pg->data = data;
    pg->osize = p->osize;
    pg->ages = (uint8_t*)malloc(LLT_ALIGN(GC_PAGE_SZ / p->osize, 8));
@@ -1136,7 +1141,7 @@ static inline void *__pool_alloc(pool_t *p, int osize, int end_offset)
        if (__unlikely(GC_PAGE_DATA(v) != GC_PAGE_DATA(next))) {
            // we only update pg's fields when the freelist changes page
            // since pg's metadata is likely not in cache
-            gcpage_t *pg = page_metadata(v);
+            jl_gc_pagemeta_t *pg = page_metadata(v);
            assert(pg->osize == p->osize);
            pg->nfree = 0;
            pg->allocd = 1;
@@ -1157,7 +1162,7 @@ static inline void *__pool_alloc(pool_t *p, int osize, int end_offset)
        }
        else {
            // like the freelist case, but only update the page metadata when it is full
-            gcpage_t *pg = page_metadata(v);
+            jl_gc_pagemeta_t *pg = page_metadata(v);
            assert(pg->osize == p->osize);
            pg->nfree = 0;
            pg->allocd = 1;
@@ -1240,7 +1245,7 @@ static int total_pages = 0;
static int freed_pages = 0;
static int lazy_freed_pages = 0;
static int page_done = 0;
-static gcval_t **sweep_page(pool_t *p, gcpage_t *pg, gcval_t **pfl, int, int);
+static gcval_t **sweep_page(pool_t *p, jl_gc_pagemeta_t *pg, gcval_t **pfl, int, int);
static void sweep_pool_region(gcval_t ***pfl, int region_i, int sweep_mask)
{
    region_t *region = regions[region_i];
@@ -1254,7 +1259,7 @@ static void sweep_pool_region(gcval_t ***pfl, int region_i, int sweep_mask)
            ub = pg_i;
            for (int j = 0; j < 32; j++) {
                if (!((line >> j) & 1)) {
-                    gcpage_t *pg = &region->meta[pg_i*32 + j];
+                    jl_gc_pagemeta_t *pg = &region->meta[pg_i*32 + j];
                    int p_n = pg->pool_n;
                    int t_n = pg->thread_n;
                    pool_t *p = NULL;
@@ -1274,7 +1279,7 @@ static void sweep_pool_region(gcval_t ***pfl, int region_i, int sweep_mask)
}

// Returns pointer to terminal pointer of list rooted at *pfl.
-static gcval_t **sweep_page(pool_t *p, gcpage_t *pg, gcval_t **pfl, int sweep_mask, int osize)
+static gcval_t **sweep_page(pool_t *p, jl_gc_pagemeta_t *pg, gcval_t **pfl, int sweep_mask, int osize)
{
    int freedall;
    gcval_t **prev_pfl = pfl;
@@ -1375,7 +1380,7 @@ static gcval_t **sweep_page(pool_t *p, gcpage_t *pg, gcval_t **pfl, int sweep_ma
#endif
        free_page(data);
#ifdef MEMDEBUG
-        memset(pg, 0xbb, sizeof(gcpage_t));
+        memset(pg, 0xbb, sizeof(jl_gc_pagemeta_t));
#endif
    }
    freed_pages++;
@@ -1438,7 +1443,7 @@ static int gc_sweep_inc(int sweep_mask)
        pool_t *p = &pools[i];
        gcval_t *last = p->freelist;
        if (last) {
-            gcpage_t *pg = page_metadata(last);
+            jl_gc_pagemeta_t *pg = page_metadata(last);
            pg->allocd = 1;
            pg->nfree = p->nfree;
        }
@@ -1447,7 +1452,7 @@ static int gc_sweep_inc(int sweep_mask)

        last = p->newpages;
        if (last) {
-            gcpage_t *pg = page_metadata(last);
+            jl_gc_pagemeta_t *pg = page_metadata(last);
            pg->nfree = (GC_PAGE_SZ - ((char*)last - GC_PAGE_DATA(last))) / p->osize;
            pg->allocd = 1;
        }
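
Pages on the `newpages` list are bump-allocated from the front, so the free count recorded here is simply the space left between the current position (`last`) and the end of the page, divided by the object size. A worked example with made-up numbers (the actual `GC_PAGE_SZ` and pool `osize` values are not part of this hunk):

```c
#include <assert.h>

int main(void)
{
    /* Hypothetical values, purely for illustration. */
    int page_sz = 16384; /* pretend GC_PAGE_SZ */
    int osize   = 64;    /* pretend object size for this pool */
    int used    = 4096;  /* (char*)last - GC_PAGE_DATA(last): bytes already handed out */

    /* Same formula as pg->nfree above. */
    int nfree = (page_sz - used) / osize;
    assert(nfree == 192);
    return 0;
}
```
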
@@ -2510,7 +2515,7 @@ void jl_gc_init(void)
static size_t pool_stats(pool_t *p, size_t *pwaste, size_t *np, size_t *pnold)
{
    gcval_t *v;
-    gcpage_t *pg = p->pages;
+    jl_gc_pagemeta_t *pg = p->pages;
    size_t osize = p->osize;
    size_t nused = 0, nfree = 0, npgs = 0, nold = 0;
@@ -2532,7 +2537,7 @@ static size_t pool_stats(pool_t *p, size_t *pwaste, size_t *np, size_t *pnold)
            v = (gcval_t*)((char*)v + osize);
            i++;
        }
-        gcpage_t *nextpg = NULL;
+        jl_gc_pagemeta_t *nextpg = NULL;
        pg = nextpg;
    }
    *pwaste = npgs * GC_PAGE_SZ - (nused * p->osize);