@@ -46,11 +46,7 @@ jl_mutex_t finalizers_lock;
 jl_gc_num_t gc_num = {0,0,0,0,0,0,0,0,0,0,0,0,0};
 static size_t last_long_collect_interval;
 
-region_t *regions[REGION_COUNT] = {NULL};
-// store a lower bound of the first free page in each region
-int regions_lb[REGION_COUNT] = {0};
-// an upper bound of the last non-free page
-int regions_ub[REGION_COUNT] = {REGION_PG_COUNT/32 - 1};
+region_t regions[REGION_COUNT];
 
 #ifndef JULIA_ENABLE_THREADING
 static jl_thread_heap_t _jl_thread_heap;
@@ -436,11 +432,15 @@ static NOINLINE void *malloc_page(void)
     int region_i = 0;
     JL_LOCK_NOGC(&pagealloc_lock);
     while (region_i < REGION_COUNT) {
-        region = regions[region_i];
-        if (region == NULL) {
-            size_t alloc_size = sizeof(region_t);
+        region = &regions[region_i];
+        if (region->pages == NULL) {
+            const size_t pages_sz = sizeof(jl_gc_page_t) * REGION_PG_COUNT;
+            const size_t freemap_sz = sizeof(uint32_t) * REGION_PG_COUNT / 32;
+            const size_t meta_sz = sizeof(jl_gc_pagemeta_t) * REGION_PG_COUNT;
+            size_t alloc_size = pages_sz + freemap_sz + meta_sz;
 #ifdef _OS_WINDOWS_
-            char *mem = (char*)VirtualAlloc(NULL, sizeof(region_t) + GC_PAGE_SZ, MEM_RESERVE, PAGE_READWRITE);
+            char *mem = (char*)VirtualAlloc(NULL, alloc_size + GC_PAGE_SZ,
+                                            MEM_RESERVE, PAGE_READWRITE);
 #else
             if (GC_PAGE_SZ > jl_page_size)
                 alloc_size += GC_PAGE_SZ;
@@ -453,23 +453,27 @@ static NOINLINE void *malloc_page(void)
                 abort();
             }
             if (GC_PAGE_SZ > jl_page_size) {
-                // round data pointer up to the nearest gc_page_data-aligned boundary
-                // if mmap didn't already do so
-                alloc_size += GC_PAGE_SZ;
-                region = (region_t*)((char*)gc_page_data(mem + GC_PAGE_SZ - 1));
-            }
-            else {
-                region = (region_t*)mem;
+                // round data pointer up to the nearest gc_page_data-aligned
+                // boundary if mmap didn't already do so.
+                mem = (char*)gc_page_data(mem + GC_PAGE_SZ - 1);
             }
+            region->pages = (jl_gc_page_t*)mem;
+            region->freemap = (uint32_t*)(mem + pages_sz);
+            region->meta = (jl_gc_pagemeta_t*)(mem + pages_sz + freemap_sz);
+            region->lb = 0;
+            region->ub = 0;
 #ifdef _OS_WINDOWS_
-            VirtualAlloc(region->freemap, REGION_PG_COUNT/8, MEM_COMMIT, PAGE_READWRITE);
-            VirtualAlloc(region->meta, REGION_PG_COUNT*sizeof(jl_gc_pagemeta_t), MEM_COMMIT, PAGE_READWRITE);
+            VirtualAlloc(region->freemap, REGION_PG_COUNT / 8,
+                         MEM_COMMIT, PAGE_READWRITE);
+            VirtualAlloc(region->meta,
+                         REGION_PG_COUNT * sizeof(jl_gc_pagemeta_t),
+                         MEM_COMMIT, PAGE_READWRITE);
 #endif
             memset(region->freemap, 0xff, REGION_PG_COUNT/8);
-            regions[region_i] = region;
         }
-        for (i = regions_lb[region_i]; i < REGION_PG_COUNT/32; i++) {
-            if (region->freemap[i]) break;
+        for (i = region->lb; i < REGION_PG_COUNT/32; i++) {
+            if (region->freemap[i])
+                break;
         }
         if (i == REGION_PG_COUNT/32) {
             // region full
@@ -483,10 +487,10 @@ static NOINLINE void *malloc_page(void)
         gc_debug_critical_error();
         abort();
     }
-    if (regions_lb[region_i] < i)
-        regions_lb[region_i] = i;
-    if (regions_ub[region_i] < i)
-        regions_ub[region_i] = i;
+    if (region->lb < i)
+        region->lb = i;
+    if (region->ub < i)
+        region->ub = i;
 
 #if defined(_COMPILER_MINGW_)
     int j = __builtin_ffs(region->freemap[i]) - 1;
@@ -512,12 +516,12 @@ static void free_page(void *p)
 {
     int pg_idx = -1;
     int i;
-    for (i = 0; i < REGION_COUNT && regions[i] != NULL; i++) {
-        pg_idx = page_index(regions[i], p);
+    for (i = 0; i < REGION_COUNT && regions[i].pages != NULL; i++) {
+        pg_idx = page_index(&regions[i], p);
         if (pg_idx >= 0 && pg_idx < REGION_PG_COUNT) break;
     }
-    assert(i < REGION_COUNT && regions[i] != NULL);
-    region_t *region = regions[i];
+    assert(i < REGION_COUNT && regions[i].pages != NULL);
+    region_t *region = &regions[i];
     uint32_t msk = (uint32_t)(1 << (pg_idx % 32));
     assert(!(region->freemap[pg_idx/32] & msk));
     region->freemap[pg_idx/32] ^= msk;
@@ -542,7 +546,8 @@ static void free_page(void *p)
     madvise(p, decommit_size, MADV_DONTNEED);
 #endif
 no_decommit:
-    if (regions_lb[i] > pg_idx/32) regions_lb[i] = pg_idx/32;
+    if (region->lb > pg_idx / 32)
+        region->lb = pg_idx / 32;
     current_pg_count--;
 }
 
@@ -932,12 +937,12 @@ static int page_done = 0;
 static gcval_t **sweep_page(pool_t *p, jl_gc_pagemeta_t *pg, gcval_t **pfl,int,int);
 static void sweep_pool_region(gcval_t ***pfl, int region_i, int sweep_mask)
 {
-    region_t *region = regions[region_i];
+    region_t *region = &regions[region_i];
 
     // the actual sweeping
     int ub = 0;
-    int lb = regions_lb[region_i];
-    for (int pg_i = 0; pg_i <= regions_ub[region_i]; pg_i++) {
+    int lb = region->lb;
+    for (int pg_i = 0; pg_i <= region->ub; pg_i++) {
         uint32_t line = region->freemap[pg_i];
         if (!!~line) {
             ub = pg_i;
@@ -958,8 +963,8 @@ static void sweep_pool_region(gcval_t ***pfl, int region_i, int sweep_mask)
             lb = pg_i;
         }
     }
-    regions_ub[region_i] = ub;
-    regions_lb[region_i] = lb;
+    region->ub = ub;
+    region->lb = lb;
 }
 
 // Returns pointer to terminal pointer of list rooted at *pfl.
@@ -1145,7 +1150,7 @@ static int gc_sweep_inc(int sweep_mask)
     }
 
     for (int i = 0; i < REGION_COUNT; i++) {
-        if (regions[i])
+        if (regions[i].pages)
             /*finished &= */ sweep_pool_region(pfl, i, sweep_mask);
     }
 
@@ -2119,7 +2124,7 @@ void jl_print_gc_stats(JL_STREAM *s)
                   (int)(total_fin_time * 100 / gc_num.total_time));
     }
     int i = 0;
-    while (i < REGION_COUNT && regions[i]) i++;
+    while (i < REGION_COUNT && regions[i].pages) i++;
     jl_printf(s, "max allocated regions : %d\n", i);
     struct mallinfo mi = mallinfo();
     jl_printf(s, "malloc size\t%d MB\n", mi.uordblks/1024/1024);
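
Note (not part of the commit): the hunks above read region_t fields that are defined elsewhere in the tree, so for reference this is the layout they imply. Field order and exact types are assumptions inferred from the uses in malloc_page and free_page.

/* Sketch only: region_t as implied by the hunks above; the real
 * definition is not shown in this diff. */
typedef struct {
    jl_gc_page_t *pages;     // REGION_PG_COUNT page-sized blocks of data
    uint32_t *freemap;       // one bit per page; a set bit means "free"
    jl_gc_pagemeta_t *meta;  // per-page metadata
    int lb;                  // lower bound (word index) of the first free page
    int ub;                  // upper bound (word index) of the last non-free page
} region_t;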
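
A minimal standalone sketch of the freemap scan malloc_page performs, under the same one-bit-per-page, 32-pages-per-word encoding. find_free_page is a hypothetical helper, not a function from this commit; __builtin_ffs is the GCC/Clang builtin the _COMPILER_MINGW_ path above also uses.

#include <stdint.h>

/* Hypothetical helper: return the index of the first free page in a
 * region's freemap, or -1 if the region is full. Starting the scan at
 * lb skips words that are known to contain no free pages, which is
 * exactly what caching region->lb buys malloc_page. */
static int find_free_page(const uint32_t *freemap, int nwords, int lb)
{
    for (int i = lb; i < nwords; i++) {
        if (freemap[i]) {
            int j = __builtin_ffs(freemap[i]) - 1; // lowest set bit in word
            return i * 32 + j;                     // page index in region
        }
    }
    return -1; // region is full
}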