123
123
// No-op when there is no GC lock to release (single-threaded / no GIL build).
// NOTE: this must be a function-like macro with an EMPTY expansion; writing
// "#define GC_EXIT ()" (with a space) defines an object-like macro that
// expands to "()", turning every "GC_EXIT();" call site into the syntax
// error "();".
#define GC_EXIT()
124
124
#endif
125
125
126
// Static functions for individual steps of the GC mark/sweep sequence
static void gc_collect_start_common(void);
static void *gc_get_ptr(void **ptrs, int i);
#if MICROPY_GC_SPLIT_HEAP
static void gc_mark_subtree(mp_state_mem_area_t *area, size_t block);
#else
static void gc_mark_subtree(size_t block);
#endif
static void gc_deal_with_stack_overflow(void);
static void gc_sweep_run_finalisers(void);
static void gc_sweep_free_blocks(void);
126
138
// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
127
139
static void gc_setup_area (mp_state_mem_area_t * area , void * start , void * end ) {
128
140
// calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
@@ -379,6 +391,64 @@ static inline mp_state_mem_area_t *gc_get_ptr_area(const void *ptr) {
379
391
#endif
380
392
#endif
381
393
394
+ void gc_collect_start (void ) {
395
+ gc_collect_start_common ();
396
+ #if MICROPY_GC_ALLOC_THRESHOLD
397
+ MP_STATE_MEM (gc_alloc_amount ) = 0 ;
398
+ #endif
399
+
400
+ // Trace root pointers. This relies on the root pointers being organised
401
+ // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
402
+ // dict_globals, then the root pointer section of mp_state_vm.
403
+ void * * ptrs = (void * * )(void * )& mp_state_ctx ;
404
+ size_t root_start = offsetof(mp_state_ctx_t , thread .dict_locals );
405
+ size_t root_end = offsetof(mp_state_ctx_t , vm .qstr_last_chunk );
406
+ gc_collect_root (ptrs + root_start / sizeof (void * ), (root_end - root_start ) / sizeof (void * ));
407
+
408
+ #if MICROPY_ENABLE_PYSTACK
409
+ // Trace root pointers from the Python stack.
410
+ ptrs = (void * * )(void * )MP_STATE_THREAD (pystack_start );
411
+ gc_collect_root (ptrs , (MP_STATE_THREAD (pystack_cur ) - MP_STATE_THREAD (pystack_start )) / sizeof (void * ));
412
+ #endif
413
+ }
414
+
415
+ static void gc_collect_start_common (void ) {
416
+ GC_ENTER ();
417
+ assert ((MP_STATE_THREAD (gc_lock_depth ) & GC_COLLECT_FLAG ) == 0 );
418
+ MP_STATE_THREAD (gc_lock_depth ) |= GC_COLLECT_FLAG ;
419
+ MP_STATE_MEM (gc_stack_overflow ) = 0 ;
420
+ }
421
+
422
+ void gc_collect_root (void * * ptrs , size_t len ) {
423
+ #if !MICROPY_GC_SPLIT_HEAP
424
+ mp_state_mem_area_t * area = & MP_STATE_MEM (area );
425
+ #endif
426
+ for (size_t i = 0 ; i < len ; i ++ ) {
427
+ MICROPY_GC_HOOK_LOOP (i );
428
+ void * ptr = gc_get_ptr (ptrs , i );
429
+ #if MICROPY_GC_SPLIT_HEAP
430
+ mp_state_mem_area_t * area = gc_get_ptr_area (ptr );
431
+ if (!area ) {
432
+ continue ;
433
+ }
434
+ #else
435
+ if (!VERIFY_PTR (ptr )) {
436
+ continue ;
437
+ }
438
+ #endif
439
+ size_t block = BLOCK_FROM_PTR (area , ptr );
440
+ if (ATB_GET_KIND (area , block ) == AT_HEAD ) {
441
+ // An unmarked head: mark it, and mark all its children
442
+ ATB_HEAD_TO_MARK (area , block );
443
+ #if MICROPY_GC_SPLIT_HEAP
444
+ gc_mark_subtree (area , block );
445
+ #else
446
+ gc_mark_subtree (block );
447
+ #endif
448
+ }
449
+ }
450
+ }
451
+
382
452
// Take the given block as the topmost block on the stack. Check all it's
383
453
// children: mark the unmarked child blocks and put those newly marked
384
454
// blocks on the stack. When all children have been checked, pop off the
@@ -457,6 +527,25 @@ static void gc_mark_subtree(size_t block)
457
527
}
458
528
}
459
529
530
// Free all allocated blocks, running finalisers where registered. This is
// a collection pass in which no roots are traced, so nothing gets marked
// and the sweep reclaims everything.
void gc_sweep_all(void) {
    gc_collect_start_common();
    gc_collect_end();
}
534
+
535
+ void gc_collect_end (void ) {
536
+ gc_deal_with_stack_overflow ();
537
+ gc_sweep_run_finalisers ();
538
+ gc_sweep_free_blocks ();
539
+ #if MICROPY_GC_SPLIT_HEAP
540
+ MP_STATE_MEM (gc_last_free_area ) = & MP_STATE_MEM (area );
541
+ #endif
542
+ for (mp_state_mem_area_t * area = & MP_STATE_MEM (area ); area != NULL ; area = NEXT_AREA (area )) {
543
+ area -> gc_last_free_atb_index = 0 ;
544
+ }
545
+ MP_STATE_THREAD (gc_lock_depth ) &= ~GC_COLLECT_FLAG ;
546
+ GC_EXIT ();
547
+ }
548
+
460
549
static void gc_deal_with_stack_overflow (void ) {
461
550
while (MP_STATE_MEM (gc_stack_overflow )) {
462
551
MP_STATE_MEM (gc_stack_overflow ) = 0 ;
@@ -520,18 +609,16 @@ static void gc_sweep_run_finalisers(void) {
520
609
#endif // MICROPY_ENABLE_FINALISER
521
610
}
522
611
523
- static void gc_sweep (void ) {
612
+ // Free unmarked heads and their tails
613
+ static void gc_sweep_free_blocks (void ) {
524
614
#if MICROPY_PY_GC_COLLECT_RETVAL
525
615
MP_STATE_MEM (gc_collected ) = 0 ;
526
616
#endif
527
- // free unmarked heads and their tails
528
617
int free_tail = 0 ;
529
618
#if MICROPY_GC_SPLIT_HEAP_AUTO
530
619
mp_state_mem_area_t * prev_area = NULL ;
531
620
#endif
532
621
533
- gc_sweep_run_finalisers ();
534
-
535
622
for (mp_state_mem_area_t * area = & MP_STATE_MEM (area ); area != NULL ; area = NEXT_AREA (area )) {
536
623
size_t last_used_block = 0 ;
537
624
assert (area -> gc_last_used_block <= area -> gc_alloc_table_byte_len * BLOCKS_PER_ATB );
@@ -541,7 +628,7 @@ static void gc_sweep(void) {
541
628
switch (ATB_GET_KIND (area , block )) {
542
629
case AT_HEAD :
543
630
free_tail = 1 ;
544
- DEBUG_printf ("gc_sweep (%p)\n" , (void * )PTR_FROM_BLOCK (area , block ));
631
+ DEBUG_printf ("gc_sweep_free_blocks (%p)\n" , (void * )PTR_FROM_BLOCK (area , block ));
545
632
#if MICROPY_PY_GC_COLLECT_RETVAL
546
633
MP_STATE_MEM (gc_collected )++ ;
547
634
#endif
@@ -572,7 +659,7 @@ static void gc_sweep(void) {
572
659
#if MICROPY_GC_SPLIT_HEAP_AUTO
573
660
// Free any empty area, aside from the first one
574
661
if (last_used_block == 0 && prev_area != NULL ) {
575
- DEBUG_printf ("gc_sweep free empty area %p\n" , area );
662
+ DEBUG_printf ("gc_sweep_free_blocks free empty area %p\n" , area );
576
663
NEXT_AREA (prev_area ) = NEXT_AREA (area );
577
664
MP_PLAT_FREE_HEAP (area );
578
665
area = prev_area ;
@@ -582,34 +669,6 @@ static void gc_sweep(void) {
582
669
}
583
670
}
584
671
585
- static void gc_collect_start_common (void ) {
586
- GC_ENTER ();
587
- assert ((MP_STATE_THREAD (gc_lock_depth ) & GC_COLLECT_FLAG ) == 0 );
588
- MP_STATE_THREAD (gc_lock_depth ) |= GC_COLLECT_FLAG ;
589
- MP_STATE_MEM (gc_stack_overflow ) = 0 ;
590
- }
591
-
592
- void gc_collect_start (void ) {
593
- gc_collect_start_common ();
594
- #if MICROPY_GC_ALLOC_THRESHOLD
595
- MP_STATE_MEM (gc_alloc_amount ) = 0 ;
596
- #endif
597
-
598
- // Trace root pointers. This relies on the root pointers being organised
599
- // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
600
- // dict_globals, then the root pointer section of mp_state_vm.
601
- void * * ptrs = (void * * )(void * )& mp_state_ctx ;
602
- size_t root_start = offsetof(mp_state_ctx_t , thread .dict_locals );
603
- size_t root_end = offsetof(mp_state_ctx_t , vm .qstr_last_chunk );
604
- gc_collect_root (ptrs + root_start / sizeof (void * ), (root_end - root_start ) / sizeof (void * ));
605
-
606
- #if MICROPY_ENABLE_PYSTACK
607
- // Trace root pointers from the Python stack.
608
- ptrs = (void * * )(void * )MP_STATE_THREAD (pystack_start );
609
- gc_collect_root (ptrs , (MP_STATE_THREAD (pystack_cur ) - MP_STATE_THREAD (pystack_start )) / sizeof (void * ));
610
- #endif
611
- }
612
-
613
672
// Address sanitizer needs to know that the access to ptrs[i] must always be
614
673
// considered OK, even if it's a load from an address that would normally be
615
674
// prohibited (due to being undefined, in a red zone, etc).
@@ -625,54 +684,6 @@ static void *gc_get_ptr(void **ptrs, int i) {
625
684
return ptrs [i ];
626
685
}
627
686
628
- void gc_collect_root (void * * ptrs , size_t len ) {
629
- #if !MICROPY_GC_SPLIT_HEAP
630
- mp_state_mem_area_t * area = & MP_STATE_MEM (area );
631
- #endif
632
- for (size_t i = 0 ; i < len ; i ++ ) {
633
- MICROPY_GC_HOOK_LOOP (i );
634
- void * ptr = gc_get_ptr (ptrs , i );
635
- #if MICROPY_GC_SPLIT_HEAP
636
- mp_state_mem_area_t * area = gc_get_ptr_area (ptr );
637
- if (!area ) {
638
- continue ;
639
- }
640
- #else
641
- if (!VERIFY_PTR (ptr )) {
642
- continue ;
643
- }
644
- #endif
645
- size_t block = BLOCK_FROM_PTR (area , ptr );
646
- if (ATB_GET_KIND (area , block ) == AT_HEAD ) {
647
- // An unmarked head: mark it, and mark all its children
648
- ATB_HEAD_TO_MARK (area , block );
649
- #if MICROPY_GC_SPLIT_HEAP
650
- gc_mark_subtree (area , block );
651
- #else
652
- gc_mark_subtree (block );
653
- #endif
654
- }
655
- }
656
- }
657
-
658
- void gc_collect_end (void ) {
659
- gc_deal_with_stack_overflow ();
660
- gc_sweep ();
661
- #if MICROPY_GC_SPLIT_HEAP
662
- MP_STATE_MEM (gc_last_free_area ) = & MP_STATE_MEM (area );
663
- #endif
664
- for (mp_state_mem_area_t * area = & MP_STATE_MEM (area ); area != NULL ; area = NEXT_AREA (area )) {
665
- area -> gc_last_free_atb_index = 0 ;
666
- }
667
- MP_STATE_THREAD (gc_lock_depth ) &= ~GC_COLLECT_FLAG ;
668
- GC_EXIT ();
669
- }
670
-
671
- void gc_sweep_all (void ) {
672
- gc_collect_start_common ();
673
- gc_collect_end ();
674
- }
675
-
676
687
void gc_info (gc_info_t * info ) {
677
688
GC_ENTER ();
678
689
info -> total = 0 ;
0 commit comments