2 ** gc.c - garbage collector for mruby
4 ** See Copyright Notice in mruby.h
10 #include <mruby/array.h>
11 #include <mruby/class.h>
12 #include <mruby/data.h>
13 #include <mruby/hash.h>
14 #include <mruby/proc.h>
15 #include <mruby/range.h>
16 #include <mruby/string.h>
17 #include <mruby/variable.h>
19 #include <mruby/error.h>
20 #include <mruby/throw.h>
23 = Tri-color Incremental Garbage Collection
mruby's GC is a tri-color incremental mark-and-sweep collector.
Algorithm details are omitted; instead, the implementation is described
below.
Each object is painted in one of three colors (a sketch of the
transitions follows the list):
* White - Unmarked.
* Gray - Marked, but its child objects are not yet marked.
* Black - Marked, and its child objects are marked as well.
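A hedged sketch of the transitions, in terms of helpers defined later in
this file (obj stands for a hypothetical struct RBasic pointer):

    paint_partial_white(gc, obj);    // at allocation: current white
    add_gray_list(mrb, gc, obj);     // reached during marking: gray
    gc_mark_children(mrb, gc, obj);  // children pushed to the gray list; obj turns black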
There are two white colors, used in a flip-flop fashion: White-A and
White-B, which respectively represent the Current White color (the newly
allocated objects in the current GC cycle) and the Sweep Target White
color (the dead objects to be swept).
A and B are switched just at the beginning of the next GC cycle. At
that time, all the dead objects have been swept, while the objects newly
created in the current GC cycle that finally remain White are now
regarded as dead. Instead of traversing all the White-A objects and
repainting them White-B, we simply switch the meaning of White-A and
White-B, which is much cheaper.
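The flip itself is a single macro application (defined below) that only
toggles which white bit is "current":

    flip_white_part(gc);  // current_white_part ^= GC_WHITES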
As a result, the objects we sweep in the current GC cycle are always
ones left over from the previous GC cycle. This allows us to sweep
objects incrementally, without disturbance from the newly created
objects.
GC execution time and each step interval are determined by the live
object count. List of adjustment APIs:
* gc_interval_ratio_set
* gc_step_ratio_set
63 For details, see the comments for each function.
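As a hedged sketch, the same knobs can also be set from C by assigning
the mrb_gc fields that these functions update (the values here are
arbitrary examples):

    mrb->gc.interval_ratio = 150;  // start the next cycle sooner than the 200(%) default
    mrb->gc.step_ratio = 300;      // let each incremental step do more work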
mruby implementers and C extension library writers must insert a write
barrier when updating a reference held in a field of an object. When
updating a reference from a field of object A to object B, two different
types of write barrier are available (see the sketch after this list):
* mrb_field_write_barrier - marks object B (the object now referenced).
* mrb_write_barrier - marks object A (the object holding the reference).
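For example, a C extension storing a reference into a field might do the
following (a hedged sketch; parent, field and child are hypothetical):

    parent->field = child;  // object A now references object B
    mrb_field_write_barrier(mrb, (struct RBasic*)parent, (struct RBasic*)child);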
mruby's GC offers a Generational Mode while reusing the tri-color GC
infrastructure. After each sweep phase it treats the Black objects as Old
objects, instead of painting them White. The key ideas are the same as in
traditional generational GC:
* Minor GC - traverses only the Young objects (Gray objects) in the mark
             phase, then sweeps only the newly created objects, leaving
             the Old objects untouched.
* Major GC - same as a full regular GC cycle.
The difference from "traditional" generational GC is that the major GC
in mruby is triggered incrementally, in a tri-color manner.
92 For details, see the comments for each function.
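In code, the two kinds of cycle are distinguished by the predicates
defined below (a restatement of the macros, not new behavior):

    is_minor_gc(gc)  // generational && !full : young-only cycle
    is_major_gc(gc)  // generational && full  : incremental full cycle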
103 struct free_obj free;
105 struct RObject object;
107 struct RString string;
114 struct RException exc;
116 #ifdef MRB_WORD_BOXING
117 #ifndef MRB_WITHOUT_FLOAT
118 struct RFloat floatv;
127 #include <sys/time.h>
129 static double program_invoke_time = 0;
130 static double gc_time = 0;
131 static double gc_total_time = 0;
134 gettimeofday_time(void)
137 gettimeofday(&tv, NULL);
138 return tv.tv_sec + tv.tv_usec * 1e-6;
141 #define GC_INVOKE_TIME_REPORT(with) do {\
142 fprintf(stderr, "%s\n", with);\
143 fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
144 fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
145 fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
148 #define GC_TIME_START do {\
149 gc_time = gettimeofday_time();\
152 #define GC_TIME_STOP_AND_REPORT do {\
153 gc_time = gettimeofday_time() - gc_time;\
154 gc_total_time += gc_time;\
155 fprintf(stderr, "gc_state: %d\n", gc->state);\
156 fprintf(stderr, "live: %zu\n", gc->live);\
157 fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
158 fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
159 fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
160 fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
163 #define GC_INVOKE_TIME_REPORT(s)
164 #define GC_TIME_START
165 #define GC_TIME_STOP_AND_REPORT
174 #ifndef MRB_HEAP_PAGE_SIZE
175 #define MRB_HEAP_PAGE_SIZE 1024
178 #define GC_STEP_SIZE 1024
180 /* white: 001 or 010, black: 100, gray: 000 */
183 #define GC_WHITE_B (1 << 1)
184 #define GC_BLACK (1 << 2)
185 #define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
186 #define GC_COLOR_MASK 7
188 #define paint_gray(o) ((o)->color = GC_GRAY)
189 #define paint_black(o) ((o)->color = GC_BLACK)
190 #define paint_white(o) ((o)->color = GC_WHITES)
191 #define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
192 #define is_gray(o) ((o)->color == GC_GRAY)
193 #define is_white(o) ((o)->color & GC_WHITES)
194 #define is_black(o) ((o)->color & GC_BLACK)
195 #define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
196 #define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
197 #define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)
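/* i.e. an object is dead when it wears the sweep-target white (it survived the previous cycle unmarked) or is already a freed slot */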
199 #define objects(p) ((RVALUE *)p->objects)
202 mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
206 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
207 if (!p2 && len > 0 && mrb->gc.heaps) {
209 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
216 mrb_realloc(mrb_state *mrb, void *p, size_t len)
220 p2 = mrb_realloc_simple(mrb, p, len);
221 if (len == 0) return p2;
223 if (mrb->gc.out_of_memory) {
224 mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
225 /* mrb_panic(mrb); */
228 mrb->gc.out_of_memory = TRUE;
229 mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
233 mrb->gc.out_of_memory = FALSE;
240 mrb_malloc(mrb_state *mrb, size_t len)
242 return mrb_realloc(mrb, 0, len);
246 mrb_malloc_simple(mrb_state *mrb, size_t len)
248 return mrb_realloc_simple(mrb, 0, len);
252 mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
256 if (nelem > 0 && len > 0 &&
257 nelem <= SIZE_MAX / len) {
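/* the check above guarantees nelem * len cannot overflow size_t */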
260 p = mrb_malloc(mrb, size);
272 mrb_free(mrb_state *mrb, void *p)
274 (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
278 heap_p(mrb_gc *gc, struct RBasic *object)
287 if (&p[0].as.basic <= object && object <= &p[MRB_HEAP_PAGE_SIZE].as.basic) {
296 mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
297 mrb_gc *gc = &mrb->gc;
298 if (!heap_p(gc, object)) return TRUE;
299 return is_dead(gc, object);
303 link_heap_page(mrb_gc *gc, mrb_heap_page *page)
305 page->next = gc->heaps;
307 gc->heaps->prev = page;
312 unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
315 page->prev->next = page->next;
317 page->next->prev = page->prev;
318 if (gc->heaps == page)
319 gc->heaps = page->next;
325 link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
327 page->free_next = gc->free_heaps;
328 if (gc->free_heaps) {
329 gc->free_heaps->free_prev = page;
331 gc->free_heaps = page;
335 unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
338 page->free_prev->free_next = page->free_next;
340 page->free_next->free_prev = page->free_prev;
341 if (gc->free_heaps == page)
342 gc->free_heaps = page->free_next;
343 page->free_prev = NULL;
344 page->free_next = NULL;
348 add_heap(mrb_state *mrb, mrb_gc *gc)
350 mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
352 struct RBasic *prev = NULL;
354 for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
355 p->as.free.tt = MRB_TT_FREE;
356 p->as.free.next = prev;
359 page->freelist = prev;
361 link_heap_page(gc, page);
362 link_free_heap_page(gc, page);
365 #define DEFAULT_GC_INTERVAL_RATIO 200
366 #define DEFAULT_GC_STEP_RATIO 200
367 #define MAJOR_GC_INC_RATIO 120
368 #define MAJOR_GC_TOOMANY 10000
369 #define is_generational(gc) ((gc)->generational)
370 #define is_major_gc(gc) (is_generational(gc) && (gc)->full)
371 #define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
374 mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
376 #ifndef MRB_GC_FIXED_ARENA
377 gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
378 gc->arena_capa = MRB_GC_ARENA_SIZE;
381 gc->current_white_part = GC_WHITE_A;
383 gc->free_heaps = NULL;
385 gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
386 gc->step_ratio = DEFAULT_GC_STEP_RATIO;
387 #ifndef MRB_GC_TURN_OFF_GENERATIONAL
388 gc->generational = TRUE;
393 program_invoke_time = gettimeofday_time();
397 static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);
400 free_heap(mrb_state *mrb, mrb_gc *gc)
402 mrb_heap_page *page = gc->heaps;
409 for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
410 if (p->as.free.tt != MRB_TT_FREE)
411 obj_free(mrb, &p->as.basic, TRUE);
418 mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
421 #ifndef MRB_GC_FIXED_ARENA
422 mrb_free(mrb, gc->arena);
427 gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
429 #ifdef MRB_GC_FIXED_ARENA
430 if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
431 /* arena overflow error */
432 gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
433 mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
436 if (gc->arena_idx >= gc->arena_capa) {
438 gc->arena_capa = (int)(gc->arena_capa * 3 / 2);
439 gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
442 gc->arena[gc->arena_idx++] = p;
445 /* mrb_gc_protect() leaves the object in the arena */
447 mrb_gc_protect(mrb_state *mrb, mrb_value obj)
449 if (mrb_immediate_p(obj)) return;
450 gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
453 #define GC_ROOT_NAME "_gc_root_"
/* mrb_gc_register() keeps the object from being garbage collected.

   Register your object when it is exported to the C world without any
   reference from the Ruby world, e.g. as callback arguments. Don't
   forget to remove the object with mrb_gc_unregister(), otherwise it
   will leak.
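   A hedged usage sketch (the names are hypothetical):

     mrb_value blk;
     mrb_get_args(mrb, "&", &blk);  // a block handed to a C function
     mrb_gc_register(mrb, blk);     // pin it while only C references it
     // ... invoke it later, e.g. with mrb_yield() ...
     mrb_gc_unregister(mrb, blk);   // un-pin once C is done with it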
464 mrb_gc_register(mrb_state *mrb, mrb_value obj)
466 mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
467 mrb_value table = mrb_gv_get(mrb, root);
469 if (mrb_nil_p(table) || mrb_type(table) != MRB_TT_ARRAY) {
470 table = mrb_ary_new(mrb);
471 mrb_gv_set(mrb, root, table);
473 mrb_ary_push(mrb, table, obj);
476 /* mrb_gc_unregister() removes the object from GC root. */
478 mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
480 mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
481 mrb_value table = mrb_gv_get(mrb, root);
485 if (mrb_nil_p(table)) return;
486 if (mrb_type(table) != MRB_TT_ARRAY) {
487 mrb_gv_set(mrb, root, mrb_nil_value());
490 a = mrb_ary_ptr(table);
491 mrb_ary_modify(mrb, a);
492 for (i = 0; i < ARY_LEN(a); i++) {
493 if (mrb_obj_eq(mrb, ARY_PTR(a)[i], obj)) {
494 mrb_int len = ARY_LEN(a)-1;
495 mrb_value *ptr = ARY_PTR(a);
498 memmove(&ptr[i], &ptr[i + 1], (len - i) * sizeof(mrb_value));
504 MRB_API struct RBasic*
505 mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
508 static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
509 mrb_gc *gc = &mrb->gc;
521 mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
523 tt = MRB_INSTANCE_TT(cls);
524 if (tt != MRB_TT_FALSE &&
525 ttype != MRB_TT_SCLASS &&
526 ttype != MRB_TT_ICLASS &&
527 ttype != MRB_TT_ENV &&
529 mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %S", mrb_obj_value(cls));
536 if (gc->threshold < gc->live) {
537 mrb_incremental_gc(mrb);
539 if (gc->free_heaps == NULL) {
543 p = gc->free_heaps->freelist;
544 gc->free_heaps->freelist = ((struct free_obj*)p)->next;
545 if (gc->free_heaps->freelist == NULL) {
546 unlink_free_heap_page(gc, gc->free_heaps);
550 gc_protect(mrb, gc, p);
551 *(RVALUE *)p = RVALUE_zero;
554 paint_partial_white(gc, p);
559 add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
562 if (obj->tt > MRB_TT_MAXDEFINE) {
567 obj->gcnext = gc->gray_list;
572 ci_nregs(mrb_callinfo *ci)
574 struct RProc *p = ci->proc;
578 if (ci->argc < 0) return 3;
581 if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
582 n = p->body.irep->nregs;
585 if (n < 3) n = 3; /* self + args + blk */
588 n = ci->argc + 2; /* self + blk */
594 mark_context_stack(mrb_state *mrb, struct mrb_context *c)
600 if (c->stack == NULL) return;
601 e = c->stack - c->stbase;
603 e += ci_nregs(c->ci);
605 if (c->stbase + e > c->stend) e = c->stend - c->stbase;
606 for (i=0; i<e; i++) {
607 mrb_value v = c->stbase[i];
609 if (!mrb_immediate_p(v)) {
610 mrb_gc_mark(mrb, mrb_basic_ptr(v));
613 e = c->stend - c->stbase;
614 nil = mrb_nil_value();
621 mark_context(mrb_state *mrb, struct mrb_context *c)
627 if (c->status == MRB_FIBER_TERMINATED) return;
630 mark_context_stack(mrb, c);
632 /* mark call stack */
634 for (ci = c->cibase; ci <= c->ci; ci++) {
635 mrb_gc_mark(mrb, (struct RBasic*)ci->env);
636 mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
637 mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
640 /* mark ensure stack */
641 for (i=0; i<c->eidx; i++) {
642 mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
645 mrb_gc_mark(mrb, (struct RBasic*)c->fib);
653 gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
655 mrb_assert(is_gray(obj));
657 gc->gray_list = obj->gcnext;
658 mrb_gc_mark(mrb, (struct RBasic*)obj->c);
662 struct RClass *c = (struct RClass*)obj;
663 if (MRB_FLAG_TEST(c, MRB_FL_CLASS_IS_ORIGIN))
664 mrb_gc_mark_mt(mrb, c);
665 mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
673 struct RClass *c = (struct RClass*)obj;
675 mrb_gc_mark_mt(mrb, c);
676 mrb_gc_mark(mrb, (struct RBasic*)c->super);
682 case MRB_TT_EXCEPTION:
683 mrb_gc_mark_iv(mrb, (struct RObject*)obj);
688 struct RProc *p = (struct RProc*)obj;
690 mrb_gc_mark(mrb, (struct RBasic*)p->upper);
691 mrb_gc_mark(mrb, (struct RBasic*)p->e.env);
697 struct REnv *e = (struct REnv*)obj;
700 if (MRB_ENV_STACK_SHARED_P(e) && e->cxt && e->cxt->fib) {
701 mrb_gc_mark(mrb, (struct RBasic*)e->cxt->fib);
703 len = MRB_ENV_STACK_LEN(e);
704 for (i=0; i<len; i++) {
705 mrb_gc_mark_value(mrb, e->stack[i]);
712 struct mrb_context *c = ((struct RFiber*)obj)->cxt;
714 if (c) mark_context(mrb, c);
720 struct RArray *a = (struct RArray*)obj;
723 for (i=0,e=ARY_LEN(a); i<e; i++) {
724 mrb_gc_mark_value(mrb, ARY_PTR(a)[i]);
730 mrb_gc_mark_iv(mrb, (struct RObject*)obj);
731 mrb_gc_mark_hash(mrb, (struct RHash*)obj);
735 if (RSTR_FSHARED_P(obj) && !RSTR_NOFREE_P(obj)) {
736 struct RString *s = (struct RString*)obj;
737 mrb_gc_mark(mrb, (struct RBasic*)s->as.heap.aux.fshared);
742 mrb_gc_mark_range(mrb, (struct RRange*)obj);
751 mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
753 if (obj == 0) return;
754 if (!is_white(obj)) return;
755 mrb_assert((obj)->tt != MRB_TT_FREE);
756 add_gray_list(mrb, &mrb->gc, obj);
760 obj_free(mrb_state *mrb, struct RBasic *obj, int end)
762 DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt));
/* immediate objects - nothing to free */
771 #ifndef MRB_WITHOUT_FLOAT
773 #ifdef MRB_WORD_BOXING
781 mrb_gc_free_iv(mrb, (struct RObject*)obj);
784 case MRB_TT_EXCEPTION:
785 mrb_gc_free_iv(mrb, (struct RObject*)obj);
791 mrb_gc_free_mt(mrb, (struct RClass*)obj);
792 mrb_gc_free_iv(mrb, (struct RObject*)obj);
795 if (MRB_FLAG_TEST(obj, MRB_FL_CLASS_IS_ORIGIN))
796 mrb_gc_free_mt(mrb, (struct RClass*)obj);
800 struct REnv *e = (struct REnv*)obj;
802 if (MRB_ENV_STACK_SHARED_P(e)) {
803 /* cannot be freed */
807 mrb_free(mrb, e->stack);
814 struct mrb_context *c = ((struct RFiber*)obj)->cxt;
816 if (c && c != mrb->root_c) {
817 if (!end && c->status != MRB_FIBER_TERMINATED) {
818 mrb_callinfo *ci = c->ci;
819 mrb_callinfo *ce = c->cibase;
822 struct REnv *e = ci->env;
823 if (e && !mrb_object_dead_p(mrb, (struct RBasic*)e) &&
824 e->tt == MRB_TT_ENV && MRB_ENV_STACK_SHARED_P(e)) {
825 mrb_env_unshare(mrb, e);
830 mrb_free_context(mrb, c);
836 if (ARY_SHARED_P(obj))
837 mrb_ary_decref(mrb, ((struct RArray*)obj)->as.heap.aux.shared);
838 else if (!ARY_EMBED_P(obj))
839 mrb_free(mrb, ((struct RArray*)obj)->as.heap.ptr);
843 mrb_gc_free_iv(mrb, (struct RObject*)obj);
844 mrb_gc_free_hash(mrb, (struct RHash*)obj);
848 mrb_gc_free_str(mrb, (struct RString*)obj);
853 struct RProc *p = (struct RProc*)obj;
855 if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
856 mrb_irep *irep = p->body.irep;
858 mrb_irep_cutref(mrb, irep);
860 mrb_irep_decref(mrb, irep);
866 mrb_gc_free_range(mrb, ((struct RRange*)obj));
871 struct RData *d = (struct RData*)obj;
872 if (d->type && d->type->dfree) {
873 d->type->dfree(mrb, d->data);
875 mrb_gc_free_iv(mrb, (struct RObject*)obj);
882 obj->tt = MRB_TT_FREE;
886 root_scan_phase(mrb_state *mrb, mrb_gc *gc)
890 if (!is_minor_gc(gc)) {
891 gc->gray_list = NULL;
892 gc->atomic_gray_list = NULL;
897 for (i=0,e=gc->arena_idx; i<e; i++) {
898 mrb_gc_mark(mrb, gc->arena[i]);
900 /* mark class hierarchy */
901 mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
903 /* mark built-in classes */
904 mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
905 mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
906 mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
907 mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
908 mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
909 mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
910 mrb_gc_mark(mrb, (struct RBasic*)mrb->range_class);
912 #ifndef MRB_WITHOUT_FLOAT
913 mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
915 mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
916 mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
917 mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
918 mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
919 mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
920 mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);
922 mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
923 mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);
926 mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
928 mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
929 /* mark pre-allocated exception */
930 mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
931 mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
932 #ifdef MRB_GC_FIXED_ARENA
933 mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
936 mark_context(mrb, mrb->c);
937 if (mrb->root_c != mrb->c) {
938 mark_context(mrb, mrb->root_c);
943 gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
947 gc_mark_children(mrb, gc, obj);
958 struct RClass *c = (struct RClass*)obj;
960 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
961 children += mrb_gc_mark_mt_size(mrb, c);
968 case MRB_TT_EXCEPTION:
969 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
973 children += MRB_ENV_STACK_LEN(obj);
978 struct mrb_context *c = ((struct RFiber*)obj)->cxt;
982 if (!c || c->status == MRB_FIBER_TERMINATED) break;
985 i = c->stack - c->stbase;
988 i += ci_nregs(c->ci);
990 if (c->stbase + i > c->stend) i = c->stend - c->stbase;
993 /* mark ensure stack */
998 for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
1007 struct RArray *a = (struct RArray*)obj;
1008 children += ARY_LEN(a);
1013 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
1014 children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
1030 gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
1031 while (gc->gray_list) {
1032 if (is_gray(gc->gray_list))
1033 gc_mark_children(mrb, gc, gc->gray_list);
1035 gc->gray_list = gc->gray_list->gcnext;
1041 incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
1043 size_t tried_marks = 0;
1045 while (gc->gray_list && tried_marks < limit) {
1046 tried_marks += gc_gray_mark(mrb, gc, gc->gray_list);
1053 final_marking_phase(mrb_state *mrb, mrb_gc *gc)
1058 for (i=0,e=gc->arena_idx; i<e; i++) {
1059 mrb_gc_mark(mrb, gc->arena[i]);
1061 mrb_gc_mark_gv(mrb);
1062 mark_context(mrb, mrb->c);
1063 mark_context(mrb, mrb->root_c);
1064 mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
1065 gc_mark_gray_list(mrb, gc);
1066 mrb_assert(gc->gray_list == NULL);
1067 gc->gray_list = gc->atomic_gray_list;
1068 gc->atomic_gray_list = NULL;
1069 gc_mark_gray_list(mrb, gc);
1070 mrb_assert(gc->gray_list == NULL);
1074 prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
1076 gc->state = MRB_GC_STATE_SWEEP;
1077 gc->sweeps = gc->heaps;
1078 gc->live_after_mark = gc->live;
1082 incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
1084 mrb_heap_page *page = gc->sweeps;
1085 size_t tried_sweep = 0;
1087 while (page && (tried_sweep < limit)) {
1088 RVALUE *p = objects(page);
1089 RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
1091 mrb_bool dead_slot = TRUE;
1092 mrb_bool full = (page->freelist == NULL);
1094 if (is_minor_gc(gc) && page->old) {
/* skip a page which contains no young objects */
1100 if (is_dead(gc, &p->as.basic)) {
1101 if (p->as.basic.tt != MRB_TT_FREE) {
1102 obj_free(mrb, &p->as.basic, FALSE);
1103 if (p->as.basic.tt == MRB_TT_FREE) {
1104 p->as.free.next = page->freelist;
1105 page->freelist = (struct RBasic*)p;
1114 if (!is_generational(gc))
1115 paint_partial_white(gc, &p->as.basic); /* next gc target */
/* free the page if every object on it is dead */
1122 if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
1123 mrb_heap_page *next = page->next;
1125 unlink_heap_page(gc, page);
1126 unlink_free_heap_page(gc, page);
1127 mrb_free(mrb, page);
1131 if (full && freed > 0) {
1132 link_free_heap_page(gc, page);
1134 if (page->freelist == NULL && is_minor_gc(gc))
1140 tried_sweep += MRB_HEAP_PAGE_SIZE;
1142 gc->live_after_mark -= freed;
1149 incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
1151 switch (gc->state) {
1152 case MRB_GC_STATE_ROOT:
1153 root_scan_phase(mrb, gc);
1154 gc->state = MRB_GC_STATE_MARK;
1155 flip_white_part(gc);
1157 case MRB_GC_STATE_MARK:
1158 if (gc->gray_list) {
1159 return incremental_marking_phase(mrb, gc, limit);
1162 final_marking_phase(mrb, gc);
1163 prepare_incremental_sweep(mrb, gc);
1166 case MRB_GC_STATE_SWEEP: {
1167 size_t tried_sweep = 0;
1168 tried_sweep = incremental_sweep_phase(mrb, gc, limit);
1169 if (tried_sweep == 0)
1170 gc->state = MRB_GC_STATE_ROOT;
1181 incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
1184 incremental_gc(mrb, gc, SIZE_MAX);
1185 } while (gc->state != to_state);
1189 incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
1191 size_t limit = 0, result = 0;
1192 limit = (GC_STEP_SIZE/100) * gc->step_ratio;
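/* with the default step_ratio of 200(%), this is roughly 2 * GC_STEP_SIZE units of mark/sweep work per step */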
1193 while (result < limit) {
1194 result += incremental_gc(mrb, gc, limit);
1195 if (gc->state == MRB_GC_STATE_ROOT)
1199 gc->threshold = gc->live + GC_STEP_SIZE;
1203 clear_all_old(mrb_state *mrb, mrb_gc *gc)
1205 mrb_bool origin_mode = gc->generational;
1207 mrb_assert(is_generational(gc));
1208 if (is_major_gc(gc)) {
/* finish the half-baked GC */
1210 incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1213 /* Sweep the dead objects, then reset all the live objects
1214 * (including all the old objects, of course) to white. */
1215 gc->generational = FALSE;
1216 prepare_incremental_sweep(mrb, gc);
1217 incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1218 gc->generational = origin_mode;
1220 /* The gray objects have already been painted as white */
1221 gc->atomic_gray_list = gc->gray_list = NULL;
1225 mrb_incremental_gc(mrb_state *mrb)
1227 mrb_gc *gc = &mrb->gc;
1229 if (gc->disabled || gc->iterating) return;
1231 GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
1234 if (is_minor_gc(gc)) {
1235 incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1238 incremental_gc_step(mrb, gc);
1241 if (gc->state == MRB_GC_STATE_ROOT) {
1242 mrb_assert(gc->live >= gc->live_after_mark);
1243 gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
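/* with the default interval_ratio of 200(%), the next cycle starts once the live count reaches about twice live_after_mark */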
1244 if (gc->threshold < GC_STEP_SIZE) {
1245 gc->threshold = GC_STEP_SIZE;
1248 if (is_major_gc(gc)) {
1249 size_t threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
1252 if (threshold < MAJOR_GC_TOOMANY) {
1253 gc->majorgc_old_threshold = threshold;
1256 /* too many objects allocated during incremental GC, */
1257 /* instead of increasing threshold, invoke full GC. */
1261 else if (is_minor_gc(gc)) {
1262 if (gc->live > gc->majorgc_old_threshold) {
1263 clear_all_old(mrb, gc);
1269 GC_TIME_STOP_AND_REPORT;
1272 /* Perform a full gc cycle */
1274 mrb_full_gc(mrb_state *mrb)
1276 mrb_gc *gc = &mrb->gc;
1278 if (gc->disabled || gc->iterating) return;
1280 GC_INVOKE_TIME_REPORT("mrb_full_gc()");
1283 if (is_generational(gc)) {
1284 /* clear all the old objects back to young */
1285 clear_all_old(mrb, gc);
1288 else if (gc->state != MRB_GC_STATE_ROOT) {
/* finish the half-baked GC cycle */
1290 incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1293 incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1294 gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
1296 if (is_generational(gc)) {
1297 gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
1301 GC_TIME_STOP_AND_REPORT;
1305 mrb_garbage_collect(mrb_state *mrb)
1311 * Field write barrier
1312 * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
1316 mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
1318 mrb_gc *gc = &mrb->gc;
1320 if (!is_black(obj)) return;
1321 if (!is_white(value)) return;
1323 mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
1324 mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
1326 if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
1327 add_gray_list(mrb, gc, value);
1330 mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
paint_partial_white(gc, obj); /* repaint obj white so that no further write barrier fires for it during this sweep */
1337 * Paint obj(Black) to obj(Gray).
* The object painted gray will be traversed atomically in the final
* mark phase, so use this write barrier on frequently written spots,
* e.g. when setting an element of an Array.
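*
* A hedged sketch (a is a hypothetical struct RArray* being filled from C):
*
*     ARY_PTR(a)[i] = v;                         // many direct writes to A...
*     mrb_write_barrier(mrb, (struct RBasic*)a); // ...then a single barrier on A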
1345 mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
1347 mrb_gc *gc = &mrb->gc;
1349 if (!is_black(obj)) return;
1351 mrb_assert(!is_dead(gc, obj));
1352 mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
1354 obj->gcnext = gc->atomic_gray_list;
1355 gc->atomic_gray_list = obj;
1362 * Initiates full garbage collection.
1367 gc_start(mrb_state *mrb, mrb_value obj)
1370 return mrb_nil_value();
1375 * GC.enable -> true or false
1377 * Enables garbage collection, returning <code>true</code> if garbage
1378 * collection was previously disabled.
1380 * GC.disable #=> false
1381 * GC.enable #=> true
1382 * GC.enable #=> false
1387 gc_enable(mrb_state *mrb, mrb_value obj)
1389 mrb_bool old = mrb->gc.disabled;
1391 mrb->gc.disabled = FALSE;
1393 return mrb_bool_value(old);
1398 * GC.disable -> true or false
1400 * Disables garbage collection, returning <code>true</code> if garbage
1401 * collection was already disabled.
1403 * GC.disable #=> false
1404 * GC.disable #=> true
1409 gc_disable(mrb_state *mrb, mrb_value obj)
1411 mrb_bool old = mrb->gc.disabled;
1413 mrb->gc.disabled = TRUE;
1415 return mrb_bool_value(old);
1420 * GC.interval_ratio -> fixnum
* Returns the ratio of the GC interval. The default value is 200(%).
1427 gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
1429 return mrb_fixnum_value(mrb->gc.interval_ratio);
1434 * GC.interval_ratio = fixnum -> nil
* Updates the ratio of the GC interval. The default value is 200(%).
* At 100(%), the next GC starts as soon as the current GC cycle ends.
1442 gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
1446 mrb_get_args(mrb, "i", &ratio);
1447 mrb->gc.interval_ratio = (int)ratio;
1448 return mrb_nil_value();
1453 * GC.step_ratio -> fixnum
* Returns the step span ratio of incremental GC. The default value is 200(%).
1460 gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
1462 return mrb_fixnum_value(mrb->gc.step_ratio);
1467 * GC.step_ratio = fixnum -> nil
* Updates the step span ratio of incremental GC. The default value is 200(%).
* A larger ratio makes each incremental GC step longer.
1475 gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
1479 mrb_get_args(mrb, "i", &ratio);
1480 mrb->gc.step_ratio = (int)ratio;
1481 return mrb_nil_value();
1485 change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
1487 if (gc->disabled || gc->iterating) {
1488 mrb_raise(mrb, E_RUNTIME_ERROR, "generational mode changed when GC disabled");
1491 if (is_generational(gc) && !enable) {
1492 clear_all_old(mrb, gc);
1493 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1496 else if (!is_generational(gc) && enable) {
1497 incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1498 gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
1501 gc->generational = enable;
1506 * GC.generational_mode -> true or false
* Returns whether the GC runs in generational mode.
1513 gc_generational_mode_get(mrb_state *mrb, mrb_value self)
1515 return mrb_bool_value(mrb->gc.generational);
1520 * GC.generational_mode = true or false -> true or false
* Switches the GC between generational and normal mode.
1527 gc_generational_mode_set(mrb_state *mrb, mrb_value self)
1531 mrb_get_args(mrb, "b", &enable);
1532 if (mrb->gc.generational != enable)
1533 change_gen_gc_mode(mrb, &mrb->gc, enable);
1535 return mrb_bool_value(enable);
1540 gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
1542 mrb_heap_page* page;
1545 while (page != NULL) {
1550 for (i=0; i < MRB_HEAP_PAGE_SIZE; i++) {
1551 if ((*callback)(mrb, &p[i].as.basic, data) == MRB_EACH_OBJ_BREAK)
1559 mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
1561 mrb_bool iterating = mrb->gc.iterating;
1564 mrb->gc.iterating = TRUE;
1566 gc_each_objects(mrb, &mrb->gc, callback, data);
1569 struct mrb_jmpbuf *prev_jmp = mrb->jmp;
1570 struct mrb_jmpbuf c_jmp;
1574 gc_each_objects(mrb, &mrb->gc, callback, data);
1575 mrb->jmp = prev_jmp;
1576 mrb->gc.iterating = iterating;
1577 } MRB_CATCH(&c_jmp) {
1578 mrb->gc.iterating = iterating;
1579 mrb->jmp = prev_jmp;
1580 MRB_THROW(prev_jmp);
1581 } MRB_END_EXC(&c_jmp);
1587 static mrb_value gc_test(mrb_state *, mrb_value);
1592 mrb_init_gc(mrb_state *mrb)
1596 gc = mrb_define_module(mrb, "GC");
1598 mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
1599 mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
1600 mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
1601 mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
1602 mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
1603 mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
1604 mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
1605 mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
1606 mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
1609 mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
1617 test_mrb_field_write_barrier(void)
1619 mrb_state *mrb = mrb_open();
1620 struct RBasic *obj, *value;
1621 mrb_gc *gc = &mrb->gc;
1623 puts("test_mrb_field_write_barrier");
1624 gc->generational = FALSE;
1625 obj = mrb_basic_ptr(mrb_ary_new(mrb));
1626 value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
1628 paint_partial_white(gc, value);
1631 puts(" in MRB_GC_STATE_MARK");
1632 gc->state = MRB_GC_STATE_MARK;
1633 mrb_field_write_barrier(mrb, obj, value);
1635 mrb_assert(is_gray(value));
1638 puts(" in MRB_GC_STATE_SWEEP");
1639 paint_partial_white(gc, value);
1640 gc->state = MRB_GC_STATE_SWEEP;
1641 mrb_field_write_barrier(mrb, obj, value);
1643 mrb_assert(obj->color & gc->current_white_part);
1644 mrb_assert(value->color & gc->current_white_part);
1647 puts(" fail with black");
1648 gc->state = MRB_GC_STATE_MARK;
1650 paint_partial_white(gc, value);
1651 mrb_field_write_barrier(mrb, obj, value);
1653 mrb_assert(obj->color & gc->current_white_part);
1656 puts(" fail with gray");
1657 gc->state = MRB_GC_STATE_MARK;
1660 mrb_field_write_barrier(mrb, obj, value);
1662 mrb_assert(is_gray(value));
1666 puts("test_mrb_field_write_barrier_value");
1667 obj = mrb_basic_ptr(mrb_ary_new(mrb));
1668 mrb_value value = mrb_str_new_lit(mrb, "value");
1670 paint_partial_white(gc, mrb_basic_ptr(value));
1672 gc->state = MRB_GC_STATE_MARK;
1673 mrb_field_write_barrier_value(mrb, obj, value);
1675 mrb_assert(is_gray(mrb_basic_ptr(value)));
1682 test_mrb_write_barrier(void)
1684 mrb_state *mrb = mrb_open();
1686 mrb_gc *gc = &mrb->gc;
1688 puts("test_mrb_write_barrier");
1689 obj = mrb_basic_ptr(mrb_ary_new(mrb));
1692 puts(" in MRB_GC_STATE_MARK");
1693 gc->state = MRB_GC_STATE_MARK;
1694 mrb_write_barrier(mrb, obj);
1696 mrb_assert(is_gray(obj));
1697 mrb_assert(gc->atomic_gray_list == obj);
1700 puts(" fail with gray");
1702 mrb_write_barrier(mrb, obj);
1704 mrb_assert(is_gray(obj));
1710 test_add_gray_list(void)
1712 mrb_state *mrb = mrb_open();
1713 struct RBasic *obj1, *obj2;
1714 mrb_gc *gc = &mrb->gc;
1716 puts("test_add_gray_list");
1717 change_gen_gc_mode(mrb, gc, FALSE);
1718 mrb_assert(gc->gray_list == NULL);
1719 obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
1720 add_gray_list(mrb, gc, obj1);
1721 mrb_assert(gc->gray_list == obj1);
1722 mrb_assert(is_gray(obj1));
1724 obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
1725 add_gray_list(mrb, gc, obj2);
1726 mrb_assert(gc->gray_list == obj2);
1727 mrb_assert(gc->gray_list->gcnext == obj1);
1728 mrb_assert(is_gray(obj2));
1734 test_gc_gray_mark(void)
1736 mrb_state *mrb = mrb_open();
1737 mrb_value obj_v, value_v;
1739 size_t gray_num = 0;
1740 mrb_gc *gc = &mrb->gc;
1742 puts("test_gc_gray_mark");
1744 puts(" in MRB_TT_CLASS");
1745 obj = (struct RBasic*)mrb->object_class;
1747 gray_num = gc_gray_mark(mrb, gc, obj);
1748 mrb_assert(is_black(obj));
1749 mrb_assert(gray_num > 1);
1751 puts(" in MRB_TT_ARRAY");
1752 obj_v = mrb_ary_new(mrb);
1753 value_v = mrb_str_new_lit(mrb, "test");
1754 paint_gray(mrb_basic_ptr(obj_v));
1755 paint_partial_white(gc, mrb_basic_ptr(value_v));
1756 mrb_ary_push(mrb, obj_v, value_v);
1757 gray_num = gc_gray_mark(mrb, gc, mrb_basic_ptr(obj_v));
1758 mrb_assert(is_black(mrb_basic_ptr(obj_v)));
1759 mrb_assert(is_gray(mrb_basic_ptr(value_v)));
1760 mrb_assert(gray_num == 1);
1766 test_incremental_gc(void)
1768 mrb_state *mrb = mrb_open();
1769 size_t max = ~0, live = 0, total = 0, freed = 0;
1771 mrb_heap_page *page;
1772 mrb_gc *gc = &mrb->gc;
1774 puts("test_incremental_gc");
1775 change_gen_gc_mode(mrb, gc, FALSE);
1777 puts(" in mrb_full_gc");
1780 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1781 puts(" in MRB_GC_STATE_ROOT");
1782 incremental_gc(mrb, gc, max);
1783 mrb_assert(gc->state == MRB_GC_STATE_MARK);
1784 puts(" in MRB_GC_STATE_MARK");
1785 incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
1786 mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1788 puts(" in MRB_GC_STATE_SWEEP");
1791 RVALUE *p = objects(page);
1792 RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
1794 if (is_black(&p->as.basic)) {
1797 if (is_gray(&p->as.basic) && !is_dead(gc, &p->as.basic)) {
1798 printf("%p\n", &p->as.basic);
1803 total += MRB_HEAP_PAGE_SIZE;
1806 mrb_assert(gc->gray_list == NULL);
1808 incremental_gc(mrb, gc, max);
1809 mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1811 incremental_gc(mrb, gc, max);
1812 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1814 free = (RVALUE*)gc->heaps->freelist;
1817 free = (RVALUE*)free->as.free.next;
1820 mrb_assert(gc->live == live);
1821 mrb_assert(gc->live == total-freed);
1823 puts("test_incremental_gc(gen)");
1824 incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
1825 change_gen_gc_mode(mrb, gc, TRUE);
1827 mrb_assert(gc->full == FALSE);
1828 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1831 mrb_assert(is_minor_gc(gc));
1832 mrb_assert(gc->majorgc_old_threshold > 0);
1833 gc->majorgc_old_threshold = 0;
1834 mrb_incremental_gc(mrb);
1835 mrb_assert(gc->full == TRUE);
1836 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1839 mrb_assert(is_major_gc(gc));
1841 mrb_incremental_gc(mrb);
1842 } while (gc->state != MRB_GC_STATE_ROOT);
1843 mrb_assert(gc->full == FALSE);
1849 test_incremental_sweep_phase(void)
1851 mrb_state *mrb = mrb_open();
1852 mrb_gc *gc = &mrb->gc;
1854 puts("test_incremental_sweep_phase");
1857 gc->sweeps = gc->heaps;
1859 mrb_assert(gc->heaps->next->next == NULL);
1860 mrb_assert(gc->free_heaps->next->next == NULL);
1861 incremental_sweep_phase(mrb, gc, MRB_HEAP_PAGE_SIZE * 3);
1863 mrb_assert(gc->heaps->next == NULL);
1864 mrb_assert(gc->heaps == gc->free_heaps);
1870 gc_test(mrb_state *mrb, mrb_value self)
1872 test_mrb_field_write_barrier();
1873 test_mrb_write_barrier();
1874 test_add_gray_list();
1875 test_gc_gray_mark();
1876 test_incremental_gc();
1877 test_incremental_sweep_phase();
1878 return mrb_nil_value();
1880 #endif /* GC_DEBUG */
1881 #endif /* GC_TEST */