Module: GC
- Defined in:
- gc.c
Overview
The GC module provides an interface to Ruby’s mark and sweep garbage collection mechanism.
Some of the underlying methods are also available via the ObjectSpace module.
You may obtain information about the operation of the GC through GC::Profiler.
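For example, profiling data can be gathered through GC::Profiler; a minimal sketch (the workload and report output are illustrative only):
  GC::Profiler.enable
  100_000.times { Object.new }   # allocate enough objects to trigger collections
  GC.start
  GC::Profiler.report            # prints a timing table for each GC run to $stdout
  GC::Profiler.disable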
Defined Under Namespace
Modules: Profiler
Constant Summary
- INTERNAL_CONSTANTS = gc_constants
- OPTS = opts = rb_ary_new()
Class Method Summary
- .add_stress_to_class(*args) ⇒ Object
- .count ⇒ Integer
  The number of times GC occurred.
- .disable ⇒ Boolean
  Disables garbage collection, returning true if garbage collection was already disabled.
- .enable ⇒ Boolean
  Enables garbage collection, returning true if garbage collection was previously disabled.
- .latest_gc_info(*args) ⇒ Object
  Returns information about the most recent garbage collection.
- .malloc_allocated_size ⇒ Integer
  Returns the size of memory allocated by malloc().
- .malloc_allocations ⇒ Integer
  Returns the number of malloc() allocations.
- .remove_stress_to_class(*args) ⇒ Object
- .start(*args) ⇒ Object
  Initiates garbage collection, unless manually disabled.
- .stat(*args) ⇒ Object
  Returns a Hash containing information about the GC.
- .stress ⇒ Boolean
  Returns current status of GC stress mode.
- .stress=(flag) ⇒ Object
  Updates the GC stress mode.
- .verify_internal_consistency ⇒ nil
  Verify internal consistency.
Instance Method Summary
- #garbage_collect(*args) ⇒ Object
  Initiates garbage collection, unless manually disabled.
Class Method Details
.add_stress_to_class(*args) ⇒ Object
# File 'gc.c', line 9187

static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
        stress_to_class = rb_ary_tmp_new(argc);
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}
.count ⇒ Integer
The number of times GC occurred.
It returns the number of times GC occurred since the process started.
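A minimal usage sketch (the counts shown are illustrative only):
  before = GC.count
  GC.start
  GC.count - before   #=> 1  (at least one collection has run since `before` was taken)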
# File 'gc.c', line 6536

static VALUE
gc_count(VALUE self)
{
    return SIZET2NUM(rb_gc_count());
}
.disable ⇒ Boolean
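GC.disable pairs with GC.enable; as the summary above notes, each call reports the previous state. A minimal sketch (return values illustrate a process where GC starts out enabled):
  GC.disable   #=> false  (GC was not already disabled)
  GC.disable   #=> true   (GC was already disabled)
  GC.enable    #=> true   (GC was previously disabled)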
# File 'gc.c', line 7148

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    int old = dont_gc;

    gc_rest(objspace);

    dont_gc = TRUE;
    return old ? Qtrue : Qfalse;
}
.enable ⇒ Boolean
# File 'gc.c', line 7126

VALUE
rb_gc_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    int old = dont_gc;

    dont_gc = FALSE;
    return old ? Qtrue : Qfalse;
}
.latest_gc_info ⇒ Object .latest_gc_info(hash) ⇒ Hash .latest_gc_info(:major_by) ⇒ Object
Returns information about the most recent garbage collection.
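A minimal sketch of the three call forms; the keys and values are implementation dependent, and those shown here are illustrative only:
  GC.start
  GC.latest_gc_info            #=> {:major_by=>nil, :gc_by=>:method, ...}  (keys vary by version)
  GC.latest_gc_info(:gc_by)    #=> :method  (single value for a Symbol key)
  info = {}
  GC.latest_gc_info(info)      # fills and returns the given hash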
# File 'gc.c', line 6651

static VALUE
gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE arg = Qnil;

    if (rb_scan_args(argc, argv, "01", &arg) == 1) {
        if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
            rb_raise(rb_eTypeError, "non-hash or symbol given");
        }
    }

    if (arg == Qnil) {
        arg = rb_hash_new();
    }

    return gc_info_decode(objspace, arg, 0);
}
.malloc_allocated_size ⇒ Integer
Returns the size of memory allocated by malloc().
Only available if ruby was built with CALC_EXACT_MALLOC_SIZE.
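Because the method only exists when ruby was compiled with CALC_EXACT_MALLOC_SIZE, a respond_to? guard is a reasonable precaution; a sketch (the value shown is illustrative):
  if GC.respond_to?(:malloc_allocated_size)
    GC.malloc_allocated_size   #=> 1489948  (bytes currently allocated via malloc)
  end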
# File 'gc.c', line 7872

static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}
.malloc_allocations ⇒ Integer
Returns the number of malloc() allocations.
Only available if ruby was built with CALC_EXACT_MALLOC_SIZE.
# File 'gc.c', line 7887

static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}
.remove_stress_to_class(*args) ⇒ Object
# File 'gc.c', line 9199

static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        }
        if (RARRAY_LEN(stress_to_class) == 0) {
            stress_to_class = 0;
        }
    }
    return Qnil;
}
.start ⇒ nil .garbage_collect ⇒ nil .start(full_mark: true, immediate_sweep: true) ⇒ nil .garbage_collect(full_mark: true, immediate_sweep: true) ⇒ nil
Initiates garbage collection, unless manually disabled.
This method is defined with keyword arguments that default to true:
def GC.start(full_mark: true, immediate_sweep: true); end
Use full_mark: false to perform a minor GC. Use immediate_sweep: false to defer sweeping (use lazy sweep).
Note: These keyword arguments are implementation and version dependent. They are not guaranteed to be future-compatible, and may be ignored if the underlying implementation does not support them.
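A minimal usage sketch of the documented keyword arguments:
  GC.start                           # full collection, immediate sweep
  GC.start(full_mark: false)         # request a minor GC
  GC.start(immediate_sweep: false)   # defer sweeping (lazy sweep)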
# File 'gc.c', line 6449

static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int full_mark = TRUE, immediate_mark = TRUE, immediate_sweep = TRUE;
    VALUE opt = Qnil;
    static ID keyword_ids[3];

    rb_scan_args(argc, argv, "0:", &opt);

    if (!NIL_P(opt)) {
        VALUE kwvals[3];

        if (!keyword_ids[0]) {
            keyword_ids[0] = rb_intern("full_mark");
            keyword_ids[1] = rb_intern("immediate_mark");
            keyword_ids[2] = rb_intern("immediate_sweep");
        }

        rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);

        if (kwvals[0] != Qundef) full_mark = RTEST(kwvals[0]);
        if (kwvals[1] != Qundef) immediate_mark = RTEST(kwvals[1]);
        if (kwvals[2] != Qundef) immediate_sweep = RTEST(kwvals[2]);
    }

    garbage_collect(objspace, full_mark, immediate_mark, immediate_sweep, GPR_FLAG_METHOD);
    if (!finalizing) finalize_deferred(objspace);

    return Qnil;
}
.stat ⇒ Hash .stat(hash) ⇒ Hash .stat(:key) ⇒ Numeric
Returns a Hash containing information about the GC.
The hash includes internal GC statistics such as:
{
:count=>0,
:heap_allocated_pages=>24,
:heap_sorted_length=>24,
:heap_allocatable_pages=>0,
:heap_available_slots=>9783,
:heap_live_slots=>7713,
:heap_free_slots=>2070,
:heap_final_slots=>0,
:heap_marked_slots=>0,
:heap_swept_slots=>0,
:heap_eden_pages=>24,
:heap_tomb_pages=>0,
:total_allocated_pages=>24,
:total_freed_pages=>0,
:total_allocated_objects=>7796,
:total_freed_objects=>83,
:malloc_increase_bytes=>2389312,
:malloc_increase_bytes_limit=>16777216,
:minor_gc_count=>0,
:major_gc_count=>0,
:remembered_wb_unprotected_objects=>0,
:remembered_wb_unprotected_objects_limit=>0,
:old_objects=>0,
:old_objects_limit=>0,
:oldmalloc_increase_bytes=>2389760,
:oldmalloc_increase_bytes_limit=>16777216
}
The contents of the hash are implementation specific and may be changed in the future.
This method is only expected to work on C Ruby.
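A minimal sketch of the three call forms; the values shown are illustrative only:
  GC.stat                   #=> the full Hash shown above
  GC.stat(:count)           #=> 42   (single statistic for a Symbol key)
  h = {}
  GC.stat(h)                # fills and returns the given hash
  h[:heap_live_slots]       #=> 7713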
# File 'gc.c', line 7032

static VALUE
gc_stat(int argc, VALUE *argv, VALUE self)
{
    VALUE arg = Qnil;

    if (rb_scan_args(argc, argv, "01", &arg) == 1) {
        if (SYMBOL_P(arg)) {
            size_t value = gc_stat_internal(arg);
            return SIZET2NUM(value);
        }
        else if (!RB_TYPE_P(arg, T_HASH)) {
            rb_raise(rb_eTypeError, "non-hash or symbol given");
        }
    }

    if (arg == Qnil) {
        arg = rb_hash_new();
    }
    gc_stat_internal(arg);
    return arg;
}
.stress ⇒ Boolean
Returns current status of GC stress mode.
# File 'gc.c', line 7074

static VALUE
gc_stress_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress_mode;
}
.stress=(flag) ⇒ Object
Updates the GC stress mode.
When stress mode is enabled, the GC is invoked at every GC opportunity: all memory and object allocations.
Enabling stress mode will degrade performance; it is only for debugging.
flag can be true, false, or a fixnum formed by bit-ORing the following flags:
0x01:: no major GC
0x02:: no immediate sweep
0x04:: full mark after malloc/calloc/realloc
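A sketch of the documented settings, intended for debugging sessions only (the values shown are illustrative):
  GC.stress = true          # invoke GC at every opportunity
  GC.stress = 0x01 | 0x04   # stress GC without major GC, full mark after malloc/calloc/realloc
  GC.stress                 #=> 5
  GC.stress = false         # restore normal operation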
# File 'gc.c', line 7105

static VALUE
gc_stress_set_m(VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_stress_set(objspace, flag);
    return flag;
}
.verify_internal_consistency ⇒ nil
Verify internal consistency.
This method is implementation specific. Currently, it checks generational consistency if RGenGC is supported.
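A minimal sketch; when an inconsistency is found the interpreter aborts via rb_bug rather than raising an exception:
  GC.verify_internal_consistency   #=> nil  (no output when the heap is consistent)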
# File 'gc.c', line 5086

static VALUE
gc_verify_internal_consistency(VALUE dummy)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct verify_internal_consistency_struct data = {0};
    struct each_obj_args eo_args;

    data.objspace = objspace;
    gc_report(5, objspace, "gc_verify_internal_consistency: start\n");

    /* check relations */
    eo_args.callback = verify_internal_consistency_i;
    eo_args.data = (void *)&data;
    objspace_each_objects((VALUE)&eo_args);

    if (data.err_count != 0) {
#if RGENGC_CHECK_MODE >= 5
        objspace->rgengc.error_count = data.err_count;
        gc_marks_check(objspace, NULL, NULL);
        allrefs_dump(objspace);
#endif
        rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
    }

    /* check heap_page status */
    gc_verify_heap_pages(objspace);

    /* check counters */
    if (!is_lazy_sweeping(heap_eden) && !finalizing) {
        if (objspace_live_slots(objspace) != data.live_object_count) {
            fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
                    (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects);
            rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace_live_slots(objspace), data.live_object_count);
        }
    }

#if USE_RGENGC
    if (!is_marking(objspace)) {
        if (objspace->rgengc.old_objects != data.old_object_count) {
            rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.old_objects, data.old_object_count);
        }
        if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
            rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
        }
    }
#endif

    if (!finalizing) {
        size_t list_count = 0;

        {
            VALUE z = heap_pages_deferred_final;
            while (z) {
                list_count++;
                z = RZOMBIE(z)->next;
            }
        }

        if (heap_pages_final_slots != data.zombie_object_count ||
            heap_pages_final_slots != list_count) {
            rb_bug("inconsistent finalizing object count:\n"
                   "  expect %"PRIuSIZE"\n"
                   "  but    %"PRIuSIZE" zombies\n"
                   "  heap_pages_deferred_final list has %"PRIuSIZE" items.",
                   heap_pages_final_slots,
                   data.zombie_object_count,
                   list_count);
        }
    }

    gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");

    return Qnil;
}
Instance Method Details
#start ⇒ nil #garbage_collect ⇒ nil #start(full_mark: true, immediate_sweep: true) ⇒ nil #garbage_collect(full_mark: true, immediate_sweep: true) ⇒ nil
Initiates garbage collection, unless manually disabled.
This method is defined with keyword arguments that default to true:
def GC.start(full_mark: true, immediate_sweep: true); end
Use full_mark: false to perform a minor GC. Use immediate_sweep: false to defer sweeping (use lazy sweep).
Note: These keyword arguments are implementation and version dependent. They are not guaranteed to be future-compatible, and may be ignored if the underlying implementation does not support them.
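Since #garbage_collect is an instance method of the GC module, it can also be reached by mixing GC into a class; a hedged sketch (the Reporter class is hypothetical):
  class Reporter
    include GC   # makes #garbage_collect callable on instances

    def flush
      garbage_collect(full_mark: false)   # same effect as GC.start(full_mark: false)
    end
  end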
# File 'gc.c', line 6449

static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int full_mark = TRUE, immediate_mark = TRUE, immediate_sweep = TRUE;
    VALUE opt = Qnil;
    static ID keyword_ids[3];

    rb_scan_args(argc, argv, "0:", &opt);

    if (!NIL_P(opt)) {
        VALUE kwvals[3];

        if (!keyword_ids[0]) {
            keyword_ids[0] = rb_intern("full_mark");
            keyword_ids[1] = rb_intern("immediate_mark");
            keyword_ids[2] = rb_intern("immediate_sweep");
        }

        rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);

        if (kwvals[0] != Qundef) full_mark = RTEST(kwvals[0]);
        if (kwvals[1] != Qundef) immediate_mark = RTEST(kwvals[1]);
        if (kwvals[2] != Qundef) immediate_sweep = RTEST(kwvals[2]);
    }

    garbage_collect(objspace, full_mark, immediate_mark, immediate_sweep, GPR_FLAG_METHOD);
    if (!finalizing) finalize_deferred(objspace);

    return Qnil;
}