1 /***********************************************************************/
5 /* Damien Doligez, projet Para, INRIA Rocquencourt */
7 /* Copyright 1996 Institut National de Recherche en Informatique et */
8 /* en Automatique. All rights reserved. This file is distributed */
9 /* under the terms of the GNU Library General Public License, with */
10 /* the special exception on linking described in file ../LICENSE. */
12 /***********************************************************************/
14 /* $Id: minor_gc.c 8954 2008-07-28 12:03:55Z doligez $ */
/* Size in bytes of the minor heap (set by caml_set_minor_heap_size below). */
31 asize_t caml_minor_heap_size;
/* Unaligned base pointer of the minor-heap allocation, kept only so it can
   be passed to free(); caml_young_start is the aligned usable start. */
32 static void *caml_young_base = NULL;
/* [caml_young_start, caml_young_end): the minor heap.  Allocation proceeds
   downward from caml_young_end toward caml_young_start via caml_young_ptr;
   caml_young_limit is the trigger point for a minor collection. */
33 CAMLexport char *caml_young_start = NULL, *caml_young_end = NULL;
34 CAMLexport char *caml_young_ptr = NULL, *caml_young_limit = NULL;
/* Remembered sets: caml_ref_table holds major->minor pointers that must be
   scanned at minor GC; caml_weak_ref_table holds weak pointers into the
   minor heap (see caml_empty_minor_heap). */
36 CAMLexport struct caml_ref_table
37 caml_ref_table = { NULL, NULL, NULL, NULL, NULL, 0, 0},
38 caml_weak_ref_table = { NULL, NULL, NULL, NULL, NULL, 0, 0};
/* Nonzero while a minor collection is in progress. */
40 int caml_in_minor_collection = 0;
/* Counts completed minor collections (debug/statistics use — elided here). */
43 static unsigned long minor_gc_counter = 0;
/* (Re)allocate the buffer of ref table [tbl] to hold [sz] entries plus
   [rsv] reserve entries, discarding any previous contents.
   NOTE(review): interior lines are elided here; presumably tbl->size = sz
   and tbl->reserve = rsv are assigned before the allocation, and tbl->ptr
   is reset to tbl->base afterwards — confirm against the full source. */
46 void caml_alloc_table (struct caml_ref_table *tbl, asize_t sz, asize_t rsv)
52 new_table = (value **) caml_stat_alloc ((tbl->size + tbl->reserve)
/* Free the old buffer (entries are not preserved across reallocation). */
54 if (tbl->base != NULL) caml_stat_free (tbl->base);
55 tbl->base = new_table;
/* threshold marks the end of the normal area; limit starts there and may
   later be pushed out to end (the reserve area) by caml_realloc_ref_table. */
57 tbl->threshold = tbl->base + tbl->size;
58 tbl->limit = tbl->threshold;
59 tbl->end = tbl->base + tbl->size + tbl->reserve;
/* Free [tbl]'s buffer and return it to the unallocated state: all five
   pointers NULL.  The next caml_realloc_ref_table call will reallocate it. */
62 static void reset_table (struct caml_ref_table *tbl)
66 if (tbl->base != NULL) caml_stat_free (tbl->base);
67 tbl->base = tbl->ptr = tbl->threshold = tbl->limit = tbl->end = NULL;
/* Empty [tbl] without freeing its buffer, and pull limit back from the
   reserve area to threshold.
   NOTE(review): an elided line presumably also resets tbl->ptr to
   tbl->base — confirm against the full source. */
70 static void clear_table (struct caml_ref_table *tbl)
73 tbl->limit = tbl->threshold;
/* Allocate a new minor heap of [size] bytes, replacing any existing one,
   and reset the global young-region pointers and remembered sets.
   [size] must be a multiple of sizeof(value) within [Minor_heap_min,
   Minor_heap_max].  Raises Out_of_memory on allocation or page-table
   failure.  NOTE(review): local declarations (new_heap, new_heap_base)
   are on elided lines. */
76 void caml_set_minor_heap_size (asize_t size)
81 Assert (size >= Minor_heap_min);
82 Assert (size <= Minor_heap_max);
83 Assert (size % sizeof (value) == 0);
/* Drain the current minor heap first so no live young data is lost. */
84 if (caml_young_ptr != caml_young_end) caml_minor_collection ();
85 Assert (caml_young_ptr == caml_young_end);
86 new_heap = caml_aligned_malloc(size, 0, &new_heap_base);
87 if (new_heap == NULL) caml_raise_out_of_memory();
/* Register the new region as young in the page table before use. */
88 if (caml_page_table_add(In_young, new_heap, new_heap + size) != 0)
89 caml_raise_out_of_memory();
/* Release the previous minor heap, if any (free the unaligned base). */
91 if (caml_young_start != NULL){
92 caml_page_table_remove(In_young, caml_young_start, caml_young_end);
93 free (caml_young_base);
95 caml_young_base = new_heap_base;
96 caml_young_start = new_heap;
97 caml_young_end = new_heap + size;
98 caml_young_limit = caml_young_start;
/* Allocation pointer starts at the top: the heap grows downward. */
99 caml_young_ptr = caml_young_end;
100 caml_minor_heap_size = size;
/* The remembered sets are sized relative to the minor heap, so drop
   their buffers; they are lazily reallocated by caml_realloc_ref_table. */
102 reset_table (&caml_ref_table);
103 reset_table (&caml_weak_ref_table);
/* Head of the list of young blocks already copied to the major heap whose
   fields have not yet been scanned.  The list is threaded through the
   young originals: Field(original,0) is the forward pointer to the copy
   and Field(copy,1) links to the next list element (see caml_oldify_one
   and caml_oldify_mopup).  0 means empty. */
106 static value oldify_todo_list = 0;
108 /* Note that the tests on the tag depend on the fact that Infix_tag,
109 Forward_tag, and No_scan_tag are contiguous. */
/* Copy (promote) value [v] to the major heap if it is a young block, and
   store the resulting value in *[p].  Scannable blocks are only shallow-
   copied here: their fields are enqueued on oldify_todo_list and handled
   later by caml_oldify_mopup.  NOTE(review): local declarations, the
   tail_call: label, and the reads of hd/tag/sz are on elided lines. */
111 void caml_oldify_one (value v, value *p)
/* Non-blocks and already-major values are stored back unchanged (on an
   elided else branch, presumably). */
119 if (Is_block (v) && Is_young (v)){
120 Assert (Hp_val (v) >= caml_young_ptr);
/* A zero header marks a block already promoted by this collection. */
122 if (hd == 0){ /* If already forwarded */
123 *p = Field (v, 0); /* then forward pointer is first field. */
/* Scannable block (tag < Infix_tag, hence < No_scan_tag): copy only
   field 0 now; defer the remaining fields via the to-do list. */
126 if (tag < Infix_tag){
130 result = caml_alloc_shr (sz, tag);
/* Save field 0 before it is overwritten by the forward pointer. */
132 field0 = Field (v, 0);
133 Hd_val (v) = 0; /* Set forward flag */
134 Field (v, 0) = result; /* and forward pointer. */
136 Field (result, 0) = field0;
137 Field (result, 1) = oldify_todo_list; /* Add this block */
138 oldify_todo_list = v; /* to the "to do" list. */
/* Size-1 case (elided): no field 1 to borrow, so field 0 is oldified
   directly by looping on it. */
141 p = &Field (result, 0);
/* Non-scannable block (strings, floats, ...): copy all fields verbatim. */
145 }else if (tag >= No_scan_tag){
147 result = caml_alloc_shr (sz, tag);
148 for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
149 Hd_val (v) = 0; /* Set forward flag */
150 Field (v, 0) = result; /* and forward pointer. */
/* Infix pointer: oldify the enclosing closure block, then (on an elided
   line) rebase *p by the same offset into the copy. */
152 }else if (tag == Infix_tag){
153 mlsize_t offset = Infix_offset_hd (hd);
154 caml_oldify_one (v - offset, p); /* Cannot recurse deeper than 1. */
/* Forward_tag (lazy result): try to short-circuit v to its referent f. */
157 value f = Forward_val (v);
161 Assert (tag == Forward_tag);
163 vv = Is_in_value_area(f);
/* Tag of the referent, following its forward pointer if f itself was
   already promoted (header 0). */
165 ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
/* Short-circuiting would be unsound for these referents (e.g. a float,
   which must stay boxed behind the forward block). */
168 if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
169 /* Do not short-circuit the pointer. Copy as a normal block. */
170 Assert (Wosize_hd (hd) == 1);
171 result = caml_alloc_shr (1, Forward_tag);
173 Hd_val (v) = 0; /* Set (GC) forward flag */
174 Field (v, 0) = result; /* and forward pointer. */
175 p = &Field (result, 0);
/* Otherwise drop the indirection entirely. */
179 v = f; /* Follow the forwarding */
180 goto tail_call; /* then oldify. */
189 /* Finish the work that was put off by [caml_oldify_one].
190 Note that [caml_oldify_one] itself is called by oldify_mopup, so we
191 have to be careful to remove the first entry from the list before
192 oldifying its fields. */
/* NOTE(review): local declarations (v, new_v, f, i) are on elided lines. */
193 void caml_oldify_mopup (void)
198 while (oldify_todo_list != 0){
199 v = oldify_todo_list; /* Get the head. */
200 Assert (Hd_val (v) == 0); /* It must be forwarded. */
201 new_v = Field (v, 0); /* Follow forward pointer. */
/* The copy's field 1 temporarily holds the list link (set in
   caml_oldify_one); unlink before scanning so recursion is safe. */
202 oldify_todo_list = Field (new_v, 1); /* Remove from list. */
/* Field 0 of the copy was already filled in by caml_oldify_one; it only
   needs oldifying if it still points into the minor heap. */
204 f = Field (new_v, 0);
205 if (Is_block (f) && Is_young (f)){
206 caml_oldify_one (f, &Field (new_v, 0));
/* Remaining fields come from the original v (elided read of
   Field (v, i) into f, presumably) and overwrite the borrowed link. */
208 for (i = 1; i < Wosize_val (new_v); i++){
210 if (Is_block (f) && Is_young (f)){
211 caml_oldify_one (f, &Field (new_v, i));
213 Field (new_v, i) = f;
219 /* Make sure the minor heap is empty by performing a minor collection
NOTE(review): local declarations (value **r; value *p) are on elided lines. */
222 void caml_empty_minor_heap (void)
/* Nothing to do if no allocation has happened since the last collection. */
226 if (caml_young_ptr != caml_young_end){
227 caml_in_minor_collection = 1;
228 caml_gc_message (0x02, "<", 0);
/* Promote everything reachable from the roots and from the remembered
   set of major->minor pointers. */
229 caml_oldify_local_roots();
230 for (r = caml_ref_table.base; r < caml_ref_table.ptr; r++){
231 caml_oldify_one (**r, *r);
233 caml_oldify_mopup ();
/* Weak pointers: follow the forward pointer if the target survived,
   otherwise clear the weak slot to caml_weak_none. */
234 for (r = caml_weak_ref_table.base; r < caml_weak_ref_table.ptr; r++){
235 if (Is_block (**r) && Is_young (**r)){
236 if (Hd_val (**r) == 0){
237 **r = Field (**r, 0);
239 **r = caml_weak_none;
/* Clamp in case allocation overshot below the heap start. */
243 if (caml_young_ptr < caml_young_start) caml_young_ptr = caml_young_start;
244 caml_stat_minor_words += Wsize_bsize (caml_young_end - caml_young_ptr);
/* Reset the minor heap to empty and clear both remembered sets. */
245 caml_young_ptr = caml_young_end;
246 caml_young_limit = caml_young_start;
247 clear_table (&caml_ref_table);
248 clear_table (&caml_weak_ref_table);
249 caml_gc_message (0x02, ">", 0);
250 caml_in_minor_collection = 0;
/* Run young-finaliser bookkeeping even if no collection was needed. */
252 caml_final_empty_young ();
/* Debug builds (elided #ifdef, presumably): poison the emptied heap. */
256 for (p = (value *) caml_young_start; p < (value *) caml_young_end; ++p){
257 *p = Debug_free_minor;
264 /* Do a minor collection and a slice of major collection, call finalisation
266 Leave the minor heap empty.
*/
268 CAMLexport void caml_minor_collection (void)
270 intnat prev_alloc_words = caml_allocated_words;
272 caml_empty_minor_heap ();
/* Words promoted = growth of the major-heap allocation counter across
   the minor collection. */
274 caml_stat_promoted_words += caml_allocated_words - prev_alloc_words;
275 ++ caml_stat_minor_collections;
276 caml_major_collection_slice (0);
277 caml_force_major_slice = 0;
/* Finalisers may allocate young values, so empty the minor heap again. */
279 caml_final_do_calls ();
281 caml_empty_minor_heap ();
/* Run a minor collection if a major slice was requested (e.g. by
   caml_urge_major_slice), protecting [extra_root] across the GC so the
   caller gets back an up-to-date value. */
284 CAMLexport value caml_check_urgent_gc (value extra_root)
286 CAMLparam1 (extra_root);
287 if (caml_force_major_slice) caml_minor_collection();
288 CAMLreturn (extra_root);
/* Called when tbl->ptr reaches tbl->limit.  Three cases:
   1. table never allocated -> allocate it (1/8 of minor heap + reserve);
   2. limit == threshold    -> open up the reserve area and request a
                               major slice (the table will soon be cleared
                               by the resulting minor collection);
   3. reserve also full     -> grow the buffer with realloc (rare outside
                               native code).
   NOTE(review): locals (sz, and presumably a growth of tbl->size) are on
   elided lines; realloc failure aborts via caml_fatal_error. */
291 void caml_realloc_ref_table (struct caml_ref_table *tbl)
292 { Assert (tbl->ptr == tbl->limit);
293 Assert (tbl->limit <= tbl->end);
294 Assert (tbl->limit >= tbl->threshold);
296 if (tbl->base == NULL){
297 caml_alloc_table (tbl, caml_minor_heap_size / sizeof (value) / 8, 256);
298 }else if (tbl->limit == tbl->threshold){
299 caml_gc_message (0x08, "ref_table threshold crossed\n", 0);
/* Let writes spill into the reserve until the urged minor GC happens. */
300 tbl->limit = tbl->end;
301 caml_urge_major_slice ();
302 }else{ /* This will almost never happen with the bytecode interpreter. */
/* Save ptr as an offset: realloc may move the buffer. */
304 asize_t cur_ptr = tbl->ptr - tbl->base;
305 Assert (caml_force_major_slice);
308 sz = (tbl->size + tbl->reserve) * sizeof (value *);
309 caml_gc_message (0x08, "Growing ref_table to %"
310 ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
312 tbl->base = (value **) realloc ((char *) tbl->base, sz);
313 if (tbl->base == NULL){
314 caml_fatal_error ("Fatal error: ref_table overflow\n");
/* Rebuild the derived pointers against the (possibly moved) buffer. */
316 tbl->end = tbl->base + tbl->size + tbl->reserve;
317 tbl->threshold = tbl->base + tbl->size;
318 tbl->ptr = tbl->base + cur_ptr;
319 tbl->limit = tbl->end;