libguile/finalizers.c
/* Copyright (C) 2012, 2013, 2014 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <unistd.h>
#include <fcntl.h>

#include <full-write.h>

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"
#include "libguile/finalizers.h"
#include "libguile/gc.h"
#include "libguile/threads.h"

\f

static int automatic_finalization_p = 1;

static size_t finalization_count;


\f

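/* Register PROC as the finalizer for OBJ: when the collector finds OBJ
   unreachable, PROC is called with OBJ and DATA.  Any finalizer
   previously registered for OBJ is replaced.  */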
void
scm_i_set_finalizer (void *obj, scm_t_finalizer_proc proc, void *data)
{
  GC_finalization_proc prev;
  void *prev_data;
  GC_REGISTER_FINALIZER_NO_ORDER (obj, proc, data, &prev, &prev_data);
}

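/* To allow more than one finalizer per object, finalizers are chained:
   each link wraps a newly added PROC/DATA around whatever finalizer
   PREV/PREV_DATA was registered before it.  For a resuscitating link,
   the previous finalizer is re-registered on the object rather than
   called, since the object may be revived.  */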
struct scm_t_chained_finalizer
{
  int resuscitating_p;
  scm_t_finalizer_proc proc;
  void *data;
  scm_t_finalizer_proc prev;
  void *prev_data;
};

static void
chained_finalizer (void *obj, void *data)
{
  struct scm_t_chained_finalizer *chained_data = data;
  if (chained_data->resuscitating_p)
    {
      if (chained_data->prev)
        scm_i_set_finalizer (obj, chained_data->prev, chained_data->prev_data);
      chained_data->proc (obj, chained_data->data);
    }
  else
    {
      chained_data->proc (obj, chained_data->data);
      if (chained_data->prev)
        chained_data->prev (obj, chained_data->prev_data);
    }
}

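/* Add a finalizer that may resuscitate OBJ, chaining it onto any
   finalizer already registered for OBJ.  */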
void
scm_i_add_resuscitator (void *obj, scm_t_finalizer_proc proc, void *data)
{
  struct scm_t_chained_finalizer *chained_data;
  chained_data = scm_gc_malloc (sizeof (*chained_data), "chained finalizer");
  chained_data->resuscitating_p = 1;
  chained_data->proc = proc;
  chained_data->data = data;
  GC_REGISTER_FINALIZER_NO_ORDER (obj, chained_finalizer, chained_data,
                                  &chained_data->prev,
                                  &chained_data->prev_data);
}

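/* CD is a newly added ordinary (non-resuscitating) link at the head of
   an object's finalizer chain.  Swap its contents backwards past any
   resuscitating links below it, preserving the invariant that
   resuscitators come first in the chain and therefore run before
   ordinary finalizers.  */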
static void
shuffle_resuscitators_to_front (struct scm_t_chained_finalizer *cd)
{
  while (cd->prev == chained_finalizer)
    {
      struct scm_t_chained_finalizer *prev = cd->prev_data;
      scm_t_finalizer_proc proc = cd->proc;
      void *data = cd->data;

      if (!prev->resuscitating_p)
        break;

      cd->resuscitating_p = 1;
      cd->proc = prev->proc;
      cd->data = prev->data;

      prev->resuscitating_p = 0;
      prev->proc = proc;
      prev->data = data;

      cd = prev;
    }
}

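/* Add an ordinary (non-resuscitating) finalizer for OBJ, chaining it
   onto any existing finalizer and keeping resuscitators ahead of it in
   the chain.  */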
void
scm_i_add_finalizer (void *obj, scm_t_finalizer_proc proc, void *data)
{
  struct scm_t_chained_finalizer *chained_data;
  chained_data = scm_gc_malloc (sizeof (*chained_data), "chained finalizer");
  chained_data->resuscitating_p = 0;
  chained_data->proc = proc;
  chained_data->data = data;
  GC_REGISTER_FINALIZER_NO_ORDER (obj, chained_finalizer, chained_data,
                                  &chained_data->prev,
                                  &chained_data->prev_data);
  shuffle_resuscitators_to_front (chained_data);
}


\f

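/* A pair whose car is the %run-finalizers thunk.  Its cdr is #f while
   the async is idle; when finalizers are pending, the cell is spliced
   onto the active_asyncs list of the thread that should run them.  */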
static SCM finalizer_async_cell;

static SCM
run_finalizers_async_thunk (void)
{
  scm_run_finalizers ();
  return SCM_UNSPECIFIED;
}


/* The function queue_finalizer_async is run by the GC when there are
 * objects to finalize.  It will enqueue an asynchronous call to
 * GC_invoke_finalizers() at the next SCM_TICK in this thread.
 */
static void
queue_finalizer_async (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  static scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

  scm_i_pthread_mutex_lock (&lock);
  /* If t is NULL, that could be because we're allocating in
     threads.c:guilify_self_1.  In that case, rely on the
     GC_invoke_finalizers call there after the thread spins up.  */
  if (t && scm_is_false (SCM_CDR (finalizer_async_cell)))
    {
      SCM_SETCDR (finalizer_async_cell, t->active_asyncs);
      t->active_asyncs = finalizer_async_cell;
      t->pending_asyncs = 1;
    }
  scm_i_pthread_mutex_unlock (&lock);
}


\f

#if SCM_USE_PTHREAD_THREADS

static int finalization_pipe[2];
static scm_i_pthread_mutex_t finalization_thread_lock =
  SCM_I_PTHREAD_MUTEX_INITIALIZER;
static pthread_t finalization_thread;
static int finalization_thread_is_running = 0;

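/* The finalization thread sleeps on a pipe.  Writing a 0 byte tells it
   that finalizers are pending; writing a 1 byte tells it to exit, which
   we do before forking.  */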
static void
notify_finalizers_to_run (void)
{
  char byte = 0;
  full_write (finalization_pipe[1], &byte, 1);
}

static void
notify_about_to_fork (void)
{
  char byte = 1;
  full_write (finalization_pipe[1], &byte, 1);
}

struct finalization_pipe_data
{
  char byte;
  ssize_t n;
  int err;
};

static void*
read_finalization_pipe_data (void *data)
{
  struct finalization_pipe_data *fdata = data;

  fdata->n = read (finalization_pipe[0], &fdata->byte, 1);
  fdata->err = errno;

  return NULL;
}

static void*
finalization_thread_proc (void *unused)
{
  while (1)
    {
      struct finalization_pipe_data data;

      scm_without_guile (read_finalization_pipe_data, &data);

      if (data.n < 0 && data.err == EINTR)
        /* The read was interrupted by a signal; try again, instead of
           falling through to the switch with an uninitialized byte.  */
        continue;

      if (data.n <= 0)
        {
          perror ("error in finalization thread");
          return NULL;
        }

      switch (data.byte)
        {
        case 0:
          scm_run_finalizers ();
          break;
        case 1:
          return NULL;
        default:
          abort ();
        }
    }
}

static void*
run_finalization_thread (void *arg)
{
  return scm_with_guile (finalization_thread_proc, arg);
}

static void
start_finalization_thread (void)
{
  scm_i_pthread_mutex_lock (&finalization_thread_lock);
  if (!finalization_thread_is_running)
    {
      /* Use the raw pthread API and scm_with_guile, because we don't want
         to block on any lock that scm_spawn_thread might want to take,
         and we don't want to inherit the dynamic state (fluids) of the
         caller.  */
      if (pthread_create (&finalization_thread, NULL,
                          run_finalization_thread, NULL))
        perror ("error creating finalization thread");
      else
        finalization_thread_is_running = 1;
    }
  scm_i_pthread_mutex_unlock (&finalization_thread_lock);
}

static void
stop_finalization_thread (void)
{
  scm_i_pthread_mutex_lock (&finalization_thread_lock);
  if (finalization_thread_is_running)
    {
      notify_about_to_fork ();
      if (pthread_join (finalization_thread, NULL))
        perror ("joining finalization thread");
      finalization_thread_is_running = 0;
    }
  scm_i_pthread_mutex_unlock (&finalization_thread_lock);
}

static void
spawn_finalizer_thread (void)
{
  GC_set_finalizer_notifier (notify_finalizers_to_run);
  start_finalization_thread ();
}

#endif /* SCM_USE_PTHREAD_THREADS */


\f

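/* Called before a fork().  Stop the finalization thread, and arrange
   for a fresh one to be spawned the next time the collector reports
   pending finalizers.  */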
void
scm_i_finalizer_pre_fork (void)
{
#if SCM_USE_PTHREAD_THREADS
  if (automatic_finalization_p)
    {
      stop_finalization_thread ();
      GC_set_finalizer_notifier (spawn_finalizer_thread);
    }
#endif
}


\f

static void*
weak_pointer_ref (void *weak_pointer)
{
  return *(void **) weak_pointer;
}

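/* Finalizer attached to the two-word cells allocated by
   scm_i_register_weak_gc_callback, below.  If the weak reference in the
   cell still holds, call the callback on the referenced object and
   re-register this finalizer so that it fires again after the next
   GC.  */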
static void
weak_gc_finalizer (void *ptr, void *data)
{
  void **weak = ptr;
  void *val;
  void (*callback) (SCM) = weak[1];

  val = GC_call_with_alloc_lock (weak_pointer_ref, &weak[0]);

  if (!val)
    return;

  callback (SCM_PACK_POINTER (val));

  scm_i_set_finalizer (ptr, weak_gc_finalizer, data);
}

/* CALLBACK will be called on OBJ, as long as OBJ is accessible.  It
   will be called from a finalizer, which may be from an async or from
   another thread.

   As an implementation detail, the way this works is that we allocate
   a fresh pointer-less object holding two words.  We know that this
   object should get collected the next time GC is run, so we attach a
   finalizer to it so that we get a callback after GC happens.

   The first word of the object holds a weak reference to OBJ, and the
   second holds the callback pointer.  When the callback is called, we
   check if the weak reference on OBJ still holds.  If it doesn't hold,
   then OBJ is no longer accessible, and we're done.  Otherwise we call
   the callback and re-register a finalizer for our two-word GC object,
   effectively resuscitating the object so that we will get a callback
   on the next GC.

   We could use the scm_after_gc_hook, but using a finalizer has the
   advantage of potentially running in another thread, decreasing pause
   time.  */
void
scm_i_register_weak_gc_callback (SCM obj, void (*callback) (SCM))
{
  void **weak = GC_MALLOC_ATOMIC (sizeof (void*) * 2);

  weak[0] = SCM_UNPACK_POINTER (obj);
  weak[1] = (void*)callback;
  GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak, SCM2PTR (obj));

  scm_i_set_finalizer (weak, weak_gc_finalizer, NULL);
}
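
/* As an illustration of how the function above is meant to be used (the
   callback name, note_still_live, is hypothetical and not part of
   Guile):

     static void
     note_still_live (SCM obj)
     {
       scm_puts (";; obj survived a GC\n", scm_current_error_port ());
     }

     ...
     scm_i_register_weak_gc_callback (obj, note_still_live);

   After this call, note_still_live runs after each GC, from an async or
   from the finalization thread, until OBJ becomes unreachable.  */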


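/* Enable or disable automatic finalization, returning the previous
   setting.  While automatic finalization is disabled, no finalizer runs
   unless scm_run_finalizers is called explicitly.  */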
int
scm_set_automatic_finalization_enabled (int enabled_p)
{
  int was_enabled_p = automatic_finalization_p;

  if (enabled_p == was_enabled_p)
    return was_enabled_p;

  if (!scm_initialized_p)
    {
      automatic_finalization_p = enabled_p;
      return was_enabled_p;
    }

  if (enabled_p)
    {
#if SCM_USE_PTHREAD_THREADS
      if (pipe2 (finalization_pipe, O_CLOEXEC) != 0)
        scm_syserror (NULL);
      GC_set_finalizer_notifier (spawn_finalizer_thread);
#else
      GC_set_finalizer_notifier (queue_finalizer_async);
#endif
    }
  else
    {
      GC_set_finalizer_notifier (0);

#if SCM_USE_PTHREAD_THREADS
      stop_finalization_thread ();
      close (finalization_pipe[0]);
      close (finalization_pipe[1]);
      finalization_pipe[0] = -1;
      finalization_pipe[1] = -1;
#endif
    }

  automatic_finalization_p = enabled_p;

  return was_enabled_p;
}

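/* Run any pending finalizers, returning the number of finalizers that
   were run.  */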
int
scm_run_finalizers (void)
{
  int finalized = GC_invoke_finalizers ();

  finalization_count += finalized;

  return finalized;
}


\f

void
scm_init_finalizers (void)
{
  /* When the async is to run, the cdr of the pair gets set to the
     asyncs queue of the current thread.  */
  finalizer_async_cell =
    scm_cons (scm_c_make_gsubr ("%run-finalizers", 0, 0, 0,
                                run_finalizers_async_thunk),
              SCM_BOOL_F);

  if (automatic_finalization_p)
    GC_set_finalizer_notifier (queue_finalizer_async);
}

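/* Create the finalization pipe and arrange for the finalization thread
   to be spawned the first time the collector reports pending
   finalizers.  Only takes effect when automatic finalization is
   enabled.  */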
void
scm_init_finalizer_thread (void)
{
#if SCM_USE_PTHREAD_THREADS
  if (automatic_finalization_p)
    {
      if (pipe2 (finalization_pipe, O_CLOEXEC) != 0)
        scm_syserror (NULL);
      GC_set_finalizer_notifier (spawn_finalizer_thread);
    }
#endif
}