Fix segfault in thread_mark
libguile/threads.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014
3 * Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22 \f
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
30
31 #include <stdlib.h>
32 #if HAVE_UNISTD_H
33 #include <unistd.h>
34 #endif
35 #include <stdio.h>
36
37 #ifdef HAVE_STRING_H
38 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
39 #endif
40
41 #if HAVE_SYS_TIME_H
42 #include <sys/time.h>
43 #endif
44
45 #if HAVE_PTHREAD_NP_H
46 # include <pthread_np.h>
47 #endif
48
49 #include <sys/select.h>
50
51 #include <assert.h>
52 #include <fcntl.h>
53 #include <nproc.h>
54
55 #include "libguile/validate.h"
56 #include "libguile/root.h"
57 #include "libguile/eval.h"
58 #include "libguile/async.h"
59 #include "libguile/ports.h"
60 #include "libguile/threads.h"
61 #include "libguile/dynwind.h"
62 #include "libguile/iselect.h"
63 #include "libguile/fluids.h"
64 #include "libguile/continuations.h"
65 #include "libguile/gc.h"
66 #include "libguile/gc-inline.h"
67 #include "libguile/init.h"
68 #include "libguile/scmsigs.h"
69 #include "libguile/strings.h"
70 #include "libguile/vm.h"
71
72 #include <full-read.h>
73
74
75 \f
76
77 /* The GC "kind" for threads; it allows them to mark their VM
78 stacks. */
79 static int thread_gc_kind;
80
81 static struct GC_ms_entry *
82 thread_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
83 struct GC_ms_entry *mark_stack_limit, GC_word env)
84 {
85 int word;
86 const struct scm_i_thread *t = (struct scm_i_thread *) addr;
87
88 if (SCM_UNPACK (t->handle) == 0)
89 /* T must be on the free-list; ignore. (See warning in
90 gc_mark.h.) */
91 return mark_stack_ptr;
92
93 /* Mark T. We could be more precise, but it doesn't matter. */
94 for (word = 0; word * sizeof (*addr) < sizeof (*t); word++)
95 mark_stack_ptr = GC_MARK_AND_PUSH ((void *) addr[word],
96 mark_stack_ptr, mark_stack_limit,
97 NULL);
98
99 /* The pointerless freelists are threaded through their first word,
100 but GC doesn't know to trace them (as they are pointerless), so we
101 need to do that here. See the comments at the top of libgc's
102 gc_inline.h. */
103 if (t->pointerless_freelists)
104 {
105 size_t n;
106 for (n = 0; n < SCM_INLINE_GC_FREELIST_COUNT; n++)
107 {
108 void *chain = t->pointerless_freelists[n];
109 if (chain)
110 {
111 /* The first link is already marked by the freelist vector,
112 so we just have to mark the tail. */
113 while ((chain = *(void **)chain))
114 mark_stack_ptr = GC_mark_and_push (chain, mark_stack_ptr,
115 mark_stack_limit, NULL);
116 }
117 }
118 }
119
120 if (t->vp)
121 mark_stack_ptr = scm_i_vm_mark_stack (t->vp, mark_stack_ptr,
122 mark_stack_limit);
123
124 return mark_stack_ptr;
125 }
126
127
128 \f
129 static void
130 to_timespec (SCM t, scm_t_timespec *waittime)
131 {
132 if (scm_is_pair (t))
133 {
134 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
135 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
136 }
137 else
138 {
139 double time = scm_to_double (t);
140 double sec = scm_c_truncate (time);
141
142 waittime->tv_sec = (long) sec;
143 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
144 }
145 }
146
147
148 \f
149 /*** Queues */
150
151 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
152 the risk of false references leading to unbounded retained space as
153 described in "Bounding Space Usage of Conservative Garbage Collectors",
154 H.J. Boehm, 2001. */
155
156 /* Make an empty queue data structure.
157 */
158 static SCM
159 make_queue ()
160 {
161 return scm_cons (SCM_EOL, SCM_EOL);
162 }
163
164 /* Put T at the back of Q and return a handle that can be used with
165 remqueue to remove T from Q again.
166 */
167 static SCM
168 enqueue (SCM q, SCM t)
169 {
170 SCM c = scm_cons (t, SCM_EOL);
171 SCM_CRITICAL_SECTION_START;
172 if (scm_is_null (SCM_CDR (q)))
173 SCM_SETCDR (q, c);
174 else
175 SCM_SETCDR (SCM_CAR (q), c);
176 SCM_SETCAR (q, c);
177 SCM_CRITICAL_SECTION_END;
178 return c;
179 }
180
181 /* Remove the element that the handle C refers to from the queue Q. C
182 must have been returned from a call to enqueue. The return value
183 is zero when the element referred to by C has already been removed.
184 Otherwise, 1 is returned.
185 */
186 static int
187 remqueue (SCM q, SCM c)
188 {
189 SCM p, prev = q;
190 SCM_CRITICAL_SECTION_START;
191 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
192 {
193 if (scm_is_eq (p, c))
194 {
195 if (scm_is_eq (c, SCM_CAR (q)))
196 SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
197 SCM_SETCDR (prev, SCM_CDR (c));
198
199 /* GC-robust */
200 SCM_SETCDR (c, SCM_EOL);
201
202 SCM_CRITICAL_SECTION_END;
203 return 1;
204 }
205 prev = p;
206 }
207 SCM_CRITICAL_SECTION_END;
208 return 0;
209 }
210
211 /* Remove the front-most element from the queue Q and return it.
212 Return SCM_BOOL_F when Q is empty.
213 */
214 static SCM
215 dequeue (SCM q)
216 {
217 SCM c;
218 SCM_CRITICAL_SECTION_START;
219 c = SCM_CDR (q);
220 if (scm_is_null (c))
221 {
222 SCM_CRITICAL_SECTION_END;
223 return SCM_BOOL_F;
224 }
225 else
226 {
227 SCM_SETCDR (q, SCM_CDR (c));
228 if (scm_is_null (SCM_CDR (q)))
229 SCM_SETCAR (q, SCM_EOL);
230 SCM_CRITICAL_SECTION_END;
231
232 /* GC-robust */
233 SCM_SETCDR (c, SCM_EOL);
234
235 return SCM_CAR (c);
236 }
237 }
238
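/* Illustrative sketch (not part of the build): how the queue
   primitives above compose.  Compiled out with #if 0; the function
   name is hypothetical. */
#if 0
static void
example_queue_usage (void)
{
  SCM q = make_queue ();

  /* Enqueue two items, keeping the handle of the first so it can be
     removed out of order later. */
  SCM h1 = enqueue (q, scm_from_int (1));
  enqueue (q, scm_from_int (2));

  /* remqueue returns 1 when the handle was still in the queue. */
  if (remqueue (q, h1))
    {
      SCM head = dequeue (q);   /* => 2, the remaining element */
      SCM none = dequeue (q);   /* => #f, the queue is now empty */
      (void) head;
      (void) none;
    }
}
#endif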
239 /*** Thread smob routines */
240
241
242 static int
243 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
244 {
245 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
246 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
247 the struct case, hence we go via a union, and extract according to the
248 size of pthread_t. */
249 union {
250 scm_i_pthread_t p;
251 unsigned short us;
252 unsigned int ui;
253 unsigned long ul;
254 scm_t_uintmax um;
255 } u;
256 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
257 scm_i_pthread_t p = t->pthread;
258 scm_t_uintmax id;
259 u.p = p;
260 if (sizeof (p) == sizeof (unsigned short))
261 id = u.us;
262 else if (sizeof (p) == sizeof (unsigned int))
263 id = u.ui;
264 else if (sizeof (p) == sizeof (unsigned long))
265 id = u.ul;
266 else
267 id = u.um;
268
269 scm_puts_unlocked ("#<thread ", port);
270 scm_uintprint (id, 10, port);
271 scm_puts_unlocked (" (", port);
272 scm_uintprint ((scm_t_bits)t, 16, port);
273 scm_puts_unlocked (")>", port);
274 return 1;
275 }
276
277 \f
278 /*** Blocking on queues. */
279
280 /* See also scm_i_queue_async_cell for how such a block is
281 interrupted.
282 */
283
284 /* Put the current thread on QUEUE and go to sleep, waiting for it to
285 be woken up by a call to 'unblock_from_queue', or to be
286 interrupted. Upon return of this function, the current thread is
287 no longer on QUEUE, even when the sleep has been interrupted.
288
289 The caller of block_self must hold MUTEX. It will be atomically
290 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
291
292 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
293 as MUTEX is needed.
294
295 When WAITTIME is not NULL, the sleep will be aborted at that time.
296
297 The return value of block_self is an errno value. It will be zero
298 when the sleep has been successfully completed by a call to
299 unblock_from_queue, EINTR when it has been interrupted by the
300 delivery of a system async, and ETIMEDOUT when the timeout has
301 expired.
302
303 The system asyncs themselves are not executed by block_self.
304 */
305 static int
306 block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
307 const scm_t_timespec *waittime)
308 {
309 scm_i_thread *t = SCM_I_CURRENT_THREAD;
310 SCM q_handle;
311 int err;
312
313 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
314 err = EINTR;
315 else
316 {
317 t->block_asyncs++;
318 q_handle = enqueue (queue, t->handle);
319 if (waittime == NULL)
320 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
321 else
322 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
323
324 /* When we are still on QUEUE, we have been interrupted. We
325 report this only when no other error (such as a timeout) has
326 happened above.
327 */
328 if (remqueue (queue, q_handle) && err == 0)
329 err = EINTR;
330 t->block_asyncs--;
331 scm_i_reset_sleep (t);
332 }
333
334 return err;
335 }
336
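/* Illustrative sketch (not part of the build): the typical calling
   pattern for block_self.  The caller holds a mutex, blocks on a
   queue, and handles the three documented outcomes.  Compiled out
   with #if 0; names are hypothetical. */
#if 0
static void
example_block_self (SCM queue, SCM obj, scm_i_pthread_mutex_t *lock,
                    const scm_t_timespec *waittime)
{
  int err;

  scm_i_scm_pthread_mutex_lock (lock);
  err = block_self (queue, obj, lock, waittime);
  /* LOCK is held again here, whatever the outcome. */
  if (err == EINTR)
    ;   /* interrupted by a system async; the caller may run SCM_TICK */
  else if (err == ETIMEDOUT)
    ;   /* WAITTIME expired before unblock_from_queue was called */
  else
    ;   /* woken normally by unblock_from_queue */
  scm_i_pthread_mutex_unlock (lock);
}
#endif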
337 /* Wake up the first thread on QUEUE, if any. The awoken thread is
338 returned, or #f if the queue was empty.
339 */
340 static SCM
341 unblock_from_queue (SCM queue)
342 {
343 SCM thread = dequeue (queue);
344 if (scm_is_true (thread))
345 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
346 return thread;
347 }
348
349 \f
350 /* Getting into and out of guile mode.
351 */
352
353 /* Key used to attach a cleanup handler to a given thread. Also, if
354 thread-local storage is unavailable, this key is used to retrieve the
355 current thread with `pthread_getspecific ()'. */
356 scm_i_pthread_key_t scm_i_thread_key;
357
358
359 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
360
361 /* When thread-local storage (TLS) is available, a pointer to the
362 current-thread object is kept in TLS. Note that storing the thread-object
363 itself in TLS (rather than a pointer to some malloc'd memory) is not
364 possible since thread objects may live longer than the actual thread they
365 represent. */
366 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
367
368 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
369
370
371 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
372 static scm_i_thread *all_threads = NULL;
373 static int thread_count;
374
375 static SCM scm_i_default_dynamic_state;
376
377 /* Run when a fluid is collected. */
378 void
379 scm_i_reset_fluid (size_t n)
380 {
381 scm_i_thread *t;
382
383 scm_i_pthread_mutex_lock (&thread_admin_mutex);
384 for (t = all_threads; t; t = t->next_thread)
385 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
386 {
387 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
388
389 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
390 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
391 }
392 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
393 }
394
395 /* Perform first stage of thread initialisation, in non-guile mode.
396 */
397 static void
398 guilify_self_1 (struct GC_stack_base *base)
399 {
400 scm_i_thread t;
401
402 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
403 before allocating anything in this thread, because allocation could
404 cause GC to run, and GC could cause finalizers, which could invoke
405 Scheme functions, which need the current thread to be set. */
406
407 t.pthread = scm_i_pthread_self ();
408 t.handle = SCM_BOOL_F;
409 t.result = SCM_BOOL_F;
410 t.cleanup_handler = SCM_BOOL_F;
411 t.mutexes = SCM_EOL;
412 t.held_mutex = NULL;
413 t.join_queue = SCM_EOL;
414 t.freelists = NULL;
415 t.pointerless_freelists = NULL;
416 t.dynamic_state = SCM_BOOL_F;
417 t.dynstack.base = NULL;
418 t.dynstack.top = NULL;
419 t.dynstack.limit = NULL;
420 t.active_asyncs = SCM_EOL;
421 t.block_asyncs = 1;
422 t.pending_asyncs = 1;
423 t.critical_section_level = 0;
424 t.base = base->mem_base;
425 #ifdef __ia64__
426 t.register_backing_store_base = base->reg_base;
427 #endif
428 t.continuation_root = SCM_EOL;
429 t.continuation_base = t.base;
430 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
431 t.sleep_mutex = NULL;
432 t.sleep_object = SCM_BOOL_F;
433 t.sleep_fd = -1;
434 t.vp = NULL;
435
436 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
437 /* FIXME: Error conditions during the initialization phase are handled
438 gracelessly since public functions such as `scm_init_guile ()'
439 currently have type `void'. */
440 abort ();
441
442 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
443 t.canceled = 0;
444 t.exited = 0;
445 t.guile_mode = 0;
446
447 /* The switcheroo. */
448 {
449 scm_i_thread *t_ptr = &t;
450
451 GC_disable ();
452 t_ptr = GC_generic_malloc (sizeof (*t_ptr), thread_gc_kind);
453 memcpy (t_ptr, &t, sizeof t);
454
455 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
456
457 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
458 /* Cache the current thread in TLS for faster lookup. */
459 scm_i_current_thread = t_ptr;
460 #endif
461
462 scm_i_pthread_mutex_lock (&thread_admin_mutex);
463 t_ptr->next_thread = all_threads;
464 all_threads = t_ptr;
465 thread_count++;
466 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
467
468 GC_enable ();
469 }
470 }
471
472 /* Perform second stage of thread initialisation, in guile mode.
473 */
474 static void
475 guilify_self_2 (SCM parent)
476 {
477 scm_i_thread *t = SCM_I_CURRENT_THREAD;
478
479 t->guile_mode = 1;
480
481 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
482
483 t->continuation_root = scm_cons (t->handle, SCM_EOL);
484 t->continuation_base = t->base;
485
486 {
487 size_t size = SCM_INLINE_GC_FREELIST_COUNT * sizeof (void *);
488 t->freelists = scm_gc_malloc (size, "freelists");
489 t->pointerless_freelists = scm_gc_malloc (size, "atomic freelists");
490 }
491
492 if (scm_is_true (parent))
493 t->dynamic_state = scm_make_dynamic_state (parent);
494 else
495 t->dynamic_state = scm_i_make_initial_dynamic_state ();
496
497 t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
498 t->dynstack.limit = t->dynstack.base + 16;
499 t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;
500
501 t->join_queue = make_queue ();
502 t->block_asyncs = 0;
503
504 /* See note in finalizers.c:queue_finalizer_async(). */
505 GC_invoke_finalizers ();
506 }
507
508 \f
509 /*** Fat mutexes */
510
511 /* We implement our own mutex type since we want them to be 'fair', we
512 want to do fancy things while waiting for them (like running
513 asyncs) and we might want to add things that are nice for
514 debugging.
515 */
516
517 typedef struct {
518 scm_i_pthread_mutex_t lock;
519 SCM owner;
520 int level; /* how many times the owner has locked us; <= 1 for non-recursive mutexes */
521
522 int recursive; /* allow recursive locking? */
523 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
524 int allow_external_unlock; /* is it an error to unlock a mutex that is not
525 owned by the current thread? */
526
527 SCM waiting; /* the threads waiting for this mutex. */
528 } fat_mutex;
529
530 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
531 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
532
533 static SCM
534 call_cleanup (void *data)
535 {
536 SCM *proc_p = data;
537 return scm_call_0 (*proc_p);
538 }
539
540 /* Perform thread tear-down, in guile mode.
541 */
542 static void *
543 do_thread_exit (void *v)
544 {
545 scm_i_thread *t = (scm_i_thread *) v;
546
547 if (!scm_is_false (t->cleanup_handler))
548 {
549 SCM ptr = t->cleanup_handler;
550
551 t->cleanup_handler = SCM_BOOL_F;
552 t->result = scm_internal_catch (SCM_BOOL_T,
553 call_cleanup, &ptr,
554 scm_handle_by_message_noexit, NULL);
555 }
556
557 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
558
559 t->exited = 1;
560 close (t->sleep_pipe[0]);
561 close (t->sleep_pipe[1]);
562 while (scm_is_true (unblock_from_queue (t->join_queue)))
563 ;
564
565 while (!scm_is_null (t->mutexes))
566 {
567 SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);
568
569 if (scm_is_true (mutex))
570 {
571 fat_mutex *m = SCM_MUTEX_DATA (mutex);
572
573 scm_i_pthread_mutex_lock (&m->lock);
574
575 /* Check whether T owns MUTEX. This is usually the case, unless
576 T abandoned MUTEX; in that case, T is no longer its owner (see
577 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
578 if (scm_is_eq (m->owner, t->handle))
579 unblock_from_queue (m->waiting);
580
581 scm_i_pthread_mutex_unlock (&m->lock);
582 }
583
584 t->mutexes = scm_cdr (t->mutexes);
585 }
586
587 scm_i_pthread_mutex_unlock (&t->admin_mutex);
588
589 return NULL;
590 }
591
592 static void *
593 do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
594 {
595 /* Won't hurt if we are already registered. */
596 #if SCM_USE_PTHREAD_THREADS
597 GC_register_my_thread (sb);
598 #endif
599
600 return scm_with_guile (do_thread_exit, v);
601 }
602
603 static void
604 on_thread_exit (void *v)
605 {
606 /* This handler is executed in non-guile mode. */
607 scm_i_thread *t = (scm_i_thread *) v, **tp;
608
609 /* If we were canceled, we were unable to clear `t->guile_mode', so do
610 it here. */
611 t->guile_mode = 0;
612
613 /* If this thread was canceled while doing a cond wait, it will
614 still have a mutex locked, so we unlock it here. */
615 if (t->held_mutex)
616 {
617 scm_i_pthread_mutex_unlock (t->held_mutex);
618 t->held_mutex = NULL;
619 }
620
621 /* Reinstate the current thread for purposes of scm_with_guile
622 guile-mode cleanup handlers. Only really needed in the non-TLS
623 case but it doesn't hurt to be consistent. */
624 scm_i_pthread_setspecific (scm_i_thread_key, t);
625
626 /* Scheme-level thread finalizers and other cleanup needs to happen in
627 guile mode. */
628 GC_call_with_stack_base (do_thread_exit_trampoline, t);
629
630 /* Removing ourselves from the list of all threads needs to happen in
631 non-guile mode since all SCM values on our stack become
632 unprotected once we are no longer in the list. */
633 scm_i_pthread_mutex_lock (&thread_admin_mutex);
634 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
635 if (*tp == t)
636 {
637 *tp = t->next_thread;
638
639 /* GC-robust */
640 t->next_thread = NULL;
641
642 break;
643 }
644 thread_count--;
645
646 /* If there's only one other thread, it could be the signal delivery
647 thread, so we need to notify it to shut down by closing its read pipe.
648 If it's not the signal delivery thread, then closing the read pipe isn't
649 going to hurt. */
650 if (thread_count <= 1)
651 scm_i_close_signal_pipe ();
652
653 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
654
655 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
656
657 if (t->vp)
658 {
659 scm_i_vm_free_stack (t->vp);
660 t->vp = NULL;
661 }
662
663 #if SCM_USE_PTHREAD_THREADS
664 GC_unregister_my_thread ();
665 #endif
666 }
667
668 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
669
670 static void
671 init_thread_key (void)
672 {
673 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
674 }
675
676 /* Perform any initializations necessary to make the current thread
677 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
678 if necessary.
679
680 BASE is the stack base to use with GC.
681
682 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
683 which case the default dynamic state is used.
684
685 Returns zero when the thread was known to Guile already; otherwise
686 returns 1.
687
688 Note that it could be the case that the thread was known
689 to Guile, but not in guile mode (because we are within a
690 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
691 be sure. New threads are put into guile mode implicitly. */
692
693 static int
694 scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
695 {
696 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
697
698 if (SCM_I_CURRENT_THREAD)
699 {
700 /* Thread is already known to Guile.
701 */
702 return 0;
703 }
704 else
705 {
706 /* This thread has not been guilified yet.
707 */
708
709 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
710 if (scm_initialized_p == 0)
711 {
712 /* First thread ever to enter Guile. Run the full
713 initialization.
714 */
715 scm_i_init_guile (base);
716
717 #if SCM_USE_PTHREAD_THREADS
718 /* Allow other threads to come in later. */
719 GC_allow_register_threads ();
720 #endif
721
722 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
723 }
724 else
725 {
726 /* Guile is already initialized, but this thread enters it for
727 the first time. Only initialize this thread.
728 */
729 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
730
731 /* Register this thread with libgc. */
732 #if SCM_USE_PTHREAD_THREADS
733 GC_register_my_thread (base);
734 #endif
735
736 guilify_self_1 (base);
737 guilify_self_2 (parent);
738 }
739 return 1;
740 }
741 }
742
743 void
744 scm_init_guile ()
745 {
746 struct GC_stack_base stack_base;
747
748 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
749 scm_i_init_thread_for_guile (&stack_base,
750 scm_i_default_dynamic_state);
751 else
752 {
753 fprintf (stderr, "Failed to get stack base for current thread.\n");
754 exit (EXIT_FAILURE);
755 }
756 }
757
758 struct with_guile_args
759 {
760 GC_fn_type func;
761 void *data;
762 SCM parent;
763 };
764
765 static void *
766 with_guile_trampoline (void *data)
767 {
768 struct with_guile_args *args = data;
769
770 return scm_c_with_continuation_barrier (args->func, args->data);
771 }
772
773 static void *
774 with_guile_and_parent (struct GC_stack_base *base, void *data)
775 {
776 void *res;
777 int new_thread;
778 scm_i_thread *t;
779 struct with_guile_args *args = data;
780
781 new_thread = scm_i_init_thread_for_guile (base, args->parent);
782 t = SCM_I_CURRENT_THREAD;
783 if (new_thread)
784 {
785 /* We are in Guile mode. */
786 assert (t->guile_mode);
787
788 res = scm_c_with_continuation_barrier (args->func, args->data);
789
790 /* Leave Guile mode. */
791 t->guile_mode = 0;
792 }
793 else if (t->guile_mode)
794 {
795 /* Already in Guile mode. */
796 res = scm_c_with_continuation_barrier (args->func, args->data);
797 }
798 else
799 {
800 /* We are not in Guile mode, either because we are not within a
801 scm_with_guile, or because we are within a scm_without_guile.
802
803 This call to scm_with_guile() could happen from anywhere on the
804 stack, and in particular lower on the stack than it was when
805 this thread was first guilified. Thus, `base' must be
806 updated. */
807 #if SCM_STACK_GROWS_UP
808 if (SCM_STACK_PTR (base->mem_base) < t->base)
809 t->base = SCM_STACK_PTR (base->mem_base);
810 #else
811 if (SCM_STACK_PTR (base->mem_base) > t->base)
812 t->base = SCM_STACK_PTR (base->mem_base);
813 #endif
814
815 t->guile_mode = 1;
816 res = GC_call_with_gc_active (with_guile_trampoline, args);
817 t->guile_mode = 0;
818 }
819 return res;
820 }
821
822 static void *
823 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
824 {
825 struct with_guile_args args;
826
827 args.func = func;
828 args.data = data;
829 args.parent = parent;
830
831 return GC_call_with_stack_base (with_guile_and_parent, &args);
832 }
833
834 void *
835 scm_with_guile (void *(*func)(void *), void *data)
836 {
837 return scm_i_with_guile_and_parent (func, data,
838 scm_i_default_dynamic_state);
839 }
840
841 void *
842 scm_without_guile (void *(*func)(void *), void *data)
843 {
844 void *result;
845 scm_i_thread *t = SCM_I_CURRENT_THREAD;
846
847 if (t->guile_mode)
848 {
849 SCM_I_CURRENT_THREAD->guile_mode = 0;
850 result = GC_do_blocking (func, data);
851 SCM_I_CURRENT_THREAD->guile_mode = 1;
852 }
853 else
854 /* Otherwise we're not in guile mode, so nothing to do. */
855 result = func (data);
856
857 return result;
858 }
859
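/* Illustrative sketch (not part of the build): entering and leaving
   guile mode from C with the two public entry points above.  Compiled
   out with #if 0; the callback names are hypothetical. */
#if 0
static void *
do_scheme_work (void *data)
{
  /* Runs in guile mode: allocation and Scheme calls are allowed. */
  return scm_cons (SCM_BOOL_T, SCM_EOL);
}

static void *
do_blocking_io (void *data)
{
  /* Runs outside guile mode: must not touch SCM values, but may
     block without stalling the garbage collector. */
  return NULL;
}

static void
example_guile_mode (void)
{
  /* From a plain pthread: enter guile mode around the callback.
     Nesting is harmless, since an already-guilified thread is
     detected and reused. */
  scm_with_guile (do_scheme_work, NULL);

  /* From guile mode: bracket a blocking operation so that GC can
     proceed in the meantime. */
  scm_without_guile (do_blocking_io, NULL);
}
#endif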
860 \f
861 /*** Thread creation */
862
863 typedef struct {
864 SCM parent;
865 SCM thunk;
866 SCM handler;
867 SCM thread;
868 scm_i_pthread_mutex_t mutex;
869 scm_i_pthread_cond_t cond;
870 } launch_data;
871
872 static void *
873 really_launch (void *d)
874 {
875 launch_data *data = (launch_data *)d;
876 SCM thunk = data->thunk, handler = data->handler;
877 scm_i_thread *t;
878
879 t = SCM_I_CURRENT_THREAD;
880
881 scm_i_scm_pthread_mutex_lock (&data->mutex);
882 data->thread = scm_current_thread ();
883 scm_i_pthread_cond_signal (&data->cond);
884 scm_i_pthread_mutex_unlock (&data->mutex);
885
886 if (SCM_UNBNDP (handler))
887 t->result = scm_call_0 (thunk);
888 else
889 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
890
891 return 0;
892 }
893
894 static void *
895 launch_thread (void *d)
896 {
897 launch_data *data = (launch_data *)d;
898 scm_i_pthread_detach (scm_i_pthread_self ());
899 scm_i_with_guile_and_parent (really_launch, d, data->parent);
900 return NULL;
901 }
902
903 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
904 (SCM thunk, SCM handler),
905 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
906 "returning a new thread object representing the thread. The procedure\n"
907 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
908 "\n"
909 "When @var{handler} is specified, then @var{thunk} is called from\n"
910 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
911 "handler. This catch is established inside the continuation barrier.\n"
912 "\n"
913 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
914 "the @emph{exit value} of the thread and the thread is terminated.")
915 #define FUNC_NAME s_scm_call_with_new_thread
916 {
917 launch_data data;
918 scm_i_pthread_t id;
919 int err;
920
921 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
922 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
923 handler, SCM_ARG2, FUNC_NAME);
924
925 GC_collect_a_little ();
926 data.parent = scm_current_dynamic_state ();
927 data.thunk = thunk;
928 data.handler = handler;
929 data.thread = SCM_BOOL_F;
930 scm_i_pthread_mutex_init (&data.mutex, NULL);
931 scm_i_pthread_cond_init (&data.cond, NULL);
932
933 scm_i_scm_pthread_mutex_lock (&data.mutex);
934 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
935 if (err)
936 {
937 scm_i_pthread_mutex_unlock (&data.mutex);
938 errno = err;
939 scm_syserror (NULL);
940 }
941
942 while (scm_is_false (data.thread))
943 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
944
945 scm_i_pthread_mutex_unlock (&data.mutex);
946
947 return data.thread;
948 }
949 #undef FUNC_NAME
950
951 typedef struct {
952 SCM parent;
953 scm_t_catch_body body;
954 void *body_data;
955 scm_t_catch_handler handler;
956 void *handler_data;
957 SCM thread;
958 scm_i_pthread_mutex_t mutex;
959 scm_i_pthread_cond_t cond;
960 } spawn_data;
961
962 static void *
963 really_spawn (void *d)
964 {
965 spawn_data *data = (spawn_data *)d;
966 scm_t_catch_body body = data->body;
967 void *body_data = data->body_data;
968 scm_t_catch_handler handler = data->handler;
969 void *handler_data = data->handler_data;
970 scm_i_thread *t = SCM_I_CURRENT_THREAD;
971
972 scm_i_scm_pthread_mutex_lock (&data->mutex);
973 data->thread = scm_current_thread ();
974 scm_i_pthread_cond_signal (&data->cond);
975 scm_i_pthread_mutex_unlock (&data->mutex);
976
977 if (handler == NULL)
978 t->result = body (body_data);
979 else
980 t->result = scm_internal_catch (SCM_BOOL_T,
981 body, body_data,
982 handler, handler_data);
983
984 return 0;
985 }
986
987 static void *
988 spawn_thread (void *d)
989 {
990 spawn_data *data = (spawn_data *)d;
991 scm_i_pthread_detach (scm_i_pthread_self ());
992 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
993 return NULL;
994 }
995
996 SCM
997 scm_spawn_thread (scm_t_catch_body body, void *body_data,
998 scm_t_catch_handler handler, void *handler_data)
999 {
1000 spawn_data data;
1001 scm_i_pthread_t id;
1002 int err;
1003
1004 data.parent = scm_current_dynamic_state ();
1005 data.body = body;
1006 data.body_data = body_data;
1007 data.handler = handler;
1008 data.handler_data = handler_data;
1009 data.thread = SCM_BOOL_F;
1010 scm_i_pthread_mutex_init (&data.mutex, NULL);
1011 scm_i_pthread_cond_init (&data.cond, NULL);
1012
1013 scm_i_scm_pthread_mutex_lock (&data.mutex);
1014 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1015 if (err)
1016 {
1017 scm_i_pthread_mutex_unlock (&data.mutex);
1018 errno = err;
1019 scm_syserror (NULL);
1020 }
1021
1022 while (scm_is_false (data.thread))
1023 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1024
1025 scm_i_pthread_mutex_unlock (&data.mutex);
1026
1027 assert (SCM_I_IS_THREAD (data.thread));
1028
1029 return data.thread;
1030 }
1031
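/* Illustrative sketch (not part of the build): spawning a thread from
   C with scm_spawn_thread.  BODY runs in guile mode in the new thread;
   HANDLER catches anything it throws.  Compiled out with #if 0; names
   are hypothetical. */
#if 0
static SCM
example_body (void *data)
{
  return scm_from_int (42);
}

static SCM
example_handler (void *data, SCM tag, SCM throw_args)
{
  return SCM_BOOL_F;
}

static void
example_spawn (void)
{
  SCM thread = scm_spawn_thread (example_body, NULL,
                                 example_handler, NULL);
  /* join-thread returns the body's result, 42, once it finishes. */
  scm_join_thread (thread);
}
#endif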
1032 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1033 (),
1034 "Move the calling thread to the end of the scheduling queue.")
1035 #define FUNC_NAME s_scm_yield
1036 {
1037 return scm_from_bool (scm_i_sched_yield ());
1038 }
1039 #undef FUNC_NAME
1040
1041 SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1042 (SCM thread),
1043 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1044 "cannot be the current thread, and if @var{thread} has already terminated or "
1045 "been signaled to terminate, this function is a no-op.")
1046 #define FUNC_NAME s_scm_cancel_thread
1047 {
1048 scm_i_thread *t = NULL;
1049
1050 SCM_VALIDATE_THREAD (1, thread);
1051 t = SCM_I_THREAD_DATA (thread);
1052 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1053 if (!t->canceled)
1054 {
1055 t->canceled = 1;
1056 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1057 scm_i_pthread_cancel (t->pthread);
1058 }
1059 else
1060 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1061
1062 return SCM_UNSPECIFIED;
1063 }
1064 #undef FUNC_NAME
1065
1066 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1067 (SCM thread, SCM proc),
1068 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1069 "This handler will be called when the thread exits.")
1070 #define FUNC_NAME s_scm_set_thread_cleanup_x
1071 {
1072 scm_i_thread *t;
1073
1074 SCM_VALIDATE_THREAD (1, thread);
1075 if (!scm_is_false (proc))
1076 SCM_VALIDATE_THUNK (2, proc);
1077
1078 t = SCM_I_THREAD_DATA (thread);
1079 scm_i_pthread_mutex_lock (&t->admin_mutex);
1080
1081 if (!(t->exited || t->canceled))
1082 t->cleanup_handler = proc;
1083
1084 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1085
1086 return SCM_UNSPECIFIED;
1087 }
1088 #undef FUNC_NAME
1089
1090 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1091 (SCM thread),
1092 "Return the cleanup handler installed for the thread @var{thread}.")
1093 #define FUNC_NAME s_scm_thread_cleanup
1094 {
1095 scm_i_thread *t;
1096 SCM ret;
1097
1098 SCM_VALIDATE_THREAD (1, thread);
1099
1100 t = SCM_I_THREAD_DATA (thread);
1101 scm_i_pthread_mutex_lock (&t->admin_mutex);
1102 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1103 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1104
1105 return ret;
1106 }
1107 #undef FUNC_NAME
1108
1109 SCM scm_join_thread (SCM thread)
1110 {
1111 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1112 }
1113
1114 SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1115 (SCM thread, SCM timeout, SCM timeoutval),
1116 "Suspend execution of the calling thread until the target @var{thread} "
1117 "terminates, unless the target @var{thread} has already terminated. ")
1118 #define FUNC_NAME s_scm_join_thread_timed
1119 {
1120 scm_i_thread *t;
1121 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1122 SCM res = SCM_BOOL_F;
1123
1124 if (! (SCM_UNBNDP (timeoutval)))
1125 res = timeoutval;
1126
1127 SCM_VALIDATE_THREAD (1, thread);
1128 if (scm_is_eq (scm_current_thread (), thread))
1129 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
1130
1131 t = SCM_I_THREAD_DATA (thread);
1132 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1133
1134 if (! SCM_UNBNDP (timeout))
1135 {
1136 to_timespec (timeout, &ctimeout);
1137 timeout_ptr = &ctimeout;
1138 }
1139
1140 if (t->exited)
1141 res = t->result;
1142 else
1143 {
1144 while (1)
1145 {
1146 int err = block_self (t->join_queue, thread, &t->admin_mutex,
1147 timeout_ptr);
1148 if (err == 0)
1149 {
1150 if (t->exited)
1151 {
1152 res = t->result;
1153 break;
1154 }
1155 }
1156 else if (err == ETIMEDOUT)
1157 break;
1158
1159 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1160 SCM_TICK;
1161 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1162
1163 /* Check for exit again, since we just released and
1164 reacquired the admin mutex, before the next block_self
1165 call (which would block forever if t has already
1166 exited). */
1167 if (t->exited)
1168 {
1169 res = t->result;
1170 break;
1171 }
1172 }
1173 }
1174
1175 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1176
1177 return res;
1178 }
1179 #undef FUNC_NAME
1180
1181 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1182 (SCM obj),
1183 "Return @code{#t} if @var{obj} is a thread.")
1184 #define FUNC_NAME s_scm_thread_p
1185 {
1186 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1187 }
1188 #undef FUNC_NAME
1189
1190
1191 static int
1192 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1193 {
1194 fat_mutex *m = SCM_MUTEX_DATA (mx);
1195 scm_puts_unlocked ("#<mutex ", port);
1196 scm_uintprint ((scm_t_bits)m, 16, port);
1197 scm_puts_unlocked (">", port);
1198 return 1;
1199 }
1200
1201 static SCM
1202 make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
1203 {
1204 fat_mutex *m;
1205 SCM mx;
1206 scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
1207
1208 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1209 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1210 and so we can just copy it. */
1211 memcpy (&m->lock, &lock, sizeof (m->lock));
1212 m->owner = SCM_BOOL_F;
1213 m->level = 0;
1214
1215 m->recursive = recursive;
1216 m->unchecked_unlock = unchecked_unlock;
1217 m->allow_external_unlock = external_unlock;
1218
1219 m->waiting = SCM_EOL;
1220 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1221 m->waiting = make_queue ();
1222 return mx;
1223 }
1224
1225 SCM scm_make_mutex (void)
1226 {
1227 return scm_make_mutex_with_flags (SCM_EOL);
1228 }
1229
1230 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1231 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1232 SCM_SYMBOL (recursive_sym, "recursive");
1233
1234 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1235 (SCM flags),
1236 "Create a new mutex. ")
1237 #define FUNC_NAME s_scm_make_mutex_with_flags
1238 {
1239 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1240
1241 SCM ptr = flags;
1242 while (! scm_is_null (ptr))
1243 {
1244 SCM flag = SCM_CAR (ptr);
1245 if (scm_is_eq (flag, unchecked_unlock_sym))
1246 unchecked_unlock = 1;
1247 else if (scm_is_eq (flag, allow_external_unlock_sym))
1248 external_unlock = 1;
1249 else if (scm_is_eq (flag, recursive_sym))
1250 recursive = 1;
1251 else
1252 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1253 ptr = SCM_CDR (ptr);
1254 }
1255 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1256 }
1257 #undef FUNC_NAME
1258
1259 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1260 (void),
1261 "Create a new recursive mutex. ")
1262 #define FUNC_NAME s_scm_make_recursive_mutex
1263 {
1264 return make_fat_mutex (1, 0, 0);
1265 }
1266 #undef FUNC_NAME
1267
1268 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1269
1270 static SCM
1271 fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
1272 {
1273 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1274
1275 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
1276 SCM err = SCM_BOOL_F;
1277
1278 struct timeval current_time;
1279
1280 scm_i_scm_pthread_mutex_lock (&m->lock);
1281
1282 while (1)
1283 {
1284 if (m->level == 0)
1285 {
1286 m->owner = new_owner;
1287 m->level++;
1288
1289 if (SCM_I_IS_THREAD (new_owner))
1290 {
1291 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
1292
1293 /* FIXME: The order in which `t->admin_mutex' and
1294 `m->lock' are taken differs from that in
1295 `on_thread_exit', potentially leading to deadlocks. */
1296 scm_i_pthread_mutex_lock (&t->admin_mutex);
1297
1298 /* Only keep a weak reference to MUTEX so that it's not
1299 retained when not referenced elsewhere (bug #27450).
1300 The weak pair itself is eventually removed when MUTEX
1301 is unlocked. Note that `t->mutexes' lists mutexes
1302 currently held by T, so it should be small. */
1303 t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
1304 t->mutexes);
1305
1306 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1307 }
1308 *ret = 1;
1309 break;
1310 }
1311 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1312 {
1313 m->owner = new_owner;
1314 err = scm_cons (scm_abandoned_mutex_error_key,
1315 scm_from_locale_string ("lock obtained on abandoned "
1316 "mutex"));
1317 *ret = 1;
1318 break;
1319 }
1320 else if (scm_is_eq (m->owner, new_owner))
1321 {
1322 if (m->recursive)
1323 {
1324 m->level++;
1325 *ret = 1;
1326 }
1327 else
1328 {
1329 err = scm_cons (scm_misc_error_key,
1330 scm_from_locale_string ("mutex already locked "
1331 "by thread"));
1332 *ret = 0;
1333 }
1334 break;
1335 }
1336 else
1337 {
1338 if (timeout != NULL)
1339 {
1340 gettimeofday (&current_time, NULL);
1341 if (current_time.tv_sec > timeout->tv_sec ||
1342 (current_time.tv_sec == timeout->tv_sec &&
1343 current_time.tv_usec * 1000 > timeout->tv_nsec))
1344 {
1345 *ret = 0;
1346 break;
1347 }
1348 }
1349 block_self (m->waiting, mutex, &m->lock, timeout);
1350 scm_i_pthread_mutex_unlock (&m->lock);
1351 SCM_TICK;
1352 scm_i_scm_pthread_mutex_lock (&m->lock);
1353 }
1354 }
1355 scm_i_pthread_mutex_unlock (&m->lock);
1356 return err;
1357 }
1358
1359 SCM scm_lock_mutex (SCM mx)
1360 {
1361 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1362 }
1363
1364 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1365 (SCM m, SCM timeout, SCM owner),
1366 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1367 "thread blocks until the mutex becomes available. The function\n"
1368 "returns when the calling thread owns the lock on @var{m}.\n"
1369 "Locking a mutex that a thread already owns will succeed right\n"
1370 "away and will not block the thread. That is, Guile's mutexes\n"
1371 "are @emph{recursive}.")
1372 #define FUNC_NAME s_scm_lock_mutex_timed
1373 {
1374 SCM exception;
1375 int ret = 0;
1376 scm_t_timespec cwaittime, *waittime = NULL;
1377
1378 SCM_VALIDATE_MUTEX (1, m);
1379
1380 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1381 {
1382 to_timespec (timeout, &cwaittime);
1383 waittime = &cwaittime;
1384 }
1385
1386 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1387 SCM_VALIDATE_THREAD (3, owner);
1388
1389 exception = fat_mutex_lock (m, waittime, owner, &ret);
1390 if (!scm_is_false (exception))
1391 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1392 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1393 }
1394 #undef FUNC_NAME
1395
1396 static void
1397 lock_mutex_return_void (SCM mx)
1398 {
1399 (void) scm_lock_mutex (mx);
1400 }
1401
1402 static void
1403 unlock_mutex_return_void (SCM mx)
1404 {
1405 (void) scm_unlock_mutex (mx);
1406 }
1407
1408 void
1409 scm_dynwind_lock_mutex (SCM mutex)
1410 {
1411 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
1412 SCM_F_WIND_EXPLICITLY);
1413 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
1414 SCM_F_WIND_EXPLICITLY);
1415 }
1416
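/* Illustrative sketch (not part of the build): scm_dynwind_lock_mutex
   used inside a dynwind context, so the mutex is released however the
   context is left, including via a non-local exit.  Compiled out with
   #if 0; the function name is hypothetical. */
#if 0
static void
example_dynwind_lock (SCM mutex)
{
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);

  /* ... code that may throw; the unwind handler still unlocks ... */

  scm_dynwind_end ();   /* unlocks MUTEX via the unwind handler */
}
#endif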
1417 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1418 (SCM mutex),
1419 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1420 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1421 #define FUNC_NAME s_scm_try_mutex
1422 {
1423 SCM exception;
1424 int ret = 0;
1425 scm_t_timespec cwaittime, *waittime = NULL;
1426
1427 SCM_VALIDATE_MUTEX (1, mutex);
1428
1429 to_timespec (scm_from_int(0), &cwaittime);
1430 waittime = &cwaittime;
1431
1432 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1433 if (!scm_is_false (exception))
1434 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1435 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1436 }
1437 #undef FUNC_NAME
1438
1439 /*** Fat condition variables */
1440
1441 typedef struct {
1442 scm_i_pthread_mutex_t lock;
1443 SCM waiting; /* the threads waiting for this condition. */
1444 } fat_cond;
1445
1446 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1447 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1448
1449 static void
1450 remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1451 {
1452 SCM walk, prev;
1453
1454 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1455 walk = SCM_CDR (walk))
1456 {
1457 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1458 {
1459 if (scm_is_pair (prev))
1460 SCM_SETCDR (prev, SCM_CDR (walk));
1461 else
1462 t->mutexes = SCM_CDR (walk);
1463 break;
1464 }
1465 }
1466 }
1467
1468 static int
1469 fat_mutex_unlock (SCM mutex, SCM cond,
1470 const scm_t_timespec *waittime, int relock)
1471 {
1472 SCM owner;
1473 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1474 fat_cond *c = NULL;
1475 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1476 int err = 0, ret = 0;
1477
1478 scm_i_scm_pthread_mutex_lock (&m->lock);
1479
1480 owner = m->owner;
1481
1482 if (!scm_is_eq (owner, t->handle))
1483 {
1484 if (m->level == 0)
1485 {
1486 if (!m->unchecked_unlock)
1487 {
1488 scm_i_pthread_mutex_unlock (&m->lock);
1489 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1490 }
1491 owner = t->handle;
1492 }
1493 else if (!m->allow_external_unlock)
1494 {
1495 scm_i_pthread_mutex_unlock (&m->lock);
1496 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1497 }
1498 }
1499
1500 if (! (SCM_UNBNDP (cond)))
1501 {
1502 c = SCM_CONDVAR_DATA (cond);
1503 while (1)
1504 {
1505 int brk = 0;
1506
1507 if (m->level > 0)
1508 m->level--;
1509 if (m->level == 0)
1510 {
1511 /* Change the owner of MUTEX. */
1512 remove_mutex_from_thread (mutex, t);
1513 m->owner = unblock_from_queue (m->waiting);
1514 }
1515
1516 t->block_asyncs++;
1517
1518 err = block_self (c->waiting, cond, &m->lock, waittime);
1519 scm_i_pthread_mutex_unlock (&m->lock);
1520
1521 if (err == 0)
1522 {
1523 ret = 1;
1524 brk = 1;
1525 }
1526 else if (err == ETIMEDOUT)
1527 {
1528 ret = 0;
1529 brk = 1;
1530 }
1531 else if (err != EINTR)
1532 {
1533 errno = err;
1534 scm_syserror (NULL);
1535 }
1536
1537 if (brk)
1538 {
1539 if (relock)
1540 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1541 t->block_asyncs--;
1542 break;
1543 }
1544
1545 t->block_asyncs--;
1546 scm_async_tick ();
1547
1548 scm_remember_upto_here_2 (cond, mutex);
1549
1550 scm_i_scm_pthread_mutex_lock (&m->lock);
1551 }
1552 }
1553 else
1554 {
1555 if (m->level > 0)
1556 m->level--;
1557 if (m->level == 0)
1558 {
1559 /* Change the owner of MUTEX. */
1560 remove_mutex_from_thread (mutex, t);
1561 m->owner = unblock_from_queue (m->waiting);
1562 }
1563
1564 scm_i_pthread_mutex_unlock (&m->lock);
1565 ret = 1;
1566 }
1567
1568 return ret;
1569 }
1570
1571 SCM scm_unlock_mutex (SCM mx)
1572 {
1573 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1574 }
1575
1576 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1577 (SCM mx, SCM cond, SCM timeout),
1578 "Unlocks @var{mutex} if the calling thread owns the lock on "
1579 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1580 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1581 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1582 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1583 "with a call to @code{unlock-mutex}. Only the last call to "
1584 "@code{unlock-mutex} will actually unlock the mutex. ")
1585 #define FUNC_NAME s_scm_unlock_mutex_timed
1586 {
1587 scm_t_timespec cwaittime, *waittime = NULL;
1588
1589 SCM_VALIDATE_MUTEX (1, mx);
1590 if (! (SCM_UNBNDP (cond)))
1591 {
1592 SCM_VALIDATE_CONDVAR (2, cond);
1593
1594 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1595 {
1596 to_timespec (timeout, &cwaittime);
1597 waittime = &cwaittime;
1598 }
1599 }
1600
1601 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1602 }
1603 #undef FUNC_NAME
1604
1605 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1606 (SCM obj),
1607 "Return @code{#t} if @var{obj} is a mutex.")
1608 #define FUNC_NAME s_scm_mutex_p
1609 {
1610 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1611 }
1612 #undef FUNC_NAME
1613
1614 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1615 (SCM mx),
1616 "Return the thread owning @var{mx}, or @code{#f}.")
1617 #define FUNC_NAME s_scm_mutex_owner
1618 {
1619 SCM owner;
1620 fat_mutex *m = NULL;
1621
1622 SCM_VALIDATE_MUTEX (1, mx);
1623 m = SCM_MUTEX_DATA (mx);
1624 scm_i_pthread_mutex_lock (&m->lock);
1625 owner = m->owner;
1626 scm_i_pthread_mutex_unlock (&m->lock);
1627
1628 return owner;
1629 }
1630 #undef FUNC_NAME
1631
1632 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1633 (SCM mx),
1634 "Return the lock level of mutex @var{mx}.")
1635 #define FUNC_NAME s_scm_mutex_level
1636 {
1637 SCM_VALIDATE_MUTEX (1, mx);
1638 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1639 }
1640 #undef FUNC_NAME
1641
1642 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1643 (SCM mx),
1644 "Returns @code{#t} if the mutex @var{mx} is locked.")
1645 #define FUNC_NAME s_scm_mutex_locked_p
1646 {
1647 SCM_VALIDATE_MUTEX (1, mx);
1648 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1649 }
1650 #undef FUNC_NAME
1651
1652 static int
1653 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1654 {
1655 fat_cond *c = SCM_CONDVAR_DATA (cv);
1656 scm_puts_unlocked ("#<condition-variable ", port);
1657 scm_uintprint ((scm_t_bits)c, 16, port);
1658 scm_puts_unlocked (">", port);
1659 return 1;
1660 }
1661
1662 SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1663 (void),
1664 "Make a new condition variable.")
1665 #define FUNC_NAME s_scm_make_condition_variable
1666 {
1667 fat_cond *c;
1668 SCM cv;
1669
1670 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1671 c->waiting = SCM_EOL;
1672 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1673 c->waiting = make_queue ();
1674 return cv;
1675 }
1676 #undef FUNC_NAME
1677
1678 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1679 (SCM cv, SCM mx, SCM t),
1680 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1681 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1682 "is locked again when this function returns. When @var{t} is given, "
1683 "it specifies a point in time where the waiting should be aborted. It "
1684 "can be either a integer as returned by @code{current-time} or a pair "
1685 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1686 "mutex is locked and @code{#f} is returned. When the condition "
1687 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1688 "is returned. ")
1689 #define FUNC_NAME s_scm_timed_wait_condition_variable
1690 {
1691 scm_t_timespec waittime, *waitptr = NULL;
1692
1693 SCM_VALIDATE_CONDVAR (1, cv);
1694 SCM_VALIDATE_MUTEX (2, mx);
1695
1696 if (!SCM_UNBNDP (t))
1697 {
1698 to_timespec (t, &waittime);
1699 waitptr = &waittime;
1700 }
1701
1702 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1703 }
1704 #undef FUNC_NAME
1705
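/* Illustrative sketch (not part of the build): the classic
   condition-variable wait loop built from the C entry points above.
   Compiled out with #if 0; the predicate flag and names are
   hypothetical. */
#if 0
static void
example_condvar_wait (SCM mx, SCM cv, int *ready)
{
  scm_lock_mutex (mx);
  /* Re-test the predicate after every wakeup: a wait can return
     after a broadcast meant for another waiter. */
  while (!*ready)
    scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
  scm_unlock_mutex (mx);
}

static void
example_condvar_signal (SCM mx, SCM cv, int *ready)
{
  scm_lock_mutex (mx);
  *ready = 1;   /* READY is protected by MX on both sides */
  scm_signal_condition_variable (cv);
  scm_unlock_mutex (mx);
}
#endif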
1706 static void
1707 fat_cond_signal (fat_cond *c)
1708 {
1709 unblock_from_queue (c->waiting);
1710 }
1711
1712 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1713 (SCM cv),
1714 "Wake up one thread that is waiting for @var{cv}")
1715 #define FUNC_NAME s_scm_signal_condition_variable
1716 {
1717 SCM_VALIDATE_CONDVAR (1, cv);
1718 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1719 return SCM_BOOL_T;
1720 }
1721 #undef FUNC_NAME
1722
1723 static void
1724 fat_cond_broadcast (fat_cond *c)
1725 {
1726 while (scm_is_true (unblock_from_queue (c->waiting)))
1727 ;
1728 }
1729
1730 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1731 (SCM cv),
1732 "Wake up all threads that are waiting for @var{cv}. ")
1733 #define FUNC_NAME s_scm_broadcast_condition_variable
1734 {
1735 SCM_VALIDATE_CONDVAR (1, cv);
1736 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1737 return SCM_BOOL_T;
1738 }
1739 #undef FUNC_NAME
1740
1741 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1742 (SCM obj),
1743 "Return @code{#t} if @var{obj} is a condition variable.")
1744 #define FUNC_NAME s_scm_condition_variable_p
1745 {
1746 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1747 }
1748 #undef FUNC_NAME
1749
1750
1751 \f
1752 /*** Select */
1753
1754 struct select_args
1755 {
1756 int nfds;
1757 fd_set *read_fds;
1758 fd_set *write_fds;
1759 fd_set *except_fds;
1760 struct timeval *timeout;
1761
1762 int result;
1763 int errno_value;
1764 };
1765
1766 static void *
1767 do_std_select (void *args)
1768 {
1769 struct select_args *select_args;
1770
1771 select_args = (struct select_args *) args;
1772
1773 select_args->result =
1774 select (select_args->nfds,
1775 select_args->read_fds, select_args->write_fds,
1776 select_args->except_fds, select_args->timeout);
1777 select_args->errno_value = errno;
1778
1779 return NULL;
1780 }
1781
1782 #if !SCM_HAVE_SYS_SELECT_H
1783 static int scm_std_select (int nfds,
1784 fd_set *readfds,
1785 fd_set *writefds,
1786 fd_set *exceptfds,
1787 struct timeval *timeout);
1788 #endif
1789
1790 int
1791 scm_std_select (int nfds,
1792 fd_set *readfds,
1793 fd_set *writefds,
1794 fd_set *exceptfds,
1795 struct timeval *timeout)
1796 {
1797 fd_set my_readfds;
1798 int res, eno, wakeup_fd;
1799 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1800 struct select_args args;
1801
1802 if (readfds == NULL)
1803 {
1804 FD_ZERO (&my_readfds);
1805 readfds = &my_readfds;
1806 }
1807
1808 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1809 SCM_TICK;
1810
1811 wakeup_fd = t->sleep_pipe[0];
1812 FD_SET (wakeup_fd, readfds);
1813 if (wakeup_fd >= nfds)
1814 nfds = wakeup_fd+1;
1815
1816 args.nfds = nfds;
1817 args.read_fds = readfds;
1818 args.write_fds = writefds;
1819 args.except_fds = exceptfds;
1820 args.timeout = timeout;
1821
1822 /* Explicitly cooperate with the GC. */
1823 scm_without_guile (do_std_select, &args);
1824
1825 res = args.result;
1826 eno = args.errno_value;
1827
1828 t->sleep_fd = -1;
1829 scm_i_reset_sleep (t);
1830
1831 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1832 {
1833 char dummy;
1834 full_read (wakeup_fd, &dummy, 1);
1835
1836 FD_CLR (wakeup_fd, readfds);
1837 res -= 1;
1838 if (res == 0)
1839 {
1840 eno = EINTR;
1841 res = -1;
1842 }
1843 }
1844 errno = eno;
1845 return res;
1846 }
1847
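/* Illustrative sketch (not part of the build): scm_std_select is a
   drop-in for select(2) that leaves guile mode while blocking and
   reports wakeups through the sleep pipe as EINTR.  Compiled out with
   #if 0; the function name is hypothetical. */
#if 0
static int
example_wait_readable (int fd)
{
  fd_set readfds;
  struct timeval tv = { 5, 0 };   /* five-second timeout */

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);

  /* Returns -1 with errno == EINTR when the thread is woken through
     its sleep pipe, e.g. by an async; callers should be prepared to
     retry. */
  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}
#endif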
1848 /* Convenience API for blocking while in guile mode. */
1849
1850 #if SCM_USE_PTHREAD_THREADS
1851
1852 /* It seems reasonable to not run procedures related to mutex and condition
1853 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1854 without it, and (ii) the only potential gain would be GC latency. See
1855 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1856 for a discussion of the pros and cons. */
1857
1858 int
1859 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1860 {
1861 int res = scm_i_pthread_mutex_lock (mutex);
1862 return res;
1863 }
1864
1865 static void
1866 do_unlock (void *data)
1867 {
1868 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1869 }
1870
1871 void
1872 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1873 {
1874 scm_i_scm_pthread_mutex_lock (mutex);
1875 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1876 }
1877
1878 int
1879 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1880 {
1881 int res;
1882 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1883
1884 t->held_mutex = mutex;
1885 res = scm_i_pthread_cond_wait (cond, mutex);
1886 t->held_mutex = NULL;
1887
1888 return res;
1889 }
1890
1891 int
1892 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1893 scm_i_pthread_mutex_t *mutex,
1894 const scm_t_timespec *wt)
1895 {
1896 int res;
1897 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1898
1899 t->held_mutex = mutex;
1900 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1901 t->held_mutex = NULL;
1902
1903 return res;
1904 }
1905
1906 #endif
1907
1908 static void
1909 do_unlock_with_asyncs (void *data)
1910 {
1911 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1912 SCM_I_CURRENT_THREAD->block_asyncs--;
1913 }
1914
1915 void
1916 scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
1917 {
1918 SCM_I_CURRENT_THREAD->block_asyncs++;
1919 scm_i_scm_pthread_mutex_lock (mutex);
1920 scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
1921 SCM_F_WIND_EXPLICITLY);
1922 }
1923
1924 unsigned long
1925 scm_std_usleep (unsigned long usecs)
1926 {
1927 struct timeval tv;
1928 tv.tv_usec = usecs % 1000000;
1929 tv.tv_sec = usecs / 1000000;
1930 scm_std_select (0, NULL, NULL, NULL, &tv);
1931 return tv.tv_sec * 1000000 + tv.tv_usec;
1932 }
1933
1934 unsigned int
1935 scm_std_sleep (unsigned int secs)
1936 {
1937 struct timeval tv;
1938 tv.tv_usec = 0;
1939 tv.tv_sec = secs;
1940 scm_std_select (0, NULL, NULL, NULL, &tv);
1941 return tv.tv_sec;
1942 }
1943
1944 /*** Misc */
1945
1946 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1947 (void),
1948 "Return the thread that called this function.")
1949 #define FUNC_NAME s_scm_current_thread
1950 {
1951 return SCM_I_CURRENT_THREAD->handle;
1952 }
1953 #undef FUNC_NAME
1954
1955 static SCM
1956 scm_c_make_list (size_t n, SCM fill)
1957 {
1958 SCM res = SCM_EOL;
1959 while (n-- > 0)
1960 res = scm_cons (fill, res);
1961 return res;
1962 }
1963
1964 SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1965 (void),
1966 "Return a list of all threads.")
1967 #define FUNC_NAME s_scm_all_threads
1968 {
1969 /* We cannot allocate while holding the thread_admin_mutex because
1970 of the way GC is done.
1971 */
1972 int n = thread_count;
1973 scm_i_thread *t;
1974 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
1975
1976 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1977 l = &list;
1978 for (t = all_threads; t && n > 0; t = t->next_thread)
1979 {
1980 if (t != scm_i_signal_delivery_thread)
1981 {
1982 SCM_SETCAR (*l, t->handle);
1983 l = SCM_CDRLOC (*l);
1984 }
1985 n--;
1986 }
1987 *l = SCM_EOL;
1988 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1989 return list;
1990 }
1991 #undef FUNC_NAME
1992
1993 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1994 (SCM thread),
1995 "Return @code{#t} iff @var{thread} has exited.\n")
1996 #define FUNC_NAME s_scm_thread_exited_p
1997 {
1998 return scm_from_bool (scm_c_thread_exited_p (thread));
1999 }
2000 #undef FUNC_NAME
2001
2002 int
2003 scm_c_thread_exited_p (SCM thread)
2004 #define FUNC_NAME s_scm_thread_exited_p
2005 {
2006 scm_i_thread *t;
2007 SCM_VALIDATE_THREAD (1, thread);
2008 t = SCM_I_THREAD_DATA (thread);
2009 return t->exited;
2010 }
2011 #undef FUNC_NAME
2012
2013 SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2014 (void),
2015 "Return the total number of processors of the machine, which\n"
2016 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2017 "thread execution unit, which can be either:\n\n"
2018 "@itemize\n"
2019 "@item an execution core in a (possibly multi-core) chip, in a\n"
2020 " (possibly multi- chip) module, in a single computer, or\n"
2021 "@item a thread execution unit inside a core in the case of\n"
2022 " @dfn{hyper-threaded} CPUs.\n"
2023 "@end itemize\n\n"
2024 "Which of the two definitions is used, is unspecified.\n")
2025 #define FUNC_NAME s_scm_total_processor_count
2026 {
2027 return scm_from_ulong (num_processors (NPROC_ALL));
2028 }
2029 #undef FUNC_NAME
2030
2031 SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2032 (void),
2033 "Like @code{total-processor-count}, but return the number of\n"
2034 "processors available to the current process. See\n"
2035 "@code{setaffinity} and @code{getaffinity} for more\n"
2036 "information.\n")
2037 #define FUNC_NAME s_scm_current_processor_count
2038 {
2039 return scm_from_ulong (num_processors (NPROC_CURRENT));
2040 }
2041 #undef FUNC_NAME
2042
2043
2044 \f
2045
2046 static scm_i_pthread_cond_t wake_up_cond;
2047 static int threads_initialized_p = 0;
2048
2049
2050 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2051 */
2052 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2053
2054 static SCM dynwind_critical_section_mutex;
2055
2056 void
2057 scm_dynwind_critical_section (SCM mutex)
2058 {
2059 if (scm_is_false (mutex))
2060 mutex = dynwind_critical_section_mutex;
2061 scm_dynwind_lock_mutex (mutex);
2062 scm_dynwind_block_asyncs ();
2063 }
2064
2065 /*** Initialization */
2066
2067 scm_i_pthread_mutex_t scm_i_misc_mutex;
2068
2069 #if SCM_USE_PTHREAD_THREADS
2070 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2071 #endif
2072
2073 void
2074 scm_threads_prehistory (void *base)
2075 {
2076 #if SCM_USE_PTHREAD_THREADS
2077 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2078 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2079 PTHREAD_MUTEX_RECURSIVE);
2080 #endif
2081
2082 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2083 scm_i_pthread_mutexattr_recursive);
2084 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2085 scm_i_pthread_cond_init (&wake_up_cond, NULL);
2086
2087 thread_gc_kind =
2088 GC_new_kind (GC_new_free_list (),
2089 GC_MAKE_PROC (GC_new_proc (thread_mark), 0),
2090 0, 1);
2091
2092 guilify_self_1 ((struct GC_stack_base *) base);
2093 }
2094
2095 scm_t_bits scm_tc16_thread;
2096 scm_t_bits scm_tc16_mutex;
2097 scm_t_bits scm_tc16_condvar;
2098
2099 void
2100 scm_init_threads ()
2101 {
2102 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
2103 scm_set_smob_print (scm_tc16_thread, thread_print);
2104
2105 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
2106 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2107
2108 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2109 sizeof (fat_cond));
2110 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
2111
2112 scm_i_default_dynamic_state = SCM_BOOL_F;
2113 guilify_self_2 (SCM_BOOL_F);
2114 threads_initialized_p = 1;
2115
2116 dynwind_critical_section_mutex = scm_make_recursive_mutex ();
2117 }
2118
2119 void
2120 scm_init_threads_default_dynamic_state ()
2121 {
2122 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2123 scm_i_default_dynamic_state = state;
2124 }
2125
2126 void
2127 scm_init_thread_procs ()
2128 {
2129 #include "libguile/threads.x"
2130 }
2131
2132 \f
2133 /* IA64-specific things. */
2134
2135 #ifdef __ia64__
2136 # ifdef __hpux
2137 # include <sys/param.h>
2138 # include <sys/pstat.h>
2139 void *
2140 scm_ia64_register_backing_store_base (void)
2141 {
2142 struct pst_vm_status vm_status;
2143 int i = 0;
2144 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
2145 if (vm_status.pst_type == PS_RSESTACK)
2146 return (void *) vm_status.pst_vaddr;
2147 abort ();
2148 }
2149 void *
2150 scm_ia64_ar_bsp (const void *ctx)
2151 {
2152 uint64_t bsp;
2153 __uc_get_ar_bsp (ctx, &bsp);
2154 return (void *) bsp;
2155 }
2156 # endif /* hpux */
2157 # ifdef linux
2158 # include <ucontext.h>
2159 void *
2160 scm_ia64_register_backing_store_base (void)
2161 {
2162 extern void *__libc_ia64_register_backing_store_base;
2163 return __libc_ia64_register_backing_store_base;
2164 }
2165 void *
2166 scm_ia64_ar_bsp (const void *opaque)
2167 {
2168 const ucontext_t *ctx = opaque;
2169 return (void *) ctx->uc_mcontext.sc_ar_bsp;
2170 }
2171 # endif /* linux */
2172 # ifdef __FreeBSD__
2173 # include <ucontext.h>
2174 void *
2175 scm_ia64_register_backing_store_base (void)
2176 {
2177 return (void *)0x8000000000000000;
2178 }
2179 void *
2180 scm_ia64_ar_bsp (const void *opaque)
2181 {
2182 const ucontext_t *ctx = opaque;
2183 return (void *)(ctx->uc_mcontext.mc_special.bspstore
2184 + ctx->uc_mcontext.mc_special.ndirty);
2185 }
2186 # endif /* __FreeBSD__ */
2187 #endif /* __ia64__ */
2188
2189
2190 /*
2191 Local Variables:
2192 c-file-style: "gnu"
2193 End:
2194 */