/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <assert.h>
#include <nproc.h>

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

#include <full-read.h>


\f

/* First some libgc shims.  */

/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier.  */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif


#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif

/* Likewise struct GC_stack_base is missing before 7.1.  */
#ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack.  */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack.  */
#endif
};

static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}

static void
GC_unregister_my_thread ()
{
}

#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom.  */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method is for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method is for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
#endif

static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}

static void *
GC_call_with_stack_base (void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested.  */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
#endif /* HAVE_GC_STACK_BASE */


/* Now define with_gc_active and with_gc_inactive.  */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc is not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */


\f
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
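
/* Illustrative sketch (not part of this file): the two timeout formats
   accepted by to_timespec.  A pair is read as (SECONDS . MICROSECONDS),
   as returned by `gettimeofday'; a plain real is a time in seconds.  Both
   calls below yield tv_sec == 5 and tv_nsec == 250000000:

     scm_t_timespec ts;
     to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)), &ts);
     to_timespec (scm_from_double (5.25), &ts);
*/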

\f
/*** Queues */

/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
   the risk of false references leading to unbounded retained space as
   described in "Bounding Space Usage of Conservative Garbage Collectors",
   H.J. Boehm, 2001.  */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
 */
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));

          /* GC-robust */
          SCM_SETCDR (c, SCM_EOL);

          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
 */
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
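
/* Illustrative sketch (not part of this file): a queue is a pair whose CDR
   holds the list of elements, oldest first, and whose CAR points at the
   last pair of that list so that enqueue can append in O(1):

     SCM q = make_queue ();
     SCM h1 = enqueue (q, scm_from_int (1));
     SCM h2 = enqueue (q, scm_from_int (2));
     remqueue (q, h1);         (drops the element behind handle H1)
     SCM x = dequeue (q);      (the SCM integer 2)
     SCM y = dequeue (q);      (SCM_BOOL_F: the queue is now empty)
*/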

/*** Thread smob routines */


static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int   ui;
    unsigned long  ul;
    scm_t_uintmax  um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

\f
/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
 */

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
       */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}
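
/* Illustrative sketch (not part of this file): the canonical calling
   pattern for block_self.  The caller holds the mutex, sleeps on a queue,
   and dispatches on the errno-style result; SOME_MUTEX, SOME_QUEUE and
   SLEEP_OBJ are hypothetical:

     scm_i_scm_pthread_mutex_lock (&some_mutex);
     err = block_self (some_queue, sleep_obj, &some_mutex, NULL);
     if (err == EINTR)
       ;                       (a system async was delivered; run it later)
     scm_i_pthread_mutex_unlock (&some_mutex);

   ETIMEDOUT is only possible when a non-NULL waittime is passed.  */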

\f
/* Getting into and out of guile mode.
 */

/* Key used to attach a cleanup handler to a given thread.  Also, if
   thread-local storage is unavailable, this key is used to retrieve the
   current thread with `pthread_getspecific ()'.  */
scm_i_pthread_key_t scm_i_thread_key;


#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS.  Note that storing the thread-object
   itself in TLS (rather than a pointer to some malloc'd memory) is not
   possible since thread objects may live longer than the actual thread they
   represent.  */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */


static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynwinds = SCM_EOL;
  t.active_asyncs = SCM_EOL;
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;

  if (pipe (t.sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.current_mark_stack_ptr = NULL;
  t.current_mark_stack_limit = NULL;
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  */
  {
    scm_i_thread *t_ptr = &t;

    GC_disable ();
    t_ptr = GC_malloc (sizeof (scm_i_thread));
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}

\f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
 */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
                                owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  This needs to be done in Guile mode.  */
  scm_i_ensure_signal_delivery_thread ();

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      (scm_t_catch_body) scm_call_0, ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
        {
          fat_mutex *m  = SCM_MUTEX_DATA (mutex);

          scm_i_pthread_mutex_lock (&m->lock);

          /* Since MUTEX is in `t->mutexes', T must be its owner.  */
          assert (scm_is_eq (m->owner, t->handle));

          unblock_from_queue (m->waiting);

          scm_i_pthread_mutex_unlock (&m->lock);
        }

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}

static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}

static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;

        /* GC-robust */
        t->next_thread = NULL;

        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return 0 when the thread was already known to Guile; otherwise
   return 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).  Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly.  */

static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
           */
          scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
           */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
}

void
scm_init_guile ()
{
  struct GC_stack_base stack_base;

  if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
    scm_i_init_thread_for_guile (&stack_base,
                                 scm_i_default_dynamic_state);
  else
    {
      fprintf (stderr, "Failed to get stack base for current thread.\n");
      exit (1);
    }
}
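
/* Illustrative sketch (not part of this file): the classic way to embed
   Guile in a C program is to call scm_init_guile early in main and then
   use the SCM API directly:

     int
     main (int argc, char **argv)
     {
       scm_init_guile ();
       scm_c_eval_string ("(display \"hello from Guile\\n\")");
       return 0;
     }

   scm_init_guile only works from a thread whose stack base libgc can
   determine; from other threads, prefer scm_with_guile, which obtains the
   stack base explicitly via GC_call_with_stack_base.  */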

SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}

struct with_guile_args
{
  GC_fn_type func;
  void *data;
  SCM parent;
};

static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}

static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile () could happen from anywhere on the
         stack, and in particular lower on the stack than it was when this
         thread was first guilified.  Thus, `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}

static void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  struct with_guile_args args;

  args.func = func;
  args.data = data;
  args.parent = parent;

  return GC_call_with_stack_base (with_guile_and_parent, &args);
}

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  if (t->guile_mode)
    {
      SCM_I_CURRENT_THREAD->guile_mode = 0;
      result = with_gc_inactive (func, data);
      SCM_I_CURRENT_THREAD->guile_mode = 1;
    }
  else
    /* Otherwise we're not in guile mode, so nothing to do.  */
    result = func (data);

  return result;
}
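
/* Illustrative sketch (not part of this file): wrapping a blocking system
   call in scm_without_guile so that, with a new enough libgc, the GC can
   keep running while this thread sleeps.  DO_BLOCKING_READ and FD are
   hypothetical:

     static void *
     do_blocking_read (void *data)
     {
       int fd = *(int *) data;
       static char buf[4096];
       read (fd, buf, sizeof buf);     (may block; no SCM calls in here)
       return buf;
     }

     char *bytes = scm_without_guile (do_blocking_read, &fd);

   The function passed to scm_without_guile must not touch SCM objects or
   allocate from the GC heap, since the thread is outside Guile mode while
   it runs.  */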

\f
/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @var{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
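
/* Illustrative sketch (not part of this file): calling this primitive from
   C.  A thunk is made from a C function with scm_c_make_gsubr, and the
   thread's exit value is collected with scm_join_thread; MY_THUNK and
   MAKE_MY_THREAD are hypothetical names:

     static SCM
     my_thunk (void)
     {
       return scm_from_int (42);
     }

     static SCM
     make_my_thread (void)
     {
       SCM thunk = scm_c_make_gsubr ("my-thunk", 0, 0, 0, my_thunk);
       SCM t = scm_call_with_new_thread (thunk, SCM_UNDEFINED);
       return scm_join_thread (t);    (yields 42, barring a throw)
     }
*/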

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
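
/* Illustrative sketch (not part of this file): scm_spawn_thread is the
   C-level analogue of call-with-new-thread; BODY and HANDLER below are
   hypothetical:

     static SCM
     body (void *data)
     {
       return scm_from_int (*(int *) data);
     }

     static SCM
     handler (void *data, SCM key, SCM args)
     {
       return SCM_BOOL_F;             (exit value if the body throws)
     }

     static int arg = 7;
     SCM t = scm_spawn_thread (body, &arg, handler, &arg);
     SCM v = scm_join_thread (t);     (yields 7 unless the body threw)
*/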

SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
            "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
            (SCM thread),
            "Asynchronously force the target @var{thread} to terminate.  @var{thread} "
            "cannot be the current thread, and if @var{thread} has already terminated or "
            "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
            (SCM thread, SCM proc),
            "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}.  "
            "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
            (SCM thread),
            "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME

SCM
scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated.  "
            "When @var{timeout} is given and passes before @var{thread} terminates, "
            "return @var{timeoutval}, which defaults to @code{#f}.")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

          /* Check for exit again, since we just released and
             reacquired the admin mutex, before the next block_self
             call (which would block forever if t has already
             exited).  */
          if (t->exited)
            {
              res = t->result;
              break;
            }
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a thread.")
#define FUNC_NAME s_scm_thread_p
{
  return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM
scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}

SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
            (SCM flags),
            "Create a new mutex.  The optional @var{flags} argument is a list "
            "of symbols that may include @code{recursive}, "
            "@code{unchecked-unlock} and @code{allow-external-unlock}.")
#define FUNC_NAME s_scm_make_mutex_with_flags
{
  int unchecked_unlock = 0, external_unlock = 0, recursive = 0;

  SCM ptr = flags;
  while (! scm_is_null (ptr))
    {
      SCM flag = SCM_CAR (ptr);
      if (scm_is_eq (flag, unchecked_unlock_sym))
        unchecked_unlock = 1;
      else if (scm_is_eq (flag, allow_external_unlock_sym))
        external_unlock = 1;
      else if (scm_is_eq (flag, recursive_sym))
        recursive = 1;
      else
        SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
      ptr = SCM_CDR (ptr);
    }
  return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
}
#undef FUNC_NAME

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex.")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
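
/* Illustrative sketch (not part of this file): creating mutexes from C with
   specific behaviours.  scm_make_mutex_with_flags takes a list of flag
   symbols; make-recursive-mutex is shorthand for the recursive case:

     SCM plain = scm_make_mutex ();
     SCM recursive = scm_make_recursive_mutex ();
     SCM lenient =
       scm_make_mutex_with_flags
       (scm_list_2 (scm_from_locale_symbol ("unchecked-unlock"),
                    scm_from_locale_symbol ("allow-external-unlock")));
*/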

SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread () : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small.  */
              t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM
scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
            (SCM m, SCM timeout, SCM owner),
            "Lock mutex @var{m}.  If the mutex is already locked, the calling "
            "thread blocks until the mutex becomes available.  The function "
            "returns when the calling thread owns the lock on @var{m}.  "
            "Locking a mutex that the calling thread already owns succeeds "
            "right away when the mutex was created with the @code{recursive} "
            "flag; otherwise an error is signalled.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
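
/* Illustrative sketch (not part of this file): taking a mutex inside a
   dynwind context so that it is released even on a non-local exit such as
   a throw; DO_PROTECTED_WORK is hypothetical:

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mutex);
     do_protected_work ();     (may throw; the unwind handler unlocks)
     scm_dynwind_end ();
*/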

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}.  If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}.")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  to_timespec (scm_from_int (0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              t->mutexes = scm_delq_x (mutex, t->mutexes);
              m->owner = unblock_from_queue (m->waiting);
            }

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          t->mutexes = scm_delq_x (mutex, t->mutexes);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}

SCM
scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
            (SCM mx, SCM cond, SCM timeout),
            "Unlocks @var{mx} if the calling thread owns the lock on "
            "@var{mx}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour.  Once a mutex has been unlocked, "
            "one thread blocked on @var{mx} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex.  When @var{cond} "
            "is given, the calling thread additionally waits for @var{cond} to be "
            "signalled after unlocking; unlike @code{wait-condition-variable}, the "
            "mutex is not re-locked when that wait completes.  The optional "
            "@var{timeout} gives a point in time where the wait is aborted.")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
            (SCM mx),
            "Return @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until the condition variable @var{cv} has been signalled.  While "
            "waiting, the mutex @var{mx} is atomically unlocked (as with "
            "@code{unlock-mutex}) and is locked again when this function returns.  "
            "When @var{t} is given, it specifies a point in time where the waiting "
            "should be aborted.  It can be either an integer as returned by "
            "@code{current-time} or a pair as returned by @code{gettimeofday}.  "
            "When the waiting is aborted, the mutex is locked and @code{#f} is "
            "returned.  When the condition variable is in fact signalled, the "
            "mutex is also locked and @code{#t} is returned.")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
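
/* Illustrative sketch (not part of this file): the usual condition-variable
   idiom from C, with READY standing in for some hypothetical shared state.
   The wait sits in a loop so that a wakeup is only trusted once the state
   actually holds:

     SCM mutex = scm_make_mutex ();
     SCM cond = scm_make_condition_variable ();

     scm_lock_mutex (mutex);
     while (scm_is_false (ready))
       scm_timed_wait_condition_variable (cond, mutex, SCM_UNDEFINED);
     scm_unlock_mutex (mutex);

   A producer sets READY while holding MUTEX and then calls
   scm_signal_condition_variable (cond).  */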

static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}.")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}.")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


\f
/*** Select */

struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
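
/* Illustrative sketch (not part of this file): scm_std_select is used like
   select(2), but -1 with errno == EINTR may also mean that a system async
   is pending; callers typically run asyncs and retry.  SOCK is a
   hypothetical file descriptor:

     fd_set fds;
     struct timeval tv = { 1, 0 };    (one-second timeout)
     FD_ZERO (&fds);
     FD_SET (sock, &fds);
     int ready = scm_std_select (sock + 1, &fds, NULL, NULL, &tv);
     if (ready < 0 && errno == EINTR)
       SCM_TICK;                      (runs pending asyncs; then retry)
*/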

/* Convenience API for blocking while in guile mode.  */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable to not run procedures related to mutex and condition
   variables within `GC_do_blocking ()' since, (i) the GC can operate even
   without it, and (ii) the only potential gain would be GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because
     of the way GC is done.
   */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1.  A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            "  (possibly multi-chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            "  @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME

SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process.  See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
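
/* Illustrative sketch (not part of this file): sizing a worker pool from C
   with the two counts above; both are guaranteed to be at least 1:

     unsigned long total = scm_to_ulong (scm_total_processor_count ());
     unsigned long avail = scm_to_ulong (scm_current_processor_count ());
     unsigned long workers = avail > 1 ? avail - 1 : 1;
*/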


\f

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/