Rename "boehm-gc.h" to "bdw-gc.h"; add to the distribution.
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32 #include <assert.h>
33
34 #ifdef HAVE_STRING_H
35 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
36 #endif
37
38 #if HAVE_SYS_TIME_H
39 #include <sys/time.h>
40 #endif
41
42 #include "libguile/validate.h"
43 #include "libguile/root.h"
44 #include "libguile/eval.h"
45 #include "libguile/async.h"
46 #include "libguile/ports.h"
47 #include "libguile/threads.h"
48 #include "libguile/dynwind.h"
49 #include "libguile/iselect.h"
50 #include "libguile/fluids.h"
51 #include "libguile/continuations.h"
52 #include "libguile/gc.h"
53 #include "libguile/init.h"
54 #include "libguile/scmsigs.h"
55 #include "libguile/strings.h"
56
57 #ifdef __MINGW32__
58 #ifndef ETIMEDOUT
59 # define ETIMEDOUT WSAETIMEDOUT
60 #endif
61 # include <fcntl.h>
62 # include <process.h>
63 # define pipe(fd) _pipe (fd, 256, O_BINARY)
64 #endif /* __MINGW32__ */
65
66 #include <full-read.h>
67
68 \f
69 static void
70 to_timespec (SCM t, scm_t_timespec *waittime)
71 {
72 if (scm_is_pair (t))
73 {
74 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
75 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
76 }
77 else
78 {
79 double time = scm_to_double (t);
80 double sec = scm_c_truncate (time);
81
82 waittime->tv_sec = (long) sec;
83 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
84 }
85 }
86
87 /*** Queues */
88
89 /* Make an empty queue data structure.
90 */
91 static SCM
92 make_queue ()
93 {
94 return scm_cons (SCM_EOL, SCM_EOL);
95 }
96
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.

   Q is a cons cell: its cdr is the list of queued cells and its car
   is the last cell of that list, so appending is O(1).  Runs inside a
   critical section so concurrent queue operations do not corrupt the
   structure.  */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell is also the head.  */
    SCM_SETCDR (q, c);
  else
    /* Link the new cell after the current tail.  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);  /* the new cell becomes the tail */
  SCM_CRITICAL_SECTION_END;
  return c;
}
113
114 /* Remove the element that the handle C refers to from the queue Q. C
115 must have been returned from a call to enqueue. The return value
116 is zero when the element referred to by C has already been removed.
117 Otherwise, 1 is returned.
118 */
119 static int
120 remqueue (SCM q, SCM c)
121 {
122 SCM p, prev = q;
123 SCM_CRITICAL_SECTION_START;
124 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
125 {
126 if (scm_is_eq (p, c))
127 {
128 if (scm_is_eq (c, SCM_CAR (q)))
129 SCM_SETCAR (q, SCM_CDR (c));
130 SCM_SETCDR (prev, SCM_CDR (c));
131 SCM_CRITICAL_SECTION_END;
132 return 1;
133 }
134 prev = p;
135 }
136 SCM_CRITICAL_SECTION_END;
137 return 0;
138 }
139
140 /* Remove the front-most element from the queue Q and return it.
141 Return SCM_BOOL_F when Q is empty.
142 */
143 static SCM
144 dequeue (SCM q)
145 {
146 SCM c;
147 SCM_CRITICAL_SECTION_START;
148 c = SCM_CDR (q);
149 if (scm_is_null (c))
150 {
151 SCM_CRITICAL_SECTION_END;
152 return SCM_BOOL_F;
153 }
154 else
155 {
156 SCM_SETCDR (q, SCM_CDR (c));
157 if (scm_is_null (SCM_CDR (q)))
158 SCM_SETCAR (q, SCM_EOL);
159 SCM_CRITICAL_SECTION_END;
160 return SCM_CAR (c);
161 }
162 }
163
164 /*** Thread smob routines */
165
166
167 static int
168 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
169 {
170 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
171 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
172 the struct case, hence we go via a union, and extract according to the
173 size of pthread_t. */
174 union {
175 scm_i_pthread_t p;
176 unsigned short us;
177 unsigned int ui;
178 unsigned long ul;
179 scm_t_uintmax um;
180 } u;
181 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
182 scm_i_pthread_t p = t->pthread;
183 scm_t_uintmax id;
184 u.p = p;
185 if (sizeof (p) == sizeof (unsigned short))
186 id = u.us;
187 else if (sizeof (p) == sizeof (unsigned int))
188 id = u.ui;
189 else if (sizeof (p) == sizeof (unsigned long))
190 id = u.ul;
191 else
192 id = u.um;
193
194 scm_puts ("#<thread ", port);
195 scm_uintprint (id, 10, port);
196 scm_puts (" (", port);
197 scm_uintprint ((scm_t_bits)t, 16, port);
198 scm_puts (")>", port);
199 return 1;
200 }
201
/* Smob free hook for thread objects: release the C-side thread
   structure.  Must only run after the thread has exited (asserted
   below).  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
210
211 /*** Blocking on queues. */
212
213 /* See also scm_i_queue_async_cell for how such a block is
214 interrputed.
215 */
216
/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* Register the sleep so asyncs can interrupt it; a nonzero return
     presumably means an async is already pending, so we do not sleep
     at all -- confirm against scm_i_setup_sleep's definition.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Keep asyncs from running while we sit on the queue.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
269
270 /* Wake up the first thread on QUEUE, if any. The awoken thread is
271 returned, or #f if the queue was empty.
272 */
273 static SCM
274 unblock_from_queue (SCM queue)
275 {
276 SCM thread = dequeue (queue);
277 if (scm_is_true (thread))
278 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
279 return thread;
280 }
281
282 \f
283 /* Getting into and out of guile mode.
284 */
285
/* TLS key under which each thread's scm_i_thread is stored (see
   SCM_I_CURRENT_THREAD); created once in init_thread_key.  */
scm_i_pthread_key_t scm_i_thread_key;


/* Protects ALL_THREADS and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;  /* singly linked via next_thread */
static int thread_count;

static SCM scm_i_default_dynamic_state;
294
/* Perform first stage of thread initialisation, in non-guile mode:
   allocate the scm_i_thread structure for the calling thread, fill in
   its C-level fields, and link it onto the global thread list.  BASE
   is the stack base to use with GC.  */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;  /* the smob is created later, in guilify_self_2 */
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes setting up.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  /* NOTE(review): the sleep pipe is presumably used to interrupt
     blocking waits (it is closed in do_thread_exit) -- confirm
     against scmsigs.c/async.c.  */
  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->current_mark_stack_ptr = NULL;
  t->current_mark_stack_limit = NULL;
  t->canceled = 0;
  t->exited = 0;
  t->guile_mode = 0;

  /* Make SCM_I_CURRENT_THREAD work for this thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Publish the new thread on the global list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
360
361 /* Perform second stage of thread initialisation, in guile mode.
362 */
363 static void
364 guilify_self_2 (SCM parent)
365 {
366 scm_i_thread *t = SCM_I_CURRENT_THREAD;
367
368 t->guile_mode = 1;
369
370 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
371
372 t->continuation_root = scm_cons (t->handle, SCM_EOL);
373 t->continuation_base = t->base;
374 t->vm = SCM_BOOL_F;
375
376 if (scm_is_true (parent))
377 t->dynamic_state = scm_make_dynamic_state (parent);
378 else
379 t->dynamic_state = scm_i_make_initial_dynamic_state ();
380
381 t->join_queue = make_queue ();
382 t->block_asyncs = 0;
383 }
384
385 \f
386 /*** Fat mutexes */
387
388 /* We implement our own mutex type since we want them to be 'fair', we
389 want to do fancy things while waiting for them (like running
390 asyncs) and we might want to add things that are nice for
391 debugging.
392 */
393
typedef struct {
  scm_i_pthread_mutex_t lock;  /* protects the fields below */
  SCM owner;                   /* locking thread, or #f when unlocked */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

/* Is X a mutex smob?  */
#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
/* Access the fat_mutex behind the mutex smob X.  */
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
409
/* Perform thread tear-down, in guile mode: run the cleanup handler
   (if any), mark the thread as exited, wake joiners, and wake threads
   waiting on mutexes this thread still holds.  V is the exiting
   thread's scm_i_thread; always returns NULL.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice.  */
      t->cleanup_handler = SCM_BOOL_F;
      /* The handler's result becomes the thread's exit value; errors
	 in the handler are reported but do not escape.  */
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake threads waiting on mutexes we still own, so they can observe
     the abandoned mutex (see fat_mutex_lock).  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
451
/* Thread tear-down driver; installed as a pthread cleanup handler via
   scm_leave_guile_cleanup.  V is the exiting thread's scm_i_thread.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Re-establish the key so SCM_I_CURRENT_THREAD keeps working during
     the tear-down below.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */

  /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
     assume the GC is usable at this point, and notably that thread-local
     storage (TLS) hasn't been deallocated yet.  */
  do_thread_exit (v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Finally, drop the TLS binding for this now-dead thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
503
/* Guard so the thread-specific key is created exactly once.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create the TLS key under which each thread's scm_i_thread is
   stored; called through scm_i_pthread_once.  No destructor is
   registered -- tear-down happens via on_thread_exit instead.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, NULL);
}
511
/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  /* Make sure the TLS key exists before consulting it.  */
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 A user call to scm_with_guile() will lead us to here.  This could
	 happen from anywhere on the stack, and in particular lower on the
	 stack than when it was when this thread was first guilified.  Thus,
	 `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (base < t->base)
	t->base = base;
#else
      if (base > t->base)
	t->base = base;
#endif

      /* A NULL top marks the thread as being in guile mode, per the
	 test above.  */
      t->top = NULL;
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
583
#if SCM_USE_PTHREAD_THREADS

#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
#define HAVE_GET_THREAD_STACK_BASE

/* Return the base of the calling thread's stack, for use as the GC
   scanning root.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  /* Query this thread's stack extent from the pthreads
     implementation.  */
  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* Sanity check: a local variable must lie within the reported
     stack; otherwise fall back to the collector's recorded stack
     bottom.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return (SCM_STACKITEM *) GC_stackbottom;
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#elif HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif defined (__MINGW32__)
/* This method for mingw.  In mingw the basic scm_get_stack_base can be used
   in any thread.  We don't like hard-coding the name of a system, but there
   doesn't seem to be a cleaner way of knowing scm_get_stack_base can
   work.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return (SCM_STACKITEM *) GC_stackbottom;
}

#endif /* pthread methods of get_thread_stack_base */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

/* Without pthreads there is only one thread, so the collector's
   recorded stack bottom is the stack base.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return (SCM_STACKITEM *) GC_stackbottom;
}

#endif /* !SCM_USE_PTHREAD_THREADS */
658
#ifdef HAVE_GET_THREAD_STACK_BASE

/* Public entry point: initialize Guile and put the calling thread
   into guile mode, using the thread's own stack base for GC.  Only
   compiled when the platform provides a way to find that base.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}

#endif
669
/* Call FUNC with DATA in guile mode, using the default dynamic state
   as the parent; see scm_i_with_guile_and_parent.  Returns FUNC's
   result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
676
/* pthread cleanup handler: run thread tear-down for the current
   thread.  Installed by scm_i_with_guile_and_parent so that
   cancellation or pthread_exit still performs tear-down.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}
682
/* Call FUNC with DATA in guile mode, with PARENT as the parent
   dynamic state.  When this call actually enters guile mode (rather
   than finding the thread already in it), a pthread cleanup handler
   is pushed so an exiting thread still runs on_thread_exit.  Returns
   FUNC's result.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;  /* its address marks the stack base */

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      /* cleanup_push/pop may expand to a block pair; they must stay
	 matched at the same nesting level.  */
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
702
703 \f
704 /*** Non-guile mode. */
705
#if (defined HAVE_GC_DO_BLOCKING) && (!defined HAVE_DECL_GC_DO_BLOCKING)

/* This declaration is missing from the public headers of GC 7.1.  */
extern void GC_do_blocking (void (*) (void *), void *);

#endif

#ifdef HAVE_GC_DO_BLOCKING
/* Closure passed through GC_do_blocking by scm_without_guile.  */
struct without_guile_arg
{
  void * (*function) (void *);  /* user function to call */
  void *data;                   /* its argument */
  void *result;                 /* its return value */
};

/* Trampoline with the signature GC_do_blocking expects; toggles
   guile_mode off around the user function call.  */
static void
without_guile_trampoline (void *closure)
{
  struct without_guile_arg *arg;

  SCM_I_CURRENT_THREAD->guile_mode = 0;

  arg = (struct without_guile_arg *) closure;
  arg->result = arg->function (arg->data);

  SCM_I_CURRENT_THREAD->guile_mode = 1;
}
#endif
734
/* Call FUNC with DATA outside of guile mode, via the collector's
   GC_do_blocking when available, and return FUNC's result.  When
   GC_do_blocking is unavailable, or the thread is already out of
   guile mode, FUNC is simply called directly.  */
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;

#ifdef HAVE_GC_DO_BLOCKING
  if (SCM_I_CURRENT_THREAD->guile_mode)
    {
      struct without_guile_arg arg;

      arg.function = func;
      arg.data = data;
      GC_do_blocking (without_guile_trampoline, &arg);
      result = arg.result;
    }
  else
#endif
    result = func (data);

  return result;
}
756
757 \f
758 /*** Thread creation */
759
/* Data shared between scm_call_with_new_thread (the creator) and the
   new thread while it starts up.  It lives on the creator's stack;
   MUTEX and COND synchronize the hand-off of THREAD back to the
   creator.  */
typedef struct {
  SCM parent;   /* dynamic state for the new thread to inherit */
  SCM thunk;    /* thunk the new thread runs */
  SCM handler;  /* catch handler, or SCM_UNDEFINED */
  SCM thread;   /* set by the child to its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
768
769 static void *
770 really_launch (void *d)
771 {
772 launch_data *data = (launch_data *)d;
773 SCM thunk = data->thunk, handler = data->handler;
774 scm_i_thread *t;
775
776 t = SCM_I_CURRENT_THREAD;
777
778 scm_i_scm_pthread_mutex_lock (&data->mutex);
779 data->thread = scm_current_thread ();
780 scm_i_pthread_cond_signal (&data->cond);
781 scm_i_pthread_mutex_unlock (&data->mutex);
782
783 if (SCM_UNBNDP (handler))
784 t->result = scm_call_0 (thunk);
785 else
786 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
787
788 /* Trigger a call to `on_thread_exit ()'. */
789 pthread_exit (NULL);
790
791 return 0;
792 }
793
794 static void *
795 launch_thread (void *d)
796 {
797 launch_data *data = (launch_data *)d;
798 scm_i_pthread_detach (scm_i_pthread_self ());
799 scm_i_with_guile_and_parent (really_launch, d, data->parent);
800 return NULL;
801 }
802
803 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
804 (SCM thunk, SCM handler),
805 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
806 "returning a new thread object representing the thread. The procedure\n"
807 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
808 "\n"
809 "When @var{handler} is specified, then @var{thunk} is called from\n"
810 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
811 "handler. This catch is established inside the continuation barrier.\n"
812 "\n"
813 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
814 "the @emph{exit value} of the thread and the thread is terminated.")
815 #define FUNC_NAME s_scm_call_with_new_thread
816 {
817 launch_data data;
818 scm_i_pthread_t id;
819 int err;
820
821 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
822 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
823 handler, SCM_ARG2, FUNC_NAME);
824
825 data.parent = scm_current_dynamic_state ();
826 data.thunk = thunk;
827 data.handler = handler;
828 data.thread = SCM_BOOL_F;
829 scm_i_pthread_mutex_init (&data.mutex, NULL);
830 scm_i_pthread_cond_init (&data.cond, NULL);
831
832 scm_i_scm_pthread_mutex_lock (&data.mutex);
833 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
834 if (err)
835 {
836 scm_i_pthread_mutex_unlock (&data.mutex);
837 errno = err;
838 scm_syserror (NULL);
839 }
840 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
841 scm_i_pthread_mutex_unlock (&data.mutex);
842
843 return data.thread;
844 }
845 #undef FUNC_NAME
846
/* C-level analogue of launch_data, used by scm_spawn_thread: shared
   between the spawning thread and the new thread during start-up.
   Lives on the spawner's stack; MUTEX and COND synchronize the
   hand-off of THREAD.  */
typedef struct {
  SCM parent;                  /* dynamic state to inherit */
  scm_t_catch_body body;       /* C function the thread runs */
  void *body_data;             /* argument for BODY */
  scm_t_catch_handler handler; /* catch handler, or NULL */
  void *handler_data;          /* argument for HANDLER */
  SCM thread;                  /* set by the child to its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
857
858 static void *
859 really_spawn (void *d)
860 {
861 spawn_data *data = (spawn_data *)d;
862 scm_t_catch_body body = data->body;
863 void *body_data = data->body_data;
864 scm_t_catch_handler handler = data->handler;
865 void *handler_data = data->handler_data;
866 scm_i_thread *t = SCM_I_CURRENT_THREAD;
867
868 scm_i_scm_pthread_mutex_lock (&data->mutex);
869 data->thread = scm_current_thread ();
870 scm_i_pthread_cond_signal (&data->cond);
871 scm_i_pthread_mutex_unlock (&data->mutex);
872
873 if (handler == NULL)
874 t->result = body (body_data);
875 else
876 t->result = scm_internal_catch (SCM_BOOL_T,
877 body, body_data,
878 handler, handler_data);
879
880 return 0;
881 }
882
883 static void *
884 spawn_thread (void *d)
885 {
886 spawn_data *data = (spawn_data *)d;
887 scm_i_pthread_detach (scm_i_pthread_self ());
888 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
889 return NULL;
890 }
891
892 SCM
893 scm_spawn_thread (scm_t_catch_body body, void *body_data,
894 scm_t_catch_handler handler, void *handler_data)
895 {
896 spawn_data data;
897 scm_i_pthread_t id;
898 int err;
899
900 data.parent = scm_current_dynamic_state ();
901 data.body = body;
902 data.body_data = body_data;
903 data.handler = handler;
904 data.handler_data = handler_data;
905 data.thread = SCM_BOOL_F;
906 scm_i_pthread_mutex_init (&data.mutex, NULL);
907 scm_i_pthread_cond_init (&data.cond, NULL);
908
909 scm_i_scm_pthread_mutex_lock (&data.mutex);
910 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
911 if (err)
912 {
913 scm_i_pthread_mutex_unlock (&data.mutex);
914 errno = err;
915 scm_syserror (NULL);
916 }
917 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
918 scm_i_pthread_mutex_unlock (&data.mutex);
919
920 return data.thread;
921 }
922
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Delegate to the platform yield primitive; its return value
     becomes the Scheme boolean result.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
931
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Release the admin mutex before cancelling, so we never cancel
	 a thread while holding its admin lock.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
956
957 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
958 (SCM thread, SCM proc),
959 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
960 "This handler will be called when the thread exits.")
961 #define FUNC_NAME s_scm_set_thread_cleanup_x
962 {
963 scm_i_thread *t;
964
965 SCM_VALIDATE_THREAD (1, thread);
966 if (!scm_is_false (proc))
967 SCM_VALIDATE_THUNK (2, proc);
968
969 t = SCM_I_THREAD_DATA (thread);
970 scm_i_pthread_mutex_lock (&t->admin_mutex);
971
972 if (!(t->exited || t->canceled))
973 t->cleanup_handler = proc;
974
975 scm_i_pthread_mutex_unlock (&t->admin_mutex);
976
977 return SCM_UNSPECIFIED;
978 }
979 #undef FUNC_NAME
980
981 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
982 (SCM thread),
983 "Return the cleanup handler installed for the thread @var{thread}.")
984 #define FUNC_NAME s_scm_thread_cleanup
985 {
986 scm_i_thread *t;
987 SCM ret;
988
989 SCM_VALIDATE_THREAD (1, thread);
990
991 t = SCM_I_THREAD_DATA (thread);
992 scm_i_pthread_mutex_lock (&t->admin_mutex);
993 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
994 scm_i_pthread_mutex_unlock (&t->admin_mutex);
995
996 return ret;
997 }
998 #undef FUNC_NAME
999
/* Block until THREAD terminates and return its exit value; no
   timeout.  See scm_join_thread_timed.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1004
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL, when supplied, is what we return on timeout.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on T's join queue until do_thread_exit wakes us,
	     an async interrupts us, or the timeout expires.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Release the admin mutex and let pending asyncs run before
	     waiting again.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited).  */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1071
1072 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1073 (SCM obj),
1074 "Return @code{#t} if @var{obj} is a thread.")
1075 #define FUNC_NAME s_scm_thread_p
1076 {
1077 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1078 }
1079 #undef FUNC_NAME
1080
1081
/* Smob free hook for mutexes: destroy the underlying pthread mutex
   and release the fat_mutex structure.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
1090
1091 static int
1092 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1093 {
1094 fat_mutex *m = SCM_MUTEX_DATA (mx);
1095 scm_puts ("#<mutex ", port);
1096 scm_uintprint ((scm_t_bits)m, 16, port);
1097 scm_puts (">", port);
1098 return 1;
1099 }
1100
/* Allocate a fat_mutex and wrap it in a mutex smob.  The three flags
   correspond to the fields of the same names in fat_mutex.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;  /* 0 means unlocked */

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* WAITING is given a valid value before the smob is created, and
     the real queue is allocated only afterwards -- presumably so the
     object never holds an uninitialized SCM while allocation can run;
     keep this ordering.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1121
/* Create a standard mutex with no flags set; see
   scm_make_mutex_with_flags for the available flags.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1126
1127 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1128 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1129 SCM_SYMBOL (recursive_sym, "recursive");
1130
1131 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1132 (SCM flags),
1133 "Create a new mutex. ")
1134 #define FUNC_NAME s_scm_make_mutex_with_flags
1135 {
1136 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1137
1138 SCM ptr = flags;
1139 while (! scm_is_null (ptr))
1140 {
1141 SCM flag = SCM_CAR (ptr);
1142 if (scm_is_eq (flag, unchecked_unlock_sym))
1143 unchecked_unlock = 1;
1144 else if (scm_is_eq (flag, allow_external_unlock_sym))
1145 external_unlock = 1;
1146 else if (scm_is_eq (flag, recursive_sym))
1147 recursive = 1;
1148 else
1149 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1150 ptr = SCM_CDR (ptr);
1151 }
1152 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1153 }
1154 #undef FUNC_NAME
1155
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1164
/* Error key signalled when a lock is obtained on a mutex whose previous
   owner thread exited without unlocking it.  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1166
/* Try to lock MUTEX on behalf of OWNER (the current thread when OWNER
   is unbound).  TIMEOUT, when non-NULL, is an absolute time after which
   the attempt is abandoned.  On return, *RET is 1 if the lock was
   obtained and 0 otherwise.  The result is #f on success, or a pair
   (KEY . MESSAGE) describing an exception for the caller to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take ownership outright.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      /* Record the mutex on the owning thread's list, so it can
		 be detected as abandoned should the thread exit while
		 still holding it.  */
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* Previous owner exited without unlocking: take over and
	     report the abandonment to the caller.  NOTE(review): unlike
	     the unlocked case above, `m->level' is not incremented here
	     — verify this is intentional.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Re-entrant attempt: succeeds only for recursive mutexes.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  if (timeout != NULL)
	    {
	      /* Compare wall-clock time against the absolute timeout;
		 tv_usec is scaled to nanoseconds for the comparison.  */
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  /* Contended: queue ourselves on the mutex, then briefly drop
	     the lock so asyncs can run before retrying.  */
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1243
/* Lock MX, blocking indefinitely; the calling thread becomes the
   owner.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1248
1249 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1250 (SCM m, SCM timeout, SCM owner),
1251 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1252 "blocks until the mutex becomes available. The function returns when "
1253 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1254 "a thread already owns will succeed right away and will not block the "
1255 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1256 #define FUNC_NAME s_scm_lock_mutex_timed
1257 {
1258 SCM exception;
1259 int ret = 0;
1260 scm_t_timespec cwaittime, *waittime = NULL;
1261
1262 SCM_VALIDATE_MUTEX (1, m);
1263
1264 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1265 {
1266 to_timespec (timeout, &cwaittime);
1267 waittime = &cwaittime;
1268 }
1269
1270 exception = fat_mutex_lock (m, waittime, owner, &ret);
1271 if (!scm_is_false (exception))
1272 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1273 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1274 }
1275 #undef FUNC_NAME
1276
/* Hold MUTEX for the duration of the current dynwind context: it is
   locked on (re)entry and unlocked on unwind.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  /* SCM_F_WIND_EXPLICITLY makes the rewind handler run right away,
     taking the lock immediately.  */
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
1285
1286 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1287 (SCM mutex),
1288 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1289 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1290 #define FUNC_NAME s_scm_try_mutex
1291 {
1292 SCM exception;
1293 int ret = 0;
1294 scm_t_timespec cwaittime, *waittime = NULL;
1295
1296 SCM_VALIDATE_MUTEX (1, mutex);
1297
1298 to_timespec (scm_from_int(0), &cwaittime);
1299 waittime = &cwaittime;
1300
1301 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1302 if (!scm_is_false (exception))
1303 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1304 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1305 }
1306 #undef FUNC_NAME
1307
1308 /*** Fat condition variables */
1309
/* A fat condition variable: a queue of waiting threads protected by its
   own pthread mutex.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* Type predicate and data accessor for condition-variable SMOBs.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1317
/* Unlock MUTEX.  When COND is bound, additionally wait on that
   condition variable after releasing the mutex, with WAITTIME as an
   optional absolute timeout; when RELOCK is nonzero the mutex is
   re-acquired on behalf of its previous owner before returning.
   Returns 1 on success and 0 when a timed wait timed out.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  SCM owner = m->owner;

  /* Enforce the mutex's unlock policy when the caller is not the
     owner.  */
  if (!scm_is_eq (owner, scm_current_thread ()))
    {
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = scm_current_thread ();
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Release one level of the mutex; hand ownership to a queued
	     waiter when it becomes fully unlocked.  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    m->owner = unblock_from_queue (m->waiting);

	  /* Block asyncs across the wait so we are not interrupted
	     between waking up and (possibly) re-locking.  */
	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	m->owner = unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1411
/* Unlock MX without waiting on any condition variable.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1416
1417 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1418 (SCM mx, SCM cond, SCM timeout),
1419 "Unlocks @var{mutex} if the calling thread owns the lock on "
1420 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1421 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1422 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1423 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1424 "with a call to @code{unlock-mutex}. Only the last call to "
1425 "@code{unlock-mutex} will actually unlock the mutex. ")
1426 #define FUNC_NAME s_scm_unlock_mutex_timed
1427 {
1428 scm_t_timespec cwaittime, *waittime = NULL;
1429
1430 SCM_VALIDATE_MUTEX (1, mx);
1431 if (! (SCM_UNBNDP (cond)))
1432 {
1433 SCM_VALIDATE_CONDVAR (2, cond);
1434
1435 if (! (SCM_UNBNDP (timeout)))
1436 {
1437 to_timespec (timeout, &cwaittime);
1438 waittime = &cwaittime;
1439 }
1440 }
1441
1442 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1443 }
1444 #undef FUNC_NAME
1445
1446 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1447 (SCM obj),
1448 "Return @code{#t} if @var{obj} is a mutex.")
1449 #define FUNC_NAME s_scm_mutex_p
1450 {
1451 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1452 }
1453 #undef FUNC_NAME
1454
1455 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1456 (SCM mx),
1457 "Return the thread owning @var{mx}, or @code{#f}.")
1458 #define FUNC_NAME s_scm_mutex_owner
1459 {
1460 SCM owner;
1461 fat_mutex *m = NULL;
1462
1463 SCM_VALIDATE_MUTEX (1, mx);
1464 m = SCM_MUTEX_DATA (mx);
1465 scm_i_pthread_mutex_lock (&m->lock);
1466 owner = m->owner;
1467 scm_i_pthread_mutex_unlock (&m->lock);
1468
1469 return owner;
1470 }
1471 #undef FUNC_NAME
1472
SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* 0 means unlocked; recursive locking raises the level above 1.  */
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME
1482
1483 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1484 (SCM mx),
1485 "Returns @code{#t} if the mutex @var{mx} is locked.")
1486 #define FUNC_NAME s_scm_mutex_locked_p
1487 {
1488 SCM_VALIDATE_MUTEX (1, mx);
1489 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1490 }
1491 #undef FUNC_NAME
1492
1493 static size_t
1494 fat_cond_free (SCM mx)
1495 {
1496 fat_cond *c = SCM_CONDVAR_DATA (mx);
1497 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1498 return 0;
1499 }
1500
1501 static int
1502 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1503 {
1504 fat_cond *c = SCM_CONDVAR_DATA (cv);
1505 scm_puts ("#<condition-variable ", port);
1506 scm_uintprint ((scm_t_bits)c, 16, port);
1507 scm_puts (">", port);
1508 return 1;
1509 }
1510
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* As in make_fat_mutex: give `waiting' a valid SCM value before the
     SMOB exists, and allocate the real queue only afterwards.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1526
1527 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1528 (SCM cv, SCM mx, SCM t),
1529 "Wait until @var{cond-var} has been signalled. While waiting, "
1530 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1531 "is locked again when this function returns. When @var{time} is given, "
1532 "it specifies a point in time where the waiting should be aborted. It "
1533 "can be either a integer as returned by @code{current-time} or a pair "
1534 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1535 "mutex is locked and @code{#f} is returned. When the condition "
1536 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1537 "is returned. ")
1538 #define FUNC_NAME s_scm_timed_wait_condition_variable
1539 {
1540 scm_t_timespec waittime, *waitptr = NULL;
1541
1542 SCM_VALIDATE_CONDVAR (1, cv);
1543 SCM_VALIDATE_MUTEX (2, mx);
1544
1545 if (!SCM_UNBNDP (t))
1546 {
1547 to_timespec (t, &waittime);
1548 waitptr = &waittime;
1549 }
1550
1551 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1552 }
1553 #undef FUNC_NAME
1554
/* Wake up at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1560
1561 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1562 (SCM cv),
1563 "Wake up one thread that is waiting for @var{cv}")
1564 #define FUNC_NAME s_scm_signal_condition_variable
1565 {
1566 SCM_VALIDATE_CONDVAR (1, cv);
1567 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1568 return SCM_BOOL_T;
1569 }
1570 #undef FUNC_NAME
1571
1572 static void
1573 fat_cond_broadcast (fat_cond *c)
1574 {
1575 while (scm_is_true (unblock_from_queue (c->waiting)))
1576 ;
1577 }
1578
1579 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1580 (SCM cv),
1581 "Wake up all threads that are waiting for @var{cv}. ")
1582 #define FUNC_NAME s_scm_broadcast_condition_variable
1583 {
1584 SCM_VALIDATE_CONDVAR (1, cv);
1585 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1586 return SCM_BOOL_T;
1587 }
1588 #undef FUNC_NAME
1589
1590 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1591 (SCM obj),
1592 "Return @code{#t} if @var{obj} is a condition variable.")
1593 #define FUNC_NAME s_scm_condition_variable_p
1594 {
1595 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1596 }
1597 #undef FUNC_NAME
1598
1599
1600 \f
1601 /*** Select */
1602
/* Arguments to and results of a `select' call, marshalled through
   scm_without_guile by way of do_std_select.  */
struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  /* Outputs: the return value of `select' and the errno it left.  */
  int result;
  int errno_value;
};
1614
1615 static void *
1616 do_std_select (void *args)
1617 {
1618 struct select_args *select_args;
1619
1620 select_args = (struct select_args *) args;
1621
1622 select_args->result =
1623 select (select_args->nfds,
1624 select_args->read_fds, select_args->write_fds,
1625 select_args->except_fds, select_args->timeout);
1626 select_args->errno_value = errno;
1627
1628 return NULL;
1629 }
1630
/* A `select' wrapper that cooperates with Guile: the calling thread can
   be woken early through its sleep pipe (e.g. for async delivery), and
   the actual `select' runs outside Guile mode so the GC is not blocked
   by it.  Returns like `select', with EINTR when woken early.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, to watch the wakeup fd.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Add the read end of the sleep pipe to the watched set, so a write
     to the pipe interrupts the select.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were woken through the sleep pipe: drain the byte and hide
	 the wakeup fd from the caller's view of the result.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  /* Only the wakeup fired: report it as an interruption.  */
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1688
1689 /* Convenience API for blocking while in guile mode. */
1690
1691 #if SCM_USE_PTHREAD_THREADS
1692
1693 /* It seems reasonable to not run procedures related to mutex and condition
1694 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1695 without it, and (ii) the only potential gain would be GC latency. See
1696 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1697 for a discussion of the pros and cons. */
1698
1699 int
1700 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1701 {
1702 int res = scm_i_pthread_mutex_lock (mutex);
1703 return res;
1704 }
1705
/* Dynwind unwind handler: unlock the pthread mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1711
/* Lock MUTEX now and arrange for it to be unlocked when the current
   dynwind context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1718
/* Wait on COND with MUTEX held.  The mutex is recorded in the thread's
   `held_mutex' field for the duration of the wait — presumably so other
   code can release it on the thread's behalf; confirm against the users
   of `held_mutex' elsewhere in this file.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1731
/* Like scm_pthread_cond_wait, but give up at absolute time WT;
   returns the pthread_cond_timedwait result (e.g. ETIMEDOUT).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1746
1747 #endif
1748
1749 unsigned long
1750 scm_std_usleep (unsigned long usecs)
1751 {
1752 struct timeval tv;
1753 tv.tv_usec = usecs % 1000000;
1754 tv.tv_sec = usecs / 1000000;
1755 scm_std_select (0, NULL, NULL, NULL, &tv);
1756 return tv.tv_sec * 1000000 + tv.tv_usec;
1757 }
1758
1759 unsigned int
1760 scm_std_sleep (unsigned int secs)
1761 {
1762 struct timeval tv;
1763 tv.tv_usec = 0;
1764 tv.tv_sec = secs;
1765 scm_std_select (0, NULL, NULL, NULL, &tv);
1766 return tv.tv_sec;
1767 }
1768
1769 /*** Misc */
1770
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread's SCM handle is kept in its scm_i_thread struct.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1779
1780 static SCM
1781 scm_c_make_list (size_t n, SCM fill)
1782 {
1783 SCM res = SCM_EOL;
1784 while (n-- > 0)
1785 res = scm_cons (fill, res);
1786 return res;
1787 }
1788
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.  The list cells are therefore allocated up
     front and only filled in under the lock.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is not exposed.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate any unused pre-allocated cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1817
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1826
/* C-level predicate: nonzero iff THREAD has exited.  Reuses the Scheme
   procedure's name for validation error reporting.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1837
/* Condition variable initialized in scm_threads_prehistory — its
   waiters are not visible in this part of the file; see the sleep
   machinery.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Nonzero once scm_init_threads has completed.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

/* Recursive mutex used by scm_dynwind_critical_section when its caller
   passes #f; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
1848
/* Enter a critical section for the current dynwind context: lock MUTEX
   (or the global dynwind_critical_section_mutex when MUTEX is #f) and
   block asyncs until the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1857
1858 /*** Initialization */
1859
/* Miscellaneous global lock, initialized in scm_threads_prehistory.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute for creating recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1865
/* Earliest stage of thread-subsystem initialization, run before the
   rest of Guile boots: set up the global mutexes and condition
   variable, then register the calling thread with BASE as its stack
   base.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Prepare the shared recursive-mutex attribute; the critical-section
     mutex below must be recursive.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 (base);
}
1882
/* SMOB type tags for threads, mutexes and condition variables,
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1886
/* Register the thread, mutex and condition-variable SMOB types and
   finish initializing the current (first) thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1910
/* Create the default dynamic state (derived from the current one) and
   protect it from GC for the lifetime of the process.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}
1917
/* Register the Scheme procedures defined in this file, via the
   generated snarf file threads.x.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1923
1924 \f
1925 /* IA64-specific things. */
1926
#ifdef __ia64__
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
/* Return the base address of the IA64 register backing store by
   scanning the process's VM regions for the RSE stack (HP-UX).  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Return the ar.bsp register value recorded in ucontext CTX (HP-UX).  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
/* Return the base address of the IA64 register backing store, as
   exported by glibc (Linux).  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Return the ar.bsp register value recorded in the ucontext pointed to
   by OPAQUE (Linux).  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
1965
1966
1967 /*
1968 Local Variables:
1969 c-file-style: "gnu"
1970 End:
1971 */