/* libguile/threads.c -- Guile thread support.
   Note: scm_with_guile enters GC-active mode via GC_call_with_gc_active.  */
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include <assert.h>
42
43 #include "libguile/validate.h"
44 #include "libguile/root.h"
45 #include "libguile/eval.h"
46 #include "libguile/async.h"
47 #include "libguile/ports.h"
48 #include "libguile/threads.h"
49 #include "libguile/dynwind.h"
50 #include "libguile/iselect.h"
51 #include "libguile/fluids.h"
52 #include "libguile/continuations.h"
53 #include "libguile/gc.h"
54 #include "libguile/init.h"
55 #include "libguile/scmsigs.h"
56 #include "libguile/strings.h"
57 #include "libguile/weaks.h"
58
59 #ifdef __MINGW32__
60 #ifndef ETIMEDOUT
61 # define ETIMEDOUT WSAETIMEDOUT
62 #endif
63 # include <fcntl.h>
64 # include <process.h>
65 # define pipe(fd) _pipe (fd, 256, O_BINARY)
66 #endif /* __MINGW32__ */
67
68 #include <full-read.h>
69
70
71 \f
72
73 /* First some libgc shims. */
74
75 /* Make sure GC_fn_type is defined; it is missing from the public
76 headers of GC 7.1 and earlier. */
77 #ifndef HAVE_GC_FN_TYPE
78 typedef void * (* GC_fn_type) (void *);
79 #endif
80
81
82 /* Now define with_gc_active and with_gc_inactive. */
83
#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer). */

/* Call FUNC (DATA) with this thread's stack excluded from GC scanning,
   so FUNC may block indefinitely; FUNC must not touch GC-managed
   memory while in this mode.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Call FUNC (DATA) with GC scanning of this thread re-enabled, for use
   from within a GC_do_blocking region.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  Both shims therefore just call FUNC
   directly. */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
120
121
122 \f
123 static void
124 to_timespec (SCM t, scm_t_timespec *waittime)
125 {
126 if (scm_is_pair (t))
127 {
128 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
129 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
130 }
131 else
132 {
133 double time = scm_to_double (t);
134 double sec = scm_c_truncate (time);
135
136 waittime->tv_sec = (long) sec;
137 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
138 }
139 }
140
141 \f
142 /*** Queues */
143
144 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
145 the risk of false references leading to unbounded retained space as
146 described in "Bounding Space Usage of Conservative Garbage Collectors",
147 H.J. Boehm, 2001. */
148
/* Make an empty queue data structure.

   A queue is a pair whose cdr is the list of enqueued items (front
   first) and whose car points at the last cell of that list, making
   tail insertion O(1).  An empty queue is (() . ()).  */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}
156
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell is also the head of the list.  */
    SCM_SETCDR (q, c);
  else
    /* Link the new cell after the current last cell.  */
    SCM_SETCDR (SCM_CAR (q), c);
  /* The car always tracks the last cell.  */
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
173
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  /* Walk the item list looking for the cell C itself (eq, not equal).  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C was the last cell, the tail pointer must back up.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, SCM_CDR (c));
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust: clear the unlinked cell's cdr so it cannot keep
	     the rest of the queue alive.  */
	  SCM_SETCDR (c, SCM_EOL);

	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
203
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If that emptied the queue, clear the tail pointer as well.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust: detach the removed cell from the remaining items.  */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
231
232 /*** Thread smob routines */
233
234
235 static int
236 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
237 {
238 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
239 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
240 the struct case, hence we go via a union, and extract according to the
241 size of pthread_t. */
242 union {
243 scm_i_pthread_t p;
244 unsigned short us;
245 unsigned int ui;
246 unsigned long ul;
247 scm_t_uintmax um;
248 } u;
249 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
250 scm_i_pthread_t p = t->pthread;
251 scm_t_uintmax id;
252 u.p = p;
253 if (sizeof (p) == sizeof (unsigned short))
254 id = u.us;
255 else if (sizeof (p) == sizeof (unsigned int))
256 id = u.ui;
257 else if (sizeof (p) == sizeof (unsigned long))
258 id = u.ul;
259 else
260 id = u.um;
261
262 scm_puts ("#<thread ", port);
263 scm_uintprint (id, 10, port);
264 scm_puts (" (", port);
265 scm_uintprint ((scm_t_bits)t, 16, port);
266 scm_puts (")>", port);
267 return 1;
268 }
269
270 \f
271 /*** Blocking on queues. */
272
273 /* See also scm_i_queue_async_cell for how such a block is
274 interrputed.
275 */
276
277 /* Put the current thread on QUEUE and go to sleep, waiting for it to
278 be woken up by a call to 'unblock_from_queue', or to be
279 interrupted. Upon return of this function, the current thread is
280 no longer on QUEUE, even when the sleep has been interrupted.
281
282 The caller of block_self must hold MUTEX. It will be atomically
283 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
284
285 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
286 as MUTEX is needed.
287
288 When WAITTIME is not NULL, the sleep will be aborted at that time.
289
290 The return value of block_self is an errno value. It will be zero
291 when the sleep has been successfully completed by a call to
292 unblock_from_queue, EINTR when it has been interrupted by the
293 delivery of a system async, and ETIMEDOUT when the timeout has
294 expired.
295
296 The system asyncs themselves are not executed by block_self.
297 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    /* NOTE(review): scm_i_setup_sleep apparently reports that the
       sleep must be aborted immediately (a pending async,
       presumably) -- report EINTR without sleeping.  */
    err = EINTR;
  else
    {
      /* Keep asyncs from running while we are queued; the caller runs
         them after block_self returns.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
329
330 /* Wake up the first thread on QUEUE, if any. The awoken thread is
331 returned, or #f if the queue was empty.
332 */
333 static SCM
334 unblock_from_queue (SCM queue)
335 {
336 SCM thread = dequeue (queue);
337 if (scm_is_true (thread))
338 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
339 return thread;
340 }
341
342 \f
343 /* Getting into and out of guile mode.
344 */
345
346 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
347
348 /* When thread-local storage (TLS) is available, a pointer to the
349 current-thread object is kept in TLS. Note that storing the thread-object
350 itself in TLS (rather than a pointer to some malloc'd memory) is not
351 possible since thread objects may live longer than the actual thread they
352 represent. */
353 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
354
355 # define SET_CURRENT_THREAD(_t) scm_i_current_thread = (_t)
356
357 #else /* !SCM_HAVE_THREAD_STORAGE_CLASS */
358
359 /* Key used to retrieve the current thread with `pthread_getspecific ()'. */
360 scm_i_pthread_key_t scm_i_thread_key;
361
362 # define SET_CURRENT_THREAD(_t) \
363 scm_i_pthread_setspecific (scm_i_thread_key, (_t))
364
365 #endif /* !SCM_HAVE_THREAD_STORAGE_CLASS */
366
367
/* Protects ALL_THREADS and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list (via scm_i_thread.next_thread) of every thread
   currently known to Guile.  */
static scm_i_thread *all_threads = NULL;
/* Number of entries in ALL_THREADS.  */
static int thread_count;

/* Dynamic state used as parent when a thread enters Guile without an
   explicit parent (see scm_with_guile).  */
static SCM scm_i_default_dynamic_state;
373
/* Perform first stage of thread initialisation, in non-guile mode:
   allocate and populate the scm_i_thread structure for the calling
   thread, make it the current thread, and link it into ALL_THREADS.
   No SCM objects are created here; that happens in guilify_self_2.
 */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 has built the thread's
     SCM data structures (it resets block_asyncs to 0).  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->critical_section_level = 0;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->current_mark_stack_ptr = NULL;
  t->current_mark_stack_limit = NULL;
  t->canceled = 0;
  t->exited = 0;
  t->guile_mode = 0;

  SET_CURRENT_THREAD (t);

  /* Register with the global thread list under the admin lock.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
439
/* Perform second stage of thread initialisation, in guile mode:
   create the SCM-level objects (thread smob, continuation root,
   dynamic state, join queue) for the current thread.  PARENT is the
   parent dynamic state, or #f for the initial state.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* The SCM-visible handle wrapping T.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* The thread's structures now exist; allow asyncs (guilify_self_1
     started with them blocked).  */
  t->block_asyncs = 0;
}
463
464 \f
465 /*** Fat mutexes */
466
467 /* We implement our own mutex type since we want them to be 'fair', we
468 want to do fancy things while waiting for them (like running
469 asyncs) and we might want to add things that are nice for
470 debugging.
471 */
472
/* A "fat" mutex: the OS-level lock plus the bookkeeping (fairness
   queue, owner tracking, behaviour flags) that Guile layers on top.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;       /* thread handle of the owner, or #f when unlocked */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;
485
486 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
487 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
488
/* Perform thread tear-down, in guile mode: run the cleanup handler,
   mark the thread exited, wake all joiners, and wake any threads
   waiting on mutexes this thread still owns.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Run the user-installed cleanup handler, trapping all throws so
     tear-down always proceeds; its result becomes the exit value.  */
  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on this one.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* T->mutexes is a weak list of the mutexes this thread owns; wake
     one waiter on each so they are not stranded.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      /* The weak car is unbound when the mutex has been collected.  */
      if (!SCM_UNBNDP (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
	  assert (scm_is_eq (m->owner, t->handle));

	  unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
539
/* Final tear-down for thread V: release any lingering mutex, run
   do_thread_exit, then unregister from ALL_THREADS.  Installed as a
   pthread cleanup handler / exit hook.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Re-establish the current-thread pointer; NOTE(review): presumably
     it may already have been cleared by the time this handler runs --
     confirm against pthread key destructor semantics.  */
  SET_CURRENT_THREAD (v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */

  /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
     assume the GC is usable at this point, and notably that thread-local
     storage (TLS) hasn't been deallocated yet.  */
  do_thread_exit (v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  SET_CURRENT_THREAD (NULL);
}
595
596 #ifndef SCM_HAVE_THREAD_STORAGE_CLASS
597
598 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
599
/* Create the pthread key that stores the per-thread scm_i_thread
   pointer.  No key destructor is registered (NULL); tear-down happens
   via on_thread_exit instead.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, NULL);
}
605
606 #endif
607
608 /* Perform any initializations necessary to make the current thread
609 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
610 if necessary.
611
612 BASE is the stack base to use with GC.
613
614 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
615 which case the default dynamic state is used.
616
617 Returns zero when the thread was known to guile already; otherwise
618 return 1.
619
620 Note that it could be the case that the thread was known
621 to Guile, but not in guile mode (because we are within a
622 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
623 be sure. New threads are put into guile mode implicitly. */
624
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
#ifndef SCM_HAVE_THREAD_STORAGE_CLASS
  /* Without TLS, the current-thread key must exist before we can ask
     whether this thread is known.  */
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);
#endif

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
664
665 #if SCM_USE_PTHREAD_THREADS
666
667 #if defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP
668 /* This method for GNU/Linux and perhaps some other systems.
669 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
670 available on them. */
671 #define HAVE_GET_THREAD_STACK_BASE
672
/* Return the GC stack base of the calling thread, via
   pthread_getattr_np/pthread_attr_getstack.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* Sanity check: a local variable of this very frame must lie within
     the reported extent; if not, fall back to the GC's recorded stack
     bottom.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return (SCM_STACKITEM *) GC_stackbottom;
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}
702
703 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
704 /* This method for MacOS X.
705 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
706 but as of 2006 there's nothing obvious at apple.com. */
707 #define HAVE_GET_THREAD_STACK_BASE
/* Return the calling thread's stack base as reported directly by the
   platform (MacOS X).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}
713
714 #elif defined (__MINGW32__)
715 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
716 in any thread. We don't like hard-coding the name of a system, but there
717 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
718 work. */
719 #define HAVE_GET_THREAD_STACK_BASE
/* Return the stack base recorded by libgc; per the note above, this
   works from any thread on mingw.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return (SCM_STACKITEM *) GC_stackbottom;
}
725
726 #endif /* pthread methods of get_thread_stack_base */
727
728 #else /* !SCM_USE_PTHREAD_THREADS */
729
730 #define HAVE_GET_THREAD_STACK_BASE
731
/* Without pthreads there is only the main thread, whose stack bottom
   libgc records in GC_stackbottom.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return (SCM_STACKITEM *) GC_stackbottom;
}
737
738 #endif /* !SCM_USE_PTHREAD_THREADS */
739
740 #ifdef HAVE_GET_THREAD_STACK_BASE
741
/* Public entry point: initialize Guile for the calling thread, using
   get_thread_stack_base () as the GC stack base and the default
   dynamic state as parent.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}
748
749 #endif
750
/* Run FUNC (DATA) in guile mode and return its result, guilifying the
   calling thread first if necessary, with the default dynamic state
   as parent.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
757
/* Pthread cleanup handler: fully tear down the current thread.
   Installed around the body of newly-guilified threads in
   scm_i_with_guile_and_parent.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}
763
/* Argument package for with_guile_trampoline.  */
struct with_guile_trampoline_args
{
  GC_fn_type func;
  void *data;
};

/* Trampoline handed to with_gc_active: unpack the args and run the
   user function inside a continuation barrier.  */
static void *
with_guile_trampoline (void *data)
{
  struct with_guile_trampoline_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}
777
/* Run FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state for a freshly guilified thread, and return FUNC's result.
   Handles three cases: brand-new thread, thread already in guile
   mode, and thread re-entering guile mode from scm_without_guile.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  SCM_STACKITEM base_item;

  new_thread = scm_i_init_thread_for_guile (&base_item, parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      /* Guarantee tear-down even if FUNC exits via pthread cancel.  */
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (func, data);
    }
  else
    {
      struct with_guile_trampoline_args args;
      args.func = func;
      args.data = data;

      /* We are not in Guile mode, either because we are not within a
	 scm_with_guile, or because we are within a scm_without_guile.

	 This call to scm_with_guile() could happen from anywhere on the
	 stack, and in particular lower on the stack than when it was
	 when this thread was first guilified.  Thus, `base' must be
	 updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (&base_item) < t->base)
	t->base = SCM_STACK_PTR (&base_item);
#else
      if (SCM_STACK_PTR (&base_item) > t->base)
	t->base = SCM_STACK_PTR (&base_item);
#endif

      t->guile_mode = 1;
      /* Re-activate GC for this thread for the duration of FUNC.  */
      res = with_gc_active (with_guile_trampoline, &args);
      t->guile_mode = 0;
    }
  return res;
}
832
833 void *
834 scm_without_guile (void *(*func)(void *), void *data)
835 {
836 void *result;
837 scm_i_thread *t = SCM_I_CURRENT_THREAD;
838
839 if (t->guile_mode)
840 {
841 SCM_I_CURRENT_THREAD->guile_mode = 0;
842 result = with_gc_inactive (func, data);
843 SCM_I_CURRENT_THREAD->guile_mode = 1;
844 }
845 else
846 /* Otherwise we're not in guile mode, so nothing to do. */
847 result = func (data);
848
849 return result;
850 }
851
852 \f
853 /*** Thread creation */
854
/* Handshake data shared between scm_call_with_new_thread and the new
   thread while it starts up.  MUTEX/COND synchronize handing THREAD
   back to the parent; the struct lives on the parent's stack.  */
typedef struct {
  SCM parent;      /* parent dynamic state for the new thread */
  SCM thunk;       /* thunk to run in the new thread */
  SCM handler;     /* catch handler, or SCM_UNDEFINED */
  SCM thread;      /* out: the new thread's handle */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
863
/* Body of a thread created by scm_call_with_new_thread, run in guile
   mode: report back to the parent, then run the thunk (under the
   handler, if given) and store its result as the exit value.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy what we need out of DATA; it lives on the parent's stack and
     becomes invalid once we signal below.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent and wake it up.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  /* Trigger a call to `on_thread_exit ()'. */
  pthread_exit (NULL);

  return 0;
}
888
/* Start routine passed to pthread_create: detach ourselves (nobody
   pthread_joins Guile threads) and enter guile mode with the
   requested parent dynamic state.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
897
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* DATA lives on this stack frame; wait until the child has stored
     its thread object and no longer needs DATA before returning.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
941
/* C-level analogue of launch_data, used by scm_spawn_thread: the body
   and handler are C callbacks rather than SCM procedures.  */
typedef struct {
  SCM parent;                   /* parent dynamic state */
  scm_t_catch_body body;        /* C body to run in the new thread */
  void *body_data;
  scm_t_catch_handler handler;  /* C catch handler, or NULL */
  void *handler_data;
  SCM thread;                   /* out: the new thread's handle */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
952
/* Body of a thread created by scm_spawn_thread, run in guile mode:
   report back to the parent, then run BODY (under HANDLER, if given)
   and store its result as the exit value.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy everything out of DATA; it lives on the parent's stack and
     becomes invalid once we signal below.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent and wake it up.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
977
/* Start routine passed to pthread_create by scm_spawn_thread: detach
   and enter guile mode with the requested parent dynamic state.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
986
/* Spawn a new Guile thread running BODY (BODY_DATA), with HANDLER (if
   non-NULL) catching all throws, and return the new thread's handle.
   Blocks until the new thread has started up.  Signals a system error
   if the thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* DATA lives on this stack frame; wait until the child has stored
     its thread object before returning.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1017
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Returns #t when the underlying yield call reports success.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1026
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  /* The CANCELED flag, checked under the admin mutex, makes repeated
     cancellation a no-op.  */
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the admin mutex before cancelling: the target's
	 tear-down takes the same mutex (see do_thread_exit).  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1051
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  /* PROC must be a thunk, or #f to clear the handler.  */
  SCM_VALIDATE_THREAD (1, thread);
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Silently ignore the request once the thread has exited or been
     cancelled -- its cleanup handler would never run.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1075
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* Report #f for exited/cancelled threads, whose handler is moot.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1094
/* Wait (forever) for THREAD to terminate and return its exit value;
   equivalent to join-thread with no timeout.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1099
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL, if given, is what we return when the timeout expires
     before the thread terminates.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Sleep on the target's join queue until it exits, the timeout
	 expires, or an async interrupts us; run asyncs (SCM_TICK)
	 between sleeps.  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1166
1167 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1168 (SCM obj),
1169 "Return @code{#t} if @var{obj} is a thread.")
1170 #define FUNC_NAME s_scm_thread_p
1171 {
1172 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1173 }
1174 #undef FUNC_NAME
1175
1176
1177 static size_t
1178 fat_mutex_free (SCM mx)
1179 {
1180 fat_mutex *m = SCM_MUTEX_DATA (mx);
1181 scm_i_pthread_mutex_destroy (&m->lock);
1182 return 0;
1183 }
1184
1185 static int
1186 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1187 {
1188 fat_mutex *m = SCM_MUTEX_DATA (mx);
1189 scm_puts ("#<mutex ", port);
1190 scm_uintprint ((scm_t_bits)m, 16, port);
1191 scm_puts (">", port);
1192 return 1;
1193 }
1194
/* Allocate and initialize a new fat mutex and wrap it in a smob.
   RECURSIVE, UNCHECKED_UNLOCK and EXTERNAL_UNLOCK set the behavior
   flags corresponding to the `make-mutex' options.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Give `waiting' a valid SCM value before the smob is created so the
     GC never scans an uninitialized field; the real queue is allocated
     afterwards since make_queue can itself allocate.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1215
1216 SCM scm_make_mutex (void)
1217 {
1218 return scm_make_mutex_with_flags (SCM_EOL);
1219 }
1220
/* Option symbols accepted by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1224
1225 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1226 (SCM flags),
1227 "Create a new mutex. ")
1228 #define FUNC_NAME s_scm_make_mutex_with_flags
1229 {
1230 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1231
1232 SCM ptr = flags;
1233 while (! scm_is_null (ptr))
1234 {
1235 SCM flag = SCM_CAR (ptr);
1236 if (scm_is_eq (flag, unchecked_unlock_sym))
1237 unchecked_unlock = 1;
1238 else if (scm_is_eq (flag, allow_external_unlock_sym))
1239 external_unlock = 1;
1240 else if (scm_is_eq (flag, recursive_sym))
1241 recursive = 1;
1242 else
1243 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1244 ptr = SCM_CDR (ptr);
1245 }
1246 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1247 }
1248 #undef FUNC_NAME
1249
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1258
1259 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1260
/* Lock MUTEX on behalf of OWNER (defaulting to the current thread),
   waiting at most until the absolute time TIMEOUT (NULL means wait
   forever).  On return *RET is 1 if the lock was obtained, else 0.
   Returns #f on success, or a pair (ERROR-KEY . MESSAGE) describing an
   exception the caller should throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Currently unlocked: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small.  */
	      t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The owner exited without unlocking: steal the lock but
	     report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relocking by the current owner: allowed for recursive
	     mutexes, an error otherwise.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: fail if the absolute timeout has
	     already passed, otherwise block on the wait queue and
	     retry from the top.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  /* Release the lock around SCM_TICK so pending asyncs can
	     run before we try again.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1344
1345 SCM scm_lock_mutex (SCM mx)
1346 {
1347 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1348 }
1349
1350 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1351 (SCM m, SCM timeout, SCM owner),
1352 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1353 "blocks until the mutex becomes available. The function returns when "
1354 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1355 "a thread already owns will succeed right away and will not block the "
1356 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1357 #define FUNC_NAME s_scm_lock_mutex_timed
1358 {
1359 SCM exception;
1360 int ret = 0;
1361 scm_t_timespec cwaittime, *waittime = NULL;
1362
1363 SCM_VALIDATE_MUTEX (1, m);
1364
1365 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1366 {
1367 to_timespec (timeout, &cwaittime);
1368 waittime = &cwaittime;
1369 }
1370
1371 exception = fat_mutex_lock (m, waittime, owner, &ret);
1372 if (!scm_is_false (exception))
1373 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1374 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1375 }
1376 #undef FUNC_NAME
1377
/* Lock MUTEX for the extent of the current dynwind context: it is
   locked on (re)entry and unlocked on exit, including non-local exit.
   The unwind handler is registered before the rewind handler runs so
   the mutex is never left locked across a throw.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1386
1387 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1388 (SCM mutex),
1389 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1390 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1391 #define FUNC_NAME s_scm_try_mutex
1392 {
1393 SCM exception;
1394 int ret = 0;
1395 scm_t_timespec cwaittime, *waittime = NULL;
1396
1397 SCM_VALIDATE_MUTEX (1, mutex);
1398
1399 to_timespec (scm_from_int(0), &cwaittime);
1400 waittime = &cwaittime;
1401
1402 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1403 if (!scm_is_false (exception))
1404 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1405 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1406 }
1407 #undef FUNC_NAME
1408
1409 /*** Fat condition variables */
1410
/* A "fat" condition variable: waiters are parked on a Guile-level
   queue (see block_self/unblock_from_queue).  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1418
/* Unlock MUTEX, which must normally be held by the calling thread (the
   `unchecked_unlock' and `allow_external_unlock' flags relax this).
   When COND is given, atomically wait on it after releasing the mutex,
   for at most WAITTIME (NULL = forever); when RELOCK is nonzero the
   mutex is re-acquired, on behalf of its previous owner, before
   returning from the wait.  Returns 1 on success, 0 when a timed wait
   timed out.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  /* Not locked at all: an error unless `unchecked-unlock' was
	     requested at creation time.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  /* Locked, but by some other thread.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Drop one lock level; on full release, remove MUTEX from the
	     thread's held list and hand ownership to the next waiter.  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      t->mutexes = scm_delq_x (mutex, t->mutexes);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  t->block_asyncs++;

	  /* Wait on the condition variable, using m->lock as the
	     protecting mutex (it is released during the wait).  */
	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  t->mutexes = scm_delq_x (mutex, t->mutexes);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1521
1522 SCM scm_unlock_mutex (SCM mx)
1523 {
1524 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1525 }
1526
1527 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1528 (SCM mx, SCM cond, SCM timeout),
1529 "Unlocks @var{mutex} if the calling thread owns the lock on "
1530 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1531 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1532 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1533 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1534 "with a call to @code{unlock-mutex}. Only the last call to "
1535 "@code{unlock-mutex} will actually unlock the mutex. ")
1536 #define FUNC_NAME s_scm_unlock_mutex_timed
1537 {
1538 scm_t_timespec cwaittime, *waittime = NULL;
1539
1540 SCM_VALIDATE_MUTEX (1, mx);
1541 if (! (SCM_UNBNDP (cond)))
1542 {
1543 SCM_VALIDATE_CONDVAR (2, cond);
1544
1545 if (! (SCM_UNBNDP (timeout)))
1546 {
1547 to_timespec (timeout, &cwaittime);
1548 waittime = &cwaittime;
1549 }
1550 }
1551
1552 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1553 }
1554 #undef FUNC_NAME
1555
1556 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1557 (SCM obj),
1558 "Return @code{#t} if @var{obj} is a mutex.")
1559 #define FUNC_NAME s_scm_mutex_p
1560 {
1561 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1562 }
1563 #undef FUNC_NAME
1564
1565 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1566 (SCM mx),
1567 "Return the thread owning @var{mx}, or @code{#f}.")
1568 #define FUNC_NAME s_scm_mutex_owner
1569 {
1570 SCM owner;
1571 fat_mutex *m = NULL;
1572
1573 SCM_VALIDATE_MUTEX (1, mx);
1574 m = SCM_MUTEX_DATA (mx);
1575 scm_i_pthread_mutex_lock (&m->lock);
1576 owner = m->owner;
1577 scm_i_pthread_mutex_unlock (&m->lock);
1578
1579 return owner;
1580 }
1581 #undef FUNC_NAME
1582
1583 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1584 (SCM mx),
1585 "Return the lock level of mutex @var{mx}.")
1586 #define FUNC_NAME s_scm_mutex_level
1587 {
1588 SCM_VALIDATE_MUTEX (1, mx);
1589 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1590 }
1591 #undef FUNC_NAME
1592
1593 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1594 (SCM mx),
1595 "Returns @code{#t} if the mutex @var{mx} is locked.")
1596 #define FUNC_NAME s_scm_mutex_locked_p
1597 {
1598 SCM_VALIDATE_MUTEX (1, mx);
1599 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1600 }
1601 #undef FUNC_NAME
1602
1603 static int
1604 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1605 {
1606 fat_cond *c = SCM_CONDVAR_DATA (cv);
1607 scm_puts ("#<condition-variable ", port);
1608 scm_uintprint ((scm_t_bits)c, 16, port);
1609 scm_puts (">", port);
1610 return 1;
1611 }
1612
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Install a valid SCM in `waiting' before creating the smob so the
     GC never scans an uninitialized field; the real queue is allocated
     afterwards, since make_queue can itself allocate.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1628
1629 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1630 (SCM cv, SCM mx, SCM t),
1631 "Wait until @var{cond-var} has been signalled. While waiting, "
1632 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1633 "is locked again when this function returns. When @var{time} is given, "
1634 "it specifies a point in time where the waiting should be aborted. It "
1635 "can be either a integer as returned by @code{current-time} or a pair "
1636 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1637 "mutex is locked and @code{#f} is returned. When the condition "
1638 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1639 "is returned. ")
1640 #define FUNC_NAME s_scm_timed_wait_condition_variable
1641 {
1642 scm_t_timespec waittime, *waitptr = NULL;
1643
1644 SCM_VALIDATE_CONDVAR (1, cv);
1645 SCM_VALIDATE_MUTEX (2, mx);
1646
1647 if (!SCM_UNBNDP (t))
1648 {
1649 to_timespec (t, &waittime);
1650 waitptr = &waittime;
1651 }
1652
1653 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1654 }
1655 #undef FUNC_NAME
1656
/* Wake up at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1662
1663 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1664 (SCM cv),
1665 "Wake up one thread that is waiting for @var{cv}")
1666 #define FUNC_NAME s_scm_signal_condition_variable
1667 {
1668 SCM_VALIDATE_CONDVAR (1, cv);
1669 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1670 return SCM_BOOL_T;
1671 }
1672 #undef FUNC_NAME
1673
1674 static void
1675 fat_cond_broadcast (fat_cond *c)
1676 {
1677 while (scm_is_true (unblock_from_queue (c->waiting)))
1678 ;
1679 }
1680
1681 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1682 (SCM cv),
1683 "Wake up all threads that are waiting for @var{cv}. ")
1684 #define FUNC_NAME s_scm_broadcast_condition_variable
1685 {
1686 SCM_VALIDATE_CONDVAR (1, cv);
1687 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1688 return SCM_BOOL_T;
1689 }
1690 #undef FUNC_NAME
1691
1692 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1693 (SCM obj),
1694 "Return @code{#t} if @var{obj} is a condition variable.")
1695 #define FUNC_NAME s_scm_condition_variable_p
1696 {
1697 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1698 }
1699 #undef FUNC_NAME
1700
1701
1702 \f
1703 /*** Select */
1704
/* Argument/result bundle for do_std_select, passed through the void *
   parameter of scm_without_guile.  */
struct select_args
{
  int             nfds;
  SELECT_TYPE     *read_fds;
  SELECT_TYPE     *write_fds;
  SELECT_TYPE     *except_fds;
  struct timeval  *timeout;

  int             result;        /* return value of select(2) */
  int             errno_value;   /* errno captured right after select */
};
1716
1717 static void *
1718 do_std_select (void *args)
1719 {
1720 struct select_args *select_args;
1721
1722 select_args = (struct select_args *) args;
1723
1724 select_args->result =
1725 select (select_args->nfds,
1726 select_args->read_fds, select_args->write_fds,
1727 select_args->except_fds, select_args->timeout);
1728 select_args->errno_value = errno;
1729
1730 return NULL;
1731 }
1732
/* A select(2) variant that cooperates with Guile threads: the actual
   select runs outside guile mode, the calling thread's sleep pipe is
   added to the read set so the thread can be woken up, and a wakeup
   through that pipe is reported to the caller as EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set to hold the wakeup fd.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this thread as sleeping on its own pipe, retrying (and
     running asyncs via SCM_TICK) until setup succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were woken through the sleep pipe: drain the byte and hide
	 the wakeup fd from the caller.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  /* Only the wakeup fd fired: report an interruption.  */
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1790
1791 /* Convenience API for blocking while in guile mode. */
1792
1793 #if SCM_USE_PTHREAD_THREADS
1794
1795 /* It seems reasonable to not run procedures related to mutex and condition
1796 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1797 without it, and (ii) the only potential gain would be GC latency. See
1798 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1799 for a discussion of the pros and cons. */
1800
1801 int
1802 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1803 {
1804 int res = scm_i_pthread_mutex_lock (mutex);
1805 return res;
1806 }
1807
1808 static void
1809 do_unlock (void *data)
1810 {
1811 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1812 }
1813
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left, including on non-local exit.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1820
/* Wait on COND with MUTEX held.  The mutex is recorded in
   `t->held_mutex' for the duration of the wait — NOTE(review):
   presumably so that thread-exit/cancellation code can release it on
   this thread's behalf; confirm against the thread teardown path.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1833
/* Like scm_pthread_cond_wait, but give up at the absolute time WT
   (returns ETIMEDOUT in that case, per pthread_cond_timedwait
   semantics).  `t->held_mutex' is maintained as in
   scm_pthread_cond_wait.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1848
1849 #endif
1850
1851 unsigned long
1852 scm_std_usleep (unsigned long usecs)
1853 {
1854 struct timeval tv;
1855 tv.tv_usec = usecs % 1000000;
1856 tv.tv_sec = usecs / 1000000;
1857 scm_std_select (0, NULL, NULL, NULL, &tv);
1858 return tv.tv_sec * 1000000 + tv.tv_usec;
1859 }
1860
1861 unsigned int
1862 scm_std_sleep (unsigned int secs)
1863 {
1864 struct timeval tv;
1865 tv.tv_usec = 0;
1866 tv.tv_sec = secs;
1867 scm_std_select (0, NULL, NULL, NULL, &tv);
1868 return tv.tv_sec;
1869 }
1870
1871 /*** Misc */
1872
1873 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1874 (void),
1875 "Return the thread that called this function.")
1876 #define FUNC_NAME s_scm_current_thread
1877 {
1878 return SCM_I_CURRENT_THREAD->handle;
1879 }
1880 #undef FUNC_NAME
1881
1882 static SCM
1883 scm_c_make_list (size_t n, SCM fill)
1884 {
1885 SCM res = SCM_EOL;
1886 while (n-- > 0)
1887 res = scm_cons (fill, res);
1888 return res;
1889 }
1890
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  /* Fill the pre-allocated list in place, skipping the internal
     signal-delivery thread, then truncate it: fewer than N cells may
     have been used.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1919
1920 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1921 (SCM thread),
1922 "Return @code{#t} iff @var{thread} has exited.\n")
1923 #define FUNC_NAME s_scm_thread_exited_p
1924 {
1925 return scm_from_bool (scm_c_thread_exited_p (thread));
1926 }
1927 #undef FUNC_NAME
1928
1929 int
1930 scm_c_thread_exited_p (SCM thread)
1931 #define FUNC_NAME s_scm_thread_exited_p
1932 {
1933 scm_i_thread *t;
1934 SCM_VALIDATE_THREAD (1, thread);
1935 t = SCM_I_THREAD_DATA (thread);
1936 return t->exited;
1937 }
1938 #undef FUNC_NAME
1939
/* Condition variable initialized in scm_threads_prehistory.
   NOTE(review): its users are outside this excerpt; confirm against
   the sleep/wakeup code.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Nonzero once scm_init_threads has completed.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Default (recursive) mutex used by scm_dynwind_critical_section when
   it is passed #f.  */
static SCM dynwind_critical_section_mutex;
1949
/* Enter a critical section for the extent of the current dynwind
   context: lock MUTEX (or the shared recursive default when MUTEX is
   #f) and block asyncs; both are undone when the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1958
1959 /*** Initialization */
1960
/* Miscellaneous global mutex, initialized in scm_threads_prehistory.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute object for recursive mutexes; set up in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1966
/* Earliest-stage thread initialization: set up the global mutexes and
   condition variable, then register the calling (main) thread via
   guilify_self_1.  BASE is the main thread's stack base.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Attribute object shared by all recursive pthread mutexes.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex is created recursive (see
     SCM_CRITICAL_SECTION_START/END).  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 (base);
}
1983
/* Smob type tags, assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1987
/* Register the thread, mutex and condition-variable smob types and
   finish initializing the current (main) thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2008
2009 void
2010 scm_init_threads_default_dynamic_state ()
2011 {
2012 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2013 scm_i_default_dynamic_state = state;
2014 }
2015
/* Register the Scheme procedures defined in this file; threads.x is
   generated from the SCM_DEFINE forms above.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2021
2022 \f
2023 /* IA64-specific things. */
2024
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* Return the base address of the ia64 register backing store by
   scanning this process's memory regions for the RSE stack.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}

/* Return the saved ar.bsp register from ucontext CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* On GNU/Linux, glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}

/* Return the saved ar.bsp register from the ucontext pointed to by
   OPAQUE.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
2063
2064
2065 /*
2066 Local Variables:
2067 c-file-style: "gnu"
2068 End:
2069 */