Use TLS when available for `SCM_I_CURRENT_THREAD'.
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32 #include <assert.h>
33
34 #ifdef HAVE_STRING_H
35 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
36 #endif
37
38 #if HAVE_SYS_TIME_H
39 #include <sys/time.h>
40 #endif
41
42 #include "libguile/validate.h"
43 #include "libguile/root.h"
44 #include "libguile/eval.h"
45 #include "libguile/async.h"
46 #include "libguile/ports.h"
47 #include "libguile/threads.h"
48 #include "libguile/dynwind.h"
49 #include "libguile/iselect.h"
50 #include "libguile/fluids.h"
51 #include "libguile/continuations.h"
52 #include "libguile/gc.h"
53 #include "libguile/init.h"
54 #include "libguile/scmsigs.h"
55 #include "libguile/strings.h"
56 #include "libguile/weaks.h"
57
58 #ifdef __MINGW32__
59 #ifndef ETIMEDOUT
60 # define ETIMEDOUT WSAETIMEDOUT
61 #endif
62 # include <fcntl.h>
63 # include <process.h>
64 # define pipe(fd) _pipe (fd, 256, O_BINARY)
65 #endif /* __MINGW32__ */
66
67 #include <full-read.h>
68
69 \f
70 static void
71 to_timespec (SCM t, scm_t_timespec *waittime)
72 {
73 if (scm_is_pair (t))
74 {
75 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
76 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
77 }
78 else
79 {
80 double time = scm_to_double (t);
81 double sec = scm_c_truncate (time);
82
83 waittime->tv_sec = (long) sec;
84 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
85 }
86 }
87
88 \f
89 /*** Queues */
90
91 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
92 the risk of false references leading to unbounded retained space as
93 described in "Bounding Space Usage of Conservative Garbage Collectors",
94 H.J. Boehm, 2001. */
95
96 /* Make an empty queue data structure.
97 */
98 static SCM
99 make_queue ()
100 {
101 return scm_cons (SCM_EOL, SCM_EOL);
102 }
103
104 /* Put T at the back of Q and return a handle that can be used with
105 remqueue to remove T from Q again.
106 */
107 static SCM
108 enqueue (SCM q, SCM t)
109 {
110 SCM c = scm_cons (t, SCM_EOL);
111 SCM_CRITICAL_SECTION_START;
112 if (scm_is_null (SCM_CDR (q)))
113 SCM_SETCDR (q, c);
114 else
115 SCM_SETCDR (SCM_CAR (q), c);
116 SCM_SETCAR (q, c);
117 SCM_CRITICAL_SECTION_END;
118 return c;
119 }
120
121 /* Remove the element that the handle C refers to from the queue Q. C
122 must have been returned from a call to enqueue. The return value
123 is zero when the element referred to by C has already been removed.
124 Otherwise, 1 is returned.
125 */
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  Return 0 when the
   element referred to by C has already been removed, 1 otherwise.  */
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  /* Walk the element list (cdr of Q) looking for the exact cell C.  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C is the tail cell, move the tail pointer back.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, SCM_CDR (c));
	  /* Splice C out of the list.  */
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust: clear C's cdr so the detached cell cannot keep
	     the rest of the queue alive via a false reference.  */
	  SCM_SETCDR (c, SCM_EOL);

	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
150
151 /* Remove the front-most element from the queue Q and return it.
152 Return SCM_BOOL_F when Q is empty.
153 */
154 static SCM
155 dequeue (SCM q)
156 {
157 SCM c;
158 SCM_CRITICAL_SECTION_START;
159 c = SCM_CDR (q);
160 if (scm_is_null (c))
161 {
162 SCM_CRITICAL_SECTION_END;
163 return SCM_BOOL_F;
164 }
165 else
166 {
167 SCM_SETCDR (q, SCM_CDR (c));
168 if (scm_is_null (SCM_CDR (q)))
169 SCM_SETCAR (q, SCM_EOL);
170 SCM_CRITICAL_SECTION_END;
171
172 /* GC-robust */
173 SCM_SETCDR (c, SCM_EOL);
174
175 return SCM_CAR (c);
176 }
177 }
178
179 /*** Thread smob routines */
180
181
182 static int
183 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
184 {
185 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
186 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
187 the struct case, hence we go via a union, and extract according to the
188 size of pthread_t. */
189 union {
190 scm_i_pthread_t p;
191 unsigned short us;
192 unsigned int ui;
193 unsigned long ul;
194 scm_t_uintmax um;
195 } u;
196 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
197 scm_i_pthread_t p = t->pthread;
198 scm_t_uintmax id;
199 u.p = p;
200 if (sizeof (p) == sizeof (unsigned short))
201 id = u.us;
202 else if (sizeof (p) == sizeof (unsigned int))
203 id = u.ui;
204 else if (sizeof (p) == sizeof (unsigned long))
205 id = u.ul;
206 else
207 id = u.um;
208
209 scm_puts ("#<thread ", port);
210 scm_uintprint (id, 10, port);
211 scm_puts (" (", port);
212 scm_uintprint ((scm_t_bits)t, 16, port);
213 scm_puts (")>", port);
214 return 1;
215 }
216
217 \f
218 /*** Blocking on queues. */
219
220 /* See also scm_i_queue_async_cell for how such a block is
221 interrputed.
222 */
223
224 /* Put the current thread on QUEUE and go to sleep, waiting for it to
225 be woken up by a call to 'unblock_from_queue', or to be
226 interrupted. Upon return of this function, the current thread is
227 no longer on QUEUE, even when the sleep has been interrupted.
228
229 The caller of block_self must hold MUTEX. It will be atomically
230 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
231
232 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
233 as MUTEX is needed.
234
235 When WAITTIME is not NULL, the sleep will be aborted at that time.
236
237 The return value of block_self is an errno value. It will be zero
238 when the sleep has been successfully completed by a call to
239 unblock_from_queue, EINTR when it has been interrupted by the
240 delivery of a system async, and ETIMEDOUT when the timeout has
241 expired.
242
243 The system asyncs themselves are not executed by block_self.
244 */
/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be interrupted.
   Caller must hold MUTEX; it is atomically released while sleeping.
   Returns 0 on normal wakeup, EINTR when interrupted by a system
   async, ETIMEDOUT when WAITTIME expired.  Asyncs themselves are NOT
   run here.  */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* Register the sleep so asyncs can interrupt it.  A nonzero return
     means an async is already pending: report EINTR without ever
     sleeping.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while we are queued, so an interrupt cannot run
	 Scheme code in the window between enqueue and the wait.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.  */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
276
277 /* Wake up the first thread on QUEUE, if any. The awoken thread is
278 returned, or #f if the queue was empty.
279 */
280 static SCM
281 unblock_from_queue (SCM queue)
282 {
283 SCM thread = dequeue (queue);
284 if (scm_is_true (thread))
285 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
286 return thread;
287 }
288
289 \f
290 /* Getting into and out of guile mode.
291 */
292
#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS.  Note that storing the
   thread-object itself in TLS (rather than a pointer to some malloc'd
   memory) is not possible since thread objects may live longer than
   the actual thread they represent.  */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

/* Record T as the calling thread's current-thread object.  */
# define SET_CURRENT_THREAD(_t) scm_i_current_thread = (_t)

#else /* !SCM_HAVE_THREAD_STORAGE_CLASS */

/* Key used to retrieve the current thread with `pthread_getspecific ()'.  */
scm_i_pthread_key_t scm_i_thread_key;

# define SET_CURRENT_THREAD(_t)					\
  scm_i_pthread_setspecific (scm_i_thread_key, (_t))

#endif /* !SCM_HAVE_THREAD_STORAGE_CLASS */


/* Protects ALL_THREADS and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list (via scm_i_thread.next_thread) of every live
   guilified thread, and its length.  */
static scm_i_thread *all_threads = NULL;
static int thread_count;

/* Dynamic state used as the parent for threads entering Guile without
   an explicit parent.  */
static SCM scm_i_default_dynamic_state;
320
321 /* Perform first stage of thread initialisation, in non-guile mode.
322 */
/* Perform first stage of thread initialisation, in non-guile mode:
   allocate and populate the scm_i_thread struct for the calling
   thread, install it as the current thread, and link it onto the
   global thread list.  BASE is the stack base for GC scanning.  */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes setting up the
     Scheme-visible parts of the thread.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->critical_section_level = 0;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  /* The sleep pipe is used to interrupt blocking system calls.  */
  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->current_mark_stack_ptr = NULL;
  t->current_mark_stack_limit = NULL;
  t->canceled = 0;
  t->exited = 0;
  t->guile_mode = 0;

  SET_CURRENT_THREAD (t);

  /* Publish the new thread on the global list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
387
388 /* Perform second stage of thread initialisation, in guile mode.
389 */
/* Perform second stage of thread initialisation, in guile mode:
   create the Scheme-visible thread object and dynamic state.  PARENT
   is the dynamic state to inherit from, or #f for the initial
   state.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* Wrap the C thread structure in a smob; this is the value
     scm_current_thread returns.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Setup is complete; allow asyncs from now on (blocked since
     guilify_self_1).  */
  t->block_asyncs = 0;
}
411
412 \f
413 /*** Fat mutexes */
414
415 /* We implement our own mutex type since we want them to be 'fair', we
416 want to do fancy things while waiting for them (like running
417 asyncs) and we might want to add things that are nice for
418 debugging.
419 */
420
/* A "fat" mutex: a pthread mutex guarding explicit owner/level state,
   plus a queue of waiting threads, so locking can be fair and can run
   asyncs while waiting.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all fields below */
  SCM owner;                    /* thread holding the mutex, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

/* Type test and data accessor for mutex smobs.  */
#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
436
437 /* Perform thread tear-down, in guile mode.
438 */
/* Perform thread tear-down, in guile mode: run the cleanup handler,
   mark the thread exited, wake joiners, and wake any thread waiting
   on a mutex this thread still holds.  V is the scm_i_thread.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it runs at most once, then run it
	 under a catch-all so tear-down always proceeds.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* T->mutexes is a weak list of mutexes this thread still owns; give
     each one's waiters a chance to notice the abandonment.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      /* A weak car may have been nulled out by GC.  */
      if (!SCM_UNBNDP (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);
	  unblock_from_queue (m->waiting);
	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
482
/* pthread exit/cancellation handler: finish tearing down the thread
   and remove it from the global list.  V is the scm_i_thread.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Re-install the current thread, in case TLS/the pthread key was
     already cleared during thread wind-down.  */
  SET_CURRENT_THREAD (v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */

  /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
     assume the GC is usable at this point, and notably that thread-local
     storage (TLS) hasn't been deallocated yet.  */
  do_thread_exit (v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust: don't let the dead thread's link keep the rest
	   of the list alive via a false reference.  */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  SET_CURRENT_THREAD (NULL);
}
538
#ifndef SCM_HAVE_THREAD_STORAGE_CLASS

/* Without TLS, the current thread lives in a pthread key that must be
   created exactly once, guarded by this once-control.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create the pthread key used by SET_CURRENT_THREAD /
   SCM_I_CURRENT_THREAD.  Called via scm_i_pthread_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, NULL);
}

#endif
550
551 /* Perform any initializations necessary to bring the current thread
552 into guile mode, initializing Guile itself, if necessary.
553
554 BASE is the stack base to use with GC.
555
556 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
557 which case the default dynamic state is used.
558
559 Return zero when the thread was in guile mode already; otherwise
560 return 1.
561 */
562
/* Bring the current thread into guile mode, initializing Guile itself
   if necessary.  BASE is the stack base to use with GC; PARENT is the
   dynamic state to inherit, or #f for the default.  Return 1 when the
   thread newly entered guile mode, 0 when it was already there.  */
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

#ifndef SCM_HAVE_THREAD_STORAGE_CLASS
  /* The pthread key must exist before SCM_I_CURRENT_THREAD below.  */
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);
#endif

  t = SCM_I_CURRENT_THREAD;
  if (t == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 A user call to scm_with_guile() will lead us to here.  This could
	 happen from anywhere on the stack, and in particular lower on the
	 stack than when it was when this thread was first guilified.  Thus,
	 `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (base < t->base)
	t->base = base;
#else
      if (base > t->base)
	t->base = base;
#endif

      t->top = NULL;
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
625
#if SCM_USE_PTHREAD_THREADS

#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
#define HAVE_GET_THREAD_STACK_BASE

/* Return the base (GC-scanning end) of the calling thread's stack,
   derived from the pthread stack attributes.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* Sanity check: a local (ATTR) must lie within the reported stack
     range; if not, fall back to the GC's notion of the stack bottom.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return (SCM_STACKITEM *) GC_stackbottom;
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#elif HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif defined (__MINGW32__)
/* This method for mingw.  In mingw the basic scm_get_stack_base can be used
   in any thread.  We don't like hard-coding the name of a system, but there
   doesn't seem to be a cleaner way of knowing scm_get_stack_base can
   work.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return (SCM_STACKITEM *) GC_stackbottom;
}

#endif /* pthread methods of get_thread_stack_base */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

/* Single-threaded build: the GC's recorded stack bottom is the stack
   base.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return (SCM_STACKITEM *) GC_stackbottom;
}

#endif /* !SCM_USE_PTHREAD_THREADS */
700
#ifdef HAVE_GET_THREAD_STACK_BASE

/* Public entry point: initialize Guile (and guilify the calling
   thread) using the thread's own stack base and the default dynamic
   state.  Only available when the platform can discover the stack
   base.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}

#endif
711
712 void *
713 scm_with_guile (void *(*func)(void *), void *data)
714 {
715 return scm_i_with_guile_and_parent (func, data,
716 scm_i_default_dynamic_state);
717 }
718
/* Cleanup handler pushed around the user function in
   scm_i_with_guile_and_parent: tear the thread down if it is exited
   abnormally (e.g. cancelled).  X is unused.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}
724
/* Run FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state, entering guile mode first if necessary, and return FUNC's
   result.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* A local serves as the stack-base marker for GC.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      /* We newly entered guile mode: arrange for thread tear-down if
	 FUNC exits the thread.  push/pop must stay lexically paired —
	 they may expand to a matched macro pair.  */
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
744
745 \f
746 /*** Non-guile mode. */
747
#if (defined HAVE_GC_DO_BLOCKING) && (!defined HAVE_DECL_GC_DO_BLOCKING)

/* This declaration is missing from the public headers of GC 7.1.  */
extern void GC_do_blocking (void (*) (void *), void *);

#endif

#ifdef HAVE_GC_DO_BLOCKING
/* Closure passed through GC_do_blocking to run a user function and
   capture its result.  */
struct without_guile_arg
{
  void * (*function) (void *);
  void *data;
  void *result;
};

/* Adapter with the signature GC_do_blocking expects: run the user
   function with guile mode switched off around the call.  */
static void
without_guile_trampoline (void *closure)
{
  struct without_guile_arg *arg;

  SCM_I_CURRENT_THREAD->guile_mode = 0;

  arg = (struct without_guile_arg *) closure;
  arg->result = arg->function (arg->data);

  SCM_I_CURRENT_THREAD->guile_mode = 1;
}
#endif
776
/* Run FUNC (DATA) outside guile mode — i.e. without the GC needing to
   stop this thread — and return its result.  Falls back to a plain
   call when GC_do_blocking is unavailable or the thread is already
   outside guile mode.  */
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;

#ifdef HAVE_GC_DO_BLOCKING
  if (SCM_I_CURRENT_THREAD->guile_mode)
    {
      struct without_guile_arg arg;

      arg.function = func;
      arg.data = data;
      GC_do_blocking (without_guile_trampoline, &arg);
      result = arg.result;
    }
  else
#endif
    result = func (data);

  return result;
}
798
799 \f
800 /*** Thread creation */
801
/* Arguments handed from call-with-new-thread to the new thread, plus
   a mutex/cond handshake through which the child reports its thread
   object back to the parent.  */
typedef struct {
  SCM parent;      /* parent dynamic state */
  SCM thunk;       /* thunk to run in the new thread */
  SCM handler;     /* catch handler, or SCM_UNDEFINED */
  SCM thread;      /* out: the child's thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
810
/* Body of a thread created by call-with-new-thread, run in guile
   mode.  D is the launch_data, which lives on the parent's stack:
   everything needed from it must be copied out before signalling.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy before the handshake; DATA is invalid once the parent is
     released.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  /* Trigger a call to `on_thread_exit ()'.  */
  pthread_exit (NULL);

  return 0;
}
835
836 static void *
837 launch_thread (void *d)
838 {
839 launch_data *data = (launch_data *)d;
840 scm_i_pthread_detach (scm_i_pthread_self ());
841 scm_i_with_guile_and_parent (really_launch, d, data->parent);
842 return NULL;
843 }
844
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  /* DATA lives on this stack frame; the child copies what it needs
     before we return (guaranteed by the cond handshake below).  */
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Lock before create so the child cannot signal before we wait.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has filled in data.thread.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
888
/* C-level analogue of launch_data, used by scm_spawn_thread: body and
   handler are C callbacks instead of Scheme procedures.  */
typedef struct {
  SCM parent;                   /* parent dynamic state */
  scm_t_catch_body body;        /* C body to run */
  void *body_data;
  scm_t_catch_handler handler;  /* C handler, or NULL */
  void *handler_data;
  SCM thread;                   /* out: the child's thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
899
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   D is the spawn_data on the parent's stack; copy everything out
   before releasing the parent via the cond handshake.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy before signalling; DATA is invalid afterwards.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
924
925 static void *
926 spawn_thread (void *d)
927 {
928 spawn_data *data = (spawn_data *)d;
929 scm_i_pthread_detach (scm_i_pthread_self ());
930 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
931 return NULL;
932 }
933
/* Create a new thread running the C function BODY (BODY_DATA) under
   HANDLER (HANDLER_DATA), and return its thread object.  Signals a
   system error when the thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  /* DATA lives on this frame; the child copies it out before the cond
     handshake below completes.  */
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Lock before create so the child cannot signal before we wait.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has filled in data.thread.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
964
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Scheduling hint only; returns #t when the yield call succeeded.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
973
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  /* The canceled flag (checked under the admin mutex) guarantees
     pthread_cancel is issued at most once per thread.  */
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the lock before cancelling; the cancellation handlers
	 take this mutex themselves.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
998
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on a dead or dying thread is a silent
     no-op — it would never run anyway.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1022
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A dead or dying thread reports no handler (mirrors the no-op in
     set-thread-cleanup!).  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1041
/* C-level join with no timeout: block until THREAD terminates and
   return its exit value.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1046
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  /* RES defaults to #f, or to TIMEOUTVAL when one was supplied; it is
     overwritten with the thread's result on successful join.  */
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Loop: block on the join queue, run pending asyncs, and retry
	 until the thread exits or the timeout fires.  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      /* Woken by unblock_from_queue in do_thread_exit.  */
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* EINTR (or spurious wakeup): release the admin mutex so
	     asyncs can run via SCM_TICK, then retry.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited).  */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1113
1114 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1115 (SCM obj),
1116 "Return @code{#t} if @var{obj} is a thread.")
1117 #define FUNC_NAME s_scm_thread_p
1118 {
1119 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1120 }
1121 #undef FUNC_NAME
1122
1123
1124 static size_t
1125 fat_mutex_free (SCM mx)
1126 {
1127 fat_mutex *m = SCM_MUTEX_DATA (mx);
1128 scm_i_pthread_mutex_destroy (&m->lock);
1129 return 0;
1130 }
1131
1132 static int
1133 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1134 {
1135 fat_mutex *m = SCM_MUTEX_DATA (mx);
1136 scm_puts ("#<mutex ", port);
1137 scm_uintprint ((scm_t_bits)m, 16, port);
1138 scm_puts (">", port);
1139 return 1;
1140 }
1141
/* Allocate and return a new mutex smob with the given flag
   settings.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set WAITING to a valid SCM value before creating the smob, so GC
     never scans an uninitialized field; the real queue is allocated
     afterwards because make_queue itself can trigger GC.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1162
1163 SCM scm_make_mutex (void)
1164 {
1165 return scm_make_mutex_with_flags (SCM_EOL);
1166 }
1167
/* Symbols naming the flags accepted by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1171
1172 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1173 (SCM flags),
1174 "Create a new mutex. ")
1175 #define FUNC_NAME s_scm_make_mutex_with_flags
1176 {
1177 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1178
1179 SCM ptr = flags;
1180 while (! scm_is_null (ptr))
1181 {
1182 SCM flag = SCM_CAR (ptr);
1183 if (scm_is_eq (flag, unchecked_unlock_sym))
1184 unchecked_unlock = 1;
1185 else if (scm_is_eq (flag, allow_external_unlock_sym))
1186 external_unlock = 1;
1187 else if (scm_is_eq (flag, recursive_sym))
1188 recursive = 1;
1189 else
1190 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1191 ptr = SCM_CDR (ptr);
1192 }
1193 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1194 }
1195 #undef FUNC_NAME
1196
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1205
/* Error key thrown when a lock is obtained on a mutex whose owner has
   exited without unlocking it (cf. SRFI-18 abandoned-mutex-exception).  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1207
/* Lock MUTEX on behalf of OWNER (the calling thread when OWNER is
   unbound).  TIMEOUT, when non-NULL, is an absolute time at which to
   give up.  On return *RET is 1 if the lock was obtained, 0 otherwise.
   The result is #f on success, or a (key . message) pair describing an
   exception for the caller to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 Note that the weak pair itself it still retained, but
		 it's better than retaining MUTEX and the threads
		 referred to by its associated queue.  */
	      t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The owner exited without unlocking: steal the mutex and
	     report it via the abandoned-mutex-error key.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Re-locking a mutex we already own: bump the level for
	     recursive mutexes, error otherwise.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: give up if the absolute timeout has
	     already passed, otherwise block on the wait queue.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  /* Release M's lock around SCM_TICK so pending asyncs can run
	     without holding this mutex's internal lock.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1291
1292 SCM scm_lock_mutex (SCM mx)
1293 {
1294 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1295 }
1296
1297 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1298 (SCM m, SCM timeout, SCM owner),
1299 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1300 "blocks until the mutex becomes available. The function returns when "
1301 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1302 "a thread already owns will succeed right away and will not block the "
1303 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1304 #define FUNC_NAME s_scm_lock_mutex_timed
1305 {
1306 SCM exception;
1307 int ret = 0;
1308 scm_t_timespec cwaittime, *waittime = NULL;
1309
1310 SCM_VALIDATE_MUTEX (1, m);
1311
1312 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1313 {
1314 to_timespec (timeout, &cwaittime);
1315 waittime = &cwaittime;
1316 }
1317
1318 exception = fat_mutex_lock (m, waittime, owner, &ret);
1319 if (!scm_is_false (exception))
1320 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1321 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1322 }
1323 #undef FUNC_NAME
1324
/* Lock MUTEX for the extent of the current dynwind context: the
   rewind handler locks it on (re-)entry and the unwind handler
   unlocks it on exit.  Both use SCM_F_WIND_EXPLICITLY, so they also
   run on normal entry and exit, not only on non-local jumps.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1333
1334 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1335 (SCM mutex),
1336 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1337 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1338 #define FUNC_NAME s_scm_try_mutex
1339 {
1340 SCM exception;
1341 int ret = 0;
1342 scm_t_timespec cwaittime, *waittime = NULL;
1343
1344 SCM_VALIDATE_MUTEX (1, mutex);
1345
1346 to_timespec (scm_from_int(0), &cwaittime);
1347 waittime = &cwaittime;
1348
1349 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1350 if (!scm_is_false (exception))
1351 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1352 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1353 }
1354 #undef FUNC_NAME
1355
/*** Fat condition variables */

/* A condition variable is a queue of waiting threads guarded by a
   private pthread mutex.  NOTE(review): `lock' is never initialized
   or used in this file -- confirm whether it is dead.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1365
/* Unlock MUTEX.  When COND is bound, atomically start waiting on COND
   (until the absolute time WAITTIME, or forever when NULL) and, if
   RELOCK is nonzero, re-acquire MUTEX before returning.  Returns 1 on
   success (unlocked, or signalled), 0 when the wait timed out.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  SCM owner = m->owner;

  if (!scm_is_eq (owner, scm_current_thread ()))
    {
      if (m->level == 0)
	{
	  /* Unlocking an unlocked mutex is only allowed for mutexes
	     created with the `unchecked-unlock' flag.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = scm_current_thread ();
	}
      else if (!m->allow_external_unlock)
	{
	  /* Unlocking another thread's mutex requires the
	     `allow-external-unlock' flag.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Drop one level; when fully unlocked, hand the mutex to the
	     next queued waiter (or #f if none).  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    m->owner = unblock_from_queue (m->waiting);

	  /* Defer asyncs across the wait; restored below.  */
	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      /* Relock on behalf of the original OWNER, preserving
		 external ownership if the mutex allowed it.  */
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: let pending asyncs run, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	m->owner = unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1459
1460 SCM scm_unlock_mutex (SCM mx)
1461 {
1462 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1463 }
1464
1465 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1466 (SCM mx, SCM cond, SCM timeout),
1467 "Unlocks @var{mutex} if the calling thread owns the lock on "
1468 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1469 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1470 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1471 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1472 "with a call to @code{unlock-mutex}. Only the last call to "
1473 "@code{unlock-mutex} will actually unlock the mutex. ")
1474 #define FUNC_NAME s_scm_unlock_mutex_timed
1475 {
1476 scm_t_timespec cwaittime, *waittime = NULL;
1477
1478 SCM_VALIDATE_MUTEX (1, mx);
1479 if (! (SCM_UNBNDP (cond)))
1480 {
1481 SCM_VALIDATE_CONDVAR (2, cond);
1482
1483 if (! (SCM_UNBNDP (timeout)))
1484 {
1485 to_timespec (timeout, &cwaittime);
1486 waittime = &cwaittime;
1487 }
1488 }
1489
1490 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1491 }
1492 #undef FUNC_NAME
1493
1494 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1495 (SCM obj),
1496 "Return @code{#t} if @var{obj} is a mutex.")
1497 #define FUNC_NAME s_scm_mutex_p
1498 {
1499 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1500 }
1501 #undef FUNC_NAME
1502
1503 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1504 (SCM mx),
1505 "Return the thread owning @var{mx}, or @code{#f}.")
1506 #define FUNC_NAME s_scm_mutex_owner
1507 {
1508 SCM owner;
1509 fat_mutex *m = NULL;
1510
1511 SCM_VALIDATE_MUTEX (1, mx);
1512 m = SCM_MUTEX_DATA (mx);
1513 scm_i_pthread_mutex_lock (&m->lock);
1514 owner = m->owner;
1515 scm_i_pthread_mutex_unlock (&m->lock);
1516
1517 return owner;
1518 }
1519 #undef FUNC_NAME
1520
1521 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1522 (SCM mx),
1523 "Return the lock level of mutex @var{mx}.")
1524 #define FUNC_NAME s_scm_mutex_level
1525 {
1526 SCM_VALIDATE_MUTEX (1, mx);
1527 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1528 }
1529 #undef FUNC_NAME
1530
1531 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1532 (SCM mx),
1533 "Returns @code{#t} if the mutex @var{mx} is locked.")
1534 #define FUNC_NAME s_scm_mutex_locked_p
1535 {
1536 SCM_VALIDATE_MUTEX (1, mx);
1537 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1538 }
1539 #undef FUNC_NAME
1540
1541 static int
1542 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1543 {
1544 fat_cond *c = SCM_CONDVAR_DATA (cv);
1545 scm_puts ("#<condition-variable ", port);
1546 scm_uintprint ((scm_t_bits)c, 16, port);
1547 scm_puts (">", port);
1548 return 1;
1549 }
1550
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Give `waiting' a valid immediate before the smob exists, then
     allocate the real queue once C is reachable through CV -- same
     GC-safety pattern as make_fat_mutex.  NOTE(review): the `lock'
     field of fat_cond is left uninitialized here; it appears unused
     in this file -- confirm.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1566
1567 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1568 (SCM cv, SCM mx, SCM t),
1569 "Wait until @var{cond-var} has been signalled. While waiting, "
1570 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1571 "is locked again when this function returns. When @var{time} is given, "
1572 "it specifies a point in time where the waiting should be aborted. It "
1573 "can be either a integer as returned by @code{current-time} or a pair "
1574 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1575 "mutex is locked and @code{#f} is returned. When the condition "
1576 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1577 "is returned. ")
1578 #define FUNC_NAME s_scm_timed_wait_condition_variable
1579 {
1580 scm_t_timespec waittime, *waitptr = NULL;
1581
1582 SCM_VALIDATE_CONDVAR (1, cv);
1583 SCM_VALIDATE_MUTEX (2, mx);
1584
1585 if (!SCM_UNBNDP (t))
1586 {
1587 to_timespec (t, &waittime);
1588 waitptr = &waittime;
1589 }
1590
1591 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1592 }
1593 #undef FUNC_NAME
1594
/* Wake at most one thread waiting on C; a no-op when the queue is
   empty.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1600
1601 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1602 (SCM cv),
1603 "Wake up one thread that is waiting for @var{cv}")
1604 #define FUNC_NAME s_scm_signal_condition_variable
1605 {
1606 SCM_VALIDATE_CONDVAR (1, cv);
1607 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1608 return SCM_BOOL_T;
1609 }
1610 #undef FUNC_NAME
1611
1612 static void
1613 fat_cond_broadcast (fat_cond *c)
1614 {
1615 while (scm_is_true (unblock_from_queue (c->waiting)))
1616 ;
1617 }
1618
1619 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1620 (SCM cv),
1621 "Wake up all threads that are waiting for @var{cv}. ")
1622 #define FUNC_NAME s_scm_broadcast_condition_variable
1623 {
1624 SCM_VALIDATE_CONDVAR (1, cv);
1625 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1626 return SCM_BOOL_T;
1627 }
1628 #undef FUNC_NAME
1629
1630 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1631 (SCM obj),
1632 "Return @code{#t} if @var{obj} is a condition variable.")
1633 #define FUNC_NAME s_scm_condition_variable_p
1634 {
1635 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1636 }
1637 #undef FUNC_NAME
1638
1639
1640 \f
1641 /*** Select */
1642
/* Arguments to and results from select(2), bundled so the call can be
   made outside guile mode via scm_without_guile (see do_std_select).  */
struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int result;
  int errno_value;
};
1654
1655 static void *
1656 do_std_select (void *args)
1657 {
1658 struct select_args *select_args;
1659
1660 select_args = (struct select_args *) args;
1661
1662 select_args->result =
1663 select (select_args->nfds,
1664 select_args->read_fds, select_args->write_fds,
1665 select_args->except_fds, select_args->timeout);
1666 select_args->errno_value = errno;
1667
1668 return NULL;
1669 }
1670
/* Like select(2), but runs outside guile mode while blocked and can be
   woken early through the calling thread's sleep pipe; a wake-up-only
   return is reported as -1/EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always watch at least the sleep pipe, so make sure a read set
     exists to put it in.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this thread as sleeping; retry until no async is
     pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Watch the read end of the sleep pipe so another thread can
     interrupt the select by writing to the other end.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If the sleep pipe fired, drain the wake-up byte and hide the pipe
     from the caller; if it was the only ready descriptor, report the
     whole call as interrupted.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1728
1729 /* Convenience API for blocking while in guile mode. */
1730
1731 #if SCM_USE_PTHREAD_THREADS
1732
1733 /* It seems reasonable to not run procedures related to mutex and condition
1734 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1735 without it, and (ii) the only potential gain would be GC latency. See
1736 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1737 for a discussion of the pros and cons. */
1738
1739 int
1740 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1741 {
1742 int res = scm_i_pthread_mutex_lock (mutex);
1743 return res;
1744 }
1745
/* Unwind handler adapting scm_i_pthread_mutex_unlock to the
   void(*)(void *) signature scm_dynwind_unwind_handler expects; DATA
   is the mutex to release.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1751
/* Lock MUTEX and register an unwind handler that releases it when the
   enclosing dynwind context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1758
/* Wait on COND with MUTEX held, in guile mode.  MUTEX is recorded in
   the thread's `held_mutex' field for the duration of the wait
   (NOTE(review): the consumers of `held_mutex' are outside this chunk;
   presumably it lets other code release the mutex if the thread is
   interrupted while blocked -- confirm).  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1771
/* Like scm_pthread_cond_wait, but give up at the absolute time WT,
   returning ETIMEDOUT in that case (pthread_cond_timedwait
   semantics).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1786
1787 #endif
1788
1789 unsigned long
1790 scm_std_usleep (unsigned long usecs)
1791 {
1792 struct timeval tv;
1793 tv.tv_usec = usecs % 1000000;
1794 tv.tv_sec = usecs / 1000000;
1795 scm_std_select (0, NULL, NULL, NULL, &tv);
1796 return tv.tv_sec * 1000000 + tv.tv_usec;
1797 }
1798
1799 unsigned int
1800 scm_std_sleep (unsigned int secs)
1801 {
1802 struct timeval tv;
1803 tv.tv_usec = 0;
1804 tv.tv_sec = secs;
1805 scm_std_select (0, NULL, NULL, NULL, &tv);
1806 return tv.tv_sec;
1807 }
1808
1809 /*** Misc */
1810
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* Each thread's scm_i_thread carries its own SCM handle.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1819
1820 static SCM
1821 scm_c_make_list (size_t n, SCM fill)
1822 {
1823 SCM res = SCM_EOL;
1824 while (n-- > 0)
1825 res = scm_cons (fill, res);
1826 return res;
1827 }
1828
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate N cells, then fill them in under the lock.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The signal delivery thread is internal and not exposed to
	 Scheme.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Terminate the list at the last cell actually filled, dropping the
     spare pre-allocated cells (skipped signal thread, or a count that
     shrank since the allocation).  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1857
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Scheme wrapper around the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1866
/* C-level predicate: nonzero iff THREAD has exited.  Validates THREAD
   (throwing a wrong-type error otherwise) and reads its `exited'
   flag.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1877
/* Condition variable initialized in scm_threads_prehistory; waited on
   and signalled elsewhere in this file.  */
static scm_i_pthread_cond_t wake_up_cond;

/* Nonzero once scm_init_threads has completed.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Recursive mutex used by scm_dynwind_critical_section when the
   caller supplies #f; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;

/* Enter a dynwind critical section: lock MUTEX (or the default
   recursive mutex when MUTEX is #f) and block asyncs; both are undone
   when the enclosing dynwind context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

/*** Initialization */

/* Mutex guarding miscellaneous global state; used elsewhere in
   Guile.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute for creating recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1904
/* First stage of thread-system initialization, called very early in
   boot with BASE as the stack base of the main thread.  Initializes
   the global mutexes and condition variable, then turns the calling
   OS thread into the first Guile thread.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Build the recursive attribute used for the critical-section
     mutex below.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 (base);
}
1921
/* Smob type tags, assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1925
/* Second stage of initialization: register the smob types for
   threads, mutexes and condition variables, finish setting up the
   first thread, and create the default critical-section mutex.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  /* Must be recursive: scm_dynwind_critical_section uses it as the
     default and sections may nest -- made permanent so the GC never
     collects it.  */
  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1947
/* Install the default dynamic state, derived from the current one,
   as a permanent object.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

/* Register the Scheme procedures declared with SCM_DEFINE in this
   file; threads.x is generated from it by the snarfer.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1960
1961 \f
/* IA64-specific things.  On IA64 the register backing store is a
   second, separately-growing stack that the GC must also scan.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* Return the base address of the register backing store, found by
   scanning this process's VM regions for the RSE stack; abort if none
   is found.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Return the ar.bsp value (top of the register backing store) saved
   in the ucontext CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* On GNU/Linux, glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Extract ar.bsp from the machine context OPAQUE (a ucontext_t).  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
2002
2003
2004 /*
2005 Local Variables:
2006 c-file-style: "gnu"
2007 End:
2008 */