1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "libguile/_scm.h"
33 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
40 #include "libguile/validate.h"
41 #include "libguile/root.h"
42 #include "libguile/eval.h"
43 #include "libguile/async.h"
44 #include "libguile/ports.h"
45 #include "libguile/threads.h"
46 #include "libguile/dynwind.h"
47 #include "libguile/iselect.h"
48 #include "libguile/fluids.h"
49 #include "libguile/continuations.h"
50 #include "libguile/gc.h"
51 #include "libguile/init.h"
52 #include "libguile/scmsigs.h"
53 #include "libguile/strings.h"
57 # define ETIMEDOUT WSAETIMEDOUT
61 # define pipe(fd) _pipe (fd, 256, O_BINARY)
62 #endif /* __MINGW32__ */
64 #include <full-read.h>
68 to_timespec (SCM t
, scm_t_timespec
*waittime
)
72 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
73 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
77 double time
= scm_to_double (t
);
78 double sec
= scm_c_truncate (time
);
80 waittime
->tv_sec
= (long) sec
;
81 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
87 /* Make an empty queue data structure.
92 return scm_cons (SCM_EOL
, SCM_EOL
);
95 /* Put T at the back of Q and return a handle that can be used with
96 remqueue to remove T from Q again.
99 enqueue (SCM q
, SCM t
)
101 SCM c
= scm_cons (t
, SCM_EOL
);
102 SCM_CRITICAL_SECTION_START
;
103 if (scm_is_null (SCM_CDR (q
)))
106 SCM_SETCDR (SCM_CAR (q
), c
);
108 SCM_CRITICAL_SECTION_END
;
112 /* Remove the element that the handle C refers to from the queue Q. C
113 must have been returned from a call to enqueue. The return value
114 is zero when the element referred to by C has already been removed.
115 Otherwise, 1 is returned.
118 remqueue (SCM q
, SCM c
)
121 SCM_CRITICAL_SECTION_START
;
122 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
124 if (scm_is_eq (p
, c
))
126 if (scm_is_eq (c
, SCM_CAR (q
)))
127 SCM_SETCAR (q
, SCM_CDR (c
));
128 SCM_SETCDR (prev
, SCM_CDR (c
));
129 SCM_CRITICAL_SECTION_END
;
134 SCM_CRITICAL_SECTION_END
;
138 /* Remove the front-most element from the queue Q and return it.
139 Return SCM_BOOL_F when Q is empty.
145 SCM_CRITICAL_SECTION_START
;
149 SCM_CRITICAL_SECTION_END
;
154 SCM_SETCDR (q
, SCM_CDR (c
));
155 if (scm_is_null (SCM_CDR (q
)))
156 SCM_SETCAR (q
, SCM_EOL
);
157 SCM_CRITICAL_SECTION_END
;
162 /*** Thread smob routines */
165 thread_mark (SCM obj
)
167 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
168 scm_gc_mark (t
->result
);
169 scm_gc_mark (t
->cleanup_handler
);
170 scm_gc_mark (t
->join_queue
);
171 scm_gc_mark (t
->mutexes
);
172 scm_gc_mark (t
->dynwinds
);
173 scm_gc_mark (t
->active_asyncs
);
174 scm_gc_mark (t
->continuation_root
);
176 return t
->dynamic_state
;
180 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
182 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
183 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
184 the struct case, hence we go via a union, and extract according to the
185 size of pthread_t. */
193 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
194 scm_i_pthread_t p
= t
->pthread
;
197 if (sizeof (p
) == sizeof (unsigned short))
199 else if (sizeof (p
) == sizeof (unsigned int))
201 else if (sizeof (p
) == sizeof (unsigned long))
206 scm_puts ("#<thread ", port
);
207 scm_uintprint (id
, 10, port
);
208 scm_puts (" (", port
);
209 scm_uintprint ((scm_t_bits
)t
, 16, port
);
210 scm_puts (")>", port
);
215 thread_free (SCM obj
)
217 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
219 scm_gc_free (t
, sizeof (*t
), "thread");
223 /*** Blocking on queues. */
225 /* See also scm_i_queue_async_cell for how such a block is
229 /* Put the current thread on QUEUE and go to sleep, waiting for it to
230 be woken up by a call to 'unblock_from_queue', or to be
231 interrupted. Upon return of this function, the current thread is
232 no longer on QUEUE, even when the sleep has been interrupted.
234 The caller of block_self must hold MUTEX. It will be atomically
235 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
237 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
240 When WAITTIME is not NULL, the sleep will be aborted at that time.
242 The return value of block_self is an errno value. It will be zero
243 when the sleep has been successfully completed by a call to
244 unblock_from_queue, EINTR when it has been interrupted by the
245 delivery of a system async, and ETIMEDOUT when the timeout has
248 The system asyncs themselves are not executed by block_self.
251 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
252 const scm_t_timespec
*waittime
)
254 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
258 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
263 q_handle
= enqueue (queue
, t
->handle
);
264 if (waittime
== NULL
)
265 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
267 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
269 /* When we are still on QUEUE, we have been interrupted. We
270 report this only when no other error (such as a timeout) has
273 if (remqueue (queue
, q_handle
) && err
== 0)
276 scm_i_reset_sleep (t
);
282 /* Wake up the first thread on QUEUE, if any. The awoken thread is
283 returned, or #f if the queue was empty.
286 unblock_from_queue (SCM queue
)
288 SCM thread
= dequeue (queue
);
289 if (scm_is_true (thread
))
290 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
294 /* Getting into and out of guile mode.
297 /* Ken Raeburn observes that the implementation of suspend and resume
298 (and the things that build on top of them) are very likely not
299 correct (see below).  We will need to fix this eventually, and that's
300 why scm_leave_guile/scm_enter_guile are not exported in the API.
304 Consider this sequence:
306 Function foo, called in Guile mode, calls suspend (maybe indirectly
307 through scm_leave_guile), which does this:
309 // record top of stack for the GC
310 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
313 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
314 setjmp (t->regs); // here's most of the magic
318 Function foo has a SCM value X, a handle on a non-immediate object, in
319 a caller-saved register R, and it's the only reference to the object
322 The compiler wants to use R in suspend, so it pushes the current
323 value, X, into a stack slot which will be reloaded on exit from
324 suspend; then it loads stuff into R and goes about its business. The
325 setjmp call saves (some of) the current registers, including R, which
326 no longer contains X. (This isn't a problem for a normal
327 setjmp/longjmp situation, where longjmp would be called before
328 setjmp's caller returns; the old value for X would be loaded back from
329 the stack after the longjmp, before the function returned.)
331 So, suspend returns, loading X back into R (and invalidating the jump
332 buffer) in the process. The caller foo then goes off and calls a
333 bunch of other functions out of Guile mode, occasionally storing X on
334 the stack again, but, say, much deeper on the stack than suspend's
335 stack frame went, and the stack slot where suspend had written X has
336 long since been overwritten with other values.
338 Okay, nothing actively broken so far. Now, let garbage collection
339 run, triggered by another thread.
341 The thread calling foo is out of Guile mode at the time, so the
342 garbage collector just scans a range of stack addresses. Too bad that
343 X isn't stored there. So the pointed-to storage goes onto the free
344 list, and I think you can see where things go from there.
346 Is there anything I'm missing that'll prevent this scenario from
347 happening? I mean, aside from, "well, suspend and scm_leave_guile
348 don't have many local variables, so they probably won't need to save
349 any registers on most systems, so we hope everything will wind up in
350 the jump buffer and we'll just get away with it"?
352 (And, going the other direction, if scm_leave_guile and suspend push
353 the stack pointer over onto a new page, and foo doesn't make further
354 function calls and thus the stack pointer no longer includes that
355 page, are we guaranteed that the kernel cannot release the now-unused
356 stack page that contains the top-of-stack pointer we just saved? I
357 don't know if any OS actually does that. If it does, we could get
358 faults in garbage collection.)
360 I don't think scm_without_guile has to have this problem, as it gets
361 more control over the stack handling -- but it should call setjmp
362 itself. I'd probably try something like:
364 // record top of stack for the GC
365 t->top = SCM_STACK_PTR (&t);
367 SCM_FLUSH_REGISTER_WINDOWS;
372 ... though even that's making some assumptions about the stack
373 ordering of local variables versus caller-saved registers.
375 For something like scm_leave_guile to work, I don't think it can just
376 rely on invalidated jump buffers. A valid jump buffer, and a handle
377 on the stack state at the point when the jump buffer was initialized,
378 together, would work fine, but I think then we're talking about macros
379 invoking setjmp in the caller's stack frame, and requiring that the
380 caller of scm_leave_guile also call scm_enter_guile before returning,
381 kind of like pthread_cleanup_push/pop calls that have to be paired up
382 in a function. (In fact, the pthread ones have to be paired up
383 syntactically, as if they might expand to a compound statement
384 incorporating the user's code, and invoking a compiler's
385 exception-handling primitives. Which might be something to think
386 about for cases where Guile is used with C++ exceptions or
390 scm_i_pthread_key_t scm_i_thread_key
;
393 resume (scm_i_thread
*t
)
396 if (t
->clear_freelists_p
)
398 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
399 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
400 t
->clear_freelists_p
= 0;
404 typedef void* scm_t_guile_ticket
;
407 scm_enter_guile (scm_t_guile_ticket ticket
)
409 scm_i_thread
*t
= (scm_i_thread
*)ticket
;
412 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
413 t
->heap_mutex_locked_by_self
= 1;
418 static scm_i_thread
*
421 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
423 /* record top of stack for the GC */
424 t
->top
= SCM_STACK_PTR (&t
);
425 /* save registers. */
426 SCM_FLUSH_REGISTER_WINDOWS
;
431 static scm_t_guile_ticket
434 scm_i_thread
*t
= suspend ();
435 if (t
->heap_mutex_locked_by_self
)
437 t
->heap_mutex_locked_by_self
= 0;
438 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
440 return (scm_t_guile_ticket
) t
;
443 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
444 static scm_i_thread
*all_threads
= NULL
;
445 static int thread_count
;
447 static SCM scm_i_default_dynamic_state
;
449 /* Perform first stage of thread initialisation, in non-guile mode.
452 guilify_self_1 (SCM_STACKITEM
*base
)
454 scm_i_thread
*t
= malloc (sizeof (scm_i_thread
));
456 t
->pthread
= scm_i_pthread_self ();
457 t
->handle
= SCM_BOOL_F
;
458 t
->result
= SCM_BOOL_F
;
459 t
->cleanup_handler
= SCM_BOOL_F
;
460 t
->mutexes
= SCM_EOL
;
461 t
->held_mutex
= NULL
;
462 t
->join_queue
= SCM_EOL
;
463 t
->dynamic_state
= SCM_BOOL_F
;
464 t
->dynwinds
= SCM_EOL
;
465 t
->active_asyncs
= SCM_EOL
;
467 t
->pending_asyncs
= 1;
468 t
->last_debug_frame
= NULL
;
471 /* Calculate and store off the base of this thread's register
472 backing store (RBS). Unfortunately our implementation(s) of
473 scm_ia64_register_backing_store_base are only reliable for the
474 main thread. For other threads, therefore, find out the current
475 top of the RBS, and use that as a maximum. */
476 t
->register_backing_store_base
= scm_ia64_register_backing_store_base ();
481 bsp
= scm_ia64_ar_bsp (&ctx
);
482 if (t
->register_backing_store_base
> bsp
)
483 t
->register_backing_store_base
= bsp
;
486 t
->continuation_root
= SCM_EOL
;
487 t
->continuation_base
= base
;
488 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
489 t
->sleep_mutex
= NULL
;
490 t
->sleep_object
= SCM_BOOL_F
;
493 if (pipe (t
->sleep_pipe
) != 0)
494 /* FIXME: Error conditions during the initialization phase are handled
495 gracelessly since public functions such as `scm_init_guile ()'
496 currently have type `void'. */
499 scm_i_pthread_mutex_init (&t
->heap_mutex
, NULL
);
500 t
->heap_mutex_locked_by_self
= 0;
501 scm_i_pthread_mutex_init (&t
->admin_mutex
, NULL
);
502 t
->clear_freelists_p
= 0;
507 t
->freelist
= SCM_EOL
;
508 t
->freelist2
= SCM_EOL
;
509 SCM_SET_FREELIST_LOC (scm_i_freelist
, &t
->freelist
);
510 SCM_SET_FREELIST_LOC (scm_i_freelist2
, &t
->freelist2
);
512 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
514 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
515 t
->heap_mutex_locked_by_self
= 1;
517 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
518 t
->next_thread
= all_threads
;
521 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
524 /* Perform second stage of thread initialisation, in guile mode.
527 guilify_self_2 (SCM parent
)
529 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
531 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
532 scm_gc_register_collectable_memory (t
, sizeof (scm_i_thread
), "thread");
533 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
534 t
->continuation_base
= t
->base
;
537 if (scm_is_true (parent
))
538 t
->dynamic_state
= scm_make_dynamic_state (parent
);
540 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
542 t
->join_queue
= make_queue ();
549 /* We implement our own mutex type since we want them to be 'fair', we
550 want to do fancy things while waiting for them (like running
551 asyncs) and we might want to add things that are nice for
556 scm_i_pthread_mutex_t lock
;
558 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
560 int recursive
; /* allow recursive locking? */
561 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
562 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
563 owned by the current thread? */
565 SCM waiting
; /* the threads waiting for this mutex. */
568 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
569 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
571 /* Perform thread tear-down, in guile mode.
574 do_thread_exit (void *v
)
576 scm_i_thread
*t
= (scm_i_thread
*) v
;
578 if (!scm_is_false (t
->cleanup_handler
))
580 SCM ptr
= t
->cleanup_handler
;
582 t
->cleanup_handler
= SCM_BOOL_F
;
583 t
->result
= scm_internal_catch (SCM_BOOL_T
,
584 (scm_t_catch_body
) scm_call_0
, ptr
,
585 scm_handle_by_message_noexit
, NULL
);
588 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
591 close (t
->sleep_pipe
[0]);
592 close (t
->sleep_pipe
[1]);
593 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
596 while (!scm_is_null (t
->mutexes
))
598 SCM mutex
= SCM_CAR (t
->mutexes
);
599 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
600 scm_i_pthread_mutex_lock (&m
->lock
);
602 unblock_from_queue (m
->waiting
);
604 scm_i_pthread_mutex_unlock (&m
->lock
);
605 t
->mutexes
= SCM_CDR (t
->mutexes
);
608 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
614 on_thread_exit (void *v
)
616 /* This handler is executed in non-guile mode. */
617 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
619 /* If this thread was cancelled while doing a cond wait, it will
620 still have a mutex locked, so we unlock it here. */
623 scm_i_pthread_mutex_unlock (t
->held_mutex
);
624 t
->held_mutex
= NULL
;
627 scm_i_pthread_setspecific (scm_i_thread_key
, v
);
629 /* Ensure the signal handling thread has been launched, because we might be
631 scm_i_ensure_signal_delivery_thread ();
633 /* Unblocking the joining threads needs to happen in guile mode
634 since the queue is a SCM data structure. */
635 scm_with_guile (do_thread_exit
, v
);
637 /* Removing ourself from the list of all threads needs to happen in
638 non-guile mode since all SCM values on our stack become
639 unprotected once we are no longer in the list. */
640 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
641 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
644 *tp
= t
->next_thread
;
649 /* If there's only one other thread, it could be the signal delivery
650 thread, so we need to notify it to shut down by closing its read pipe.
651 If it's not the signal delivery thread, then closing the read pipe isn't
653 if (thread_count
<= 1)
654 scm_i_close_signal_pipe ();
656 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
658 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
661 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
664 init_thread_key (void)
666 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
669 /* Perform any initializations necessary to bring the current thread
670 into guile mode, initializing Guile itself, if necessary.
672 BASE is the stack base to use with GC.
674 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
675 which case the default dynamic state is used.
677 Return zero when the thread was in guile mode already; otherwise
682 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
686 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
688 if ((t
= SCM_I_CURRENT_THREAD
) == NULL
)
690 /* This thread has not been guilified yet.
693 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
694 if (scm_initialized_p
== 0)
696 /* First thread ever to enter Guile. Run the full
699 scm_i_init_guile (base
);
700 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
704 /* Guile is already initialized, but this thread enters it for
705 the first time. Only initialize this thread.
707 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
708 guilify_self_1 (base
);
709 guilify_self_2 (parent
);
715 /* This thread is already guilified but not in guile mode, just
718 A user call to scm_with_guile() will lead us to here. This could
719 happen from anywhere on the stack, and in particular lower on the
720 stack than when it was when this thread was first guilified. Thus,
721 `base' must be updated. */
722 #if SCM_STACK_GROWS_UP
730 scm_enter_guile ((scm_t_guile_ticket
) t
);
735 /* Thread is already in guile mode. Nothing to do.
741 #if SCM_USE_PTHREAD_THREADS
743 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
744 /* This method for GNU/Linux and perhaps some other systems.
745 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
746 available on them. */
747 #define HAVE_GET_THREAD_STACK_BASE
749 static SCM_STACKITEM
*
750 get_thread_stack_base ()
756 pthread_getattr_np (pthread_self (), &attr
);
757 pthread_attr_getstack (&attr
, &start
, &size
);
758 end
= (char *)start
+ size
;
760 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
761 for the main thread, but we can use scm_get_stack_base in that
765 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
766 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
767 return scm_get_stack_base ();
771 #if SCM_STACK_GROWS_UP
779 #elif HAVE_PTHREAD_GET_STACKADDR_NP
780 /* This method for MacOS X.
781 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
782 but as of 2006 there's nothing obvious at apple.com. */
783 #define HAVE_GET_THREAD_STACK_BASE
784 static SCM_STACKITEM
*
785 get_thread_stack_base ()
787 return pthread_get_stackaddr_np (pthread_self ());
790 #elif defined (__MINGW32__)
791 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
792 in any thread. We don't like hard-coding the name of a system, but there
793 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
795 #define HAVE_GET_THREAD_STACK_BASE
796 static SCM_STACKITEM
*
797 get_thread_stack_base ()
799 return scm_get_stack_base ();
802 #endif /* pthread methods of get_thread_stack_base */
804 #else /* !SCM_USE_PTHREAD_THREADS */
806 #define HAVE_GET_THREAD_STACK_BASE
808 static SCM_STACKITEM
*
809 get_thread_stack_base ()
811 return scm_get_stack_base ();
814 #endif /* !SCM_USE_PTHREAD_THREADS */
#ifdef HAVE_GET_THREAD_STACK_BASE

/* Initialize Guile for the calling thread, using the thread's stack
   base and the default dynamic state.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}

#endif
828 scm_with_guile (void *(*func
)(void *), void *data
)
830 return scm_i_with_guile_and_parent (func
, data
,
831 scm_i_default_dynamic_state
);
834 SCM_UNUSED
static void
835 scm_leave_guile_cleanup (void *x
)
841 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
845 SCM_STACKITEM base_item
;
847 really_entered
= scm_i_init_thread_for_guile (&base_item
, parent
);
850 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup
, NULL
);
851 res
= scm_c_with_continuation_barrier (func
, data
);
852 scm_i_pthread_cleanup_pop (0);
856 res
= scm_c_with_continuation_barrier (func
, data
);
862 scm_without_guile (void *(*func
)(void *), void *data
)
865 scm_t_guile_ticket t
;
866 t
= scm_leave_guile ();
872 /*** Thread creation */
879 scm_i_pthread_mutex_t mutex
;
880 scm_i_pthread_cond_t cond
;
884 really_launch (void *d
)
886 launch_data
*data
= (launch_data
*)d
;
887 SCM thunk
= data
->thunk
, handler
= data
->handler
;
890 t
= SCM_I_CURRENT_THREAD
;
892 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
893 data
->thread
= scm_current_thread ();
894 scm_i_pthread_cond_signal (&data
->cond
);
895 scm_i_pthread_mutex_unlock (&data
->mutex
);
897 if (SCM_UNBNDP (handler
))
898 t
->result
= scm_call_0 (thunk
);
900 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
906 launch_thread (void *d
)
908 launch_data
*data
= (launch_data
*)d
;
909 scm_i_pthread_detach (scm_i_pthread_self ());
910 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
914 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
915 (SCM thunk
, SCM handler
),
916 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
917 "returning a new thread object representing the thread. The procedure\n"
918 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
920 "When @var{handler} is specified, then @var{thunk} is called from\n"
921 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
922 "handler. This catch is established inside the continuation barrier.\n"
924 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
925 "the @emph{exit value} of the thread and the thread is terminated.")
926 #define FUNC_NAME s_scm_call_with_new_thread
932 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
933 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
934 handler
, SCM_ARG2
, FUNC_NAME
);
936 data
.parent
= scm_current_dynamic_state ();
938 data
.handler
= handler
;
939 data
.thread
= SCM_BOOL_F
;
940 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
941 scm_i_pthread_cond_init (&data
.cond
, NULL
);
943 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
944 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
947 scm_i_pthread_mutex_unlock (&data
.mutex
);
951 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
952 scm_i_pthread_mutex_unlock (&data
.mutex
);
960 scm_t_catch_body body
;
962 scm_t_catch_handler handler
;
965 scm_i_pthread_mutex_t mutex
;
966 scm_i_pthread_cond_t cond
;
970 really_spawn (void *d
)
972 spawn_data
*data
= (spawn_data
*)d
;
973 scm_t_catch_body body
= data
->body
;
974 void *body_data
= data
->body_data
;
975 scm_t_catch_handler handler
= data
->handler
;
976 void *handler_data
= data
->handler_data
;
977 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
979 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
980 data
->thread
= scm_current_thread ();
981 scm_i_pthread_cond_signal (&data
->cond
);
982 scm_i_pthread_mutex_unlock (&data
->mutex
);
985 t
->result
= body (body_data
);
987 t
->result
= scm_internal_catch (SCM_BOOL_T
,
989 handler
, handler_data
);
995 spawn_thread (void *d
)
997 spawn_data
*data
= (spawn_data
*)d
;
998 scm_i_pthread_detach (scm_i_pthread_self ());
999 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1004 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1005 scm_t_catch_handler handler
, void *handler_data
)
1011 data
.parent
= scm_current_dynamic_state ();
1013 data
.body_data
= body_data
;
1014 data
.handler
= handler
;
1015 data
.handler_data
= handler_data
;
1016 data
.thread
= SCM_BOOL_F
;
1017 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1018 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1020 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1021 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1024 scm_i_pthread_mutex_unlock (&data
.mutex
);
1026 scm_syserror (NULL
);
1028 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1029 scm_i_pthread_mutex_unlock (&data
.mutex
);
1034 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1036 "Move the calling thread to the end of the scheduling queue.")
1037 #define FUNC_NAME s_scm_yield
1039 return scm_from_bool (scm_i_sched_yield ());
1043 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1045 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1046 "cannot be the current thread, and if @var{thread} has already terminated or "
1047 "been signaled to terminate, this function is a no-op.")
1048 #define FUNC_NAME s_scm_cancel_thread
1050 scm_i_thread
*t
= NULL
;
1052 SCM_VALIDATE_THREAD (1, thread
);
1053 t
= SCM_I_THREAD_DATA (thread
);
1054 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1058 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1059 scm_i_pthread_cancel (t
->pthread
);
1062 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1064 return SCM_UNSPECIFIED
;
1068 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1069 (SCM thread
, SCM proc
),
1070 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1071 "This handler will be called when the thread exits.")
1072 #define FUNC_NAME s_scm_set_thread_cleanup_x
1076 SCM_VALIDATE_THREAD (1, thread
);
1077 if (!scm_is_false (proc
))
1078 SCM_VALIDATE_THUNK (2, proc
);
1080 t
= SCM_I_THREAD_DATA (thread
);
1081 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1083 if (!(t
->exited
|| t
->canceled
))
1084 t
->cleanup_handler
= proc
;
1086 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1088 return SCM_UNSPECIFIED
;
1092 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1094 "Return the cleanup handler installed for the thread @var{thread}.")
1095 #define FUNC_NAME s_scm_thread_cleanup
1100 SCM_VALIDATE_THREAD (1, thread
);
1102 t
= SCM_I_THREAD_DATA (thread
);
1103 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1104 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1105 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1111 SCM
scm_join_thread (SCM thread
)
1113 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1116 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1117 (SCM thread
, SCM timeout
, SCM timeoutval
),
1118 "Suspend execution of the calling thread until the target @var{thread} "
1119 "terminates, unless the target @var{thread} has already terminated. ")
1120 #define FUNC_NAME s_scm_join_thread_timed
1123 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1124 SCM res
= SCM_BOOL_F
;
1126 if (! (SCM_UNBNDP (timeoutval
)))
1129 SCM_VALIDATE_THREAD (1, thread
);
1130 if (scm_is_eq (scm_current_thread (), thread
))
1131 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1133 t
= SCM_I_THREAD_DATA (thread
);
1134 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1136 if (! SCM_UNBNDP (timeout
))
1138 to_timespec (timeout
, &ctimeout
);
1139 timeout_ptr
= &ctimeout
;
1148 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1158 else if (err
== ETIMEDOUT
)
1161 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1163 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1167 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1173 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1175 "Return @code{#t} if @var{obj} is a thread.")
1176 #define FUNC_NAME s_scm_thread_p
1178 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1183 fat_mutex_mark (SCM mx
)
1185 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1186 scm_gc_mark (m
->owner
);
1191 fat_mutex_free (SCM mx
)
1193 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1194 scm_i_pthread_mutex_destroy (&m
->lock
);
1195 scm_gc_free (m
, sizeof (fat_mutex
), "mutex");
1200 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1202 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1203 scm_puts ("#<mutex ", port
);
1204 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1205 scm_puts (">", port
);
1210 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1215 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1216 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1217 m
->owner
= SCM_BOOL_F
;
1220 m
->recursive
= recursive
;
1221 m
->unchecked_unlock
= unchecked_unlock
;
1222 m
->allow_external_unlock
= external_unlock
;
1224 m
->waiting
= SCM_EOL
;
1225 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1226 m
->waiting
= make_queue ();
1230 SCM
scm_make_mutex (void)
1232 return scm_make_mutex_with_flags (SCM_EOL
);
1235 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1236 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1237 SCM_SYMBOL (recursive_sym
, "recursive");
1239 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1241 "Create a new mutex. ")
1242 #define FUNC_NAME s_scm_make_mutex_with_flags
1244 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1247 while (! scm_is_null (ptr
))
1249 SCM flag
= SCM_CAR (ptr
);
1250 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1251 unchecked_unlock
= 1;
1252 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1253 external_unlock
= 1;
1254 else if (scm_is_eq (flag
, recursive_sym
))
1257 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1258 ptr
= SCM_CDR (ptr
);
1260 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1264 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1266 "Create a new recursive mutex. ")
1267 #define FUNC_NAME s_scm_make_recursive_mutex
1269 return make_fat_mutex (1, 0, 0);
1273 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1276 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1278 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1280 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1281 SCM err
= SCM_BOOL_F
;
1283 struct timeval current_time
;
1285 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1291 m
->owner
= new_owner
;
1294 if (SCM_I_IS_THREAD (new_owner
))
1296 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1297 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1298 t
->mutexes
= scm_cons (mutex
, t
->mutexes
);
1299 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1304 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1306 m
->owner
= new_owner
;
1307 err
= scm_cons (scm_abandoned_mutex_error_key
,
1308 scm_from_locale_string ("lock obtained on abandoned "
1313 else if (scm_is_eq (m
->owner
, new_owner
))
1322 err
= scm_cons (scm_misc_error_key
,
1323 scm_from_locale_string ("mutex already locked "
1331 if (timeout
!= NULL
)
1333 gettimeofday (¤t_time
, NULL
);
1334 if (current_time
.tv_sec
> timeout
->tv_sec
||
1335 (current_time
.tv_sec
== timeout
->tv_sec
&&
1336 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1342 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1343 scm_i_pthread_mutex_unlock (&m
->lock
);
1345 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1348 scm_i_pthread_mutex_unlock (&m
->lock
);
1352 SCM
scm_lock_mutex (SCM mx
)
1354 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1357 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1358 (SCM m
, SCM timeout
, SCM owner
),
1359 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1360 "blocks until the mutex becomes available. The function returns when "
1361 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1362 "a thread already owns will succeed right away and will not block the "
1363 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1364 #define FUNC_NAME s_scm_lock_mutex_timed
1368 scm_t_timespec cwaittime
, *waittime
= NULL
;
1370 SCM_VALIDATE_MUTEX (1, m
);
1372 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1374 to_timespec (timeout
, &cwaittime
);
1375 waittime
= &cwaittime
;
1378 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1379 if (!scm_is_false (exception
))
1380 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1381 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1386 scm_dynwind_lock_mutex (SCM mutex
)
1388 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1389 SCM_F_WIND_EXPLICITLY
);
1390 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1391 SCM_F_WIND_EXPLICITLY
);
1394 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1396 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1397 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1398 #define FUNC_NAME s_scm_try_mutex
1402 scm_t_timespec cwaittime
, *waittime
= NULL
;
1404 SCM_VALIDATE_MUTEX (1, mutex
);
1406 to_timespec (scm_from_int(0), &cwaittime
);
1407 waittime
= &cwaittime
;
1409 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1410 if (!scm_is_false (exception
))
1411 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1412 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1416 /*** Fat condition variables */
1419 scm_i_pthread_mutex_t lock
;
1420 SCM waiting
; /* the threads waiting for this condition. */
1423 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1424 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1427 fat_mutex_unlock (SCM mutex
, SCM cond
,
1428 const scm_t_timespec
*waittime
, int relock
)
1430 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1432 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1433 int err
= 0, ret
= 0;
1435 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1437 SCM owner
= m
->owner
;
1439 if (!scm_is_eq (owner
, scm_current_thread ()))
1443 if (!m
->unchecked_unlock
)
1445 scm_i_pthread_mutex_unlock (&m
->lock
);
1446 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1448 owner
= scm_current_thread ();
1450 else if (!m
->allow_external_unlock
)
1452 scm_i_pthread_mutex_unlock (&m
->lock
);
1453 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
1457 if (! (SCM_UNBNDP (cond
)))
1459 c
= SCM_CONDVAR_DATA (cond
);
1467 m
->owner
= unblock_from_queue (m
->waiting
);
1471 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1472 scm_i_pthread_mutex_unlock (&m
->lock
);
1479 else if (err
== ETIMEDOUT
)
1484 else if (err
!= EINTR
)
1487 scm_syserror (NULL
);
1493 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1501 scm_remember_upto_here_2 (cond
, mutex
);
1503 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1511 m
->owner
= unblock_from_queue (m
->waiting
);
1513 scm_i_pthread_mutex_unlock (&m
->lock
);
1520 SCM
scm_unlock_mutex (SCM mx
)
1522 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1525 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1526 (SCM mx
, SCM cond
, SCM timeout
),
1527 "Unlocks @var{mutex} if the calling thread owns the lock on "
1528 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1529 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1530 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1531 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1532 "with a call to @code{unlock-mutex}. Only the last call to "
1533 "@code{unlock-mutex} will actually unlock the mutex. ")
1534 #define FUNC_NAME s_scm_unlock_mutex_timed
1536 scm_t_timespec cwaittime
, *waittime
= NULL
;
1538 SCM_VALIDATE_MUTEX (1, mx
);
1539 if (! (SCM_UNBNDP (cond
)))
1541 SCM_VALIDATE_CONDVAR (2, cond
);
1543 if (! (SCM_UNBNDP (timeout
)))
1545 to_timespec (timeout
, &cwaittime
);
1546 waittime
= &cwaittime
;
1550 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1554 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1556 "Return @code{#t} if @var{obj} is a mutex.")
1557 #define FUNC_NAME s_scm_mutex_p
1559 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1563 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1565 "Return the thread owning @var{mx}, or @code{#f}.")
1566 #define FUNC_NAME s_scm_mutex_owner
1569 fat_mutex
*m
= NULL
;
1571 SCM_VALIDATE_MUTEX (1, mx
);
1572 m
= SCM_MUTEX_DATA (mx
);
1573 scm_i_pthread_mutex_lock (&m
->lock
);
1575 scm_i_pthread_mutex_unlock (&m
->lock
);
1581 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1583 "Return the lock level of mutex @var{mx}.")
1584 #define FUNC_NAME s_scm_mutex_level
1586 SCM_VALIDATE_MUTEX (1, mx
);
1587 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1591 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1593 "Returns @code{#t} if the mutex @var{mx} is locked.")
1594 #define FUNC_NAME s_scm_mutex_locked_p
1596 SCM_VALIDATE_MUTEX (1, mx
);
1597 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1602 fat_cond_mark (SCM cv
)
1604 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1609 fat_cond_free (SCM mx
)
1611 fat_cond
*c
= SCM_CONDVAR_DATA (mx
);
1612 scm_gc_free (c
, sizeof (fat_cond
), "condition-variable");
1617 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1619 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1620 scm_puts ("#<condition-variable ", port
);
1621 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1622 scm_puts (">", port
);
1626 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1628 "Make a new condition variable.")
1629 #define FUNC_NAME s_scm_make_condition_variable
1634 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1635 c
->waiting
= SCM_EOL
;
1636 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1637 c
->waiting
= make_queue ();
1642 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1643 (SCM cv
, SCM mx
, SCM t
),
1644 "Wait until @var{cond-var} has been signalled. While waiting, "
1645 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1646 "is locked again when this function returns. When @var{time} is given, "
1647 "it specifies a point in time where the waiting should be aborted. It "
1648 "can be either a integer as returned by @code{current-time} or a pair "
1649 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1650 "mutex is locked and @code{#f} is returned. When the condition "
1651 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1653 #define FUNC_NAME s_scm_timed_wait_condition_variable
1655 scm_t_timespec waittime
, *waitptr
= NULL
;
1657 SCM_VALIDATE_CONDVAR (1, cv
);
1658 SCM_VALIDATE_MUTEX (2, mx
);
1660 if (!SCM_UNBNDP (t
))
1662 to_timespec (t
, &waittime
);
1663 waitptr
= &waittime
;
1666 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1671 fat_cond_signal (fat_cond
*c
)
1673 unblock_from_queue (c
->waiting
);
1676 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1678 "Wake up one thread that is waiting for @var{cv}")
1679 #define FUNC_NAME s_scm_signal_condition_variable
1681 SCM_VALIDATE_CONDVAR (1, cv
);
1682 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1688 fat_cond_broadcast (fat_cond
*c
)
1690 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1694 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1696 "Wake up all threads that are waiting for @var{cv}. ")
1697 #define FUNC_NAME s_scm_broadcast_condition_variable
1699 SCM_VALIDATE_CONDVAR (1, cv
);
1700 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1705 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1707 "Return @code{#t} if @var{obj} is a condition variable.")
1708 #define FUNC_NAME s_scm_condition_variable_p
1710 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1714 /*** Marking stacks */
1716 /* XXX - what to do with this? Do we need to handle this for blocked
1720 # define SCM_MARK_BACKING_STORE() do { \
1722 SCM_STACKITEM * top, * bot; \
1723 getcontext (&ctx); \
1724 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1725 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1726 / sizeof (SCM_STACKITEM))); \
1727 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
1728 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
1729 scm_mark_locations (bot, top - bot); } while (0)
1731 # define SCM_MARK_BACKING_STORE()
1735 scm_threads_mark_stacks (void)
1738 for (t
= all_threads
; t
; t
= t
->next_thread
)
1740 /* Check that thread has indeed been suspended.
1744 scm_gc_mark (t
->handle
);
1746 #if SCM_STACK_GROWS_UP
1747 scm_mark_locations (t
->base
, t
->top
- t
->base
);
1749 scm_mark_locations (t
->top
, t
->base
- t
->top
);
1751 scm_mark_locations ((void *) &t
->regs
,
1752 ((size_t) sizeof(t
->regs
)
1753 / sizeof (SCM_STACKITEM
)));
1756 SCM_MARK_BACKING_STORE ();
1762 scm_std_select (int nfds
,
1763 SELECT_TYPE
*readfds
,
1764 SELECT_TYPE
*writefds
,
1765 SELECT_TYPE
*exceptfds
,
1766 struct timeval
*timeout
)
1769 int res
, eno
, wakeup_fd
;
1770 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1771 scm_t_guile_ticket ticket
;
1773 if (readfds
== NULL
)
1775 FD_ZERO (&my_readfds
);
1776 readfds
= &my_readfds
;
1779 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1782 wakeup_fd
= t
->sleep_pipe
[0];
1783 ticket
= scm_leave_guile ();
1784 FD_SET (wakeup_fd
, readfds
);
1785 if (wakeup_fd
>= nfds
)
1787 res
= select (nfds
, readfds
, writefds
, exceptfds
, timeout
);
1790 scm_enter_guile (ticket
);
1792 scm_i_reset_sleep (t
);
1794 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1797 full_read (wakeup_fd
, &dummy
, 1);
1799 FD_CLR (wakeup_fd
, readfds
);
1811 /* Convenience API for blocking while in guile mode. */
1813 #if SCM_USE_PTHREAD_THREADS
1816 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1818 scm_t_guile_ticket t
= scm_leave_guile ();
1819 int res
= scm_i_pthread_mutex_lock (mutex
);
1820 scm_enter_guile (t
);
1825 do_unlock (void *data
)
1827 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1831 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1833 scm_i_scm_pthread_mutex_lock (mutex
);
1834 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1838 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1840 scm_t_guile_ticket t
= scm_leave_guile ();
1841 ((scm_i_thread
*)t
)->held_mutex
= mutex
;
1842 int res
= scm_i_pthread_cond_wait (cond
, mutex
);
1843 ((scm_i_thread
*)t
)->held_mutex
= NULL
;
1844 scm_enter_guile (t
);
1849 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1850 scm_i_pthread_mutex_t
*mutex
,
1851 const scm_t_timespec
*wt
)
1853 scm_t_guile_ticket t
= scm_leave_guile ();
1854 ((scm_i_thread
*)t
)->held_mutex
= mutex
;
1855 int res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1856 ((scm_i_thread
*)t
)->held_mutex
= NULL
;
1857 scm_enter_guile (t
);
1864 scm_std_usleep (unsigned long usecs
)
1867 tv
.tv_usec
= usecs
% 1000000;
1868 tv
.tv_sec
= usecs
/ 1000000;
1869 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1870 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1874 scm_std_sleep (unsigned int secs
)
1879 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1885 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1887 "Return the thread that called this function.")
1888 #define FUNC_NAME s_scm_current_thread
1890 return SCM_I_CURRENT_THREAD
->handle
;
1895 scm_c_make_list (size_t n
, SCM fill
)
1899 res
= scm_cons (fill
, res
);
1903 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1905 "Return a list of all threads.")
1906 #define FUNC_NAME s_scm_all_threads
1908 /* We can not allocate while holding the thread_admin_mutex because
1909 of the way GC is done.
1911 int n
= thread_count
;
1913 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1915 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1917 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1919 if (t
!= scm_i_signal_delivery_thread
)
1921 SCM_SETCAR (*l
, t
->handle
);
1922 l
= SCM_CDRLOC (*l
);
1927 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1932 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1934 "Return @code{#t} iff @var{thread} has exited.\n")
1935 #define FUNC_NAME s_scm_thread_exited_p
1937 return scm_from_bool (scm_c_thread_exited_p (thread
));
1942 scm_c_thread_exited_p (SCM thread
)
1943 #define FUNC_NAME s_scm_thread_exited_p
1946 SCM_VALIDATE_THREAD (1, thread
);
1947 t
= SCM_I_THREAD_DATA (thread
);
1952 static scm_i_pthread_cond_t wake_up_cond
;
1953 int scm_i_thread_go_to_sleep
;
1954 static int threads_initialized_p
= 0;
1957 scm_i_thread_put_to_sleep ()
1959 if (threads_initialized_p
)
1964 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1966 /* Signal all threads to go to sleep
1968 scm_i_thread_go_to_sleep
= 1;
1969 for (t
= all_threads
; t
; t
= t
->next_thread
)
1970 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
1971 scm_i_thread_go_to_sleep
= 0;
1976 scm_i_thread_invalidate_freelists ()
1978 /* thread_admin_mutex is already locked. */
1981 for (t
= all_threads
; t
; t
= t
->next_thread
)
1982 if (t
!= SCM_I_CURRENT_THREAD
)
1983 t
->clear_freelists_p
= 1;
1987 scm_i_thread_wake_up ()
1989 if (threads_initialized_p
)
1993 scm_i_pthread_cond_broadcast (&wake_up_cond
);
1994 for (t
= all_threads
; t
; t
= t
->next_thread
)
1995 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
1996 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1997 scm_enter_guile ((scm_t_guile_ticket
) SCM_I_CURRENT_THREAD
);
2002 scm_i_thread_sleep_for_gc ()
2004 scm_i_thread
*t
= suspend ();
2006 /* Don't put t->heap_mutex in t->held_mutex here, because if the
2007 thread is cancelled during the cond wait, the thread's cleanup
2008 function (scm_leave_guile_cleanup) will handle unlocking the
2009 heap_mutex, so we don't need to do that again in on_thread_exit.
2011 scm_i_pthread_cond_wait (&wake_up_cond
, &t
->heap_mutex
);
2016 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2018 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
2019 int scm_i_critical_section_level
= 0;
2021 static SCM dynwind_critical_section_mutex
;
2024 scm_dynwind_critical_section (SCM mutex
)
2026 if (scm_is_false (mutex
))
2027 mutex
= dynwind_critical_section_mutex
;
2028 scm_dynwind_lock_mutex (mutex
);
2029 scm_dynwind_block_asyncs ();
2032 /*** Initialization */
2034 scm_i_pthread_key_t scm_i_freelist
, scm_i_freelist2
;
2035 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2037 #if SCM_USE_PTHREAD_THREADS
2038 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
2042 scm_threads_prehistory (SCM_STACKITEM
*base
)
2044 #if SCM_USE_PTHREAD_THREADS
2045 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2046 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2047 PTHREAD_MUTEX_RECURSIVE
);
2050 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2051 scm_i_pthread_mutexattr_recursive
);
2052 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2053 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2054 scm_i_pthread_key_create (&scm_i_freelist
, NULL
);
2055 scm_i_pthread_key_create (&scm_i_freelist2
, NULL
);
2057 guilify_self_1 (base
);
2060 scm_t_bits scm_tc16_thread
;
2061 scm_t_bits scm_tc16_mutex
;
2062 scm_t_bits scm_tc16_condvar
;
2067 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2068 scm_set_smob_mark (scm_tc16_thread
, thread_mark
);
2069 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2070 scm_set_smob_free (scm_tc16_thread
, thread_free
);
2072 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2073 scm_set_smob_mark (scm_tc16_mutex
, fat_mutex_mark
);
2074 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2075 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
2077 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2079 scm_set_smob_mark (scm_tc16_condvar
, fat_cond_mark
);
2080 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2081 scm_set_smob_free (scm_tc16_condvar
, fat_cond_free
);
2083 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2084 guilify_self_2 (SCM_BOOL_F
);
2085 threads_initialized_p
= 1;
2087 dynwind_critical_section_mutex
=
2088 scm_permanent_object (scm_make_recursive_mutex ());
2092 scm_init_threads_default_dynamic_state ()
2094 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2095 scm_i_default_dynamic_state
= scm_permanent_object (state
);
2099 scm_init_thread_procs ()
2101 #include "libguile/threads.x"