1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "libguile/_scm.h"
30 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
37 #include "libguile/validate.h"
38 #include "libguile/root.h"
39 #include "libguile/eval.h"
40 #include "libguile/async.h"
41 #include "libguile/ports.h"
42 #include "libguile/threads.h"
43 #include "libguile/dynwind.h"
44 #include "libguile/iselect.h"
45 #include "libguile/fluids.h"
46 #include "libguile/continuations.h"
47 #include "libguile/gc.h"
48 #include "libguile/init.h"
49 #include "libguile/scmsigs.h"
50 #include "libguile/strings.h"
54 # define ETIMEDOUT WSAETIMEDOUT
58 # define pipe(fd) _pipe (fd, 256, O_BINARY)
59 #endif /* __MINGW32__ */
62 to_timespec (SCM t
, scm_t_timespec
*waittime
)
66 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
67 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
71 double time
= scm_to_double (t
);
72 double sec
= scm_c_truncate (time
);
74 waittime
->tv_sec
= (long) sec
;
75 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
81 /* Make an empty queue data structure.
86 return scm_cons (SCM_EOL
, SCM_EOL
);
89 /* Put T at the back of Q and return a handle that can be used with
90 remqueue to remove T from Q again.
93 enqueue (SCM q
, SCM t
)
95 SCM c
= scm_cons (t
, SCM_EOL
);
96 if (scm_is_null (SCM_CDR (q
)))
99 SCM_SETCDR (SCM_CAR (q
), c
);
104 /* Remove the element that the handle C refers to from the queue Q. C
105 must have been returned from a call to enqueue. The return value
106 is zero when the element referred to by C has already been removed.
107 Otherwise, 1 is returned.
110 remqueue (SCM q
, SCM c
)
113 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
115 if (scm_is_eq (p
, c
))
117 if (scm_is_eq (c
, SCM_CAR (q
)))
118 SCM_SETCAR (q
, SCM_CDR (c
));
119 SCM_SETCDR (prev
, SCM_CDR (c
));
127 /* Remove the front-most element from the queue Q and return it.
128 Return SCM_BOOL_F when Q is empty.
138 SCM_SETCDR (q
, SCM_CDR (c
));
139 if (scm_is_null (SCM_CDR (q
)))
140 SCM_SETCAR (q
, SCM_EOL
);
145 /*** Thread smob routines */
148 thread_mark (SCM obj
)
150 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
151 scm_gc_mark (t
->result
);
152 scm_gc_mark (t
->cleanup_handler
);
153 scm_gc_mark (t
->join_queue
);
154 scm_gc_mark (t
->mutexes
);
155 scm_gc_mark (t
->dynwinds
);
156 scm_gc_mark (t
->active_asyncs
);
157 scm_gc_mark (t
->continuation_root
);
158 return t
->dynamic_state
;
162 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
164 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
165 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
166 the struct case, hence we go via a union, and extract according to the
167 size of pthread_t. */
175 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
176 scm_i_pthread_t p
= t
->pthread
;
179 if (sizeof (p
) == sizeof (unsigned short))
181 else if (sizeof (p
) == sizeof (unsigned int))
183 else if (sizeof (p
) == sizeof (unsigned long))
188 scm_puts ("#<thread ", port
);
189 scm_uintprint (id
, 10, port
);
190 scm_puts (" (", port
);
191 scm_uintprint ((scm_t_bits
)t
, 16, port
);
192 scm_puts (")>", port
);
197 thread_free (SCM obj
)
199 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
201 scm_gc_free (t
, sizeof (*t
), "thread");
205 /*** Blocking on queues. */
207 /* See also scm_i_queue_async_cell for how such a block is
211 /* Put the current thread on QUEUE and go to sleep, waiting for it to
212 be woken up by a call to 'unblock_from_queue', or to be
213 interrupted. Upon return of this function, the current thread is
214 no longer on QUEUE, even when the sleep has been interrupted.
216 The QUEUE data structure is assumed to be protected by MUTEX and
217 the caller of block_self must hold MUTEX. It will be atomically
218 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
220 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
223 When WAITTIME is not NULL, the sleep will be aborted at that time.
225 The return value of block_self is an errno value. It will be zero
226 when the sleep has been successfully completed by a call to
227 unblock_from_queue, EINTR when it has been interrupted by the
228 delivery of a system async, and ETIMEDOUT when the timeout has
231 The system asyncs themselves are not executed by block_self.
234 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
235 const scm_t_timespec
*waittime
)
237 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
241 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
246 q_handle
= enqueue (queue
, t
->handle
);
247 if (waittime
== NULL
)
248 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
250 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
252 /* When we are still on QUEUE, we have been interrupted. We
253 report this only when no other error (such as a timeout) has
256 if (remqueue (queue
, q_handle
) && err
== 0)
259 scm_i_reset_sleep (t
);
265 /* Wake up the first thread on QUEUE, if any. The caller must hold
266 the mutex that protects QUEUE. The awoken thread is returned, or
267 #f when the queue was empty.
270 unblock_from_queue (SCM queue
)
272 SCM thread
= dequeue (queue
);
273 if (scm_is_true (thread
))
274 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
278 /* Getting into and out of guile mode.
281 /* Ken Raeburn observes that the implementation of suspend and resume
282 (and the things that build on top of them) are very likely not
283 correct (see below). We will need fix this eventually, and that's
284 why scm_leave_guile/scm_enter_guile are not exported in the API.
288 Consider this sequence:
290 Function foo, called in Guile mode, calls suspend (maybe indirectly
291 through scm_leave_guile), which does this:
293 // record top of stack for the GC
294 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
297 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
298 setjmp (t->regs); // here's most of the magic
302 Function foo has a SCM value X, a handle on a non-immediate object, in
303 a caller-saved register R, and it's the only reference to the object
306 The compiler wants to use R in suspend, so it pushes the current
307 value, X, into a stack slot which will be reloaded on exit from
308 suspend; then it loads stuff into R and goes about its business. The
309 setjmp call saves (some of) the current registers, including R, which
310 no longer contains X. (This isn't a problem for a normal
311 setjmp/longjmp situation, where longjmp would be called before
312 setjmp's caller returns; the old value for X would be loaded back from
313 the stack after the longjmp, before the function returned.)
315 So, suspend returns, loading X back into R (and invalidating the jump
316 buffer) in the process. The caller foo then goes off and calls a
317 bunch of other functions out of Guile mode, occasionally storing X on
318 the stack again, but, say, much deeper on the stack than suspend's
319 stack frame went, and the stack slot where suspend had written X has
320 long since been overwritten with other values.
322 Okay, nothing actively broken so far. Now, let garbage collection
323 run, triggered by another thread.
325 The thread calling foo is out of Guile mode at the time, so the
326 garbage collector just scans a range of stack addresses. Too bad that
327 X isn't stored there. So the pointed-to storage goes onto the free
328 list, and I think you can see where things go from there.
330 Is there anything I'm missing that'll prevent this scenario from
331 happening? I mean, aside from, "well, suspend and scm_leave_guile
332 don't have many local variables, so they probably won't need to save
333 any registers on most systems, so we hope everything will wind up in
334 the jump buffer and we'll just get away with it"?
336 (And, going the other direction, if scm_leave_guile and suspend push
337 the stack pointer over onto a new page, and foo doesn't make further
338 function calls and thus the stack pointer no longer includes that
339 page, are we guaranteed that the kernel cannot release the now-unused
340 stack page that contains the top-of-stack pointer we just saved? I
341 don't know if any OS actually does that. If it does, we could get
342 faults in garbage collection.)
344 I don't think scm_without_guile has to have this problem, as it gets
345 more control over the stack handling -- but it should call setjmp
346 itself. I'd probably try something like:
348 // record top of stack for the GC
349 t->top = SCM_STACK_PTR (&t);
351 SCM_FLUSH_REGISTER_WINDOWS;
356 ... though even that's making some assumptions about the stack
357 ordering of local variables versus caller-saved registers.
359 For something like scm_leave_guile to work, I don't think it can just
360 rely on invalidated jump buffers. A valid jump buffer, and a handle
361 on the stack state at the point when the jump buffer was initialized,
362 together, would work fine, but I think then we're talking about macros
363 invoking setjmp in the caller's stack frame, and requiring that the
364 caller of scm_leave_guile also call scm_enter_guile before returning,
365 kind of like pthread_cleanup_push/pop calls that have to be paired up
366 in a function. (In fact, the pthread ones have to be paired up
367 syntactically, as if they might expand to a compound statement
368 incorporating the user's code, and invoking a compiler's
369 exception-handling primitives. Which might be something to think
370 about for cases where Guile is used with C++ exceptions or
374 scm_i_pthread_key_t scm_i_thread_key
;
377 resume (scm_i_thread
*t
)
380 if (t
->clear_freelists_p
)
382 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
383 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
384 t
->clear_freelists_p
= 0;
388 typedef void* scm_t_guile_ticket
;
391 scm_enter_guile (scm_t_guile_ticket ticket
)
393 scm_i_thread
*t
= (scm_i_thread
*)ticket
;
396 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
401 static scm_i_thread
*
404 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
406 /* record top of stack for the GC */
407 t
->top
= SCM_STACK_PTR (&t
);
408 /* save registers. */
409 SCM_FLUSH_REGISTER_WINDOWS
;
414 static scm_t_guile_ticket
417 scm_i_thread
*t
= suspend ();
418 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
419 return (scm_t_guile_ticket
) t
;
422 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
423 static scm_i_thread
*all_threads
= NULL
;
424 static int thread_count
;
426 static SCM scm_i_default_dynamic_state
;
428 /* Perform first stage of thread initialisation, in non-guile mode.
431 guilify_self_1 (SCM_STACKITEM
*base
)
433 scm_i_thread
*t
= malloc (sizeof (scm_i_thread
));
435 t
->pthread
= scm_i_pthread_self ();
436 t
->handle
= SCM_BOOL_F
;
437 t
->result
= SCM_BOOL_F
;
438 t
->cleanup_handler
= SCM_BOOL_F
;
439 t
->mutexes
= SCM_EOL
;
440 t
->join_queue
= SCM_EOL
;
441 t
->dynamic_state
= SCM_BOOL_F
;
442 t
->dynwinds
= SCM_EOL
;
443 t
->active_asyncs
= SCM_EOL
;
445 t
->pending_asyncs
= 1;
446 t
->last_debug_frame
= NULL
;
449 /* Calculate and store off the base of this thread's register
450 backing store (RBS). Unfortunately our implementation(s) of
451 scm_ia64_register_backing_store_base are only reliable for the
452 main thread. For other threads, therefore, find out the current
453 top of the RBS, and use that as a maximum. */
454 t
->register_backing_store_base
= scm_ia64_register_backing_store_base ();
459 bsp
= scm_ia64_ar_bsp (&ctx
);
460 if (t
->register_backing_store_base
> bsp
)
461 t
->register_backing_store_base
= bsp
;
464 t
->continuation_root
= SCM_EOL
;
465 t
->continuation_base
= base
;
466 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
467 t
->sleep_mutex
= NULL
;
468 t
->sleep_object
= SCM_BOOL_F
;
470 /* XXX - check for errors. */
471 pipe (t
->sleep_pipe
);
472 scm_i_pthread_mutex_init (&t
->heap_mutex
, NULL
);
473 scm_i_pthread_mutex_init (&t
->admin_mutex
, NULL
);
474 t
->clear_freelists_p
= 0;
479 t
->freelist
= SCM_EOL
;
480 t
->freelist2
= SCM_EOL
;
481 SCM_SET_FREELIST_LOC (scm_i_freelist
, &t
->freelist
);
482 SCM_SET_FREELIST_LOC (scm_i_freelist2
, &t
->freelist2
);
484 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
486 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
488 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
489 t
->next_thread
= all_threads
;
492 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
495 /* Perform second stage of thread initialisation, in guile mode.
498 guilify_self_2 (SCM parent
)
500 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
502 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
503 scm_gc_register_collectable_memory (t
, sizeof (scm_i_thread
), "thread");
504 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
505 t
->continuation_base
= t
->base
;
507 if (scm_is_true (parent
))
508 t
->dynamic_state
= scm_make_dynamic_state (parent
);
510 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
512 t
->join_queue
= make_queue ();
519 /* We implement our own mutex type since we want them to be 'fair', we
520 want to do fancy things while waiting for them (like running
521 asyncs) and we might want to add things that are nice for
526 scm_i_pthread_mutex_t lock
;
528 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
530 int recursive
; /* allow recursive locking? */
531 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
532 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
533 owned by the current thread? */
535 SCM waiting
; /* the threads waiting for this mutex. */
538 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
539 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
541 /* Perform thread tear-down, in guile mode.
544 do_thread_exit (void *v
)
546 scm_i_thread
*t
= (scm_i_thread
*) v
;
548 if (!scm_is_false (t
->cleanup_handler
))
550 SCM ptr
= t
->cleanup_handler
;
552 t
->cleanup_handler
= SCM_BOOL_F
;
553 t
->result
= scm_internal_catch (SCM_BOOL_T
,
554 (scm_t_catch_body
) scm_call_0
, ptr
,
555 scm_handle_by_message_noexit
, NULL
);
558 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
561 close (t
->sleep_pipe
[0]);
562 close (t
->sleep_pipe
[1]);
563 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
566 while (!scm_is_null (t
->mutexes
))
568 SCM mutex
= SCM_CAR (t
->mutexes
);
569 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
570 scm_i_pthread_mutex_lock (&m
->lock
);
572 unblock_from_queue (m
->waiting
);
574 scm_i_pthread_mutex_unlock (&m
->lock
);
575 t
->mutexes
= SCM_CDR (t
->mutexes
);
578 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
584 on_thread_exit (void *v
)
586 /* This handler is executed in non-guile mode. */
587 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
589 scm_i_pthread_setspecific (scm_i_thread_key
, v
);
591 /* Ensure the signal handling thread has been launched, because we might be
593 scm_i_ensure_signal_delivery_thread ();
595 /* Unblocking the joining threads needs to happen in guile mode
596 since the queue is a SCM data structure. */
597 scm_with_guile (do_thread_exit
, v
);
599 /* Removing ourself from the list of all threads needs to happen in
600 non-guile mode since all SCM values on our stack become
601 unprotected once we are no longer in the list. */
602 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
603 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
606 *tp
= t
->next_thread
;
611 /* If there's only one other thread, it could be the signal delivery
612 thread, so we need to notify it to shut down by closing its read pipe.
613 If it's not the signal delivery thread, then closing the read pipe isn't
615 if (thread_count
<= 1)
616 scm_i_close_signal_pipe ();
618 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
620 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
623 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
626 init_thread_key (void)
628 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
631 /* Perform any initializations necessary to bring the current thread
632 into guile mode, initializing Guile itself, if necessary.
634 BASE is the stack base to use with GC.
636 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
637 which case the default dynamic state is used.
639 Return zero when the thread was in guile mode already; otherwise
644 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
648 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
650 if ((t
= SCM_I_CURRENT_THREAD
) == NULL
)
652 /* This thread has not been guilified yet.
655 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
656 if (scm_initialized_p
== 0)
658 /* First thread ever to enter Guile. Run the full
661 scm_i_init_guile (base
);
662 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
666 /* Guile is already initialized, but this thread enters it for
667 the first time. Only initialize this thread.
669 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
670 guilify_self_1 (base
);
671 guilify_self_2 (parent
);
677 /* This thread is already guilified but not in guile mode, just
680 XXX - base might be lower than when this thread was first
683 scm_enter_guile ((scm_t_guile_ticket
) t
);
688 /* Thread is already in guile mode. Nothing to do.
694 #if SCM_USE_PTHREAD_THREADS
696 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
697 /* This method for GNU/Linux and perhaps some other systems.
698 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
699 available on them. */
700 #define HAVE_GET_THREAD_STACK_BASE
702 static SCM_STACKITEM
*
703 get_thread_stack_base ()
709 pthread_getattr_np (pthread_self (), &attr
);
710 pthread_attr_getstack (&attr
, &start
, &size
);
711 end
= (char *)start
+ size
;
713 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
714 for the main thread, but we can use scm_get_stack_base in that
718 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
719 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
720 return scm_get_stack_base ();
724 #if SCM_STACK_GROWS_UP
732 #elif HAVE_PTHREAD_GET_STACKADDR_NP
733 /* This method for MacOS X.
734 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
735 but as of 2006 there's nothing obvious at apple.com. */
736 #define HAVE_GET_THREAD_STACK_BASE
737 static SCM_STACKITEM
*
738 get_thread_stack_base ()
740 return pthread_get_stackaddr_np (pthread_self ());
743 #elif defined (__MINGW32__)
744 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
745 in any thread. We don't like hard-coding the name of a system, but there
746 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
748 #define HAVE_GET_THREAD_STACK_BASE
749 static SCM_STACKITEM
*
750 get_thread_stack_base ()
752 return scm_get_stack_base ();
755 #endif /* pthread methods of get_thread_stack_base */
757 #else /* !SCM_USE_PTHREAD_THREADS */
759 #define HAVE_GET_THREAD_STACK_BASE
761 static SCM_STACKITEM
*
762 get_thread_stack_base ()
764 return scm_get_stack_base ();
767 #endif /* !SCM_USE_PTHREAD_THREADS */
769 #ifdef HAVE_GET_THREAD_STACK_BASE
774 scm_i_init_thread_for_guile (get_thread_stack_base (),
775 scm_i_default_dynamic_state
);
781 scm_with_guile (void *(*func
)(void *), void *data
)
783 return scm_i_with_guile_and_parent (func
, data
,
784 scm_i_default_dynamic_state
);
787 SCM_UNUSED
static void
788 scm_leave_guile_cleanup (void *x
)
794 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
798 SCM_STACKITEM base_item
;
800 really_entered
= scm_i_init_thread_for_guile (&base_item
, parent
);
803 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup
, NULL
);
804 res
= scm_c_with_continuation_barrier (func
, data
);
805 scm_i_pthread_cleanup_pop (0);
809 res
= scm_c_with_continuation_barrier (func
, data
);
815 scm_without_guile (void *(*func
)(void *), void *data
)
818 scm_t_guile_ticket t
;
819 t
= scm_leave_guile ();
825 /*** Thread creation */
832 scm_i_pthread_mutex_t mutex
;
833 scm_i_pthread_cond_t cond
;
837 really_launch (void *d
)
839 launch_data
*data
= (launch_data
*)d
;
840 SCM thunk
= data
->thunk
, handler
= data
->handler
;
843 t
= SCM_I_CURRENT_THREAD
;
845 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
846 data
->thread
= scm_current_thread ();
847 scm_i_pthread_cond_signal (&data
->cond
);
848 scm_i_pthread_mutex_unlock (&data
->mutex
);
850 if (SCM_UNBNDP (handler
))
851 t
->result
= scm_call_0 (thunk
);
853 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
859 launch_thread (void *d
)
861 launch_data
*data
= (launch_data
*)d
;
862 scm_i_pthread_detach (scm_i_pthread_self ());
863 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
867 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
868 (SCM thunk
, SCM handler
),
869 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
870 "returning a new thread object representing the thread. The procedure\n"
871 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
873 "When @var{handler} is specified, then @var{thunk} is called from\n"
874 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
875 "handler. This catch is established inside the continuation barrier.\n"
877 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
878 "the @emph{exit value} of the thread and the thread is terminated.")
879 #define FUNC_NAME s_scm_call_with_new_thread
885 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
886 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
887 handler
, SCM_ARG2
, FUNC_NAME
);
889 data
.parent
= scm_current_dynamic_state ();
891 data
.handler
= handler
;
892 data
.thread
= SCM_BOOL_F
;
893 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
894 scm_i_pthread_cond_init (&data
.cond
, NULL
);
896 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
897 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
900 scm_i_pthread_mutex_unlock (&data
.mutex
);
904 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
905 scm_i_pthread_mutex_unlock (&data
.mutex
);
913 scm_t_catch_body body
;
915 scm_t_catch_handler handler
;
918 scm_i_pthread_mutex_t mutex
;
919 scm_i_pthread_cond_t cond
;
923 really_spawn (void *d
)
925 spawn_data
*data
= (spawn_data
*)d
;
926 scm_t_catch_body body
= data
->body
;
927 void *body_data
= data
->body_data
;
928 scm_t_catch_handler handler
= data
->handler
;
929 void *handler_data
= data
->handler_data
;
930 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
932 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
933 data
->thread
= scm_current_thread ();
934 scm_i_pthread_cond_signal (&data
->cond
);
935 scm_i_pthread_mutex_unlock (&data
->mutex
);
938 t
->result
= body (body_data
);
940 t
->result
= scm_internal_catch (SCM_BOOL_T
,
942 handler
, handler_data
);
948 spawn_thread (void *d
)
950 spawn_data
*data
= (spawn_data
*)d
;
951 scm_i_pthread_detach (scm_i_pthread_self ());
952 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
957 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
958 scm_t_catch_handler handler
, void *handler_data
)
964 data
.parent
= scm_current_dynamic_state ();
966 data
.body_data
= body_data
;
967 data
.handler
= handler
;
968 data
.handler_data
= handler_data
;
969 data
.thread
= SCM_BOOL_F
;
970 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
971 scm_i_pthread_cond_init (&data
.cond
, NULL
);
973 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
974 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
977 scm_i_pthread_mutex_unlock (&data
.mutex
);
981 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
982 scm_i_pthread_mutex_unlock (&data
.mutex
);
987 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
989 "Move the calling thread to the end of the scheduling queue.")
990 #define FUNC_NAME s_scm_yield
992 return scm_from_bool (scm_i_sched_yield ());
996 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
998 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
999 "cannot be the current thread, and if @var{thread} has already terminated or "
1000 "been signaled to terminate, this function is a no-op.")
1001 #define FUNC_NAME s_scm_cancel_thread
1003 scm_i_thread
*t
= NULL
;
1005 SCM_VALIDATE_THREAD (1, thread
);
1006 t
= SCM_I_THREAD_DATA (thread
);
1007 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1011 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1012 scm_i_pthread_cancel (t
->pthread
);
1015 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1017 return SCM_UNSPECIFIED
;
1021 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1022 (SCM thread
, SCM proc
),
1023 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1024 "This handler will be called when the thread exits.")
1025 #define FUNC_NAME s_scm_set_thread_cleanup_x
1029 SCM_VALIDATE_THREAD (1, thread
);
1030 if (!scm_is_false (proc
))
1031 SCM_VALIDATE_THUNK (2, proc
);
1033 t
= SCM_I_THREAD_DATA (thread
);
1034 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1036 if (!(t
->exited
|| t
->canceled
))
1037 t
->cleanup_handler
= proc
;
1039 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1041 return SCM_UNSPECIFIED
;
1045 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1047 "Return the cleanup handler installed for the thread @var{thread}.")
1048 #define FUNC_NAME s_scm_thread_cleanup
1053 SCM_VALIDATE_THREAD (1, thread
);
1055 t
= SCM_I_THREAD_DATA (thread
);
1056 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1057 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1058 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1064 SCM
scm_join_thread (SCM thread
)
1066 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1069 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1070 (SCM thread
, SCM timeout
, SCM timeoutval
),
1071 "Suspend execution of the calling thread until the target @var{thread} "
1072 "terminates, unless the target @var{thread} has already terminated. ")
1073 #define FUNC_NAME s_scm_join_thread_timed
1076 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1077 SCM res
= SCM_BOOL_F
;
1079 if (! (SCM_UNBNDP (timeoutval
)))
1082 SCM_VALIDATE_THREAD (1, thread
);
1083 if (scm_is_eq (scm_current_thread (), thread
))
1084 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1086 t
= SCM_I_THREAD_DATA (thread
);
1087 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1089 if (! SCM_UNBNDP (timeout
))
1091 to_timespec (timeout
, &ctimeout
);
1092 timeout_ptr
= &ctimeout
;
1101 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1111 else if (err
== ETIMEDOUT
)
1114 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1116 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1120 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1126 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1128 "Return @code{#t} if @var{obj} is a thread.")
1129 #define FUNC_NAME s_scm_thread_p
1131 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1136 fat_mutex_mark (SCM mx
)
1138 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1139 scm_gc_mark (m
->owner
);
1144 fat_mutex_free (SCM mx
)
1146 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1147 scm_i_pthread_mutex_destroy (&m
->lock
);
1148 scm_gc_free (m
, sizeof (fat_mutex
), "mutex");
1153 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1155 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1156 scm_puts ("#<mutex ", port
);
1157 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1158 scm_puts (">", port
);
1163 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1168 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1169 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1170 m
->owner
= SCM_BOOL_F
;
1173 m
->recursive
= recursive
;
1174 m
->unchecked_unlock
= unchecked_unlock
;
1175 m
->allow_external_unlock
= external_unlock
;
1177 m
->waiting
= SCM_EOL
;
1178 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1179 m
->waiting
= make_queue ();
1183 SCM
scm_make_mutex (void)
1185 return scm_make_mutex_with_flags (SCM_EOL
);
1188 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1189 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1190 SCM_SYMBOL (recursive_sym
, "recursive");
1192 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1194 "Create a new mutex. ")
1195 #define FUNC_NAME s_scm_make_mutex_with_flags
1197 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1200 while (! scm_is_null (ptr
))
1202 SCM flag
= SCM_CAR (ptr
);
1203 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1204 unchecked_unlock
= 1;
1205 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1206 external_unlock
= 1;
1207 else if (scm_is_eq (flag
, recursive_sym
))
1210 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1211 ptr
= SCM_CDR (ptr
);
1213 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1217 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1219 "Create a new recursive mutex. ")
1220 #define FUNC_NAME s_scm_make_recursive_mutex
1222 return make_fat_mutex (1, 0, 0);
1226 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1229 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1231 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1233 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1234 SCM err
= SCM_BOOL_F
;
1236 struct timeval current_time
;
1238 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1244 m
->owner
= new_owner
;
1247 if (SCM_I_IS_THREAD (new_owner
))
1249 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1250 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1251 t
->mutexes
= scm_cons (mutex
, t
->mutexes
);
1252 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1257 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1259 m
->owner
= new_owner
;
1260 err
= scm_cons (scm_abandoned_mutex_error_key
,
1261 scm_from_locale_string ("lock obtained on abandoned "
1266 else if (scm_is_eq (m
->owner
, new_owner
))
1275 err
= scm_cons (scm_misc_error_key
,
1276 scm_from_locale_string ("mutex already locked "
1284 if (timeout
!= NULL
)
1286 gettimeofday (¤t_time
, NULL
);
1287 if (current_time
.tv_sec
> timeout
->tv_sec
||
1288 (current_time
.tv_sec
== timeout
->tv_sec
&&
1289 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1295 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1296 scm_i_pthread_mutex_unlock (&m
->lock
);
1298 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1301 scm_i_pthread_mutex_unlock (&m
->lock
);
1305 SCM
scm_lock_mutex (SCM mx
)
1307 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1310 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1311 (SCM m
, SCM timeout
, SCM owner
),
1312 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1313 "blocks until the mutex becomes available. The function returns when "
1314 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1315 "a thread already owns will succeed right away and will not block the "
1316 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1317 #define FUNC_NAME s_scm_lock_mutex_timed
1321 scm_t_timespec cwaittime
, *waittime
= NULL
;
1323 SCM_VALIDATE_MUTEX (1, m
);
1325 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1327 to_timespec (timeout
, &cwaittime
);
1328 waittime
= &cwaittime
;
1331 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1332 if (!scm_is_false (exception
))
1333 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1334 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1339 scm_dynwind_lock_mutex (SCM mutex
)
1341 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1342 SCM_F_WIND_EXPLICITLY
);
1343 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1344 SCM_F_WIND_EXPLICITLY
);
1347 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1349 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1350 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1351 #define FUNC_NAME s_scm_try_mutex
1355 scm_t_timespec cwaittime
, *waittime
= NULL
;
1357 SCM_VALIDATE_MUTEX (1, mutex
);
1359 to_timespec (scm_from_int(0), &cwaittime
);
1360 waittime
= &cwaittime
;
1362 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1363 if (!scm_is_false (exception
))
1364 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1365 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1369 /*** Fat condition variables */
1372 scm_i_pthread_mutex_t lock
;
1373 SCM waiting
; /* the threads waiting for this condition. */
1376 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1377 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* fat_mutex_unlock -- release fat mutex MUTEX; when COND is bound,
   additionally wait on condition variable COND (with optional absolute
   WAITTIME) and, per RELOCK, re-acquire the mutex before returning.
   Raises misc errors when the mutex is not locked (unless
   m->unchecked_unlock) or not locked by the caller (unless
   m->allow_external_unlock).
   NOTE(review): interior lines were dropped by extraction (original
   numbering jumps 1393-1395, 1413-1416, 1424-1426, 1428-1448,
   1462-1468, ...); the wait/retry loop and the `return ret` epilogue
   are not fully visible -- consult the complete file before editing.  */
1380 fat_mutex_unlock (SCM mutex
, SCM cond
,
1381 const scm_t_timespec
*waittime
, int relock
)
1383 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1385 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1386 int err
= 0, ret
= 0;
1388 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1390 SCM owner
= m
->owner
;
/* Caller does not own the mutex: error unless the mutex was created
   with the corresponding permissive flag.  */
1392 if (!scm_is_eq (owner
, scm_current_thread ()))
1396 if (!m
->unchecked_unlock
)
1398 scm_i_pthread_mutex_unlock (&m
->lock
);
1399 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1401 owner
= scm_current_thread ();
1403 else if (!m
->allow_external_unlock
)
1405 scm_i_pthread_mutex_unlock (&m
->lock
);
1406 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* Condition-variable path: hand the mutex to a waiter, then sleep on
   the condvar's queue under c->lock.  */
1410 if (! (SCM_UNBNDP (cond
)))
1412 c
= SCM_CONDVAR_DATA (cond
);
1417 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1421 m
->owner
= unblock_from_queue (m
->waiting
);
1423 scm_i_pthread_mutex_unlock (&m
->lock
);
1427 err
= block_self (c
->waiting
, cond
, &c
->lock
, waittime
);
1434 else if (err
== ETIMEDOUT
)
1439 else if (err
!= EINTR
)
1442 scm_i_pthread_mutex_unlock (&c
->lock
);
1443 scm_syserror (NULL
);
/* Re-acquire the mutex on behalf of the original owner before
   returning to the caller.  */
1449 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1450 scm_i_pthread_mutex_unlock (&c
->lock
);
1454 scm_i_pthread_mutex_unlock (&c
->lock
);
1459 scm_remember_upto_here_2 (cond
, mutex
);
1461 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Plain unlock path: pass ownership to the next queued waiter.  */
1469 m
->owner
= unblock_from_queue (m
->waiting
);
1471 scm_i_pthread_mutex_unlock (&m
->lock
);
1478 SCM
scm_unlock_mutex (SCM mx
)
1480 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1483 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1484 (SCM mx
, SCM cond
, SCM timeout
),
1485 "Unlocks @var{mutex} if the calling thread owns the lock on "
1486 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1487 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1488 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1489 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1490 "with a call to @code{unlock-mutex}. Only the last call to "
1491 "@code{unlock-mutex} will actually unlock the mutex. ")
1492 #define FUNC_NAME s_scm_unlock_mutex_timed
1494 scm_t_timespec cwaittime
, *waittime
= NULL
;
1496 SCM_VALIDATE_MUTEX (1, mx
);
1497 if (! (SCM_UNBNDP (cond
)))
1499 SCM_VALIDATE_CONDVAR (2, cond
);
1501 if (! (SCM_UNBNDP (timeout
)))
1503 to_timespec (timeout
, &cwaittime
);
1504 waittime
= &cwaittime
;
1508 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1512 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1514 "Return @code{#t} if @var{obj} is a mutex.")
1515 #define FUNC_NAME s_scm_mutex_p
1517 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1521 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1523 "Return the thread owning @var{mx}, or @code{#f}.")
1524 #define FUNC_NAME s_scm_mutex_owner
1527 fat_mutex
*m
= NULL
;
1529 SCM_VALIDATE_MUTEX (1, mx
);
1530 m
= SCM_MUTEX_DATA (mx
);
1531 scm_i_pthread_mutex_lock (&m
->lock
);
1533 scm_i_pthread_mutex_unlock (&m
->lock
);
1539 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1541 "Return the lock level of mutex @var{mx}.")
1542 #define FUNC_NAME s_scm_mutex_level
1544 SCM_VALIDATE_MUTEX (1, mx
);
1545 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1549 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1551 "Returns @code{#t} if the mutex @var{mx} is locked.")
1552 #define FUNC_NAME s_scm_mutex_locked_p
1554 SCM_VALIDATE_MUTEX (1, mx
);
1555 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1560 fat_cond_mark (SCM cv
)
1562 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1567 fat_cond_free (SCM mx
)
1569 fat_cond
*c
= SCM_CONDVAR_DATA (mx
);
1570 scm_i_pthread_mutex_destroy (&c
->lock
);
1571 scm_gc_free (c
, sizeof (fat_cond
), "condition-variable");
1576 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1578 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1579 scm_puts ("#<condition-variable ", port
);
1580 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1581 scm_puts (">", port
);
1585 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1587 "Make a new condition variable.")
1588 #define FUNC_NAME s_scm_make_condition_variable
1593 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1594 scm_i_pthread_mutex_init (&c
->lock
, 0);
1595 c
->waiting
= SCM_EOL
;
1596 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1597 c
->waiting
= make_queue ();
1602 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1603 (SCM cv
, SCM mx
, SCM t
),
1604 "Wait until @var{cond-var} has been signalled. While waiting, "
1605 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1606 "is locked again when this function returns. When @var{time} is given, "
1607 "it specifies a point in time where the waiting should be aborted. It "
1608 "can be either a integer as returned by @code{current-time} or a pair "
1609 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1610 "mutex is locked and @code{#f} is returned. When the condition "
1611 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1613 #define FUNC_NAME s_scm_timed_wait_condition_variable
1615 scm_t_timespec waittime
, *waitptr
= NULL
;
1617 SCM_VALIDATE_CONDVAR (1, cv
);
1618 SCM_VALIDATE_MUTEX (2, mx
);
1620 if (!SCM_UNBNDP (t
))
1622 to_timespec (t
, &waittime
);
1623 waitptr
= &waittime
;
1626 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1631 fat_cond_signal (fat_cond
*c
)
1633 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1634 unblock_from_queue (c
->waiting
);
1635 scm_i_pthread_mutex_unlock (&c
->lock
);
1638 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1640 "Wake up one thread that is waiting for @var{cv}")
1641 #define FUNC_NAME s_scm_signal_condition_variable
1643 SCM_VALIDATE_CONDVAR (1, cv
);
1644 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1650 fat_cond_broadcast (fat_cond
*c
)
1652 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1653 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1655 scm_i_pthread_mutex_unlock (&c
->lock
);
1658 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1660 "Wake up all threads that are waiting for @var{cv}. ")
1661 #define FUNC_NAME s_scm_broadcast_condition_variable
1663 SCM_VALIDATE_CONDVAR (1, cv
);
1664 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1669 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1671 "Return @code{#t} if @var{obj} is a condition variable.")
1672 #define FUNC_NAME s_scm_condition_variable_p
1674 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* IA64 register backing store marking.  NOTE(review): the #ifdef
   head and tail lines of this conditional (and the close of the XXX
   comment) were dropped by extraction; only the two alternative macro
   bodies survive below -- consult the full file before editing.  */
1678 /*** Marking stacks */
1680 /* XXX - what to do with this? Do we need to handle this for blocked
1684 # define SCM_MARK_BACKING_STORE() do { \
1686 SCM_STACKITEM * top, * bot; \
1687 getcontext (&ctx); \
1688 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1689 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1690 / sizeof (SCM_STACKITEM))); \
1691 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
1692 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
1693 scm_mark_locations (bot, top - bot); } while (0)
1695 # define SCM_MARK_BACKING_STORE()
/* Mark the C stack (and machine registers) of every thread for GC.
   NOTE(review): local declarations and the #else/#endif structure of
   the SCM_STACK_GROWS_UP conditional were dropped by extraction --
   consult the full file before editing.  */
1699 scm_threads_mark_stacks (void)
1702 for (t
= all_threads
; t
; t
= t
->next_thread
)
1704 /* Check that thread has indeed been suspended.
/* Keep the thread object itself alive.  */
1708 scm_gc_mark (t
->handle
);
/* Mark the active stack region between t->base and t->top, in the
   direction appropriate for this platform's stack growth.  */
1710 #if SCM_STACK_GROWS_UP
1711 scm_mark_locations (t
->base
, t
->top
- t
->base
);
1713 scm_mark_locations (t
->top
, t
->base
- t
->top
);
/* Also mark the register snapshot saved in t->regs.  */
1715 scm_mark_locations ((SCM_STACKITEM
*) &t
->regs
,
1716 ((size_t) sizeof(t
->regs
)
1717 / sizeof (SCM_STACKITEM
)));
1720 SCM_MARK_BACKING_STORE ();
/* A select() wrapper that leaves guile mode while blocking and that
   can be woken early through the calling thread's sleep pipe (used by
   the async machinery).  NOTE(review): interior lines were dropped by
   extraction (locals such as my_readfds and dummy, the nfds
   adjustment, errno handling and the final return) -- consult the
   full file before editing.  */
1726 scm_std_select (int nfds
,
1727 SELECT_TYPE
*readfds
,
1728 SELECT_TYPE
*writefds
,
1729 SELECT_TYPE
*exceptfds
,
1730 struct timeval
*timeout
)
1733 int res
, eno
, wakeup_fd
;
1734 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1735 scm_t_guile_ticket ticket
;
/* Ensure we always have a read set: the wakeup fd must be watched.  */
1737 if (readfds
== NULL
)
1739 FD_ZERO (&my_readfds
);
1740 readfds
= &my_readfds
;
1743 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
/* Add the read end of the sleep pipe so a pending async wakes us.  */
1746 wakeup_fd
= t
->sleep_pipe
[0];
1747 ticket
= scm_leave_guile ();
1748 FD_SET (wakeup_fd
, readfds
);
1749 if (wakeup_fd
>= nfds
)
1751 res
= select (nfds
, readfds
, writefds
, exceptfds
, timeout
);
1754 scm_enter_guile (ticket
);
1756 scm_i_reset_sleep (t
);
/* If the wakeup pipe fired, drain one byte and hide the fd from the
   caller's view of the result set.  */
1758 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1761 read (wakeup_fd
, &dummy
, 1);
1762 FD_CLR (wakeup_fd
, readfds
);
1774 /* Convenience API for blocking while in guile mode. */
1776 #if SCM_USE_PTHREAD_THREADS
1779 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1781 scm_t_guile_ticket t
= scm_leave_guile ();
1782 int res
= scm_i_pthread_mutex_lock (mutex
);
1783 scm_enter_guile (t
);
1788 do_unlock (void *data
)
1790 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1794 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1796 scm_i_scm_pthread_mutex_lock (mutex
);
1797 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1801 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1803 scm_t_guile_ticket t
= scm_leave_guile ();
1804 int res
= scm_i_pthread_cond_wait (cond
, mutex
);
1805 scm_enter_guile (t
);
1810 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1811 scm_i_pthread_mutex_t
*mutex
,
1812 const scm_t_timespec
*wt
)
1814 scm_t_guile_ticket t
= scm_leave_guile ();
1815 int res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1816 scm_enter_guile (t
);
1823 scm_std_usleep (unsigned long usecs
)
1826 tv
.tv_usec
= usecs
% 1000000;
1827 tv
.tv_sec
= usecs
/ 1000000;
1828 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1829 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1833 scm_std_sleep (unsigned int secs
)
1838 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1844 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1846 "Return the thread that called this function.")
1847 #define FUNC_NAME s_scm_current_thread
1849 return SCM_I_CURRENT_THREAD
->handle
;
1854 scm_c_make_list (size_t n
, SCM fill
)
1858 res
= scm_cons (fill
, res
);
1862 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1864 "Return a list of all threads.")
1865 #define FUNC_NAME s_scm_all_threads
1867 /* We can not allocate while holding the thread_admin_mutex because
1868 of the way GC is done.
1870 int n
= thread_count
;
1872 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1874 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1876 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1878 if (t
!= scm_i_signal_delivery_thread
)
1880 SCM_SETCAR (*l
, t
->handle
);
1881 l
= SCM_CDRLOC (*l
);
1886 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1891 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1893 "Return @code{#t} iff @var{thread} has exited.\n")
1894 #define FUNC_NAME s_scm_thread_exited_p
1896 return scm_from_bool (scm_c_thread_exited_p (thread
));
1901 scm_c_thread_exited_p (SCM thread
)
1902 #define FUNC_NAME s_scm_thread_exited_p
1905 SCM_VALIDATE_THREAD (1, thread
);
1906 t
= SCM_I_THREAD_DATA (thread
);
1911 static scm_i_pthread_cond_t wake_up_cond
;
1912 int scm_i_thread_go_to_sleep
;
1913 static int threads_initialized_p
= 0;
1916 scm_i_thread_put_to_sleep ()
1918 if (threads_initialized_p
)
1923 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1925 /* Signal all threads to go to sleep
1927 scm_i_thread_go_to_sleep
= 1;
1928 for (t
= all_threads
; t
; t
= t
->next_thread
)
1929 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
1930 scm_i_thread_go_to_sleep
= 0;
1935 scm_i_thread_invalidate_freelists ()
1937 /* thread_admin_mutex is already locked. */
1940 for (t
= all_threads
; t
; t
= t
->next_thread
)
1941 if (t
!= SCM_I_CURRENT_THREAD
)
1942 t
->clear_freelists_p
= 1;
1946 scm_i_thread_wake_up ()
1948 if (threads_initialized_p
)
1952 scm_i_pthread_cond_broadcast (&wake_up_cond
);
1953 for (t
= all_threads
; t
; t
= t
->next_thread
)
1954 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
1955 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1956 scm_enter_guile ((scm_t_guile_ticket
) SCM_I_CURRENT_THREAD
);
1961 scm_i_thread_sleep_for_gc ()
1963 scm_i_thread
*t
= suspend ();
1964 scm_i_pthread_cond_wait (&wake_up_cond
, &t
->heap_mutex
);
1968 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1970 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
1971 int scm_i_critical_section_level
= 0;
1973 static SCM dynwind_critical_section_mutex
;
1976 scm_dynwind_critical_section (SCM mutex
)
1978 if (scm_is_false (mutex
))
1979 mutex
= dynwind_critical_section_mutex
;
1980 scm_dynwind_lock_mutex (mutex
);
1981 scm_dynwind_block_asyncs ();
1984 /*** Initialization */
1986 scm_i_pthread_key_t scm_i_freelist
, scm_i_freelist2
;
1987 scm_i_pthread_mutex_t scm_i_misc_mutex
;
1989 #if SCM_USE_PTHREAD_THREADS
1990 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
1994 scm_threads_prehistory (SCM_STACKITEM
*base
)
1996 #if SCM_USE_PTHREAD_THREADS
1997 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
1998 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
1999 PTHREAD_MUTEX_RECURSIVE
);
2002 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2003 scm_i_pthread_mutexattr_recursive
);
2004 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2005 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2006 scm_i_pthread_key_create (&scm_i_freelist
, NULL
);
2007 scm_i_pthread_key_create (&scm_i_freelist2
, NULL
);
2009 guilify_self_1 (base
);
2012 scm_t_bits scm_tc16_thread
;
2013 scm_t_bits scm_tc16_mutex
;
2014 scm_t_bits scm_tc16_condvar
;
2019 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2020 scm_set_smob_mark (scm_tc16_thread
, thread_mark
);
2021 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2022 scm_set_smob_free (scm_tc16_thread
, thread_free
);
2024 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2025 scm_set_smob_mark (scm_tc16_mutex
, fat_mutex_mark
);
2026 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2027 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
2029 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2031 scm_set_smob_mark (scm_tc16_condvar
, fat_cond_mark
);
2032 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2033 scm_set_smob_free (scm_tc16_condvar
, fat_cond_free
);
2035 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2036 guilify_self_2 (SCM_BOOL_F
);
2037 threads_initialized_p
= 1;
2039 dynwind_critical_section_mutex
=
2040 scm_permanent_object (scm_make_recursive_mutex ());
2044 scm_init_threads_default_dynamic_state ()
2046 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2047 scm_i_default_dynamic_state
= scm_permanent_object (state
);
2051 scm_init_thread_procs ()
2053 #include "libguile/threads.x"