/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <assert.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

#include <full-read.h>

\f
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
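
/* A usage sketch (illustrative, not called anywhere): both timeout
   representations accepted above map onto the same scm_t_timespec.  A
   (sec . usec) pair mirrors gettimeofday, while a real number is split
   into whole seconds and nanoseconds:

     scm_t_timespec ts;

     to_timespec (scm_from_double (1.5), &ts);
     // ts.tv_sec == 1, ts.tv_nsec == 500000000

     to_timespec (scm_cons (scm_from_ulong (1), scm_from_ulong (500000)),
                  &ts);
     // ts.tv_sec == 1, ts.tv_nsec == 500000 * 1000 == 500000000
*/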

/*** Queues */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
 */
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));
          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
 */
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;
      return SCM_CAR (c);
    }
}
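
/* A usage sketch for the queue primitives above (illustrative values):
   the queue is a cons whose cdr holds the list of elements and whose
   car caches the last pair of that list, which is what makes enqueue
   O(1):

     SCM q = make_queue ();
     SCM h = enqueue (q, scm_from_int (1));  // keep handle for remqueue
     enqueue (q, scm_from_int (2));
     remqueue (q, h);                        // removes the 1, returns 1
     dequeue (q);                            // => SCM integer 2
     dequeue (q);                            // => SCM_BOOL_F, queue empty
*/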

/*** Thread smob routines */

static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->cleanup_handler);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->mutexes);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}

static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}

/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
 */

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
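
/* A sketch of the calling convention for block_self (names are
   illustrative); fat_mutex_lock and scm_join_thread_timed below follow
   this pattern:

     scm_i_scm_pthread_mutex_lock (&m->lock);
     int err = block_self (m->waiting, mutex_obj, &m->lock, waittime);
     if (err == EINTR)
       ;  // interrupted so that a system async can run
     else if (err == ETIMEDOUT)
       ;  // WAITTIME expired
     else
       ;  // err == 0: woken by unblock_from_queue
     scm_i_pthread_mutex_unlock (&m->lock);
*/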

/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}

/* Getting into and out of guile mode.
 */

/* Ken Raeburn observes that the implementation of suspend and resume
   (and the things that build on top of them) is very likely not
   correct (see below).  We will need to fix this eventually, and
   that's why scm_leave_guile/scm_enter_guile are not exported in the
   API.

   Ken writes:

   Consider this sequence:

   Function foo, called in Guile mode, calls suspend (maybe indirectly
   through scm_leave_guile), which does this:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t); // just takes address of automatic
      var 't'
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS; // sparc only
      setjmp (t->regs); // here's most of the magic

   ... and returns.

   Function foo has a SCM value X, a handle on a non-immediate object, in
   a caller-saved register R, and it's the only reference to the object
   currently.

   The compiler wants to use R in suspend, so it pushes the current
   value, X, into a stack slot which will be reloaded on exit from
   suspend; then it loads stuff into R and goes about its business.  The
   setjmp call saves (some of) the current registers, including R, which
   no longer contains X.  (This isn't a problem for a normal
   setjmp/longjmp situation, where longjmp would be called before
   setjmp's caller returns; the old value for X would be loaded back from
   the stack after the longjmp, before the function returned.)

   So, suspend returns, loading X back into R (and invalidating the jump
   buffer) in the process.  The caller foo then goes off and calls a
   bunch of other functions out of Guile mode, occasionally storing X on
   the stack again, but, say, much deeper on the stack than suspend's
   stack frame went, and the stack slot where suspend had written X has
   long since been overwritten with other values.

   Okay, nothing actively broken so far.  Now, let garbage collection
   run, triggered by another thread.

   The thread calling foo is out of Guile mode at the time, so the
   garbage collector just scans a range of stack addresses.  Too bad that
   X isn't stored there.  So the pointed-to storage goes onto the free
   list, and I think you can see where things go from there.

   Is there anything I'm missing that'll prevent this scenario from
   happening?  I mean, aside from, "well, suspend and scm_leave_guile
   don't have many local variables, so they probably won't need to save
   any registers on most systems, so we hope everything will wind up in
   the jump buffer and we'll just get away with it"?

   (And, going the other direction, if scm_leave_guile and suspend push
   the stack pointer over onto a new page, and foo doesn't make further
   function calls and thus the stack pointer no longer includes that
   page, are we guaranteed that the kernel cannot release the now-unused
   stack page that contains the top-of-stack pointer we just saved?  I
   don't know if any OS actually does that.  If it does, we could get
   faults in garbage collection.)

   I don't think scm_without_guile has to have this problem, as it gets
   more control over the stack handling -- but it should call setjmp
   itself.  I'd probably try something like:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;
      setjmp (t->regs);
      res = func(data);
      scm_enter_guile (t);

   ... though even that's making some assumptions about the stack
   ordering of local variables versus caller-saved registers.

   For something like scm_leave_guile to work, I don't think it can just
   rely on invalidated jump buffers.  A valid jump buffer, and a handle
   on the stack state at the point when the jump buffer was initialized,
   together, would work fine, but I think then we're talking about macros
   invoking setjmp in the caller's stack frame, and requiring that the
   caller of scm_leave_guile also call scm_enter_guile before returning,
   kind of like pthread_cleanup_push/pop calls that have to be paired up
   in a function.  (In fact, the pthread ones have to be paired up
   syntactically, as if they might expand to a compound statement
   incorporating the user's code, and invoking a compiler's
   exception-handling primitives.  Which might be something to think
   about for cases where Guile is used with C++ exceptions or
   pthread_cancel.)
*/

scm_i_pthread_key_t scm_i_thread_key;

static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}

typedef void* scm_t_guile_ticket;

static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      t->heap_mutex_locked_by_self = 1;
      resume (t);
    }
}

static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}

static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  if (t->heap_mutex_locked_by_self)
    {
      t->heap_mutex_locked_by_self = 0;
      scm_i_pthread_mutex_unlock (&t->heap_mutex);
    }
  return (scm_t_guile_ticket) t;
}

static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = malloc (sizeof (scm_i_thread));

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  t->heap_mutex_locked_by_self = 0;
  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->canceled = 0;
  t->exited = 0;

  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  scm_i_pthread_mutex_lock (&t->heap_mutex);
  t->heap_mutex_locked_by_self = 1;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}

\f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
 */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level;      /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive;  /* allow recursive locking? */
  int unchecked_unlock;       /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock;  /* is it an error to unlock a mutex that is not
                                 owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      (scm_t_catch_body) scm_call_0, ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}

static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourselves from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;
        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F, in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
         resume it.

         A user call to scm_with_guile() will lead us to here.  This could
         happen from anywhere on the stack, and in particular lower on the
         stack than when it was when this thread was first guilified.  Thus,
         `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (base < t->base)
        t->base = base;
#else
      if (base > t->base)
        t->base = base;
#endif

      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
       */
      return 0;
    }
}

#if SCM_USE_PTHREAD_THREADS

#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  if ((void *)&attr < start || (void *)&attr >= end)
    return scm_get_stack_base ();
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#elif HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif defined (__MINGW32__)
/* This method for mingw.  In mingw the basic scm_get_stack_base can be used
   in any thread.  We don't like hard-coding the name of a system, but there
   doesn't seem to be a cleaner way of knowing scm_get_stack_base can
   work.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}

#endif /* pthread methods of get_thread_stack_base */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}

#endif /* !SCM_USE_PTHREAD_THREADS */

#ifdef HAVE_GET_THREAD_STACK_BASE

void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
                               scm_i_default_dynamic_state);
}

#endif

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}

void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *res;
  scm_t_guile_ticket t;
  t = scm_leave_guile ();
  res = func (data);
  scm_enter_guile (t);
  return res;
}

/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
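
/* A usage sketch for scm_spawn_thread from C (my_body and my_handler
   are illustrative); the body and handler have the same signatures as
   for scm_internal_catch:

     static SCM
     my_body (void *data)
     {
       return scm_from_int (42);   // becomes the thread's exit value
     }

     static SCM
     my_handler (void *data, SCM key, SCM args)
     {
       return SCM_BOOL_F;          // exit value if the body throws
     }

     SCM t = scm_spawn_thread (my_body, NULL, my_handler, NULL);
*/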

SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
            "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
            (SCM thread),
            "Asynchronously force the target @var{thread} to terminate.  @var{thread} "
            "cannot be the current thread, and if @var{thread} has already terminated or "
            "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
            (SCM thread, SCM proc),
            "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
            "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
            (SCM thread),
            "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME

SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a thread.")
#define FUNC_NAME s_scm_thread_p
{
  return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}

static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}

SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
            (SCM flags),
            "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex_with_flags
{
  int unchecked_unlock = 0, external_unlock = 0, recursive = 0;

  SCM ptr = flags;
  while (! scm_is_null (ptr))
    {
      SCM flag = SCM_CAR (ptr);
      if (scm_is_eq (flag, unchecked_unlock_sym))
        unchecked_unlock = 1;
      else if (scm_is_eq (flag, allow_external_unlock_sym))
        external_unlock = 1;
      else if (scm_is_eq (flag, recursive_sym))
        recursive = 1;
      else
        SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
      ptr = SCM_CDR (ptr);
    }
  return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
}
#undef FUNC_NAME
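
/* A usage sketch at the Scheme level (illustrative):

     (make-mutex)                            ; plain, fair mutex
     (make-mutex 'recursive)                 ; relockable by its owner
     (make-mutex 'unchecked-unlock           ; permissive unlocking
                 'allow-external-unlock)
*/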

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME

SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
              scm_i_pthread_mutex_lock (&t->admin_mutex);
              t->mutexes = scm_cons (mutex, t->mutexes);
              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
            (SCM m, SCM timeout, SCM owner),
            "Lock @var{mutex}.  If the mutex is already locked, the calling thread "
            "blocks until the mutex becomes available.  The function returns when "
            "the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
            "a thread already owns will succeed right away and will not block the "
            "thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
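
/* A usage sketch for scm_dynwind_lock_mutex (mx is illustrative):
   within a dynwind context the mutex is released on unwind, including
   a non-local exit such as a throw:

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mx);
     ...                        // code that may throw
     scm_dynwind_end ();        // unlocks mx
*/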

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}.  If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  SCM owner = m->owner;

  if (!scm_is_eq (owner, scm_current_thread ()))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = scm_current_thread ();
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            m->owner = unblock_from_queue (m->waiting);

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              break;
            }

          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        m->owner = unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}

SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
            (SCM mx, SCM cond, SCM timeout),
            "Unlocks @var{mutex} if the calling thread owns the lock on "
            "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour.  Once a mutex has been unlocked, "
            "one thread blocked on @var{mutex} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
            (SCM mx),
            "Returns @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static SCM
fat_cond_mark (SCM cv)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}

static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until @var{cond-var} has been signalled.  While waiting, "
            "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
            "is locked again when this function returns.  When @var{time} is given, "
            "it specifies a point in time where the waiting should be aborted.  It "
1645 "can be either a integer as returned by @code{current-time} or a pair "
1646 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1647 "mutex is locked and @code{#f} is returned. When the condition "
1648 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1649 "is returned. ")
1650 #define FUNC_NAME s_scm_timed_wait_condition_variable
1651 {
1652 scm_t_timespec waittime, *waitptr = NULL;
1653
1654 SCM_VALIDATE_CONDVAR (1, cv);
1655 SCM_VALIDATE_MUTEX (2, mx);
1656
1657 if (!SCM_UNBNDP (t))
1658 {
1659 to_timespec (t, &waittime);
1660 waitptr = &waittime;
1661 }
1662
1663 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1664 }
1665 #undef FUNC_NAME
1666
1667 static void
1668 fat_cond_signal (fat_cond *c)
1669 {
1670 unblock_from_queue (c->waiting);
1671 }
1672
1673 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1674 (SCM cv),
1675 "Wake up one thread that is waiting for @var{cv}")
1676 #define FUNC_NAME s_scm_signal_condition_variable
1677 {
1678 SCM_VALIDATE_CONDVAR (1, cv);
1679 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1680 return SCM_BOOL_T;
1681 }
1682 #undef FUNC_NAME
1683
1684 static void
1685 fat_cond_broadcast (fat_cond *c)
1686 {
1687 while (scm_is_true (unblock_from_queue (c->waiting)))
1688 ;
1689 }
1690
1691 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1692 (SCM cv),
1693 "Wake up all threads that are waiting for @var{cv}. ")
1694 #define FUNC_NAME s_scm_broadcast_condition_variable
1695 {
1696 SCM_VALIDATE_CONDVAR (1, cv);
1697 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1698 return SCM_BOOL_T;
1699 }
1700 #undef FUNC_NAME
1701
1702 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1703 (SCM obj),
1704 "Return @code{#t} if @var{obj} is a condition variable.")
1705 #define FUNC_NAME s_scm_condition_variable_p
1706 {
1707 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1708 }
1709 #undef FUNC_NAME
1710
1711 /*** Marking stacks */
1712
1713 /* XXX - what to do with this? Do we need to handle this for blocked
1714 threads as well?
1715 */
1716 #ifdef __ia64__
1717 # define SCM_MARK_BACKING_STORE() do { \
1718 ucontext_t ctx; \
1719 SCM_STACKITEM * top, * bot; \
1720 getcontext (&ctx); \
1721 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1722 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1723 / sizeof (SCM_STACKITEM))); \
1724 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
1725 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
1726 scm_mark_locations (bot, top - bot); } while (0)
1727 #else
1728 # define SCM_MARK_BACKING_STORE()
1729 #endif
1730
1731 void
1732 scm_threads_mark_stacks (void)
1733 {
1734 scm_i_thread *t;
1735 for (t = all_threads; t; t = t->next_thread)
1736 {
1737 /* Check that thread has indeed been suspended.
1738 */
1739 assert (t->top);
1740
1741 scm_gc_mark (t->handle);
1742
1743 #if SCM_STACK_GROWS_UP
1744 scm_mark_locations (t->base, t->top - t->base);
1745 #else
1746 scm_mark_locations (t->top, t->base - t->top);
1747 #endif
1748 scm_mark_locations ((void *) &t->regs,
1749 ((size_t) sizeof(t->regs)
1750 / sizeof (SCM_STACKITEM)));
1751 }
1752
1753 SCM_MARK_BACKING_STORE ();
1754 }
1755
1756 /*** Select */
1757
1758 int
1759 scm_std_select (int nfds,
1760 SELECT_TYPE *readfds,
1761 SELECT_TYPE *writefds,
1762 SELECT_TYPE *exceptfds,
1763 struct timeval *timeout)
1764 {
1765 fd_set my_readfds;
1766 int res, eno, wakeup_fd;
1767 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1768 scm_t_guile_ticket ticket;
1769
1770 if (readfds == NULL)
1771 {
1772 FD_ZERO (&my_readfds);
1773 readfds = &my_readfds;
1774 }
1775
1776 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1777 SCM_TICK;
1778
1779 wakeup_fd = t->sleep_pipe[0];
1780 ticket = scm_leave_guile ();
1781 FD_SET (wakeup_fd, readfds);
1782 if (wakeup_fd >= nfds)
1783 nfds = wakeup_fd+1;
1784 res = select (nfds, readfds, writefds, exceptfds, timeout);
1785 t->sleep_fd = -1;
1786 eno = errno;
1787 scm_enter_guile (ticket);
1788
1789 scm_i_reset_sleep (t);
1790
1791 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1792 {
1793 char dummy;
1794 full_read (wakeup_fd, &dummy, 1);
1795
1796 FD_CLR (wakeup_fd, readfds);
1797 res -= 1;
1798 if (res == 0)
1799 {
1800 eno = EINTR;
1801 res = -1;
1802 }
1803 }
1804 errno = eno;
1805 return res;
1806 }
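
/* A usage sketch for scm_std_select (sock is illustrative): it is a
   drop-in replacement for select(2) that leaves guile mode while
   blocking and returns -1 with errno == EINTR when woken up by the
   delivery of a system async:

     fd_set rfds;
     struct timeval tv = { 5, 0 };
     FD_ZERO (&rfds);
     FD_SET (sock, &rfds);
     int n = scm_std_select (sock + 1, &rfds, NULL, NULL, &tv);
     if (n < 0 && errno == EINTR)
       ;   // interrupted so that asyncs can run
*/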

/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
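
/* A usage sketch for the wrappers above (lock is illustrative): C code
   can block on a raw pthread mutex without stalling the garbage
   collector, because the calling thread leaves guile mode first:

     static scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

     scm_pthread_mutex_lock (&lock);      // may block, outside guile mode
     ...
     scm_i_pthread_mutex_unlock (&lock);  // plain unlock never blocks
*/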

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_wait (cond, mutex);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}

#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;

void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}

void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}

void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}

void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();

  /* Don't put t->heap_mutex in t->held_mutex here, because if the
     thread is cancelled during the cond wait, the thread's cleanup
     function (scm_leave_guile_cleanup) will handle unlocking the
     heap_mutex, so we don't need to do that again in on_thread_exit.
  */
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);

  resume (t);
}

/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

/*** Initialization */

scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/