Remove unnecessary uses of `scm_{leave,enter}_guile ()'.
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/boehm-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32 #include <assert.h>
33
34 #ifdef HAVE_STRING_H
35 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
36 #endif
37
38 #if HAVE_SYS_TIME_H
39 #include <sys/time.h>
40 #endif
41
42 #include "libguile/validate.h"
43 #include "libguile/root.h"
44 #include "libguile/eval.h"
45 #include "libguile/async.h"
46 #include "libguile/ports.h"
47 #include "libguile/threads.h"
48 #include "libguile/dynwind.h"
49 #include "libguile/iselect.h"
50 #include "libguile/fluids.h"
51 #include "libguile/continuations.h"
52 #include "libguile/gc.h"
53 #include "libguile/init.h"
54 #include "libguile/scmsigs.h"
55 #include "libguile/strings.h"
56
57 #ifdef __MINGW32__
58 #ifndef ETIMEDOUT
59 # define ETIMEDOUT WSAETIMEDOUT
60 #endif
61 # include <fcntl.h>
62 # include <process.h>
63 # define pipe(fd) _pipe (fd, 256, O_BINARY)
64 #endif /* __MINGW32__ */
65
66 #include <full-read.h>
67
68 \f
69 static void
70 to_timespec (SCM t, scm_t_timespec *waittime)
71 {
72 if (scm_is_pair (t))
73 {
74 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
75 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
76 }
77 else
78 {
79 double time = scm_to_double (t);
80 double sec = scm_c_truncate (time);
81
82 waittime->tv_sec = (long) sec;
83 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
84 }
85 }
86
87 /*** Queues */
88
89 /* Make an empty queue data structure.
90 */
91 static SCM
92 make_queue ()
93 {
94 return scm_cons (SCM_EOL, SCM_EOL);
95 }
96
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
*/
static SCM
enqueue (SCM q, SCM t)
{
  /* The new cell is allocated before entering the critical section.  */
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell becomes the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Link the new cell after the current tail (kept in the car).  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);   /* the new cell is now the tail */
  SCM_CRITICAL_SECTION_END;
  return c;
}
113
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  /* Linear scan of the list in the cdr, tracking the previous cell so
     we can splice C out.  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C was the tail, update the tail pointer in the car.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, SCM_CDR (c));
	  SCM_SETCDR (prev, SCM_CDR (c));
	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
139
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      /* Empty queue.  */
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If the list became empty, reset the tail pointer too.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;
      return SCM_CAR (c);
    }
}
163
164 /*** Thread smob routines */
165
166
/* Smob print routine for thread objects: writes
   "#<thread ID (ADDRESS)>" to PORT, where ID is derived from the
   pthread identifier and ADDRESS is the scm_i_thread pointer in hex.
   Returns 1 to tell the printer the object was fully printed.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the unsigned integer type whose size matches pthread_t.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
201
/* Smob free routine for thread objects.  Only threads that have
   already exited may be freed (their scm_i_thread is no longer on the
   all_threads list by then).  Returns 0 (no extra bytes to report).  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
210
211 /*** Blocking on queues. */
212
213 /* See also scm_i_queue_async_cell for how such a block is
214 interrputed.
215 */
216
217 /* Put the current thread on QUEUE and go to sleep, waiting for it to
218 be woken up by a call to 'unblock_from_queue', or to be
219 interrupted. Upon return of this function, the current thread is
220 no longer on QUEUE, even when the sleep has been interrupted.
221
222 The caller of block_self must hold MUTEX. It will be atomically
223 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
224
225 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
226 as MUTEX is needed.
227
228 When WAITTIME is not NULL, the sleep will be aborted at that time.
229
230 The return value of block_self is an errno value. It will be zero
231 when the sleep has been successfully completed by a call to
232 unblock_from_queue, EINTR when it has been interrupted by the
233 delivery of a system async, and ETIMEDOUT when the timeout has
234 expired.
235
236 The system asyncs themselves are not executed by block_self.
237 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returning non-zero means an async is already
     pending, so we do not sleep at all and report interruption.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs for the duration of the sleep; they are not run
         here (see comment above) but would otherwise interfere.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
269
/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
*/
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  /* Signal the dequeued thread's sleep condition variable; the
     sleeper in block_self will then wake up and leave the queue.  */
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}
281
282 /* Getting into and out of guile mode.
283 */
284
285 /* Ken Raeburn observes that the implementation of suspend and resume
286 (and the things that build on top of them) are very likely not
287 correct (see below). We will need fix this eventually, and that's
288 why scm_leave_guile/scm_enter_guile are not exported in the API.
289
290 Ken writes:
291
292 Consider this sequence:
293
294 Function foo, called in Guile mode, calls suspend (maybe indirectly
295 through scm_leave_guile), which does this:
296
297 // record top of stack for the GC
298 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
299 var 't'
300 // save registers.
301 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
302 SCM_I_SETJMP (t->regs); // here's most of the magic
303
304 ... and returns.
305
306 Function foo has a SCM value X, a handle on a non-immediate object, in
307 a caller-saved register R, and it's the only reference to the object
308 currently.
309
310 The compiler wants to use R in suspend, so it pushes the current
311 value, X, into a stack slot which will be reloaded on exit from
312 suspend; then it loads stuff into R and goes about its business. The
313 setjmp call saves (some of) the current registers, including R, which
314 no longer contains X. (This isn't a problem for a normal
315 setjmp/longjmp situation, where longjmp would be called before
316 setjmp's caller returns; the old value for X would be loaded back from
317 the stack after the longjmp, before the function returned.)
318
319 So, suspend returns, loading X back into R (and invalidating the jump
320 buffer) in the process. The caller foo then goes off and calls a
321 bunch of other functions out of Guile mode, occasionally storing X on
322 the stack again, but, say, much deeper on the stack than suspend's
323 stack frame went, and the stack slot where suspend had written X has
324 long since been overwritten with other values.
325
326 Okay, nothing actively broken so far. Now, let garbage collection
327 run, triggered by another thread.
328
329 The thread calling foo is out of Guile mode at the time, so the
330 garbage collector just scans a range of stack addresses. Too bad that
331 X isn't stored there. So the pointed-to storage goes onto the free
332 list, and I think you can see where things go from there.
333
334 Is there anything I'm missing that'll prevent this scenario from
335 happening? I mean, aside from, "well, suspend and scm_leave_guile
336 don't have many local variables, so they probably won't need to save
337 any registers on most systems, so we hope everything will wind up in
338 the jump buffer and we'll just get away with it"?
339
340 (And, going the other direction, if scm_leave_guile and suspend push
341 the stack pointer over onto a new page, and foo doesn't make further
342 function calls and thus the stack pointer no longer includes that
343 page, are we guaranteed that the kernel cannot release the now-unused
344 stack page that contains the top-of-stack pointer we just saved? I
345 don't know if any OS actually does that. If it does, we could get
346 faults in garbage collection.)
347
348 I don't think scm_without_guile has to have this problem, as it gets
349 more control over the stack handling -- but it should call setjmp
350 itself. I'd probably try something like:
351
352 // record top of stack for the GC
353 t->top = SCM_STACK_PTR (&t);
354 // save registers.
355 SCM_FLUSH_REGISTER_WINDOWS;
356 SCM_I_SETJMP (t->regs);
357 res = func(data);
358 scm_enter_guile (t);
359
360 ... though even that's making some assumptions about the stack
361 ordering of local variables versus caller-saved registers.
362
363 For something like scm_leave_guile to work, I don't think it can just
364 rely on invalidated jump buffers. A valid jump buffer, and a handle
365 on the stack state at the point when the jump buffer was initialized,
366 together, would work fine, but I think then we're talking about macros
367 invoking setjmp in the caller's stack frame, and requiring that the
368 caller of scm_leave_guile also call scm_enter_guile before returning,
369 kind of like pthread_cleanup_push/pop calls that have to be paired up
370 in a function. (In fact, the pthread ones have to be paired up
371 syntactically, as if they might expand to a compound statement
372 incorporating the user's code, and invoking a compiler's
373 exception-handling primitives. Which might be something to think
374 about for cases where Guile is used with C++ exceptions or
375 pthread_cancel.)
376 */
377
378 scm_i_pthread_key_t scm_i_thread_key;
379
/* Mark thread T as being back in guile mode.  A NULL `top' means "in
   guile mode"; a non-NULL `top' (set by suspend) marks the stack
   region the GC should scan while the thread is outside guile.  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;
}
385
/* Opaque handle returned by scm_leave_guile and consumed by
   scm_enter_guile; really a scm_i_thread pointer (or NULL).  */
typedef void* scm_t_guile_ticket;

/* Re-enter guile mode using TICKET, as obtained from scm_leave_guile.
   A NULL ticket is a no-op.  See the long comment above for why this
   suspend/resume scheme is considered unreliable and is not part of
   the public API.  */
static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      resume (t);
    }
}
397
/* Leave guile mode: record the current stack top and register state
   so the GC can scan this thread conservatively while it runs
   non-guile code.  Returns the current thread.  NOTE(review): see the
   long comment above — the setjmp-based register capture here is
   known to be of questionable reliability.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  SCM_I_SETJMP (t->regs);
  return t;
}
410
411 static scm_t_guile_ticket
412 scm_leave_guile ()
413 {
414 scm_i_thread *t = suspend ();
415 return (scm_t_guile_ticket) t;
416 }
417
418 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
419 static scm_i_thread *all_threads = NULL;
420 static int thread_count;
421
422 static SCM scm_i_default_dynamic_state;
423
/* Perform first stage of thread initialisation, in non-guile mode.
   BASE is the conservative-GC stack base for this thread.  Allocates
   and initialises the scm_i_thread record, installs it in the
   thread-specific key, and links it into the global thread list.
   SCM-level fields (handle, dynamic state, join queue) are set up
   later by guilify_self_2, once the thread can run in guile mode.  */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes setting up.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->current_mark_stack_ptr = NULL;
  t->current_mark_stack_limit = NULL;
  t->canceled = 0;
  t->exited = 0;
  t->guile_mode = 0;

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Publish the new thread on the global list under the admin lock.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
489
/* Perform second stage of thread initialisation, in guile mode.
   Creates the thread's SCM handle (smob), its continuation root, and
   its dynamic state — a fresh child of PARENT when PARENT is true,
   otherwise the initial dynamic state.  Finally unblocks asyncs,
   which guilify_self_1 left blocked.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;   /* thread is fully set up; allow asyncs */
}
513
514 \f
515 /*** Fat mutexes */
516
517 /* We implement our own mutex type since we want them to be 'fair', we
518 want to do fancy things while waiting for them (like running
519 asyncs) and we might want to add things that are nice for
520 debugging.
521 */
522
/* A "fat" mutex: a pthread mutex plus book-keeping for fairness,
   recursion, and the queue of waiting threads.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects the fields below */
  SCM owner;                    /* NOTE(review): presumably the owning
                                   thread's handle — confirm in the
                                   lock/unlock code (not in this chunk) */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
538
/* Perform thread tear-down, in guile mode.  V is the exiting thread's
   scm_i_thread.  Runs the cleanup handler (catching any error and
   reporting it without rethrowing), marks the thread exited, closes
   its sleep pipe, wakes all joiners, and wakes waiters on any fat
   mutexes still recorded on t->mutexes.  Always returns NULL.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on this thread.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
580
/* pthread destructor / exit hook for a guile thread.  V is the
   thread's scm_i_thread.  Ordering here is delicate: SCM-level
   tear-down (do_thread_exit) must happen while the thread is still on
   the all_threads list (so its stack stays GC-protected), and the
   list removal must happen afterwards, in non-guile mode.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Re-install the thread-specific value, which pthreads may already
     have cleared before running this destructor.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */

  /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
     assume the GC is usable at this point, and notably that thread-local
     storage (TLS) hasn't been deallocated yet.  */
  do_thread_exit (v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
632
633 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
634
/* One-time initializer (run via scm_i_pthread_once) that creates the
   thread-specific key holding each thread's scm_i_thread pointer.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, NULL);
}
640
641 /* Perform any initializations necessary to bring the current thread
642 into guile mode, initializing Guile itself, if necessary.
643
644 BASE is the stack base to use with GC.
645
646 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
647 which case the default dynamic state is used.
648
649 Return zero when the thread was in guile mode already; otherwise
650 return 1.
651 */
652
/* Bring the current thread into guile mode (see comment above for the
   BASE/PARENT contract).  Three cases: (1) thread never guilified →
   initialize Guile itself if needed, then guilify; (2) guilified but
   suspended (t->top non-NULL) → update base and resume; (3) already
   in guile mode → nothing.  Returns 1 in the first two cases, 0 in
   the last.  */
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 A user call to scm_with_guile() will lead us to here.  This could
	 happen from anywhere on the stack, and in particular lower on the
	 stack than when it was when this thread was first guilified.  Thus,
	 `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (base < t->base)
	t->base = base;
#else
      if (base > t->base)
	t->base = base;
#endif

      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
712
713 #if SCM_USE_PTHREAD_THREADS
714
715 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
716 /* This method for GNU/Linux and perhaps some other systems.
717 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
718 available on them. */
719 #define HAVE_GET_THREAD_STACK_BASE
720
/* Return the base of the current thread's stack, for GC scanning,
   using pthread_getattr_np/pthread_attr_getstack (GNU/Linux).  Falls
   back to GC_stackbottom when the attributes look bogus (LinuxThreads
   main-thread quirk, see below).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* Sanity check: a local variable must lie within the reported
     range; if not, the reported stack extent is unusable.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return (SCM_STACKITEM *) GC_stackbottom;
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}
750
751 #elif HAVE_PTHREAD_GET_STACKADDR_NP
752 /* This method for MacOS X.
753 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
754 but as of 2006 there's nothing obvious at apple.com. */
755 #define HAVE_GET_THREAD_STACK_BASE
756 static SCM_STACKITEM *
757 get_thread_stack_base ()
758 {
759 return pthread_get_stackaddr_np (pthread_self ());
760 }
761
762 #elif defined (__MINGW32__)
763 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
764 in any thread. We don't like hard-coding the name of a system, but there
765 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
766 work. */
767 #define HAVE_GET_THREAD_STACK_BASE
768 static SCM_STACKITEM *
769 get_thread_stack_base ()
770 {
771 return (SCM_STACKITEM *) GC_stackbottom;
772 }
773
774 #endif /* pthread methods of get_thread_stack_base */
775
776 #else /* !SCM_USE_PTHREAD_THREADS */
777
778 #define HAVE_GET_THREAD_STACK_BASE
779
780 static SCM_STACKITEM *
781 get_thread_stack_base ()
782 {
783 return (SCM_STACKITEM *) GC_stackbottom;
784 }
785
786 #endif /* !SCM_USE_PTHREAD_THREADS */
787
788 #ifdef HAVE_GET_THREAD_STACK_BASE
789
790 void
791 scm_init_guile ()
792 {
793 scm_i_init_thread_for_guile (get_thread_stack_base (),
794 scm_i_default_dynamic_state);
795 }
796
797 #endif
798
/* Call FUNC (DATA) in guile mode and return its result, entering
   guile mode first if necessary.  Uses the default dynamic state as
   the parent; see scm_i_with_guile_and_parent.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
805
/* pthread cleanup handler installed around the continuation barrier
   in scm_i_with_guile_and_parent: if the thread is cancelled there,
   leave guile mode and run thread tear-down.  X is unused.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
  on_thread_exit (SCM_I_CURRENT_THREAD);
}
812
/* Call FUNC (DATA) in guile mode, with PARENT as the parent dynamic
   state for a newly-guilified thread, and return FUNC's result.  If
   this call actually entered guile mode, leave it again on the way
   out (with a cancellation cleanup installed around the call); if the
   thread was already in guile mode, just run FUNC behind a
   continuation barrier.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* Local on this frame serves as the stack base for GC scanning.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
833
834 \f
835 /*** Non-guile mode. */
836
837 #if (defined HAVE_GC_DO_BLOCKING) && (!defined HAVE_DECL_GC_DO_BLOCKING)
838
839 /* This declaration is missing from the public headers of GC 7.1. */
840 extern void GC_do_blocking (void (*) (void *), void *);
841
842 #endif
843
844 #ifdef HAVE_GC_DO_BLOCKING
/* Closure passed through GC_do_blocking to run FUNCTION (DATA)
   outside of guile mode and carry back its RESULT.  */
struct without_guile_arg
{
  void * (*function) (void *);   /* user function to run */
  void *data;                    /* its argument */
  void *result;                  /* its return value, filled in */
};
851
/* Trampoline run by GC_do_blocking: temporarily clears the thread's
   guile_mode flag, invokes the user's function, and stores its result
   in the closure.  */
static void
without_guile_trampoline (void *closure)
{
  struct without_guile_arg *arg;

  SCM_I_CURRENT_THREAD->guile_mode = 0;

  arg = (struct without_guile_arg *) closure;
  arg->result = arg->function (arg->data);

  SCM_I_CURRENT_THREAD->guile_mode = 1;
}
864 #endif
865
/* Run FUNC (DATA) outside of guile mode and return its result.  When
   libgc supports GC_do_blocking and we are currently in guile mode,
   route the call through it so the collector knows this thread is
   blocked; otherwise just call FUNC directly.  */
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;

#ifdef HAVE_GC_DO_BLOCKING
  if (SCM_I_CURRENT_THREAD->guile_mode)
    {
      struct without_guile_arg arg;

      arg.function = func;
      arg.data = data;
      GC_do_blocking (without_guile_trampoline, &arg);
      result = arg.result;
    }
  else
#endif
    /* Already outside guile mode (or no GC_do_blocking support).  */
    result = func (data);

  return result;
}
887
888 \f
889 /*** Thread creation */
890
/* Stack-allocated handshake record shared between
   call-with-new-thread and the new thread: the creator waits on COND
   (under MUTEX) until the child stores its thread object in THREAD.  */
typedef struct {
  SCM parent;                 /* parent dynamic state for the child */
  SCM thunk;                  /* thunk to run in the new thread */
  SCM handler;                /* catch handler, or SCM_UNDEFINED */
  SCM thread;                 /* filled in by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
899
/* Body of a thread created by call-with-new-thread, run in guile
   mode.  First completes the handshake with the creator (publishing
   the thread object and signalling the condition), then runs the
   thunk — under a catch-all when a handler was given.  D points to
   the creator's stack-allocated launch_data, which must not be
   touched after the handshake.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy thunk and handler out before signalling: DATA lives on the
     creator's stack and is invalid once the creator proceeds.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  /* Trigger a call to `on_thread_exit ()'.  */
  pthread_exit (NULL);

  return 0;
}
924
/* pthread start routine for call-with-new-thread: detach the thread
   (nobody pthread_joins it; joining happens at the Scheme level) and
   enter guile mode with the requested parent dynamic state.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
933
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  /* DATA lives on this stack frame; the new thread copies what it
     needs out of it before we return (see really_launch).  */
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across creation so the child's signal cannot be
     missed; wait until the child has stored its thread object.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
977
/* C-level analogue of launch_data, for scm_spawn_thread: handshake
   record between the spawning thread and the new thread.  */
typedef struct {
  SCM parent;                   /* parent dynamic state for the child */
  scm_t_catch_body body;        /* C body to run */
  void *body_data;
  scm_t_catch_handler handler;  /* C catch handler, or NULL */
  void *handler_data;
  SCM thread;                   /* filled in by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
988
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   Mirrors really_launch: copy the C callbacks out of the creator's
   stack-allocated spawn_data, complete the handshake, then run the
   body (inside scm_internal_catch when a handler was supplied).  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy fields out before signalling; DATA is on the creator's
     stack and becomes invalid once the creator proceeds.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1013
/* pthread start routine for scm_spawn_thread: detach the thread and
   enter guile mode with the requested parent dynamic state.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
1022
/* C API: create a new guile-mode thread running BODY (BODY_DATA),
   with HANDLER (HANDLER_DATA) as catch-all handler when non-NULL.
   Blocks until the new thread has published its thread object, which
   is returned.  Signals a system error if thread creation fails.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  /* DATA lives on this frame; the child copies what it needs before
     we return (see really_spawn).  */
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across creation so the child's signal cannot be
     missed; wait until the child has stored its thread object.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1053
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Returns #t when the underlying sched_yield-style call succeeded.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1062
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the admin mutex before cancelling, so the cancelled
         thread's tear-down can take it.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1087
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Silently ignore the request on a thread that is already exiting
     or cancelled — its tear-down may already have run the handler.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1111
1112 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1113 (SCM thread),
1114 "Return the cleanup handler installed for the thread @var{thread}.")
1115 #define FUNC_NAME s_scm_thread_cleanup
1116 {
1117 scm_i_thread *t;
1118 SCM ret;
1119
1120 SCM_VALIDATE_THREAD (1, thread);
1121
1122 t = SCM_I_THREAD_DATA (thread);
1123 scm_i_pthread_mutex_lock (&t->admin_mutex);
1124 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1125 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1126
1127 return ret;
1128 }
1129 #undef FUNC_NAME
1130
/* C API: block until THREAD terminates, with no timeout.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1135
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL, when given, is what the join returns on timeout.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining ourselves would block forever; reject it up front.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Repeatedly block on the join queue; each wakeup may be the
	 target exiting, a timeout, or a spurious/async wakeup.  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Run pending asyncs without holding the admin mutex.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1202
1203 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1204 (SCM obj),
1205 "Return @code{#t} if @var{obj} is a thread.")
1206 #define FUNC_NAME s_scm_thread_p
1207 {
1208 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1209 }
1210 #undef FUNC_NAME
1211
1212
1213 static size_t
1214 fat_mutex_free (SCM mx)
1215 {
1216 fat_mutex *m = SCM_MUTEX_DATA (mx);
1217 scm_i_pthread_mutex_destroy (&m->lock);
1218 scm_gc_free (m, sizeof (fat_mutex), "mutex");
1219 return 0;
1220 }
1221
1222 static int
1223 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1224 {
1225 fat_mutex *m = SCM_MUTEX_DATA (mx);
1226 scm_puts ("#<mutex ", port);
1227 scm_uintprint ((scm_t_bits)m, 16, port);
1228 scm_puts (">", port);
1229 return 1;
1230 }
1231
/* Allocate and initialize a mutex with the given option flags and wrap
   it in a fresh smob.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* `waiting' gets a harmless value before the smob is created, and
     the real queue is only allocated afterwards -- presumably so that
     an allocation-triggered GC never sees an uninitialized field.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1252
/* C API: create a standard mutex (no option flags).  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1257
1258 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1259 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1260 SCM_SYMBOL (recursive_sym, "recursive");
1261
1262 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1263 (SCM flags),
1264 "Create a new mutex. ")
1265 #define FUNC_NAME s_scm_make_mutex_with_flags
1266 {
1267 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1268
1269 SCM ptr = flags;
1270 while (! scm_is_null (ptr))
1271 {
1272 SCM flag = SCM_CAR (ptr);
1273 if (scm_is_eq (flag, unchecked_unlock_sym))
1274 unchecked_unlock = 1;
1275 else if (scm_is_eq (flag, allow_external_unlock_sym))
1276 external_unlock = 1;
1277 else if (scm_is_eq (flag, recursive_sym))
1278 recursive = 1;
1279 else
1280 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1281 ptr = SCM_CDR (ptr);
1282 }
1283 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1284 }
1285 #undef FUNC_NAME
1286
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1295
1296 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1297
/* Acquire MUTEX on behalf of OWNER (defaulting to the current thread),
   giving up at TIMEOUT when non-NULL.  On return *RET is non-zero iff
   the mutex was acquired; the result is #f, or an (error-key . message)
   pair for the caller to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it and record it in the owning thread's
	     list of held mutexes.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The owning thread exited without unlocking: take over the
	     mutex but report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relocking a mutex we already own: only recursive mutexes
	     allow this.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: fail if the deadline has passed,
	     otherwise block on the wait queue and retry after waking.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  /* Drop the lock while running asyncs, then retry.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1374
/* C API: lock MX, blocking indefinitely if necessary.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1379
1380 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1381 (SCM m, SCM timeout, SCM owner),
1382 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1383 "blocks until the mutex becomes available. The function returns when "
1384 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1385 "a thread already owns will succeed right away and will not block the "
1386 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1387 #define FUNC_NAME s_scm_lock_mutex_timed
1388 {
1389 SCM exception;
1390 int ret = 0;
1391 scm_t_timespec cwaittime, *waittime = NULL;
1392
1393 SCM_VALIDATE_MUTEX (1, m);
1394
1395 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1396 {
1397 to_timespec (timeout, &cwaittime);
1398 waittime = &cwaittime;
1399 }
1400
1401 exception = fat_mutex_lock (m, waittime, owner, &ret);
1402 if (!scm_is_false (exception))
1403 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1404 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1405 }
1406 #undef FUNC_NAME
1407
/* Within the current dynwind context, keep MUTEX locked: it is locked
   on rewind and unlocked on unwind.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1416
1417 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1418 (SCM mutex),
1419 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1420 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1421 #define FUNC_NAME s_scm_try_mutex
1422 {
1423 SCM exception;
1424 int ret = 0;
1425 scm_t_timespec cwaittime, *waittime = NULL;
1426
1427 SCM_VALIDATE_MUTEX (1, mutex);
1428
1429 to_timespec (scm_from_int(0), &cwaittime);
1430 waittime = &cwaittime;
1431
1432 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1433 if (!scm_is_false (exception))
1434 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1435 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1436 }
1437 #undef FUNC_NAME
1438
1439 /*** Fat condition variables */
1440
/* Representation of a Scheme-level condition variable.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1448
/* Unlock MUTEX.  When COND is bound, atomically wait on COND (up to
   WAITTIME, or indefinitely when WAITTIME is NULL) after releasing the
   mutex, and re-acquire the mutex before returning when RELOCK is
   non-zero.  Returns non-zero on success, zero on timeout.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  SCM owner = m->owner;

  if (!scm_is_eq (owner, scm_current_thread ()))
    {
      if (m->level == 0)
	{
	  /* Unlocking an unlocked mutex is an error unless the mutex
	     was made with the `unchecked-unlock' flag.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = scm_current_thread ();
	}
      else if (!m->allow_external_unlock)
	{
	  /* Unlocking another thread's mutex requires the
	     `allow-external-unlock' flag.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Release one level of the mutex, handing it to the next
	     waiter when it becomes free, before sleeping on COND.  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    m->owner = unblock_from_queue (m->waiting);

	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock: drop one level and wake the next waiter when the
	 mutex becomes free.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	m->owner = unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1542
/* C API: plain unlock of MX, without a condition variable.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1547
1548 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1549 (SCM mx, SCM cond, SCM timeout),
1550 "Unlocks @var{mutex} if the calling thread owns the lock on "
1551 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1552 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1553 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1554 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1555 "with a call to @code{unlock-mutex}. Only the last call to "
1556 "@code{unlock-mutex} will actually unlock the mutex. ")
1557 #define FUNC_NAME s_scm_unlock_mutex_timed
1558 {
1559 scm_t_timespec cwaittime, *waittime = NULL;
1560
1561 SCM_VALIDATE_MUTEX (1, mx);
1562 if (! (SCM_UNBNDP (cond)))
1563 {
1564 SCM_VALIDATE_CONDVAR (2, cond);
1565
1566 if (! (SCM_UNBNDP (timeout)))
1567 {
1568 to_timespec (timeout, &cwaittime);
1569 waittime = &cwaittime;
1570 }
1571 }
1572
1573 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1574 }
1575 #undef FUNC_NAME
1576
1577 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1578 (SCM obj),
1579 "Return @code{#t} if @var{obj} is a mutex.")
1580 #define FUNC_NAME s_scm_mutex_p
1581 {
1582 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1583 }
1584 #undef FUNC_NAME
1585
1586 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1587 (SCM mx),
1588 "Return the thread owning @var{mx}, or @code{#f}.")
1589 #define FUNC_NAME s_scm_mutex_owner
1590 {
1591 SCM owner;
1592 fat_mutex *m = NULL;
1593
1594 SCM_VALIDATE_MUTEX (1, mx);
1595 m = SCM_MUTEX_DATA (mx);
1596 scm_i_pthread_mutex_lock (&m->lock);
1597 owner = m->owner;
1598 scm_i_pthread_mutex_unlock (&m->lock);
1599
1600 return owner;
1601 }
1602 #undef FUNC_NAME
1603
1604 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1605 (SCM mx),
1606 "Return the lock level of mutex @var{mx}.")
1607 #define FUNC_NAME s_scm_mutex_level
1608 {
1609 SCM_VALIDATE_MUTEX (1, mx);
1610 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1611 }
1612 #undef FUNC_NAME
1613
1614 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1615 (SCM mx),
1616 "Returns @code{#t} if the mutex @var{mx} is locked.")
1617 #define FUNC_NAME s_scm_mutex_locked_p
1618 {
1619 SCM_VALIDATE_MUTEX (1, mx);
1620 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1621 }
1622 #undef FUNC_NAME
1623
1624 static size_t
1625 fat_cond_free (SCM mx)
1626 {
1627 fat_cond *c = SCM_CONDVAR_DATA (mx);
1628 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1629 return 0;
1630 }
1631
1632 static int
1633 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1634 {
1635 fat_cond *c = SCM_CONDVAR_DATA (cv);
1636 scm_puts ("#<condition-variable ", port);
1637 scm_uintprint ((scm_t_bits)c, 16, port);
1638 scm_puts (">", port);
1639 return 1;
1640 }
1641
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* As in make_fat_mutex: give `waiting' a harmless value before the
     smob exists, and only allocate the real queue afterwards,
     presumably so GC never sees an uninitialized field.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1657
1658 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1659 (SCM cv, SCM mx, SCM t),
1660 "Wait until @var{cond-var} has been signalled. While waiting, "
1661 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1662 "is locked again when this function returns. When @var{time} is given, "
1663 "it specifies a point in time where the waiting should be aborted. It "
1664 "can be either a integer as returned by @code{current-time} or a pair "
1665 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1666 "mutex is locked and @code{#f} is returned. When the condition "
1667 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1668 "is returned. ")
1669 #define FUNC_NAME s_scm_timed_wait_condition_variable
1670 {
1671 scm_t_timespec waittime, *waitptr = NULL;
1672
1673 SCM_VALIDATE_CONDVAR (1, cv);
1674 SCM_VALIDATE_MUTEX (2, mx);
1675
1676 if (!SCM_UNBNDP (t))
1677 {
1678 to_timespec (t, &waittime);
1679 waitptr = &waittime;
1680 }
1681
1682 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1683 }
1684 #undef FUNC_NAME
1685
/* Wake one thread waiting on C, if any.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1691
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  /* Always returns #t, whether or not a waiter was woken.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1702
1703 static void
1704 fat_cond_broadcast (fat_cond *c)
1705 {
1706 while (scm_is_true (unblock_from_queue (c->waiting)))
1707 ;
1708 }
1709
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  /* Always returns #t.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1720
1721 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1722 (SCM obj),
1723 "Return @code{#t} if @var{obj} is a condition variable.")
1724 #define FUNC_NAME s_scm_condition_variable_p
1725 {
1726 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1727 }
1728 #undef FUNC_NAME
1729
1730 /*** Marking stacks */
1731
1732 /* XXX - what to do with this? Do we need to handle this for blocked
1733 threads as well?
1734 */
1735 #ifdef __ia64__
1736 # define SCM_MARK_BACKING_STORE() do { \
1737 ucontext_t ctx; \
1738 SCM_STACKITEM * top, * bot; \
1739 getcontext (&ctx); \
1740 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1741 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1742 / sizeof (SCM_STACKITEM))); \
1743 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
1744 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
1745 scm_mark_locations (bot, top - bot); } while (0)
1746 #else
1747 # define SCM_MARK_BACKING_STORE()
1748 #endif
1749
1750
1751 \f
1752 /*** Select */
1753
/* Argument bundle for `do_std_select', which runs via a single
   void* argument and therefore cannot take parameters directly.  */
struct select_args
{
  int             nfds;
  SELECT_TYPE    *read_fds;
  SELECT_TYPE    *write_fds;
  SELECT_TYPE    *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int             result;
  int             errno_value;
};
1765
1766 static void *
1767 do_std_select (void *args)
1768 {
1769 struct select_args *select_args;
1770
1771 select_args = (struct select_args *) args;
1772
1773 select_args->result =
1774 select (select_args->nfds,
1775 select_args->read_fds, select_args->write_fds,
1776 select_args->except_fds, select_args->timeout);
1777 select_args->errno_value = errno;
1778
1779 return NULL;
1780 }
1781
/* A select(2) variant that cooperates with Guile: the calling thread's
   sleep pipe is added to the read set so other threads can wake it up,
   and the real select runs outside of Guile mode.  Returns like
   select(); a deliberate wakeup is reported as -1/EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the sleep; process pending asyncs until that succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Watch the read end of our sleep pipe as well.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC. */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were woken up deliberately: drain the wakeup byte and hide
	 the wakeup fd from the caller.  A wakeup with no other ready
	 descriptor is turned into EINTR.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1839
1840 /* Convenience API for blocking while in guile mode. */
1841
1842 #if SCM_USE_PTHREAD_THREADS
1843
1844 /* It seems reasonable to not run procedures related to mutex and condition
1845 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1846 without it, and (ii) the only potential gain would be GC latency. See
1847 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1848 for a discussion of the pros and cons. */
1849
1850 int
1851 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1852 {
1853 int res = scm_i_pthread_mutex_lock (mutex);
1854 return res;
1855 }
1856
/* Dynwind unwind handler: unlock the pthread mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1862
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1869
/* Wait on COND with MUTEX held, recording in `held_mutex' which mutex
   this thread sleeps with -- NOTE(review): presumably so other code can
   release it when interrupting the thread; confirm the consumer.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1882
/* Timed variant of scm_pthread_cond_wait; WT is the absolute deadline.
   The same `held_mutex' bookkeeping applies.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1897
1898 #endif
1899
1900 unsigned long
1901 scm_std_usleep (unsigned long usecs)
1902 {
1903 struct timeval tv;
1904 tv.tv_usec = usecs % 1000000;
1905 tv.tv_sec = usecs / 1000000;
1906 scm_std_select (0, NULL, NULL, NULL, &tv);
1907 return tv.tv_sec * 1000000 + tv.tv_usec;
1908 }
1909
1910 unsigned int
1911 scm_std_sleep (unsigned int secs)
1912 {
1913 struct timeval tv;
1914 tv.tv_usec = 0;
1915 tv.tv_sec = secs;
1916 scm_std_select (0, NULL, NULL, NULL, &tv);
1917 return tv.tv_sec;
1918 }
1919
1920 /*** Misc */
1921
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* `handle' is the Scheme-level thread object for this OS thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1930
1931 static SCM
1932 scm_c_make_list (size_t n, SCM fill)
1933 {
1934 SCM res = SCM_EOL;
1935 while (n-- > 0)
1936 res = scm_cons (fill, res);
1937 return res;
1938 }
1939
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal delivery thread is hidden from users.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Terminate after the last cell actually filled in; the
     pre-allocated list may have been longer than needed.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1968
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Thin Scheme wrapper around scm_c_thread_exited_p.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1977
1978 int
1979 scm_c_thread_exited_p (SCM thread)
1980 #define FUNC_NAME s_scm_thread_exited_p
1981 {
1982 scm_i_thread *t;
1983 SCM_VALIDATE_THREAD (1, thread);
1984 t = SCM_I_THREAD_DATA (thread);
1985 return t->exited;
1986 }
1987 #undef FUNC_NAME
1988
1989 static scm_i_pthread_cond_t wake_up_cond;
1990 static int threads_initialized_p = 0;
1991
1992
1993 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1994 */
1995 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
1996 int scm_i_critical_section_level = 0;
1997
1998 static SCM dynwind_critical_section_mutex;
1999
/* Enter a critical section for the current dynwind context: lock MUTEX
   (or the shared default when MUTEX is #f), then block asyncs.  Both
   are undone when the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2008
2009 /*** Initialization */
2010
2011 scm_i_pthread_mutex_t scm_i_misc_mutex;
2012
2013 #if SCM_USE_PTHREAD_THREADS
2014 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2015 #endif
2016
/* Earliest thread-system setup: create the global mutexes and the
   wake-up condition variable, then start turning the main thread into
   a Guile thread (phase 1).  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex is created recursive -- presumably
     because critical sections may nest.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 (base);
}
2033
2034 scm_t_bits scm_tc16_thread;
2035 scm_t_bits scm_tc16_mutex;
2036 scm_t_bits scm_tc16_condvar;
2037
/* Register the thread, mutex and condition-variable smob types, and
   finish initializing the first thread (phase 2).  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
2061
/* Capture the current dynamic state as the default one handed to newly
   created threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}
2068
/* Register the Scheme procedures defined via SCM_DEFINE above.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2074
2075 \f
2076 /* IA64-specific things. */
2077
2078 #ifdef __ia64__
2079 # ifdef __hpux
2080 # include <sys/param.h>
2081 # include <sys/pstat.h>
/* HP-UX: locate the register-stack-engine backing store by scanning
   the process's VM regions via pstat.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* HP-UX: extract the ar.bsp register (top of the register backing
   store) from a ucontext.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
2099 # endif /* hpux */
2100 # ifdef linux
2101 # include <ucontext.h>
/* GNU/Linux: glibc exports the backing-store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* GNU/Linux: read ar.bsp out of the signal ucontext.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
2114 # endif /* linux */
2115 #endif /* __ia64__ */
2116
2117
2118 /*
2119 Local Variables:
2120 c-file-style: "gnu"
2121 End:
2122 */