/* libguile/threads.c  [bpt/guile.git]
   (scraped page header; originating commit message, truncated:
   "Renamed the \"frames\" that are related to dynamic-wind to \"dynamic ...\"")  */
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20
21 #define _GNU_SOURCE
22
#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/init.h"
45
46 #ifdef __MINGW32__
47 #ifndef ETIMEDOUT
48 # define ETIMEDOUT WSAETIMEDOUT
49 #endif
50 # include <fcntl.h>
51 # include <process.h>
52 # define pipe(fd) _pipe (fd, 256, O_BINARY)
53 #endif /* __MINGW32__ */
54
55 /*** Queues */
56
57 /* Make an empty queue data structure.
58 */
59 static SCM
60 make_queue ()
61 {
62 return scm_cons (SCM_EOL, SCM_EOL);
63 }
64
65 /* Put T at the back of Q and return a handle that can be used with
66 remqueue to remove T from Q again.
67 */
68 static SCM
69 enqueue (SCM q, SCM t)
70 {
71 SCM c = scm_cons (t, SCM_EOL);
72 if (scm_is_null (SCM_CDR (q)))
73 SCM_SETCDR (q, c);
74 else
75 SCM_SETCDR (SCM_CAR (q), c);
76 SCM_SETCAR (q, c);
77 return c;
78 }
79
80 /* Remove the element that the handle C refers to from the queue Q. C
81 must have been returned from a call to enqueue. The return value
82 is zero when the element referred to by C has already been removed.
83 Otherwise, 1 is returned.
84 */
85 static int
86 remqueue (SCM q, SCM c)
87 {
88 SCM p, prev = q;
89 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
90 {
91 if (scm_is_eq (p, c))
92 {
93 if (scm_is_eq (c, SCM_CAR (q)))
94 SCM_SETCAR (q, SCM_CDR (c));
95 SCM_SETCDR (prev, SCM_CDR (c));
96 return 1;
97 }
98 prev = p;
99 }
100 return 0;
101 }
102
103 /* Remove the front-most element from the queue Q and return it.
104 Return SCM_BOOL_F when Q is empty.
105 */
106 static SCM
107 dequeue (SCM q)
108 {
109 SCM c = SCM_CDR (q);
110 if (scm_is_null (c))
111 return SCM_BOOL_F;
112 else
113 {
114 SCM_SETCDR (q, SCM_CDR (c));
115 if (scm_is_null (SCM_CDR (q)))
116 SCM_SETCAR (q, SCM_EOL);
117 return SCM_CAR (c);
118 }
119 }
120
/*** Thread smob routines */

/* GC mark function for thread smobs: mark every SCM field of the
   thread structure, returning the dynamic state so the caller marks
   it (tail-mark convention for smob mark functions).  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}
134
135 static int
136 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
137 {
138 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
139 scm_puts ("#<thread ", port);
140 scm_uintprint ((size_t)t->pthread, 10, port);
141 scm_puts (" (", port);
142 scm_uintprint ((scm_t_bits)t, 16, port);
143 scm_puts (")>", port);
144 return 1;
145 }
146
/* Free function for thread smobs.  The thread must have exited
   already; pthread-level resources are not touched here.  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  /* A thread smob may only be collected after its thread finished.  */
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
155
/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep reports a pending async; then we do not sleep
     at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Asyncs must not run while we are parked on QUEUE.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
215
216 /* Wake up the first thread on QUEUE, if any. The caller must hold
217 the mutex that protects QUEUE. The awoken thread is returned, or
218 #f when the queue was empty.
219 */
220 static SCM
221 unblock_from_queue (SCM queue)
222 {
223 SCM thread = dequeue (queue);
224 if (scm_is_true (thread))
225 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
226 return thread;
227 }
228
229 /* Getting into and out of guile mode.
230 */
231
232 /* Ken Raeburn observes that the implementation of suspend and resume
233 (and the things that build on top of them) are very likely not
234 correct (see below). We will need fix this eventually, and that's
235 why scm_leave_guile/scm_enter_guile are not exported in the API.
236
237 Ken writes:
238
239 Consider this sequence:
240
241 Function foo, called in Guile mode, calls suspend (maybe indirectly
242 through scm_leave_guile), which does this:
243
244 // record top of stack for the GC
245 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
246 var 't'
247 // save registers.
248 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
249 setjmp (t->regs); // here's most of the magic
250
251 ... and returns.
252
253 Function foo has a SCM value X, a handle on a non-immediate object, in
254 a caller-saved register R, and it's the only reference to the object
255 currently.
256
257 The compiler wants to use R in suspend, so it pushes the current
258 value, X, into a stack slot which will be reloaded on exit from
259 suspend; then it loads stuff into R and goes about its business. The
260 setjmp call saves (some of) the current registers, including R, which
261 no longer contains X. (This isn't a problem for a normal
262 setjmp/longjmp situation, where longjmp would be called before
263 setjmp's caller returns; the old value for X would be loaded back from
264 the stack after the longjmp, before the function returned.)
265
266 So, suspend returns, loading X back into R (and invalidating the jump
267 buffer) in the process. The caller foo then goes off and calls a
268 bunch of other functions out of Guile mode, occasionally storing X on
269 the stack again, but, say, much deeper on the stack than suspend's
270 stack frame went, and the stack slot where suspend had written X has
271 long since been overwritten with other values.
272
273 Okay, nothing actively broken so far. Now, let garbage collection
274 run, triggered by another thread.
275
276 The thread calling foo is out of Guile mode at the time, so the
277 garbage collector just scans a range of stack addresses. Too bad that
278 X isn't stored there. So the pointed-to storage goes onto the free
279 list, and I think you can see where things go from there.
280
281 Is there anything I'm missing that'll prevent this scenario from
282 happening? I mean, aside from, "well, suspend and scm_leave_guile
283 don't have many local variables, so they probably won't need to save
284 any registers on most systems, so we hope everything will wind up in
285 the jump buffer and we'll just get away with it"?
286
287 (And, going the other direction, if scm_leave_guile and suspend push
288 the stack pointer over onto a new page, and foo doesn't make further
289 function calls and thus the stack pointer no longer includes that
290 page, are we guaranteed that the kernel cannot release the now-unused
291 stack page that contains the top-of-stack pointer we just saved? I
292 don't know if any OS actually does that. If it does, we could get
293 faults in garbage collection.)
294
295 I don't think scm_without_guile has to have this problem, as it gets
296 more control over the stack handling -- but it should call setjmp
297 itself. I'd probably try something like:
298
299 // record top of stack for the GC
300 t->top = SCM_STACK_PTR (&t);
301 // save registers.
302 SCM_FLUSH_REGISTER_WINDOWS;
303 setjmp (t->regs);
304 res = func(data);
305 scm_enter_guile (t);
306
307 ... though even that's making some assumptions about the stack
308 ordering of local variables versus caller-saved registers.
309
310 For something like scm_leave_guile to work, I don't think it can just
311 rely on invalidated jump buffers. A valid jump buffer, and a handle
312 on the stack state at the point when the jump buffer was initialized,
313 together, would work fine, but I think then we're talking about macros
314 invoking setjmp in the caller's stack frame, and requiring that the
315 caller of scm_leave_guile also call scm_enter_guile before returning,
316 kind of like pthread_cleanup_push/pop calls that have to be paired up
317 in a function. (In fact, the pthread ones have to be paired up
318 syntactically, as if they might expand to a compound statement
319 incorporating the user's code, and invoking a compiler's
320 exception-handling primitives. Which might be something to think
321 about for cases where Guile is used with C++ exceptions or
322 pthread_cancel.)
323 */
324
/* Thread-specific-data key holding the current scm_i_thread pointer.  */
scm_i_pthread_key_t scm_i_thread_key;

/* Mark thread T as being back in guile mode.  Called with T's heap
   mutex held.  */
static void
resume (scm_i_thread *t)
{
  /* A NULL top means "running in guile mode"; the GC then scans from
     the live stack pointer instead.  */
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      /* The GC asked us to restart with empty freelists.  */
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}
338
339 typedef void* scm_t_guile_ticket;
340
341 static void
342 scm_enter_guile (scm_t_guile_ticket ticket)
343 {
344 scm_i_thread *t = (scm_i_thread *)ticket;
345 if (t)
346 {
347 scm_i_pthread_mutex_lock (&t->heap_mutex);
348 resume (t);
349 }
350 }
351
/* Leave guile mode: record the stack top and register contents so the
   GC can scan this thread while it runs outside Guile.  See the long
   comment above for why this scheme is fragile.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
364
365 static scm_t_guile_ticket
366 scm_leave_guile ()
367 {
368 scm_i_thread *t = suspend ();
369 scm_i_pthread_mutex_unlock (&t->heap_mutex);
370 return (scm_t_guile_ticket) t;
371 }
372
/* Protects ALL_THREADS and THREAD_COUNT.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list of every live thread, via the next_thread field.  */
static scm_i_thread *all_threads = NULL;
static int thread_count;

/* Parent dynamic state used for threads that enter Guile from outside.  */
static SCM scm_i_default_dynamic_state;
378
379 /* Perform first stage of thread initialisation, in non-guile mode.
380 */
381 static void
382 guilify_self_1 (SCM_STACKITEM *base)
383 {
384 scm_i_thread *t = malloc (sizeof (scm_i_thread));
385
386 t->pthread = scm_i_pthread_self ();
387 t->handle = SCM_BOOL_F;
388 t->result = SCM_BOOL_F;
389 t->join_queue = SCM_EOL;
390 t->dynamic_state = SCM_BOOL_F;
391 t->dynwinds = SCM_EOL;
392 t->active_asyncs = SCM_EOL;
393 t->block_asyncs = 1;
394 t->pending_asyncs = 1;
395 t->last_debug_frame = NULL;
396 t->base = base;
397 t->continuation_root = SCM_EOL;
398 t->continuation_base = base;
399 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
400 t->sleep_mutex = NULL;
401 t->sleep_object = SCM_BOOL_F;
402 t->sleep_fd = -1;
403 /* XXX - check for errors. */
404 pipe (t->sleep_pipe);
405 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
406 t->clear_freelists_p = 0;
407 t->gc_running_p = 0;
408 t->exited = 0;
409
410 t->freelist = SCM_EOL;
411 t->freelist2 = SCM_EOL;
412 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
413 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
414
415 scm_i_pthread_setspecific (scm_i_thread_key, t);
416
417 scm_i_pthread_mutex_lock (&t->heap_mutex);
418
419 scm_i_pthread_mutex_lock (&thread_admin_mutex);
420 t->next_thread = all_threads;
421 all_threads = t;
422 thread_count++;
423 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
424 }
425
/* Perform second stage of thread initialisation, in guile mode.
   PARENT is the dynamic state to inherit from, or #f to use the
   initial dynamic state.
*/
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Create the Scheme-visible handle for this thread.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Setup is complete; allow asyncs to run.  */
  t->block_asyncs = 0;
}
446
/* Perform thread tear-down, in guile mode.  V is the scm_i_thread
   pointer of the exiting thread.  Always returns NULL.
*/
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}
465
/* Destructor for the scm_i_thread_key thread-specific data, run by
   pthreads when a guilified thread terminates.  V is the
   scm_i_thread pointer.  */
static void
on_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  /* Reinstall the thread-specific value; pthreads clears it before
     invoking the destructor.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.
  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.
  */
  scm_leave_guile ();
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
495
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create the thread-specific key with on_thread_exit as its
   destructor.  Executed exactly once via init_thread_key_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
503
/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
      */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
566
#ifdef HAVE_LIBC_STACK_END

/* glibc exports the stack base of the main thread here.  */
extern void *__libc_stack_end;

#if SCM_USE_PTHREAD_THREADS
#ifdef HAVE_PTHREAD_ATTR_GETSTACK

#define HAVE_GET_THREAD_STACK_BASE

/* Return the base address of the calling thread's stack, for use as
   the GC scanning boundary.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use __libc_stack_end in that
     case.
  */

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* If a local variable's address falls outside the reported stack
     range, that range is bogus (main thread on LinuxThreads); fall
     back to __libc_stack_end.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return __libc_stack_end;
  else
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#endif /* HAVE_PTHREAD_ATTR_GETSTACK */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

/* Without pthreads there is only the main thread, whose stack base
   glibc reports directly.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return __libc_stack_end;
}

#endif /* !SCM_USE_PTHREAD_THREADS */
#endif /* HAVE_LIBC_STACK_END */
618
#ifdef HAVE_GET_THREAD_STACK_BASE

/* Bring the calling thread into guile mode, discovering the stack
   base automatically.  Only available on platforms where the stack
   base can be determined.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}

#endif
629
630 void *
631 scm_with_guile (void *(*func)(void *), void *data)
632 {
633 return scm_i_with_guile_and_parent (func, data,
634 scm_i_default_dynamic_state);
635 }
636
/* Run FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state, and return its result.  Enters guile mode if necessary and
   leaves it again only if it was entered here.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
			     SCM parent)
{
  void *res;
  int really_entered;
  /* The address of this local serves as the GC stack base.  */
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  if (really_entered)
    scm_leave_guile ();
  return res;
}
650
651 void *
652 scm_without_guile (void *(*func)(void *), void *data)
653 {
654 void *res;
655 scm_t_guile_ticket t;
656 t = scm_leave_guile ();
657 res = func (data);
658 scm_enter_guile (t);
659 return res;
660 }
661
/*** Thread creation */

/* Handshake area between call-with-new-thread and the thread it
   creates; lives on the creator's stack.  MUTEX and COND hand the
   new thread's handle back to the creator.  */
typedef struct {
  SCM parent;			/* parent dynamic state */
  SCM thunk;			/* thunk the new thread will call */
  SCM handler;			/* catch handler, or unbound */
  SCM thread;			/* filled in by the new thread */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
672
/* Body of a new thread, run in guile mode.  Copies what it needs out
   of DATA (which lives on the creator's stack), reports the new
   thread object back to the creator, then runs the thunk.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy thunk and handler now: DATA becomes invalid once the
     creator is signalled and proceeds.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object back to the creator.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
694
695 static void *
696 launch_thread (void *d)
697 {
698 launch_data *data = (launch_data *)d;
699 scm_i_pthread_detach (scm_i_pthread_self ());
700 scm_i_with_guile_and_parent (really_launch, d, data->parent);
701 return NULL;
702 }
703
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* DATA lives on our stack; the new thread copies what it needs out
     of it before signalling COND.  */
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the new thread has stored its handle in DATA.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
747
/* Handshake area between scm_spawn_thread and the thread it creates;
   the C-level analogue of launch_data.  */
typedef struct {
  SCM parent;			/* parent dynamic state */
  scm_t_catch_body body;	/* C function the new thread runs */
  void *body_data;
  scm_t_catch_handler handler;	/* C catch handler, or NULL */
  void *handler_data;
  SCM thread;			/* filled in by the new thread */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
758
/* Body of a thread created with scm_spawn_thread, run in guile mode.
   Copies the C body/handler pointers out of DATA (which lives on the
   creator's stack), reports the thread object back, then runs BODY.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy everything now: DATA becomes invalid once the creator is
     signalled and proceeds.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object back to the creator.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
783
784 static void *
785 spawn_thread (void *d)
786 {
787 spawn_data *data = (spawn_data *)d;
788 scm_i_pthread_detach (scm_i_pthread_self ());
789 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
790 return NULL;
791 }
792
/* Create a new thread running BODY (BODY_DATA), protected by HANDLER
   (HANDLER_DATA) when HANDLER is non-NULL.  Returns the new thread
   object; signals a system error when the thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  /* DATA lives on our stack; the new thread copies what it needs out
     of it before signalling COND.  */
  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the new thread has stored its handle in DATA.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
823
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Thin wrapper over the system-level yield; #t when it succeeded.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
832
SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
	    (SCM thread),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining ourselves would deadlock forever.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  if (!t->exited)
    {
      while (1)
	{
	  /* Sleep on the target's join queue; do_thread_exit wakes
	     all joiners when the target terminates.  */
	  block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
	  if (t->exited)
	    break;
	  /* Interrupted by an async: run it, then wait again.  */
	  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
	}
    }
  res = t->result;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME
867
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;	/* protects the fields below */
  SCM owner;			/* owning thread, or #f when unlocked */
  int level;			/* how much the owner owns us.
				   < 0 for non-recursive mutexes */
  SCM waiting;			/* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
886
/* GC mark function for mutex smobs: mark the owner, return the wait
   queue for the caller to mark (tail-mark convention).  */
static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}
894
/* Free function for mutex smobs: destroy the underlying pthread
   mutex and release the structure.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
903
/* Print a "#<mutex ADDR>" representation of MX on PORT.  */
static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}
913
/* Allocate and return a new mutex smob.  RECURSIVE selects a
   recursive mutex (level counting) over a plain one.  */
static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;
  /* Give waiting a GC-safe value before the smob exists; the real
     queue is allocated afterwards since make_queue can trigger GC.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
929
SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
	    (void),
	    "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  /* 0 = non-recursive.  */
  return make_fat_mutex (0);
}
#undef FUNC_NAME
938
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* 1 = recursive.  */
  return make_fat_mutex (1);
}
#undef FUNC_NAME
947
/* Lock the fat mutex underlying MUTEX.  Returns NULL on success or a
   static error message.  Blocks fairly via the wait queue: the
   unlocking thread hands ownership over before waking a waiter.  */
static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      /* Relocking is only legal for recursive mutexes (level >= 0).  */
      if (m->level >= 0)
	m->level++;
      else
	msg = "mutex already locked by current thread";
    }
  else
    {
      while (1)
	{
	  block_self (m->waiting, mutex, &m->lock, NULL);
	  /* fat_mutex_unlock made us the owner before waking us.  */
	  if (scm_is_eq (m->owner, thread))
	    break;
	  /* Interrupted by an async: run it, then wait again.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
980
SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Lock @var{mutex}. If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
  char *msg;

  SCM_VALIDATE_MUTEX (1, mx);
  msg = fat_mutex_lock (mx);
  /* fat_mutex_lock reports misuse (e.g. relocking a non-recursive
     mutex) as a message; raise it as a Scheme error.  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME
999
/* Arrange for MUTEX to be locked whenever the current dynwind context
   is entered (rewound) and unlocked whenever it is left (unwound).  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1008
/* Try to lock the fat mutex M without blocking.  *RESP is set to 1
   when the lock was acquired (or recursively re-acquired) and to 0
   when another thread owns M.  Returns NULL or an error message.  */
static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
  char *msg = NULL;
  SCM thread = scm_current_thread ();

  *resp = 1;
  scm_i_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      /* Re-entry only succeeds on recursive mutexes (level >= 0).  */
      if (m->level >= 0)
	m->level++;
      else
	msg = "mutex already locked by current thread";
    }
  else
    *resp = 0;
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
1031
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  char *msg;
  int res;

  SCM_VALIDATE_MUTEX (1, mutex);

  msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
  /* A message means misuse (relocking a non-recursive mutex).  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return scm_from_bool (res);
}
#undef FUNC_NAME
1049
/* Unlock the fat mutex M.  Returns NULL on success or an error
   message when the calling thread does not own M.  When the
   recursion level reaches zero, ownership is handed directly to the
   first waiting thread (or #f when none) -- this is what makes the
   mutexes fair.  */
static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
	msg = "mutex not locked";
      else
	msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    m->level--;
  else
    /* Hand the mutex over to the next waiter, if any.  */
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}
1071
SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour. Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock.  Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}.  Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex
{
  char *msg;
  SCM_VALIDATE_MUTEX (1, mx);

  msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
  /* A message means the caller did not own the mutex.  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1092
#if 0

/* Disabled: introspection accessors for the owner and recursion level
   of a mutex, kept around for possible future use.  */

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of a recursive mutex, or -1\n"
	    "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif
1117
/*** Fat condition variables */

/* The C-level representation behind a condition-variable smob: a
   queue of waiting threads guarded by its own pthread mutex.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* Type predicate and data accessor for condition-variable smobs.  */
#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1127
1128 static SCM
1129 fat_cond_mark (SCM cv)
1130 {
1131 fat_cond *c = SCM_CONDVAR_DATA (cv);
1132 return c->waiting;
1133 }
1134
1135 static size_t
1136 fat_cond_free (SCM mx)
1137 {
1138 fat_cond *c = SCM_CONDVAR_DATA (mx);
1139 scm_i_pthread_mutex_destroy (&c->lock);
1140 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1141 return 0;
1142 }
1143
1144 static int
1145 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1146 {
1147 fat_cond *c = SCM_CONDVAR_DATA (cv);
1148 scm_puts ("#<condition-variable ", port);
1149 scm_uintprint ((scm_t_bits)c, 16, port);
1150 scm_puts (">", port);
1151 return 1;
1152 }
1153
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* Give `waiting' a valid SCM value before the smob exists, so that
     fat_cond_mark never sees an uninitialized field.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  /* make_queue allocates (scm_cons) and so may trigger GC; it is only
     safe to call once CV protects C -- hence the two-step init.  */
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1170
/* Wait on COND, releasing MUTEX while blocked and re-locking it before
   returning.  WAITTIME is the timeout (NULL for none; presumably an
   absolute time -- see block_self, TODO confirm).  Returns 1 when
   signalled, 0 on timeout; raises a Scheme error when MUTEX is not
   held by the calling thread, or a system error for unexpected
   failures.  Loops on EINTR.  */
static int
fat_cond_timedwait (SCM cond, SCM mutex,
		    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      /* Take the condvar lock before releasing the mutex so that no
	 signal can slip in between the unlock and the enqueue done by
	 block_self.  */
      scm_i_scm_pthread_mutex_lock (&c->lock);
      msg = fat_mutex_unlock (m);
      t->block_asyncs++;
      if (msg == NULL)
	{
	  err = block_self (c->waiting, cond, &c->lock, waittime);
	  scm_i_pthread_mutex_unlock (&c->lock);
	  /* Re-acquire the mutex before reporting any outcome.  */
	  fat_mutex_lock (mutex);
	}
      else
	scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      if (msg)
	scm_misc_error (NULL, msg, SCM_EOL);

      /* Keep the smobs alive across the blocking calls above.  */
      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
	return 1;
      if (err == ETIMEDOUT)
	return 0;
      if (err != EINTR)
	{
	  errno = err;
	  scm_syserror (NULL);
	}
      /* err == EINTR: loop and wait again.  */
    }
}
1213
1214 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1215 (SCM cv, SCM mx, SCM t),
1216 "Wait until @var{cond-var} has been signalled. While waiting, "
1217 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1218 "is locked again when this function returns. When @var{time} is given, "
1219 "it specifies a point in time where the waiting should be aborted. It "
1220 "can be either a integer as returned by @code{current-time} or a pair "
1221 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1222 "mutex is locked and @code{#f} is returned. When the condition "
1223 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1224 "is returned. ")
1225 #define FUNC_NAME s_scm_timed_wait_condition_variable
1226 {
1227 scm_t_timespec waittime, *waitptr = NULL;
1228
1229 SCM_VALIDATE_CONDVAR (1, cv);
1230 SCM_VALIDATE_MUTEX (2, mx);
1231
1232 if (!SCM_UNBNDP (t))
1233 {
1234 if (scm_is_pair (t))
1235 {
1236 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1237 waittime.tv_nsec = scm_to_ulong (SCM_CAR (t)) * 1000;
1238 }
1239 else
1240 {
1241 waittime.tv_sec = scm_to_ulong (t);
1242 waittime.tv_nsec = 0;
1243 }
1244 waitptr = &waittime;
1245 }
1246
1247 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1248 }
1249 #undef FUNC_NAME
1250
1251 static void
1252 fat_cond_signal (fat_cond *c)
1253 {
1254 scm_i_scm_pthread_mutex_lock (&c->lock);
1255 unblock_from_queue (c->waiting);
1256 scm_i_pthread_mutex_unlock (&c->lock);
1257 }
1258
1259 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1260 (SCM cv),
1261 "Wake up one thread that is waiting for @var{cv}")
1262 #define FUNC_NAME s_scm_signal_condition_variable
1263 {
1264 SCM_VALIDATE_CONDVAR (1, cv);
1265 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1266 return SCM_BOOL_T;
1267 }
1268 #undef FUNC_NAME
1269
1270 static void
1271 fat_cond_broadcast (fat_cond *c)
1272 {
1273 scm_i_scm_pthread_mutex_lock (&c->lock);
1274 while (scm_is_true (unblock_from_queue (c->waiting)))
1275 ;
1276 scm_i_pthread_mutex_unlock (&c->lock);
1277 }
1278
1279 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1280 (SCM cv),
1281 "Wake up all threads that are waiting for @var{cv}. ")
1282 #define FUNC_NAME s_scm_broadcast_condition_variable
1283 {
1284 SCM_VALIDATE_CONDVAR (1, cv);
1285 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1286 return SCM_BOOL_T;
1287 }
1288 #undef FUNC_NAME
1289
/*** Marking stacks */

/* XXX - what to do with this? Do we need to handle this for blocked
   threads as well?
*/
/* On IA64 there is a second stack area, the register backing store,
   that must be scanned conservatively in addition to the normal C
   stack.  Its bounds are taken from getcontext and a glibc-internal
   symbol (NOTE(review): __libc_ia64_register_backing_store_base is
   glibc-specific -- confirm on other libcs).  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1309
/* GC hook: mark the objects reachable from every thread's C stack and
   saved registers.  Must only run while all threads are suspended --
   hence the assert that each thread's stack top has been recorded.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      /* Keep the thread smob itself alive.  */
      scm_gc_mark (t->handle);

      /* Conservatively scan the active stack region, whichever way it
	 grows on this platform.  */
#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Registers saved at suspension time may hold the only
	 reference to an object.  */
      scm_mark_locations ((SCM_STACKITEM *) t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1334
1335 /*** Select */
1336
/* Like select(2), but leaves guile mode while blocked and can be woken
   early by a write to this thread's sleep pipe.  The wakeup fd is
   added to the read set and filtered back out of the result; a
   pipe-only wakeup is reported to the caller as EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We always need a read set so the wakeup fd can be watched.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this thread as sleeping on its pipe; retry via SCM_TICK
     until the registration succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  /* Save errno before scm_enter_guile, which may clobber it.  */
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      /* Drain the wakeup byte (best effort; return value deliberately
	 ignored) and hide the pipe fd from the caller's result.  */
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  /* Only the wakeup pipe fired: report an interruption.  */
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1385
1386 /* Convenience API for blocking while in guile mode. */
1387
1388 #if SCM_USE_PTHREAD_THREADS
1389
1390 int
1391 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1392 {
1393 scm_t_guile_ticket t = scm_leave_guile ();
1394 int res = scm_i_pthread_mutex_lock (mutex);
1395 scm_enter_guile (t);
1396 return res;
1397 }
1398
1399 static void
1400 unlock (void *data)
1401 {
1402 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1403 }
1404
/* Lock MUTEX and register an unwind handler so it is unlocked when the
   current dynwind context is left, on normal exit as well as non-local
   exit (SCM_F_WIND_EXPLICITLY).  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1411
1412 int
1413 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1414 {
1415 scm_t_guile_ticket t = scm_leave_guile ();
1416 int res = scm_i_pthread_cond_wait (cond, mutex);
1417 scm_enter_guile (t);
1418 return res;
1419 }
1420
1421 int
1422 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1423 scm_i_pthread_mutex_t *mutex,
1424 const scm_t_timespec *wt)
1425 {
1426 scm_t_guile_ticket t = scm_leave_guile ();
1427 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1428 scm_enter_guile (t);
1429 return res;
1430 }
1431
1432 #endif
1433
1434 unsigned long
1435 scm_std_usleep (unsigned long usecs)
1436 {
1437 struct timeval tv;
1438 tv.tv_usec = usecs % 1000000;
1439 tv.tv_sec = usecs / 1000000;
1440 scm_std_select (0, NULL, NULL, NULL, &tv);
1441 return tv.tv_sec * 1000000 + tv.tv_usec;
1442 }
1443
1444 unsigned int
1445 scm_std_sleep (unsigned int secs)
1446 {
1447 struct timeval tv;
1448 tv.tv_usec = 0;
1449 tv.tv_sec = secs;
1450 scm_std_select (0, NULL, NULL, NULL, &tv);
1451 return tv.tv_sec;
1452 }
1453
1454 /*** Misc */
1455
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* Each scm_i_thread stores the smob handle for itself.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1464
1465 static SCM
1466 scm_c_make_list (size_t n, SCM fill)
1467 {
1468 SCM res = SCM_EOL;
1469 while (n-- > 0)
1470 res = scm_cons (fill, res);
1471 return res;
1472 }
1473
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  /* So: preallocate the list cells first, then fill them in place
     under the mutex.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  /* Stop after N entries even if threads were created since
     THREAD_COUNT was read.  */
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  /* Truncate the list in case fewer than N threads remain.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1499
1500 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1501 (SCM thread),
1502 "Return @code{#t} iff @var{thread} has exited.\n")
1503 #define FUNC_NAME s_scm_thread_exited_p
1504 {
1505 return scm_from_bool (scm_c_thread_exited_p (thread));
1506 }
1507 #undef FUNC_NAME
1508
1509 int
1510 scm_c_thread_exited_p (SCM thread)
1511 #define FUNC_NAME s_scm_thread_exited_p
1512 {
1513 scm_i_thread *t;
1514 SCM_VALIDATE_THREAD (1, thread);
1515 t = SCM_I_THREAD_DATA (thread);
1516 return t->exited;
1517 }
1518 #undef FUNC_NAME
1519
/* Broadcast by scm_i_thread_wake_up to rouse threads waiting in
   scm_i_thread_sleep_for_gc.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Set to 1 while scm_i_thread_put_to_sleep is stopping all threads,
   reset to 0 once every heap mutex has been acquired.  */
int scm_i_thread_go_to_sleep;
/* Flipped to 1 at the end of scm_init_threads; guards the GC
   stop/start machinery below.  */
static int threads_initialized_p = 0;
1523
/* Stop the world (e.g. for GC): raise the go-to-sleep flag and acquire
   every thread's heap mutex.  NOTE: thread_admin_mutex is deliberately
   left locked here; scm_i_thread_wake_up releases it along with the
   heap mutexes.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      /* All heap mutexes held: no thread can run guile code now.  */
      scm_i_thread_go_to_sleep = 0;
    }
}
1542
1543 void
1544 scm_i_thread_invalidate_freelists ()
1545 {
1546 /* thread_admin_mutex is already locked. */
1547
1548 scm_i_thread *t;
1549 for (t = all_threads; t; t = t->next_thread)
1550 if (t != SCM_I_CURRENT_THREAD)
1551 t->clear_freelists_p = 1;
1552 }
1553
/* Undo scm_i_thread_put_to_sleep: wake the sleepers and release every
   lock taken there, including thread_admin_mutex, then re-enter guile
   mode in the current thread.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      /* Wake threads blocked in scm_i_thread_sleep_for_gc...  */
      scm_i_pthread_cond_broadcast (&wake_up_cond);
      /* ...and release the heap mutexes so they can proceed.  */
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1568
/* Park the calling thread until the GC stop-the-world phase is over:
   suspend, wait for wake_up_cond (the cond-wait releases the thread's
   heap mutex while blocked and re-holds it on return), then resume.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1576
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

/* Fallback recursive mutex used by scm_dynwind_critical_section when
   the caller passes #f; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
1583
1584 void
1585 scm_dynwind_critical_section (SCM mutex)
1586 {
1587 if (scm_is_false (mutex))
1588 mutex = dynwind_critical_section_mutex;
1589 scm_dynwind_lock_mutex (mutex);
1590 scm_dynwind_block_asyncs ();
1591 }
1592
1593 /*** Initialization */
1594
/* Thread-local keys for the per-thread GC freelists; created in
   scm_threads_prehistory below.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute for creating recursive mutexes; initialized in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1600 #endif
1601
/* Early thread-system initialization: set up the process-wide mutexes,
   condition variable and thread-local keys, then register the calling
   thread via guilify_self_1.  BASE is the caller's stack base,
   forwarded to guilify_self_1.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Prepare an attribute for creating recursive mutexes.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex is recursive so nested
     SCM_CRITICAL_SECTION_START calls do not deadlock.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
1620
/* Smob type tags for threads, mutexes and condition variables;
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1624
/* Register the smob types for threads, mutexes and condition
   variables, finish initializing the current thread, and create the
   fallback mutex used by scm_dynwind_critical_section.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* Placeholder; the real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  /* From here on the GC stop/start machinery is active.  */
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1651
1652 void
1653 scm_init_threads_default_dynamic_state ()
1654 {
1655 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1656 scm_i_default_dynamic_state = scm_permanent_object (state);
1657 }
1658
/* Register the SCM_DEFINE'd procedures of this file; the .x file is
   generated from the SCM_DEFINE forms at build time.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1664
1665 /*
1666 Local Variables:
1667 c-file-style: "gnu"
1668 End:
1669 */