Merge commit 'a675a2e81b792b9f860bec57c38a1948631c7a41'
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014
3 * Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22 \f
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
30
31 #include <stdlib.h>
32 #if HAVE_UNISTD_H
33 #include <unistd.h>
34 #endif
35 #include <stdio.h>
36
37 #ifdef HAVE_STRING_H
38 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
39 #endif
40
41 #if HAVE_SYS_TIME_H
42 #include <sys/time.h>
43 #endif
44
45 #if HAVE_PTHREAD_NP_H
46 # include <pthread_np.h>
47 #endif
48
49 #include <sys/select.h>
50
51 #include <assert.h>
52 #include <fcntl.h>
53 #include <nproc.h>
54
55 #include "libguile/validate.h"
56 #include "libguile/root.h"
57 #include "libguile/eval.h"
58 #include "libguile/async.h"
59 #include "libguile/ports.h"
60 #include "libguile/threads.h"
61 #include "libguile/dynwind.h"
62 #include "libguile/iselect.h"
63 #include "libguile/fluids.h"
64 #include "libguile/continuations.h"
65 #include "libguile/gc.h"
66 #include "libguile/gc-inline.h"
67 #include "libguile/init.h"
68 #include "libguile/scmsigs.h"
69 #include "libguile/strings.h"
70 #include "libguile/vm.h"
71
72 #include <full-read.h>
73
74
75 \f
76
/* The GC "kind" for threads that allow them to mark their VM
   stacks. */
static int thread_gc_kind;

/* Custom libgc mark procedure for objects of THREAD_GC_KIND.  ADDR is
   the start of a scm_i_thread; pushes every reachable pointer held by
   the thread onto the collector's mark stack and returns the updated
   mark-stack pointer.  */
static struct GC_ms_entry *
thread_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
	     struct GC_ms_entry *mark_stack_limit, GC_word env)
{
  int word;
  const struct scm_i_thread *t = (struct scm_i_thread *) addr;

  if (SCM_UNPACK (t->handle) == 0)
    /* T must be on the free-list; ignore.  (See warning in
       gc_mark.h.) */
    return mark_stack_ptr;

  /* Mark T.  We could be more precise, but it doesn't matter.  */
  for (word = 0; word * sizeof (*addr) < sizeof (*t); word++)
    mark_stack_ptr = GC_MARK_AND_PUSH ((void *) addr[word],
				       mark_stack_ptr, mark_stack_limit,
				       NULL);

  /* The pointerless freelists are threaded through their first word,
     but GC doesn't know to trace them (as they are pointerless), so we
     need to do that here.  See the comments at the top of libgc's
     gc_inline.h.  */
  {
    size_t n;
    for (n = 0; n < SCM_INLINE_GC_FREELIST_COUNT; n++)
      {
        void *chain = t->pointerless_freelists[n];
        if (chain)
          {
            /* The first link is already marked by the freelist vector,
               so we just have to mark the tail.  */
            while ((chain = *(void **)chain))
              mark_stack_ptr = GC_mark_and_push (chain, mark_stack_ptr,
                                                 mark_stack_limit, NULL);
          }
      }
  }

  /* The VM stack is marked by a dedicated routine that knows its
     layout.  */
  if (t->vp)
    mark_stack_ptr = scm_i_vm_mark_stack (t->vp, mark_stack_ptr,
                                          mark_stack_limit);

  return mark_stack_ptr;
}
125
126
127 \f
128 static void
129 to_timespec (SCM t, scm_t_timespec *waittime)
130 {
131 if (scm_is_pair (t))
132 {
133 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
134 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
135 }
136 else
137 {
138 double time = scm_to_double (t);
139 double sec = scm_c_truncate (time);
140
141 waittime->tv_sec = (long) sec;
142 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
143 }
144 }
145
146
147 \f
148 /*** Queues */
149
150 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
151 the risk of false references leading to unbounded retained space as
152 described in "Bounding Space Usage of Conservative Garbage Collectors",
153 H.J. Boehm, 2001. */
154
155 /* Make an empty queue data structure.
156 */
157 static SCM
158 make_queue ()
159 {
160 return scm_cons (SCM_EOL, SCM_EOL);
161 }
162
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.  The handle is the freshly
   allocated list cell itself.
*/
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  /* CDR(q) is the element list; CAR(q) caches its last cell, so
     appending is O(1).  */
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
179
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  /* Scan the element list for the cell C itself (identity, not
     element equality), unlinking it when found.  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* When C is the last cell, the tail cache in CAR(q) must be
	     updated (to '() if the queue becomes empty).  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust */
	  SCM_SETCDR (c, SCM_EOL);

	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
209
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* Queue became empty: clear the last-cell cache in CAR(q).  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
237
238 /*** Thread smob routines */
239
240
241 static int
242 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
243 {
244 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
245 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
246 the struct case, hence we go via a union, and extract according to the
247 size of pthread_t. */
248 union {
249 scm_i_pthread_t p;
250 unsigned short us;
251 unsigned int ui;
252 unsigned long ul;
253 scm_t_uintmax um;
254 } u;
255 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
256 scm_i_pthread_t p = t->pthread;
257 scm_t_uintmax id;
258 u.p = p;
259 if (sizeof (p) == sizeof (unsigned short))
260 id = u.us;
261 else if (sizeof (p) == sizeof (unsigned int))
262 id = u.ui;
263 else if (sizeof (p) == sizeof (unsigned long))
264 id = u.ul;
265 else
266 id = u.um;
267
268 scm_puts_unlocked ("#<thread ", port);
269 scm_uintprint (id, 10, port);
270 scm_puts_unlocked (" (", port);
271 scm_uintprint ((scm_t_bits)t, 16, port);
272 scm_puts_unlocked (")>", port);
273 return 1;
274 }
275
276 \f
277 /*** Blocking on queues. */
278
279 /* See also scm_i_queue_async_cell for how such a block is
280 interrputed.
281 */
282
283 /* Put the current thread on QUEUE and go to sleep, waiting for it to
284 be woken up by a call to 'unblock_from_queue', or to be
285 interrupted. Upon return of this function, the current thread is
286 no longer on QUEUE, even when the sleep has been interrupted.
287
288 The caller of block_self must hold MUTEX. It will be atomically
289 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
290
291 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
292 as MUTEX is needed.
293
294 When WAITTIME is not NULL, the sleep will be aborted at that time.
295
296 The return value of block_self is an errno value. It will be zero
297 when the sleep has been successfully completed by a call to
298 unblock_from_queue, EINTR when it has been interrupted by the
299 delivery of a system async, and ETIMEDOUT when the timeout has
300 expired.
301
302 The system asyncs themselves are not executed by block_self.
303 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returns nonzero when an async is already
     pending, in which case we must not go to sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block async delivery while we are on the queue; the async
         mechanism wakes us via sleep_cond instead.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
335
336 /* Wake up the first thread on QUEUE, if any. The awoken thread is
337 returned, or #f if the queue was empty.
338 */
339 static SCM
340 unblock_from_queue (SCM queue)
341 {
342 SCM thread = dequeue (queue);
343 if (scm_is_true (thread))
344 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
345 return thread;
346 }
347
348 \f
349 /* Getting into and out of guile mode.
350 */
351
/* Key used to attach a cleanup handler to a given thread.  Also, if
   thread-local storage is unavailable, this key is used to retrieve the
   current thread with `pthread_getspecific ()'. */
scm_i_pthread_key_t scm_i_thread_key;


#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS.  Note that storing the thread-object
   itself in TLS (rather than a pointer to some malloc'd memory) is not
   possible since thread objects may live longer than the actual thread they
   represent. */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */


/* Protects ALL_THREADS and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list (through next_thread) of all known threads.  */
static scm_i_thread *all_threads = NULL;
/* Number of entries in ALL_THREADS.  */
static int thread_count;

/* Dynamic state used as the parent when none is given explicitly.  */
static SCM scm_i_default_dynamic_state;
375
/* Run when a fluid is collected.  Clears slot N in every thread's
   dynamic-state fluid vector so the dead fluid's value cannot be
   retained.  */
void
scm_i_reset_fluid (size_t n)
{
  scm_i_thread *t;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (t = all_threads; t; t = t->next_thread)
    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
      {
	SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);

	/* Vectors can be shorter than N if they were allocated before
	   this fluid existed; only clear when the slot is present.  */
	if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
	  SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
      }
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
393
/* Perform first stage of thread initialisation, in non-guile mode.
   BASE is the thread's stack base as reported by libgc.  Fills in a
   stack-allocated scm_i_thread, then copies it into GC-managed memory
   and links it onto ALL_THREADS.
*/
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.freelists = NULL;
  t.pointerless_freelists = NULL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynstack.base = NULL;
  t.dynstack.top = NULL;
  t.dynstack.limit = NULL;
  t.active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes setting up.  */
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;
  t.vp = NULL;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  Move T into collector-managed memory of the
     custom kind so thread_mark traces it, then publish it.  */
  {
    scm_i_thread *t_ptr = &t;

    /* Disable GC so nothing runs between allocating the new thread
       struct and installing it as the current thread.  */
    GC_disable ();
    t_ptr = GC_generic_malloc (sizeof (*t_ptr), thread_gc_kind);
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}
470
/* Perform second stage of thread initialisation, in guile mode.
   Allocates the Scheme-visible parts of the current thread: its smob
   handle, freelists, dynamic state and dynamic-wind stack.  PARENT is
   the dynamic state to inherit from, or #f for the initial state.
*/
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  {
    size_t size = SCM_INLINE_GC_FREELIST_COUNT * sizeof (void *);
    t->freelists = scm_gc_malloc (size, "freelists");
    t->pointerless_freelists = scm_gc_malloc (size, "atomic freelists");
  }

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  /* Initial dynwind stack: 16 words, with the header already in
     place.  */
  t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
  t->dynstack.limit = t->dynstack.base + 16;
  t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;

  t->join_queue = make_queue ();
  /* From here on asyncs may be delivered to this thread.  */
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async().  */
  GC_invoke_finalizers ();
}
506
507 \f
508 /*** Fat mutexes */
509
/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all of the fields below */
  SCM owner;                    /* thread handle of the owner, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

/* Smob accessors for mutex objects.  */
#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
531
532 static SCM
533 call_cleanup (void *data)
534 {
535 SCM *proc_p = data;
536 return scm_call_0 (*proc_p);
537 }
538
/* Perform thread tear-down, in guile mode.  Runs the thread's cleanup
   handler (if any), marks the thread exited, wakes all joiners, and
   releases any fat mutexes the thread still owns.
*/
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; catch any
         error so tear-down always continues.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on this thread.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      /* t->mutexes holds weak references; the mutex may have been
         collected already.  */
      SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);

      if (scm_is_true (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Check whether T owns MUTEX.  This is usually the case, unless
	     T abandoned MUTEX; in that case, T is no longer its owner (see
	     `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
	  if (scm_is_eq (m->owner, t->handle))
	    unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = scm_cdr (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
590
/* Trampoline for do_thread_exit: ensures the dying thread is
   registered with libgc (SB is its stack base) before re-entering
   guile mode for the actual tear-down.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
601
/* Destructor attached to SCM_I_THREAD_KEY; V is the dying thread's
   scm_i_thread.  Runs guile-mode cleanup via the trampoline, then
   unlinks the thread from ALL_THREADS and releases its resources.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Clear the key so SCM_I_CURRENT_THREAD no longer resolves to T.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

  if (t->vp)
    {
      scm_i_vm_free_stack (t->vp);
      t->vp = NULL;
    }

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
666
/* Guard so the thread key is created exactly once, process-wide.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create SCM_I_THREAD_KEY with ON_THREAD_EXIT as the destructor that
   runs when a thread carrying a non-NULL value terminates.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
674
/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Returns zero when the thread was known to guile already; otherwise
   return 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).   Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly.  */

static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
741
742 void
743 scm_init_guile ()
744 {
745 struct GC_stack_base stack_base;
746
747 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
748 scm_i_init_thread_for_guile (&stack_base,
749 scm_i_default_dynamic_state);
750 else
751 {
752 fprintf (stderr, "Failed to get stack base for current thread.\n");
753 exit (EXIT_FAILURE);
754 }
755 }
756
/* Argument bundle threaded through GC_call_with_stack_base into
   with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;   /* user function to run in guile mode */
  void *data;        /* opaque argument passed to FUNC */
  SCM parent;        /* parent dynamic state for a new thread */
};

/* Runs FUNC(DATA) inside a continuation barrier; used as the
   GC-active callback in with_guile_and_parent.  */
static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}
771
/* Enter guile mode (initializing this thread for Guile if needed) and
   call the user function from DATA (a struct with_guile_args).  BASE
   is the current stack base.  Returns the user function's result.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Re-enter guile mode for the duration of the call, letting the
         collector scan this thread's stack again.  */
      t->guile_mode = 1;
      res = GC_call_with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
820
821 static void *
822 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
823 {
824 struct with_guile_args args;
825
826 args.func = func;
827 args.data = data;
828 args.parent = parent;
829
830 return GC_call_with_stack_base (with_guile_and_parent, &args);
831 }
832
/* Public API: call FUNC(DATA) in guile mode, using the default
   dynamic state as the parent.  Returns FUNC's result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
839
840 void *
841 scm_without_guile (void *(*func)(void *), void *data)
842 {
843 void *result;
844 scm_i_thread *t = SCM_I_CURRENT_THREAD;
845
846 if (t->guile_mode)
847 {
848 SCM_I_CURRENT_THREAD->guile_mode = 0;
849 result = GC_do_blocking (func, data);
850 SCM_I_CURRENT_THREAD->guile_mode = 1;
851 }
852 else
853 /* Otherwise we're not in guile mode, so nothing to do. */
854 result = func (data);
855
856 return result;
857 }
858
859 \f
860 /*** Thread creation */
861
/* Handshake record shared between call-with-new-thread and the child
   thread it spawns.  MUTEX/COND protect and signal the THREAD field,
   which the child fills in with its own thread object.  */
typedef struct {
  SCM parent;              /* parent dynamic state for the child */
  SCM thunk;               /* thunk the child will call */
  SCM handler;             /* catch handler, or SCM_UNDEFINED */
  SCM thread;              /* child's thread object, set by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
870
/* Body of a thread created by call-with-new-thread, run in guile
   mode.  Publishes the thread object to the parent, then runs the
   thunk (inside a catch when a handler was given) and stores the
   result as the thread's exit value.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy THUNK and HANDLER out before signalling: DATA lives on the
     parent's stack and is invalid once the parent proceeds.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
892
/* pthread entry point for call-with-new-thread: detach the OS thread
   (it is joined at the Scheme level, not via pthread_join) and enter
   guile mode with the requested parent dynamic state.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
901
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* Wait until the child has copied what it needs out of DATA and
     published its thread object; DATA is on our stack.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
949
/* C-level analogue of launch_data, used by scm_spawn_thread: carries
   a C body/handler pair instead of Scheme procedures.  */
typedef struct {
  SCM parent;                     /* parent dynamic state for the child */
  scm_t_catch_body body;          /* C function the child will run */
  void *body_data;                /* argument for BODY */
  scm_t_catch_handler handler;    /* C catch handler, or NULL */
  void *handler_data;             /* argument for HANDLER */
  SCM thread;                     /* child's thread object, set by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
960
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   Publishes the thread object to the parent, then runs BODY (inside
   scm_internal_catch when HANDLER is non-NULL) and stores the result
   as the thread's exit value.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy everything out of DATA before signalling; it lives on the
     parent's stack.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
985
/* pthread entry point for scm_spawn_thread: detach the OS thread and
   enter guile mode with the requested parent dynamic state.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
994
/* C API: spawn a new Guile thread running BODY(BODY_DATA), optionally
   guarded by HANDLER(HANDLER_DATA) via scm_internal_catch.  Blocks
   until the child has started, then returns the child's thread
   object.  Signals a system error if the thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* Wait until the child has copied DATA's contents and published its
     thread object; DATA is on our stack.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}
1030
/* Yield the processor; returns #t if another thread was scheduled.  */
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1039
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
"Asynchronously force the target @var{thread} to terminate. @var{thread} "
"cannot be the current thread, and if @var{thread} has already terminated or "
"been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the admin mutex before cancelling so the target's exit
         path can acquire it.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1064
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}.  "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on a dead or cancelled thread is silently
     ignored.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1088
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A dead or cancelled thread reports no handler.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1107
/* C convenience wrapper: join THREAD with no timeout, returning its
   exit value.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1112
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  /* RES defaults to #f, or to TIMEOUTVAL when given, and is replaced
     by the thread's result if it exits in time.  */
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* EINTR: release the admin mutex so pending asyncs can run,
	     then retry.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited).  */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1179
1180 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1181 (SCM obj),
1182 "Return @code{#t} if @var{obj} is a thread.")
1183 #define FUNC_NAME s_scm_thread_p
1184 {
1185 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1186 }
1187 #undef FUNC_NAME
1188
1189
1190 static int
1191 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1192 {
1193 fat_mutex *m = SCM_MUTEX_DATA (mx);
1194 scm_puts_unlocked ("#<mutex ", port);
1195 scm_uintprint ((scm_t_bits)m, 16, port);
1196 scm_puts_unlocked (">", port);
1197 return 1;
1198 }
1199
/* Allocate and initialize a fat_mutex with the given behavior flags,
   returning it wrapped in a mutex SMOB.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;
  scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
     and so we can just copy it. */
  memcpy (&m->lock, &lock, sizeof (m->lock));
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* `waiting' is first set to a valid SCM (the empty list) before the
     SMOB is created, and the real queue is only allocated afterwards —
     presumably because make_queue can allocate/trigger GC and the SMOB
     must never be reachable with an uninitialized field.  Preserve this
     ordering (NOTE(review): confirm against make_queue's behavior).  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1223
/* C API: create a standard mutex (no option flags — non-recursive,
   checked unlock, no external unlock).  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1228
/* Option symbols accepted by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1232
1233 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1234 (SCM flags),
1235 "Create a new mutex. ")
1236 #define FUNC_NAME s_scm_make_mutex_with_flags
1237 {
1238 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1239
1240 SCM ptr = flags;
1241 while (! scm_is_null (ptr))
1242 {
1243 SCM flag = SCM_CAR (ptr);
1244 if (scm_is_eq (flag, unchecked_unlock_sym))
1245 unchecked_unlock = 1;
1246 else if (scm_is_eq (flag, allow_external_unlock_sym))
1247 external_unlock = 1;
1248 else if (scm_is_eq (flag, recursive_sym))
1249 recursive = 1;
1250 else
1251 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1252 ptr = SCM_CDR (ptr);
1253 }
1254 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1255 }
1256 #undef FUNC_NAME
1257
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1266
/* Exception key thrown when a lock is obtained on a mutex whose owner
   thread has exited (see fat_mutex_lock).  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1268
/* Acquire MUTEX on behalf of OWNER (the current thread when OWNER is
   unbound), optionally giving up at the absolute time TIMEOUT.

   On return, *RET is 1 if the lock was acquired (including a recursive
   re-acquisition) and 0 otherwise.  The result is #f on success, or a
   (key . message) pair describing an exception the caller should
   throw.  Note that an "abandoned mutex" acquisition sets *RET = 1 AND
   returns an exception pair.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      /* Case 1: mutex is free — take it.  */
      if (m->level == 0)
	{
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks. */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small. */
	      t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
				     t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      /* Case 2: held by a thread that has exited — steal it, but
	 report the abandonment to the caller.  */
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      /* Case 3: we already own it — recurse or error depending on the
	 mutex's `recursive' flag.  */
      else if (scm_is_eq (m->owner, new_owner))
	{
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      /* Case 4: contended — check the (absolute) deadline, then wait
	 on the mutex's queue and retry.  */
      else
	{
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      /* TIMEOUT is an absolute timespec; compare seconds, then
		 sub-second parts with usec scaled to nsec.  */
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1357
/* C API: lock MX on behalf of the current thread, blocking with no
   timeout.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1362
1363 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1364 (SCM m, SCM timeout, SCM owner),
1365 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1366 "thread blocks until the mutex becomes available. The function\n"
1367 "returns when the calling thread owns the lock on @var{m}.\n"
1368 "Locking a mutex that a thread already owns will succeed right\n"
1369 "away and will not block the thread. That is, Guile's mutexes\n"
1370 "are @emph{recursive}.")
1371 #define FUNC_NAME s_scm_lock_mutex_timed
1372 {
1373 SCM exception;
1374 int ret = 0;
1375 scm_t_timespec cwaittime, *waittime = NULL;
1376
1377 SCM_VALIDATE_MUTEX (1, m);
1378
1379 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1380 {
1381 to_timespec (timeout, &cwaittime);
1382 waittime = &cwaittime;
1383 }
1384
1385 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1386 SCM_VALIDATE_THREAD (3, owner);
1387
1388 exception = fat_mutex_lock (m, waittime, owner, &ret);
1389 if (!scm_is_false (exception))
1390 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1391 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1392 }
1393 #undef FUNC_NAME
1394
/* Adapter with a `void' return type so the lock operation can be used
   as a dynwind rewind handler.  */
static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}
1400
/* Adapter with a `void' return type so the unlock operation can be
   used as a dynwind unwind handler.  */
static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}
1406
/* Keep MUTEX locked for the extent of the current dynwind context:
   lock it on (re)entry and unlock it on unwinding.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1415
1416 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1417 (SCM mutex),
1418 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1419 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1420 #define FUNC_NAME s_scm_try_mutex
1421 {
1422 SCM exception;
1423 int ret = 0;
1424 scm_t_timespec cwaittime, *waittime = NULL;
1425
1426 SCM_VALIDATE_MUTEX (1, mutex);
1427
1428 to_timespec (scm_from_int(0), &cwaittime);
1429 waittime = &cwaittime;
1430
1431 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1432 if (!scm_is_false (exception))
1433 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1434 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1435 }
1436 #undef FUNC_NAME
1437
1438 /*** Fat condition variables */
1439
/* Representation of a condition variable SMOB.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* Type predicate and data accessor for condition-variable SMOBs.  */
#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1447
1448 static void
1449 remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1450 {
1451 SCM walk, prev;
1452
1453 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1454 walk = SCM_CDR (walk))
1455 {
1456 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1457 {
1458 if (scm_is_pair (prev))
1459 SCM_SETCDR (prev, SCM_CDR (walk));
1460 else
1461 t->mutexes = SCM_CDR (walk);
1462 break;
1463 }
1464 }
1465 }
1466
/* Unlock MUTEX, optionally waiting on condition variable COND.

   Without COND: decrement the recursion level and, when it reaches
   zero, hand ownership to the next queued waiter.  Returns 1.

   With COND: atomically release MUTEX and wait on COND until signalled
   or the absolute time WAITTIME passes; when RELOCK is nonzero the
   mutex is re-acquired (for its original owner) before returning.
   Returns 1 if signalled, 0 on timeout.  Unlocking a mutex we do not
   own raises an error unless the mutex was created with the
   `unchecked-unlock' / `allow-external-unlock' flags.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  /* Enforce the ownership rules, honoring the mutex's flags.  */
  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      /* Condition-variable wait: release the mutex, sleep on COND's
	 queue, and loop on EINTR (asyncs) until signalled or timed
	 out.  */
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      remove_mutex_from_thread (mutex, t);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  /* Block asyncs across the wait so the bookkeeping below is
	     not interrupted.  */
	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      /* Re-acquire the mutex for its previous owner before
		 reporting the wait's outcome, when requested.  */
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then retry the wait.  */
	  t->block_asyncs--;
	  scm_async_tick ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  remove_mutex_from_thread (mutex, t);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1569
/* C API: unlock MX, with no condition variable and no timeout.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1574
1575 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1576 (SCM mx, SCM cond, SCM timeout),
1577 "Unlocks @var{mutex} if the calling thread owns the lock on "
1578 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1579 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1580 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1581 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1582 "with a call to @code{unlock-mutex}. Only the last call to "
1583 "@code{unlock-mutex} will actually unlock the mutex. ")
1584 #define FUNC_NAME s_scm_unlock_mutex_timed
1585 {
1586 scm_t_timespec cwaittime, *waittime = NULL;
1587
1588 SCM_VALIDATE_MUTEX (1, mx);
1589 if (! (SCM_UNBNDP (cond)))
1590 {
1591 SCM_VALIDATE_CONDVAR (2, cond);
1592
1593 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1594 {
1595 to_timespec (timeout, &cwaittime);
1596 waittime = &cwaittime;
1597 }
1598 }
1599
1600 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1601 }
1602 #undef FUNC_NAME
1603
1604 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1605 (SCM obj),
1606 "Return @code{#t} if @var{obj} is a mutex.")
1607 #define FUNC_NAME s_scm_mutex_p
1608 {
1609 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1610 }
1611 #undef FUNC_NAME
1612
1613 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1614 (SCM mx),
1615 "Return the thread owning @var{mx}, or @code{#f}.")
1616 #define FUNC_NAME s_scm_mutex_owner
1617 {
1618 SCM owner;
1619 fat_mutex *m = NULL;
1620
1621 SCM_VALIDATE_MUTEX (1, mx);
1622 m = SCM_MUTEX_DATA (mx);
1623 scm_i_pthread_mutex_lock (&m->lock);
1624 owner = m->owner;
1625 scm_i_pthread_mutex_unlock (&m->lock);
1626
1627 return owner;
1628 }
1629 #undef FUNC_NAME
1630
1631 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1632 (SCM mx),
1633 "Return the lock level of mutex @var{mx}.")
1634 #define FUNC_NAME s_scm_mutex_level
1635 {
1636 SCM_VALIDATE_MUTEX (1, mx);
1637 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1638 }
1639 #undef FUNC_NAME
1640
1641 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1642 (SCM mx),
1643 "Returns @code{#t} if the mutex @var{mx} is locked.")
1644 #define FUNC_NAME s_scm_mutex_locked_p
1645 {
1646 SCM_VALIDATE_MUTEX (1, mx);
1647 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1648 }
1649 #undef FUNC_NAME
1650
1651 static int
1652 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1653 {
1654 fat_cond *c = SCM_CONDVAR_DATA (cv);
1655 scm_puts_unlocked ("#<condition-variable ", port);
1656 scm_uintprint ((scm_t_bits)c, 16, port);
1657 scm_puts_unlocked (">", port);
1658 return 1;
1659 }
1660
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* As in make_fat_mutex: give `waiting' a valid SCM before creating
     the SMOB, and only allocate the real queue afterwards — presumably
     because make_queue can allocate/trigger GC.  Preserve this
     ordering.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1676
1677 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1678 (SCM cv, SCM mx, SCM t),
1679 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1680 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1681 "is locked again when this function returns. When @var{t} is given, "
1682 "it specifies a point in time where the waiting should be aborted. It "
1683 "can be either a integer as returned by @code{current-time} or a pair "
1684 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1685 "mutex is locked and @code{#f} is returned. When the condition "
1686 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1687 "is returned. ")
1688 #define FUNC_NAME s_scm_timed_wait_condition_variable
1689 {
1690 scm_t_timespec waittime, *waitptr = NULL;
1691
1692 SCM_VALIDATE_CONDVAR (1, cv);
1693 SCM_VALIDATE_MUTEX (2, mx);
1694
1695 if (!SCM_UNBNDP (t))
1696 {
1697 to_timespec (t, &waittime);
1698 waitptr = &waittime;
1699 }
1700
1701 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1702 }
1703 #undef FUNC_NAME
1704
/* Wake up at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1710
1711 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1712 (SCM cv),
1713 "Wake up one thread that is waiting for @var{cv}")
1714 #define FUNC_NAME s_scm_signal_condition_variable
1715 {
1716 SCM_VALIDATE_CONDVAR (1, cv);
1717 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1718 return SCM_BOOL_T;
1719 }
1720 #undef FUNC_NAME
1721
1722 static void
1723 fat_cond_broadcast (fat_cond *c)
1724 {
1725 while (scm_is_true (unblock_from_queue (c->waiting)))
1726 ;
1727 }
1728
1729 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1730 (SCM cv),
1731 "Wake up all threads that are waiting for @var{cv}. ")
1732 #define FUNC_NAME s_scm_broadcast_condition_variable
1733 {
1734 SCM_VALIDATE_CONDVAR (1, cv);
1735 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1736 return SCM_BOOL_T;
1737 }
1738 #undef FUNC_NAME
1739
1740 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1741 (SCM obj),
1742 "Return @code{#t} if @var{obj} is a condition variable.")
1743 #define FUNC_NAME s_scm_condition_variable_p
1744 {
1745 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1746 }
1747 #undef FUNC_NAME
1748
1749
1750 \f
1751 /*** Select */
1752
/* Arguments for, and results of, a single select(2) call, bundled so
   the call can be performed by do_std_select via scm_without_guile.  */
struct select_args
{
  int nfds;
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int result;
  int errno_value;
};
1764
1765 static void *
1766 do_std_select (void *args)
1767 {
1768 struct select_args *select_args;
1769
1770 select_args = (struct select_args *) args;
1771
1772 select_args->result =
1773 select (select_args->nfds,
1774 select_args->read_fds, select_args->write_fds,
1775 select_args->except_fds, select_args->timeout);
1776 select_args->errno_value = errno;
1777
1778 return NULL;
1779 }
1780
/* Forward declaration of scm_std_select for builds without
   <sys/select.h>.  NOTE(review): this declares the function `static'
   while the definition below is non-static — if this branch is ever
   compiled the linkage mismatch would be an error; confirm against
   the headers that declare scm_std_select.  */
#if !SCM_HAVE_SYS_SELECT_H
static int scm_std_select (int nfds,
                           fd_set *readfds,
                           fd_set *writefds,
                           fd_set *exceptfds,
                           struct timeval *timeout);
#endif
1788
/* Interruptible select(2) for code running in Guile mode.

   The thread's "sleep pipe" read end is added to the read set so that
   another thread can wake us (via the write end registered with
   scm_i_setup_sleep).  The actual select runs outside Guile mode so
   the GC can proceed.  If the wakeup fd is the only ready descriptor,
   the call is reported as interrupted (-1 / EINTR).  */
int
scm_std_select (int nfds,
                fd_set *readfds,
                fd_set *writefds,
                fd_set *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set to watch the wakeup fd in.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register our sleep pipe; retry (running asyncs) until the sleep
     setup succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If we were woken through the sleep pipe, drain the wakeup byte and
     hide the wakeup fd from the caller's view of the result.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          /* Only the wakeup fd was ready: report an interruption.  */
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
1846
1847 /* Convenience API for blocking while in guile mode. */
1848
1849 #if SCM_USE_PTHREAD_THREADS
1850
1851 /* It seems reasonable to not run procedures related to mutex and condition
1852 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1853 without it, and (ii) the only potential gain would be GC latency. See
1854 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1855 for a discussion of the pros and cons. */
1856
1857 int
1858 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1859 {
1860 int res = scm_i_pthread_mutex_lock (mutex);
1861 return res;
1862 }
1863
/* Dynwind unwind handler: DATA is the pthread mutex to release.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1869
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1876
/* Wait on COND with MUTEX held, recording MUTEX in the thread's
   `held_mutex' field for the duration of the wait (consumers of
   `held_mutex' live elsewhere in this file).  Returns the pthread
   wait result.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1889
/* Like scm_pthread_cond_wait, but give up at the absolute time WT.
   Returns the pthread timedwait result (e.g. ETIMEDOUT).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1904
1905 #endif
1906
/* Dynwind unwind handler paired with
   scm_i_dynwind_pthread_mutex_lock_block_asyncs: release the mutex in
   DATA, then re-enable asyncs for the current thread.  */
static void
do_unlock_with_asyncs (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
  SCM_I_CURRENT_THREAD->block_asyncs--;
}
1913
/* Like scm_dynwind_pthread_mutex_lock, but additionally block asyncs
   while MUTEX is held; both are undone when the dynwind context is
   left.  Asyncs are blocked before taking the lock.  */
void
scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
{
  SCM_I_CURRENT_THREAD->block_asyncs++;
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
                              SCM_F_WIND_EXPLICITLY);
}
1922
1923 unsigned long
1924 scm_std_usleep (unsigned long usecs)
1925 {
1926 struct timeval tv;
1927 tv.tv_usec = usecs % 1000000;
1928 tv.tv_sec = usecs / 1000000;
1929 scm_std_select (0, NULL, NULL, NULL, &tv);
1930 return tv.tv_sec * 1000000 + tv.tv_usec;
1931 }
1932
1933 unsigned int
1934 scm_std_sleep (unsigned int secs)
1935 {
1936 struct timeval tv;
1937 tv.tv_usec = 0;
1938 tv.tv_sec = secs;
1939 scm_std_select (0, NULL, NULL, NULL, &tv);
1940 return tv.tv_sec;
1941 }
1942
1943 /*** Misc */
1944
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The SCM handle of the calling thread's per-thread state.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1953
1954 static SCM
1955 scm_c_make_list (size_t n, SCM fill)
1956 {
1957 SCM res = SCM_EOL;
1958 while (n-- > 0)
1959 res = scm_cons (fill, res);
1960 return res;
1961 }
1962
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list spine before taking the lock, then fill the
     cars in below.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  /* Copy thread handles into the pre-allocated cells, skipping the
     internal signal-delivery thread.  N bounds the traversal in case
     threads were created between reading thread_count and taking the
     lock.  */
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Terminate after the last cell actually used; the result may be
     shorter than the pre-allocated spine.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1991
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Scheme-level wrapper around the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
2000
2001 int
2002 scm_c_thread_exited_p (SCM thread)
2003 #define FUNC_NAME s_scm_thread_exited_p
2004 {
2005 scm_i_thread *t;
2006 SCM_VALIDATE_THREAD (1, thread);
2007 t = SCM_I_THREAD_DATA (thread);
2008 return t->exited;
2009 }
2010 #undef FUNC_NAME
2011
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
	    (void),
	    "Return the total number of processors of the machine, which\n"
	    "is guaranteed to be at least 1.  A ``processor'' here is a\n"
	    "thread execution unit, which can be either:\n\n"
	    "@itemize\n"
	    "@item an execution core in a (possibly multi-core) chip, in a\n"
	    "  (possibly multi- chip) module, in a single computer, or\n"
	    "@item a thread execution unit inside a core in the case of\n"
	    "  @dfn{hyper-threaded} CPUs.\n"
	    "@end itemize\n\n"
	    "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* num_processors comes from <nproc.h>; NPROC_ALL counts all
     processors on the machine.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
2029
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to this process's view.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
2041
2042
2043 \f
2044
2045 static scm_i_pthread_cond_t wake_up_cond;
2046 static int threads_initialized_p = 0;
2047
2048
2049 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2050 */
2051 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2052
2053 static SCM dynwind_critical_section_mutex;
2054
/* Enter a critical section for the extent of the current dynwind
   context: lock MUTEX (or the shared recursive default when MUTEX is
   #f) and block asyncs until the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2063
2064 /*** Initialization */
2065
2066 scm_i_pthread_mutex_t scm_i_misc_mutex;
2067
2068 #if SCM_USE_PTHREAD_THREADS
2069 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2070 #endif
2071
/* Earliest-phase initialization of the threads subsystem: set up the
   global mutexes and wakeup condition variable, register the GC kind
   used for per-thread state, and run phase 1 of turning the calling
   thread into a Guile thread.  BASE is the stack base for the initial
   thread.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Shared attribute object for recursive pthread mutexes; used below
     for the critical-section mutex.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  /* GC object kind whose instances are traced by thread_mark.  */
  thread_gc_kind =
    GC_new_kind (GC_new_free_list (),
		 GC_MAKE_PROC (GC_new_proc (thread_mark), 0),
		 0, 1);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2093
2094 scm_t_bits scm_tc16_thread;
2095 scm_t_bits scm_tc16_mutex;
2096 scm_t_bits scm_tc16_condvar;
2097
/* Register the thread, mutex and condition-variable SMOB types with
   their printers, finish turning the initial thread into a Guile
   thread (phase 2), and create the default critical-section mutex.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2117
/* Capture the current dynamic state as the default one handed to
   newly created threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}
2124
/* Register the Scheme procedures defined by the SCM_DEFINE forms in
   this file (generated into threads.x by the snarfer).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2130
2131 \f
2132 /* IA64-specific things. */
2133
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* HP-UX: locate the register backing store by scanning the process's
   VM regions for the RSE stack.  Aborts if none is found.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* HP-UX: extract the ar.bsp register from a ucontext.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* GNU/Linux: glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* GNU/Linux: ar.bsp lives in the machine context of a ucontext.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
/* FreeBSD: the backing store base is a fixed address.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
/* FreeBSD: compute bsp from the saved bspstore plus the number of
   dirty registers recorded in the machine context.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */
2187
2188
2189 /*
2190 Local Variables:
2191 c-file-style: "gnu"
2192 End:
2193 */