Add thread mark procedure
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
3 * Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22 \f
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
30
31 #include <stdlib.h>
32 #if HAVE_UNISTD_H
33 #include <unistd.h>
34 #endif
35 #include <stdio.h>
36
37 #ifdef HAVE_STRING_H
38 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
39 #endif
40
41 #if HAVE_SYS_TIME_H
42 #include <sys/time.h>
43 #endif
44
45 #if HAVE_PTHREAD_NP_H
46 # include <pthread_np.h>
47 #endif
48
49 #include <sys/select.h>
50
51 #include <assert.h>
52 #include <fcntl.h>
53 #include <nproc.h>
54
55 #include "libguile/validate.h"
56 #include "libguile/root.h"
57 #include "libguile/eval.h"
58 #include "libguile/async.h"
59 #include "libguile/ports.h"
60 #include "libguile/threads.h"
61 #include "libguile/dynwind.h"
62 #include "libguile/iselect.h"
63 #include "libguile/fluids.h"
64 #include "libguile/continuations.h"
65 #include "libguile/gc.h"
66 #include "libguile/init.h"
67 #include "libguile/scmsigs.h"
68 #include "libguile/strings.h"
69
70 #include <full-read.h>
71
72
73 \f
74
75 /* The GC "kind" for threads that allow them to mark their VM
76 stacks. */
77 static int thread_gc_kind;
78
/* Custom BDW-GC mark procedure for scm_i_thread objects (installed via
   thread_gc_kind).  Conservatively pushes every word of the thread
   structure onto the collector's mark stack.  */
static struct GC_ms_entry *
thread_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
             struct GC_ms_entry *mark_stack_limit, GC_word env)
{
  int word;
  const struct scm_i_thread *t = (struct scm_i_thread *) addr;

  if (SCM_UNPACK (t->handle) == 0)
    /* T must be on the free-list; ignore.  (See warning in
       gc_mark.h.)  */
    return mark_stack_ptr;

  /* Mark T.  We could be more precise, but it doesn't matter.  */
  for (word = 0; word * sizeof (*addr) < sizeof (*t); word++)
    mark_stack_ptr = GC_MARK_AND_PUSH ((void *) addr[word],
                                       mark_stack_ptr, mark_stack_limit,
                                       NULL);

  return mark_stack_ptr;
}
99
100
101 \f
102 static void
103 to_timespec (SCM t, scm_t_timespec *waittime)
104 {
105 if (scm_is_pair (t))
106 {
107 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
108 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
109 }
110 else
111 {
112 double time = scm_to_double (t);
113 double sec = scm_c_truncate (time);
114
115 waittime->tv_sec = (long) sec;
116 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
117 }
118 }
119
120
121 \f
122 /*** Queues */
123
124 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
125 the risk of false references leading to unbounded retained space as
126 described in "Bounding Space Usage of Conservative Garbage Collectors",
127 H.J. Boehm, 2001. */
128
/* Make an empty queue data structure.  A queue is a pair: the cdr holds
   the list of elements and the car caches the last cell of that list
   (or '() when the queue is empty), so enqueue is O(1).
*/
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}
136
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
*/
static SCM
enqueue (SCM q, SCM t)
{
  /* Allocate the new cell outside the critical section; only the
     pointer surgery below needs to be atomic.  */
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell becomes the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Append after the cached tail cell.  */
    SCM_SETCDR (SCM_CAR (q), c);
  /* The car of Q always tracks the tail.  */
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
153
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  /* Walk the element list looking for the exact cell C.  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C was the tail, update the cached tail pointer; an empty
	     queue caches '() rather than Q itself.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust */
	  SCM_SETCDR (c, SCM_EOL);

	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
183
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      /* Pop the head cell; if the queue became empty, also reset the
	 cached tail pointer in the car.  */
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
211
212 /*** Thread smob routines */
213
214
/* Smob print procedure for thread objects: writes
   "#<thread ID (ADDR)>" to PORT, where ID is derived from the
   underlying pthread identifier and ADDR is the scm_i_thread address.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts_unlocked ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts_unlocked (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts_unlocked (")>", port);
  /* Non-zero tells the printer we handled the object.  */
  return 1;
}
249
250 \f
251 /*** Blocking on queues. */
252
253 /* See also scm_i_queue_async_cell for how such a block is
254    interrupted.
255 */
256
257 /* Put the current thread on QUEUE and go to sleep, waiting for it to
258 be woken up by a call to 'unblock_from_queue', or to be
259 interrupted. Upon return of this function, the current thread is
260 no longer on QUEUE, even when the sleep has been interrupted.
261
262 The caller of block_self must hold MUTEX. It will be atomically
263 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
264
265 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
266 as MUTEX is needed.
267
268 When WAITTIME is not NULL, the sleep will be aborted at that time.
269
270 The return value of block_self is an errno value. It will be zero
271 when the sleep has been successfully completed by a call to
272 unblock_from_queue, EINTR when it has been interrupted by the
273 delivery of a system async, and ETIMEDOUT when the timeout has
274 expired.
275
276 The system asyncs themselves are not executed by block_self.
277 */
/* See the long contract comment above: sleep on QUEUE until woken,
   interrupted, or (optionally) timed out; returns an errno value.  */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returns non-zero when an async is already
     pending, in which case we must not sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while we are on the queue so that an interrupt
	 cannot run Scheme code in the middle of the protocol.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
309
/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
*/
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    /* Signal the sleeper's private condition variable; it is waiting
       in block_self above.  */
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}
321
322 \f
323 /* Getting into and out of guile mode.
324 */
325
326 /* Key used to attach a cleanup handler to a given thread. Also, if
327 thread-local storage is unavailable, this key is used to retrieve the
328 current thread with `pthread_getspecific ()'. */
329 scm_i_pthread_key_t scm_i_thread_key;
330
331
332 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
333
334 /* When thread-local storage (TLS) is available, a pointer to the
335 current-thread object is kept in TLS. Note that storing the thread-object
336 itself in TLS (rather than a pointer to some malloc'd memory) is not
337 possible since thread objects may live longer than the actual thread they
338 represent. */
339 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
340
341 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
342
343
344 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
345 static scm_i_thread *all_threads = NULL;
346 static int thread_count;
347
348 static SCM scm_i_default_dynamic_state;
349
/* Run when a fluid is collected.  Clears slot N of every thread's
   dynamic-state fluid vector so the dead fluid's value cannot keep
   other objects alive.  */
void
scm_i_reset_fluid (size_t n)
{
  scm_i_thread *t;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (t = all_threads; t; t = t->next_thread)
    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
      {
	SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);

	/* A thread's fluid vector may be shorter than N if it was
	   created before the fluid; skip it then.  */
	if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
	  SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
      }
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
367
/* Perform first stage of thread initialisation, in non-guile mode.
   Fills in a stack-allocated scm_i_thread, then copies it into
   GC-managed storage and links it onto the global thread list.
*/
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynstack.base = NULL;
  t.dynstack.top = NULL;
  t.dynstack.limit = NULL;
  t.active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes setting up.  */
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;
  t.vp = NULL;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  Move the thread record from the stack into
     GC-managed memory of the special thread kind, with GC disabled so
     the half-initialized copy is never scanned mid-move.  */
  {
    scm_i_thread *t_ptr = &t;

    GC_disable ();
    t_ptr = GC_generic_malloc (sizeof (*t_ptr), thread_gc_kind);
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}
442
/* Perform second stage of thread initialisation, in guile mode.
   Allocates the Scheme-visible parts of the thread: the handle smob,
   continuation root, dynamic state and dynstack.  PARENT is the
   dynamic state to inherit from, or #f for the initial state.
*/
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
  t->dynstack.limit = t->dynstack.base + 16;
  t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;

  t->join_queue = make_queue ();
  /* Setup is complete; allow asyncs from now on (blocked since
     guilify_self_1).  */
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async().  */
  GC_invoke_finalizers ();
}
472
473 \f
474 /*** Fat mutexes */
475
476 /* We implement our own mutex type since we want them to be 'fair', we
477 want to do fancy things while waiting for them (like running
478 asyncs) and we might want to add things that are nice for
479 debugging.
480 */
481
/* A "fat" mutex: a pthread mutex protecting Scheme-level ownership
   bookkeeping plus a queue of waiting threads.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all fields below */
  SCM owner;                    /* thread handle of the owner, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
497
498 static SCM
499 call_cleanup (void *data)
500 {
501 SCM *proc_p = data;
502 return scm_call_0 (*proc_p);
503 }
504
/* Perform thread tear-down, in guile mode.  Runs the cleanup handler
   (if any), marks the thread exited, wakes joiners, and releases any
   fat mutexes the thread still owns.
*/
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so a crashing handler cannot run twice;
	 catch everything so tear-down always continues.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      /* t->mutexes holds weak references; the mutex may already have
	 been collected.  */
      SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);

      if (scm_is_true (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Check whether T owns MUTEX.  This is usually the case, unless
	     T abandoned MUTEX; in that case, T is no longer its owner (see
	     `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
	  if (scm_is_eq (m->owner, t->handle))
	    unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = scm_cdr (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
556
/* Re-enter guile mode (with a fresh stack base) to run do_thread_exit;
   used from the pthread key destructor, which runs in non-guile mode.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
567
/* pthread key destructor: final tear-down for a dying thread.  Runs
   Scheme-level cleanup via the trampoline above, then unlinks the
   thread from the global list and unregisters it from the GC.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
626
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* One-time creation of the thread-local key; on_thread_exit becomes the
   key's destructor and thus the per-thread tear-down hook.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
634
635 /* Perform any initializations necessary to make the current thread
636 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
637 if necessary.
638
639 BASE is the stack base to use with GC.
640
641    PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
642 which case the default dynamic state is used.
643
644 Returns zero when the thread was known to guile already; otherwise
645 return 1.
646
647 Note that it could be the case that the thread was known
648 to Guile, but not in guile mode (because we are within a
649 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
650 be sure. New threads are put into guile mode implicitly. */
651
/* See the contract comment above: make the calling thread known to
   Guile, initializing Guile itself the first time; returns 1 if the
   thread was newly guilified, 0 if it was already known.  */
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	   */
	  scm_i_init_guile (base);

#if SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
701
702 void
703 scm_init_guile ()
704 {
705 struct GC_stack_base stack_base;
706
707 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
708 scm_i_init_thread_for_guile (&stack_base,
709 scm_i_default_dynamic_state);
710 else
711 {
712 fprintf (stderr, "Failed to get stack base for current thread.\n");
713 exit (EXIT_FAILURE);
714 }
715 }
716
/* Argument bundle threaded through GC_call_with_stack_base into
   with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;   /* user function to run in guile mode */
  void *data;        /* its argument */
  SCM parent;        /* parent dynamic state for a new thread */
};
723
/* Unwrap the with_guile_args bundle and run the user function inside a
   continuation barrier.  */
static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}
731
/* Run ARGS->func in guile mode, guilifying the thread first if needed.
   Handles the three cases: brand-new thread, already in guile mode,
   and re-entering guile mode from within scm_without_guile.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
	 scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      /* Tell the GC this thread's stack is live again while FUNC runs.  */
      res = GC_call_with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
780
/* Call FUNC(DATA) in guile mode, using PARENT as the parent dynamic
   state if this thread is new to Guile.  Captures the current stack
   base so conservative scanning covers the caller's frame.  */
static void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  struct with_guile_args args;

  args.func = func;
  args.data = data;
  args.parent = parent;

  return GC_call_with_stack_base (with_guile_and_parent, &args);
}
792
/* Public API: call FUNC(DATA) in guile mode with the default dynamic
   state as parent; returns FUNC's result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
799
800 void *
801 scm_without_guile (void *(*func)(void *), void *data)
802 {
803 void *result;
804 scm_i_thread *t = SCM_I_CURRENT_THREAD;
805
806 if (t->guile_mode)
807 {
808 SCM_I_CURRENT_THREAD->guile_mode = 0;
809 result = GC_do_blocking (func, data);
810 SCM_I_CURRENT_THREAD->guile_mode = 1;
811 }
812 else
813 /* Otherwise we're not in guile mode, so nothing to do. */
814 result = func (data);
815
816 return result;
817 }
818
819 \f
820 /*** Thread creation */
821
/* Handshake data passed from call-with-new-thread to the new thread:
   the creator waits on COND until the child stores its handle in
   THREAD.  */
typedef struct {
  SCM parent;     /* parent dynamic state */
  SCM thunk;      /* thunk to run in the new thread */
  SCM handler;    /* catch handler, or SCM_UNDEFINED */
  SCM thread;     /* set by the child to its thread handle */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
830
/* Body of a thread created by call-with-new-thread, run in guile mode.
   Publishes the thread handle to the waiting creator, then runs the
   thunk (optionally under a catch-all handler) and stores the result.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy THUNK and HANDLER out before signalling: DATA lives on the
     creator's stack and becomes invalid once it resumes.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
852
/* pthread entry point: detach ourselves (Guile threads are joined at
   the Scheme level, not via pthread_join) and enter guile mode.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
861
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* Nudge the collector before spawning; see upstream usage of
     GC_collect_a_little around thread creation.  */
  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* Wait for the child to publish its thread handle; DATA is on our
     stack, so we must not return before the child is done with it.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
909
/* C-level analogue of launch_data for scm_spawn_thread: same
   handshake, but with a C body/handler pair instead of Scheme
   procedures.  */
typedef struct {
  SCM parent;                   /* parent dynamic state */
  scm_t_catch_body body;        /* C function to run */
  void *body_data;
  scm_t_catch_handler handler;  /* optional C catch handler */
  void *handler_data;
  SCM thread;                   /* set by the child to its handle */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
920
/* Body of a thread created by scm_spawn_thread, run in guile mode:
   publish our handle to the creator, then run BODY (under HANDLER if
   one was given) and record the result.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy everything out of DATA before signalling; it lives on the
     creator's stack.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
945
/* pthread entry point for scm_spawn_thread: detach and enter guile
   mode, mirroring launch_thread above.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
954
/* C API: spawn a new Guile thread running BODY(BODY_DATA), optionally
   protected by HANDLER(HANDLER_DATA).  Blocks until the child has
   started and returns its thread handle.  Signals a system error if
   the underlying thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* Wait until the child publishes its handle; DATA is stack-allocated
     here so we cannot return earlier.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}
990
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Returns #t when the yield call itself succeeded.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
999
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
"Asynchronously force the target @var{thread} to terminate. @var{thread} "
"cannot be the current thread, and if @var{thread} has already terminated or "
"been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the admin mutex before cancelling so the dying thread's
	 tear-down can take it.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1024
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
"This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Silently ignore the request once the thread is already dead or
     doomed; there is nothing left to clean up.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1048
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A dead or cancelled thread reports no handler.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1067
/* C API convenience wrapper: join THREAD with no timeout.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1072
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  /* RES defaults to #f; overridden by TIMEOUTVAL, and by the thread's
     result on a successful join.  */
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on the target's join queue; woken by do_thread_exit,
	     a timeout, or an async interrupt.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* EINTR: release the mutex so pending asyncs can run.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited).  */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1139
SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
	    (SCM obj),
	    "Return @code{#t} if @var{obj} is a thread.")
#define FUNC_NAME s_scm_thread_p
{
  /* Simple smob type check.  */
  return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1148
1149
1150 static int
1151 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1152 {
1153 fat_mutex *m = SCM_MUTEX_DATA (mx);
1154 scm_puts_unlocked ("#<mutex ", port);
1155 scm_uintprint ((scm_t_bits)m, 16, port);
1156 scm_puts_unlocked (">", port);
1157 return 1;
1158 }
1159
/* Allocate and initialize a fat mutex smob.  RECURSIVE,
   UNCHECKED_UNLOCK and EXTERNAL_UNLOCK set the corresponding
   behavior flags on the new mutex.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;
  scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
     and so we can just copy it. */
  memcpy (&m->lock, &lock, sizeof (m->lock));
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Install a safe placeholder before creating the smob, then build
     the real wait queue: make_queue may allocate (and hence trigger
     GC), so M must already be fully initialized by then.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1183
/* C API: create a standard mutex, equivalent to `(make-mutex)' with
   no flags.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1188
/* Option symbols recognized by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1192
1193 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1194 (SCM flags),
1195 "Create a new mutex. ")
1196 #define FUNC_NAME s_scm_make_mutex_with_flags
1197 {
1198 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1199
1200 SCM ptr = flags;
1201 while (! scm_is_null (ptr))
1202 {
1203 SCM flag = SCM_CAR (ptr);
1204 if (scm_is_eq (flag, unchecked_unlock_sym))
1205 unchecked_unlock = 1;
1206 else if (scm_is_eq (flag, allow_external_unlock_sym))
1207 external_unlock = 1;
1208 else if (scm_is_eq (flag, recursive_sym))
1209 recursive = 1;
1210 else
1211 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1212 ptr = SCM_CDR (ptr);
1213 }
1214 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1215 }
1216 #undef FUNC_NAME
1217
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, other flags off.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1226
/* Error key thrown when a lock is obtained on a mutex whose owner
   thread exited without unlocking it.  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1228
/* Acquire MUTEX on behalf of OWNER (the current thread when OWNER is
   unbound), optionally giving up at the absolute time TIMEOUT.  On
   return, *RET is 1 when the lock was obtained and 0 otherwise.  The
   result is #f on success, or a (key . message) pair describing an
   exception the caller should throw (throwing is deferred to the
   caller so it happens outside M->lock).  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          /* Unlocked: take it.  */
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

              /* FIXME: The order in which `t->admin_mutex' and
                 `m->lock' are taken differs from that in
                 `on_thread_exit', potentially leading to deadlocks. */
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small.  */
              t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
                                     t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          /* Owner exited without unlocking: steal the mutex but tell
             the caller it was abandoned.  */
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          /* Relocking: allowed for recursive mutexes, error otherwise.  */
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          /* Held by another thread: fail if the absolute TIMEOUT has
             passed, otherwise block on the wait queue and retry.  */
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          /* Run pending asyncs with M->lock released, then retry.  */
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1317
/* C API: lock MX, blocking indefinitely (no timeout, default owner).  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1322
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
	    "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
	    "thread blocks until the mutex becomes available. The function\n"
	    "returns when the calling thread owns the lock on @var{m}.\n"
	    "Locking a mutex that a thread already owns will succeed right\n"
	    "away and will not block the thread. That is, Guile's mutexes\n"
	    "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* TIMEOUT, when given and not #f, is an absolute point in time.  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  /* OWNER, when given, becomes the owner of M in place of the
     calling thread.  */
  if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
    SCM_VALIDATE_THREAD (3, owner);

  exception = fat_mutex_lock (m, waittime, owner, &ret);
  /* fat_mutex_lock reports errors as a (key . message) pair so the
     throw happens here, outside the mutex's internal lock.  */
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1354
1355 static void
1356 lock_mutex_return_void (SCM mx)
1357 {
1358 (void) scm_lock_mutex (mx);
1359 }
1360
1361 static void
1362 unlock_mutex_return_void (SCM mx)
1363 {
1364 (void) scm_unlock_mutex (mx);
1365 }
1366
/* Within the current dynwind context, lock MUTEX on rewind and unlock
   it on unwind, so the lock is released however the dynamic extent is
   left (normal exit, exception, or continuation).  The unwind handler
   is registered first so an interrupt during locking is covered.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
1375
1376 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1377 (SCM mutex),
1378 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1379 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1380 #define FUNC_NAME s_scm_try_mutex
1381 {
1382 SCM exception;
1383 int ret = 0;
1384 scm_t_timespec cwaittime, *waittime = NULL;
1385
1386 SCM_VALIDATE_MUTEX (1, mutex);
1387
1388 to_timespec (scm_from_int(0), &cwaittime);
1389 waittime = &cwaittime;
1390
1391 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1392 if (!scm_is_false (exception))
1393 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1394 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1395 }
1396 #undef FUNC_NAME
1397
1398 /*** Fat condition variables */
1399
/* A "fat" condition variable, built on Guile's own wait queues
   rather than directly on pthread condvars.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* NOTE(review): lock appears unused in
                                   this chunk; waiters are serialized
                                   via the mutex's lock -- confirm.  */
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1407
1408 static void
1409 remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1410 {
1411 SCM walk, prev;
1412
1413 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1414 walk = SCM_CDR (walk))
1415 {
1416 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1417 {
1418 if (scm_is_pair (prev))
1419 SCM_SETCDR (prev, SCM_CDR (walk));
1420 else
1421 t->mutexes = SCM_CDR (walk);
1422 break;
1423 }
1424 }
1425 }
1426
/* Unlock MUTEX.  When COND is given, atomically wait on it after
   releasing the mutex (this is the engine behind both `unlock-mutex'
   with a condvar and `wait-condition-variable'); WAITTIME is the
   optional absolute deadline for that wait, and RELOCK says whether
   to re-acquire MUTEX once the wait completes.  Returns 1 on success
   and 0 when a timed wait timed out.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  /* Enforce ownership rules, honoring the mutex's
     `unchecked-unlock' and `allow-external-unlock' flags.  */
  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          /* Release one level; at level 0 hand the mutex to the next
             waiter (or #f).  */
          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              remove_mutex_from_thread (mutex, t);
              m->owner = unblock_from_queue (m->waiting);
            }

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              /* Signalled.  */
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          /* EINTR: let pending asyncs run, then loop and wait again.  */
          t->block_asyncs--;
          scm_async_tick ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          remove_mutex_from_thread (mutex, t);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1529
/* C API: plain unlock of MX (no condition variable, no timeout).  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1534
SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
"Unlocks @var{mutex} if the calling thread owns the lock on "
"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
"thread results in undefined behaviour. Once a mutex has been unlocked, "
"one thread blocked on @var{mutex} is awakened and grabs the mutex "
"lock. Every call to @code{lock-mutex} by this thread must be matched "
"with a call to @code{unlock-mutex}. Only the last call to "
"@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  /* With COND given, this atomically unlocks MX and waits on COND,
     optionally until the absolute time TIMEOUT.  */
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  /* Final 0: do not re-lock MX after a wait.  */
  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1563
1564 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1565 (SCM obj),
1566 "Return @code{#t} if @var{obj} is a mutex.")
1567 #define FUNC_NAME s_scm_mutex_p
1568 {
1569 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1570 }
1571 #undef FUNC_NAME
1572
1573 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1574 (SCM mx),
1575 "Return the thread owning @var{mx}, or @code{#f}.")
1576 #define FUNC_NAME s_scm_mutex_owner
1577 {
1578 SCM owner;
1579 fat_mutex *m = NULL;
1580
1581 SCM_VALIDATE_MUTEX (1, mx);
1582 m = SCM_MUTEX_DATA (mx);
1583 scm_i_pthread_mutex_lock (&m->lock);
1584 owner = m->owner;
1585 scm_i_pthread_mutex_unlock (&m->lock);
1586
1587 return owner;
1588 }
1589 #undef FUNC_NAME
1590
1591 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1592 (SCM mx),
1593 "Return the lock level of mutex @var{mx}.")
1594 #define FUNC_NAME s_scm_mutex_level
1595 {
1596 SCM_VALIDATE_MUTEX (1, mx);
1597 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1598 }
1599 #undef FUNC_NAME
1600
1601 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1602 (SCM mx),
1603 "Returns @code{#t} if the mutex @var{mx} is locked.")
1604 #define FUNC_NAME s_scm_mutex_locked_p
1605 {
1606 SCM_VALIDATE_MUTEX (1, mx);
1607 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1608 }
1609 #undef FUNC_NAME
1610
1611 static int
1612 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1613 {
1614 fat_cond *c = SCM_CONDVAR_DATA (cv);
1615 scm_puts_unlocked ("#<condition-variable ", port);
1616 scm_uintprint ((scm_t_bits)c, 16, port);
1617 scm_puts_unlocked (">", port);
1618 return 1;
1619 }
1620
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Install a safe placeholder before creating the smob, then build
     the real wait queue: make_queue may allocate (and hence trigger
     GC), so C must already be valid by then.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1636
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
"Wait until condition variable @var{cv} has been signalled. While waiting, "
"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
"is locked again when this function returns. When @var{t} is given, "
"it specifies a point in time where the waiting should be aborted. It "
"can be either a integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}. When the waiting is aborted the "
"mutex is locked and @code{#f} is returned. When the condition "
"variable is in fact signalled, the mutex is also locked and @code{#t} "
"is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  /* T, when given, is an absolute time; convert it to a timespec.  */
  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* Final 1: re-lock MX once the wait finishes (signal or timeout).  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1664
1665 static void
1666 fat_cond_signal (fat_cond *c)
1667 {
1668 unblock_from_queue (c->waiting);
1669 }
1670
1671 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1672 (SCM cv),
1673 "Wake up one thread that is waiting for @var{cv}")
1674 #define FUNC_NAME s_scm_signal_condition_variable
1675 {
1676 SCM_VALIDATE_CONDVAR (1, cv);
1677 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1678 return SCM_BOOL_T;
1679 }
1680 #undef FUNC_NAME
1681
1682 static void
1683 fat_cond_broadcast (fat_cond *c)
1684 {
1685 while (scm_is_true (unblock_from_queue (c->waiting)))
1686 ;
1687 }
1688
1689 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1690 (SCM cv),
1691 "Wake up all threads that are waiting for @var{cv}. ")
1692 #define FUNC_NAME s_scm_broadcast_condition_variable
1693 {
1694 SCM_VALIDATE_CONDVAR (1, cv);
1695 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1696 return SCM_BOOL_T;
1697 }
1698 #undef FUNC_NAME
1699
1700 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1701 (SCM obj),
1702 "Return @code{#t} if @var{obj} is a condition variable.")
1703 #define FUNC_NAME s_scm_condition_variable_p
1704 {
1705 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1706 }
1707 #undef FUNC_NAME
1708
1709
1710 \f
1711 /*** Select */
1712
/* Arguments to and results of the select() call that do_std_select
   performs outside guile mode on behalf of scm_std_select.  */
struct select_args
{
  int nfds;
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  int result;        /* return value of select() */
  int errno_value;   /* errno captured right after select() */
};
1724
1725 static void *
1726 do_std_select (void *args)
1727 {
1728 struct select_args *select_args;
1729
1730 select_args = (struct select_args *) args;
1731
1732 select_args->result =
1733 select (select_args->nfds,
1734 select_args->read_fds, select_args->write_fds,
1735 select_args->except_fds, select_args->timeout);
1736 select_args->errno_value = errno;
1737
1738 return NULL;
1739 }
1740
/* Without <sys/select.h> there is no system prototype for select-style
   functions, so declare scm_std_select before its definition below.
   NOTE(review): this declaration is `static' while the definition
   below is not -- confirm this branch still compiles on platforms
   lacking sys/select.h.  */
#if !SCM_HAVE_SYS_SELECT_H
static int scm_std_select (int nfds,
                           fd_set *readfds,
                           fd_set *writefds,
                           fd_set *exceptfds,
                           struct timeval *timeout);
#endif
1748
/* Like select(), but cooperating with Guile: the calling thread can be
   woken early through its sleep pipe (e.g. to deliver an async), and
   the actual select() runs outside guile mode so it does not delay GC.
   Returns like select(); a wakeup with no other ready descriptor is
   reported as -1 with errno = EINTR.  */
int
scm_std_select (int nfds,
                fd_set *readfds,
                fd_set *writefds,
                fd_set *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, to watch the sleep pipe.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Publish that we are about to sleep; retry while asyncs are
     pending so none is missed.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Also select on the read end of this thread's sleep pipe.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If woken via the sleep pipe, consume the wakeup byte and hide the
     pipe fd from the caller; report EINTR when it was the only ready
     descriptor.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
1806
1807 /* Convenience API for blocking while in guile mode. */
1808
1809 #if SCM_USE_PTHREAD_THREADS
1810
1811 /* It seems reasonable to not run procedures related to mutex and condition
1812 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1813 without it, and (ii) the only potential gain would be GC latency. See
1814 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1815 for a discussion of the pros and cons. */
1816
1817 int
1818 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1819 {
1820 int res = scm_i_pthread_mutex_lock (mutex);
1821 return res;
1822 }
1823
1824 static void
1825 do_unlock (void *data)
1826 {
1827 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1828 }
1829
/* Lock the low-level MUTEX and arrange for it to be unlocked when the
   current dynwind context is left by any route.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1836
/* Wait on COND with MUTEX held.  MUTEX is recorded in t->held_mutex
   while blocked so other parts of the runtime can release it on the
   thread's behalf (presumably during thread exit -- see on_thread_exit;
   TODO confirm, the consumer is not visible in this chunk).  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1849
/* Like scm_pthread_cond_wait, but give up at the absolute time WT.
   Records MUTEX in t->held_mutex while blocked, like the untimed
   variant above.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1864
1865 #endif
1866
1867 unsigned long
1868 scm_std_usleep (unsigned long usecs)
1869 {
1870 struct timeval tv;
1871 tv.tv_usec = usecs % 1000000;
1872 tv.tv_sec = usecs / 1000000;
1873 scm_std_select (0, NULL, NULL, NULL, &tv);
1874 return tv.tv_sec * 1000000 + tv.tv_usec;
1875 }
1876
1877 unsigned int
1878 scm_std_sleep (unsigned int secs)
1879 {
1880 struct timeval tv;
1881 tv.tv_usec = 0;
1882 tv.tv_sec = secs;
1883 scm_std_select (0, NULL, NULL, NULL, &tv);
1884 return tv.tv_sec;
1885 }
1886
1887 /*** Misc */
1888
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* HANDLE is the SCM-level thread object for this scm_i_thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1897
1898 static SCM
1899 scm_c_make_list (size_t n, SCM fill)
1900 {
1901 SCM res = SCM_EOL;
1902 while (n-- > 0)
1903 res = scm_cons (fill, res);
1904 return res;
1905 }
1906
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  /* Fill the pre-allocated list in place.  N bounds the traversal so
     we never write past the cells allocated above, even if threads
     were created between reading thread_count and taking the lock.  */
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from users.  */
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  /* Terminate here, dropping any unused pre-allocated cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1935
1936 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1937 (SCM thread),
1938 "Return @code{#t} iff @var{thread} has exited.\n")
1939 #define FUNC_NAME s_scm_thread_exited_p
1940 {
1941 return scm_from_bool (scm_c_thread_exited_p (thread));
1942 }
1943 #undef FUNC_NAME
1944
/* C-level predicate behind `thread-exited?'.  Note that FUNC_NAME
   deliberately reuses s_scm_thread_exited_p so validation errors are
   reported under the Scheme procedure's name.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1955
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
	    (void),
	    "Return the total number of processors of the machine, which\n"
	    "is guaranteed to be at least 1.  A ``processor'' here is a\n"
	    "thread execution unit, which can be either:\n\n"
	    "@itemize\n"
	    "@item an execution core in a (possibly multi-core) chip, in a\n"
	    "  (possibly multi- chip) module, in a single computer, or\n"
	    "@item a thread execution unit inside a core in the case of\n"
	    "  @dfn{hyper-threaded} CPUs.\n"
	    "@end itemize\n\n"
	    "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* num_processors comes from gnulib's nproc module (<nproc.h>).  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
1973
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to this process's affinity.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
1985
1986
1987 \f
1988
/* Condition variable used for waking sleeping threads; its users are
   not visible in this part of the file.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Set to 1 once scm_init_threads has completed.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Default mutex used by scm_dynwind_critical_section when passed #f.  */
static SCM dynwind_critical_section_mutex;
1998
/* Enter a critical section for the extent of the current dynwind
   context: lock MUTEX (or the shared recursive default when MUTEX is
   #f) and block asyncs, undoing both on unwind.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2007
2008 /*** Initialization */
2009
/* Mutex serializing miscellaneous low-level operations.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute object used to create recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2015
/* First-stage threads initialization, run before the rest of Guile is
   up.  Initializes the global pthread primitives, registers the GC
   kind used to mark per-thread state, and turns the calling (main)
   thread into a Guile thread.  BASE is the GC stack base.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  /* Create a GC kind whose objects are traced by `thread_mark'
     (defined earlier in this file); thread structures allocated with
     this kind are scanned precisely by the collector.  */
  thread_gc_kind =
    GC_new_kind (GC_new_free_list (),
                 GC_MAKE_PROC (GC_new_proc (thread_mark), 0),
                 0, 1);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2037
/* Smob type tags for threads, mutexes, and condition variables;
   assigned in scm_init_threads below.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2041
/* Second-stage initialization: create the smob types for threads,
   mutexes and condition variables, install their printers, and finish
   turning the main thread into a Guile thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later, by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2061
2062 void
2063 scm_init_threads_default_dynamic_state ()
2064 {
2065 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2066 scm_i_default_dynamic_state = state;
2067 }
2068
/* Register the Scheme procedures defined in this file; threads.x is
   generated from the SCM_DEFINE forms above.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2074
2075 \f
2076 /* IA64-specific things. */
2077
#ifdef __ia64__
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
/* Return the base of the ia64 register backing store by scanning the
   process's VM regions for the RSE stack; abort if none is found.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract the saved ar.bsp register from signal context CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
/* On GNU/Linux, glibc exports the backing-store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Extract the saved ar.bsp register from ucontext OPAQUE.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
# include <ucontext.h>
/* FreeBSD/ia64 places the backing store at a fixed address.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
/* bspstore plus the dirty-register byte count gives the effective
   ar.bsp at the time the context was captured.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */
2131
2132
2133 /*
2134 Local Variables:
2135 c-file-style: "gnu"
2136 End:
2137 */