/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <assert.h>

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

#include <full-read.h>


\f

/* First some libgc shims. */

/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier.  */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif


#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif

/* Likewise struct GC_stack_base is missing before 7.1.  */
#ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack. */
#endif
};

static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}

static void
GC_unregister_my_thread ()
{
}

#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom.  */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
#endif

static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}

static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested.  */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
#endif /* HAVE_GC_STACK_BASE */


/* Now define with_gc_active and with_gc_inactive.  */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */


\f
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
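
/* Illustrative sketch (not compiled): the two timeout encodings accepted
   by to_timespec above.  A pair is (seconds . microseconds), as returned
   by `gettimeofday', and a flonum is a count of seconds:

     scm_t_timespec ts;
     to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)),
                  &ts);
     // ts.tv_sec == 5, ts.tv_nsec == 250000000

     to_timespec (scm_from_double (5.25), &ts);
     // same result: 5 seconds plus 250000000 nanoseconds
*/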

\f
/*** Queues */

/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
   the risk of false references leading to unbounded retained space as
   described in "Bounding Space Usage of Conservative Garbage Collectors",
   H.J. Boehm, 2001.  */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
 */
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));

          /* GC-robust */
          SCM_SETCDR (c, SCM_EOL);

          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
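
/* Illustrative sketch (not compiled): the queue protocol defined above.
   A queue is a cons whose cdr holds the list of elements and whose car
   points at the last pair, so enqueue is O(1).  `elt1' and `elt2' stand
   for arbitrary SCM values:

     SCM q = make_queue ();
     SCM h1 = enqueue (q, elt1);      // handle for later removal
     SCM h2 = enqueue (q, elt2);
     remqueue (q, h1);                // returns 1; elt1 removed
     SCM front = dequeue (q);         // elt2
     front = dequeue (q);             // SCM_BOOL_F: queue is empty
*/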

/*** Thread smob routines */


static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

\f
/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
 */

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}
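
/* Illustrative sketch (not compiled): the typical calling convention for
   block_self/unblock_from_queue, as used by join-thread and the fat
   mutexes below.  MUTEX is held on entry and the errno result is
   dispatched on return:

     scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
     int err = block_self (t->join_queue, thread, &t->admin_mutex,
                           timeout_ptr);
     if (err == 0)
       ;  // woken by unblock_from_queue; recheck the waited-for condition
     else if (err == ETIMEDOUT)
       ;  // the WAITTIME deadline passed
     else if (err == EINTR)
       ;  // a system async is pending; unlock, SCM_TICK, then retry
     scm_i_pthread_mutex_unlock (&t->admin_mutex);
*/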

\f
/* Getting into and out of guile mode.
 */

/* Key used to attach a cleanup handler to a given thread.  Also, if
   thread-local storage is unavailable, this key is used to retrieve the
   current thread with `pthread_getspecific ()'.  */
scm_i_pthread_key_t scm_i_thread_key;


#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS.  Note that storing the thread-object
   itself in TLS (rather than a pointer to some malloc'd memory) is not
   possible since thread objects may live longer than the actual thread they
   represent.  */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */


static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynwinds = SCM_EOL;
  t.active_asyncs = SCM_EOL;
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;

  if (pipe (t.sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.current_mark_stack_ptr = NULL;
  t.current_mark_stack_limit = NULL;
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  */
  {
    scm_i_thread *t_ptr = &t;

    GC_disable ();
    t_ptr = GC_malloc (sizeof (scm_i_thread));
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}

\f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
                                owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      (scm_t_catch_body) scm_call_0, ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
        {
          fat_mutex *m = SCM_MUTEX_DATA (mutex);

          scm_i_pthread_mutex_lock (&m->lock);

          /* Since MUTEX is in `t->mutexes', T must be its owner.  */
          assert (scm_is_eq (m->owner, t->handle));

          unblock_from_queue (m->waiting);

          scm_i_pthread_mutex_unlock (&m->lock);
        }

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}

static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
  GC_register_my_thread (sb);

  return scm_with_guile (do_thread_exit, v);
}

static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourselves from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;

        /* GC-robust */
        t->next_thread = NULL;

        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if !SCM_USE_NULL_THREADS
  GC_unregister_my_thread ();
#endif
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Returns zero when the thread was already known to Guile; otherwise
   returns 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).  Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly.  */

static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);

#ifdef HAVE_GC_ALLOW_REGISTER_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
          GC_register_my_thread (base);

          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
}

void
scm_init_guile ()
{
  struct GC_stack_base stack_base;

  if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
    scm_i_init_thread_for_guile (&stack_base,
                                 scm_i_default_dynamic_state);
  else
    {
      fprintf (stderr, "Failed to get stack base for current thread.\n");
      exit (1);
    }
}
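
/* Usage sketch (illustrative): an embedding program can put its main
   thread into guile mode once and for all with scm_init_guile, or use
   scm_with_guile (defined below), which also works from threads that
   Guile did not create:

     int
     main (int argc, char **argv)
     {
       scm_init_guile ();
       scm_c_eval_string ("(display \"hello from Guile\\n\")");
       return 0;
     }
*/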

SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}

struct with_guile_args
{
  GC_fn_type func;
  void *data;
  SCM parent;
};

static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}

static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than the point at
         which this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}

static void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  struct with_guile_args args;

  args.func = func;
  args.data = data;
  args.parent = parent;

  return GC_call_with_stack_base (with_guile_and_parent, &args);
}

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  if (t->guile_mode)
    {
      SCM_I_CURRENT_THREAD->guile_mode = 0;
      result = with_gc_inactive (func, data);
      SCM_I_CURRENT_THREAD->guile_mode = 1;
    }
  else
    /* Otherwise we're not in guile mode, so nothing to do.  */
    result = func (data);

  return result;
}
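
/* Usage sketch (illustrative): wrapping a blocking system call in
   scm_without_guile so that the collector need not wait for us.  The
   callback runs outside guile mode and must therefore not allocate from
   or otherwise touch the GC-managed heap.  `struct read_args' and
   `do_blocking_read' are hypothetical:

     struct read_args { int fd; void *buf; size_t len; ssize_t nread; };

     static void *
     do_blocking_read (void *data)
     {
       struct read_args *a = data;
       a->nread = read (a->fd, a->buf, a->len);
       return NULL;
     }

     ...
     scm_without_guile (do_blocking_read, &args);
*/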

\f
/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
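
/* Usage sketch (illustrative): spawning a Guile thread from C with
   scm_spawn_thread.  `worker' and `on_error' are hypothetical; the
   handler receives the throw tag and arguments, as with
   scm_internal_catch:

     static SCM
     worker (void *data)
     {
       return scm_from_int (42);
     }

     static SCM
     on_error (void *data, SCM tag, SCM throw_args)
     {
       return SCM_BOOL_F;
     }

     ...
     SCM thread = scm_spawn_thread (worker, NULL, on_error, NULL);
     SCM result = scm_join_thread (thread);    // => 42
*/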

SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
            "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
            (SCM thread),
            "Asynchronously force the target @var{thread} to terminate. @var{thread} "
            "cannot be the current thread, and if @var{thread} has already terminated or "
            "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
            (SCM thread, SCM proc),
            "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
            "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
            (SCM thread),
            "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME

SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

          /* Check for exit again, since we just released and
             reacquired the admin mutex, before the next block_self
             call (which would block forever if t has already
             exited).  */
          if (t->exited)
            {
              res = t->result;
              break;
            }
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a thread.")
#define FUNC_NAME s_scm_thread_p
{
  return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}

SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
            (SCM flags),
            "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex_with_flags
{
  int unchecked_unlock = 0, external_unlock = 0, recursive = 0;

  SCM ptr = flags;
  while (! scm_is_null (ptr))
    {
      SCM flag = SCM_CAR (ptr);
      if (scm_is_eq (flag, unchecked_unlock_sym))
        unchecked_unlock = 1;
      else if (scm_is_eq (flag, allow_external_unlock_sym))
        external_unlock = 1;
      else if (scm_is_eq (flag, recursive_sym))
        recursive = 1;
      else
        SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
      ptr = SCM_CDR (ptr);
    }
  return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
}
#undef FUNC_NAME
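
/* Usage sketch (illustrative): the flag symbols accepted by make-mutex,
   from C:

     SCM m = scm_make_mutex_with_flags
       (scm_list_2 (scm_from_locale_symbol ("recursive"),
                    scm_from_locale_symbol ("allow-external-unlock")));

   or, equivalently, (make-mutex 'recursive 'allow-external-unlock) from
   Scheme, since the flags are a rest argument.  */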

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME

SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small.  */
              t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
            (SCM m, SCM timeout, SCM owner),
            "Lock @var{mutex}. If the mutex is already locked, the calling thread "
            "blocks until the mutex becomes available. The function returns when "
            "the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
            "a thread already owns will succeed right away and will not block the "
            "thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
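
/* Usage sketch (illustrative): scm_dynwind_lock_mutex inside a dynwind
   context keeps MUTEX locked exactly while the context is entered, and
   releases it even on non-local exit:

     SCM mutex = scm_make_mutex ();

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mutex);
     ... code that may throw ...
     scm_dynwind_end ();     // mutex unlocked here, or on a throw
*/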

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}. If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;   /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              t->mutexes = scm_delq_x (mutex, t->mutexes);
              m->owner = unblock_from_queue (m->waiting);
            }

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          t->mutexes = scm_delq_x (mutex, t->mutexes);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}

SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
            (SCM mx, SCM cond, SCM timeout),
            "Unlocks @var{mutex} if the calling thread owns the lock on "
            "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour.  Once a mutex has been unlocked, "
            "one thread blocked on @var{mutex} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
            (SCM mx),
            "Returns @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until @var{cond-var} has been signalled.  While waiting, "
            "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
            "is locked again when this function returns.  When @var{time} is given, "
            "it specifies a point in time where the waiting should be aborted.  It "
            "can be either an integer as returned by @code{current-time} or a pair "
            "as returned by @code{gettimeofday}.  When the waiting is aborted the "
            "mutex is locked and @code{#f} is returned.  When the condition "
            "variable is in fact signalled, the mutex is also locked and @code{#t} "
            "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
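
/* Usage sketch (illustrative): the canonical wait loop from C, rechecking
   the predicate since wakeups can be spurious or stale.  `ready' is a
   hypothetical flag protected by MUTEX:

     scm_lock_mutex (mutex);
     while (!ready)
       scm_timed_wait_condition_variable (cv, mutex, SCM_UNDEFINED);
     ...
     scm_unlock_mutex (mutex);

   and the corresponding producer side:

     scm_lock_mutex (mutex);
     ready = 1;
     scm_signal_condition_variable (cv);
     scm_unlock_mutex (mutex);
*/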

static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


\f
/*** Select */

struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
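
/* Usage sketch (illustrative): scm_std_select is a drop-in replacement
   for select(2) in guile-mode threads; a pending system async aborts
   the wait with errno set to EINTR.  `sock' is a hypothetical file
   descriptor:

     fd_set rfds;
     struct timeval tv = { 1, 0 };      // one-second timeout

     FD_ZERO (&rfds);
     FD_SET (sock, &rfds);
     int n = scm_std_select (sock + 1, &rfds, NULL, NULL, &tv);
     if (n < 0 && errno == EINTR)
       ;  // interrupted so that pending asyncs can run
*/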

/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable to not run procedures related to mutex and condition
   variables within `GC_do_blocking ()' since, (i) the GC can operate even
   without it, and (ii) the only potential gain would be GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/