avoid running GC when SCM_I_CURRENT_THREAD is unset
libguile/threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include <assert.h>
42
43 #include "libguile/validate.h"
44 #include "libguile/root.h"
45 #include "libguile/eval.h"
46 #include "libguile/async.h"
47 #include "libguile/ports.h"
48 #include "libguile/threads.h"
49 #include "libguile/dynwind.h"
50 #include "libguile/iselect.h"
51 #include "libguile/fluids.h"
52 #include "libguile/continuations.h"
53 #include "libguile/gc.h"
54 #include "libguile/init.h"
55 #include "libguile/scmsigs.h"
56 #include "libguile/strings.h"
57 #include "libguile/weaks.h"
58
59 #ifdef __MINGW32__
60 #ifndef ETIMEDOUT
61 # define ETIMEDOUT WSAETIMEDOUT
62 #endif
63 # include <fcntl.h>
64 # include <process.h>
65 # define pipe(fd) _pipe (fd, 256, O_BINARY)
66 #endif /* __MINGW32__ */
67
68 #include <full-read.h>
69
70
71 \f
72
73 /* First some libgc shims. */
74
75 /* Make sure GC_fn_type is defined; it is missing from the public
76 headers of GC 7.1 and earlier. */
77 #ifndef HAVE_GC_FN_TYPE
78 typedef void * (* GC_fn_type) (void *);
79 #endif
80
81
82 #ifndef GC_SUCCESS
83 #define GC_SUCCESS 0
84 #endif
85
86 #ifndef GC_UNIMPLEMENTED
87 #define GC_UNIMPLEMENTED 3
88 #endif
89
90 /* Likewise struct GC_stack_base is missing before 7.1. */
91 #ifndef HAVE_GC_STACK_BASE
92 struct GC_stack_base {
93 void * mem_base; /* Base of memory stack. */
94 #ifdef __ia64__
95 void * reg_base; /* Base of separate register stack. */
96 #endif
97 };
98
99 static int
100 GC_register_my_thread (struct GC_stack_base *sb)
101 {
102 return GC_UNIMPLEMENTED;
103 }
104
105 static void
106 GC_unregister_my_thread ()
107 {
108 }
109
110 #if !SCM_USE_PTHREAD_THREADS
111 /* No threads; we can just use GC_stackbottom. */
112 static void *
113 get_thread_stack_base ()
114 {
115 return GC_stackbottom;
116 }
117
118 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
119 && defined PTHREAD_ATTR_GETSTACK_WORKS
120 /* This method is for GNU/Linux and perhaps some other systems.
121 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
122 available on them. */
123 static void *
124 get_thread_stack_base ()
125 {
126 pthread_attr_t attr;
127 void *start, *end;
128 size_t size;
129
130 pthread_getattr_np (pthread_self (), &attr);
131 pthread_attr_getstack (&attr, &start, &size);
132 end = (char *)start + size;
133
134 #if SCM_STACK_GROWS_UP
135 return start;
136 #else
137 return end;
138 #endif
139 }
140
141 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
142 /* This method is for MacOS X.
143    It'd be nice if there were some documentation on pthread_get_stackaddr_np,
144 but as of 2006 there's nothing obvious at apple.com. */
145 static void *
146 get_thread_stack_base ()
147 {
148 return pthread_get_stackaddr_np (pthread_self ());
149 }
150
151 #else
152 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
153 #endif
154
155 static int
156 GC_get_stack_base (struct GC_stack_base *stack_base)
157 {
158 stack_base->mem_base = get_thread_stack_base ();
159 #ifdef __ia64__
160 /* Calculate and store off the base of this thread's register
161 backing store (RBS). Unfortunately our implementation(s) of
162 scm_ia64_register_backing_store_base are only reliable for the
163 main thread. For other threads, therefore, find out the current
164 top of the RBS, and use that as a maximum. */
165 stack_base->reg_base = scm_ia64_register_backing_store_base ();
166 {
167 ucontext_t ctx;
168 void *bsp;
169 getcontext (&ctx);
170 bsp = scm_ia64_ar_bsp (&ctx);
171 if (stack_base->reg_base > bsp)
172 stack_base->reg_base = bsp;
173 }
174 #endif
175 return GC_SUCCESS;
176 }
177
178 static void *
179 GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
180 {
181 struct GC_stack_base stack_base;
182
183 stack_base.mem_base = (void*)&stack_base;
184 #ifdef __ia64__
185 /* FIXME: Untested. */
186 {
187 ucontext_t ctx;
188 getcontext (&ctx);
189 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
190 }
191 #endif
192
193 return fn (&stack_base, arg);
194 }
195 #endif /* HAVE_GC_STACK_BASE */
196
197
198 /* Now define with_gc_active and with_gc_inactive. */
199
200 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
201
202 /* We have a sufficiently new libgc (7.2 or newer). */
203
204 static void*
205 with_gc_inactive (GC_fn_type func, void *data)
206 {
207 return GC_do_blocking (func, data);
208 }
209
210 static void*
211 with_gc_active (GC_fn_type func, void *data)
212 {
213 return GC_call_with_gc_active (func, data);
214 }
215
216 #else
217
218 /* libgc not new enough, so never actually deactivate GC.
219
220 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
221 GC_call_with_gc_active. */
222
223 static void*
224 with_gc_inactive (GC_fn_type func, void *data)
225 {
226 return func (data);
227 }
228
229 static void*
230 with_gc_active (GC_fn_type func, void *data)
231 {
232 return func (data);
233 }
234
235 #endif /* HAVE_GC_DO_BLOCKING */
236
237
238 \f
239 static void
240 to_timespec (SCM t, scm_t_timespec *waittime)
241 {
242 if (scm_is_pair (t))
243 {
244 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
245 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
246 }
247 else
248 {
249 double time = scm_to_double (t);
250 double sec = scm_c_truncate (time);
251
252 waittime->tv_sec = (long) sec;
253 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
254 }
255 }
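/* A short illustration (not part of the build): both calls below fill
   WT with the same 5.25-second timeout, first as a (seconds
   . microseconds) pair as returned by `gettimeofday', then as a plain
   real number of seconds:

     scm_t_timespec wt;
     to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)), &wt);
     to_timespec (scm_from_double (5.25), &wt);
*/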
256
257 \f
258 /*** Queues */
259
260 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
261 the risk of false references leading to unbounded retained space as
262 described in "Bounding Space Usage of Conservative Garbage Collectors",
263 H.J. Boehm, 2001. */
264
265 /* Make an empty queue data structure.
266 */
267 static SCM
268 make_queue ()
269 {
270 return scm_cons (SCM_EOL, SCM_EOL);
271 }
272
273 /* Put T at the back of Q and return a handle that can be used with
274 remqueue to remove T from Q again.
275 */
276 static SCM
277 enqueue (SCM q, SCM t)
278 {
279 SCM c = scm_cons (t, SCM_EOL);
280 SCM_CRITICAL_SECTION_START;
281 if (scm_is_null (SCM_CDR (q)))
282 SCM_SETCDR (q, c);
283 else
284 SCM_SETCDR (SCM_CAR (q), c);
285 SCM_SETCAR (q, c);
286 SCM_CRITICAL_SECTION_END;
287 return c;
288 }
289
290 /* Remove the element that the handle C refers to from the queue Q. C
291 must have been returned from a call to enqueue. The return value
292 is zero when the element referred to by C has already been removed.
293 Otherwise, 1 is returned.
294 */
295 static int
296 remqueue (SCM q, SCM c)
297 {
298 SCM p, prev = q;
299 SCM_CRITICAL_SECTION_START;
300 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
301 {
302 if (scm_is_eq (p, c))
303 {
304 if (scm_is_eq (c, SCM_CAR (q)))
305 SCM_SETCAR (q, SCM_CDR (c));
306 SCM_SETCDR (prev, SCM_CDR (c));
307
308 /* GC-robust */
309 SCM_SETCDR (c, SCM_EOL);
310
311 SCM_CRITICAL_SECTION_END;
312 return 1;
313 }
314 prev = p;
315 }
316 SCM_CRITICAL_SECTION_END;
317 return 0;
318 }
319
320 /* Remove the front-most element from the queue Q and return it.
321 Return SCM_BOOL_F when Q is empty.
322 */
323 static SCM
324 dequeue (SCM q)
325 {
326 SCM c;
327 SCM_CRITICAL_SECTION_START;
328 c = SCM_CDR (q);
329 if (scm_is_null (c))
330 {
331 SCM_CRITICAL_SECTION_END;
332 return SCM_BOOL_F;
333 }
334 else
335 {
336 SCM_SETCDR (q, SCM_CDR (c));
337 if (scm_is_null (SCM_CDR (q)))
338 SCM_SETCAR (q, SCM_EOL);
339 SCM_CRITICAL_SECTION_END;
340
341 /* GC-robust */
342 SCM_SETCDR (c, SCM_EOL);
343
344 return SCM_CAR (c);
345 }
346 }
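/* Illustrative round trip through the queue primitives above; ITEM
   stands for any SCM value.  The real callers are the wait queues
   used by block_self and the fat mutexes below.

     SCM q = make_queue ();
     SCM handle = enqueue (q, item);
     int removed = remqueue (q, handle);   1 if ITEM was still queued
     SCM front = dequeue (q);              SCM_BOOL_F when Q is empty
*/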
347
348 /*** Thread smob routines */
349
350
351 static int
352 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
353 {
354   /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
355 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
356 the struct case, hence we go via a union, and extract according to the
357 size of pthread_t. */
358 union {
359 scm_i_pthread_t p;
360 unsigned short us;
361 unsigned int ui;
362 unsigned long ul;
363 scm_t_uintmax um;
364 } u;
365 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
366 scm_i_pthread_t p = t->pthread;
367 scm_t_uintmax id;
368 u.p = p;
369 if (sizeof (p) == sizeof (unsigned short))
370 id = u.us;
371 else if (sizeof (p) == sizeof (unsigned int))
372 id = u.ui;
373 else if (sizeof (p) == sizeof (unsigned long))
374 id = u.ul;
375 else
376 id = u.um;
377
378 scm_puts ("#<thread ", port);
379 scm_uintprint (id, 10, port);
380 scm_puts (" (", port);
381 scm_uintprint ((scm_t_bits)t, 16, port);
382 scm_puts (")>", port);
383 return 1;
384 }
385
386 \f
387 /*** Blocking on queues. */
388
389 /* See also scm_i_queue_async_cell for how such a block is
390    interrupted.
391 */
392
393 /* Put the current thread on QUEUE and go to sleep, waiting for it to
394 be woken up by a call to 'unblock_from_queue', or to be
395 interrupted. Upon return of this function, the current thread is
396 no longer on QUEUE, even when the sleep has been interrupted.
397
398 The caller of block_self must hold MUTEX. It will be atomically
399 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
400
401 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
402 as MUTEX is needed.
403
404 When WAITTIME is not NULL, the sleep will be aborted at that time.
405
406 The return value of block_self is an errno value. It will be zero
407 when the sleep has been successfully completed by a call to
408 unblock_from_queue, EINTR when it has been interrupted by the
409 delivery of a system async, and ETIMEDOUT when the timeout has
410 expired.
411
412 The system asyncs themselves are not executed by block_self.
413 */
414 static int
415 block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
416 const scm_t_timespec *waittime)
417 {
418 scm_i_thread *t = SCM_I_CURRENT_THREAD;
419 SCM q_handle;
420 int err;
421
422 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
423 err = EINTR;
424 else
425 {
426 t->block_asyncs++;
427 q_handle = enqueue (queue, t->handle);
428 if (waittime == NULL)
429 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
430 else
431 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
432
433 /* When we are still on QUEUE, we have been interrupted. We
434 report this only when no other error (such as a timeout) has
435 happened above.
436 */
437 if (remqueue (queue, q_handle) && err == 0)
438 err = EINTR;
439 t->block_asyncs--;
440 scm_i_reset_sleep (t);
441 }
442
443 return err;
444 }
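/* The calling pattern, as used by scm_join_thread_timed and
   fat_mutex_lock below (a sketch, not new API): the caller holds
   MUTEX, calls block_self in a loop, and releases the mutex around
   SCM_TICK so that pending asyncs can run:

     scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
     while (!t->exited)
       {
         int err = block_self (t->join_queue, thread, &t->admin_mutex,
                               timeout_ptr);
         if (err == ETIMEDOUT)
           break;
         scm_i_pthread_mutex_unlock (&t->admin_mutex);
         SCM_TICK;
         scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
       }
     scm_i_pthread_mutex_unlock (&t->admin_mutex);
*/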
445
446 /* Wake up the first thread on QUEUE, if any. The awoken thread is
447 returned, or #f if the queue was empty.
448 */
449 static SCM
450 unblock_from_queue (SCM queue)
451 {
452 SCM thread = dequeue (queue);
453 if (scm_is_true (thread))
454 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
455 return thread;
456 }
457
458 \f
459 /* Getting into and out of guile mode.
460 */
461
462 /* Key used to attach a cleanup handler to a given thread. Also, if
463 thread-local storage is unavailable, this key is used to retrieve the
464 current thread with `pthread_getspecific ()'. */
465 scm_i_pthread_key_t scm_i_thread_key;
466
467
468 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
469
470 /* When thread-local storage (TLS) is available, a pointer to the
471 current-thread object is kept in TLS. Note that storing the thread-object
472 itself in TLS (rather than a pointer to some malloc'd memory) is not
473 possible since thread objects may live longer than the actual thread they
474 represent. */
475 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
476
477 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
478
479
480 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
481 static scm_i_thread *all_threads = NULL;
482 static int thread_count;
483
484 static SCM scm_i_default_dynamic_state;
485
486 /* Perform first stage of thread initialisation, in non-guile mode.
487 */
488 static void
489 guilify_self_1 (struct GC_stack_base *base)
490 {
491 scm_i_thread t;
492
493 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
494 before allocating anything in this thread, because allocation could
495 cause GC to run, and GC could cause finalizers, which could invoke
496 Scheme functions, which need the current thread to be set. */
497
498 t.pthread = scm_i_pthread_self ();
499 t.handle = SCM_BOOL_F;
500 t.result = SCM_BOOL_F;
501 t.cleanup_handler = SCM_BOOL_F;
502 t.mutexes = SCM_EOL;
503 t.held_mutex = NULL;
504 t.join_queue = SCM_EOL;
505 t.dynamic_state = SCM_BOOL_F;
506 t.dynwinds = SCM_EOL;
507 t.active_asyncs = SCM_EOL;
508 t.block_asyncs = 1;
509 t.pending_asyncs = 1;
510 t.critical_section_level = 0;
511 t.base = base->mem_base;
512 #ifdef __ia64__
513   t.register_backing_store_base = base->reg_base;
514 #endif
515 t.continuation_root = SCM_EOL;
516 t.continuation_base = t.base;
517 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
518 t.sleep_mutex = NULL;
519 t.sleep_object = SCM_BOOL_F;
520 t.sleep_fd = -1;
521
522 if (pipe (t.sleep_pipe) != 0)
523 /* FIXME: Error conditions during the initialization phase are handled
524 gracelessly since public functions such as `scm_init_guile ()'
525 currently have type `void'. */
526 abort ();
527
528 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
529 t.current_mark_stack_ptr = NULL;
530 t.current_mark_stack_limit = NULL;
531 t.canceled = 0;
532 t.exited = 0;
533 t.guile_mode = 0;
534
535 /* The switcheroo. */
536 {
537 scm_i_thread *t_ptr = &t;
538
539 GC_disable ();
540 t_ptr = GC_malloc (sizeof (scm_i_thread));
541 memcpy (t_ptr, &t, sizeof t);
542
543 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
544
545 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
546 /* Cache the current thread in TLS for faster lookup. */
547 scm_i_current_thread = t_ptr;
548 #endif
549
550 scm_i_pthread_mutex_lock (&thread_admin_mutex);
551 t_ptr->next_thread = all_threads;
552 all_threads = t_ptr;
553 thread_count++;
554 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
555
556 GC_enable ();
557 }
558 }
559
560 /* Perform second stage of thread initialisation, in guile mode.
561 */
562 static void
563 guilify_self_2 (SCM parent)
564 {
565 scm_i_thread *t = SCM_I_CURRENT_THREAD;
566
567 t->guile_mode = 1;
568
569 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
570
571 t->continuation_root = scm_cons (t->handle, SCM_EOL);
572 t->continuation_base = t->base;
573 t->vm = SCM_BOOL_F;
574
575 if (scm_is_true (parent))
576 t->dynamic_state = scm_make_dynamic_state (parent);
577 else
578 t->dynamic_state = scm_i_make_initial_dynamic_state ();
579
580 t->join_queue = make_queue ();
581 t->block_asyncs = 0;
582 }
583
584 \f
585 /*** Fat mutexes */
586
587 /* We implement our own mutex type since we want them to be 'fair', we
588 want to do fancy things while waiting for them (like running
589 asyncs) and we might want to add things that are nice for
590 debugging.
591 */
592
593 typedef struct {
594 scm_i_pthread_mutex_t lock;
595 SCM owner;
596 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
597
598 int recursive; /* allow recursive locking? */
599 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
600 int allow_external_unlock; /* is it an error to unlock a mutex that is not
601 owned by the current thread? */
602
603 SCM waiting; /* the threads waiting for this mutex. */
604 } fat_mutex;
605
606 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
607 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
608
609 /* Perform thread tear-down, in guile mode.
610 */
611 static void *
612 do_thread_exit (void *v)
613 {
614 scm_i_thread *t = (scm_i_thread *) v;
615
616 if (!scm_is_false (t->cleanup_handler))
617 {
618 SCM ptr = t->cleanup_handler;
619
620 t->cleanup_handler = SCM_BOOL_F;
621 t->result = scm_internal_catch (SCM_BOOL_T,
622 (scm_t_catch_body) scm_call_0, ptr,
623 scm_handle_by_message_noexit, NULL);
624 }
625
626 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
627
628 t->exited = 1;
629 close (t->sleep_pipe[0]);
630 close (t->sleep_pipe[1]);
631 while (scm_is_true (unblock_from_queue (t->join_queue)))
632 ;
633
634 while (!scm_is_null (t->mutexes))
635 {
636 SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
637
638 if (!SCM_UNBNDP (mutex))
639 {
640 fat_mutex *m = SCM_MUTEX_DATA (mutex);
641
642 scm_i_pthread_mutex_lock (&m->lock);
643
644 /* Since MUTEX is in `t->mutexes', T must be its owner. */
645 assert (scm_is_eq (m->owner, t->handle));
646
647 unblock_from_queue (m->waiting);
648
649 scm_i_pthread_mutex_unlock (&m->lock);
650 }
651
652 t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
653 }
654
655 scm_i_pthread_mutex_unlock (&t->admin_mutex);
656
657 return NULL;
658 }
659
660 static void *
661 do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
662 {
663 /* Won't hurt if we are already registered. */
664 GC_register_my_thread (sb);
665
666 return scm_with_guile (do_thread_exit, v);
667 }
668
669 static void
670 on_thread_exit (void *v)
671 {
672 /* This handler is executed in non-guile mode. */
673 scm_i_thread *t = (scm_i_thread *) v, **tp;
674
675 /* If this thread was cancelled while doing a cond wait, it will
676 still have a mutex locked, so we unlock it here. */
677 if (t->held_mutex)
678 {
679 scm_i_pthread_mutex_unlock (t->held_mutex);
680 t->held_mutex = NULL;
681 }
682
683 /* Reinstate the current thread for purposes of scm_with_guile
684 guile-mode cleanup handlers. Only really needed in the non-TLS
685 case but it doesn't hurt to be consistent. */
686 scm_i_pthread_setspecific (scm_i_thread_key, t);
687
688 /* Ensure the signal handling thread has been launched, because we might be
689 shutting it down. */
690 scm_i_ensure_signal_delivery_thread ();
691
692 /* Scheme-level thread finalizers and other cleanup needs to happen in
693 guile mode. */
694 GC_call_with_stack_base (do_thread_exit_trampoline, t);
695
696   /* Removing ourselves from the list of all threads needs to happen in
697 non-guile mode since all SCM values on our stack become
698 unprotected once we are no longer in the list. */
699 scm_i_pthread_mutex_lock (&thread_admin_mutex);
700 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
701 if (*tp == t)
702 {
703 *tp = t->next_thread;
704
705 /* GC-robust */
706 t->next_thread = NULL;
707
708 break;
709 }
710 thread_count--;
711
712 /* If there's only one other thread, it could be the signal delivery
713 thread, so we need to notify it to shut down by closing its read pipe.
714 If it's not the signal delivery thread, then closing the read pipe isn't
715 going to hurt. */
716 if (thread_count <= 1)
717 scm_i_close_signal_pipe ();
718
719 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
720
721 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
722
723 GC_unregister_my_thread ();
724 }
725
726 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
727
728 static void
729 init_thread_key (void)
730 {
731 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
732 }
733
734 /* Perform any initializations necessary to make the current thread
735 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
736 if necessary.
737
738 BASE is the stack base to use with GC.
739
740    PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
741    which case the default dynamic state is used.
742
743    Returns zero when the thread was known to Guile already; otherwise
744    returns 1.
745
746 Note that it could be the case that the thread was known
747 to Guile, but not in guile mode (because we are within a
748 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
749 be sure. New threads are put into guile mode implicitly. */
750
751 static int
752 scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
753 {
754 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
755
756 if (SCM_I_CURRENT_THREAD)
757 {
758 /* Thread is already known to Guile.
759 */
760 return 0;
761 }
762 else
763 {
764 /* This thread has not been guilified yet.
765 */
766
767 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
768 if (scm_initialized_p == 0)
769 {
770 /* First thread ever to enter Guile. Run the full
771 initialization.
772 */
773 scm_i_init_guile (base);
774
775 /* Allow other threads to come in later. */
776 GC_allow_register_threads ();
777
778 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
779 }
780 else
781 {
782 /* Guile is already initialized, but this thread enters it for
783 the first time. Only initialize this thread.
784 */
785 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
786
787 /* Register this thread with libgc. */
788 GC_register_my_thread (base);
789
790 guilify_self_1 (base);
791 guilify_self_2 (parent);
792 }
793 return 1;
794 }
795 }
796
797 void
798 scm_init_guile ()
799 {
800 struct GC_stack_base stack_base;
801
802 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
803 scm_i_init_thread_for_guile (&stack_base,
804 scm_i_default_dynamic_state);
805 else
806 {
807 fprintf (stderr, "Failed to get stack base for current thread.\n");
808 exit (1);
809 }
810 }
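/* Sketch of use from an embedding program (illustrative only;
   scm_c_eval_string is part of Guile's public API):

     int
     main (int argc, char **argv)
     {
       scm_init_guile ();
       scm_c_eval_string ("(display \"hello from Guile\\n\")");
       return 0;
     }
*/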
811
812 SCM_UNUSED static void
813 scm_leave_guile_cleanup (void *x)
814 {
815 on_thread_exit (SCM_I_CURRENT_THREAD);
816 }
817
818 struct with_guile_args
819 {
820 GC_fn_type func;
821 void *data;
822 SCM parent;
823 };
824
825 static void *
826 with_guile_trampoline (void *data)
827 {
828 struct with_guile_args *args = data;
829
830 return scm_c_with_continuation_barrier (args->func, args->data);
831 }
832
833 static void *
834 with_guile_and_parent (struct GC_stack_base *base, void *data)
835 {
836 void *res;
837 int new_thread;
838 scm_i_thread *t;
839 struct with_guile_args *args = data;
840
841 new_thread = scm_i_init_thread_for_guile (base, args->parent);
842 t = SCM_I_CURRENT_THREAD;
843 if (new_thread)
844 {
845 /* We are in Guile mode. */
846 assert (t->guile_mode);
847
848 res = scm_c_with_continuation_barrier (args->func, args->data);
849
850 /* Leave Guile mode. */
851 t->guile_mode = 0;
852 }
853 else if (t->guile_mode)
854 {
855 /* Already in Guile mode. */
856 res = scm_c_with_continuation_barrier (args->func, args->data);
857 }
858 else
859 {
860 /* We are not in Guile mode, either because we are not within a
861 scm_with_guile, or because we are within a scm_without_guile.
862
863 This call to scm_with_guile() could happen from anywhere on the
864 stack, and in particular lower on the stack than when it was
865 when this thread was first guilified. Thus, `base' must be
866 updated. */
867 #if SCM_STACK_GROWS_UP
868 if (SCM_STACK_PTR (base->mem_base) < t->base)
869 t->base = SCM_STACK_PTR (base->mem_base);
870 #else
871 if (SCM_STACK_PTR (base->mem_base) > t->base)
872 t->base = SCM_STACK_PTR (base->mem_base);
873 #endif
874
875 t->guile_mode = 1;
876 res = with_gc_active (with_guile_trampoline, args);
877 t->guile_mode = 0;
878 }
879 return res;
880 }
881
882 static void *
883 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
884 {
885 struct with_guile_args args;
886
887 args.func = func;
888 args.data = data;
889 args.parent = parent;
890
891 return GC_call_with_stack_base (with_guile_and_parent, &args);
892 }
893
894 void *
895 scm_with_guile (void *(*func)(void *), void *data)
896 {
897 return scm_i_with_guile_and_parent (func, data,
898 scm_i_default_dynamic_state);
899 }
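/* Sketch: calling into Guile from a thread that Guile did not create.
   FUNC runs in guile mode, behind a continuation barrier:

     static void *
     in_guile (void *data)
     {
       scm_c_eval_string ((const char *) data);
       return NULL;
     }

     scm_with_guile (in_guile, (void *) "(display \"hi\\n\")");
*/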
900
901 void *
902 scm_without_guile (void *(*func)(void *), void *data)
903 {
904 void *result;
905 scm_i_thread *t = SCM_I_CURRENT_THREAD;
906
907 if (t->guile_mode)
908 {
909 SCM_I_CURRENT_THREAD->guile_mode = 0;
910 result = with_gc_inactive (func, data);
911 SCM_I_CURRENT_THREAD->guile_mode = 1;
912 }
913 else
914 /* Otherwise we're not in guile mode, so nothing to do. */
915 result = func (data);
916
917 return result;
918 }
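/* Sketch: wrapping a blocking system call so that GC in other threads
   is not held up while we wait; this is the same pattern scm_std_select
   uses below.  `struct read_args' is a hypothetical holder for
   fd/buf/len/res:

     static void *
     do_read (void *data)
     {
       struct read_args *a = data;
       a->res = read (a->fd, a->buf, a->len);
       return NULL;
     }

     scm_without_guile (do_read, &args);
*/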
919
920 \f
921 /*** Thread creation */
922
923 typedef struct {
924 SCM parent;
925 SCM thunk;
926 SCM handler;
927 SCM thread;
928 scm_i_pthread_mutex_t mutex;
929 scm_i_pthread_cond_t cond;
930 } launch_data;
931
932 static void *
933 really_launch (void *d)
934 {
935 launch_data *data = (launch_data *)d;
936 SCM thunk = data->thunk, handler = data->handler;
937 scm_i_thread *t;
938
939 t = SCM_I_CURRENT_THREAD;
940
941 scm_i_scm_pthread_mutex_lock (&data->mutex);
942 data->thread = scm_current_thread ();
943 scm_i_pthread_cond_signal (&data->cond);
944 scm_i_pthread_mutex_unlock (&data->mutex);
945
946 if (SCM_UNBNDP (handler))
947 t->result = scm_call_0 (thunk);
948 else
949 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
950
951 return 0;
952 }
953
954 static void *
955 launch_thread (void *d)
956 {
957 launch_data *data = (launch_data *)d;
958 scm_i_pthread_detach (scm_i_pthread_self ());
959 scm_i_with_guile_and_parent (really_launch, d, data->parent);
960 return NULL;
961 }
962
963 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
964 (SCM thunk, SCM handler),
965 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
966 "returning a new thread object representing the thread. The procedure\n"
967 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
968 "\n"
969 "When @var{handler} is specified, then @var{thunk} is called from\n"
970 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
971 "handler. This catch is established inside the continuation barrier.\n"
972 "\n"
973 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
974 "the @emph{exit value} of the thread and the thread is terminated.")
975 #define FUNC_NAME s_scm_call_with_new_thread
976 {
977 launch_data data;
978 scm_i_pthread_t id;
979 int err;
980
981 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
982 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
983 handler, SCM_ARG2, FUNC_NAME);
984
985 data.parent = scm_current_dynamic_state ();
986 data.thunk = thunk;
987 data.handler = handler;
988 data.thread = SCM_BOOL_F;
989 scm_i_pthread_mutex_init (&data.mutex, NULL);
990 scm_i_pthread_cond_init (&data.cond, NULL);
991
992 scm_i_scm_pthread_mutex_lock (&data.mutex);
993 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
994 if (err)
995 {
996 scm_i_pthread_mutex_unlock (&data.mutex);
997 errno = err;
998 scm_syserror (NULL);
999 }
1000 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1001 scm_i_pthread_mutex_unlock (&data.mutex);
1002
1003 return data.thread;
1004 }
1005 #undef FUNC_NAME
1006
1007 typedef struct {
1008 SCM parent;
1009 scm_t_catch_body body;
1010 void *body_data;
1011 scm_t_catch_handler handler;
1012 void *handler_data;
1013 SCM thread;
1014 scm_i_pthread_mutex_t mutex;
1015 scm_i_pthread_cond_t cond;
1016 } spawn_data;
1017
1018 static void *
1019 really_spawn (void *d)
1020 {
1021 spawn_data *data = (spawn_data *)d;
1022 scm_t_catch_body body = data->body;
1023 void *body_data = data->body_data;
1024 scm_t_catch_handler handler = data->handler;
1025 void *handler_data = data->handler_data;
1026 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1027
1028 scm_i_scm_pthread_mutex_lock (&data->mutex);
1029 data->thread = scm_current_thread ();
1030 scm_i_pthread_cond_signal (&data->cond);
1031 scm_i_pthread_mutex_unlock (&data->mutex);
1032
1033 if (handler == NULL)
1034 t->result = body (body_data);
1035 else
1036 t->result = scm_internal_catch (SCM_BOOL_T,
1037 body, body_data,
1038 handler, handler_data);
1039
1040 return 0;
1041 }
1042
1043 static void *
1044 spawn_thread (void *d)
1045 {
1046 spawn_data *data = (spawn_data *)d;
1047 scm_i_pthread_detach (scm_i_pthread_self ());
1048 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1049 return NULL;
1050 }
1051
1052 SCM
1053 scm_spawn_thread (scm_t_catch_body body, void *body_data,
1054 scm_t_catch_handler handler, void *handler_data)
1055 {
1056 spawn_data data;
1057 scm_i_pthread_t id;
1058 int err;
1059
1060 data.parent = scm_current_dynamic_state ();
1061 data.body = body;
1062 data.body_data = body_data;
1063 data.handler = handler;
1064 data.handler_data = handler_data;
1065 data.thread = SCM_BOOL_F;
1066 scm_i_pthread_mutex_init (&data.mutex, NULL);
1067 scm_i_pthread_cond_init (&data.cond, NULL);
1068
1069 scm_i_scm_pthread_mutex_lock (&data.mutex);
1070 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1071 if (err)
1072 {
1073 scm_i_pthread_mutex_unlock (&data.mutex);
1074 errno = err;
1075 scm_syserror (NULL);
1076 }
1077 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1078 scm_i_pthread_mutex_unlock (&data.mutex);
1079
1080 return data.thread;
1081 }
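/* Sketch of C-level thread creation; BODY has the scm_t_catch_body
   signature, and a NULL handler means no catch is installed around
   BODY (see really_spawn above):

     static SCM
     body (void *data)
     {
       return scm_from_int (42);
     }

     SCM t = scm_spawn_thread (body, NULL, NULL, NULL);
     SCM result = scm_join_thread (t);   the SCM integer 42
*/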
1082
1083 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1084 (),
1085 "Move the calling thread to the end of the scheduling queue.")
1086 #define FUNC_NAME s_scm_yield
1087 {
1088 return scm_from_bool (scm_i_sched_yield ());
1089 }
1090 #undef FUNC_NAME
1091
1092 SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1093 (SCM thread),
1094 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1095 "cannot be the current thread, and if @var{thread} has already terminated or "
1096 "been signaled to terminate, this function is a no-op.")
1097 #define FUNC_NAME s_scm_cancel_thread
1098 {
1099 scm_i_thread *t = NULL;
1100
1101 SCM_VALIDATE_THREAD (1, thread);
1102 t = SCM_I_THREAD_DATA (thread);
1103 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1104 if (!t->canceled)
1105 {
1106 t->canceled = 1;
1107 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1108 scm_i_pthread_cancel (t->pthread);
1109 }
1110 else
1111 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1112
1113 return SCM_UNSPECIFIED;
1114 }
1115 #undef FUNC_NAME
1116
1117 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1118 (SCM thread, SCM proc),
1119 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1120 "This handler will be called when the thread exits.")
1121 #define FUNC_NAME s_scm_set_thread_cleanup_x
1122 {
1123 scm_i_thread *t;
1124
1125 SCM_VALIDATE_THREAD (1, thread);
1126 if (!scm_is_false (proc))
1127 SCM_VALIDATE_THUNK (2, proc);
1128
1129 t = SCM_I_THREAD_DATA (thread);
1130 scm_i_pthread_mutex_lock (&t->admin_mutex);
1131
1132 if (!(t->exited || t->canceled))
1133 t->cleanup_handler = proc;
1134
1135 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1136
1137 return SCM_UNSPECIFIED;
1138 }
1139 #undef FUNC_NAME
1140
1141 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1142 (SCM thread),
1143 "Return the cleanup handler installed for the thread @var{thread}.")
1144 #define FUNC_NAME s_scm_thread_cleanup
1145 {
1146 scm_i_thread *t;
1147 SCM ret;
1148
1149 SCM_VALIDATE_THREAD (1, thread);
1150
1151 t = SCM_I_THREAD_DATA (thread);
1152 scm_i_pthread_mutex_lock (&t->admin_mutex);
1153 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1154 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1155
1156 return ret;
1157 }
1158 #undef FUNC_NAME
1159
1160 SCM scm_join_thread (SCM thread)
1161 {
1162 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1163 }
1164
1165 SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1166 (SCM thread, SCM timeout, SCM timeoutval),
1167 "Suspend execution of the calling thread until the target @var{thread} "
1168 "terminates, unless the target @var{thread} has already terminated. ")
1169 #define FUNC_NAME s_scm_join_thread_timed
1170 {
1171 scm_i_thread *t;
1172 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1173 SCM res = SCM_BOOL_F;
1174
1175 if (! (SCM_UNBNDP (timeoutval)))
1176 res = timeoutval;
1177
1178 SCM_VALIDATE_THREAD (1, thread);
1179 if (scm_is_eq (scm_current_thread (), thread))
1180 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
1181
1182 t = SCM_I_THREAD_DATA (thread);
1183 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1184
1185 if (! SCM_UNBNDP (timeout))
1186 {
1187 to_timespec (timeout, &ctimeout);
1188 timeout_ptr = &ctimeout;
1189 }
1190
1191 if (t->exited)
1192 res = t->result;
1193 else
1194 {
1195 while (1)
1196 {
1197 int err = block_self (t->join_queue, thread, &t->admin_mutex,
1198 timeout_ptr);
1199 if (err == 0)
1200 {
1201 if (t->exited)
1202 {
1203 res = t->result;
1204 break;
1205 }
1206 }
1207 else if (err == ETIMEDOUT)
1208 break;
1209
1210 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1211 SCM_TICK;
1212 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1213
1214 /* Check for exit again, since we just released and
1215 reacquired the admin mutex, before the next block_self
1216 call (which would block forever if t has already
1217 exited). */
1218 if (t->exited)
1219 {
1220 res = t->result;
1221 break;
1222 }
1223 }
1224 }
1225
1226 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1227
1228 return res;
1229 }
1230 #undef FUNC_NAME
1231
1232 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1233 (SCM obj),
1234 "Return @code{#t} if @var{obj} is a thread.")
1235 #define FUNC_NAME s_scm_thread_p
1236 {
1237 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1238 }
1239 #undef FUNC_NAME
1240
1241
1242 static size_t
1243 fat_mutex_free (SCM mx)
1244 {
1245 fat_mutex *m = SCM_MUTEX_DATA (mx);
1246 scm_i_pthread_mutex_destroy (&m->lock);
1247 return 0;
1248 }
1249
1250 static int
1251 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1252 {
1253 fat_mutex *m = SCM_MUTEX_DATA (mx);
1254 scm_puts ("#<mutex ", port);
1255 scm_uintprint ((scm_t_bits)m, 16, port);
1256 scm_puts (">", port);
1257 return 1;
1258 }
1259
1260 static SCM
1261 make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
1262 {
1263 fat_mutex *m;
1264 SCM mx;
1265
1266 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1267 scm_i_pthread_mutex_init (&m->lock, NULL);
1268 m->owner = SCM_BOOL_F;
1269 m->level = 0;
1270
1271 m->recursive = recursive;
1272 m->unchecked_unlock = unchecked_unlock;
1273 m->allow_external_unlock = external_unlock;
1274
1275 m->waiting = SCM_EOL;
1276 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1277 m->waiting = make_queue ();
1278 return mx;
1279 }
1280
1281 SCM scm_make_mutex (void)
1282 {
1283 return scm_make_mutex_with_flags (SCM_EOL);
1284 }
1285
1286 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1287 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1288 SCM_SYMBOL (recursive_sym, "recursive");
1289
1290 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1291 (SCM flags),
1292 "Create a new mutex. ")
1293 #define FUNC_NAME s_scm_make_mutex_with_flags
1294 {
1295 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1296
1297 SCM ptr = flags;
1298 while (! scm_is_null (ptr))
1299 {
1300 SCM flag = SCM_CAR (ptr);
1301 if (scm_is_eq (flag, unchecked_unlock_sym))
1302 unchecked_unlock = 1;
1303 else if (scm_is_eq (flag, allow_external_unlock_sym))
1304 external_unlock = 1;
1305 else if (scm_is_eq (flag, recursive_sym))
1306 recursive = 1;
1307 else
1308 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1309 ptr = SCM_CDR (ptr);
1310 }
1311 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1312 }
1313 #undef FUNC_NAME
1314
1315 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1316 (void),
1317 "Create a new recursive mutex. ")
1318 #define FUNC_NAME s_scm_make_recursive_mutex
1319 {
1320 return make_fat_mutex (1, 0, 0);
1321 }
1322 #undef FUNC_NAME
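/* C-level calling sequence sketch; all three entry points are defined
   in this file:

     SCM m = scm_make_mutex ();
     scm_lock_mutex (m);
     ... critical section ...
     scm_unlock_mutex (m);
*/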
1323
1324 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1325
1326 static SCM
1327 fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
1328 {
1329 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1330
1331 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
1332 SCM err = SCM_BOOL_F;
1333
1334 struct timeval current_time;
1335
1336 scm_i_scm_pthread_mutex_lock (&m->lock);
1337
1338 while (1)
1339 {
1340 if (m->level == 0)
1341 {
1342 m->owner = new_owner;
1343 m->level++;
1344
1345 if (SCM_I_IS_THREAD (new_owner))
1346 {
1347 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
1348 scm_i_pthread_mutex_lock (&t->admin_mutex);
1349
1350 /* Only keep a weak reference to MUTEX so that it's not
1351 retained when not referenced elsewhere (bug #27450).
1352 The weak pair itself is eventually removed when MUTEX
1353 is unlocked. Note that `t->mutexes' lists mutexes
1354 currently held by T, so it should be small. */
1355 t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
1356
1357 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1358 }
1359 *ret = 1;
1360 break;
1361 }
1362 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1363 {
1364 m->owner = new_owner;
1365 err = scm_cons (scm_abandoned_mutex_error_key,
1366 scm_from_locale_string ("lock obtained on abandoned "
1367 "mutex"));
1368 *ret = 1;
1369 break;
1370 }
1371 else if (scm_is_eq (m->owner, new_owner))
1372 {
1373 if (m->recursive)
1374 {
1375 m->level++;
1376 *ret = 1;
1377 }
1378 else
1379 {
1380 err = scm_cons (scm_misc_error_key,
1381 scm_from_locale_string ("mutex already locked "
1382 "by thread"));
1383 *ret = 0;
1384 }
1385 break;
1386 }
1387 else
1388 {
1389 if (timeout != NULL)
1390 {
1391 gettimeofday (&current_time, NULL);
1392 if (current_time.tv_sec > timeout->tv_sec ||
1393 (current_time.tv_sec == timeout->tv_sec &&
1394 current_time.tv_usec * 1000 > timeout->tv_nsec))
1395 {
1396 *ret = 0;
1397 break;
1398 }
1399 }
1400 block_self (m->waiting, mutex, &m->lock, timeout);
1401 scm_i_pthread_mutex_unlock (&m->lock);
1402 SCM_TICK;
1403 scm_i_scm_pthread_mutex_lock (&m->lock);
1404 }
1405 }
1406 scm_i_pthread_mutex_unlock (&m->lock);
1407 return err;
1408 }
1409
1410 SCM scm_lock_mutex (SCM mx)
1411 {
1412 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1413 }
1414
1415 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1416 (SCM m, SCM timeout, SCM owner),
1417 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1418 "blocks until the mutex becomes available. The function returns when "
1419 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1420 "a thread already owns will succeed right away and will not block the "
1421 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1422 #define FUNC_NAME s_scm_lock_mutex_timed
1423 {
1424 SCM exception;
1425 int ret = 0;
1426 scm_t_timespec cwaittime, *waittime = NULL;
1427
1428 SCM_VALIDATE_MUTEX (1, m);
1429
1430 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1431 {
1432 to_timespec (timeout, &cwaittime);
1433 waittime = &cwaittime;
1434 }
1435
1436 exception = fat_mutex_lock (m, waittime, owner, &ret);
1437 if (!scm_is_false (exception))
1438 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1439 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1440 }
1441 #undef FUNC_NAME
1442
1443 void
1444 scm_dynwind_lock_mutex (SCM mutex)
1445 {
1446 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1447 SCM_F_WIND_EXPLICITLY);
1448 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1449 SCM_F_WIND_EXPLICITLY);
1450 }
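/* Sketch: holding a mutex for the extent of a dynwind context, so it
   is released even on a non-local exit:

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mutex);
     ... code that may throw ...
     scm_dynwind_end ();
*/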
1451
1452 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1453 (SCM mutex),
1454 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1455 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1456 #define FUNC_NAME s_scm_try_mutex
1457 {
1458 SCM exception;
1459 int ret = 0;
1460 scm_t_timespec cwaittime, *waittime = NULL;
1461
1462 SCM_VALIDATE_MUTEX (1, mutex);
1463
1464 to_timespec (scm_from_int(0), &cwaittime);
1465 waittime = &cwaittime;
1466
1467 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1468 if (!scm_is_false (exception))
1469 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1470 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1471 }
1472 #undef FUNC_NAME
1473
1474 /*** Fat condition variables */
1475
1476 typedef struct {
1477 scm_i_pthread_mutex_t lock;
1478 SCM waiting; /* the threads waiting for this condition. */
1479 } fat_cond;
1480
1481 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1482 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1483
1484 static int
1485 fat_mutex_unlock (SCM mutex, SCM cond,
1486 const scm_t_timespec *waittime, int relock)
1487 {
1488 SCM owner;
1489 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1490 fat_cond *c = NULL;
1491 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1492 int err = 0, ret = 0;
1493
1494 scm_i_scm_pthread_mutex_lock (&m->lock);
1495
1496 owner = m->owner;
1497
1498 if (!scm_is_eq (owner, t->handle))
1499 {
1500 if (m->level == 0)
1501 {
1502 if (!m->unchecked_unlock)
1503 {
1504 scm_i_pthread_mutex_unlock (&m->lock);
1505 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1506 }
1507 owner = t->handle;
1508 }
1509 else if (!m->allow_external_unlock)
1510 {
1511 scm_i_pthread_mutex_unlock (&m->lock);
1512 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1513 }
1514 }
1515
1516 if (! (SCM_UNBNDP (cond)))
1517 {
1518 c = SCM_CONDVAR_DATA (cond);
1519 while (1)
1520 {
1521 int brk = 0;
1522
1523 if (m->level > 0)
1524 m->level--;
1525 if (m->level == 0)
1526 {
1527 /* Change the owner of MUTEX. */
1528 t->mutexes = scm_delq_x (mutex, t->mutexes);
1529 m->owner = unblock_from_queue (m->waiting);
1530 }
1531
1532 t->block_asyncs++;
1533
1534 err = block_self (c->waiting, cond, &m->lock, waittime);
1535 scm_i_pthread_mutex_unlock (&m->lock);
1536
1537 if (err == 0)
1538 {
1539 ret = 1;
1540 brk = 1;
1541 }
1542 else if (err == ETIMEDOUT)
1543 {
1544 ret = 0;
1545 brk = 1;
1546 }
1547 else if (err != EINTR)
1548 {
1549 errno = err;
1550 scm_syserror (NULL);
1551 }
1552
1553 if (brk)
1554 {
1555 if (relock)
1556 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1557 t->block_asyncs--;
1558 break;
1559 }
1560
1561 t->block_asyncs--;
1562 scm_async_click ();
1563
1564 scm_remember_upto_here_2 (cond, mutex);
1565
1566 scm_i_scm_pthread_mutex_lock (&m->lock);
1567 }
1568 }
1569 else
1570 {
1571 if (m->level > 0)
1572 m->level--;
1573 if (m->level == 0)
1574 {
1575 /* Change the owner of MUTEX. */
1576 t->mutexes = scm_delq_x (mutex, t->mutexes);
1577 m->owner = unblock_from_queue (m->waiting);
1578 }
1579
1580 scm_i_pthread_mutex_unlock (&m->lock);
1581 ret = 1;
1582 }
1583
1584 return ret;
1585 }
1586
1587 SCM scm_unlock_mutex (SCM mx)
1588 {
1589 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1590 }
1591
1592 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1593 (SCM mx, SCM cond, SCM timeout),
1594 "Unlocks @var{mutex} if the calling thread owns the lock on "
1595 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1596 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1597 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1598 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1599 "with a call to @code{unlock-mutex}. Only the last call to "
1600 "@code{unlock-mutex} will actually unlock the mutex. ")
1601 #define FUNC_NAME s_scm_unlock_mutex_timed
1602 {
1603 scm_t_timespec cwaittime, *waittime = NULL;
1604
1605 SCM_VALIDATE_MUTEX (1, mx);
1606 if (! (SCM_UNBNDP (cond)))
1607 {
1608 SCM_VALIDATE_CONDVAR (2, cond);
1609
1610 if (! (SCM_UNBNDP (timeout)))
1611 {
1612 to_timespec (timeout, &cwaittime);
1613 waittime = &cwaittime;
1614 }
1615 }
1616
1617 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1618 }
1619 #undef FUNC_NAME
1620
1621 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1622 (SCM obj),
1623 "Return @code{#t} if @var{obj} is a mutex.")
1624 #define FUNC_NAME s_scm_mutex_p
1625 {
1626 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1627 }
1628 #undef FUNC_NAME
1629
1630 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1631 (SCM mx),
1632 "Return the thread owning @var{mx}, or @code{#f}.")
1633 #define FUNC_NAME s_scm_mutex_owner
1634 {
1635 SCM owner;
1636 fat_mutex *m = NULL;
1637
1638 SCM_VALIDATE_MUTEX (1, mx);
1639 m = SCM_MUTEX_DATA (mx);
1640 scm_i_pthread_mutex_lock (&m->lock);
1641 owner = m->owner;
1642 scm_i_pthread_mutex_unlock (&m->lock);
1643
1644 return owner;
1645 }
1646 #undef FUNC_NAME
1647
1648 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1649 (SCM mx),
1650 "Return the lock level of mutex @var{mx}.")
1651 #define FUNC_NAME s_scm_mutex_level
1652 {
1653 SCM_VALIDATE_MUTEX (1, mx);
1654 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1655 }
1656 #undef FUNC_NAME
1657
1658 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1659 (SCM mx),
1660 "Returns @code{#t} if the mutex @var{mx} is locked.")
1661 #define FUNC_NAME s_scm_mutex_locked_p
1662 {
1663 SCM_VALIDATE_MUTEX (1, mx);
1664 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1665 }
1666 #undef FUNC_NAME
1667
1668 static int
1669 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1670 {
1671 fat_cond *c = SCM_CONDVAR_DATA (cv);
1672 scm_puts ("#<condition-variable ", port);
1673 scm_uintprint ((scm_t_bits)c, 16, port);
1674 scm_puts (">", port);
1675 return 1;
1676 }
1677
1678 SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1679 (void),
1680 "Make a new condition variable.")
1681 #define FUNC_NAME s_scm_make_condition_variable
1682 {
1683 fat_cond *c;
1684 SCM cv;
1685
1686 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1687 c->waiting = SCM_EOL;
1688 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1689 c->waiting = make_queue ();
1690 return cv;
1691 }
1692 #undef FUNC_NAME
1693
1694 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1695 (SCM cv, SCM mx, SCM t),
1696 "Wait until @var{cond-var} has been signalled. While waiting, "
1697 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1698 "is locked again when this function returns. When @var{time} is given, "
1699 "it specifies a point in time where the waiting should be aborted. It "
1700 "can be either a integer as returned by @code{current-time} or a pair "
1701 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1702 "mutex is locked and @code{#f} is returned. When the condition "
1703 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1704 "is returned. ")
1705 #define FUNC_NAME s_scm_timed_wait_condition_variable
1706 {
1707 scm_t_timespec waittime, *waitptr = NULL;
1708
1709 SCM_VALIDATE_CONDVAR (1, cv);
1710 SCM_VALIDATE_MUTEX (2, mx);
1711
1712 if (!SCM_UNBNDP (t))
1713 {
1714 to_timespec (t, &waittime);
1715 waitptr = &waittime;
1716 }
1717
1718 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1719 }
1720 #undef FUNC_NAME
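/* The canonical wait loop from C (a sketch; READY_P is a hypothetical
   predicate on shared state protected by MX):

     scm_lock_mutex (mx);
     while (scm_is_false (ready_p ()))
       scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
     ...
     scm_unlock_mutex (mx);
*/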
1721
1722 static void
1723 fat_cond_signal (fat_cond *c)
1724 {
1725 unblock_from_queue (c->waiting);
1726 }
1727
1728 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1729 (SCM cv),
1730 "Wake up one thread that is waiting for @var{cv}")
1731 #define FUNC_NAME s_scm_signal_condition_variable
1732 {
1733 SCM_VALIDATE_CONDVAR (1, cv);
1734 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1735 return SCM_BOOL_T;
1736 }
1737 #undef FUNC_NAME
1738
1739 static void
1740 fat_cond_broadcast (fat_cond *c)
1741 {
1742 while (scm_is_true (unblock_from_queue (c->waiting)))
1743 ;
1744 }
1745
1746 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1747 (SCM cv),
1748 "Wake up all threads that are waiting for @var{cv}. ")
1749 #define FUNC_NAME s_scm_broadcast_condition_variable
1750 {
1751 SCM_VALIDATE_CONDVAR (1, cv);
1752 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1753 return SCM_BOOL_T;
1754 }
1755 #undef FUNC_NAME
1756
1757 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1758 (SCM obj),
1759 "Return @code{#t} if @var{obj} is a condition variable.")
1760 #define FUNC_NAME s_scm_condition_variable_p
1761 {
1762 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1763 }
1764 #undef FUNC_NAME
1765
1766
1767 \f
1768 /*** Select */
1769
1770 struct select_args
1771 {
1772 int nfds;
1773 SELECT_TYPE *read_fds;
1774 SELECT_TYPE *write_fds;
1775 SELECT_TYPE *except_fds;
1776 struct timeval *timeout;
1777
1778 int result;
1779 int errno_value;
1780 };
1781
1782 static void *
1783 do_std_select (void *args)
1784 {
1785 struct select_args *select_args;
1786
1787 select_args = (struct select_args *) args;
1788
1789 select_args->result =
1790 select (select_args->nfds,
1791 select_args->read_fds, select_args->write_fds,
1792 select_args->except_fds, select_args->timeout);
1793 select_args->errno_value = errno;
1794
1795 return NULL;
1796 }
1797
1798 int
1799 scm_std_select (int nfds,
1800 SELECT_TYPE *readfds,
1801 SELECT_TYPE *writefds,
1802 SELECT_TYPE *exceptfds,
1803 struct timeval *timeout)
1804 {
1805 fd_set my_readfds;
1806 int res, eno, wakeup_fd;
1807 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1808 struct select_args args;
1809
1810 if (readfds == NULL)
1811 {
1812 FD_ZERO (&my_readfds);
1813 readfds = &my_readfds;
1814 }
1815
1816 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1817 SCM_TICK;
1818
1819 wakeup_fd = t->sleep_pipe[0];
1820 FD_SET (wakeup_fd, readfds);
1821 if (wakeup_fd >= nfds)
1822 nfds = wakeup_fd+1;
1823
1824 args.nfds = nfds;
1825 args.read_fds = readfds;
1826 args.write_fds = writefds;
1827 args.except_fds = exceptfds;
1828 args.timeout = timeout;
1829
1830 /* Explicitly cooperate with the GC. */
1831 scm_without_guile (do_std_select, &args);
1832
1833 res = args.result;
1834 eno = args.errno_value;
1835
1836 t->sleep_fd = -1;
1837 scm_i_reset_sleep (t);
1838
1839 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1840 {
1841 char dummy;
1842 full_read (wakeup_fd, &dummy, 1);
1843
1844 FD_CLR (wakeup_fd, readfds);
1845 res -= 1;
1846 if (res == 0)
1847 {
1848 eno = EINTR;
1849 res = -1;
1850 }
1851 }
1852 errno = eno;
1853 return res;
1854 }
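/* Sketch: waiting up to half a second for FD to become readable while
   remaining interruptible by system asyncs:

     fd_set fds;
     struct timeval tv = { 0, 500000 };
     FD_ZERO (&fds);
     FD_SET (fd, &fds);
     if (scm_std_select (fd + 1, &fds, NULL, NULL, &tv) < 0
         && errno == EINTR)
       ... a system async interrupted the wait ...
*/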
1855
1856 /* Convenience API for blocking while in guile mode. */
1857
1858 #if SCM_USE_PTHREAD_THREADS
1859
1860 /* It seems reasonable to not run procedures related to mutex and condition
1861 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1862 without it, and (ii) the only potential gain would be GC latency. See
1863 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1864 for a discussion of the pros and cons. */
1865
1866 int
1867 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1868 {
1869 int res = scm_i_pthread_mutex_lock (mutex);
1870 return res;
1871 }
1872
1873 static void
1874 do_unlock (void *data)
1875 {
1876 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1877 }
1878
1879 void
1880 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1881 {
1882 scm_i_scm_pthread_mutex_lock (mutex);
1883 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1884 }
1885
1886 int
1887 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1888 {
1889 int res;
1890 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1891
1892 t->held_mutex = mutex;
1893 res = scm_i_pthread_cond_wait (cond, mutex);
1894 t->held_mutex = NULL;
1895
1896 return res;
1897 }
1898
1899 int
1900 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1901 scm_i_pthread_mutex_t *mutex,
1902 const scm_t_timespec *wt)
1903 {
1904 int res;
1905 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1906
1907 t->held_mutex = mutex;
1908 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1909 t->held_mutex = NULL;
1910
1911 return res;
1912 }
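/* These wrappers are used like the raw pthread calls (sketch; FLAG is
   hypothetical shared state), with the difference that T->held_mutex
   lets on_thread_exit release the mutex if the thread is cancelled
   mid-wait:

     scm_pthread_mutex_lock (&m);
     while (!flag)
       scm_pthread_cond_wait (&c, &m);
     scm_i_pthread_mutex_unlock (&m);
*/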
1913
1914 #endif
1915
1916 unsigned long
1917 scm_std_usleep (unsigned long usecs)
1918 {
1919 struct timeval tv;
1920 tv.tv_usec = usecs % 1000000;
1921 tv.tv_sec = usecs / 1000000;
1922 scm_std_select (0, NULL, NULL, NULL, &tv);
1923 return tv.tv_sec * 1000000 + tv.tv_usec;
1924 }
1925
1926 unsigned int
1927 scm_std_sleep (unsigned int secs)
1928 {
1929 struct timeval tv;
1930 tv.tv_usec = 0;
1931 tv.tv_sec = secs;
1932 scm_std_select (0, NULL, NULL, NULL, &tv);
1933 return tv.tv_sec;
1934 }
1935
1936 /*** Misc */
1937
1938 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1939 (void),
1940 "Return the thread that called this function.")
1941 #define FUNC_NAME s_scm_current_thread
1942 {
1943 return SCM_I_CURRENT_THREAD->handle;
1944 }
1945 #undef FUNC_NAME
1946
1947 static SCM
1948 scm_c_make_list (size_t n, SCM fill)
1949 {
1950 SCM res = SCM_EOL;
1951 while (n-- > 0)
1952 res = scm_cons (fill, res);
1953 return res;
1954 }
1955
1956 SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1957 (void),
1958 "Return a list of all threads.")
1959 #define FUNC_NAME s_scm_all_threads
1960 {
1961   /* We cannot allocate while holding the thread_admin_mutex because
1962 of the way GC is done.
1963 */
1964 int n = thread_count;
1965 scm_i_thread *t;
1966 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
1967
1968 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1969 l = &list;
1970 for (t = all_threads; t && n > 0; t = t->next_thread)
1971 {
1972 if (t != scm_i_signal_delivery_thread)
1973 {
1974 SCM_SETCAR (*l, t->handle);
1975 l = SCM_CDRLOC (*l);
1976 }
1977 n--;
1978 }
1979 *l = SCM_EOL;
1980 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1981 return list;
1982 }
1983 #undef FUNC_NAME
1984
1985 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1986 (SCM thread),
1987 "Return @code{#t} iff @var{thread} has exited.\n")
1988 #define FUNC_NAME s_scm_thread_exited_p
1989 {
1990 return scm_from_bool (scm_c_thread_exited_p (thread));
1991 }
1992 #undef FUNC_NAME
1993
1994 int
1995 scm_c_thread_exited_p (SCM thread)
1996 #define FUNC_NAME s_scm_thread_exited_p
1997 {
1998 scm_i_thread *t;
1999 SCM_VALIDATE_THREAD (1, thread);
2000 t = SCM_I_THREAD_DATA (thread);
2001 return t->exited;
2002 }
2003 #undef FUNC_NAME
2004
2005 static scm_i_pthread_cond_t wake_up_cond;
2006 static int threads_initialized_p = 0;
2007
2008
2009 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2010 */
2011 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2012
2013 static SCM dynwind_critical_section_mutex;
2014
2015 void
2016 scm_dynwind_critical_section (SCM mutex)
2017 {
2018 if (scm_is_false (mutex))
2019 mutex = dynwind_critical_section_mutex;
2020 scm_dynwind_lock_mutex (mutex);
2021 scm_dynwind_block_asyncs ();
2022 }
2023
2024 /*** Initialization */
2025
2026 scm_i_pthread_mutex_t scm_i_misc_mutex;
2027
2028 #if SCM_USE_PTHREAD_THREADS
2029 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2030 #endif
2031
2032 void
2033 scm_threads_prehistory (void *base)
2034 {
2035 #if SCM_USE_PTHREAD_THREADS
2036 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2037 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2038 PTHREAD_MUTEX_RECURSIVE);
2039 #endif
2040
2041 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2042 scm_i_pthread_mutexattr_recursive);
2043 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2044 scm_i_pthread_cond_init (&wake_up_cond, NULL);
2045
2046 guilify_self_1 ((struct GC_stack_base *) base);
2047 }
2048
2049 scm_t_bits scm_tc16_thread;
2050 scm_t_bits scm_tc16_mutex;
2051 scm_t_bits scm_tc16_condvar;
2052
2053 void
2054 scm_init_threads ()
2055 {
2056 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
2057 scm_set_smob_print (scm_tc16_thread, thread_print);
2058
2059 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
2060 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2061 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
2062
2063 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2064 sizeof (fat_cond));
2065 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
2066
2067 scm_i_default_dynamic_state = SCM_BOOL_F;
2068 guilify_self_2 (SCM_BOOL_F);
2069 threads_initialized_p = 1;
2070
2071 dynwind_critical_section_mutex = scm_make_recursive_mutex ();
2072 }
2073
2074 void
2075 scm_init_threads_default_dynamic_state ()
2076 {
2077 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2078 scm_i_default_dynamic_state = state;
2079 }
2080
2081 void
2082 scm_init_thread_procs ()
2083 {
2084 #include "libguile/threads.x"
2085 }
2086
2087 \f
2088 /* IA64-specific things. */
2089
2090 #ifdef __ia64__
2091 # ifdef __hpux
2092 # include <sys/param.h>
2093 # include <sys/pstat.h>
2094 void *
2095 scm_ia64_register_backing_store_base (void)
2096 {
2097 struct pst_vm_status vm_status;
2098 int i = 0;
2099 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
2100 if (vm_status.pst_type == PS_RSESTACK)
2101 return (void *) vm_status.pst_vaddr;
2102 abort ();
2103 }
2104 void *
2105 scm_ia64_ar_bsp (const void *ctx)
2106 {
2107 uint64_t bsp;
2108 __uc_get_ar_bsp (ctx, &bsp);
2109 return (void *) bsp;
2110 }
2111 # endif /* hpux */
2112 # ifdef linux
2113 # include <ucontext.h>
2114 void *
2115 scm_ia64_register_backing_store_base (void)
2116 {
2117 extern void *__libc_ia64_register_backing_store_base;
2118 return __libc_ia64_register_backing_store_base;
2119 }
2120 void *
2121 scm_ia64_ar_bsp (const void *opaque)
2122 {
2123 const ucontext_t *ctx = opaque;
2124 return (void *) ctx->uc_mcontext.sc_ar_bsp;
2125 }
2126 # endif /* linux */
2127 #endif /* __ia64__ */
2128
2129
2130 /*
2131 Local Variables:
2132 c-file-style: "gnu"
2133 End:
2134 */