bdw-gc 6.8 compatibility (hopefully)
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include <assert.h>
42
43 #include "libguile/validate.h"
44 #include "libguile/root.h"
45 #include "libguile/eval.h"
46 #include "libguile/async.h"
47 #include "libguile/ports.h"
48 #include "libguile/threads.h"
49 #include "libguile/dynwind.h"
50 #include "libguile/iselect.h"
51 #include "libguile/fluids.h"
52 #include "libguile/continuations.h"
53 #include "libguile/gc.h"
54 #include "libguile/init.h"
55 #include "libguile/scmsigs.h"
56 #include "libguile/strings.h"
57 #include "libguile/weaks.h"
58
59 #ifdef __MINGW32__
60 #ifndef ETIMEDOUT
61 # define ETIMEDOUT WSAETIMEDOUT
62 #endif
63 # include <fcntl.h>
64 # include <process.h>
65 # define pipe(fd) _pipe (fd, 256, O_BINARY)
66 #endif /* __MINGW32__ */
67
68 #include <full-read.h>
69
70
71 \f
72
73 /* First some libgc shims. */
74
75 /* Make sure GC_fn_type is defined; it is missing from the public
76 headers of GC 7.1 and earlier. */
77 #ifndef HAVE_GC_FN_TYPE
78 typedef void * (* GC_fn_type) (void *);
79 #endif
80
81
82 #ifndef GC_SUCCESS
83 #define GC_SUCCESS 0
84 #endif
85
86 #ifndef GC_UNIMPLEMENTED
87 #define GC_UNIMPLEMENTED 3
88 #endif
89
90 /* Likewise struct GC_stack_base is missing before 7.1. */
91 #ifndef HAVE_GC_STACK_BASE
92 struct GC_stack_base;
93
94 static int
95 GC_register_my_thread (struct GC_stack_base *)
96 {
97 return GC_UNIMPLEMENTED;
98 }
99
/* Shim for libgc releases that lack GC_unregister_my_thread: nothing to
   undo, so do nothing.  Use the `(void)' prototype form rather than the
   obsolescent empty parameter list.  */
static void
GC_unregister_my_thread (void)
{
}
104
105 static void *
106 GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
107 {
108 return fn (NULL, arg);
109 }
110 #endif
111
112
113 /* Now define with_gc_active and with_gc_inactive. */
114
115 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
116
117 /* We have a sufficiently new libgc (7.2 or newer). */
118
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  /* Run FUNC with this thread marked as blocked for the collector, via
     libgc's own primitive (available in GC 7.2 and newer).  */
  return GC_do_blocking (func, data);
}
124
static void*
with_gc_active (GC_fn_type func, void *data)
{
  /* Re-enter GC-active mode around FUNC, via libgc's own primitive
     (available in GC 7.2 and newer).  */
  return GC_call_with_gc_active (func, data);
}
130
131 #else
132
133 /* libgc not new enough, so never actually deactivate GC.
134
135 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
136 GC_call_with_gc_active. */
137
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  /* Fallback: this libgc cannot deactivate collection for a thread, so
     just call FUNC directly.  */
  return func (data);
}
143
static void*
with_gc_active (GC_fn_type func, void *data)
{
  /* Fallback: GC was never deactivated, so there is nothing to
     reactivate; just call FUNC directly.  */
  return func (data);
}
149
150 #endif /* HAVE_GC_DO_BLOCKING */
151
152
153 \f
154 static void
155 to_timespec (SCM t, scm_t_timespec *waittime)
156 {
157 if (scm_is_pair (t))
158 {
159 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
160 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
161 }
162 else
163 {
164 double time = scm_to_double (t);
165 double sec = scm_c_truncate (time);
166
167 waittime->tv_sec = (long) sec;
168 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
169 }
170 }
171
172 \f
173 /*** Queues */
174
175 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
176 the risk of false references leading to unbounded retained space as
177 described in "Bounding Space Usage of Conservative Garbage Collectors",
178 H.J. Boehm, 2001. */
179
180 /* Make an empty queue data structure.
181 */
static SCM
make_queue ()
{
  /* A queue is a pair: the cdr holds the list of queued elements
     (front first), and the car points at the final cell of that list
     so enqueue can append in O(1).  Both start out empty.  */
  return scm_cons (SCM_EOL, SCM_EOL);
}
187
188 /* Put T at the back of Q and return a handle that can be used with
189 remqueue to remove T from Q again.
190 */
static SCM
enqueue (SCM q, SCM t)
{
  /* Wrap T in a fresh cell; this cell doubles as the handle handed
     back to the caller for use with remqueue.  */
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Empty queue: the new cell becomes the head of the list.  */
    SCM_SETCDR (q, c);
  else
    /* Otherwise chain it after the current tail cell (the car of Q).  */
    SCM_SETCDR (SCM_CAR (q), c);
  /* Either way, the new cell is now the tail.  */
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
204
205 /* Remove the element that the handle C refers to from the queue Q. C
206 must have been returned from a call to enqueue. The return value
207 is zero when the element referred to by C has already been removed.
208 Otherwise, 1 is returned.
209 */
210 static int
211 remqueue (SCM q, SCM c)
212 {
213 SCM p, prev = q;
214 SCM_CRITICAL_SECTION_START;
215 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
216 {
217 if (scm_is_eq (p, c))
218 {
219 if (scm_is_eq (c, SCM_CAR (q)))
220 SCM_SETCAR (q, SCM_CDR (c));
221 SCM_SETCDR (prev, SCM_CDR (c));
222
223 /* GC-robust */
224 SCM_SETCDR (c, SCM_EOL);
225
226 SCM_CRITICAL_SECTION_END;
227 return 1;
228 }
229 prev = p;
230 }
231 SCM_CRITICAL_SECTION_END;
232 return 0;
233 }
234
235 /* Remove the front-most element from the queue Q and return it.
236 Return SCM_BOOL_F when Q is empty.
237 */
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  /* The front of the queue is the first cell of the list in Q's cdr.  */
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
	/* Queue is now empty: reset the tail pointer as well.  */
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust: sever the removed cell's link into the rest of the
	 list so a false reference to it cannot retain the queue.  */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
262
263 /*** Thread smob routines */
264
265
266 static int
267 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
268 {
269 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
270 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
271 the struct case, hence we go via a union, and extract according to the
272 size of pthread_t. */
273 union {
274 scm_i_pthread_t p;
275 unsigned short us;
276 unsigned int ui;
277 unsigned long ul;
278 scm_t_uintmax um;
279 } u;
280 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
281 scm_i_pthread_t p = t->pthread;
282 scm_t_uintmax id;
283 u.p = p;
284 if (sizeof (p) == sizeof (unsigned short))
285 id = u.us;
286 else if (sizeof (p) == sizeof (unsigned int))
287 id = u.ui;
288 else if (sizeof (p) == sizeof (unsigned long))
289 id = u.ul;
290 else
291 id = u.um;
292
293 scm_puts ("#<thread ", port);
294 scm_uintprint (id, 10, port);
295 scm_puts (" (", port);
296 scm_uintprint ((scm_t_bits)t, 16, port);
297 scm_puts (")>", port);
298 return 1;
299 }
300
301 \f
302 /*** Blocking on queues. */
303
304 /* See also scm_i_queue_async_cell for how such a block is
   interrupted.
306 */
307
308 /* Put the current thread on QUEUE and go to sleep, waiting for it to
309 be woken up by a call to 'unblock_from_queue', or to be
310 interrupted. Upon return of this function, the current thread is
311 no longer on QUEUE, even when the sleep has been interrupted.
312
313 The caller of block_self must hold MUTEX. It will be atomically
314 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
315
316 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
317 as MUTEX is needed.
318
319 When WAITTIME is not NULL, the sleep will be aborted at that time.
320
321 The return value of block_self is an errno value. It will be zero
322 when the sleep has been successfully completed by a call to
323 unblock_from_queue, EINTR when it has been interrupted by the
324 delivery of a system async, and ETIMEDOUT when the timeout has
325 expired.
326
327 The system asyncs themselves are not executed by block_self.
328 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* NOTE(review): a nonzero return from scm_i_setup_sleep appears to
     mean the sleep cannot be entered (an interrupt is presumably
     already pending) -- confirm against scmsigs.c.  In that case we
     report EINTR without sleeping at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while on the queue; the caller runs them after we
	 return (see the header comment above).  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      /* Wait on our private condition variable; MUTEX is atomically
	 released during the wait and re-held on return.  */
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
360
361 /* Wake up the first thread on QUEUE, if any. The awoken thread is
362 returned, or #f if the queue was empty.
363 */
364 static SCM
365 unblock_from_queue (SCM queue)
366 {
367 SCM thread = dequeue (queue);
368 if (scm_is_true (thread))
369 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
370 return thread;
371 }
372
373 \f
374 /* Getting into and out of guile mode.
375 */
376
377 /* Key used to attach a cleanup handler to a given thread. Also, if
378 thread-local storage is unavailable, this key is used to retrieve the
379 current thread with `pthread_getspecific ()'. */
380 scm_i_pthread_key_t scm_i_thread_key;
381
382
383 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
384
385 /* When thread-local storage (TLS) is available, a pointer to the
386 current-thread object is kept in TLS. Note that storing the thread-object
387 itself in TLS (rather than a pointer to some malloc'd memory) is not
388 possible since thread objects may live longer than the actual thread they
389 represent. */
390 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
391
392 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
393
394
395 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
396 static scm_i_thread *all_threads = NULL;
397 static int thread_count;
398
399 static SCM scm_i_default_dynamic_state;
400
401 /* Perform first stage of thread initialisation, in non-guile mode.
402 */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  /* Allocate the thread descriptor from the GC so the SCM fields below
     are traced.  */
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  /* SCM-valued fields get placeholder values here; the real thread
     smob, join queue and dynamic state are created in guilify_self_2
     once we can run in guile mode.  */
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Keep asyncs blocked until stage 2 finishes setting up.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->critical_section_level = 0;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->current_mark_stack_ptr = NULL;
  t->current_mark_stack_limit = NULL;
  t->canceled = 0;
  t->exited = 0;
  t->guile_mode = 0;

  /* Make the descriptor retrievable via the thread key; this also arms
     the key's destructor (on_thread_exit) for this thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
  /* Cache the current thread in TLS for faster lookup.  */
  scm_i_current_thread = t;
#endif

  /* Link the new descriptor into the global thread list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
471
472 /* Perform second stage of thread initialisation, in guile mode.
473 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* Create the Scheme-visible thread object wrapping T.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  /* PARENT, when given, is the dynamic state to inherit from;
     otherwise start from the initial dynamic state.  */
  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Setup is complete; allow asyncs to run (stage 1 left them
     blocked).  */
  t->block_asyncs = 0;
}
495
496 \f
497 /*** Fat mutexes */
498
499 /* We implement our own mutex type since we want them to be 'fair', we
500 want to do fancy things while waiting for them (like running
501 asyncs) and we might want to add things that are nice for
502 debugging.
503 */
504
typedef struct {
  scm_i_pthread_mutex_t lock;  /* protects the fields below */
  SCM owner;                   /* handle of the owning thread
                                  (presumably #f when unowned -- confirm
                                  at the initialization site) */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;
517
518 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
519 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
520
521 /* Perform thread tear-down, in guile mode.
522 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Run the cleanup handler, if any, catching all throws so the
     tear-down below always proceeds.  The handler is cleared first so
     it cannot run twice; its result becomes the thread's result.  */
  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on this thread.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake waiters on every mutex this thread still owns.  T->mutexes is
     a weak list, so an entry may be unbound if the mutex was already
     collected.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
	  assert (scm_is_eq (m->owner, t->handle));

	  unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
571
572 static void *
573 do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
574 {
575 void *ret;
576 int registered;
577
578 registered = GC_register_my_thread (sb);
579
580 ret = scm_with_guile (do_thread_exit, v);
581
582 if (registered == GC_SUCCESS)
583 GC_unregister_my_thread ();
584
585 return ret;
586 }
587
/* Destructor attached to scm_i_thread_key: runs when a guilified
   thread terminates.  V is the thread's scm_i_thread descriptor.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down. */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  Trampoline through
     GC_call_with_stack_base so that the GC works even if it already
     cleaned up for this thread. */
  GC_call_with_stack_base (do_thread_exit_trampoline, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust: clear the dangling link so a false reference to T
	   cannot retain the rest of the thread list.  */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt. */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Clear the key so the destructor does not fire again for T.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
644
645 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
646
/* Create the per-thread key, installing on_thread_exit as its
   destructor so thread tear-down runs automatically when a registered
   thread terminates.  Invoked once via init_thread_key_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
652
653 /* Perform any initializations necessary to make the current thread
654 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
655 if necessary.
656
657 BASE is the stack base to use with GC.
658
   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
660 which case the default dynamic state is used.
661
662 Returns zero when the thread was known to guile already; otherwise
663 return 1.
664
665 Note that it could be the case that the thread was known
666 to Guile, but not in guile mode (because we are within a
667 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
668 be sure. New threads are put into guile mode implicitly. */
669
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  /* Make sure the thread key (and its exit destructor) exists before
     any thread consults it.  */
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
707
708 #if SCM_USE_PTHREAD_THREADS
709
710 #if defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP
711 /* This method for GNU/Linux and perhaps some other systems.
712 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
713 available on them. */
714 #define HAVE_GET_THREAD_STACK_BASE
715
716 static SCM_STACKITEM *
717 get_thread_stack_base ()
718 {
719 pthread_attr_t attr;
720 void *start, *end;
721 size_t size;
722
723 pthread_getattr_np (pthread_self (), &attr);
724 pthread_attr_getstack (&attr, &start, &size);
725 end = (char *)start + size;
726
727 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
728 for the main thread, but we can use scm_get_stack_base in that
729 case.
730 */
731
732 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
733 if ((void *)&attr < start || (void *)&attr >= end)
734 return (SCM_STACKITEM *) GC_stackbottom;
735 else
736 #endif
737 {
738 #if SCM_STACK_GROWS_UP
739 return start;
740 #else
741 return end;
742 #endif
743 }
744 }
745
746 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
747 /* This method for MacOS X.
748 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
749 but as of 2006 there's nothing obvious at apple.com. */
750 #define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  /* MacOS X: ask the pthread library directly for this thread's stack
     address.  */
  return pthread_get_stackaddr_np (pthread_self ());
}
756
757 #elif defined (__MINGW32__)
758 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
759 in any thread. We don't like hard-coding the name of a system, but there
760 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
761 work. */
762 #define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  /* mingw: reuse the collector's recorded stack bottom (see the
     comment above on why this is safe there).  */
  return (SCM_STACKITEM *) GC_stackbottom;
}
768
769 #endif /* pthread methods of get_thread_stack_base */
770
771 #else /* !SCM_USE_PTHREAD_THREADS */
772
773 #define HAVE_GET_THREAD_STACK_BASE
774
static SCM_STACKITEM *
get_thread_stack_base ()
{
  /* Without pthreads there is only one thread, so the collector's
     recorded stack bottom is exactly its stack base.  */
  return (SCM_STACKITEM *) GC_stackbottom;
}
780
781 #endif /* !SCM_USE_PTHREAD_THREADS */
782
783 #ifdef HAVE_GET_THREAD_STACK_BASE
784
/* Public entry point: initialize Guile (if needed) and put the calling
   thread into guile mode, using the platform-specific stack base and
   the default dynamic state.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}
791
792 #endif
793
/* Call FUNC with DATA in guile mode and return its result, entering
   guile mode with the default dynamic state as parent.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
800
/* Cleanup helper that performs full thread tear-down for the current
   thread; marked SCM_UNUSED since not every configuration references
   it.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}
806
/* Argument package for with_guile_trampoline: the user callback and
   its data, forwarded through with_gc_active.  */
struct with_guile_trampoline_args
{
  GC_fn_type func;
  void *data;
};
812
813 static void *
814 with_guile_trampoline (void *data)
815 {
816 struct with_guile_trampoline_args *args = data;
817
818 return scm_c_with_continuation_barrier (args->func, args->data);
819 }
820
/* Call FUNC with DATA in guile mode, guilifying the calling thread
   first if necessary (with PARENT as the parent dynamic state), and
   return FUNC's result.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  SCM_STACKITEM base_item;

  new_thread = scm_i_init_thread_for_guile (&base_item, parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (func, data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (func, data);
    }
  else
    {
      struct with_guile_trampoline_args args;
      args.func = func;
      args.data = data;

      /* We are not in Guile mode, either because we are not within a
	 scm_with_guile, or because we are within a scm_without_guile.

	 This call to scm_with_guile() could happen from anywhere on the
	 stack, and in particular lower on the stack than when it was
	 when this thread was first guilified.  Thus, `base' must be
	 updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (&base_item) < t->base)
	t->base = SCM_STACK_PTR (&base_item);
#else
      if (SCM_STACK_PTR (&base_item) > t->base)
	t->base = SCM_STACK_PTR (&base_item);
#endif

      /* Run FUNC behind a continuation barrier (via the trampoline)
	 with the collector reactivated for this thread.  */
      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, &args);
      t->guile_mode = 0;
    }
  return res;
}
873
874 void *
875 scm_without_guile (void *(*func)(void *), void *data)
876 {
877 void *result;
878 scm_i_thread *t = SCM_I_CURRENT_THREAD;
879
880 if (t->guile_mode)
881 {
882 SCM_I_CURRENT_THREAD->guile_mode = 0;
883 result = with_gc_inactive (func, data);
884 SCM_I_CURRENT_THREAD->guile_mode = 1;
885 }
886 else
887 /* Otherwise we're not in guile mode, so nothing to do. */
888 result = func (data);
889
890 return result;
891 }
892
893 \f
894 /*** Thread creation */
895
/* Hand-off record for call-with-new-thread.  It lives on the creating
   thread's stack; MUTEX/COND synchronize the hand-off so the creator
   does not return (destroying the record) before the child has copied
   what it needs and stored its thread object into THREAD.  */
typedef struct {
  SCM parent;      /* dynamic state to inherit */
  SCM thunk;       /* thunk to run in the new thread */
  SCM handler;     /* catch handler, or SCM_UNDEFINED */
  SCM thread;      /* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
904
/* Body of a new Scheme thread, run in guile mode by launch_thread.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy the thunk and handler out of DATA now: DATA is on the
     creating thread's stack and may disappear once we signal below.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Publish our thread object and release the creator, which is
     waiting on COND in scm_call_with_new_thread.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  /* Run the thunk, wrapped in a #t catch when a handler was given; the
     outcome becomes the thread's exit value.  */
  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
926
927 static void *
928 launch_thread (void *d)
929 {
930 launch_data *data = (launch_data *)d;
931 scm_i_pthread_detach (scm_i_pthread_self ());
932 scm_i_with_guile_and_parent (really_launch, d, data->parent);
933 return NULL;
934 }
935
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  /* DATA is stack-allocated; the hand-off protocol below keeps it
     alive until the child thread has taken what it needs.  */
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across thread creation so the child cannot signal
     COND before we are waiting on it.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until really_launch stores the child's thread object into
     DATA.THREAD.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
979
/* Hand-off record for scm_spawn_thread: the C-level analogue of
   launch_data, carrying a C body/handler pair instead of Scheme
   procedures.  Same stack-lifetime and synchronization rules as
   launch_data.  */
typedef struct {
  SCM parent;                  /* dynamic state to inherit */
  scm_t_catch_body body;       /* C function to run in the new thread */
  void *body_data;
  scm_t_catch_handler handler; /* C handler, or NULL */
  void *handler_data;
  SCM thread;                  /* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
990
/* Body of a thread created by scm_spawn_thread, run in guile mode.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy everything out of DATA before signaling: DATA lives on the
     spawning thread's stack and may vanish once it proceeds.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Publish our thread object and release the spawner, which waits on
     COND in scm_spawn_thread.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  /* Run the body, inside a #t catch when a handler was supplied; the
     outcome becomes the thread's exit value.  */
  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1015
1016 static void *
1017 spawn_thread (void *d)
1018 {
1019 spawn_data *data = (spawn_data *)d;
1020 scm_i_pthread_detach (scm_i_pthread_self ());
1021 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1022 return NULL;
1023 }
1024
/* C-level thread creation: run BODY (with BODY_DATA) in a new Guile
   thread, guarded by HANDLER/HANDLER_DATA when HANDLER is non-NULL.
   Returns the new thread object.  Signals a system error if the
   underlying thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  /* DATA is stack-allocated; the COND hand-off below keeps it alive
     until the child has copied what it needs.  */
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across creation so the child cannot signal COND
     before we are waiting on it.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until really_spawn stores the child's thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1055
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Expose the success/failure report of the underlying yield as a
     Scheme boolean.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1064
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Release the admin mutex before cancelling: the target's exit
	 path (on_thread_exit/do_thread_exit) takes this same mutex.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    /* Already signaled to terminate -- a no-op, per the docstring.  */
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1089
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on a thread that has already exited or been
     canceled is silently ignored.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1113
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* An exited or canceled thread reports no handler, mirroring
     set-thread-cleanup!'s refusal to install one.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1132
/* Block until THREAD terminates and return its exit value: the
   no-timeout convenience form of join-thread.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1137
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  /* RES is what we return on timeout: TIMEOUTVAL when given, else #f.  */
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on T's join queue; do_thread_exit wakes every joiner
	     when T terminates.  EINTR here means a system async needs
	     to run (via SCM_TICK below).  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Drop the admin mutex while running asyncs -- they may take
	     arbitrary time or acquire other locks.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited).  */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1204
1205 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1206 (SCM obj),
1207 "Return @code{#t} if @var{obj} is a thread.")
1208 #define FUNC_NAME s_scm_thread_p
1209 {
1210 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1211 }
1212 #undef FUNC_NAME
1213
1214
1215 static size_t
1216 fat_mutex_free (SCM mx)
1217 {
1218 fat_mutex *m = SCM_MUTEX_DATA (mx);
1219 scm_i_pthread_mutex_destroy (&m->lock);
1220 return 0;
1221 }
1222
1223 static int
1224 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1225 {
1226 fat_mutex *m = SCM_MUTEX_DATA (mx);
1227 scm_puts ("#<mutex ", port);
1228 scm_uintprint ((scm_t_bits)m, 16, port);
1229 scm_puts (">", port);
1230 return 1;
1231 }
1232
/* Allocate and initialize a fat mutex.  RECURSIVE lets the owner
   relock it, UNCHECKED_UNLOCK permits unlocking an unlocked mutex,
   and EXTERNAL_UNLOCK permits unlocking from a non-owner thread.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Give `waiting' a valid SCM before SCM_NEWSMOB so GC never sees an
     uninitialized field; the real queue is allocated afterwards, once
     MX protects M.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1253
/* C-level constructor: a standard mutex with no special flags.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1258
/* Symbols naming the option flags accepted by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1262
1263 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1264 (SCM flags),
1265 "Create a new mutex. ")
1266 #define FUNC_NAME s_scm_make_mutex_with_flags
1267 {
1268 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1269
1270 SCM ptr = flags;
1271 while (! scm_is_null (ptr))
1272 {
1273 SCM flag = SCM_CAR (ptr);
1274 if (scm_is_eq (flag, unchecked_unlock_sym))
1275 unchecked_unlock = 1;
1276 else if (scm_is_eq (flag, allow_external_unlock_sym))
1277 external_unlock = 1;
1278 else if (scm_is_eq (flag, recursive_sym))
1279 recursive = 1;
1280 else
1281 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1282 ptr = SCM_CDR (ptr);
1283 }
1284 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1285 }
1286 #undef FUNC_NAME
1287
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1296
/* Error key used when a lock is obtained on a mutex whose owner
   thread exited without unlocking it.  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1298
/* Acquire MUTEX on behalf of OWNER (the current thread when OWNER is
   unbound), optionally giving up at absolute time TIMEOUT.  On return
   *RET is 1 if the lock was obtained and 0 otherwise.  The result is
   #f on success, or a (key . message) pair describing an error for
   the caller to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it for NEW_OWNER.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small.  */
	      t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The owner died without unlocking: steal the lock, but
	     report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  if (m->recursive)
	    {
	      /* Relocking by the owner just bumps the level.  */
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: fail if the absolute TIMEOUT has
	     already passed, otherwise block and retry after waking.
	     (tv_usec * 1000 converts microseconds to nanoseconds for
	     comparison against the timespec.)  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1382
/* C-level lock: block indefinitely; the current thread becomes the
   owner.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1387
1388 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1389 (SCM m, SCM timeout, SCM owner),
1390 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1391 "blocks until the mutex becomes available. The function returns when "
1392 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1393 "a thread already owns will succeed right away and will not block the "
1394 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1395 #define FUNC_NAME s_scm_lock_mutex_timed
1396 {
1397 SCM exception;
1398 int ret = 0;
1399 scm_t_timespec cwaittime, *waittime = NULL;
1400
1401 SCM_VALIDATE_MUTEX (1, m);
1402
1403 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1404 {
1405 to_timespec (timeout, &cwaittime);
1406 waittime = &cwaittime;
1407 }
1408
1409 exception = fat_mutex_lock (m, waittime, owner, &ret);
1410 if (!scm_is_false (exception))
1411 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1412 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1413 }
1414 #undef FUNC_NAME
1415
/* Within the current dynwind context, lock MUTEX on (re)entry and
   unlock it on unwinding, so the lock is released even on non-local
   exit.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1424
1425 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1426 (SCM mutex),
1427 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1428 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1429 #define FUNC_NAME s_scm_try_mutex
1430 {
1431 SCM exception;
1432 int ret = 0;
1433 scm_t_timespec cwaittime, *waittime = NULL;
1434
1435 SCM_VALIDATE_MUTEX (1, mutex);
1436
1437 to_timespec (scm_from_int(0), &cwaittime);
1438 waittime = &cwaittime;
1439
1440 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1441 if (!scm_is_false (exception))
1442 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1443 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1444 }
1445 #undef FUNC_NAME
1446
/*** Fat condition variables */

/* A condition variable: a low-level lock protecting a queue of
   threads waiting to be signalled.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1456
/* Unlock MUTEX; when COND is bound, additionally wait on it after the
   unlock, bounded by the absolute time WAITTIME when non-NULL.  When
   RELOCK is nonzero the mutex is re-acquired for its previous owner
   before returning (this implements `wait-condition-variable').
   Returns 1 on success and 0 when the wait timed out; signals a
   Scheme error for unlock attempts the mutex's flags do not allow.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  /* Enforce the unlock policy when the caller is not the owner.  */
  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Release one level; at level 0 hand the mutex to the next
	     waiter (or #f) and drop it from our held-mutex list.  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      t->mutexes = scm_delq_x (mutex, t->mutexes);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: let pending asyncs run, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock: no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  t->mutexes = scm_delq_x (mutex, t->mutexes);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1559
/* C-level unlock with no condition variable or timeout.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1564
1565 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1566 (SCM mx, SCM cond, SCM timeout),
1567 "Unlocks @var{mutex} if the calling thread owns the lock on "
1568 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1569 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1570 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1571 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1572 "with a call to @code{unlock-mutex}. Only the last call to "
1573 "@code{unlock-mutex} will actually unlock the mutex. ")
1574 #define FUNC_NAME s_scm_unlock_mutex_timed
1575 {
1576 scm_t_timespec cwaittime, *waittime = NULL;
1577
1578 SCM_VALIDATE_MUTEX (1, mx);
1579 if (! (SCM_UNBNDP (cond)))
1580 {
1581 SCM_VALIDATE_CONDVAR (2, cond);
1582
1583 if (! (SCM_UNBNDP (timeout)))
1584 {
1585 to_timespec (timeout, &cwaittime);
1586 waittime = &cwaittime;
1587 }
1588 }
1589
1590 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1591 }
1592 #undef FUNC_NAME
1593
1594 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1595 (SCM obj),
1596 "Return @code{#t} if @var{obj} is a mutex.")
1597 #define FUNC_NAME s_scm_mutex_p
1598 {
1599 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1600 }
1601 #undef FUNC_NAME
1602
1603 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1604 (SCM mx),
1605 "Return the thread owning @var{mx}, or @code{#f}.")
1606 #define FUNC_NAME s_scm_mutex_owner
1607 {
1608 SCM owner;
1609 fat_mutex *m = NULL;
1610
1611 SCM_VALIDATE_MUTEX (1, mx);
1612 m = SCM_MUTEX_DATA (mx);
1613 scm_i_pthread_mutex_lock (&m->lock);
1614 owner = m->owner;
1615 scm_i_pthread_mutex_unlock (&m->lock);
1616
1617 return owner;
1618 }
1619 #undef FUNC_NAME
1620
SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* Unlocked read: the level may change concurrently, so the value is
     only advisory by the time it is returned.  */
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME
1630
1631 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1632 (SCM mx),
1633 "Returns @code{#t} if the mutex @var{mx} is locked.")
1634 #define FUNC_NAME s_scm_mutex_locked_p
1635 {
1636 SCM_VALIDATE_MUTEX (1, mx);
1637 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1638 }
1639 #undef FUNC_NAME
1640
1641 static int
1642 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1643 {
1644 fat_cond *c = SCM_CONDVAR_DATA (cv);
1645 scm_puts ("#<condition-variable ", port);
1646 scm_uintprint ((scm_t_bits)c, 16, port);
1647 scm_puts (">", port);
1648 return 1;
1649 }
1650
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Give `waiting' a valid SCM before SCM_NEWSMOB so GC never sees an
     uninitialized field; the real queue is made once CV protects C.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1666
1667 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1668 (SCM cv, SCM mx, SCM t),
1669 "Wait until @var{cond-var} has been signalled. While waiting, "
1670 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1671 "is locked again when this function returns. When @var{time} is given, "
1672 "it specifies a point in time where the waiting should be aborted. It "
1673 "can be either a integer as returned by @code{current-time} or a pair "
1674 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1675 "mutex is locked and @code{#f} is returned. When the condition "
1676 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1677 "is returned. ")
1678 #define FUNC_NAME s_scm_timed_wait_condition_variable
1679 {
1680 scm_t_timespec waittime, *waitptr = NULL;
1681
1682 SCM_VALIDATE_CONDVAR (1, cv);
1683 SCM_VALIDATE_MUTEX (2, mx);
1684
1685 if (!SCM_UNBNDP (t))
1686 {
1687 to_timespec (t, &waittime);
1688 waitptr = &waittime;
1689 }
1690
1691 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1692 }
1693 #undef FUNC_NAME
1694
/* Wake exactly one thread (if any) waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1700
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  /* Always #t, whether or not a waiter was actually woken.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1711
1712 static void
1713 fat_cond_broadcast (fat_cond *c)
1714 {
1715 while (scm_is_true (unblock_from_queue (c->waiting)))
1716 ;
1717 }
1718
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  /* Always #t, regardless of how many waiters were woken.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1729
1730 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1731 (SCM obj),
1732 "Return @code{#t} if @var{obj} is a condition variable.")
1733 #define FUNC_NAME s_scm_condition_variable_p
1734 {
1735 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1736 }
1737 #undef FUNC_NAME
1738
1739
1740 \f
1741 /*** Select */
1742
/* Arguments and results of a select(2) call, bundled so they fit
   through the single void * that scm_without_guile passes to
   do_std_select.  */
struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int result;
  int errno_value;
};
1754
1755 static void *
1756 do_std_select (void *args)
1757 {
1758 struct select_args *select_args;
1759
1760 select_args = (struct select_args *) args;
1761
1762 select_args->result =
1763 select (select_args->nfds,
1764 select_args->read_fds, select_args->write_fds,
1765 select_args->except_fds, select_args->timeout);
1766 select_args->errno_value = errno;
1767
1768 return NULL;
1769 }
1770
/* A guile-friendly select(2): the actual wait runs outside guile mode
   (cooperating with the GC), and the calling thread's sleep pipe is
   added to the read set so other threads can interrupt the wait.
   Returns the number of ready descriptors, or -1 with errno set;
   a wakeup through the sleep pipe with no other activity is reported
   as EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, since the wakeup fd goes into it.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the sleep so other threads know how to wake us.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC. */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If we were woken via the sleep pipe, drain the byte and hide the
     wakeup fd from the caller's view of the read set.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1828
1829 /* Convenience API for blocking while in guile mode. */
1830
1831 #if SCM_USE_PTHREAD_THREADS
1832
1833 /* It seems reasonable to not run procedures related to mutex and condition
1834 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1835 without it, and (ii) the only potential gain would be GC latency. See
1836 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1837 for a discussion of the pros and cons. */
1838
1839 int
1840 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1841 {
1842 int res = scm_i_pthread_mutex_lock (mutex);
1843 return res;
1844 }
1845
/* Dynwind unwind handler: unlock the mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1851
/* Lock MUTEX now and arrange for it to be unlocked when the current
   dynwind context is left, even on non-local exit.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1858
/* Wait on COND with MUTEX held, recording MUTEX in `held_mutex'
   around the wait.  NOTE(review): presumably this lets other thread
   machinery release the mutex if this thread is interrupted while
   blocked — confirm against the uses of `held_mutex' elsewhere in
   this file.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1871
/* Like scm_pthread_cond_wait, but give up at the absolute time WT;
   returns the pthreads error code (ETIMEDOUT on expiry).  `held_mutex'
   is maintained around the wait just as in scm_pthread_cond_wait.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1886
1887 #endif
1888
1889 unsigned long
1890 scm_std_usleep (unsigned long usecs)
1891 {
1892 struct timeval tv;
1893 tv.tv_usec = usecs % 1000000;
1894 tv.tv_sec = usecs / 1000000;
1895 scm_std_select (0, NULL, NULL, NULL, &tv);
1896 return tv.tv_sec * 1000000 + tv.tv_usec;
1897 }
1898
1899 unsigned int
1900 scm_std_sleep (unsigned int secs)
1901 {
1902 struct timeval tv;
1903 tv.tv_usec = 0;
1904 tv.tv_sec = secs;
1905 scm_std_select (0, NULL, NULL, NULL, &tv);
1906 return tv.tv_sec;
1907 }
1908
1909 /*** Misc */
1910
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* `handle' is the SCM object wrapping the current scm_i_thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1919
1920 static SCM
1921 scm_c_make_list (size_t n, SCM fill)
1922 {
1923 SCM res = SCM_EOL;
1924 while (n-- > 0)
1925 res = scm_cons (fill, res);
1926 return res;
1927 }
1928
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Preallocate the list spine before locking, then fill it in.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  /* Overwrite the preallocated cells in place, skipping the internal
     signal-delivery thread; stop after N entries even if new threads
     were created since the count was read.  */
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Terminate where we stopped, dropping any unused cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1957
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Thin Scheme wrapper around the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1966
/* C-level variant of `thread-exited?': validate THREAD and read its
   `exited' flag (a plain int read, done without the admin mutex).  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1977
/* Condition initialized in scm_threads_prehistory; used to wake
   sleeping threads (its waiters are elsewhere in this file).  */
static scm_i_pthread_cond_t wake_up_cond;
/* Set to 1 by scm_init_threads once the thread subsystem is up.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Default mutex for scm_dynwind_critical_section when the caller
   passes #f; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
1987
/* Enter a critical section for the current dynwind context: lock
   MUTEX (or the default recursive mutex when MUTEX is #f) and block
   asyncs until the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1996
/*** Initialization */

/* Miscellaneous global lock, initialized in scm_threads_prehistory.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute configured as PTHREAD_MUTEX_RECURSIVE in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2004
/* First-stage initialization, run very early: set up the global
   mutexes and the wake-up condition, then register the calling
   (primordial) thread.  BASE is that thread's stack base.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Build a recursive-mutex attribute for mutexes that must support
     nested locking, such as the critical-section mutex below.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 (base);
}
2021
/* SMOB type tags, assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2025
/* Register the thread, mutex and condition-variable SMOB types and
   finish initializing the current thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2046
/* Install a fresh dynamic state, derived from the current one, as the
   default for newly created threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}
2053
/* Register with Scheme the procedures created by the SCM_DEFINE forms
   in this file (the generated threads.x holds the registrations).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2059
2060 \f
2061 /* IA64-specific things. */
2062
#ifdef __ia64__
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
/* Find the base of the ia64 register backing store by scanning the
   process's memory regions for the RSE stack.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Return the ar.bsp register recorded in the ucontext CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
/* On GNU/Linux, glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Return the ar.bsp value saved in the ucontext OPAQUE.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
2101
2102
2103 /*
2104 Local Variables:
2105 c-file-style: "gnu"
2106 End:
2107 */