Revert "(scm_shell_usage): Note need for subscription to bug-guile@gnu.org."
[bpt/guile.git] / libguile / coop-pthreads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2006 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20
21 #include "libguile/_scm.h" /* config.h, _scm.h, __scm.h should be first */
22
23 #include <unistd.h>
24 #include <stdio.h>
25 #include <assert.h>
26 #include <sys/time.h>
27
28 #include "libguile/validate.h"
29 #include "libguile/coop-pthreads.h"
30 #include "libguile/root.h"
31 #include "libguile/eval.h"
32 #include "libguile/async.h"
33 #include "libguile/ports.h"
34 #include "libguile/gc.h"
35
36 #undef DEBUG
37
38 /*** Queues */
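/* A queue is represented as a pair: the car holds the list of queued
   elements (oldest first) and the cdr points to the last pair of that
   list, so that enqueue can append in constant time.  An empty queue
   is (() . ()), and dequeue returns SCM_BOOL_F when the queue is
   empty.  */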
39
40 static SCM
41 make_queue ()
42 {
43 return scm_cons (SCM_EOL, SCM_EOL);
44 }
45
46 static void
47 enqueue (SCM q, SCM t)
48 {
49 SCM c = scm_cons (t, SCM_EOL);
50 if (scm_is_null (SCM_CAR (q)))
51 SCM_SETCAR (q, c);
52 else
53 SCM_SETCDR (SCM_CDR (q), c);
54 SCM_SETCDR (q, c);
55 }
56
57 static SCM
58 dequeue (SCM q)
59 {
60 SCM c = SCM_CAR (q);
61 if (scm_is_null (c))
62 return SCM_BOOL_F;
63 else
64 {
65 SCM_SETCAR (q, SCM_CDR (c));
66 if (scm_is_null (SCM_CAR (q)))
67 SCM_SETCDR (q, SCM_EOL);
68 return SCM_CAR (c);
69 }
70 }
71
72
73 /*** Threads */
74
75 typedef struct scm_copt_thread {
76
77 /* A condition variable for sleeping on.
78 */
79 pthread_cond_t sleep_cond;
80
81 /* A link for waiting queues.
82 */
83 struct scm_copt_thread *next_waiting;
84
85 scm_root_state *root;
86 SCM handle;
87 pthread_t pthread;
88 SCM result;
89
90 SCM joining_threads;
91
92 /* For keeping track of the stack and registers. */
93 SCM_STACKITEM *base;
94 SCM_STACKITEM *top;
95 jmp_buf regs;
96
97 } scm_copt_thread;
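
/* The base/top/regs fields are used by the garbage collector: `base'
   is the bottom of the thread's C stack, `top' is NULL while the
   thread runs Guile code and is set by suspend() when the thread
   gives up the guile_mutex, and `regs' holds the registers saved at
   that point.  scm_threads_mark_stacks below scans them.  */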
98
99 static SCM
100 make_thread (SCM creation_protects)
101 {
102 SCM z;
103 scm_copt_thread *t = scm_gc_malloc (sizeof(*t), "thread");
104 z = scm_cell (scm_tc16_thread, (scm_t_bits)t);
105 t->handle = z;
106 t->result = creation_protects;
107 t->base = NULL;
108 t->joining_threads = make_queue ();
109 pthread_cond_init (&t->sleep_cond, NULL);
110 return z;
111 }
112
113 static void
114 init_thread_creator (SCM thread, pthread_t th, scm_root_state *r)
115 {
116 scm_copt_thread *t = SCM_THREAD_DATA(thread);
117 t->root = r;
118 t->pthread = th;
119 #ifdef DEBUG
120 // fprintf (stderr, "%ld created %ld\n", pthread_self (), th);
121 #endif
122 }
123
124 static void
125 init_thread_creatant (SCM thread, SCM_STACKITEM *base)
126 {
127 scm_copt_thread *t = SCM_THREAD_DATA(thread);
128 t->base = base;
129 t->top = NULL;
130 }
131
132 static SCM
133 thread_mark (SCM obj)
134 {
135 scm_copt_thread *t = SCM_THREAD_DATA (obj);
136 scm_gc_mark (t->result);
137 scm_gc_mark (t->joining_threads);
138 return t->root->handle;
139 }
140
141 static int
142 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
143 {
144 scm_copt_thread *t = SCM_THREAD_DATA (exp);
145 scm_puts ("#<thread ", port);
146 scm_uintprint ((scm_t_bits)t, 16, port);
147 if (t->pthread != -1)
148 {
149 scm_putc (' ', port);
150 scm_intprint (t->pthread, 10, port);
151 }
152 else
153 scm_puts (" (exited)", port);
154 scm_putc ('>', port);
155 return 1;
156 }
157
158 static size_t
159 thread_free (SCM obj)
160 {
161 scm_copt_thread *t = SCM_THREAD_DATA (obj);
162 if (t->pthread != -1)
163 abort ();
164 scm_gc_free (t, sizeof (*t), "thread");
165 return 0;
166 }
167
168 /*** Fair mutexes */
169
170 /* POSIX mutexes are not necessarily fair but since we'd like to use a
171 mutex for scheduling, we build a fair one on top of POSIX.
172 */
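
/* Fairness is achieved by queueing waiters in FIFO order (the
   next_waiting/last_waiting chain) and by handing the mutex directly
   to the head of that queue in fair_mutex_unlock_1 instead of letting
   the waiters race for it.  Each waiter sleeps on its own sleep_cond
   and is woken individually.  */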
173
174 typedef struct fair_mutex {
175 pthread_mutex_t lock;
176 scm_copt_thread *owner;
177 scm_copt_thread *next_waiting, *last_waiting;
178 } fair_mutex;
179
180 static void
181 fair_mutex_init (fair_mutex *m)
182 {
183 pthread_mutex_init (&m->lock, NULL);
184 m->owner = NULL;
185 m->next_waiting = NULL;
186 m->last_waiting = NULL;
187 }
188
189 static void
190 fair_mutex_lock_1 (fair_mutex *m, scm_copt_thread *t)
191 {
192 if (m->owner == NULL)
193 m->owner = t;
194 else
195 {
196 t->next_waiting = NULL;
197 if (m->last_waiting)
198 m->last_waiting->next_waiting = t;
199 else
200 m->next_waiting = t;
201 m->last_waiting = t;
202 do
203 {
204 pthread_cond_wait (&t->sleep_cond, &m->lock);
205 }
206 while (m->owner != t);
207 assert (m->next_waiting == t);
208 m->next_waiting = t->next_waiting;
209 if (m->next_waiting == NULL)
210 m->last_waiting = NULL;
211 }
212 pthread_mutex_unlock (&m->lock);
213 }
214
215 static void
216 fair_mutex_lock (fair_mutex *m, scm_copt_thread *t)
217 {
218 pthread_mutex_lock (&m->lock);
219 fair_mutex_lock_1 (m, t);
220 }
221
222 static void
223 fair_mutex_unlock_1 (fair_mutex *m)
224 {
225 scm_copt_thread *t;
226 pthread_mutex_lock (&m->lock);
227 // fprintf (stderr, "%ld unlocking\n", m->owner->pthread);
228 if ((t = m->next_waiting) != NULL)
229 {
230 m->owner = t;
231 pthread_cond_signal (&t->sleep_cond);
232 }
233 else
234 m->owner = NULL;
235 // fprintf (stderr, "%ld unlocked\n", pthread_self ());
236 }
237
238 static void
239 fair_mutex_unlock (fair_mutex *m)
240 {
241 fair_mutex_unlock_1 (m);
242 pthread_mutex_unlock (&m->lock);
243 }
244
245 /* Temporarily give up the mutex. This function makes sure that we
246 are on the wait queue before starting the next thread. Otherwise
247 the next thread might preempt us and we will have a hard time
248 getting on the wait queue.
249 */
250 #if 0
251 static void
252 fair_mutex_yield (fair_mutex *m)
253 {
254 scm_copt_thread *self, *next;
255
256 pthread_mutex_lock (&m->lock);
257
258 /* get next thread
259 */
260 if ((next = m->next_waiting) == NULL)
261 {
262 /* No use giving it up. */
263 pthread_mutex_unlock (&m->lock);
264 return;
265 }
266
267 /* put us on queue
268 */
269 self = m->owner;
270 self->next_waiting = NULL;
271 if (m->last_waiting)
272 m->last_waiting->next_waiting = self;
273 else
274 m->next_waiting = self;
275 m->last_waiting = self;
276
277 /* wake up next thread
278 */
279
280 m->owner = next;
281 pthread_cond_signal (&next->sleep_cond);
282
283 /* wait for mutex
284 */
285 do
286 {
287 pthread_cond_wait (&self->sleep_cond, &m->lock);
288 }
289 while (m->owner != self);
290 assert (m->next_waiting == self);
291 m->next_waiting = self->next_waiting;
292 if (m->next_waiting == NULL)
293 m->last_waiting = NULL;
294
295 pthread_mutex_unlock (&m->lock);
296 }
297 #else
298 static void
299 fair_mutex_yield (fair_mutex *m)
300 {
301 scm_copt_thread *self = m->owner;
302 fair_mutex_unlock_1 (m);
303 fair_mutex_lock_1 (m, self);
304 }
305 #endif
306
307 static void
308 fair_cond_wait (pthread_cond_t *c, fair_mutex *m)
309 {
310 scm_copt_thread *t = m->owner;
311 fair_mutex_unlock_1 (m);
312 pthread_cond_wait (c, &m->lock);
313 fair_mutex_lock_1 (m, t);
314 }
315
316 /* Return 1 when the condition variable was signalled and 0 when it timed out. */
317 static int
318 fair_cond_timedwait (pthread_cond_t *c, fair_mutex *m, scm_t_timespec *at)
319 {
320 int res;
321 scm_copt_thread *t = m->owner;
322 fair_mutex_unlock_1 (m);
323 res = pthread_cond_timedwait (c, &m->lock, at); /* XXX - signals? */
324 fair_mutex_lock_1 (m, t);
325 return res == 0;
326 }
327
328 /*** Scheduling */
329
330 /* When a thread wants to execute Guile functions, it locks the
331 guile_mutex.
332 */
333
334 static fair_mutex guile_mutex;
335
336 static SCM cur_thread;
337 void *scm_i_copt_thread_data;
338
339 void
340 scm_i_copt_set_thread_data (void *data)
341 {
342 scm_copt_thread *t = SCM_THREAD_DATA (cur_thread);
343 scm_i_copt_thread_data = data;
344 t->root = (scm_root_state *)data;
345 }
346
347 static void
348 resume (scm_copt_thread *t)
349 {
350 cur_thread = t->handle;
351 scm_i_copt_thread_data = t->root;
352 t->top = NULL;
353 }
354
355 static void
356 enter_guile (scm_copt_thread *t)
357 {
358 fair_mutex_lock (&guile_mutex, t);
359 resume (t);
360 }
361
362 static scm_copt_thread *
363 suspend ()
364 {
365 SCM cur = cur_thread;
366 scm_copt_thread *c = SCM_THREAD_DATA (cur);
367
368 /* record top of stack for the GC */
369 c->top = (SCM_STACKITEM *)&c;
370 /* save registers. */
371 SCM_FLUSH_REGISTER_WINDOWS;
372 setjmp (c->regs);
373
374 return c;
375 }
376
377 static scm_copt_thread *
378 leave_guile ()
379 {
380 scm_copt_thread *c = suspend ();
381 fair_mutex_unlock (&guile_mutex);
382 return c;
383 }
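
/* The intended usage of enter_guile/leave_guile, as seen in
   scm_internal_select and scm_thread_sleep below, is to give up the
   guile_mutex around blocking system calls so that other threads can
   run Guile code in the meantime.  A minimal sketch, kept out of the
   build; "wait_for_something" is a stand-in for any blocking call,
   not a real function:  */
#if 0
static int
blocking_call_sketch (void)
{
  int res;
  scm_copt_thread *self = leave_guile (); /* save stack top & regs, drop lock */
  res = wait_for_something ();            /* other threads may enter Guile now */
  enter_guile (self);                     /* reacquire the lock and resume */
  return res;
}
#endif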
384
385 int scm_i_switch_counter;
386
387 SCM
388 scm_yield ()
389 {
390 /* Testing guile_mutex.next_waiting without locking guile_mutex.lock
391 is OK since the outcome is not critical. Even when it changes
392 after the test, we do the right thing.
393 */
394 if (guile_mutex.next_waiting)
395 {
396 scm_copt_thread *t = suspend ();
397 fair_mutex_yield (&guile_mutex);
398 resume (t);
399 }
400 return SCM_BOOL_T;
401 }
402
403 /* Put the current thread to sleep until it is explicitly unblocked.
404 */
405 static void
406 block ()
407 {
408 scm_copt_thread *t = suspend ();
409 fair_cond_wait (&t->sleep_cond, &guile_mutex);
410 resume (t);
411 }
412
413 /* Put the current thread to sleep until it is explicitly unblocked
414 or until a signal arrives or until time AT (absolute time) is
415 reached. Return 1 when it has been unblocked; 0 otherwise.
416 */
417 static int
418 timed_block (scm_t_timespec *at)
419 {
420 int res;
421 scm_copt_thread *t = suspend ();
422 res = fair_cond_timedwait (&t->sleep_cond, &guile_mutex, at);
423 resume (t);
424 return res;
425 }
426
427 /* Unblock a sleeping thread.
428 */
429 static void
430 unblock (scm_copt_thread *t)
431 {
432 pthread_cond_signal (&t->sleep_cond);
433 }
434
435 /*** Thread creation */
436
437 static SCM all_threads;
438 static int thread_count;
439
440 typedef struct launch_data {
441 SCM thread;
442 SCM rootcont;
443 scm_t_catch_body body;
444 void *body_data;
445 scm_t_catch_handler handler;
446 void *handler_data;
447 } launch_data;
448
449 static SCM
450 body_bootstrip (launch_data* data)
451 {
452 /* First save the new root continuation */
453 data->rootcont = scm_root->rootcont;
454 return (data->body) (data->body_data);
455 // return scm_call_0 (data->body);
456 }
457
458 static SCM
459 handler_bootstrip (launch_data* data, SCM tag, SCM throw_args)
460 {
461 scm_root->rootcont = data->rootcont;
462 return (data->handler) (data->handler_data, tag, throw_args);
463 // return scm_apply_1 (data->handler, tag, throw_args);
464 }
465
466 static void
467 really_launch (SCM_STACKITEM *base, launch_data *data)
468 {
469 SCM thread = data->thread;
470 scm_copt_thread *t = SCM_THREAD_DATA (thread);
471 init_thread_creatant (thread, base);
472 enter_guile (t);
473
474 data->rootcont = SCM_BOOL_F;
475 t->result =
476 scm_internal_cwdr ((scm_t_catch_body) body_bootstrip,
477 data,
478 (scm_t_catch_handler) handler_bootstrip,
479 data, base);
480 free (data);
481
482 pthread_detach (t->pthread);
483 all_threads = scm_delq (thread, all_threads);
484 t->pthread = -1;
485 thread_count--;
486 leave_guile ();
487 }
488
489 static void *
490 launch_thread (void *p)
491 {
492 really_launch ((SCM_STACKITEM *)&p, (launch_data *)p);
493 return NULL;
494 }
495
496 static SCM
497 create_thread (scm_t_catch_body body, void *body_data,
498 scm_t_catch_handler handler, void *handler_data,
499 SCM protects)
500 {
501 SCM thread;
502
503 /* Make new thread. The first thing the new thread will do is to
504 lock guile_mutex. Thus, we can safely complete its
505 initialization after creating it. While the new thread starts,
506 all its data is protected via all_threads.
507 */
508
509 {
510 pthread_t th;
511 SCM root, old_winds;
512 launch_data *data;
513
514 /* Unwind wind chain. */
515 old_winds = scm_dynwinds;
516 scm_dowinds (SCM_EOL, scm_ilength (scm_root->dynwinds));
517
518 /* Allocate thread locals. */
519 root = scm_make_root (scm_root->handle);
520 data = scm_malloc (sizeof (launch_data));
521
522 /* Make thread. */
523 thread = make_thread (protects);
524 data->thread = thread;
525 data->body = body;
526 data->body_data = body_data;
527 data->handler = handler;
528 data->handler_data = handler_data;
529 pthread_create (&th, NULL, launch_thread, (void *) data);
530 init_thread_creator (thread, th, SCM_ROOT_STATE (root));
531 all_threads = scm_cons (thread, all_threads);
532 thread_count++;
533
534 /* Return to old dynamic context. */
535 scm_dowinds (old_winds, - scm_ilength (old_winds));
536 }
537
538 return thread;
539 }
540
541 SCM
542 scm_call_with_new_thread (SCM argl)
543 #define FUNC_NAME s_call_with_new_thread
544 {
545 SCM thunk, handler;
546
547 /* Check arguments. */
548 {
549 register SCM args = argl;
550 if (!scm_is_pair (args))
551 SCM_WRONG_NUM_ARGS ();
552 thunk = SCM_CAR (args);
553 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)),
554 thunk,
555 SCM_ARG1,
556 s_call_with_new_thread);
557 args = SCM_CDR (args);
558 if (!scm_is_pair (args))
559 SCM_WRONG_NUM_ARGS ();
560 handler = SCM_CAR (args);
561 SCM_ASSERT (scm_is_true (scm_procedure_p (handler)),
562 handler,
563 SCM_ARG2,
564 s_call_with_new_thread);
565 if (!scm_is_null (SCM_CDR (args)))
566 SCM_WRONG_NUM_ARGS ();
567 }
568
569 return create_thread ((scm_t_catch_body) scm_call_0, thunk,
570 (scm_t_catch_handler) scm_apply_1, handler,
571 argl);
572 }
573 #undef FUNC_NAME
574
575 SCM
576 scm_spawn_thread (scm_t_catch_body body, void *body_data,
577 scm_t_catch_handler handler, void *handler_data)
578 {
579 return create_thread (body, body_data, handler, handler_data, SCM_BOOL_F);
580 }
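
/* A usage sketch for scm_spawn_thread, kept out of the build; the
   example_* names below are illustrative only and not part of this
   file:  */
#if 0
static SCM
example_body (void *data)
{
  /* Runs in the new thread while it holds the guile_mutex.  */
  return scm_from_int (*(int *) data);
}

static SCM
example_handler (void *data, SCM tag, SCM throw_args)
{
  /* Called when the body throws; here we just return #f.  */
  return SCM_BOOL_F;
}

static SCM
spawn_example (void)
{
  static int value = 42;
  return scm_spawn_thread (example_body, &value,
                           example_handler, NULL);
}
#endif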
581
582 /*** Mutexes */
583
584 /* We implement our own mutex type since we want them to be 'fair', we
585 want to do fancy things while waiting for them (like running
586 asyncs) and we want to support waiting on many things at once.
587 Also, we might add things that are nice for debugging.
588 */
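
/* A mutex is recursive for its owner: locking it again merely
   increments `level', and unlocking decrements `level' before the
   mutex is actually released.  Threads that find the mutex taken
   enqueue themselves on `waiting' and block; scm_unlock_mutex hands
   the mutex to the first waiter and yields to it.  */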
589
590 typedef struct scm_copt_mutex {
591 /* the thread currently owning the mutex, or SCM_BOOL_F. */
592 SCM owner;
593   /* how many times the owner has re-locked us beyond the first lock. */
594 int level;
595 /* the threads waiting for this mutex. */
596 SCM waiting;
597 } scm_copt_mutex;
598
599 static SCM
600 mutex_mark (SCM mx)
601 {
602 scm_copt_mutex *m = SCM_MUTEX_DATA (mx);
603 scm_gc_mark (m->owner);
604 return m->waiting;
605 }
606
607 SCM
608 scm_make_mutex ()
609 {
610 SCM mx = scm_make_smob (scm_tc16_mutex);
611 scm_copt_mutex *m = SCM_MUTEX_DATA (mx);
612 m->owner = SCM_BOOL_F;
613 m->level = 0;
614 m->waiting = make_queue ();
615 return mx;
616 }
617
618 SCM
619 scm_lock_mutex (SCM mx)
620 #define FUNC_NAME s_lock_mutex
621 {
622 scm_copt_mutex *m;
623 SCM_ASSERT (SCM_MUTEXP (mx), mx, SCM_ARG1, FUNC_NAME);
624 m = SCM_MUTEX_DATA (mx);
625
626 if (m->owner == SCM_BOOL_F)
627 m->owner = cur_thread;
628 else if (m->owner == cur_thread)
629 m->level++;
630 else
631 {
632 while (m->owner != cur_thread)
633 {
634 enqueue (m->waiting, cur_thread);
635 block ();
636 SCM_ASYNC_TICK;
637 }
638 }
639 return SCM_BOOL_T;
640 }
641 #undef FUNC_NAME
642
643 SCM
644 scm_try_mutex (SCM mx)
645 #define FUNC_NAME s_try_mutex
646 {
647 scm_copt_mutex *m;
648 SCM_ASSERT (SCM_MUTEXP (mx), mx, SCM_ARG1, FUNC_NAME);
649 m = SCM_MUTEX_DATA (mx);
650
651 if (m->owner == SCM_BOOL_F)
652 m->owner = cur_thread;
653 else if (m->owner == cur_thread)
654 m->level++;
655 else
656 return SCM_BOOL_F;
657 return SCM_BOOL_T;
658 }
659 #undef FUNC_NAME
660
661 SCM
662 scm_unlock_mutex (SCM mx)
663 #define FUNC_NAME s_unlock_mutex
664 {
665 scm_copt_mutex *m;
666 SCM_ASSERT (SCM_MUTEXP (mx), mx, SCM_ARG1, FUNC_NAME);
667 m = SCM_MUTEX_DATA (mx);
668
669 if (m->owner != cur_thread)
670 {
671 if (m->owner == SCM_BOOL_F)
672 SCM_MISC_ERROR ("mutex not locked", SCM_EOL);
673 else
674 SCM_MISC_ERROR ("mutex not locked by this thread", SCM_EOL);
675 }
676 else if (m->level > 0)
677 m->level--;
678 else
679 {
680 SCM next = dequeue (m->waiting);
681 if (scm_is_true (next))
682 {
683 m->owner = next;
684 unblock (SCM_THREAD_DATA (next));
685 scm_yield ();
686 }
687 else
688 m->owner = SCM_BOOL_F;
689 }
690 return SCM_BOOL_T;
691 }
692 #undef FUNC_NAME
693
694 /*** Condition variables */
695
696 /* Like mutexes, we implement our own condition variables using the
697 primitives above.
698 */
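
/* Waiting works by enqueueing the current thread on the condition
   variable's queue, releasing the mutex, blocking (possibly with a
   timeout), and re-locking the mutex afterwards; signal and broadcast
   unblock the first or all queued threads, respectively.  */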
699
700 /* yeah, we don't need a structure for this, but more things (like a
701 name) will likely follow... */
702
703 typedef struct scm_copt_cond {
704 /* the threads waiting for this condition. */
705 SCM waiting;
706 } scm_copt_cond;
707
708 static SCM
709 cond_mark (SCM cv)
710 {
711 scm_copt_cond *c = SCM_CONDVAR_DATA (cv);
712 return c->waiting;
713 }
714
715 SCM
716 scm_make_condition_variable (void)
717 {
718 SCM cv = scm_make_smob (scm_tc16_condvar);
719 scm_copt_cond *c = SCM_CONDVAR_DATA (cv);
720 c->waiting = make_queue ();
721 return cv;
722 }
723
724 SCM
725 scm_timed_wait_condition_variable (SCM cv, SCM mx, SCM t)
726 #define FUNC_NAME s_wait_condition_variable
727 {
728 scm_copt_cond *c;
729 scm_t_timespec waittime;
730 int res;
731
732 SCM_ASSERT (SCM_CONDVARP (cv),
733 cv,
734 SCM_ARG1,
735 s_wait_condition_variable);
736 SCM_ASSERT (SCM_MUTEXP (mx),
737 mx,
738 SCM_ARG2,
739 s_wait_condition_variable);
740 if (!SCM_UNBNDP (t))
741 {
742 if (scm_is_pair (t))
743 {
744 SCM_VALIDATE_UINT_COPY (3, SCM_CAR(t), waittime.tv_sec);
745 SCM_VALIDATE_UINT_COPY (3, SCM_CDR(t), waittime.tv_nsec);
746 waittime.tv_nsec *= 1000;
747 }
748 else
749 {
750 SCM_VALIDATE_UINT_COPY (3, t, waittime.tv_sec);
751 waittime.tv_nsec = 0;
752 }
753 }
754
755 c = SCM_CONDVAR_DATA (cv);
756
757 enqueue (c->waiting, cur_thread);
758 scm_unlock_mutex (mx);
759 if (SCM_UNBNDP (t))
760 {
761 block ();
762 res = 1;
763 }
764 else
765 res = timed_block (&waittime);
766 scm_lock_mutex (mx);
767 return scm_from_bool (res);
768 }
769 #undef FUNC_NAME
770
771 SCM
772 scm_signal_condition_variable (SCM cv)
773 #define FUNC_NAME s_signal_condition_variable
774 {
775 SCM th;
776 scm_copt_cond *c;
777 SCM_ASSERT (SCM_CONDVARP (cv),
778 cv,
779 SCM_ARG1,
780 s_signal_condition_variable);
781 c = SCM_CONDVAR_DATA (cv);
782 if (scm_is_true (th = dequeue (c->waiting)))
783 unblock (SCM_THREAD_DATA (th));
784 return SCM_BOOL_T;
785 }
786 #undef FUNC_NAME
787
788 SCM
789 scm_broadcast_condition_variable (SCM cv)
790 #define FUNC_NAME s_broadcast_condition_variable
791 {
792 SCM th;
793 scm_copt_cond *c;
794 SCM_ASSERT (SCM_CONDVARP (cv),
795 cv,
796 SCM_ARG1,
797               s_broadcast_condition_variable);
798 c = SCM_CONDVAR_DATA (cv);
799 while (scm_is_true (th = dequeue (c->waiting)))
800 unblock (SCM_THREAD_DATA (th));
801 return SCM_BOOL_T;
802 }
803 #undef FUNC_NAME
804
805 /*** Initialization */
806
807 void
808 scm_threads_init (SCM_STACKITEM *base)
809 {
810 scm_tc16_thread = scm_make_smob_type ("thread", 0);
811 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (scm_copt_mutex));
812 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
813 sizeof (scm_copt_cond));
814
815 scm_i_switch_counter = SCM_I_THREAD_SWITCH_COUNT;
816
817 fair_mutex_init (&guile_mutex);
818
819 cur_thread = make_thread (SCM_BOOL_F);
820 enter_guile (SCM_THREAD_DATA (cur_thread));
821 /* root is set later from init.c */
822 init_thread_creator (cur_thread, pthread_self(), NULL);
823 init_thread_creatant (cur_thread, base);
824
825 thread_count = 1;
826 scm_gc_register_root (&all_threads);
827 all_threads = scm_cons (cur_thread, SCM_EOL);
828
829 scm_set_smob_mark (scm_tc16_thread, thread_mark);
830 scm_set_smob_print (scm_tc16_thread, thread_print);
831 scm_set_smob_free (scm_tc16_thread, thread_free);
832
833 scm_set_smob_mark (scm_tc16_mutex, mutex_mark);
834
835 scm_set_smob_mark (scm_tc16_condvar, cond_mark);
836 }
837
838 /*** Marking stacks */
839
840 /* XXX - what to do with this? Do we need to handle this for blocked
841 threads as well?
842 */
843 #ifdef __ia64__
844 # define SCM_MARK_BACKING_STORE() do { \
845 ucontext_t ctx; \
846 SCM_STACKITEM * top, * bot; \
847 getcontext (&ctx); \
848 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
849 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
850 / sizeof (SCM_STACKITEM))); \
851 bot = (SCM_STACKITEM *) scm_ia64_register_backing_store_base (); \
852 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
853 scm_mark_locations (bot, top - bot); } while (0)
854 #else
855 # define SCM_MARK_BACKING_STORE()
856 #endif
857
858 void
859 scm_threads_mark_stacks (void)
860 {
861 volatile SCM c;
862 for (c = all_threads; !scm_is_null (c); c = SCM_CDR (c))
863 {
864 scm_copt_thread *t = SCM_THREAD_DATA (SCM_CAR (c));
865 if (t->base == NULL)
866 {
867 /* Not fully initialized yet. */
868 continue;
869 }
870 if (t->top == NULL)
871 {
872 /* Active thread */
873         /* stack_len is long rather than size_t in order to guarantee
874 that &stack_len is long aligned */
875 #if SCM_STACK_GROWS_UP
876 long stack_len = ((SCM_STACKITEM *) (&t) -
877                             (SCM_STACKITEM *) t->base);
878
879 /* Protect from the C stack. This must be the first marking
880 * done because it provides information about what objects
881 * are "in-use" by the C code. "in-use" objects are those
882 * for which the information about length and base address must
883 * remain usable. This requirement is stricter than a liveness
884 * requirement -- in particular, it constrains the implementation
885 * of scm_resizuve.
886 */
887 SCM_FLUSH_REGISTER_WINDOWS;
888 /* This assumes that all registers are saved into the jmp_buf */
889 setjmp (scm_save_regs_gc_mark);
890 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
891 ((size_t) sizeof scm_save_regs_gc_mark
892 / sizeof (SCM_STACKITEM)));
893
894         scm_mark_locations ((SCM_STACKITEM *) t->base,
895                             (size_t) stack_len);
896 #else
897 long stack_len = ((SCM_STACKITEM *) t->base -
898 (SCM_STACKITEM *) (&t));
899
900 /* Protect from the C stack. This must be the first marking
901 * done because it provides information about what objects
902 * are "in-use" by the C code. "in-use" objects are those
903 * for which the information about length and base address must
904 * remain usable. This requirement is stricter than a liveness
905 * requirement -- in particular, it constrains the implementation
906 * of scm_resizuve.
907 */
908 SCM_FLUSH_REGISTER_WINDOWS;
909 /* This assumes that all registers are saved into the jmp_buf */
910 setjmp (scm_save_regs_gc_mark);
911 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
912 ((size_t) sizeof scm_save_regs_gc_mark
913 / sizeof (SCM_STACKITEM)));
914
915 scm_mark_locations ((SCM_STACKITEM *) &t,
916 stack_len);
917 #endif
918 }
919 else
920 {
921 /* Suspended thread */
922 #if SCM_STACK_GROWS_UP
923 long stack_len = t->top - t->base;
924 scm_mark_locations (t->base, stack_len);
925 #else
926 long stack_len = t->base - t->top;
927 scm_mark_locations (t->top, stack_len);
928 #endif
929 scm_mark_locations ((SCM_STACKITEM *) t->regs,
930 ((size_t) sizeof(t->regs)
931 / sizeof (SCM_STACKITEM)));
932 }
933 }
934 }
935
936 /*** Select */
937
938 int
939 scm_internal_select (int nfds,
940 SELECT_TYPE *readfds,
941 SELECT_TYPE *writefds,
942 SELECT_TYPE *exceptfds,
943 struct timeval *timeout)
944 {
945 int res, eno;
946 scm_copt_thread *c = leave_guile ();
947 res = select (nfds, readfds, writefds, exceptfds, timeout);
948 eno = errno;
949 enter_guile (c);
950 SCM_ASYNC_TICK;
951 errno = eno;
952 return res;
953 }
954
955 void
956 scm_init_iselect ()
957 {
958 }
959
960 unsigned long
961 scm_thread_usleep (unsigned long usec)
962 {
963 scm_copt_thread *c = leave_guile ();
964 usleep (usec);
965 enter_guile (c);
966 return 0;
967 }
968
969 unsigned long
970 scm_thread_sleep (unsigned long sec)
971 {
972 unsigned long res;
973 scm_copt_thread *c = leave_guile ();
974 res = sleep (sec);
975 enter_guile (c);
976 return res;
977 }
978
979 /*** Misc */
980
981 SCM
982 scm_current_thread (void)
983 {
984 return cur_thread;
985 }
986
987 SCM
988 scm_all_threads (void)
989 {
990 return all_threads;
991 }
992
993 scm_root_state *
994 scm_i_thread_root (SCM thread)
995 {
996 if (thread == cur_thread)
997 return scm_i_copt_thread_data;
998 else
999 return ((scm_copt_thread *)SCM_THREAD_DATA (thread))->root;
1000 }
1001
1002 SCM
1003 scm_join_thread (SCM thread)
1004 #define FUNC_NAME s_join_thread
1005 {
1006 scm_copt_thread *t;
1007 SCM res;
1008
1009 SCM_VALIDATE_THREAD (1, thread);
1010
1011 t = SCM_THREAD_DATA (thread);
1012 if (t->pthread != -1)
1013 {
1014 scm_copt_thread *c = leave_guile ();
1015 pthread_join (t->pthread, NULL);
1016 enter_guile (c);
1017 }
1018 res = t->result;
1019 t->result = SCM_BOOL_F;
1020 return res;
1021 }
1022 #undef FUNC_NAME
1023
1024 int
1025 scm_c_thread_exited_p (SCM thread)
1026 #define FUNC_NAME s_scm_thread_exited_p
1027 {
1028 scm_copt_thread *t;
1029 SCM_VALIDATE_THREAD (1, thread);
1030 t = SCM_THREAD_DATA (thread);
1031 return t->pthread == -1;
1032 }
1033 #undef FUNC_NAME
1034
1035 /*
1036 Local Variables:
1037 c-file-style: "gnu"
1038 End:
1039 */
1040