/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2006 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


\f

#include "libguile/_scm.h"  /* config.h, _scm.h, __scm.h should be first */

#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>

#include "libguile/validate.h"
#include "libguile/coop-pthreads.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"

#undef DEBUG

/*** Queues */

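/* A queue is represented as a cons pair: the car holds the list of
   enqueued items (oldest first) and the cdr points to the last pair
   of that list, so enqueueing is O(1).  An empty queue is (() . ()).
   dequeue returns SCM_BOOL_F when the queue is empty.  */
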
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

static void
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CAR (q)))
    SCM_SETCAR (q, c);
  else
    SCM_SETCDR (SCM_CDR (q), c);
  SCM_SETCDR (q, c);
}

static SCM
dequeue (SCM q)
{
  SCM c = SCM_CAR (q);
  if (scm_is_null (c))
    return SCM_BOOL_F;
  else
    {
      SCM_SETCAR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CAR (q)))
        SCM_SETCDR (q, SCM_EOL);
      return SCM_CAR (c);
    }
}


/*** Threads */

typedef struct scm_copt_thread {

  /* A condition variable for sleeping on.
   */
  pthread_cond_t sleep_cond;

  /* A link for waiting queues.
   */
  struct scm_copt_thread *next_waiting;

  scm_root_state *root;
  SCM handle;
  pthread_t pthread;
  SCM result;

  SCM joining_threads;

  /* For keeping track of the stack and registers. */
  SCM_STACKITEM *base;
  SCM_STACKITEM *top;
  jmp_buf regs;

} scm_copt_thread;

static SCM
make_thread (SCM creation_protects)
{
  SCM z;
  scm_copt_thread *t = scm_gc_malloc (sizeof(*t), "thread");
  z = scm_cell (scm_tc16_thread, (scm_t_bits)t);
  t->handle = z;
  t->result = creation_protects;
  t->base = NULL;
  t->joining_threads = make_queue ();
  pthread_cond_init (&t->sleep_cond, NULL);
  return z;
}

static void
init_thread_creator (SCM thread, pthread_t th, scm_root_state *r)
{
  scm_copt_thread *t = SCM_THREAD_DATA(thread);
  t->root = r;
  t->pthread = th;
#ifdef DEBUG
  // fprintf (stderr, "%ld created %ld\n", pthread_self (), th);
#endif
}

static void
init_thread_creatant (SCM thread, SCM_STACKITEM *base)
{
  scm_copt_thread *t = SCM_THREAD_DATA(thread);
  t->base = base;
  t->top = NULL;
}

static SCM
thread_mark (SCM obj)
{
  scm_copt_thread *t = SCM_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->joining_threads);
  return t->root->handle;
}

static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  scm_copt_thread *t = SCM_THREAD_DATA (exp);
  scm_puts ("#<thread ", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  if (t->pthread != -1)
    {
      scm_putc (' ', port);
      scm_intprint (t->pthread, 10, port);
    }
  else
    scm_puts (" (exited)", port);
  scm_putc ('>', port);
  return 1;
}

static size_t
thread_free (SCM obj)
{
  scm_copt_thread *t = SCM_THREAD_DATA (obj);
  if (t->pthread != -1)
    abort ();
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}

/*** Fair mutexes */

/* POSIX mutexes are not necessarily fair but since we'd like to use a
   mutex for scheduling, we build a fair one on top of POSIX.
*/

typedef struct fair_mutex {
  pthread_mutex_t lock;
  scm_copt_thread *owner;
  scm_copt_thread *next_waiting, *last_waiting;
} fair_mutex;

static void
fair_mutex_init (fair_mutex *m)
{
  pthread_mutex_init (&m->lock, NULL);
  m->owner = NULL;
  m->next_waiting = NULL;
  m->last_waiting = NULL;
}

static void
fair_mutex_lock_1 (fair_mutex *m, scm_copt_thread *t)
{
  if (m->owner == NULL)
    m->owner = t;
  else
    {
      t->next_waiting = NULL;
      if (m->last_waiting)
        m->last_waiting->next_waiting = t;
      else
        m->next_waiting = t;
      m->last_waiting = t;
      do
        {
          pthread_cond_wait (&t->sleep_cond, &m->lock);
        }
      while (m->owner != t);
      assert (m->next_waiting == t);
      m->next_waiting = t->next_waiting;
      if (m->next_waiting == NULL)
        m->last_waiting = NULL;
    }
  pthread_mutex_unlock (&m->lock);
}

static void
fair_mutex_lock (fair_mutex *m, scm_copt_thread *t)
{
  pthread_mutex_lock (&m->lock);
  fair_mutex_lock_1 (m, t);
}

static void
fair_mutex_unlock_1 (fair_mutex *m)
{
  scm_copt_thread *t;
  pthread_mutex_lock (&m->lock);
  // fprintf (stderr, "%ld unlocking\n", m->owner->pthread);
  if ((t = m->next_waiting) != NULL)
    {
      m->owner = t;
      pthread_cond_signal (&t->sleep_cond);
    }
  else
    m->owner = NULL;
  // fprintf (stderr, "%ld unlocked\n", pthread_self ());
}

static void
fair_mutex_unlock (fair_mutex *m)
{
  fair_mutex_unlock_1 (m);
  pthread_mutex_unlock (&m->lock);
}

/* Temporarily give up the mutex.  This function makes sure that we
   are on the wait queue before starting the next thread.  Otherwise
   the next thread might preempt us and we will have a hard time
   getting on the wait queue.
*/
#if 0
static void
fair_mutex_yield (fair_mutex *m)
{
  scm_copt_thread *self, *next;

  pthread_mutex_lock (&m->lock);

  /* get next thread
   */
  if ((next = m->next_waiting) == NULL)
    {
      /* No use giving it up. */
      pthread_mutex_unlock (&m->lock);
      return;
    }

  /* put us on queue
   */
  self = m->owner;
  self->next_waiting = NULL;
  if (m->last_waiting)
    m->last_waiting->next_waiting = self;
  else
    m->next_waiting = self;
  m->last_waiting = self;

  /* wake up next thread
   */

  m->owner = next;
  pthread_cond_signal (&next->sleep_cond);

  /* wait for mutex
   */
  do
    {
      pthread_cond_wait (&self->sleep_cond, &m->lock);
    }
  while (m->owner != self);
  assert (m->next_waiting == self);
  m->next_waiting = self->next_waiting;
  if (m->next_waiting == NULL)
    m->last_waiting = NULL;

  pthread_mutex_unlock (&m->lock);
}
#else
static void
fair_mutex_yield (fair_mutex *m)
{
  scm_copt_thread *self = m->owner;
  fair_mutex_unlock_1 (m);
  fair_mutex_lock_1 (m, self);
}
#endif

static void
fair_cond_wait (pthread_cond_t *c, fair_mutex *m)
{
  scm_copt_thread *t = m->owner;
  fair_mutex_unlock_1 (m);
  pthread_cond_wait (c, &m->lock);
  fair_mutex_lock_1 (m, t);
}

/* Return 1 when the condition variable was signalled and 0 when not. */
static int
fair_cond_timedwait (pthread_cond_t *c, fair_mutex *m, scm_t_timespec *at)
{
  int res;
  scm_copt_thread *t = m->owner;
  fair_mutex_unlock_1 (m);
  res = pthread_cond_timedwait (c, &m->lock, at); /* XXX - signals? */
  fair_mutex_lock_1 (m, t);
  return res == 0;
}

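/* Illustrative sketch (not compiled) of how the fair mutex primitives
   above are meant to be combined.  The mutex and function names are
   hypothetical; in this file the only fair mutex is guile_mutex,
   which is used via enter_guile/leave_guile below.  */
#if 0
static fair_mutex example_mutex;   /* initialized once with fair_mutex_init */

static void
example_critical_section (scm_copt_thread *self)
{
  fair_mutex_lock (&example_mutex, self);  /* waiters are served in FIFO order */
  /* ... exclusive work ... */
  fair_mutex_yield (&example_mutex);       /* hand the mutex to the next waiter
                                              and requeue ourselves */
  /* ... more exclusive work ... */
  fair_mutex_unlock (&example_mutex);
}
#endif
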
/*** Scheduling */

/* When a thread wants to execute Guile functions, it locks the
   guile_mutex.
*/

static fair_mutex guile_mutex;

static SCM cur_thread;
void *scm_i_copt_thread_data;

void
scm_i_copt_set_thread_data (void *data)
{
  scm_copt_thread *t = SCM_THREAD_DATA (cur_thread);
  scm_i_copt_thread_data = data;
  t->root = (scm_root_state *)data;
}

static void
resume (scm_copt_thread *t)
{
  cur_thread = t->handle;
  scm_i_copt_thread_data = t->root;
  t->top = NULL;
}

static void
enter_guile (scm_copt_thread *t)
{
  fair_mutex_lock (&guile_mutex, t);
  resume (t);
}

static scm_copt_thread *
suspend ()
{
  SCM cur = cur_thread;
  scm_copt_thread *c = SCM_THREAD_DATA (cur);

  /* record top of stack for the GC */
  c->top = (SCM_STACKITEM *)&c;
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (c->regs);

  return c;
}

static scm_copt_thread *
leave_guile ()
{
  scm_copt_thread *c = suspend ();
  fair_mutex_unlock (&guile_mutex);
  return c;
}

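/* The pattern for calling blocking C code while holding the
   guile_mutex is to bracket the call with leave_guile and
   enter_guile, as scm_internal_select and scm_thread_sleep do below.
   A minimal sketch (the function name is hypothetical):  */
#if 0
static void
example_blocking_call (void)
{
  scm_copt_thread *c = leave_guile ();  /* release guile_mutex so other threads
                                           can run Guile code */
  sleep (1);                            /* any blocking system call goes here */
  enter_guile (c);                      /* reacquire guile_mutex before touching
                                           Guile data again */
}
#endif
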
int scm_i_switch_counter;

SCM
scm_yield ()
{
  /* Testing guile_mutex.next_waiting without locking guile_mutex.lock
     is OK since the outcome is not critical.  Even when it changes
     after the test, we do the right thing.
  */
  if (guile_mutex.next_waiting)
    {
      scm_copt_thread *t = suspend ();
      fair_mutex_yield (&guile_mutex);
      resume (t);
    }
  return SCM_BOOL_T;
}

/* Put the current thread to sleep until it is explicitly unblocked.
 */
static void
block ()
{
  scm_copt_thread *t = suspend ();
  fair_cond_wait (&t->sleep_cond, &guile_mutex);
  resume (t);
}

/* Put the current thread to sleep until it is explicitly unblocked
   or until a signal arrives or until time AT (absolute time) is
   reached.  Return 1 when it has been unblocked; 0 otherwise.
 */
static int
timed_block (scm_t_timespec *at)
{
  int res;
  scm_copt_thread *t = suspend ();
  res = fair_cond_timedwait (&t->sleep_cond, &guile_mutex, at);
  resume (t);
  return res;
}

/* Unblock a sleeping thread.
 */
static void
unblock (scm_copt_thread *t)
{
  pthread_cond_signal (&t->sleep_cond);
}

/*** Thread creation */

static SCM all_threads;
static int thread_count;

typedef struct launch_data {
  SCM thread;
  SCM rootcont;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
} launch_data;

static SCM
body_bootstrip (launch_data* data)
{
  /* First save the new root continuation */
  data->rootcont = scm_root->rootcont;
  return (data->body) (data->body_data);
  // return scm_call_0 (data->body);
}

static SCM
handler_bootstrip (launch_data* data, SCM tag, SCM throw_args)
{
  scm_root->rootcont = data->rootcont;
  return (data->handler) (data->handler_data, tag, throw_args);
  // return scm_apply_1 (data->handler, tag, throw_args);
}

static void
really_launch (SCM_STACKITEM *base, launch_data *data)
{
  SCM thread = data->thread;
  scm_copt_thread *t = SCM_THREAD_DATA (thread);
  init_thread_creatant (thread, base);
  enter_guile (t);

  data->rootcont = SCM_BOOL_F;
  t->result =
    scm_internal_cwdr ((scm_t_catch_body) body_bootstrip,
                       data,
                       (scm_t_catch_handler) handler_bootstrip,
                       data, base);
  free (data);

  pthread_detach (t->pthread);
  all_threads = scm_delq (thread, all_threads);
  t->pthread = -1;
  thread_count--;
  leave_guile ();
}

static void *
launch_thread (void *p)
{
  really_launch ((SCM_STACKITEM *)&p, (launch_data *)p);
  return NULL;
}

static SCM
create_thread (scm_t_catch_body body, void *body_data,
               scm_t_catch_handler handler, void *handler_data,
               SCM protects)
{
  SCM thread;

  /* Make new thread.  The first thing the new thread will do is to
     lock guile_mutex.  Thus, we can safely complete its
     initialization after creating it.  While the new thread starts,
     all its data is protected via all_threads.
  */

  {
    pthread_t th;
    SCM root, old_winds;
    launch_data *data;

    /* Unwind wind chain. */
    old_winds = scm_dynwinds;
    scm_dowinds (SCM_EOL, scm_ilength (scm_root->dynwinds));

    /* Allocate thread locals. */
    root = scm_make_root (scm_root->handle);
    data = scm_malloc (sizeof (launch_data));

    /* Make thread. */
    thread = make_thread (protects);
    data->thread = thread;
    data->body = body;
    data->body_data = body_data;
    data->handler = handler;
    data->handler_data = handler_data;
    pthread_create (&th, NULL, launch_thread, (void *) data);
    init_thread_creator (thread, th, SCM_ROOT_STATE (root));
    all_threads = scm_cons (thread, all_threads);
    thread_count++;

    /* Return to old dynamic context. */
    scm_dowinds (old_winds, - scm_ilength (old_winds));
  }

  return thread;
}

SCM
scm_call_with_new_thread (SCM argl)
#define FUNC_NAME s_call_with_new_thread
{
  SCM thunk, handler;

  /* Check arguments. */
  {
    register SCM args = argl;
    if (!scm_is_pair (args))
      SCM_WRONG_NUM_ARGS ();
    thunk = SCM_CAR (args);
    SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)),
                thunk,
                SCM_ARG1,
                s_call_with_new_thread);
    args = SCM_CDR (args);
    if (!scm_is_pair (args))
      SCM_WRONG_NUM_ARGS ();
    handler = SCM_CAR (args);
    SCM_ASSERT (scm_is_true (scm_procedure_p (handler)),
                handler,
                SCM_ARG2,
                s_call_with_new_thread);
    if (!scm_is_null (SCM_CDR (args)))
      SCM_WRONG_NUM_ARGS ();
  }

  return create_thread ((scm_t_catch_body) scm_call_0, thunk,
                        (scm_t_catch_handler) scm_apply_1, handler,
                        argl);
}
#undef FUNC_NAME

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  return create_thread (body, body_data, handler, handler_data, SCM_BOOL_F);
}

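/* Sketch of spawning a thread from C with scm_spawn_thread.  The body
   and handler shown here are hypothetical examples, not part of this
   file: the body runs in the new thread inside a catch-all, and the
   handler receives the throw tag and arguments if the body throws;
   the handler's return value becomes the thread's result as seen by
   scm_join_thread.  */
#if 0
static SCM
example_body (void *data)
{
  return scm_from_int (* (int *) data);
}

static SCM
example_handler (void *data, SCM tag, SCM throw_args)
{
  return SCM_BOOL_F;   /* result of the thread when the body throws */
}

static SCM
example_spawn (int *arg)
{
  return scm_spawn_thread (example_body, arg,
                           example_handler, NULL);
}
#endif
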
/*** Mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we want to support waiting on many things at once.
   Also, we might add things that are nice for debugging.
*/

typedef struct scm_copt_mutex {
  /* the thread currently owning the mutex, or SCM_BOOL_F. */
  SCM owner;
  /* how many times the owner has recursively locked us. */
  int level;
  /* the threads waiting for this mutex. */
  SCM waiting;
} scm_copt_mutex;

static SCM
mutex_mark (SCM mx)
{
  scm_copt_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}

SCM
scm_make_mutex ()
{
  SCM mx = scm_make_smob (scm_tc16_mutex);
  scm_copt_mutex *m = SCM_MUTEX_DATA (mx);
  m->owner = SCM_BOOL_F;
  m->level = 0;
  m->waiting = make_queue ();
  return mx;
}

SCM
scm_lock_mutex (SCM mx)
#define FUNC_NAME s_lock_mutex
{
  scm_copt_mutex *m;
  SCM_ASSERT (SCM_MUTEXP (mx), mx, SCM_ARG1, FUNC_NAME);
  m = SCM_MUTEX_DATA (mx);

  if (m->owner == SCM_BOOL_F)
    m->owner = cur_thread;
  else if (m->owner == cur_thread)
    m->level++;
  else
    {
      while (m->owner != cur_thread)
        {
          enqueue (m->waiting, cur_thread);
          block ();
          SCM_ASYNC_TICK;
        }
    }
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM
scm_try_mutex (SCM mx)
#define FUNC_NAME s_try_mutex
{
  scm_copt_mutex *m;
  SCM_ASSERT (SCM_MUTEXP (mx), mx, SCM_ARG1, FUNC_NAME);
  m = SCM_MUTEX_DATA (mx);

  if (m->owner == SCM_BOOL_F)
    m->owner = cur_thread;
  else if (m->owner == cur_thread)
    m->level++;
  else
    return SCM_BOOL_F;
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM
scm_unlock_mutex (SCM mx)
#define FUNC_NAME s_unlock_mutex
{
  scm_copt_mutex *m;
  SCM_ASSERT (SCM_MUTEXP (mx), mx, SCM_ARG1, FUNC_NAME);
  m = SCM_MUTEX_DATA (mx);

  if (m->owner != cur_thread)
    {
      if (m->owner == SCM_BOOL_F)
        SCM_MISC_ERROR ("mutex not locked", SCM_EOL);
      else
        SCM_MISC_ERROR ("mutex not locked by this thread", SCM_EOL);
    }
  else if (m->level > 0)
    m->level--;
  else
    {
      SCM next = dequeue (m->waiting);
      if (scm_is_true (next))
        {
          m->owner = next;
          unblock (SCM_THREAD_DATA (next));
          scm_yield ();
        }
      else
        m->owner = SCM_BOOL_F;
    }
  return SCM_BOOL_T;
}
#undef FUNC_NAME

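/* Sketch of using these mutexes from C (the variable names are
   hypothetical).  scm_make_mutex, scm_lock_mutex, scm_try_mutex and
   scm_unlock_mutex are all defined above; locking an already-owned
   mutex just increments its recursion level, and scm_lock_mutex runs
   asyncs while waiting.  */
#if 0
static void
example_mutex_use (void)
{
  SCM mx = scm_make_mutex ();
  scm_lock_mutex (mx);            /* blocks until the mutex is available */
  if (scm_is_true (scm_try_mutex (mx)))
    scm_unlock_mutex (mx);        /* undo the recursive lock */
  /* ... critical section ... */
  scm_unlock_mutex (mx);
}
#endif
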
/*** Condition variables */

/* Like mutexes, we implement our own condition variables using the
   primitives above.
*/

/* yeah, we don't need a structure for this, but more things (like a
   name) will likely follow... */

typedef struct scm_copt_cond {
  /* the threads waiting for this condition. */
  SCM waiting;
} scm_copt_cond;

static SCM
cond_mark (SCM cv)
{
  scm_copt_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}

SCM
scm_make_condition_variable (void)
{
  SCM cv = scm_make_smob (scm_tc16_condvar);
  scm_copt_cond *c = SCM_CONDVAR_DATA (cv);
  c->waiting = make_queue ();
  return cv;
}

SCM
scm_timed_wait_condition_variable (SCM cv, SCM mx, SCM t)
#define FUNC_NAME s_wait_condition_variable
{
  scm_copt_cond *c;
  scm_t_timespec waittime;
  int res;

  SCM_ASSERT (SCM_CONDVARP (cv),
              cv,
              SCM_ARG1,
              s_wait_condition_variable);
  SCM_ASSERT (SCM_MUTEXP (mx),
              mx,
              SCM_ARG2,
              s_wait_condition_variable);
  if (!SCM_UNBNDP (t))
    {
      if (scm_is_pair (t))
        {
          SCM_VALIDATE_UINT_COPY (3, SCM_CAR(t), waittime.tv_sec);
          SCM_VALIDATE_UINT_COPY (3, SCM_CDR(t), waittime.tv_nsec);
          waittime.tv_nsec *= 1000;
        }
      else
        {
          SCM_VALIDATE_UINT_COPY (3, t, waittime.tv_sec);
          waittime.tv_nsec = 0;
        }
    }

  c = SCM_CONDVAR_DATA (cv);

  enqueue (c->waiting, cur_thread);
  scm_unlock_mutex (mx);
  if (SCM_UNBNDP (t))
    {
      block ();
      res = 1;
    }
  else
    res = timed_block (&waittime);
  scm_lock_mutex (mx);
  return scm_from_bool (res);
}
#undef FUNC_NAME

SCM
scm_signal_condition_variable (SCM cv)
#define FUNC_NAME s_signal_condition_variable
{
  SCM th;
  scm_copt_cond *c;
  SCM_ASSERT (SCM_CONDVARP (cv),
              cv,
              SCM_ARG1,
              s_signal_condition_variable);
  c = SCM_CONDVAR_DATA (cv);
  if (scm_is_true (th = dequeue (c->waiting)))
    unblock (SCM_THREAD_DATA (th));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM
scm_broadcast_condition_variable (SCM cv)
#define FUNC_NAME s_broadcast_condition_variable
{
  SCM th;
  scm_copt_cond *c;
  SCM_ASSERT (SCM_CONDVARP (cv),
              cv,
              SCM_ARG1,
              s_signal_condition_variable);
  c = SCM_CONDVAR_DATA (cv);
  while (scm_is_true (th = dequeue (c->waiting)))
    unblock (SCM_THREAD_DATA (th));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

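/* Sketch of the usual condition-variable pattern at the C level
   (the flag and function names are hypothetical).  As with POSIX
   condition variables, the wait is wrapped in a loop that rechecks
   the condition, since a broadcast may wake several waiters.  */
#if 0
static SCM example_flag;   /* protected by the mutex MX */

static void
example_wait (SCM cv, SCM mx)
{
  scm_lock_mutex (mx);
  while (scm_is_false (example_flag))
    scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
  /* ... example_flag is now true and MX is held ... */
  scm_unlock_mutex (mx);
}
#endif
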
/*** Initialization */

void
scm_threads_init (SCM_STACKITEM *base)
{
  scm_tc16_thread = scm_make_smob_type ("thread", 0);
  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (scm_copt_mutex));
  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (scm_copt_cond));

  scm_i_switch_counter = SCM_I_THREAD_SWITCH_COUNT;

  fair_mutex_init (&guile_mutex);

  cur_thread = make_thread (SCM_BOOL_F);
  enter_guile (SCM_THREAD_DATA (cur_thread));
  /* root is set later from init.c */
  init_thread_creator (cur_thread, pthread_self(), NULL);
  init_thread_creatant (cur_thread, base);

  thread_count = 1;
  scm_gc_register_root (&all_threads);
  all_threads = scm_cons (cur_thread, SCM_EOL);

  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_set_smob_mark (scm_tc16_mutex, mutex_mark);

  scm_set_smob_mark (scm_tc16_condvar, cond_mark);
}

/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif

void
scm_threads_mark_stacks (void)
{
  volatile SCM c;
  for (c = all_threads; !scm_is_null (c); c = SCM_CDR (c))
    {
      scm_copt_thread *t = SCM_THREAD_DATA (SCM_CAR (c));
      if (t->base == NULL)
        {
          /* Not fully initialized yet. */
          continue;
        }
      if (t->top == NULL)
        {
          /* Active thread */
          /* stack_len is long rather than sizet in order to guarantee
             that &stack_len is long aligned */
#if SCM_STACK_GROWS_UP
          long stack_len = ((SCM_STACKITEM *) (&t) -
                            (SCM_STACKITEM *) t->base);

          /* Protect from the C stack.  This must be the first marking
           * done because it provides information about what objects
           * are "in-use" by the C code.  "in-use" objects are those
           * for which the information about length and base address must
           * remain usable.  This requirement is stricter than a liveness
           * requirement -- in particular, it constrains the implementation
           * of scm_resizuve.
           */
          SCM_FLUSH_REGISTER_WINDOWS;
          /* This assumes that all registers are saved into the jmp_buf */
          setjmp (scm_save_regs_gc_mark);
          scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                              ((size_t) sizeof scm_save_regs_gc_mark
                               / sizeof (SCM_STACKITEM)));

          scm_mark_locations ((SCM_STACKITEM *) t->base,
                              stack_len);
#else
          long stack_len = ((SCM_STACKITEM *) t->base -
                            (SCM_STACKITEM *) (&t));

          /* Protect from the C stack.  This must be the first marking
           * done because it provides information about what objects
           * are "in-use" by the C code.  "in-use" objects are those
           * for which the information about length and base address must
           * remain usable.  This requirement is stricter than a liveness
           * requirement -- in particular, it constrains the implementation
           * of scm_resizuve.
           */
          SCM_FLUSH_REGISTER_WINDOWS;
          /* This assumes that all registers are saved into the jmp_buf */
          setjmp (scm_save_regs_gc_mark);
          scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                              ((size_t) sizeof scm_save_regs_gc_mark
                               / sizeof (SCM_STACKITEM)));

          scm_mark_locations ((SCM_STACKITEM *) &t,
                              stack_len);
#endif
        }
      else
        {
          /* Suspended thread */
#if SCM_STACK_GROWS_UP
          long stack_len = t->top - t->base;
          scm_mark_locations (t->base, stack_len);
#else
          long stack_len = t->base - t->top;
          scm_mark_locations (t->top, stack_len);
#endif
          scm_mark_locations ((SCM_STACKITEM *) t->regs,
                              ((size_t) sizeof(t->regs)
                               / sizeof (SCM_STACKITEM)));
        }
    }
}

/*** Select */

int
scm_internal_select (int nfds,
                     SELECT_TYPE *readfds,
                     SELECT_TYPE *writefds,
                     SELECT_TYPE *exceptfds,
                     struct timeval *timeout)
{
  int res, eno;
  scm_copt_thread *c = leave_guile ();
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  eno = errno;
  enter_guile (c);
  SCM_ASYNC_TICK;
  errno = eno;
  return res;
}

void
scm_init_iselect ()
{
}

unsigned long
scm_thread_usleep (unsigned long usec)
{
  scm_copt_thread *c = leave_guile ();
  usleep (usec);
  enter_guile (c);
  return 0;
}

unsigned long
scm_thread_sleep (unsigned long sec)
{
  unsigned long res;
  scm_copt_thread *c = leave_guile ();
  res = sleep (sec);
  enter_guile (c);
  return res;
}

/*** Misc */

SCM
scm_current_thread (void)
{
  return cur_thread;
}

SCM
scm_all_threads (void)
{
  return all_threads;
}

scm_root_state *
scm_i_thread_root (SCM thread)
{
  if (thread == cur_thread)
    return scm_i_copt_thread_data;
  else
    return ((scm_copt_thread *)SCM_THREAD_DATA (thread))->root;
}

SCM
scm_join_thread (SCM thread)
#define FUNC_NAME s_join_thread
{
  scm_copt_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_THREAD_DATA (thread);
  if (t->pthread != -1)
    {
      scm_copt_thread *c = leave_guile ();
      pthread_join (t->pthread, NULL);
      enter_guile (c);
    }
  res = t->result;
  t->result = SCM_BOOL_F;
  return res;
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_copt_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_THREAD_DATA (thread);
  return t->pthread == -1;
}
#undef FUNC_NAME

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/