add weak sets
[bpt/guile.git] / libguile / weak-set.c
CommitLineData
26b26354
AW
1/* Copyright (C) 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20\f
21#ifdef HAVE_CONFIG_H
22# include <config.h>
23#endif
24
25#include <assert.h>
26
27#include "libguile/_scm.h"
28#include "libguile/hash.h"
29#include "libguile/eval.h"
30#include "libguile/ports.h"
31#include "libguile/bdw-gc.h"
32
33#include "libguile/validate.h"
34#include "libguile/weak-set.h"
35
36
37/* Weak Sets
38
39 This file implements weak sets. One example of a weak set is the
40 symbol table, where you want all instances of the `foo' symbol to map
41 to one object. So when you load a file and it wants a symbol with
42 the characters "foo", you look one up in the table, using custom hash and
43 equality predicates. Only if one is not found will you bother to
44 cons one up and intern it.
45
46 Another use case for weak sets is the set of open ports. Guile needs
47 to be able to flush them all when the process exits, but the set
48 shouldn't prevent the GC from collecting the port (and thus closing
49 it).
50
51 Weak sets are implemented using an open-addressed hash table.
52 Basically this means that there is an array of entries, and the item
53 is expected to be found in the slot corresponding to its hash code,
54 modulo the length of the array.
55
56 Collisions are handled using linear probing with the Robin Hood
57 technique. See Pedro Celis' paper, "Robin Hood Hashing":
58
59 http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
60
61 The vector of entries is allocated as an "atomic" piece of memory, so
62 that the GC doesn't trace it. When an item is added to the set, a
63 disappearing link is registered to its location. If the item is
64 collected, then that link will be zeroed out.
65
66 An entry is not just an item, though; the hash code is also stored in
67 the entry. We munge hash codes so that they are never 0. In this
68 way we can detect removed entries (key of zero but nonzero hash
69 code), and can then reshuffle elements as needed to maintain the
70 robin hood ordering.
71
72 Compared to buckets-and-chains hash tables, open addressing has the
73 advantage that it is very cache-friendly. It also uses less memory.
74
75 Implementation-wise, there are two things to note.
76
77 1. We assume that hash codes are evenly distributed across the
78 range of unsigned longs. The actual hash code stored in the
79 entry is left-shifted by 1 bit (losing 1 bit of hash precision),
80 and then or'd with 1. In this way we ensure that the hash field
81 of an occupied entry is nonzero. To map to an index, we
82 right-shift the hash by one, divide by the size, and take the
83 remainder.
84
85 2. Since the "keys" (the objects in the set) are stored in an
86 atomic region with disappearing links, they need to be accessed
87 with the GC alloc lock. `copy_weak_entry' will do that for
88 you. The hash code itself can be read outside the lock,
89 though.
90*/
91
92
/* One slot in the entry vector.  HASH is the munged hash code -- never
   0 for an occupied slot (see the file comment) -- and KEY holds the
   item's SCM bits.  The GC zeroes KEY via a disappearing link when the
   item is collected, leaving a nonzero HASH with a zero KEY.  */
typedef struct {
  unsigned long hash;
  scm_t_bits key;
} scm_t_weak_entry;
97
98
/* Argument struct for do_copy_weak_entry: copy *IN to *OUT.  */
struct weak_entry_data {
  scm_t_weak_entry *in;
  scm_t_weak_entry *out;
};
103
104static void*
105do_copy_weak_entry (void *data)
106{
107 struct weak_entry_data *e = data;
108
109 e->out->hash = e->in->hash;
110 e->out->key = e->in->key;
111
112 return NULL;
113}
114
115static void
116copy_weak_entry (scm_t_weak_entry *src, scm_t_weak_entry *dst)
117{
118 struct weak_entry_data data;
119
120 data.in = src;
121 data.out = dst;
122
123 GC_call_with_alloc_lock (do_copy_weak_entry, &data);
124}
125
126
/* The set itself: an open-addressed, Robin Hood-ordered vector of weak
   entries plus the bookkeeping needed for resizing.  Mutators take
   LOCK; the GC vacuum callback only trylocks it.  */
typedef struct {
  scm_t_weak_entry *entries;    /* the data */
  scm_i_pthread_mutex_t lock;   /* the lock */
  unsigned long size;           /* total number of slots. */
  unsigned long n_items;        /* number of items in set */
  unsigned long lower;          /* when to shrink */
  unsigned long upper;          /* when to grow */
  int size_index;               /* index into hashset_size */
  int min_size_index;           /* minimum size_index */
} scm_t_weak_set;
137
138
/* Type test, argument validation, and payload accessor for the
   tc7-tagged weak-set object.  */
#define SCM_WEAK_SET_P(x) (!SCM_IMP (x) && SCM_TYP7(x) == scm_tc7_weak_set)
#define SCM_VALIDATE_WEAK_SET(pos, arg) \
  SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_SET_P, "weak-set")
#define SCM_WEAK_SET(x) ((scm_t_weak_set *) SCM_CELL_WORD_1 (x))
143
144
/* Map a munged hash code to a slot index in a table of SIZE slots,
   dropping the low tag bit first (see the file comment).  */
static unsigned long
hash_to_index (unsigned long hash, unsigned long size)
{
  unsigned long h = hash >> 1;

  return h % size;
}
150
/* Probe distance of an entry with hash code HASH sitting in slot K of a
   table of SIZE slots: how far K is from the entry's preferred slot,
   accounting for wrap-around.  */
static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = (hash >> 1) % size;

  /* If K precedes the origin, the entry was displaced past the end of
     the table and wrapped around.  */
  return (k >= origin) ? (k - origin) : (size - origin + k);
}
162
/* Move the entry at FROM into the slot at TO, rewiring the GC
   disappearing link so it tracks the new location.  An empty FROM
   (hash of 0) just empties TO.  */
static void
move_weak_entry (scm_t_weak_entry *from, scm_t_weak_entry *to)
{
  if (from->hash)
    {
      scm_t_weak_entry copy;

      /* Snapshot FROM under the GC alloc lock; the GC may zero its key
         at any moment.  */
      copy_weak_entry (from, &copy);
      to->hash = copy.hash;
      to->key = copy.key;

      /* Only heap objects have disappearing links to move; a zero key
         means the object was already collected, so no link remains.  */
      if (copy.key && SCM_NIMP (SCM_PACK (copy.key)))
        {
          GC_unregister_disappearing_link ((GC_PTR) &from->key);
          SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &to->key,
                                            (GC_PTR) to->key);
        }
    }
  else
    {
      to->hash = 0;
      to->key = 0;
    }
}
187
/* Free up slot K so a new entry may be stored there, by shifting the
   run of entries starting at K+1 one slot to the right, into the next
   empty slot.  This is the displacement half of Robin Hood insertion:
   an incoming entry with a larger probe distance "robs" the richer
   (closer-to-home) entry currently at K.  */
static void
rob_from_rich (scm_t_weak_set *set, unsigned long k)
{
  unsigned long empty, size;

  size = set->size;

  /* If we are to free up slot K in the set, we need room to do so. */
  assert (set->n_items < size);

  /* Find the first empty slot after K, wrapping around.  */
  empty = k;
  do
    empty = (empty + 1) % size;
  /* Here we access key outside the lock.  Is this a problem?  At first
     glance, I wouldn't think so.  */
  while (set->entries[empty].key);

  /* Shift each entry in (K, EMPTY] one slot rightward, working
     backwards from the empty slot so nothing is overwritten.  */
  do
    {
      unsigned long last = empty ? (empty - 1) : (size - 1);
      move_weak_entry (&set->entries[last], &set->entries[empty]);
      empty = last;
    }
  while (empty != k);

  /* Just for sanity. */
  set->entries[empty].hash = 0;
  set->entries[empty].key = 0;
}
217
/* Slot K was just freed; shuffle displaced successors back toward their
   preferred slots to restore the Robin Hood invariant ("give to the
   poor").  Entries whose weak key has been collected are reaped along
   the way, recursively compacting behind them.  */
static void
give_to_poor (scm_t_weak_set *set, unsigned long k)
{
  /* Slot K was just freed up; possibly shuffle others down. */
  unsigned long size = set->size;

  while (1)
    {
      unsigned long next = (k + 1) % size;
      unsigned long hash;
      scm_t_weak_entry copy;

      hash = set->entries[next].hash;

      /* Stop at an empty slot, or at an entry already in its preferred
         slot -- moving it down would only increase its distance.  */
      if (!hash || hash_to_index (hash, size) == next)
        break;

      copy_weak_entry (&set->entries[next], &copy);

      if (!copy.key)
        /* Lost weak reference. */
        {
          give_to_poor (set, next);
          set->n_items--;
          /* NEXT now holds a different entry (or is empty); re-examine
             it before advancing.  */
          continue;
        }

      move_weak_entry (&set->entries[next], &set->entries[k]);

      k = next;
    }

  /* We have shuffled down any entries that should be shuffled down; now
     free the end. */
  set->entries[k].hash = 0;
  set->entries[k].key = 0;
}
255
256
257\f
258
259/* Growing or shrinking is triggered when the load factor
260 *
261 * L = N / S (N: number of items in set, S: bucket vector length)
262 *
263 * passes an upper limit of 0.9 or a lower limit of 0.2.
264 *
265 * The implementation stores the upper and lower number of items which
266 * trigger a resize in the hashset object.
267 *
268 * Possible hash set sizes (primes) are stored in the array
269 * hashset_size.
270 */
271
/* Candidate table sizes: primes, each roughly double the previous.  */
static unsigned long hashset_size[] = {
  31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
  224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
  57524111, 115048217, 230096423
};

/* Number of entries in hashset_size.  */
#define HASHSET_SIZE_N (sizeof(hashset_size)/sizeof(unsigned long))
279
/* Replace the entry vector with a larger or smaller one, depending on
   whether the load crossed the upper or lower threshold, and rehash
   every live entry into it.  Called with the set's lock held.  */
static void
resize_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int i;
  unsigned long old_size, new_size, old_k;

  old_entries = set->entries;
  old_size = set->size;

  if (set->n_items < set->lower)
    {
      /* rehashing is not triggered when i <= min_size */
      i = set->size_index;
      do
        --i;
      while (i > set->min_size_index
             && set->n_items < hashset_size[i] / 4);
    }
  else
    {
      i = set->size_index + 1;
      if (i >= HASHSET_SIZE_N)
        /* The biggest size currently is 230096423, which for a 32-bit
           machine will occupy 1.5GB of memory at a load of 80%.  There
           is probably something better to do here, but if you have a
           weak map of that size, you are hosed in any case.  */
        abort ();
    }

  /* The vector is "pointerless" so the GC does not trace the weak keys;
     they are kept alive only by their owners, not by the set.  */
  new_size = hashset_size[i];
  new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
                                           "weak set");
  memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));

  set->size_index = i;
  set->size = new_size;
  /* A set already at its minimum size never shrinks.  */
  if (i <= set->min_size_index)
    set->lower = 0;
  else
    set->lower = new_size / 5;
  set->upper = 9 * new_size / 10;
  set->n_items = 0;
  set->entries = new_entries;

  /* Reinsert each surviving entry, maintaining the Robin Hood ordering;
     empty slots and collected keys are simply dropped.  */
  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      copy_weak_entry (&old_entries[old_k], &copy);

      if (!copy.key)
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (set, new_k);
              break;
            }
        }

      set->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;

      /* Register a fresh disappearing link at the key's new address;
         the links into the old vector die with it.  */
      if (SCM_NIMP (SCM_PACK (copy.key)))
        SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &new_entries[new_k].key,
                                          (GC_PTR) new_entries[new_k].key);
    }
}
366
367/* Run after GC via do_vacuum_weak_set, this function runs over the
368 whole table, removing lost weak references, reshuffling the set as it
369 goes. It might resize the set if it reaps enough entries. */
370static void
371vacuum_weak_set (scm_t_weak_set *set)
372{
373 scm_t_weak_entry *entries = set->entries;
374 unsigned long size = set->size;
375 unsigned long k;
376
377 for (k = 0; k < size; k++)
378 {
379 unsigned long hash = entries[k].hash;
380
381 if (hash)
382 {
383 scm_t_weak_entry copy;
384
385 copy_weak_entry (&entries[k], &copy);
386
387 if (!copy.key)
388 /* Lost weak reference; reshuffle. */
389 {
390 give_to_poor (set, k);
391 set->n_items--;
392 }
393 }
394 }
395
396 if (set->n_items < set->lower)
397 resize_set (set);
398}
399
400
401\f
402
/* Look up an item in SET.  HASH is the item's raw hash code; PRED,
   called on a candidate item and CLOSURE, decides whether the candidate
   matches.  Returns the matching item, or DFLT if none is found.  The
   caller must hold the set's lock.  */
static SCM
weak_set_lookup (scm_t_weak_set *set, unsigned long hash,
                 scm_t_set_predicate_fn pred, void *closure,
                 SCM dflt)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  /* Munge the hash so it is never 0; see the file comment.  */
  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found. */
        return dflt;

      if (hash == other_hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              /* Reshuffling pulled a different entry into slot K;
                 re-examine it without advancing DISTANCE.  */
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found. */
            return SCM_PACK (copy.key);
        }

      /* If the entry's distance is less, our key is not in the set. */
      if (entry_distance (other_hash, k, size) < distance)
        return dflt;
    }

  /* If we got here, then we were unfortunate enough to loop through the
     whole set.  Shouldn't happen, but hey. */
  return dflt;
}
456
457
/* Add OBJ, whose raw hash code is HASH, to SET -- unless an equivalent
   item (according to PRED/CLOSURE) is already present, in which case
   that item is returned instead.  Returns the item now in the set.  The
   caller must hold the set's lock.  */
static SCM
weak_set_add_x (scm_t_weak_set *set, unsigned long hash,
                scm_t_set_predicate_fn pred, void *closure,
                SCM obj)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  /* Munge the hash so it is never 0; see the file comment.  */
  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; ; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Found an empty entry. */
        break;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found an entry with this key. */
            return SCM_PACK (copy.key);
        }

      if (set->n_items > set->upper)
        /* Full set, time to resize. */
        {
          resize_set (set);
          /* Undo the munging for the recursive call, which munges the
             hash again.  */
          return weak_set_add_x (set, hash >> 1, pred, closure, obj);
        }

      /* Displace the entry if our distance is less, otherwise keep
         looking. */
      if (entry_distance (other_hash, k, size) < distance)
        {
          rob_from_rich (set, k);
          break;
        }
    }

  set->n_items++;
  entries[k].hash = hash;
  entries[k].key = SCM_UNPACK (obj);

  /* Hold heap objects weakly: the GC will zero the key through this
     disappearing link when OBJ is collected.  */
  if (SCM_NIMP (obj))
    SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &entries[k].key,
                                      (GC_PTR) SCM2PTR (obj));

  return obj;
}
528
529
/* Remove from SET the item whose raw hash code is HASH and which
   matches PRED/CLOSURE, if present.  The caller must hold the set's
   lock.  */
static void
weak_set_remove_x (scm_t_weak_set *set, unsigned long hash,
                   scm_t_set_predicate_fn pred, void *closure)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  /* Munge the hash so it is never 0; see the file comment.  */
  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found. */
        return;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found an entry with this key. */
            {
              entries[k].hash = 0;
              entries[k].key = 0;

              /* Drop the disappearing link before the slot is reused.  */
              if (SCM_NIMP (SCM_PACK (copy.key)))
                GC_unregister_disappearing_link ((GC_PTR) &entries[k].key);

              if (--set->n_items < set->lower)
                resize_set (set);
              else
                /* Restore the Robin Hood invariant around the freed
                   slot.  */
                give_to_poor (set, k);

              return;
            }
        }

      /* If the entry's distance is less, our key is not in the set. */
      if (entry_distance (other_hash, k, size) < distance)
        return;
    }
}
591
592
593\f
594static SCM
595make_weak_set (unsigned long k)
596{
597 scm_t_weak_set *set;
598
599 int i = 0, n = k ? k : 31;
600 while (i + 1 < HASHSET_SIZE_N && n > hashset_size[i])
601 ++i;
602 n = hashset_size[i];
603
604 set = scm_gc_malloc (sizeof (*set), "weak-set");
605 set->entries = scm_gc_malloc_pointerless (n * sizeof(scm_t_weak_entry),
606 "weak-set");
607 memset (set->entries, 0, n * sizeof(scm_t_weak_entry));
608 set->n_items = 0;
609 set->size = n;
610 set->lower = 0;
611 set->upper = 9 * n / 10;
612 set->size_index = i;
613 set->min_size_index = i;
614 scm_i_pthread_mutex_init (&set->lock, NULL);
615
616 return scm_cell (scm_tc7_weak_set, (scm_t_bits)set);
617}
618
619void
620scm_i_weak_set_print (SCM exp, SCM port, scm_print_state *pstate)
621{
622 scm_puts ("#<", port);
623 scm_puts ("weak-set ", port);
624 scm_uintprint (SCM_WEAK_SET (exp)->n_items, 10, port);
625 scm_putc ('/', port);
626 scm_uintprint (SCM_WEAK_SET (exp)->size, 10, port);
627 scm_puts (">", port);
628}
629
630static void
631do_vacuum_weak_set (SCM set)
632{
633 scm_t_weak_set *s;
634
635 s = SCM_WEAK_SET (set);
636
637 if (scm_i_pthread_mutex_trylock (&s->lock) == 0)
638 {
639 vacuum_weak_set (s);
640 scm_i_pthread_mutex_unlock (&s->lock);
641 }
642
643 return;
644}
645
646/* The before-gc C hook only runs if GC_set_start_callback is available,
647 so if not, fall back on a finalizer-based implementation. */
648static int
649weak_gc_callback (void **weak)
650{
651 void *val = weak[0];
652 void (*callback) (SCM) = weak[1];
653
654 if (!val)
655 return 0;
656
657 callback (PTR2SCM (val));
658
659 return 1;
660}
661
#ifdef HAVE_GC_SET_START_CALLBACK
/* Hook wrapper: run the registered callback, removing ourselves from
   the before-GC hook once the weakly-held object has died.  */
static void*
weak_gc_hook (void *hook_data, void *fn_data, void *data)
{
  if (!weak_gc_callback (fn_data))
    scm_c_hook_remove (&scm_before_gc_c_hook, weak_gc_hook, fn_data);

  return NULL;
}
#else
/* Finalizer fallback: re-register after each run so the callback keeps
   firing at every GC while the object remains alive.  */
static void
weak_gc_finalizer (void *ptr, void *data)
{
  if (weak_gc_callback (ptr))
    GC_REGISTER_FINALIZER_NO_ORDER (ptr, weak_gc_finalizer, data, NULL, NULL);
}
#endif
679
680static void
681scm_c_register_weak_gc_callback (SCM obj, void (*callback) (SCM))
682{
683 void **weak = GC_MALLOC_ATOMIC (sizeof (void*) * 2);
684
685 weak[0] = SCM2PTR (obj);
686 weak[1] = (void*)callback;
687 GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak, SCM2PTR (obj));
688
689#ifdef HAVE_GC_SET_START_CALLBACK
690 scm_c_hook_add (&scm_after_gc_c_hook, weak_gc_hook, weak, 0);
691#else
692 GC_REGISTER_FINALIZER_NO_ORDER (weak, weak_gc_finalizer, NULL, NULL, NULL);
693#endif
694}
695
696SCM
697scm_c_make_weak_set (unsigned long k)
698{
699 SCM ret;
700
701 ret = make_weak_set (k);
702
703 scm_c_register_weak_gc_callback (ret, do_vacuum_weak_set);
704
705 return ret;
706}
707
708SCM
709scm_weak_set_p (SCM obj)
710{
711 return scm_from_bool (SCM_WEAK_SET_P (obj));
712}
713
714SCM
715scm_weak_set_clear_x (SCM set)
716{
717 scm_t_weak_set *s = SCM_WEAK_SET (set);
718
719 scm_i_pthread_mutex_lock (&s->lock);
720
721 memset (s->entries, 0, sizeof (scm_t_weak_entry) * s->size);
722 s->n_items = 0;
723
724 scm_i_pthread_mutex_unlock (&s->lock);
725
726 return SCM_UNSPECIFIED;
727}
728
729SCM
730scm_c_weak_set_lookup (SCM set, unsigned long raw_hash,
731 scm_t_set_predicate_fn pred,
732 void *closure, SCM dflt)
733{
734 SCM ret;
735 scm_t_weak_set *s = SCM_WEAK_SET (set);
736
737 scm_i_pthread_mutex_lock (&s->lock);
738
739 ret = weak_set_lookup (s, raw_hash, pred, closure, dflt);
740
741 scm_i_pthread_mutex_unlock (&s->lock);
742
743 return ret;
744}
745
746SCM
747scm_c_weak_set_add_x (SCM set, unsigned long raw_hash,
748 scm_t_set_predicate_fn pred,
749 void *closure, SCM obj)
750{
751 SCM ret;
752 scm_t_weak_set *s = SCM_WEAK_SET (set);
753
754 scm_i_pthread_mutex_lock (&s->lock);
755
756 ret = weak_set_add_x (s, raw_hash, pred, closure, obj);
757
758 scm_i_pthread_mutex_unlock (&s->lock);
759
760 return ret;
761}
762
763void
764scm_c_weak_set_remove_x (SCM set, unsigned long raw_hash,
765 scm_t_set_predicate_fn pred,
766 void *closure)
767{
768 scm_t_weak_set *s = SCM_WEAK_SET (set);
769
770 scm_i_pthread_mutex_lock (&s->lock);
771
772 weak_set_remove_x (s, raw_hash, pred, closure);
773
774 scm_i_pthread_mutex_unlock (&s->lock);
775}
776
777static int
778eq_predicate (SCM x, void *closure)
779{
780 return scm_is_eq (x, PTR2SCM (closure));
781}
782
783SCM
784scm_weak_set_add_x (SCM set, SCM obj)
785{
786 return scm_c_weak_set_add_x (set, scm_ihashq (obj, -1),
787 eq_predicate, SCM2PTR (obj), obj);
788}
789
790SCM
791scm_weak_set_remove_x (SCM set, SCM obj)
792{
793 scm_c_weak_set_remove_x (set, scm_ihashq (obj, -1),
794 eq_predicate, SCM2PTR (obj));
795
796 return SCM_UNSPECIFIED;
797}
798
/* Fold PROC over the live items of SET: for each item,
   INIT = PROC (CLOSURE, item, INIT); returns the final INIT.  */
SCM
scm_c_weak_set_fold (scm_t_set_fold_fn proc, void *closure,
                     SCM init, SCM set)
{
  scm_t_weak_set *s;
  scm_t_weak_entry *entries;
  unsigned long k, size;

  s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  /* NOTE(review): SIZE and ENTRIES are snapshotted here.  If PROC
     re-enters the set and triggers a resize while the lock is dropped
     below, this loop keeps walking the old vector -- confirm this
     best-effort iteration semantics is intended.  */
  size = s->size;
  entries = s->entries;

  for (k = 0; k < size; k++)
    {
      if (entries[k].hash)
        {
          scm_t_weak_entry copy;

          /* Snapshot under the GC alloc lock; the key may be zeroed by
             the collector at any moment.  */
          copy_weak_entry (&entries[k], &copy);

          if (copy.key)
            {
              /* Release set lock while we call the function. */
              scm_i_pthread_mutex_unlock (&s->lock);
              init = proc (closure, SCM_PACK (copy.key), init);
              scm_i_pthread_mutex_lock (&s->lock);
            }
        }
    }

  scm_i_pthread_mutex_unlock (&s->lock);

  return init;
}
836
837static SCM
838fold_trampoline (void *closure, SCM item, SCM init)
839{
840 return scm_call_2 (PTR2SCM (closure), item, init);
841}
842
843SCM
844scm_weak_set_fold (SCM proc, SCM init, SCM set)
845{
846 return scm_c_weak_set_fold (fold_trampoline, SCM2PTR (proc), init, set);
847}
848
849static SCM
850for_each_trampoline (void *closure, SCM item, SCM seed)
851{
852 scm_call_1 (PTR2SCM (closure), item);
853 return seed;
854}
855
856SCM
857scm_weak_set_for_each (SCM proc, SCM set)
858{
859 scm_c_weak_set_fold (for_each_trampoline, SCM2PTR (proc), SCM_BOOL_F, set);
860
861 return SCM_UNSPECIFIED;
862}
863
864static SCM
865map_trampoline (void *closure, SCM item, SCM seed)
866{
867 return scm_cons (scm_call_1 (PTR2SCM (closure), item), seed);
868}
869
870SCM
871scm_weak_set_map_to_list (SCM proc, SCM set)
872{
873 return scm_c_weak_set_fold (map_trampoline, SCM2PTR (proc), SCM_EOL, set);
874}
875
876
/* Register the Scheme-visible procedures defined in this file (snarfed
   into weak-set.x).  Called once at Guile startup.  */
void
scm_init_weak_set ()
{
#include "libguile/weak-set.x"
}
882
883/*
884 Local Variables:
885 c-file-style: "gnu"
886 End:
887*/