/* Copyright (C) 2011 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */



#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <assert.h>

#include "libguile/_scm.h"
#include "libguile/hash.h"
#include "libguile/eval.h"
#include "libguile/ports.h"
#include "libguile/bdw-gc.h"

#include "libguile/validate.h"
#include "libguile/weak-set.h"

/* Weak Sets

   This file implements weak sets.  One example of a weak set is the
   symbol table, where you want all instances of the `foo' symbol to map
   to one object.  So when you load a file and it wants a symbol with
   the characters "foo", you look one up in the table, using custom hash
   and equality predicates (a sketch of such a lookup follows this
   comment).  Only if one is not found will you bother to cons one up
   and intern it.

   Another use case for weak sets is the set of open ports.  Guile needs
   to be able to flush them all when the process exits, but the set
   shouldn't prevent the GC from collecting the port (and thus closing
   it).

   Weak sets are implemented using an open-addressed hash table.
   Basically this means that there is an array of entries, and each item
   is expected to be found in the slot corresponding to its hash code,
   modulo the length of the array.

   Collisions are handled using linear probing with the Robin Hood
   technique.  See Pedro Celis' paper, "Robin Hood Hashing":

     http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf

   The vector of entries is allocated as an "atomic" piece of memory, so
   that the GC doesn't trace it.  When an item is added to the set, a
   disappearing link is registered to its location.  If the item is
   collected, then that link will be zeroed out.

   An entry is not just an item, though; the hash code is also stored in
   the entry.  We munge hash codes so that they are never 0.  In this
   way we can detect removed entries (key of zero but nonzero hash
   code), and can then reshuffle elements as needed to maintain the
   Robin Hood ordering.

   Compared to buckets-and-chains hash tables, open addressing has the
   advantage that it is very cache-friendly.  It also uses less memory.

   Implementation-wise, there are two things to note.

     1. We assume that hash codes are evenly distributed across the
        range of unsigned longs.  The actual hash code stored in the
        entry is left-shifted by 1 bit (losing 1 bit of hash precision),
        and then or'd with 1.  In this way we ensure that the hash field
        of an occupied entry is nonzero.  To map to an index, we
        right-shift the hash by one, divide by the size, and take the
        remainder.

     2. Since the "keys" (the objects in the set) are stored in an
        atomic region with disappearing links, they need to be accessed
        with the GC alloc lock.  `copy_weak_entry' will do that for
        you.  The hash code itself can be read outside the lock,
        though.
*/
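
/* For illustration only: a sketch (with hypothetical helpers) of how a
   client such as the symbol table might use the custom-hash,
   custom-predicate entry points defined later in this file,
   scm_c_weak_set_lookup and scm_c_weak_set_add_x.  The helpers
   my_string_hash, my_string_equals_symbol and make_new_symbol are
   stand-ins, not part of this file or of Guile's API.

     static int
     string_matches_symbol (SCM symbol, void *closure)
     {
       const char *chars = closure;
       return my_string_equals_symbol (chars, symbol);
     }

     static SCM
     intern (SCM table, const char *chars)
     {
       unsigned long hash = my_string_hash (chars);
       SCM sym = scm_c_weak_set_lookup (table, hash, string_matches_symbol,
                                        (void *) chars, SCM_BOOL_F);

       if (scm_is_false (sym))
         sym = scm_c_weak_set_add_x (table, hash, string_matches_symbol,
                                     (void *) chars, make_new_symbol (chars));

       return sym;
     }
*/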

typedef struct {
  unsigned long hash;
  scm_t_bits key;
} scm_t_weak_entry;


struct weak_entry_data {
  scm_t_weak_entry *in;
  scm_t_weak_entry *out;
};

static void*
do_copy_weak_entry (void *data)
{
  struct weak_entry_data *e = data;

  e->out->hash = e->in->hash;
  e->out->key = e->in->key;

  return NULL;
}

static void
copy_weak_entry (scm_t_weak_entry *src, scm_t_weak_entry *dst)
{
  struct weak_entry_data data;

  data.in = src;
  data.out = dst;

  GC_call_with_alloc_lock (do_copy_weak_entry, &data);
}


typedef struct {
  scm_t_weak_entry *entries;    /* the data */
  scm_i_pthread_mutex_t lock;   /* the lock */
  unsigned long size;           /* total number of slots. */
  unsigned long n_items;        /* number of items in set */
  unsigned long lower;          /* when to shrink */
  unsigned long upper;          /* when to grow */
  int size_index;               /* index into hashset_size */
  int min_size_index;           /* minimum size_index */
} scm_t_weak_set;


#define SCM_WEAK_SET_P(x) (SCM_HAS_TYP7 (x, scm_tc7_weak_set))
#define SCM_VALIDATE_WEAK_SET(pos, arg) \
  SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_SET_P, "weak-set")
#define SCM_WEAK_SET(x) ((scm_t_weak_set *) SCM_CELL_WORD_1 (x))


static unsigned long
hash_to_index (unsigned long hash, unsigned long size)
{
  return (hash >> 1) % size;
}

static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = hash_to_index (hash, size);

  if (k >= origin)
    return k - origin;
  else
    /* The other key was displaced and wrapped around. */
    return size - origin + k;
}
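
/* An illustrative example of the wraparound case above (numbers made
   up): with size 31, an entry whose hash maps to origin slot 29 but
   which now lives in slot 2 has probe distance 31 - 29 + 2 = 4, i.e. it
   was displaced four slots past its ideal position, counting modulo the
   array length. */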

static void
move_weak_entry (scm_t_weak_entry *from, scm_t_weak_entry *to)
{
  if (from->hash)
    {
      scm_t_weak_entry copy;

      copy_weak_entry (from, &copy);
      to->hash = copy.hash;
      to->key = copy.key;

      if (copy.key && SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
        {
#ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
          GC_move_disappearing_link ((GC_PTR) &from->key, (GC_PTR) &to->key);
#else
          GC_unregister_disappearing_link ((GC_PTR) &from->key);
          SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &to->key,
                                            (GC_PTR) to->key);
#endif
        }
    }
  else
    {
      to->hash = 0;
      to->key = 0;
    }
}

static void
rob_from_rich (scm_t_weak_set *set, unsigned long k)
{
  unsigned long empty, size;

  size = set->size;

  /* If we are to free up slot K in the set, we need room to do so. */
  assert (set->n_items < size);

  empty = k;
  do
    empty = (empty + 1) % size;
  /* Here we access key outside the lock.  Is this a problem?  At first
     glance, I wouldn't think so. */
  while (set->entries[empty].key);

  do
    {
      unsigned long last = empty ? (empty - 1) : (size - 1);
      move_weak_entry (&set->entries[last], &set->entries[empty]);
      empty = last;
    }
  while (empty != k);

  /* Just for sanity. */
  set->entries[empty].hash = 0;
  set->entries[empty].key = 0;
}

static void
give_to_poor (scm_t_weak_set *set, unsigned long k)
{
  /* Slot K was just freed up; possibly shuffle others down. */
  unsigned long size = set->size;

  while (1)
    {
      unsigned long next = (k + 1) % size;
      unsigned long hash;
      scm_t_weak_entry copy;

      hash = set->entries[next].hash;

      if (!hash || hash_to_index (hash, size) == next)
        break;

      copy_weak_entry (&set->entries[next], &copy);

      if (!copy.key)
        /* Lost weak reference. */
        {
          give_to_poor (set, next);
          set->n_items--;
          continue;
        }

      move_weak_entry (&set->entries[next], &set->entries[k]);

      k = next;
    }

  /* We have shuffled down any entries that should be shuffled down; now
     free the end. */
  set->entries[k].hash = 0;
  set->entries[k].key = 0;
}



/* Growing or shrinking is triggered when the load factor
 *
 *   L = N / S    (N: number of items in set, S: bucket vector length)
 *
 * passes an upper limit of 0.9 or a lower limit of 0.2.
 *
 * The implementation stores the upper and lower number of items which
 * trigger a resize in the hashset object.
 *
 * Possible hash set sizes (primes) are stored in the array
 * hashset_size.
 */
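
/* As a concrete (illustrative) example: when resize_set below picks the
   size 113, it sets lower = 113 / 5 = 22 and upper = 9 * 113 / 10 = 101,
   so the table shrinks once fewer than 22 items remain (unless it is
   already at its minimum size, in which case lower is 0) and grows once
   more than 101 items are present. */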

static unsigned long hashset_size[] = {
  31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
  224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
  57524111, 115048217, 230096423
};

#define HASHSET_SIZE_N (sizeof(hashset_size)/sizeof(unsigned long))

static void
resize_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int i;
  unsigned long old_size, new_size, old_k;

  old_entries = set->entries;
  old_size = set->size;

  if (set->n_items < set->lower)
    {
      /* rehashing is not triggered when i <= min_size */
      i = set->size_index;
      do
        --i;
      while (i > set->min_size_index
             && set->n_items < hashset_size[i] / 4);
    }
  else
    {
      i = set->size_index + 1;
      if (i >= HASHSET_SIZE_N)
        /* The biggest size currently is 230096423, which for a 32-bit
           machine will occupy 1.5GB of memory at a load of 80%.  There
           is probably something better to do here, but if you have a
           weak map of that size, you are hosed in any case. */
        abort ();
    }

  new_size = hashset_size[i];
  new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
                                           "weak set");
  memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));

  set->size_index = i;
  set->size = new_size;
  if (i <= set->min_size_index)
    set->lower = 0;
  else
    set->lower = new_size / 5;
  set->upper = 9 * new_size / 10;
  set->n_items = 0;
  set->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      copy_weak_entry (&old_entries[old_k], &copy);

      if (!copy.key)
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (set, new_k);
              break;
            }
        }

      set->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;

      if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
        SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &new_entries[new_k].key,
                                          (GC_PTR) new_entries[new_k].key);
    }
}

/* Run after GC via do_vacuum_weak_set, this function runs over the
   whole table, removing lost weak references, reshuffling the set as it
   goes.  It might resize the set if it reaps enough entries. */
static void
vacuum_weak_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *entries = set->entries;
  unsigned long size = set->size;
  unsigned long k;

  for (k = 0; k < size; k++)
    {
      unsigned long hash = entries[k].hash;

      if (hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
            }
        }
    }

  if (set->n_items < set->lower)
    resize_set (set);
}


static SCM
weak_set_lookup (scm_t_weak_set *set, unsigned long hash,
                 scm_t_set_predicate_fn pred, void *closure,
                 SCM dflt)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found. */
        return dflt;

      if (hash == other_hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found. */
            return SCM_PACK (copy.key);
        }

      /* If the entry's distance is less, our key is not in the set. */
      if (entry_distance (other_hash, k, size) < distance)
        return dflt;
    }

  /* If we got here, then we were unfortunate enough to loop through the
     whole set.  Shouldn't happen, but hey. */
  return dflt;
}

static SCM
weak_set_add_x (scm_t_weak_set *set, unsigned long hash,
                scm_t_set_predicate_fn pred, void *closure,
                SCM obj)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; ; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Found an empty entry. */
        break;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found an entry with this key. */
            return SCM_PACK (copy.key);
        }

      if (set->n_items > set->upper)
        /* Full set, time to resize. */
        {
          resize_set (set);
          return weak_set_add_x (set, hash >> 1, pred, closure, obj);
        }

      /* Displace the entry if our distance is less, otherwise keep
         looking. */
      if (entry_distance (other_hash, k, size) < distance)
        {
          rob_from_rich (set, k);
          break;
        }
    }

  set->n_items++;
  entries[k].hash = hash;
  entries[k].key = SCM_UNPACK (obj);

  if (SCM_HEAP_OBJECT_P (obj))
    SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &entries[k].key,
                                      (GC_PTR) SCM2PTR (obj));

  return obj;
}


static void
weak_set_remove_x (scm_t_weak_set *set, unsigned long hash,
                   scm_t_set_predicate_fn pred, void *closure)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found. */
        return;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found an entry with this key. */
            {
              entries[k].hash = 0;
              entries[k].key = 0;

              if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
                GC_unregister_disappearing_link ((GC_PTR) &entries[k].key);

              if (--set->n_items < set->lower)
                resize_set (set);
              else
                give_to_poor (set, k);

              return;
            }
        }

      /* If the entry's distance is less, our key is not in the set. */
      if (entry_distance (other_hash, k, size) < distance)
        return;
    }
}


static SCM
make_weak_set (unsigned long k)
{
  scm_t_weak_set *set;

  int i = 0, n = k ? k : 31;
  while (i + 1 < HASHSET_SIZE_N && n > hashset_size[i])
    ++i;
  n = hashset_size[i];

  set = scm_gc_malloc (sizeof (*set), "weak-set");
  set->entries = scm_gc_malloc_pointerless (n * sizeof(scm_t_weak_entry),
                                            "weak-set");
  memset (set->entries, 0, n * sizeof(scm_t_weak_entry));
  set->n_items = 0;
  set->size = n;
  set->lower = 0;
  set->upper = 9 * n / 10;
  set->size_index = i;
  set->min_size_index = i;
  scm_i_pthread_mutex_init (&set->lock, NULL);

  return scm_cell (scm_tc7_weak_set, (scm_t_bits)set);
}
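
/* For example (illustrative number), make_weak_set (100) rounds the
   requested capacity up to the next entry in hashset_size, 113, which
   becomes both the initial size and the minimum size the set may later
   shrink back to. */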

void
scm_i_weak_set_print (SCM exp, SCM port, scm_print_state *pstate)
{
  scm_puts_unlocked ("#<", port);
  scm_puts_unlocked ("weak-set ", port);
  scm_uintprint (SCM_WEAK_SET (exp)->n_items, 10, port);
  scm_putc_unlocked ('/', port);
  scm_uintprint (SCM_WEAK_SET (exp)->size, 10, port);
  scm_puts_unlocked (">", port);
}

static void
do_vacuum_weak_set (SCM set)
{
  scm_t_weak_set *s;

  s = SCM_WEAK_SET (set);

  if (scm_i_pthread_mutex_trylock (&s->lock) == 0)
    {
      vacuum_weak_set (s);
      scm_i_pthread_mutex_unlock (&s->lock);
    }

  return;
}

/* The before-gc C hook only runs if GC_set_start_callback is available,
   so if not, fall back on a finalizer-based implementation. */
static int
weak_gc_callback (void **weak)
{
  void *val = weak[0];
  void (*callback) (SCM) = weak[1];

  if (!val)
    return 0;

  callback (SCM_PACK_POINTER (val));

  return 1;
}

#ifdef HAVE_GC_SET_START_CALLBACK
static void*
weak_gc_hook (void *hook_data, void *fn_data, void *data)
{
  if (!weak_gc_callback (fn_data))
    scm_c_hook_remove (&scm_before_gc_c_hook, weak_gc_hook, fn_data);

  return NULL;
}
#else
static void
weak_gc_finalizer (void *ptr, void *data)
{
  if (weak_gc_callback (ptr))
    GC_REGISTER_FINALIZER_NO_ORDER (ptr, weak_gc_finalizer, data, NULL, NULL);
}
#endif

static void
scm_c_register_weak_gc_callback (SCM obj, void (*callback) (SCM))
{
  void **weak = GC_MALLOC_ATOMIC (sizeof (void*) * 2);

  weak[0] = SCM_UNPACK_POINTER (obj);
  weak[1] = (void*)callback;
  GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak, SCM2PTR (obj));

#ifdef HAVE_GC_SET_START_CALLBACK
  scm_c_hook_add (&scm_after_gc_c_hook, weak_gc_hook, weak, 0);
#else
  GC_REGISTER_FINALIZER_NO_ORDER (weak, weak_gc_finalizer, NULL, NULL, NULL);
#endif
}

SCM
scm_c_make_weak_set (unsigned long k)
{
  SCM ret;

  ret = make_weak_set (k);

  scm_c_register_weak_gc_callback (ret, do_vacuum_weak_set);

  return ret;
}

SCM
scm_weak_set_p (SCM obj)
{
  return scm_from_bool (SCM_WEAK_SET_P (obj));
}

SCM
scm_weak_set_clear_x (SCM set)
{
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  memset (s->entries, 0, sizeof (scm_t_weak_entry) * s->size);
  s->n_items = 0;

  scm_i_pthread_mutex_unlock (&s->lock);

  return SCM_UNSPECIFIED;
}

SCM
scm_c_weak_set_lookup (SCM set, unsigned long raw_hash,
                       scm_t_set_predicate_fn pred,
                       void *closure, SCM dflt)
{
  SCM ret;
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  ret = weak_set_lookup (s, raw_hash, pred, closure, dflt);

  scm_i_pthread_mutex_unlock (&s->lock);

  return ret;
}

SCM
scm_c_weak_set_add_x (SCM set, unsigned long raw_hash,
                      scm_t_set_predicate_fn pred,
                      void *closure, SCM obj)
{
  SCM ret;
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  ret = weak_set_add_x (s, raw_hash, pred, closure, obj);

  scm_i_pthread_mutex_unlock (&s->lock);

  return ret;
}

void
scm_c_weak_set_remove_x (SCM set, unsigned long raw_hash,
                         scm_t_set_predicate_fn pred,
                         void *closure)
{
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  weak_set_remove_x (s, raw_hash, pred, closure);

  scm_i_pthread_mutex_unlock (&s->lock);
}

static int
eq_predicate (SCM x, void *closure)
{
  return scm_is_eq (x, SCM_PACK_POINTER (closure));
}

SCM
scm_weak_set_add_x (SCM set, SCM obj)
{
  return scm_c_weak_set_add_x (set, scm_ihashq (obj, -1),
                               eq_predicate, SCM_UNPACK_POINTER (obj), obj);
}

SCM
scm_weak_set_remove_x (SCM set, SCM obj)
{
  scm_c_weak_set_remove_x (set, scm_ihashq (obj, -1),
                           eq_predicate, SCM_UNPACK_POINTER (obj));

  return SCM_UNSPECIFIED;
}

SCM
scm_c_weak_set_fold (scm_t_set_fold_fn proc, void *closure,
                     SCM init, SCM set)
{
  scm_t_weak_set *s;
  scm_t_weak_entry *entries;
  unsigned long k, size;

  s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  size = s->size;
  entries = s->entries;

  for (k = 0; k < size; k++)
    {
      if (entries[k].hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (copy.key)
            {
              /* Release set lock while we call the function. */
              scm_i_pthread_mutex_unlock (&s->lock);
              init = proc (closure, SCM_PACK (copy.key), init);
              scm_i_pthread_mutex_lock (&s->lock);
            }
        }
    }

  scm_i_pthread_mutex_unlock (&s->lock);

  return init;
}

static SCM
fold_trampoline (void *closure, SCM item, SCM init)
{
  return scm_call_2 (SCM_PACK_POINTER (closure), item, init);
}

SCM
scm_weak_set_fold (SCM proc, SCM init, SCM set)
{
  return scm_c_weak_set_fold (fold_trampoline, SCM_UNPACK_POINTER (proc),
                              init, set);
}

static SCM
for_each_trampoline (void *closure, SCM item, SCM seed)
{
  scm_call_1 (SCM_PACK_POINTER (closure), item);
  return seed;
}

SCM
scm_weak_set_for_each (SCM proc, SCM set)
{
  scm_c_weak_set_fold (for_each_trampoline, SCM_UNPACK_POINTER (proc),
                       SCM_BOOL_F, set);

  return SCM_UNSPECIFIED;
}

static SCM
map_trampoline (void *closure, SCM item, SCM seed)
{
  return scm_cons (scm_call_1 (SCM_PACK_POINTER (closure), item), seed);
}

SCM
scm_weak_set_map_to_list (SCM proc, SCM set)
{
  return scm_c_weak_set_fold (map_trampoline, SCM_UNPACK_POINTER (proc),
                              SCM_EOL, set);
}


void
scm_init_weak_set ()
{
#include "libguile/weak-set.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/