/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <assert.h>

#include "libguile/_scm.h"
#include "libguile/hash.h"
#include "libguile/eval.h"
#include "libguile/ports.h"
#include "libguile/bdw-gc.h"

#include "libguile/validate.h"
#include "libguile/weak-set.h"


/* Weak Sets

   This file implements weak sets.  One example of a weak set is the
   symbol table, where you want all instances of the `foo' symbol to map
   to one object.  So when you load a file and it wants a symbol with
   the characters "foo", you look one up in the table, using custom hash
   and equality predicates.  Only if one is not found will you bother to
   cons one up and intern it.

   Another use case for weak sets is the set of open ports.  Guile needs
   to be able to flush them all when the process exits, but the set
   shouldn't prevent the GC from collecting the port (and thus closing
   it).

   Weak sets are implemented using an open-addressed hash table.
   Basically this means that there is an array of entries, and the item
   is expected to be found in the slot corresponding to its hash code,
   modulo the length of the array.

   Collisions are handled using linear probing with the Robin Hood
   technique.  See Pedro Celis' paper, "Robin Hood Hashing":

     http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf

   The vector of entries is allocated as an "atomic" piece of memory, so
   that the GC doesn't trace it.  When an item is added to the set, a
   disappearing link is registered to its location.  If the item is
   collected, then that link will be zeroed out.

   An entry is not just an item, though; the hash code is also stored in
   the entry.  We munge hash codes so that they are never 0.  In this
   way we can detect removed entries (key of zero but nonzero hash
   code), and can then reshuffle elements as needed to maintain the
   Robin Hood ordering.

   Compared to buckets-and-chains hash tables, open addressing has the
   advantage that it is very cache-friendly.  It also uses less memory.

   Implementation-wise, there are two things to note.

     1. We assume that hash codes are evenly distributed across the
        range of unsigned longs.  The actual hash code stored in the
        entry is left-shifted by 1 bit (losing 1 bit of hash precision),
        and then or'd with 1.  In this way we ensure that the hash field
        of an occupied entry is nonzero.  To map to an index, we
        right-shift the hash by one, divide by the size, and take the
        remainder.

     2. Since the "keys" (the objects in the set) are stored in an
        atomic region with disappearing links, they need to be accessed
        with the GC alloc lock.  `copy_weak_entry' will do that for
        you.  The hash code itself can be read outside the lock,
        though.
*/
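
/* A concrete illustration of the munging in point 1 (the numbers here
   are invented for exposition): a raw hash of 42 is stored as
   (42 << 1) | 1 = 85, which is nonzero even when the raw hash is 0,
   and maps to index (85 >> 1) % size = 42 % size -- the same index the
   raw hash would have chosen, modulo the lost top bit.  */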


typedef struct {
  unsigned long hash;
  scm_t_bits key;
} scm_t_weak_entry;


struct weak_entry_data {
  scm_t_weak_entry *in;
  scm_t_weak_entry *out;
};

static void*
do_copy_weak_entry (void *data)
{
  struct weak_entry_data *e = data;

  e->out->hash = e->in->hash;
  e->out->key = e->in->key;

  return NULL;
}

static void
copy_weak_entry (scm_t_weak_entry *src, scm_t_weak_entry *dst)
{
  struct weak_entry_data data;

  data.in = src;
  data.out = dst;

  GC_call_with_alloc_lock (do_copy_weak_entry, &data);
}


typedef struct {
  scm_t_weak_entry *entries;    /* the data */
  scm_i_pthread_mutex_t lock;   /* the lock */
  unsigned long size;           /* total number of slots. */
  unsigned long n_items;        /* number of items in set */
  unsigned long lower;          /* when to shrink */
  unsigned long upper;          /* when to grow */
  int size_index;               /* index into hashset_size */
  int min_size_index;           /* minimum size_index */
} scm_t_weak_set;


#define SCM_WEAK_SET_P(x) (SCM_HAS_TYP7 (x, scm_tc7_weak_set))
#define SCM_VALIDATE_WEAK_SET(pos, arg) \
  SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_SET_P, "weak-set")
#define SCM_WEAK_SET(x) ((scm_t_weak_set *) SCM_CELL_WORD_1 (x))


static unsigned long
hash_to_index (unsigned long hash, unsigned long size)
{
  return (hash >> 1) % size;
}

static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = hash_to_index (hash, size);

  if (k >= origin)
    return k - origin;
  else
    /* The other key was displaced and wrapped around. */
    return size - origin + k;
}
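
/* For example (illustrative values): with size 31, an entry whose hash
   maps to origin 29 but which sits in slot 2 has probe distance
   31 - 29 + 2 = 4, because it was displaced past the end of the array
   and wrapped around.  */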

static void
move_weak_entry (scm_t_weak_entry *from, scm_t_weak_entry *to)
{
  if (from->hash)
    {
      scm_t_weak_entry copy;

      copy_weak_entry (from, &copy);
      to->hash = copy.hash;
      to->key = copy.key;

      if (copy.key && SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
        {
#ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
          GC_move_disappearing_link ((void **) &from->key, (void **) &to->key);
#else
          GC_unregister_disappearing_link ((void **) &from->key);
          SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &to->key,
                                            (void *) to->key);
#endif
        }
    }
  else
    {
      to->hash = 0;
      to->key = 0;
    }
}

static void
rob_from_rich (scm_t_weak_set *set, unsigned long k)
{
  unsigned long empty, size;

  size = set->size;

  /* If we are to free up slot K in the set, we need room to do so.  */
  assert (set->n_items < size);

  empty = k;
  do
    empty = (empty + 1) % size;
  /* Here we access key outside the lock.  Is this a problem?  At first
     glance, I wouldn't think so.  */
  while (set->entries[empty].key);

  do
    {
      unsigned long last = empty ? (empty - 1) : (size - 1);
      move_weak_entry (&set->entries[last], &set->entries[empty]);
      empty = last;
    }
  while (empty != k);

  /* Just for sanity.  */
  set->entries[empty].hash = 0;
  set->entries[empty].key = 0;
}
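
/* An illustrative trace (slot numbers invented for the example): if
   slots 5, 6 and 7 are occupied and slot 8 is free, rob_from_rich (set,
   5) moves the entry in slot 7 to 8, then 6 to 7, then 5 to 6, leaving
   slot 5 empty for an incoming entry that is needier (farther from its
   origin) than the slot's current occupant.  */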

static void
give_to_poor (scm_t_weak_set *set, unsigned long k)
{
  /* Slot K was just freed up; possibly shuffle others down.  */
  unsigned long size = set->size;

  while (1)
    {
      unsigned long next = (k + 1) % size;
      unsigned long hash;
      scm_t_weak_entry copy;

      hash = set->entries[next].hash;

      if (!hash || hash_to_index (hash, size) == next)
        break;

      copy_weak_entry (&set->entries[next], &copy);

      if (!copy.key)
        /* Lost weak reference.  */
        {
          give_to_poor (set, next);
          set->n_items--;
          continue;
        }

      move_weak_entry (&set->entries[next], &set->entries[k]);

      k = next;
    }

  /* We have shuffled down any entries that should be shuffled down; now
     free the end.  */
  set->entries[k].hash = 0;
  set->entries[k].key = 0;
}


\f

/* Growing or shrinking is triggered when the load factor
 *
 *   L = N / S    (N: number of items in set, S: bucket vector length)
 *
 * passes an upper limit of 0.9 or a lower limit of 0.2.
 *
 * The implementation stores the upper and lower number of items which
 * trigger a resize in the hashset object.
 *
 * Possible hash set sizes (primes) are stored in the array
 * hashset_size.
 */
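
/* Worked numbers for the smallest size, computed from the thresholds
   above: with S = 31, the set grows when N exceeds 9 * 31 / 10 = 27
   and shrinks when N drops below 31 / 5 = 6 -- except at the minimum
   size, where `lower' is clamped to 0 so the set never shrinks away.  */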

static unsigned long hashset_size[] = {
  31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
  224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
  57524111, 115048217, 230096423
};

#define HASHSET_SIZE_N (sizeof(hashset_size)/sizeof(unsigned long))

static int
compute_size_index (scm_t_weak_set *set)
{
  int i = set->size_index;

  if (set->n_items < set->lower)
    {
      /* rehashing is not triggered when i <= min_size */
      do
        --i;
      while (i > set->min_size_index
             && set->n_items < hashset_size[i] / 5);
    }
  else if (set->n_items > set->upper)
    {
      ++i;
      if (i >= HASHSET_SIZE_N)
        /* The biggest size currently is 230096423, which for a 32-bit
           machine will occupy 1.5GB of memory at a load of 80%.  There
           is probably something better to do here, but if you have a
           weak map of that size, you are hosed in any case.  */
        abort ();
    }

  return i;
}

static int
is_acceptable_size_index (scm_t_weak_set *set, int size_index)
{
  int computed = compute_size_index (set);

  if (size_index == computed)
    /* We were going to grow or shrink, and allocating the new vector
       didn't change the target size.  */
    return 1;

  if (size_index == computed + 1)
    {
      /* We were going to enlarge the set, but allocating the new
         vector finalized some objects, making an enlargement
         unnecessary.  It might still be a good idea to use the larger
         set, though.  (This branch also gets hit if, while allocating
         the vector, some other thread was actively removing items from
         the set.  That is less likely, though.)  */
      unsigned long new_lower = hashset_size[size_index] / 5;

      return set->size > new_lower;
    }

  if (size_index == computed - 1)
    {
      /* We were going to shrink the set, but when we dropped the lock
         to allocate the new vector, some other thread added elements to
         the set.  */
      return 0;
    }

  /* The computed size differs from our newly allocated size by more
     than one size index -- recalculate.  */
  return 0;
}

static void
resize_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do
    {
      new_size_index = compute_size_index (set);
      if (new_size_index == set->size_index)
        return;
      new_size = hashset_size[new_size_index];
      scm_i_pthread_mutex_unlock (&set->lock);
      /* Allocating memory might cause finalizers to run, which could
         run anything, so drop our lock to avoid deadlocks.  */
      new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
                                               "weak set");
      scm_i_pthread_mutex_lock (&set->lock);
    }
  while (!is_acceptable_size_index (set, new_size_index));

  old_entries = set->entries;
  old_size = set->size;

  memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));

  set->size_index = new_size_index;
  set->size = new_size;
  if (new_size_index <= set->min_size_index)
    set->lower = 0;
  else
    set->lower = new_size / 5;
  set->upper = 9 * new_size / 10;
  set->n_items = 0;
  set->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      copy_weak_entry (&old_entries[old_k], &copy);

      if (!copy.key)
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (set, new_k);
              break;
            }
        }

      set->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;

      if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
        SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &new_entries[new_k].key,
                                          (void *) new_entries[new_k].key);
    }
}

/* Run after GC via do_vacuum_weak_set, this function runs over the
   whole table, removing lost weak references, reshuffling the set as it
   goes.  It might resize the set if it reaps enough entries.  */
static void
vacuum_weak_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *entries = set->entries;
  unsigned long size = set->size;
  unsigned long k;

  for (k = 0; k < size; k++)
    {
      unsigned long hash = entries[k].hash;

      if (hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (set, k);
              set->n_items--;
            }
        }
    }

  if (set->n_items < set->lower)
    resize_set (set);
}


\f

static SCM
weak_set_lookup (scm_t_weak_set *set, unsigned long hash,
                 scm_t_set_predicate_fn pred, void *closure,
                 SCM dflt)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found. */
        return dflt;

      if (hash == other_hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found. */
            return SCM_PACK (copy.key);
        }

      /* If the entry's distance is less, our key is not in the set.  */
      if (entry_distance (other_hash, k, size) < distance)
        return dflt;
    }

  /* If we got here, then we were unfortunate enough to loop through the
     whole set.  Shouldn't happen, but hey.  */
  return dflt;
}
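
/* Why the early exit above is sound (an illustrative case): suppose our
   key's ideal slot is 5, and at slot 7 (probe distance 2) we meet an
   entry whose own distance is 0, i.e. whose origin is slot 7.  Had our
   key been inserted, the Robin Hood rule would have displaced that
   richer entry when we probed past it, so our key cannot lie beyond it
   and the lookup can stop.  */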


static SCM
weak_set_add_x (scm_t_weak_set *set, unsigned long hash,
                scm_t_set_predicate_fn pred, void *closure,
                SCM obj)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; ; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Found an empty entry. */
        break;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found an entry with this key. */
            return SCM_PACK (copy.key);
        }

      if (set->n_items > set->upper)
        /* Full set, time to resize. */
        {
          resize_set (set);
          return weak_set_add_x (set, hash >> 1, pred, closure, obj);
        }

      /* Displace the entry if our distance is less, otherwise keep
         looking. */
      if (entry_distance (other_hash, k, size) < distance)
        {
          rob_from_rich (set, k);
          break;
        }
    }

  set->n_items++;
  entries[k].hash = hash;
  entries[k].key = SCM_UNPACK (obj);

  if (SCM_HEAP_OBJECT_P (obj))
    SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &entries[k].key,
                                      (void *) SCM2PTR (obj));

  return obj;
}


static void
weak_set_remove_x (scm_t_weak_set *set, unsigned long hash,
                   scm_t_set_predicate_fn pred, void *closure)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found. */
        return;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found an entry with this key. */
            {
              entries[k].hash = 0;
              entries[k].key = 0;

              if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
                GC_unregister_disappearing_link ((void **) &entries[k].key);

              if (--set->n_items < set->lower)
                resize_set (set);
              else
                give_to_poor (set, k);

              return;
            }
        }

      /* If the entry's distance is less, our key is not in the set.  */
      if (entry_distance (other_hash, k, size) < distance)
        return;
    }
}


\f
static SCM
make_weak_set (unsigned long k)
{
  scm_t_weak_set *set;

  int i = 0, n = k ? k : 31;
  while (i + 1 < HASHSET_SIZE_N && n > hashset_size[i])
    ++i;
  n = hashset_size[i];

  set = scm_gc_malloc (sizeof (*set), "weak-set");
  set->entries = scm_gc_malloc_pointerless (n * sizeof(scm_t_weak_entry),
                                            "weak-set");
  memset (set->entries, 0, n * sizeof(scm_t_weak_entry));
  set->n_items = 0;
  set->size = n;
  set->lower = 0;
  set->upper = 9 * n / 10;
  set->size_index = i;
  set->min_size_index = i;
  scm_i_pthread_mutex_init (&set->lock, NULL);

  return scm_cell (scm_tc7_weak_set, (scm_t_bits)set);
}

void
scm_i_weak_set_print (SCM exp, SCM port, scm_print_state *pstate)
{
  scm_puts_unlocked ("#<", port);
  scm_puts_unlocked ("weak-set ", port);
  scm_uintprint (SCM_WEAK_SET (exp)->n_items, 10, port);
  scm_putc_unlocked ('/', port);
  scm_uintprint (SCM_WEAK_SET (exp)->size, 10, port);
  scm_puts_unlocked (">", port);
}

static void
do_vacuum_weak_set (SCM set)
{
  scm_t_weak_set *s;

  s = SCM_WEAK_SET (set);

  if (scm_i_pthread_mutex_trylock (&s->lock) == 0)
    {
      vacuum_weak_set (s);
      scm_i_pthread_mutex_unlock (&s->lock);
    }

  return;
}

/* The before-gc C hook only runs if GC_set_start_callback is available,
   so if not, fall back on a finalizer-based implementation.  */
static int
weak_gc_callback (void **weak)
{
  void *val = weak[0];
  void (*callback) (SCM) = weak[1];

  if (!val)
    return 0;

  callback (SCM_PACK_POINTER (val));

  return 1;
}

#ifdef HAVE_GC_SET_START_CALLBACK
static void*
weak_gc_hook (void *hook_data, void *fn_data, void *data)
{
  if (!weak_gc_callback (fn_data))
    scm_c_hook_remove (&scm_before_gc_c_hook, weak_gc_hook, fn_data);

  return NULL;
}
#else
static void
weak_gc_finalizer (void *ptr, void *data)
{
  if (weak_gc_callback (ptr))
    scm_i_set_finalizer (ptr, weak_gc_finalizer, data);
}
#endif

static void
scm_c_register_weak_gc_callback (SCM obj, void (*callback) (SCM))
{
  void **weak = GC_MALLOC_ATOMIC (sizeof (void*) * 2);

  weak[0] = SCM_UNPACK_POINTER (obj);
  weak[1] = (void*)callback;
  GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak, SCM2PTR (obj));

#ifdef HAVE_GC_SET_START_CALLBACK
  scm_c_hook_add (&scm_before_gc_c_hook, weak_gc_hook, weak, 0);
#else
  scm_i_set_finalizer (weak, weak_gc_finalizer, NULL);
#endif
}

SCM
scm_c_make_weak_set (unsigned long k)
{
  SCM ret;

  ret = make_weak_set (k);

  scm_c_register_weak_gc_callback (ret, do_vacuum_weak_set);

  return ret;
}

SCM
scm_weak_set_p (SCM obj)
{
  return scm_from_bool (SCM_WEAK_SET_P (obj));
}

SCM
scm_weak_set_clear_x (SCM set)
{
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  memset (s->entries, 0, sizeof (scm_t_weak_entry) * s->size);
  s->n_items = 0;

  scm_i_pthread_mutex_unlock (&s->lock);

  return SCM_UNSPECIFIED;
}

SCM
scm_c_weak_set_lookup (SCM set, unsigned long raw_hash,
                       scm_t_set_predicate_fn pred,
                       void *closure, SCM dflt)
{
  SCM ret;
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  ret = weak_set_lookup (s, raw_hash, pred, closure, dflt);

  scm_i_pthread_mutex_unlock (&s->lock);

  return ret;
}

SCM
scm_c_weak_set_add_x (SCM set, unsigned long raw_hash,
                      scm_t_set_predicate_fn pred,
                      void *closure, SCM obj)
{
  SCM ret;
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  ret = weak_set_add_x (s, raw_hash, pred, closure, obj);

  scm_i_pthread_mutex_unlock (&s->lock);

  return ret;
}

void
scm_c_weak_set_remove_x (SCM set, unsigned long raw_hash,
                         scm_t_set_predicate_fn pred,
                         void *closure)
{
  scm_t_weak_set *s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  weak_set_remove_x (s, raw_hash, pred, closure);

  scm_i_pthread_mutex_unlock (&s->lock);
}

static int
eq_predicate (SCM x, void *closure)
{
  return scm_is_eq (x, SCM_PACK_POINTER (closure));
}

SCM
scm_weak_set_add_x (SCM set, SCM obj)
{
  return scm_c_weak_set_add_x (set, scm_ihashq (obj, -1),
                               eq_predicate, SCM_UNPACK_POINTER (obj), obj);
}

SCM
scm_weak_set_remove_x (SCM set, SCM obj)
{
  scm_c_weak_set_remove_x (set, scm_ihashq (obj, -1),
                           eq_predicate, SCM_UNPACK_POINTER (obj));

  return SCM_UNSPECIFIED;
}

SCM
scm_c_weak_set_fold (scm_t_set_fold_fn proc, void *closure,
                     SCM init, SCM set)
{
  scm_t_weak_set *s;
  scm_t_weak_entry *entries;
  unsigned long k, size;

  s = SCM_WEAK_SET (set);

  scm_i_pthread_mutex_lock (&s->lock);

  size = s->size;
  entries = s->entries;

  for (k = 0; k < size; k++)
    {
      if (entries[k].hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (copy.key)
            {
              /* Release set lock while we call the function.  */
              scm_i_pthread_mutex_unlock (&s->lock);
              init = proc (closure, SCM_PACK (copy.key), init);
              scm_i_pthread_mutex_lock (&s->lock);
            }
        }
    }

  scm_i_pthread_mutex_unlock (&s->lock);

  return init;
}

static SCM
fold_trampoline (void *closure, SCM item, SCM init)
{
  return scm_call_2 (SCM_PACK_POINTER (closure), item, init);
}

SCM
scm_weak_set_fold (SCM proc, SCM init, SCM set)
{
  return scm_c_weak_set_fold (fold_trampoline, SCM_UNPACK_POINTER (proc), init, set);
}

static SCM
for_each_trampoline (void *closure, SCM item, SCM seed)
{
  scm_call_1 (SCM_PACK_POINTER (closure), item);
  return seed;
}

SCM
scm_weak_set_for_each (SCM proc, SCM set)
{
  scm_c_weak_set_fold (for_each_trampoline, SCM_UNPACK_POINTER (proc), SCM_BOOL_F, set);

  return SCM_UNSPECIFIED;
}

static SCM
map_trampoline (void *closure, SCM item, SCM seed)
{
  return scm_cons (scm_call_1 (SCM_PACK_POINTER (closure), item), seed);
}

SCM
scm_weak_set_map_to_list (SCM proc, SCM set)
{
  return scm_c_weak_set_fold (map_trampoline, SCM_UNPACK_POINTER (proc), SCM_EOL, set);
}
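
/* A hypothetical usage sketch, kept out of the build with #if 0: the
   function name is invented and it is not part of this file's API.  It
   shows how the raw-hash/predicate/closure pieces of the C interface
   fit together, mirroring what scm_weak_set_add_x above does with eq?
   semantics.  */
#if 0
static SCM
example_intern (SCM set, SCM obj)
{
  /* Pass the raw hash; weak_set_add_x munges it to a nonzero stored
     hash internally.  */
  return scm_c_weak_set_add_x (set, scm_ihashq (obj, -1),
                               eq_predicate, SCM_UNPACK_POINTER (obj), obj);
}
#endif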


void
scm_init_weak_set ()
{
#include "libguile/weak-set.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/