/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <assert.h>

#include "libguile/bdw-gc.h"
#include <gc/gc_mark.h>

#include "libguile/_scm.h"
#include "libguile/hash.h"
#include "libguile/eval.h"
#include "libguile/ports.h"

#include "libguile/validate.h"
#include "libguile/weak-table.h"


/* Weak Tables

   This file implements weak hash tables.  Weak hash tables are
   generally used when you want to augment some object with additional
   data, but when you don't have space to store the data in the object.
   For example, procedure properties are implemented with weak tables.

   Weak tables are implemented using an open-addressed hash table.
   Basically this means that there is an array of entries, and each
   item is expected to be found in the slot corresponding to its hash
   code, modulo the length of the array.

   Collisions are handled using linear probing with the Robin Hood
   technique.  See Pedro Celis' paper, "Robin Hood Hashing":

     http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf

   The vector of entries is allocated in such a way that the GC doesn't
   trace the weak values.  For doubly-weak tables, this means that the
   entries are allocated as an "atomic" piece of memory.  Key-weak and
   value-weak tables use a special GC kind with a custom mark procedure.
   When items are added weakly to the table, a disappearing link is
   registered at their locations.  If the referent is collected, that
   link will be zeroed out.

   An entry in the table consists of the key and the value, together
   with the hash code of the key.  We munge hash codes so that they are
   never 0.  In this way we can detect removed entries (key of zero but
   nonzero hash code), and can then reshuffle elements as needed to
   maintain the Robin Hood ordering.

   Compared to buckets-and-chains hash tables, open addressing has the
   advantage that it is very cache-friendly.  It also uses less memory.

   Implementation-wise, there are two things to note.

     1. We assume that hash codes are evenly distributed across the
        range of unsigned longs.  The actual hash code stored in the
        entry is left-shifted by 1 bit (losing 1 bit of hash precision),
        and then or'd with 1.  In this way we ensure that the hash field
        of an occupied entry is nonzero.  To map to an index, we
        right-shift the hash by one, divide by the size, and take the
        remainder.

     2. Since the weak references are stored in an atomic region with
        disappearing links, they need to be accessed with the GC alloc
        lock.  `copy_weak_entry' will do that for you.  The hash code
        itself can be read outside the lock, though.
 */


typedef struct {
  unsigned long hash;
  scm_t_bits key;
  scm_t_bits value;
} scm_t_weak_entry;

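/* Copy the fields of a weak entry while holding the GC allocation
   lock, so that its disappearing links cannot be cleared midway
   through the copy.  */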
struct weak_entry_data {
  scm_t_weak_entry *in;
  scm_t_weak_entry *out;
};

static void*
do_copy_weak_entry (void *data)
{
  struct weak_entry_data *e = data;

  e->out->hash = e->in->hash;
  e->out->key = e->in->key;
  e->out->value = e->in->value;

  return NULL;
}

static void
copy_weak_entry (scm_t_weak_entry *src, scm_t_weak_entry *dst)
{
  struct weak_entry_data data;

  data.in = src;
  data.out = dst;

  GC_call_with_alloc_lock (do_copy_weak_entry, &data);
}

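/* Register disappearing links for ENTRY's key and/or value, as
   appropriate for the weakness KIND of the table.  */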
static void
register_disappearing_links (scm_t_weak_entry *entry,
                             SCM k, SCM v,
                             scm_t_weak_table_kind kind)
{
  if (SCM_UNPACK (k) && SCM_HEAP_OBJECT_P (k)
      && (kind == SCM_WEAK_TABLE_KIND_KEY
          || kind == SCM_WEAK_TABLE_KIND_BOTH))
    SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &entry->key,
                                      (GC_PTR) SCM2PTR (k));

  if (SCM_UNPACK (v) && SCM_HEAP_OBJECT_P (v)
      && (kind == SCM_WEAK_TABLE_KIND_VALUE
          || kind == SCM_WEAK_TABLE_KIND_BOTH))
    SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &entry->value,
                                      (GC_PTR) SCM2PTR (v));
}

static void
unregister_disappearing_links (scm_t_weak_entry *entry,
                               scm_t_weak_table_kind kind)
{
  if (kind == SCM_WEAK_TABLE_KIND_KEY || kind == SCM_WEAK_TABLE_KIND_BOTH)
    GC_unregister_disappearing_link ((GC_PTR) &entry->key);

  if (kind == SCM_WEAK_TABLE_KIND_VALUE || kind == SCM_WEAK_TABLE_KIND_BOTH)
    GC_unregister_disappearing_link ((GC_PTR) &entry->value);
}

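/* Retarget the disappearing links registered for FROM so that they
   point at the corresponding fields of TO.  Used when an entry moves
   within the table.  */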
static void
move_disappearing_links (scm_t_weak_entry *from, scm_t_weak_entry *to,
                         SCM key, SCM value, scm_t_weak_table_kind kind)
{
  if ((kind == SCM_WEAK_TABLE_KIND_KEY || kind == SCM_WEAK_TABLE_KIND_BOTH)
      && SCM_HEAP_OBJECT_P (key))
    {
#ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
      GC_move_disappearing_link ((GC_PTR) &from->key, (GC_PTR) &to->key);
#else
      GC_unregister_disappearing_link (&from->key);
      SCM_I_REGISTER_DISAPPEARING_LINK (&to->key, SCM2PTR (key));
#endif
    }

  if ((kind == SCM_WEAK_TABLE_KIND_VALUE || kind == SCM_WEAK_TABLE_KIND_BOTH)
      && SCM_HEAP_OBJECT_P (value))
    {
#ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
      GC_move_disappearing_link ((GC_PTR) &from->value, (GC_PTR) &to->value);
#else
      GC_unregister_disappearing_link (&from->value);
      SCM_I_REGISTER_DISAPPEARING_LINK (&to->value, SCM2PTR (value));
#endif
    }
}

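/* Copy the entry FROM into TO, bringing its disappearing links along
   with it; or clear TO if FROM is empty.  */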
static void
move_weak_entry (scm_t_weak_entry *from, scm_t_weak_entry *to,
                 scm_t_weak_table_kind kind)
{
  if (from->hash)
    {
      scm_t_weak_entry copy;

      copy_weak_entry (from, &copy);
      to->hash = copy.hash;
      to->key = copy.key;
      to->value = copy.value;

      move_disappearing_links (from, to,
                               SCM_PACK (copy.key), SCM_PACK (copy.value),
                               kind);
    }
  else
    {
      to->hash = 0;
      to->key = 0;
      to->value = 0;
    }
}


typedef struct {
  scm_t_weak_entry *entries;        /* the data */
  scm_i_pthread_mutex_t lock;       /* the lock */
  scm_t_weak_table_kind kind;       /* what kind of table it is */
  unsigned long size;               /* total number of slots. */
  unsigned long n_items;            /* number of items in table */
  unsigned long lower;              /* when to shrink */
  unsigned long upper;              /* when to grow */
  int size_index;                   /* index into hashtable_size */
  int min_size_index;               /* minimum size_index */
} scm_t_weak_table;


#define SCM_WEAK_TABLE_P(x) (SCM_HAS_TYP7 (x, scm_tc7_weak_table))
#define SCM_VALIDATE_WEAK_TABLE(pos, arg) \
  SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_TABLE_P, "weak-table")
#define SCM_WEAK_TABLE(x) ((scm_t_weak_table *) SCM_CELL_WORD_1 (x))


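/* Map a (munged) hash code to an index into a table of SIZE slots.  */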
static unsigned long
hash_to_index (unsigned long hash, unsigned long size)
{
  return (hash >> 1) % size;
}

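/* Return how far the entry with hash code HASH would have to travel
   from its ideal slot to reach slot K, wrapping around the end of the
   table if necessary.  This is the displacement that Robin Hood
   hashing keeps balanced.  */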
static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = hash_to_index (hash, size);

  if (k >= origin)
    return k - origin;
  else
    /* The other key was displaced and wrapped around.  */
    return size - origin + k;
}

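/* Free up slot K for an incoming entry by shifting the entries that
   follow it one slot to the right, up to the next empty slot.  */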
static void
rob_from_rich (scm_t_weak_table *table, unsigned long k)
{
  unsigned long empty, size;

  size = table->size;

  /* If we are to free up slot K in the table, we need room to do so.  */
  assert (table->n_items < size);

  empty = k;
  do
    empty = (empty + 1) % size;
  while (table->entries[empty].hash);

  do
    {
      unsigned long last = empty ? (empty - 1) : (size - 1);
      move_weak_entry (&table->entries[last], &table->entries[empty],
                       table->kind);
      empty = last;
    }
  while (empty != k);

  table->entries[empty].hash = 0;
  table->entries[empty].key = 0;
  table->entries[empty].value = 0;
}

static void
give_to_poor (scm_t_weak_table *table, unsigned long k)
{
  /* Slot K was just freed up; possibly shuffle others down.  */
  unsigned long size = table->size;

  while (1)
    {
      unsigned long next = (k + 1) % size;
      unsigned long hash;
      scm_t_weak_entry copy;

      hash = table->entries[next].hash;

      if (!hash || hash_to_index (hash, size) == next)
        break;

      copy_weak_entry (&table->entries[next], &copy);

      if (!copy.key || !copy.value)
        /* Lost weak reference.  */
        {
          give_to_poor (table, next);
          table->n_items--;
          continue;
        }

      move_weak_entry (&table->entries[next], &table->entries[k],
                       table->kind);

      k = next;
    }

  /* We have shuffled down any entries that should be shuffled down; now
     free the end.  */
  table->entries[k].hash = 0;
  table->entries[k].key = 0;
  table->entries[k].value = 0;
}


\f

/* The GC "kinds" for singly-weak tables.  */
static int weak_key_gc_kind;
static int weak_value_gc_kind;

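/* Custom mark procedures for the singly-weak kinds.  The strong half
   of each occupied entry is marked by hand; the weak half is managed
   by its disappearing link.  */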
static struct GC_ms_entry *
mark_weak_key_table (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
                     struct GC_ms_entry *mark_stack_limit, GC_word env)
{
  scm_t_weak_entry *entries = (scm_t_weak_entry*) addr;
  unsigned long k, size = GC_size (addr) / sizeof (scm_t_weak_entry);

  for (k = 0; k < size; k++)
    if (entries[k].hash && entries[k].key)
      {
        SCM value = SCM_PACK (entries[k].value);
        mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word*) SCM2PTR (value),
                                           mark_stack_ptr, mark_stack_limit,
                                           NULL);
      }

  return mark_stack_ptr;
}

static struct GC_ms_entry *
mark_weak_value_table (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
                       struct GC_ms_entry *mark_stack_limit, GC_word env)
{
  scm_t_weak_entry *entries = (scm_t_weak_entry*) addr;
  unsigned long k, size = GC_size (addr) / sizeof (scm_t_weak_entry);

  for (k = 0; k < size; k++)
    if (entries[k].hash && entries[k].value)
      {
        SCM key = SCM_PACK (entries[k].key);
        mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word*) SCM2PTR (key),
                                           mark_stack_ptr, mark_stack_limit,
                                           NULL);
      }

  return mark_stack_ptr;
}

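/* Allocate a zeroed vector of SIZE entries, using the GC kind that
   matches the weakness KIND of the table.  */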
static scm_t_weak_entry *
allocate_entries (unsigned long size, scm_t_weak_table_kind kind)
{
  scm_t_weak_entry *ret;
  size_t bytes = size * sizeof (*ret);

  switch (kind)
    {
    case SCM_WEAK_TABLE_KIND_KEY:
      ret = GC_generic_malloc (bytes, weak_key_gc_kind);
      break;
    case SCM_WEAK_TABLE_KIND_VALUE:
      ret = GC_generic_malloc (bytes, weak_value_gc_kind);
      break;
    case SCM_WEAK_TABLE_KIND_BOTH:
      ret = scm_gc_malloc_pointerless (bytes, "weak-table");
      break;
    default:
      abort ();
    }

  memset (ret, 0, bytes);

  return ret;
}

\f

/* Growing or shrinking is triggered when the load factor
 *
 *   L = N / S    (N: number of items in table, S: bucket vector length)
 *
 * passes an upper limit of 0.9 or a lower limit of 0.2.
 *
 * The implementation stores the upper and lower number of items which
 * trigger a resize in the hashtable object.
 *
 * Possible hash table sizes (primes) are stored in the array
 * hashtable_size.
 */

static unsigned long hashtable_size[] = {
  31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
  224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
  57524111, 115048217, 230096423
};

#define HASHTABLE_SIZE_N (sizeof(hashtable_size)/sizeof(unsigned long))

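/* Return the index into hashtable_size that best fits TABLE's current
   number of items.  */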
static int
compute_size_index (scm_t_weak_table *table)
{
  int i = table->size_index;

  if (table->n_items < table->lower)
    {
      /* Rehashing is not triggered when i <= min_size_index.  */
      do
        --i;
      while (i > table->min_size_index
             && table->n_items < hashtable_size[i] / 5);
    }
  else if (table->n_items > table->upper)
    {
      ++i;
      if (i >= HASHTABLE_SIZE_N)
        /* The biggest size currently is 230096423, which for a 32-bit
           machine will occupy 2.3GB of memory at a load of 80%.  There
           is probably something better to do here, but if you have a
           weak map of that size, you are hosed in any case.  */
        abort ();
    }

  return i;
}

static int
is_acceptable_size_index (scm_t_weak_table *table, int size_index)
{
  int computed = compute_size_index (table);

  if (size_index == computed)
    /* We were going to grow or shrink, and allocating the new vector
       didn't change the target size.  */
    return 1;

  if (size_index == computed + 1)
    {
      /* We were going to enlarge the table, but allocating the new
         vector finalized some objects, making an enlargement
         unnecessary.  It might still be a good idea to use the larger
         table, though.  (This branch also gets hit if, while allocating
         the vector, some other thread was actively removing items from
         the table.  That is less likely, though.)  */
      unsigned long new_lower = hashtable_size[size_index] / 5;

      return table->size > new_lower;
    }

  if (size_index == computed - 1)
    {
      /* We were going to shrink the table, but when we dropped the lock
         to allocate the new vector, some other thread added elements to
         the table.  */
      return 0;
    }

  /* The computed size differs from our newly allocated size by more
     than one size index -- recalculate.  */
  return 0;
}

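/* Resize TABLE to fit its current load, rehashing every live entry
   into a freshly allocated entry vector.  The table's lock is dropped
   and re-taken around the allocation, since allocating can trigger
   finalizers.  */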
static void
resize_table (scm_t_weak_table *table)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do
    {
      new_size_index = compute_size_index (table);
      if (new_size_index == table->size_index)
        return;
      new_size = hashtable_size[new_size_index];
      scm_i_pthread_mutex_unlock (&table->lock);
      /* Allocating memory might cause finalizers to run, which could
         run anything, so drop our lock to avoid deadlocks.  */
      new_entries = allocate_entries (new_size, table->kind);
      scm_i_pthread_mutex_lock (&table->lock);
    }
  while (!is_acceptable_size_index (table, new_size_index));

  old_entries = table->entries;
  old_size = table->size;

  table->size_index = new_size_index;
  table->size = new_size;
  if (new_size_index <= table->min_size_index)
    table->lower = 0;
  else
    table->lower = new_size / 5;
  table->upper = 9 * new_size / 10;
  table->n_items = 0;
  table->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      copy_weak_entry (&old_entries[old_k], &copy);

      if (!copy.key || !copy.value)
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry.  */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking.  */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (table, new_k);
              break;
            }
        }

      table->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;
      new_entries[new_k].value = copy.value;

      register_disappearing_links (&new_entries[new_k],
                                   SCM_PACK (copy.key), SCM_PACK (copy.value),
                                   table->kind);
    }
}

/* Run after GC via do_vacuum_weak_table, this function walks over the
   whole table, removing lost weak references and reshuffling entries as
   it goes.  It might resize the table if it reaps enough entries.  */
static void
vacuum_weak_table (scm_t_weak_table *table)
{
  scm_t_weak_entry *entries = table->entries;
  unsigned long size = table->size;
  unsigned long k;

  for (k = 0; k < size; k++)
    {
      unsigned long hash = entries[k].hash;

      if (hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key || !copy.value)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (table, k);
              table->n_items--;
            }
        }
    }

  if (table->n_items < table->lower)
    resize_table (table);
}


\f

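/* Look up the entry matching HASH and PRED in TABLE, returning its
   value, or DFLT if no live entry matches.  The caller must hold the
   table's lock.  */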
static SCM
weak_table_ref (scm_t_weak_table *table, unsigned long hash,
                scm_t_table_predicate_fn pred, void *closure,
                SCM dflt)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = table->size;
  entries = table->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found.  */
        return dflt;

      if (hash == other_hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key || !copy.value)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (table, k);
              table->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), SCM_PACK (copy.value), closure))
            /* Found.  */
            return SCM_PACK (copy.value);
        }

      /* If the entry's distance is less, our key is not in the table.  */
      if (entry_distance (other_hash, k, size) < distance)
        return dflt;
    }

  /* If we got here, then we were unfortunate enough to loop through the
     whole table.  Shouldn't happen, but hey.  */
  return dflt;
}


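/* Insert or update the entry for KEY and VALUE in TABLE, evicting dead
   entries and displacing richer entries as needed to keep the Robin
   Hood invariant.  The caller must hold the table's lock.  */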
static void
weak_table_put_x (scm_t_weak_table *table, unsigned long hash,
                  scm_t_table_predicate_fn pred, void *closure,
                  SCM key, SCM value)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = table->size;
  entries = table->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; ; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Found an empty entry.  */
        break;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key || !copy.value)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (table, k);
              table->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), SCM_PACK (copy.value), closure))
            /* Found an entry with this key.  */
            break;
        }

      if (table->n_items > table->upper)
        /* Full table, time to resize.  */
        {
          resize_table (table);
          return weak_table_put_x (table, hash >> 1, pred, closure, key, value);
        }

      /* Displace the entry if our distance is less, otherwise keep
         looking.  */
      if (entry_distance (other_hash, k, size) < distance)
        {
          rob_from_rich (table, k);
          break;
        }
    }

  if (entries[k].hash)
    unregister_disappearing_links (&entries[k], table->kind);
  else
    table->n_items++;

  entries[k].hash = hash;
  entries[k].key = SCM_UNPACK (key);
  entries[k].value = SCM_UNPACK (value);

  register_disappearing_links (&entries[k], key, value, table->kind);
}


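/* Remove the entry matching HASH and PRED from TABLE, if present,
   reshuffling or shrinking the table afterwards.  The caller must hold
   the table's lock.  */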
static void
weak_table_remove_x (scm_t_weak_table *table, unsigned long hash,
                     scm_t_table_predicate_fn pred, void *closure)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = table->size;
  entries = table->entries;

  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found.  */
        return;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (!copy.key || !copy.value)
            /* Lost weak reference; reshuffle.  */
            {
              give_to_poor (table, k);
              table->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), SCM_PACK (copy.value), closure))
            /* Found an entry with this key.  */
            {
              entries[k].hash = 0;
              entries[k].key = 0;
              entries[k].value = 0;

              unregister_disappearing_links (&entries[k], table->kind);

              if (--table->n_items < table->lower)
                resize_table (table);
              else
                give_to_poor (table, k);

              return;
            }
        }

      /* If the entry's distance is less, our key is not in the table.  */
      if (entry_distance (other_hash, k, size) < distance)
        return;
    }
}


\f
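/* Allocate a fresh weak table of kind KIND, sized to hold at least K
   items.  */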
static SCM
make_weak_table (unsigned long k, scm_t_weak_table_kind kind)
{
  scm_t_weak_table *table;

  int i = 0, n = k ? k : 31;
  while (i + 1 < HASHTABLE_SIZE_N && n > hashtable_size[i])
    ++i;
  n = hashtable_size[i];

  table = scm_gc_malloc (sizeof (*table), "weak-table");
  table->entries = allocate_entries (n, kind);
  table->kind = kind;
  table->n_items = 0;
  table->size = n;
  table->lower = 0;
  table->upper = 9 * n / 10;
  table->size_index = i;
  table->min_size_index = i;
  scm_i_pthread_mutex_init (&table->lock, NULL);

  return scm_cell (scm_tc7_weak_table, (scm_t_bits)table);
}

void
scm_i_weak_table_print (SCM exp, SCM port, scm_print_state *pstate)
{
  scm_puts_unlocked ("#<", port);
  scm_puts_unlocked ("weak-table ", port);
  scm_uintprint (SCM_WEAK_TABLE (exp)->n_items, 10, port);
  scm_putc_unlocked ('/', port);
  scm_uintprint (SCM_WEAK_TABLE (exp)->size, 10, port);
  scm_puts_unlocked (">", port);
}

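/* GC callback: vacuum TABLE if its lock is uncontended; if another
   thread holds the lock, skip this round rather than block.  */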
static void
do_vacuum_weak_table (SCM table)
{
  scm_t_weak_table *t;

  t = SCM_WEAK_TABLE (table);

  if (scm_i_pthread_mutex_trylock (&t->lock) == 0)
    {
      vacuum_weak_table (t);
      scm_i_pthread_mutex_unlock (&t->lock);
    }

  return;
}

/* The before-gc C hook only runs if GC_table_start_callback is available,
   so if not, fall back on a finalizer-based implementation.  */
static int
weak_gc_callback (void **weak)
{
  void *val = weak[0];
  void (*callback) (SCM) = weak[1];

  if (!val)
    return 0;

  callback (SCM_PACK_POINTER (val));

  return 1;
}

#ifdef HAVE_GC_TABLE_START_CALLBACK
static void*
weak_gc_hook (void *hook_data, void *fn_data, void *data)
{
  if (!weak_gc_callback (fn_data))
    scm_c_hook_remove (&scm_before_gc_c_hook, weak_gc_hook, fn_data);

  return NULL;
}
#else
static void
weak_gc_finalizer (void *ptr, void *data)
{
  if (weak_gc_callback (ptr))
    scm_i_set_finalizer (ptr, weak_gc_finalizer, data);
}
#endif

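/* Arrange for CALLBACK to be invoked on OBJ around each GC for as long
   as OBJ stays alive.  The registration holds OBJ only weakly.  */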
static void
scm_c_register_weak_gc_callback (SCM obj, void (*callback) (SCM))
{
  void **weak = GC_MALLOC_ATOMIC (sizeof (void*) * 2);

  weak[0] = SCM_UNPACK_POINTER (obj);
  weak[1] = (void*)callback;
  GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak, SCM2PTR (obj));

#ifdef HAVE_GC_TABLE_START_CALLBACK
  scm_c_hook_add (&scm_before_gc_c_hook, weak_gc_hook, weak, 0);
#else
  scm_i_set_finalizer (weak, weak_gc_finalizer, NULL);
#endif
}

SCM
scm_c_make_weak_table (unsigned long k, scm_t_weak_table_kind kind)
{
  SCM ret;

  ret = make_weak_table (k, kind);

  scm_c_register_weak_gc_callback (ret, do_vacuum_weak_table);

  return ret;
}

SCM
scm_weak_table_p (SCM obj)
{
  return scm_from_bool (SCM_WEAK_TABLE_P (obj));
}

SCM
scm_c_weak_table_ref (SCM table, unsigned long raw_hash,
                      scm_t_table_predicate_fn pred,
                      void *closure, SCM dflt)
#define FUNC_NAME "weak-table-ref"
{
  SCM ret;
  scm_t_weak_table *t;

  SCM_VALIDATE_WEAK_TABLE (1, table);

  t = SCM_WEAK_TABLE (table);

  scm_i_pthread_mutex_lock (&t->lock);

  ret = weak_table_ref (t, raw_hash, pred, closure, dflt);

  scm_i_pthread_mutex_unlock (&t->lock);

  return ret;
}
#undef FUNC_NAME

void
scm_c_weak_table_put_x (SCM table, unsigned long raw_hash,
                        scm_t_table_predicate_fn pred,
                        void *closure, SCM key, SCM value)
#define FUNC_NAME "weak-table-put!"
{
  scm_t_weak_table *t;

  SCM_VALIDATE_WEAK_TABLE (1, table);

  t = SCM_WEAK_TABLE (table);

  scm_i_pthread_mutex_lock (&t->lock);

  weak_table_put_x (t, raw_hash, pred, closure, key, value);

  scm_i_pthread_mutex_unlock (&t->lock);
}
#undef FUNC_NAME

void
scm_c_weak_table_remove_x (SCM table, unsigned long raw_hash,
                           scm_t_table_predicate_fn pred,
                           void *closure)
#define FUNC_NAME "weak-table-remove!"
{
  scm_t_weak_table *t;

  SCM_VALIDATE_WEAK_TABLE (1, table);

  t = SCM_WEAK_TABLE (table);

  scm_i_pthread_mutex_lock (&t->lock);

  weak_table_remove_x (t, raw_hash, pred, closure);

  scm_i_pthread_mutex_unlock (&t->lock);
}
#undef FUNC_NAME

static int
assq_predicate (SCM x, SCM y, void *closure)
{
  return scm_is_eq (x, SCM_PACK_POINTER (closure));
}

SCM
scm_weak_table_refq (SCM table, SCM key, SCM dflt)
{
  if (SCM_UNBNDP (dflt))
    dflt = SCM_BOOL_F;

  return scm_c_weak_table_ref (table, scm_ihashq (key, -1),
                               assq_predicate, SCM_UNPACK_POINTER (key),
                               dflt);
}

void
scm_weak_table_putq_x (SCM table, SCM key, SCM value)
{
  scm_c_weak_table_put_x (table, scm_ihashq (key, -1),
                          assq_predicate, SCM_UNPACK_POINTER (key),
                          key, value);
}

void
scm_weak_table_remq_x (SCM table, SCM key)
{
  scm_c_weak_table_remove_x (table, scm_ihashq (key, -1),
                             assq_predicate, SCM_UNPACK_POINTER (key));
}

void
scm_weak_table_clear_x (SCM table)
#define FUNC_NAME "weak-table-clear!"
{
  scm_t_weak_table *t;

  SCM_VALIDATE_WEAK_TABLE (1, table);

  t = SCM_WEAK_TABLE (table);

  scm_i_pthread_mutex_lock (&t->lock);

  memset (t->entries, 0, sizeof (scm_t_weak_entry) * t->size);
  t->n_items = 0;

  scm_i_pthread_mutex_unlock (&t->lock);
}
#undef FUNC_NAME

SCM
scm_c_weak_table_fold (scm_t_table_fold_fn proc, void *closure,
                       SCM init, SCM table)
{
  scm_t_weak_table *t;
  scm_t_weak_entry *entries;
  unsigned long k, size;

  t = SCM_WEAK_TABLE (table);

  scm_i_pthread_mutex_lock (&t->lock);

  size = t->size;
  entries = t->entries;

  for (k = 0; k < size; k++)
    {
      if (entries[k].hash)
        {
          scm_t_weak_entry copy;

          copy_weak_entry (&entries[k], &copy);

          if (copy.key && copy.value)
            {
              /* Release table lock while we call the function.  */
              scm_i_pthread_mutex_unlock (&t->lock);
              init = proc (closure,
                           SCM_PACK (copy.key), SCM_PACK (copy.value),
                           init);
              scm_i_pthread_mutex_lock (&t->lock);
            }
        }
    }

  scm_i_pthread_mutex_unlock (&t->lock);

  return init;
}

static SCM
fold_trampoline (void *closure, SCM k, SCM v, SCM init)
{
  return scm_call_3 (SCM_PACK_POINTER (closure), k, v, init);
}

SCM
scm_weak_table_fold (SCM proc, SCM init, SCM table)
#define FUNC_NAME "weak-table-fold"
{
  SCM_VALIDATE_WEAK_TABLE (3, table);
  SCM_VALIDATE_PROC (1, proc);

  return scm_c_weak_table_fold (fold_trampoline, SCM_UNPACK_POINTER (proc), init, table);
}
#undef FUNC_NAME

static SCM
for_each_trampoline (void *closure, SCM k, SCM v, SCM seed)
{
  scm_call_2 (SCM_PACK_POINTER (closure), k, v);
  return seed;
}

void
scm_weak_table_for_each (SCM proc, SCM table)
#define FUNC_NAME "weak-table-for-each"
{
  SCM_VALIDATE_WEAK_TABLE (2, table);
  SCM_VALIDATE_PROC (1, proc);

  scm_c_weak_table_fold (for_each_trampoline, SCM_UNPACK_POINTER (proc), SCM_BOOL_F, table);
}
#undef FUNC_NAME

static SCM
map_trampoline (void *closure, SCM k, SCM v, SCM seed)
{
  return scm_cons (scm_call_2 (SCM_PACK_POINTER (closure), k, v), seed);
}

SCM
scm_weak_table_map_to_list (SCM proc, SCM table)
#define FUNC_NAME "weak-table-map->list"
{
  SCM_VALIDATE_WEAK_TABLE (2, table);
  SCM_VALIDATE_PROC (1, proc);

  return scm_c_weak_table_fold (map_trampoline, SCM_UNPACK_POINTER (proc), SCM_EOL, table);
}
#undef FUNC_NAME


\f

/* Legacy interface.  */

SCM_DEFINE (scm_make_weak_key_hash_table, "make-weak-key-hash-table", 0, 1, 0,
            (SCM n),
            "@deffnx {Scheme Procedure} make-weak-value-hash-table size\n"
            "@deffnx {Scheme Procedure} make-doubly-weak-hash-table size\n"
            "Return a weak hash table with @var{size} buckets.\n"
            "\n"
            "You can modify weak hash tables in exactly the same way you\n"
            "would modify regular hash tables. (@pxref{Hash Tables})")
#define FUNC_NAME s_scm_make_weak_key_hash_table
{
  return scm_c_make_weak_table (SCM_UNBNDP (n) ? 0 : scm_to_ulong (n),
                                SCM_WEAK_TABLE_KIND_KEY);
}
#undef FUNC_NAME


SCM_DEFINE (scm_make_weak_value_hash_table, "make-weak-value-hash-table", 0, 1, 0,
            (SCM n),
            "Return a hash table with weak values with @var{size} buckets.\n"
            "(@pxref{Hash Tables})")
#define FUNC_NAME s_scm_make_weak_value_hash_table
{
  return scm_c_make_weak_table (SCM_UNBNDP (n) ? 0 : scm_to_ulong (n),
                                SCM_WEAK_TABLE_KIND_VALUE);
}
#undef FUNC_NAME


SCM_DEFINE (scm_make_doubly_weak_hash_table, "make-doubly-weak-hash-table", 1, 0, 0,
            (SCM n),
            "Return a hash table with weak keys and values with @var{size}\n"
            "buckets. (@pxref{Hash Tables})")
#define FUNC_NAME s_scm_make_doubly_weak_hash_table
{
  return scm_c_make_weak_table (SCM_UNBNDP (n) ? 0 : scm_to_ulong (n),
                                SCM_WEAK_TABLE_KIND_BOTH);
}
#undef FUNC_NAME

SCM_DEFINE (scm_weak_key_hash_table_p, "weak-key-hash-table?", 1, 0, 0,
            (SCM obj),
            "@deffnx {Scheme Procedure} weak-value-hash-table? obj\n"
            "@deffnx {Scheme Procedure} doubly-weak-hash-table? obj\n"
            "Return @code{#t} if @var{obj} is the specified weak hash\n"
            "table. Note that a doubly weak hash table is neither a weak key\n"
            "nor a weak value hash table.")
#define FUNC_NAME s_scm_weak_key_hash_table_p
{
  return scm_from_bool (SCM_WEAK_TABLE_P (obj) &&
                        SCM_WEAK_TABLE (obj)->kind == SCM_WEAK_TABLE_KIND_KEY);
}
#undef FUNC_NAME


SCM_DEFINE (scm_weak_value_hash_table_p, "weak-value-hash-table?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a weak value hash table.")
#define FUNC_NAME s_scm_weak_value_hash_table_p
{
  return scm_from_bool (SCM_WEAK_TABLE_P (obj) &&
                        SCM_WEAK_TABLE (obj)->kind == SCM_WEAK_TABLE_KIND_VALUE);
}
#undef FUNC_NAME


SCM_DEFINE (scm_doubly_weak_hash_table_p, "doubly-weak-hash-table?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a doubly weak hash table.")
#define FUNC_NAME s_scm_doubly_weak_hash_table_p
{
  return scm_from_bool (SCM_WEAK_TABLE_P (obj) &&
                        SCM_WEAK_TABLE (obj)->kind == SCM_WEAK_TABLE_KIND_BOTH);
}
#undef FUNC_NAME



\f

void
scm_weak_table_prehistory (void)
{
  weak_key_gc_kind =
    GC_new_kind (GC_new_free_list (),
                 GC_MAKE_PROC (GC_new_proc (mark_weak_key_table), 0),
                 0, 0);
  weak_value_gc_kind =
    GC_new_kind (GC_new_free_list (),
                 GC_MAKE_PROC (GC_new_proc (mark_weak_value_table), 0),
                 0, 0);
}

void
scm_init_weak_table ()
{
#include "libguile/weak-table.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/