1 /* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
30 #include "libguile/_scm.h"
31 #include "libguile/hash.h"
32 #include "libguile/eval.h"
33 #include "libguile/ports.h"
35 #include "libguile/validate.h"
36 #include "libguile/weak-table.h"
41 This file implements weak hash tables. Weak hash tables are
42 generally used when you want to augment some object with additional
43 data, but when you don't have space to store the data in the object.
44 For example, procedure properties are implemented with weak tables.
46 Weak tables are implemented using an open-addressed hash table.
47 Basically this means that there is an array of entries, and the item
48 is expected to be found in the slot corresponding to its hash code,
49 modulo the length of the array.
51 Collisions are handled using linear probing with the Robin Hood
52 technique. See Pedro Celis' paper, "Robin Hood Hashing":
54 http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
56 The vector of entries is allocated in such a way that the GC doesn't
57 trace the weak values. For doubly-weak tables, this means that the
58 entries are allocated as an "atomic" piece of memory. Key-weak and
59 value-weak tables use a special GC kind with a custom mark procedure.
60 When items are added weakly into the table, a disappearing link is
61 registered to their locations. If the referent is collected, then
62 that link will be zeroed out.
64 An entry in the table consists of the key and the value, together
65 with the hash code of the key. We munge hash codes so that they are
66 never 0. In this way we can detect removed entries (key of zero but
67 nonzero hash code), and can then reshuffle elements as needed to
68 maintain the robin hood ordering.
70 Compared to buckets-and-chains hash tables, open addressing has the
71 advantage that it is very cache-friendly. It also uses less memory.
73 Implementation-wise, there are two things to note.
75 1. We assume that hash codes are evenly distributed across the
76 range of unsigned longs. The actual hash code stored in the
77 entry is left-shifted by 1 bit (losing 1 bit of hash precision),
78 and then or'd with 1. In this way we ensure that the hash field
79 of an occupied entry is nonzero. To map to an index, we
80 right-shift the hash by one, divide by the size, and take the
83 2. Since the weak references are stored in an atomic region with
84 disappearing links, they need to be accessed with the GC alloc
85 lock. `copy_weak_entry' will do that for you. The hash code
86 itself can be read outside the lock, though.
97 struct weak_entry_data
{
99 scm_t_weak_entry
*out
;
103 do_copy_weak_entry (void *data
)
105 struct weak_entry_data
*e
= data
;
107 e
->out
->hash
= e
->in
->hash
;
108 e
->out
->key
= e
->in
->key
;
109 e
->out
->value
= e
->in
->value
;
115 copy_weak_entry (scm_t_weak_entry
*src
, scm_t_weak_entry
*dst
)
117 struct weak_entry_data data
;
122 GC_call_with_alloc_lock (do_copy_weak_entry
, &data
);
126 register_disappearing_links (scm_t_weak_entry
*entry
,
128 scm_t_weak_table_kind kind
)
130 if (SCM_UNPACK (k
) && SCM_HEAP_OBJECT_P (k
)
131 && (kind
== SCM_WEAK_TABLE_KIND_KEY
132 || kind
== SCM_WEAK_TABLE_KIND_BOTH
))
133 SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &entry
->key
,
136 if (SCM_UNPACK (v
) && SCM_HEAP_OBJECT_P (v
)
137 && (kind
== SCM_WEAK_TABLE_KIND_VALUE
138 || kind
== SCM_WEAK_TABLE_KIND_BOTH
))
139 SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &entry
->value
,
144 unregister_disappearing_links (scm_t_weak_entry
*entry
,
145 scm_t_weak_table_kind kind
)
147 if (kind
== SCM_WEAK_TABLE_KIND_KEY
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
148 GC_unregister_disappearing_link ((void **) &entry
->key
);
150 if (kind
== SCM_WEAK_TABLE_KIND_VALUE
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
151 GC_unregister_disappearing_link ((void **) &entry
->value
);
155 move_disappearing_links (scm_t_weak_entry
*from
, scm_t_weak_entry
*to
,
156 SCM key
, SCM value
, scm_t_weak_table_kind kind
)
158 if ((kind
== SCM_WEAK_TABLE_KIND_KEY
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
159 && SCM_HEAP_OBJECT_P (key
))
161 #ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
162 GC_move_disappearing_link ((void **) &from
->key
, (void **) &to
->key
);
164 GC_unregister_disappearing_link ((void **) &from
->key
);
165 SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &to
->key
, SCM2PTR (key
));
169 if ((kind
== SCM_WEAK_TABLE_KIND_VALUE
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
170 && SCM_HEAP_OBJECT_P (value
))
172 #ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
173 GC_move_disappearing_link ((void **) &from
->value
, (void **) &to
->value
);
175 GC_unregister_disappearing_link ((void **) &from
->value
);
176 SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &to
->value
, SCM2PTR (value
));
182 move_weak_entry (scm_t_weak_entry
*from
, scm_t_weak_entry
*to
,
183 scm_t_weak_table_kind kind
)
187 scm_t_weak_entry copy
;
189 copy_weak_entry (from
, ©
);
190 to
->hash
= copy
.hash
;
192 to
->value
= copy
.value
;
194 move_disappearing_links (from
, to
,
195 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
208 scm_t_weak_entry
*entries
; /* the data */
209 scm_i_pthread_mutex_t lock
; /* the lock */
210 scm_t_weak_table_kind kind
; /* what kind of table it is */
211 unsigned long size
; /* total number of slots. */
212 unsigned long n_items
; /* number of items in table */
213 unsigned long lower
; /* when to shrink */
214 unsigned long upper
; /* when to grow */
215 int size_index
; /* index into hashtable_size */
216 int min_size_index
; /* minimum size_index */
220 #define SCM_WEAK_TABLE_P(x) (SCM_HAS_TYP7 (x, scm_tc7_weak_table))
221 #define SCM_VALIDATE_WEAK_TABLE(pos, arg) \
222 SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_TABLE_P, "weak-table")
223 #define SCM_WEAK_TABLE(x) ((scm_t_weak_table *) SCM_CELL_WORD_1 (x))
/* Map a munged hash code to a slot index.  Stored hashes are
   (raw << 1) | 1, so shift off the tag bit before taking the
   modulus over the table size.  */
static unsigned long
hash_to_index (unsigned long hash, unsigned long size)
{
  return (hash >> 1) % size;
}
/* Return the probe distance of an entry with hash code HASH currently
   sitting at slot K: how far K is from the entry's preferred slot,
   accounting for wrap-around.  Used to maintain the Robin Hood
   invariant (entries are ordered by increasing displacement).  */
static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = hash_to_index (hash, size);

  if (k >= origin)
    return k - origin;
  else
    /* The other key was displaced and wrapped around. */
    return size - origin + k;
}
245 rob_from_rich (scm_t_weak_table
*table
, unsigned long k
)
247 unsigned long empty
, size
;
251 /* If we are to free up slot K in the table, we need room to do so. */
252 assert (table
->n_items
< size
);
256 empty
= (empty
+ 1) % size
;
257 while (table
->entries
[empty
].hash
);
261 unsigned long last
= empty
? (empty
- 1) : (size
- 1);
262 move_weak_entry (&table
->entries
[last
], &table
->entries
[empty
],
268 table
->entries
[empty
].hash
= 0;
269 table
->entries
[empty
].key
= 0;
270 table
->entries
[empty
].value
= 0;
274 give_to_poor (scm_t_weak_table
*table
, unsigned long k
)
276 /* Slot K was just freed up; possibly shuffle others down. */
277 unsigned long size
= table
->size
;
281 unsigned long next
= (k
+ 1) % size
;
283 scm_t_weak_entry copy
;
285 hash
= table
->entries
[next
].hash
;
287 if (!hash
|| hash_to_index (hash
, size
) == next
)
290 copy_weak_entry (&table
->entries
[next
], ©
);
292 if (!copy
.key
|| !copy
.value
)
293 /* Lost weak reference. */
295 give_to_poor (table
, next
);
300 move_weak_entry (&table
->entries
[next
], &table
->entries
[k
],
306 /* We have shuffled down any entries that should be shuffled down; now
308 table
->entries
[k
].hash
= 0;
309 table
->entries
[k
].key
= 0;
310 table
->entries
[k
].value
= 0;
316 /* The GC "kinds" for singly-weak tables. */
317 static int weak_key_gc_kind
;
318 static int weak_value_gc_kind
;
320 static struct GC_ms_entry
*
321 mark_weak_key_table (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
322 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
324 scm_t_weak_entry
*entries
= (scm_t_weak_entry
*) addr
;
325 unsigned long k
, size
= GC_size (addr
) / sizeof (scm_t_weak_entry
);
327 for (k
= 0; k
< size
; k
++)
328 if (entries
[k
].hash
&& entries
[k
].key
)
330 SCM value
= SCM_PACK (entries
[k
].value
);
331 mark_stack_ptr
= GC_MARK_AND_PUSH ((GC_word
*) SCM2PTR (value
),
332 mark_stack_ptr
, mark_stack_limit
,
336 return mark_stack_ptr
;
339 static struct GC_ms_entry
*
340 mark_weak_value_table (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
341 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
343 scm_t_weak_entry
*entries
= (scm_t_weak_entry
*) addr
;
344 unsigned long k
, size
= GC_size (addr
) / sizeof (scm_t_weak_entry
);
346 for (k
= 0; k
< size
; k
++)
347 if (entries
[k
].hash
&& entries
[k
].value
)
349 SCM key
= SCM_PACK (entries
[k
].key
);
350 mark_stack_ptr
= GC_MARK_AND_PUSH ((GC_word
*) SCM2PTR (key
),
351 mark_stack_ptr
, mark_stack_limit
,
355 return mark_stack_ptr
;
358 static scm_t_weak_entry
*
359 allocate_entries (unsigned long size
, scm_t_weak_table_kind kind
)
361 scm_t_weak_entry
*ret
;
362 size_t bytes
= size
* sizeof (*ret
);
366 case SCM_WEAK_TABLE_KIND_KEY
:
367 ret
= GC_generic_malloc (bytes
, weak_key_gc_kind
);
369 case SCM_WEAK_TABLE_KIND_VALUE
:
370 ret
= GC_generic_malloc (bytes
, weak_value_gc_kind
);
372 case SCM_WEAK_TABLE_KIND_BOTH
:
373 ret
= scm_gc_malloc_pointerless (bytes
, "weak-table");
379 memset (ret
, 0, bytes
);
/* Growing or shrinking is triggered when the load factor
 *
 *   L = N / S    (N: number of items in table, S: bucket vector length)
 *
 * passes an upper limit of 0.9 or a lower limit of 0.2.
 *
 * The implementation stores the upper and lower number of items which
 * trigger a resize in the hashtable object.
 *
 * Possible hash table sizes (primes) are stored in the array
 * hashtable_size.
 */

static unsigned long hashtable_size[] = {
  31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
  224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
  57524111, 115048217, 230096423
};

#define HASHTABLE_SIZE_N (sizeof(hashtable_size)/sizeof(unsigned long))
408 compute_size_index (scm_t_weak_table
*table
)
410 int i
= table
->size_index
;
412 if (table
->n_items
< table
->lower
)
414 /* rehashing is not triggered when i <= min_size */
417 while (i
> table
->min_size_index
418 && table
->n_items
< hashtable_size
[i
] / 5);
420 else if (table
->n_items
> table
->upper
)
423 if (i
>= HASHTABLE_SIZE_N
)
424 /* The biggest size currently is 230096423, which for a 32-bit
425 machine will occupy 2.3GB of memory at a load of 80%. There
426 is probably something better to do here, but if you have a
427 weak map of that size, you are hosed in any case. */
435 is_acceptable_size_index (scm_t_weak_table
*table
, int size_index
)
437 int computed
= compute_size_index (table
);
439 if (size_index
== computed
)
440 /* We were going to grow or shrink, and allocating the new vector
441 didn't change the target size. */
444 if (size_index
== computed
+ 1)
446 /* We were going to enlarge the table, but allocating the new
447 vector finalized some objects, making an enlargement
448 unnecessary. It might still be a good idea to use the larger
449 table, though. (This branch also gets hit if, while allocating
450 the vector, some other thread was actively removing items from
451 the table. That is less likely, though.) */
452 unsigned long new_lower
= hashtable_size
[size_index
] / 5;
454 return table
->size
> new_lower
;
457 if (size_index
== computed
- 1)
459 /* We were going to shrink the table, but when we dropped the lock
460 to allocate the new vector, some other thread added elements to
465 /* The computed size differs from our newly allocated size by more
466 than one size index -- recalculate. */
471 resize_table (scm_t_weak_table
*table
)
473 scm_t_weak_entry
*old_entries
, *new_entries
;
475 unsigned long old_size
, new_size
, old_k
;
479 new_size_index
= compute_size_index (table
);
480 if (new_size_index
== table
->size_index
)
482 new_size
= hashtable_size
[new_size_index
];
483 scm_i_pthread_mutex_unlock (&table
->lock
);
484 /* Allocating memory might cause finalizers to run, which could
485 run anything, so drop our lock to avoid deadlocks. */
486 new_entries
= allocate_entries (new_size
, table
->kind
);
487 scm_i_pthread_mutex_lock (&table
->lock
);
489 while (!is_acceptable_size_index (table
, new_size_index
));
491 old_entries
= table
->entries
;
492 old_size
= table
->size
;
494 table
->size_index
= new_size_index
;
495 table
->size
= new_size
;
496 if (new_size_index
<= table
->min_size_index
)
499 table
->lower
= new_size
/ 5;
500 table
->upper
= 9 * new_size
/ 10;
502 table
->entries
= new_entries
;
504 for (old_k
= 0; old_k
< old_size
; old_k
++)
506 scm_t_weak_entry copy
;
507 unsigned long new_k
, distance
;
509 if (!old_entries
[old_k
].hash
)
512 copy_weak_entry (&old_entries
[old_k
], ©
);
514 if (!copy
.key
|| !copy
.value
)
517 new_k
= hash_to_index (copy
.hash
, new_size
);
519 for (distance
= 0; ; distance
++, new_k
= (new_k
+ 1) % new_size
)
521 unsigned long other_hash
= new_entries
[new_k
].hash
;
524 /* Found an empty entry. */
527 /* Displace the entry if our distance is less, otherwise keep
529 if (entry_distance (other_hash
, new_k
, new_size
) < distance
)
531 rob_from_rich (table
, new_k
);
537 new_entries
[new_k
].hash
= copy
.hash
;
538 new_entries
[new_k
].key
= copy
.key
;
539 new_entries
[new_k
].value
= copy
.value
;
541 register_disappearing_links (&new_entries
[new_k
],
542 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
547 /* Run after GC via do_vacuum_weak_table, this function runs over the
548 whole table, removing lost weak references, reshuffling the table as it
549 goes. It might resize the table if it reaps enough entries. */
551 vacuum_weak_table (scm_t_weak_table
*table
)
553 scm_t_weak_entry
*entries
= table
->entries
;
554 unsigned long size
= table
->size
;
557 for (k
= 0; k
< size
; k
++)
559 unsigned long hash
= entries
[k
].hash
;
563 scm_t_weak_entry copy
;
565 copy_weak_entry (&entries
[k
], ©
);
567 if (!copy
.key
|| !copy
.value
)
568 /* Lost weak reference; reshuffle. */
570 give_to_poor (table
, k
);
576 if (table
->n_items
< table
->lower
)
577 resize_table (table
);
584 weak_table_ref (scm_t_weak_table
*table
, unsigned long hash
,
585 scm_t_table_predicate_fn pred
, void *closure
,
588 unsigned long k
, distance
, size
;
589 scm_t_weak_entry
*entries
;
592 entries
= table
->entries
;
594 hash
= (hash
<< 1) | 0x1;
595 k
= hash_to_index (hash
, size
);
597 for (distance
= 0; distance
< size
; distance
++, k
= (k
+ 1) % size
)
599 unsigned long other_hash
;
602 other_hash
= entries
[k
].hash
;
608 if (hash
== other_hash
)
610 scm_t_weak_entry copy
;
612 copy_weak_entry (&entries
[k
], ©
);
614 if (!copy
.key
|| !copy
.value
)
615 /* Lost weak reference; reshuffle. */
617 give_to_poor (table
, k
);
622 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
624 return SCM_PACK (copy
.value
);
627 /* If the entry's distance is less, our key is not in the table. */
628 if (entry_distance (other_hash
, k
, size
) < distance
)
632 /* If we got here, then we were unfortunate enough to loop through the
633 whole table. Shouldn't happen, but hey. */
639 weak_table_put_x (scm_t_weak_table
*table
, unsigned long hash
,
640 scm_t_table_predicate_fn pred
, void *closure
,
643 unsigned long k
, distance
, size
;
644 scm_t_weak_entry
*entries
;
647 entries
= table
->entries
;
649 hash
= (hash
<< 1) | 0x1;
650 k
= hash_to_index (hash
, size
);
652 for (distance
= 0; ; distance
++, k
= (k
+ 1) % size
)
654 unsigned long other_hash
;
657 other_hash
= entries
[k
].hash
;
660 /* Found an empty entry. */
663 if (other_hash
== hash
)
665 scm_t_weak_entry copy
;
667 copy_weak_entry (&entries
[k
], ©
);
669 if (!copy
.key
|| !copy
.value
)
670 /* Lost weak reference; reshuffle. */
672 give_to_poor (table
, k
);
677 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
678 /* Found an entry with this key. */
682 if (table
->n_items
> table
->upper
)
683 /* Full table, time to resize. */
685 resize_table (table
);
686 return weak_table_put_x (table
, hash
>> 1, pred
, closure
, key
, value
);
689 /* Displace the entry if our distance is less, otherwise keep
691 if (entry_distance (other_hash
, k
, size
) < distance
)
693 rob_from_rich (table
, k
);
699 unregister_disappearing_links (&entries
[k
], table
->kind
);
703 entries
[k
].hash
= hash
;
704 entries
[k
].key
= SCM_UNPACK (key
);
705 entries
[k
].value
= SCM_UNPACK (value
);
707 register_disappearing_links (&entries
[k
], key
, value
, table
->kind
);
712 weak_table_remove_x (scm_t_weak_table
*table
, unsigned long hash
,
713 scm_t_table_predicate_fn pred
, void *closure
)
715 unsigned long k
, distance
, size
;
716 scm_t_weak_entry
*entries
;
719 entries
= table
->entries
;
721 hash
= (hash
<< 1) | 0x1;
722 k
= hash_to_index (hash
, size
);
724 for (distance
= 0; distance
< size
; distance
++, k
= (k
+ 1) % size
)
726 unsigned long other_hash
;
729 other_hash
= entries
[k
].hash
;
735 if (other_hash
== hash
)
737 scm_t_weak_entry copy
;
739 copy_weak_entry (&entries
[k
], ©
);
741 if (!copy
.key
|| !copy
.value
)
742 /* Lost weak reference; reshuffle. */
744 give_to_poor (table
, k
);
749 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
750 /* Found an entry with this key. */
754 entries
[k
].value
= 0;
756 unregister_disappearing_links (&entries
[k
], table
->kind
);
758 if (--table
->n_items
< table
->lower
)
759 resize_table (table
);
761 give_to_poor (table
, k
);
767 /* If the entry's distance is less, our key is not in the table. */
768 if (entry_distance (other_hash
, k
, size
) < distance
)
776 make_weak_table (unsigned long k
, scm_t_weak_table_kind kind
)
778 scm_t_weak_table
*table
;
780 int i
= 0, n
= k
? k
: 31;
781 while (i
+ 1 < HASHTABLE_SIZE_N
&& n
> hashtable_size
[i
])
783 n
= hashtable_size
[i
];
785 table
= scm_gc_malloc (sizeof (*table
), "weak-table");
786 table
->entries
= allocate_entries (n
, kind
);
791 table
->upper
= 9 * n
/ 10;
792 table
->size_index
= i
;
793 table
->min_size_index
= i
;
794 scm_i_pthread_mutex_init (&table
->lock
, NULL
);
796 return scm_cell (scm_tc7_weak_table
, (scm_t_bits
)table
);
800 scm_i_weak_table_print (SCM exp
, SCM port
, scm_print_state
*pstate
)
802 scm_puts_unlocked ("#<", port
);
803 scm_puts_unlocked ("weak-table ", port
);
804 scm_uintprint (SCM_WEAK_TABLE (exp
)->n_items
, 10, port
);
805 scm_putc_unlocked ('/', port
);
806 scm_uintprint (SCM_WEAK_TABLE (exp
)->size
, 10, port
);
807 scm_puts_unlocked (">", port
);
811 do_vacuum_weak_table (SCM table
)
815 t
= SCM_WEAK_TABLE (table
);
817 if (scm_i_pthread_mutex_trylock (&t
->lock
) == 0)
819 vacuum_weak_table (t
);
820 scm_i_pthread_mutex_unlock (&t
->lock
);
/* The GC C hook only runs if GC_table_start_callback is available,
   so if not, fall back on a finalizer-based implementation. */
static int
weak_gc_callback (void **weak)
{
  void *val = weak[0];
  void (*callback) (SCM) = weak[1];

  if (!val)
    /* The weakly-held object was collected; tell the caller to
       deregister.  */
    return 0;

  callback (SCM_PACK_POINTER (val));

  return 1;
}

#ifdef HAVE_GC_TABLE_START_CALLBACK
static void
weak_gc_hook (void *hook_data, void *fn_data, void *data)
{
  if (!weak_gc_callback (fn_data))
    /* Remove from the same hook the callback was added to
       (scm_after_gc_c_hook; the original removed from
       scm_before_gc_c_hook, which would never match).  */
    scm_c_hook_remove (&scm_after_gc_c_hook, weak_gc_hook, fn_data);
}
#else
static void
weak_gc_finalizer (void *ptr, void *data)
{
  if (weak_gc_callback (ptr))
    /* Still live: re-arm the finalizer so we run again after the next
       collection.  */
    scm_i_set_finalizer (ptr, weak_gc_finalizer, data);
}
#endif
861 scm_c_register_weak_gc_callback (SCM obj
, void (*callback
) (SCM
))
863 void **weak
= GC_MALLOC_ATOMIC (sizeof (void*) * 2);
865 weak
[0] = SCM_UNPACK_POINTER (obj
);
866 weak
[1] = (void*)callback
;
867 GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak
, SCM2PTR (obj
));
869 #ifdef HAVE_GC_TABLE_START_CALLBACK
870 scm_c_hook_add (&scm_after_gc_c_hook
, weak_gc_hook
, weak
, 0);
872 scm_i_set_finalizer (weak
, weak_gc_finalizer
, NULL
);
877 scm_c_make_weak_table (unsigned long k
, scm_t_weak_table_kind kind
)
881 ret
= make_weak_table (k
, kind
);
883 scm_c_register_weak_gc_callback (ret
, do_vacuum_weak_table
);
889 scm_weak_table_p (SCM obj
)
891 return scm_from_bool (SCM_WEAK_TABLE_P (obj
));
895 scm_c_weak_table_ref (SCM table
, unsigned long raw_hash
,
896 scm_t_table_predicate_fn pred
,
897 void *closure
, SCM dflt
)
898 #define FUNC_NAME "weak-table-ref"
903 SCM_VALIDATE_WEAK_TABLE (1, table
);
905 t
= SCM_WEAK_TABLE (table
);
907 scm_i_pthread_mutex_lock (&t
->lock
);
909 ret
= weak_table_ref (t
, raw_hash
, pred
, closure
, dflt
);
911 scm_i_pthread_mutex_unlock (&t
->lock
);
918 scm_c_weak_table_put_x (SCM table
, unsigned long raw_hash
,
919 scm_t_table_predicate_fn pred
,
920 void *closure
, SCM key
, SCM value
)
921 #define FUNC_NAME "weak-table-put!"
925 SCM_VALIDATE_WEAK_TABLE (1, table
);
927 t
= SCM_WEAK_TABLE (table
);
929 scm_i_pthread_mutex_lock (&t
->lock
);
931 weak_table_put_x (t
, raw_hash
, pred
, closure
, key
, value
);
933 scm_i_pthread_mutex_unlock (&t
->lock
);
938 scm_c_weak_table_remove_x (SCM table
, unsigned long raw_hash
,
939 scm_t_table_predicate_fn pred
,
941 #define FUNC_NAME "weak-table-remove!"
945 SCM_VALIDATE_WEAK_TABLE (1, table
);
947 t
= SCM_WEAK_TABLE (table
);
949 scm_i_pthread_mutex_lock (&t
->lock
);
951 weak_table_remove_x (t
, raw_hash
, pred
, closure
);
953 scm_i_pthread_mutex_unlock (&t
->lock
);
958 assq_predicate (SCM x
, SCM y
, void *closure
)
960 return scm_is_eq (x
, SCM_PACK_POINTER (closure
));
964 scm_weak_table_refq (SCM table
, SCM key
, SCM dflt
)
966 if (SCM_UNBNDP (dflt
))
969 return scm_c_weak_table_ref (table
, scm_ihashq (key
, -1),
970 assq_predicate
, SCM_UNPACK_POINTER (key
),
975 scm_weak_table_putq_x (SCM table
, SCM key
, SCM value
)
977 scm_c_weak_table_put_x (table
, scm_ihashq (key
, -1),
978 assq_predicate
, SCM_UNPACK_POINTER (key
),
983 scm_weak_table_remq_x (SCM table
, SCM key
)
985 scm_c_weak_table_remove_x (table
, scm_ihashq (key
, -1),
986 assq_predicate
, SCM_UNPACK_POINTER (key
));
990 scm_weak_table_clear_x (SCM table
)
991 #define FUNC_NAME "weak-table-clear!"
995 SCM_VALIDATE_WEAK_TABLE (1, table
);
997 t
= SCM_WEAK_TABLE (table
);
999 scm_i_pthread_mutex_lock (&t
->lock
);
1001 memset (t
->entries
, 0, sizeof (scm_t_weak_entry
) * t
->size
);
1004 scm_i_pthread_mutex_unlock (&t
->lock
);
1009 scm_c_weak_table_fold (scm_t_table_fold_fn proc
, void *closure
,
1010 SCM init
, SCM table
)
1012 scm_t_weak_table
*t
;
1013 scm_t_weak_entry
*entries
;
1014 unsigned long k
, size
;
1016 t
= SCM_WEAK_TABLE (table
);
1018 scm_i_pthread_mutex_lock (&t
->lock
);
1021 entries
= t
->entries
;
1023 for (k
= 0; k
< size
; k
++)
1025 if (entries
[k
].hash
)
1027 scm_t_weak_entry copy
;
1029 copy_weak_entry (&entries
[k
], ©
);
1031 if (copy
.key
&& copy
.value
)
1033 /* Release table lock while we call the function. */
1034 scm_i_pthread_mutex_unlock (&t
->lock
);
1035 init
= proc (closure
,
1036 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
1038 scm_i_pthread_mutex_lock (&t
->lock
);
1043 scm_i_pthread_mutex_unlock (&t
->lock
);
1049 fold_trampoline (void *closure
, SCM k
, SCM v
, SCM init
)
1051 return scm_call_3 (SCM_PACK_POINTER (closure
), k
, v
, init
);
1055 scm_weak_table_fold (SCM proc
, SCM init
, SCM table
)
1056 #define FUNC_NAME "weak-table-fold"
1058 SCM_VALIDATE_WEAK_TABLE (3, table
);
1059 SCM_VALIDATE_PROC (1, proc
);
1061 return scm_c_weak_table_fold (fold_trampoline
, SCM_UNPACK_POINTER (proc
), init
, table
);
1066 for_each_trampoline (void *closure
, SCM k
, SCM v
, SCM seed
)
1068 scm_call_2 (SCM_PACK_POINTER (closure
), k
, v
);
1073 scm_weak_table_for_each (SCM proc
, SCM table
)
1074 #define FUNC_NAME "weak-table-for-each"
1076 SCM_VALIDATE_WEAK_TABLE (2, table
);
1077 SCM_VALIDATE_PROC (1, proc
);
1079 scm_c_weak_table_fold (for_each_trampoline
, SCM_UNPACK_POINTER (proc
), SCM_BOOL_F
, table
);
1084 map_trampoline (void *closure
, SCM k
, SCM v
, SCM seed
)
1086 return scm_cons (scm_call_2 (SCM_PACK_POINTER (closure
), k
, v
), seed
);
1090 scm_weak_table_map_to_list (SCM proc
, SCM table
)
1091 #define FUNC_NAME "weak-table-map->list"
1093 SCM_VALIDATE_WEAK_TABLE (2, table
);
1094 SCM_VALIDATE_PROC (1, proc
);
1096 return scm_c_weak_table_fold (map_trampoline
, SCM_UNPACK_POINTER (proc
), SCM_EOL
, table
);
1103 /* Legacy interface. */
1105 SCM_DEFINE (scm_make_weak_key_hash_table
, "make-weak-key-hash-table", 0, 1, 0,
1107 "@deffnx {Scheme Procedure} make-weak-value-hash-table size\n"
1108 "@deffnx {Scheme Procedure} make-doubly-weak-hash-table size\n"
1109 "Return a weak hash table with @var{size} buckets.\n"
1111 "You can modify weak hash tables in exactly the same way you\n"
1112 "would modify regular hash tables. (@pxref{Hash Tables})")
1113 #define FUNC_NAME s_scm_make_weak_key_hash_table
1115 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1116 SCM_WEAK_TABLE_KIND_KEY
);
1121 SCM_DEFINE (scm_make_weak_value_hash_table
, "make-weak-value-hash-table", 0, 1, 0,
1123 "Return a hash table with weak values with @var{size} buckets.\n"
1124 "(@pxref{Hash Tables})")
1125 #define FUNC_NAME s_scm_make_weak_value_hash_table
1127 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1128 SCM_WEAK_TABLE_KIND_VALUE
);
1133 SCM_DEFINE (scm_make_doubly_weak_hash_table
, "make-doubly-weak-hash-table", 1, 0, 0,
1135 "Return a hash table with weak keys and values with @var{size}\n"
1136 "buckets. (@pxref{Hash Tables})")
1137 #define FUNC_NAME s_scm_make_doubly_weak_hash_table
1139 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1140 SCM_WEAK_TABLE_KIND_BOTH
);
1145 SCM_DEFINE (scm_weak_key_hash_table_p
, "weak-key-hash-table?", 1, 0, 0,
1147 "@deffnx {Scheme Procedure} weak-value-hash-table? obj\n"
1148 "@deffnx {Scheme Procedure} doubly-weak-hash-table? obj\n"
1149 "Return @code{#t} if @var{obj} is the specified weak hash\n"
1150 "table. Note that a doubly weak hash table is neither a weak key\n"
1151 "nor a weak value hash table.")
1152 #define FUNC_NAME s_scm_weak_key_hash_table_p
1154 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1155 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_KEY
);
1160 SCM_DEFINE (scm_weak_value_hash_table_p
, "weak-value-hash-table?", 1, 0, 0,
1162 "Return @code{#t} if @var{obj} is a weak value hash table.")
1163 #define FUNC_NAME s_scm_weak_value_hash_table_p
1165 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1166 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_VALUE
);
1171 SCM_DEFINE (scm_doubly_weak_hash_table_p
, "doubly-weak-hash-table?", 1, 0, 0,
1173 "Return @code{#t} if @var{obj} is a doubly weak hash table.")
1174 #define FUNC_NAME s_scm_doubly_weak_hash_table_p
1176 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1177 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_BOTH
);
1186 scm_weak_table_prehistory (void)
1189 GC_new_kind (GC_new_free_list (),
1190 GC_MAKE_PROC (GC_new_proc (mark_weak_key_table
), 0),
1192 weak_value_gc_kind
=
1193 GC_new_kind (GC_new_free_list (),
1194 GC_MAKE_PROC (GC_new_proc (mark_weak_value_table
), 0),
1199 scm_init_weak_table ()
1201 #include "libguile/weak-table.x"