1 /* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
30 #include "libguile/_scm.h"
31 #include "libguile/hash.h"
32 #include "libguile/eval.h"
33 #include "libguile/ports.h"
35 #include "libguile/validate.h"
36 #include "libguile/weak-table.h"
41 This file implements weak hash tables. Weak hash tables are
42 generally used when you want to augment some object with additional
43 data, but when you don't have space to store the data in the object.
44 For example, procedure properties are implemented with weak tables.
46 Weak tables are implemented using an open-addressed hash table.
47 Basically this means that there is an array of entries, and the item
48 is expected to be found the slot corresponding to its hash code,
49 modulo the length of the array.
51 Collisions are handled using linear probing with the Robin Hood
52 technique. See Pedro Celis' paper, "Robin Hood Hashing":
54 http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
56 The vector of entries is allocated in such a way that the GC doesn't
57 trace the weak values. For doubly-weak tables, this means that the
58 entries are allocated as an "atomic" piece of memory. Key-weak and
59 value-weak tables use a special GC kind with a custom mark procedure.
60 When items are added weakly into table, a disappearing link is
61 registered to their locations. If the referent is collected, then
62 that link will be zeroed out.
64 An entry in the table consists of the key and the value, together
65 with the hash code of the key. We munge hash codes so that they are
66 never 0. In this way we can detect removed entries (key of zero but
67 nonzero hash code), and can then reshuffle elements as needed to
68 maintain the robin hood ordering.
70 Compared to buckets-and-chains hash tables, open addressing has the
71 advantage that it is very cache-friendly. It also uses less memory.
73 Implementation-wise, there are two things to note.
75 1. We assume that hash codes are evenly distributed across the
76 range of unsigned longs. The actual hash code stored in the
77 entry is left-shifted by 1 bit (losing 1 bit of hash precision),
78 and then or'd with 1. In this way we ensure that the hash field
79 of an occupied entry is nonzero. To map to an index, we
80 right-shift the hash by one, divide by the size, and take the
83 2. Since the weak references are stored in an atomic region with
84 disappearing links, they need to be accessed with the GC alloc
85 lock. `copy_weak_entry' will do that for you. The hash code
86 itself can be read outside the lock, though.
97 struct weak_entry_data
{
99 scm_t_weak_entry
*out
;
103 do_copy_weak_entry (void *data
)
105 struct weak_entry_data
*e
= data
;
107 e
->out
->hash
= e
->in
->hash
;
108 e
->out
->key
= e
->in
->key
;
109 e
->out
->value
= e
->in
->value
;
115 copy_weak_entry (scm_t_weak_entry
*src
, scm_t_weak_entry
*dst
)
117 struct weak_entry_data data
;
122 GC_call_with_alloc_lock (do_copy_weak_entry
, &data
);
126 register_disappearing_links (scm_t_weak_entry
*entry
,
128 scm_t_weak_table_kind kind
)
130 if (SCM_UNPACK (k
) && SCM_HEAP_OBJECT_P (k
)
131 && (kind
== SCM_WEAK_TABLE_KIND_KEY
132 || kind
== SCM_WEAK_TABLE_KIND_BOTH
))
133 SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR
) &entry
->key
,
134 (GC_PTR
) SCM2PTR (k
));
136 if (SCM_UNPACK (v
) && SCM_HEAP_OBJECT_P (v
)
137 && (kind
== SCM_WEAK_TABLE_KIND_VALUE
138 || kind
== SCM_WEAK_TABLE_KIND_BOTH
))
139 SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR
) &entry
->value
,
140 (GC_PTR
) SCM2PTR (v
));
144 unregister_disappearing_links (scm_t_weak_entry
*entry
,
145 scm_t_weak_table_kind kind
)
147 if (kind
== SCM_WEAK_TABLE_KIND_KEY
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
148 GC_unregister_disappearing_link ((GC_PTR
) &entry
->key
);
150 if (kind
== SCM_WEAK_TABLE_KIND_VALUE
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
151 GC_unregister_disappearing_link ((GC_PTR
) &entry
->value
);
155 move_disappearing_links (scm_t_weak_entry
*from
, scm_t_weak_entry
*to
,
156 SCM key
, SCM value
, scm_t_weak_table_kind kind
)
158 if ((kind
== SCM_WEAK_TABLE_KIND_KEY
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
159 && SCM_HEAP_OBJECT_P (key
))
161 #ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
162 GC_move_disappearing_link ((GC_PTR
) &from
->key
, (GC_PTR
) &to
->key
);
164 GC_unregister_disappearing_link (&from
->key
);
165 SCM_I_REGISTER_DISAPPEARING_LINK (&to
->key
, SCM2PTR (key
));
169 if ((kind
== SCM_WEAK_TABLE_KIND_VALUE
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
170 && SCM_HEAP_OBJECT_P (value
))
172 #ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
173 GC_move_disappearing_link ((GC_PTR
) &from
->value
, (GC_PTR
) &to
->value
);
175 GC_unregister_disappearing_link (&from
->value
);
176 SCM_I_REGISTER_DISAPPEARING_LINK (&to
->value
, SCM2PTR (value
));
182 move_weak_entry (scm_t_weak_entry
*from
, scm_t_weak_entry
*to
,
183 scm_t_weak_table_kind kind
)
187 scm_t_weak_entry copy
;
189 copy_weak_entry (from
, ©
);
190 to
->hash
= copy
.hash
;
192 to
->value
= copy
.value
;
194 move_disappearing_links (from
, to
,
195 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
208 scm_t_weak_entry
*entries
; /* the data */
209 scm_i_pthread_mutex_t lock
; /* the lock */
210 scm_t_weak_table_kind kind
; /* what kind of table it is */
211 unsigned long size
; /* total number of slots. */
212 unsigned long n_items
; /* number of items in table */
213 unsigned long lower
; /* when to shrink */
214 unsigned long upper
; /* when to grow */
215 int size_index
; /* index into hashtable_size */
216 int min_size_index
; /* minimum size_index */
220 #define SCM_WEAK_TABLE_P(x) (SCM_HAS_TYP7 (x, scm_tc7_weak_table))
221 #define SCM_VALIDATE_WEAK_TABLE(pos, arg) \
222 SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_TABLE_P, "weak-table")
223 #define SCM_WEAK_TABLE(x) ((scm_t_weak_table *) SCM_CELL_WORD_1 (x))
/* Map a munged hash (low bit is the occupancy tag) to a slot index.  */
static unsigned long
hash_to_index (unsigned long hash, unsigned long size)
{
  return (hash >> 1) % size;
}
/* How far slot K is from the home slot of HASH, accounting for
   wrap-around — the "probe distance" used for Robin Hood ordering.  */
static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = hash_to_index (hash, size);

  if (k >= origin)
    /* The simple case: no wrap.  */
    return k - origin;
  else
    /* The other key was displaced and wrapped around.  */
    return size - origin + k;
}
245 rob_from_rich (scm_t_weak_table
*table
, unsigned long k
)
247 unsigned long empty
, size
;
251 /* If we are to free up slot K in the table, we need room to do so. */
252 assert (table
->n_items
< size
);
256 empty
= (empty
+ 1) % size
;
257 while (table
->entries
[empty
].hash
);
261 unsigned long last
= empty
? (empty
- 1) : (size
- 1);
262 move_weak_entry (&table
->entries
[last
], &table
->entries
[empty
],
268 table
->entries
[empty
].hash
= 0;
269 table
->entries
[empty
].key
= 0;
270 table
->entries
[empty
].value
= 0;
274 give_to_poor (scm_t_weak_table
*table
, unsigned long k
)
276 /* Slot K was just freed up; possibly shuffle others down. */
277 unsigned long size
= table
->size
;
281 unsigned long next
= (k
+ 1) % size
;
283 scm_t_weak_entry copy
;
285 hash
= table
->entries
[next
].hash
;
287 if (!hash
|| hash_to_index (hash
, size
) == next
)
290 copy_weak_entry (&table
->entries
[next
], ©
);
292 if (!copy
.key
|| !copy
.value
)
293 /* Lost weak reference. */
295 give_to_poor (table
, next
);
300 move_weak_entry (&table
->entries
[next
], &table
->entries
[k
],
306 /* We have shuffled down any entries that should be shuffled down; now
308 table
->entries
[k
].hash
= 0;
309 table
->entries
[k
].key
= 0;
310 table
->entries
[k
].value
= 0;
316 /* The GC "kinds" for singly-weak tables. */
317 static int weak_key_gc_kind
;
318 static int weak_value_gc_kind
;
320 static struct GC_ms_entry
*
321 mark_weak_key_table (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
322 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
324 scm_t_weak_entry
*entries
= (scm_t_weak_entry
*) addr
;
325 unsigned long k
, size
= GC_size (addr
) / sizeof (scm_t_weak_entry
);
327 for (k
= 0; k
< size
; k
++)
328 if (entries
[k
].hash
&& entries
[k
].key
)
330 SCM value
= SCM_PACK (entries
[k
].value
);
331 mark_stack_ptr
= GC_MARK_AND_PUSH ((GC_word
*) SCM2PTR (value
),
332 mark_stack_ptr
, mark_stack_limit
,
336 return mark_stack_ptr
;
339 static struct GC_ms_entry
*
340 mark_weak_value_table (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
341 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
343 scm_t_weak_entry
*entries
= (scm_t_weak_entry
*) addr
;
344 unsigned long k
, size
= GC_size (addr
) / sizeof (scm_t_weak_entry
);
346 for (k
= 0; k
< size
; k
++)
347 if (entries
[k
].hash
&& entries
[k
].value
)
349 SCM key
= SCM_PACK (entries
[k
].key
);
350 mark_stack_ptr
= GC_MARK_AND_PUSH ((GC_word
*) SCM2PTR (key
),
351 mark_stack_ptr
, mark_stack_limit
,
355 return mark_stack_ptr
;
358 static scm_t_weak_entry
*
359 allocate_entries (unsigned long size
, scm_t_weak_table_kind kind
)
361 scm_t_weak_entry
*ret
;
362 size_t bytes
= size
* sizeof (*ret
);
366 case SCM_WEAK_TABLE_KIND_KEY
:
367 ret
= GC_generic_malloc (bytes
, weak_key_gc_kind
);
369 case SCM_WEAK_TABLE_KIND_VALUE
:
370 ret
= GC_generic_malloc (bytes
, weak_value_gc_kind
);
372 case SCM_WEAK_TABLE_KIND_BOTH
:
373 ret
= scm_gc_malloc_pointerless (bytes
, "weak-table");
379 memset (ret
, 0, bytes
);
386 /* Growing or shrinking is triggered when the load factor
388 * L = N / S (N: number of items in table, S: bucket vector length)
390 * passes an upper limit of 0.9 or a lower limit of 0.2.
392 * The implementation stores the upper and lower number of items which
393 * trigger a resize in the hashtable object.
395 * Possible hash table sizes (primes) are stored in the array
/* The successive (prime) table sizes; each roughly doubles the last.  */
static unsigned long hashtable_size[] = {
  31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
  224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
  57524111, 115048217, 230096423
};

#define HASHTABLE_SIZE_N (sizeof(hashtable_size)/sizeof(unsigned long))
408 compute_size_index (scm_t_weak_table
*table
)
410 int i
= table
->size_index
;
412 if (table
->n_items
< table
->lower
)
414 /* rehashing is not triggered when i <= min_size */
417 while (i
> table
->min_size_index
418 && table
->n_items
< hashtable_size
[i
] / 5);
420 else if (table
->n_items
> table
->upper
)
423 if (i
>= HASHTABLE_SIZE_N
)
424 /* The biggest size currently is 230096423, which for a 32-bit
425 machine will occupy 2.3GB of memory at a load of 80%. There
426 is probably something better to do here, but if you have a
427 weak map of that size, you are hosed in any case. */
435 resize_table (scm_t_weak_table
*table
)
437 scm_t_weak_entry
*old_entries
, *new_entries
;
439 unsigned long old_size
, new_size
, old_k
;
443 new_size_index
= compute_size_index (table
);
444 if (new_size_index
== table
->size_index
)
446 new_size
= hashtable_size
[new_size_index
];
447 scm_i_pthread_mutex_unlock (&table
->lock
);
448 /* Allocating memory might cause finalizers to run, which could
449 run anything, so drop our lock to avoid deadlocks. */
450 new_entries
= allocate_entries (new_size
, table
->kind
);
451 scm_i_pthread_mutex_unlock (&table
->lock
);
453 while (new_size_index
!= compute_size_index (table
));
455 old_entries
= table
->entries
;
456 old_size
= table
->size
;
458 table
->size_index
= new_size_index
;
459 table
->size
= new_size
;
460 if (new_size_index
<= table
->min_size_index
)
463 table
->lower
= new_size
/ 5;
464 table
->upper
= 9 * new_size
/ 10;
466 table
->entries
= new_entries
;
468 for (old_k
= 0; old_k
< old_size
; old_k
++)
470 scm_t_weak_entry copy
;
471 unsigned long new_k
, distance
;
473 if (!old_entries
[old_k
].hash
)
476 copy_weak_entry (&old_entries
[old_k
], ©
);
478 if (!copy
.key
|| !copy
.value
)
481 new_k
= hash_to_index (copy
.hash
, new_size
);
483 for (distance
= 0; ; distance
++, new_k
= (new_k
+ 1) % new_size
)
485 unsigned long other_hash
= new_entries
[new_k
].hash
;
488 /* Found an empty entry. */
491 /* Displace the entry if our distance is less, otherwise keep
493 if (entry_distance (other_hash
, new_k
, new_size
) < distance
)
495 rob_from_rich (table
, new_k
);
501 new_entries
[new_k
].hash
= copy
.hash
;
502 new_entries
[new_k
].key
= copy
.key
;
503 new_entries
[new_k
].value
= copy
.value
;
505 register_disappearing_links (&new_entries
[new_k
],
506 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
511 /* Run after GC via do_vacuum_weak_table, this function runs over the
512 whole table, removing lost weak references, reshuffling the table as it
513 goes. It might resize the table if it reaps enough entries. */
515 vacuum_weak_table (scm_t_weak_table
*table
)
517 scm_t_weak_entry
*entries
= table
->entries
;
518 unsigned long size
= table
->size
;
521 for (k
= 0; k
< size
; k
++)
523 unsigned long hash
= entries
[k
].hash
;
527 scm_t_weak_entry copy
;
529 copy_weak_entry (&entries
[k
], ©
);
531 if (!copy
.key
|| !copy
.value
)
532 /* Lost weak reference; reshuffle. */
534 give_to_poor (table
, k
);
540 if (table
->n_items
< table
->lower
)
541 resize_table (table
);
548 weak_table_ref (scm_t_weak_table
*table
, unsigned long hash
,
549 scm_t_table_predicate_fn pred
, void *closure
,
552 unsigned long k
, distance
, size
;
553 scm_t_weak_entry
*entries
;
556 entries
= table
->entries
;
558 hash
= (hash
<< 1) | 0x1;
559 k
= hash_to_index (hash
, size
);
561 for (distance
= 0; distance
< size
; distance
++, k
= (k
+ 1) % size
)
563 unsigned long other_hash
;
566 other_hash
= entries
[k
].hash
;
572 if (hash
== other_hash
)
574 scm_t_weak_entry copy
;
576 copy_weak_entry (&entries
[k
], ©
);
578 if (!copy
.key
|| !copy
.value
)
579 /* Lost weak reference; reshuffle. */
581 give_to_poor (table
, k
);
586 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
588 return SCM_PACK (copy
.value
);
591 /* If the entry's distance is less, our key is not in the table. */
592 if (entry_distance (other_hash
, k
, size
) < distance
)
596 /* If we got here, then we were unfortunate enough to loop through the
597 whole table. Shouldn't happen, but hey. */
603 weak_table_put_x (scm_t_weak_table
*table
, unsigned long hash
,
604 scm_t_table_predicate_fn pred
, void *closure
,
607 unsigned long k
, distance
, size
;
608 scm_t_weak_entry
*entries
;
611 entries
= table
->entries
;
613 hash
= (hash
<< 1) | 0x1;
614 k
= hash_to_index (hash
, size
);
616 for (distance
= 0; ; distance
++, k
= (k
+ 1) % size
)
618 unsigned long other_hash
;
621 other_hash
= entries
[k
].hash
;
624 /* Found an empty entry. */
627 if (other_hash
== hash
)
629 scm_t_weak_entry copy
;
631 copy_weak_entry (&entries
[k
], ©
);
633 if (!copy
.key
|| !copy
.value
)
634 /* Lost weak reference; reshuffle. */
636 give_to_poor (table
, k
);
641 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
642 /* Found an entry with this key. */
646 if (table
->n_items
> table
->upper
)
647 /* Full table, time to resize. */
649 resize_table (table
);
650 return weak_table_put_x (table
, hash
>> 1, pred
, closure
, key
, value
);
653 /* Displace the entry if our distance is less, otherwise keep
655 if (entry_distance (other_hash
, k
, size
) < distance
)
657 rob_from_rich (table
, k
);
663 unregister_disappearing_links (&entries
[k
], table
->kind
);
667 entries
[k
].hash
= hash
;
668 entries
[k
].key
= SCM_UNPACK (key
);
669 entries
[k
].value
= SCM_UNPACK (value
);
671 register_disappearing_links (&entries
[k
], key
, value
, table
->kind
);
676 weak_table_remove_x (scm_t_weak_table
*table
, unsigned long hash
,
677 scm_t_table_predicate_fn pred
, void *closure
)
679 unsigned long k
, distance
, size
;
680 scm_t_weak_entry
*entries
;
683 entries
= table
->entries
;
685 hash
= (hash
<< 1) | 0x1;
686 k
= hash_to_index (hash
, size
);
688 for (distance
= 0; distance
< size
; distance
++, k
= (k
+ 1) % size
)
690 unsigned long other_hash
;
693 other_hash
= entries
[k
].hash
;
699 if (other_hash
== hash
)
701 scm_t_weak_entry copy
;
703 copy_weak_entry (&entries
[k
], ©
);
705 if (!copy
.key
|| !copy
.value
)
706 /* Lost weak reference; reshuffle. */
708 give_to_poor (table
, k
);
713 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
714 /* Found an entry with this key. */
718 entries
[k
].value
= 0;
720 unregister_disappearing_links (&entries
[k
], table
->kind
);
722 if (--table
->n_items
< table
->lower
)
723 resize_table (table
);
725 give_to_poor (table
, k
);
731 /* If the entry's distance is less, our key is not in the table. */
732 if (entry_distance (other_hash
, k
, size
) < distance
)
740 make_weak_table (unsigned long k
, scm_t_weak_table_kind kind
)
742 scm_t_weak_table
*table
;
744 int i
= 0, n
= k
? k
: 31;
745 while (i
+ 1 < HASHTABLE_SIZE_N
&& n
> hashtable_size
[i
])
747 n
= hashtable_size
[i
];
749 table
= scm_gc_malloc (sizeof (*table
), "weak-table");
750 table
->entries
= allocate_entries (n
, kind
);
755 table
->upper
= 9 * n
/ 10;
756 table
->size_index
= i
;
757 table
->min_size_index
= i
;
758 scm_i_pthread_mutex_init (&table
->lock
, NULL
);
760 return scm_cell (scm_tc7_weak_table
, (scm_t_bits
)table
);
764 scm_i_weak_table_print (SCM exp
, SCM port
, scm_print_state
*pstate
)
766 scm_puts_unlocked ("#<", port
);
767 scm_puts_unlocked ("weak-table ", port
);
768 scm_uintprint (SCM_WEAK_TABLE (exp
)->n_items
, 10, port
);
769 scm_putc_unlocked ('/', port
);
770 scm_uintprint (SCM_WEAK_TABLE (exp
)->size
, 10, port
);
771 scm_puts_unlocked (">", port
);
775 do_vacuum_weak_table (SCM table
)
779 t
= SCM_WEAK_TABLE (table
);
781 if (scm_i_pthread_mutex_trylock (&t
->lock
) == 0)
783 vacuum_weak_table (t
);
784 scm_i_pthread_mutex_unlock (&t
->lock
);
790 /* The before-gc C hook only runs if GC_table_start_callback is available,
791 so if not, fall back on a finalizer-based implementation. */
793 weak_gc_callback (void **weak
)
796 void (*callback
) (SCM
) = weak
[1];
801 callback (SCM_PACK_POINTER (val
));
806 #ifdef HAVE_GC_TABLE_START_CALLBACK
808 weak_gc_hook (void *hook_data
, void *fn_data
, void *data
)
810 if (!weak_gc_callback (fn_data
))
811 scm_c_hook_remove (&scm_before_gc_c_hook
, weak_gc_hook
, fn_data
);
817 weak_gc_finalizer (void *ptr
, void *data
)
819 if (weak_gc_callback (ptr
))
820 GC_REGISTER_FINALIZER_NO_ORDER (ptr
, weak_gc_finalizer
, data
, NULL
, NULL
);
825 scm_c_register_weak_gc_callback (SCM obj
, void (*callback
) (SCM
))
827 void **weak
= GC_MALLOC_ATOMIC (sizeof (void*) * 2);
829 weak
[0] = SCM_UNPACK_POINTER (obj
);
830 weak
[1] = (void*)callback
;
831 GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak
, SCM2PTR (obj
));
833 #ifdef HAVE_GC_TABLE_START_CALLBACK
834 scm_c_hook_add (&scm_after_gc_c_hook
, weak_gc_hook
, weak
, 0);
836 GC_REGISTER_FINALIZER_NO_ORDER (weak
, weak_gc_finalizer
, NULL
, NULL
, NULL
);
841 scm_c_make_weak_table (unsigned long k
, scm_t_weak_table_kind kind
)
845 ret
= make_weak_table (k
, kind
);
847 scm_c_register_weak_gc_callback (ret
, do_vacuum_weak_table
);
853 scm_weak_table_p (SCM obj
)
855 return scm_from_bool (SCM_WEAK_TABLE_P (obj
));
859 scm_c_weak_table_ref (SCM table
, unsigned long raw_hash
,
860 scm_t_table_predicate_fn pred
,
861 void *closure
, SCM dflt
)
862 #define FUNC_NAME "weak-table-ref"
867 SCM_VALIDATE_WEAK_TABLE (1, table
);
869 t
= SCM_WEAK_TABLE (table
);
871 scm_i_pthread_mutex_lock (&t
->lock
);
873 ret
= weak_table_ref (t
, raw_hash
, pred
, closure
, dflt
);
875 scm_i_pthread_mutex_unlock (&t
->lock
);
882 scm_c_weak_table_put_x (SCM table
, unsigned long raw_hash
,
883 scm_t_table_predicate_fn pred
,
884 void *closure
, SCM key
, SCM value
)
885 #define FUNC_NAME "weak-table-put!"
889 SCM_VALIDATE_WEAK_TABLE (1, table
);
891 t
= SCM_WEAK_TABLE (table
);
893 scm_i_pthread_mutex_lock (&t
->lock
);
895 weak_table_put_x (t
, raw_hash
, pred
, closure
, key
, value
);
897 scm_i_pthread_mutex_unlock (&t
->lock
);
902 scm_c_weak_table_remove_x (SCM table
, unsigned long raw_hash
,
903 scm_t_table_predicate_fn pred
,
905 #define FUNC_NAME "weak-table-remove!"
909 SCM_VALIDATE_WEAK_TABLE (1, table
);
911 t
= SCM_WEAK_TABLE (table
);
913 scm_i_pthread_mutex_lock (&t
->lock
);
915 weak_table_remove_x (t
, raw_hash
, pred
, closure
);
917 scm_i_pthread_mutex_unlock (&t
->lock
);
922 assq_predicate (SCM x
, SCM y
, void *closure
)
924 return scm_is_eq (x
, SCM_PACK_POINTER (closure
));
928 scm_weak_table_refq (SCM table
, SCM key
, SCM dflt
)
930 if (SCM_UNBNDP (dflt
))
933 return scm_c_weak_table_ref (table
, scm_ihashq (key
, -1),
934 assq_predicate
, SCM_UNPACK_POINTER (key
),
939 scm_weak_table_putq_x (SCM table
, SCM key
, SCM value
)
941 scm_c_weak_table_put_x (table
, scm_ihashq (key
, -1),
942 assq_predicate
, SCM_UNPACK_POINTER (key
),
944 return SCM_UNSPECIFIED
;
948 scm_weak_table_remq_x (SCM table
, SCM key
)
950 scm_c_weak_table_remove_x (table
, scm_ihashq (key
, -1),
951 assq_predicate
, SCM_UNPACK_POINTER (key
));
952 return SCM_UNSPECIFIED
;
956 scm_weak_table_clear_x (SCM table
)
957 #define FUNC_NAME "weak-table-clear!"
961 SCM_VALIDATE_WEAK_TABLE (1, table
);
963 t
= SCM_WEAK_TABLE (table
);
965 scm_i_pthread_mutex_lock (&t
->lock
);
967 memset (t
->entries
, 0, sizeof (scm_t_weak_entry
) * t
->size
);
970 scm_i_pthread_mutex_unlock (&t
->lock
);
972 return SCM_UNSPECIFIED
;
977 scm_c_weak_table_fold (scm_t_table_fold_fn proc
, void *closure
,
981 scm_t_weak_entry
*entries
;
982 unsigned long k
, size
;
984 t
= SCM_WEAK_TABLE (table
);
986 scm_i_pthread_mutex_lock (&t
->lock
);
989 entries
= t
->entries
;
991 for (k
= 0; k
< size
; k
++)
995 scm_t_weak_entry copy
;
997 copy_weak_entry (&entries
[k
], ©
);
999 if (copy
.key
&& copy
.value
)
1001 /* Release table lock while we call the function. */
1002 scm_i_pthread_mutex_unlock (&t
->lock
);
1003 init
= proc (closure
,
1004 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
1006 scm_i_pthread_mutex_lock (&t
->lock
);
1011 scm_i_pthread_mutex_unlock (&t
->lock
);
1017 fold_trampoline (void *closure
, SCM k
, SCM v
, SCM init
)
1019 return scm_call_3 (SCM_PACK_POINTER (closure
), k
, v
, init
);
1023 scm_weak_table_fold (SCM proc
, SCM init
, SCM table
)
1024 #define FUNC_NAME "weak-table-fold"
1026 SCM_VALIDATE_WEAK_TABLE (3, table
);
1027 SCM_VALIDATE_PROC (1, proc
);
1029 return scm_c_weak_table_fold (fold_trampoline
, SCM_UNPACK_POINTER (proc
), init
, table
);
1034 for_each_trampoline (void *closure
, SCM k
, SCM v
, SCM seed
)
1036 scm_call_2 (SCM_PACK_POINTER (closure
), k
, v
);
1041 scm_weak_table_for_each (SCM proc
, SCM table
)
1042 #define FUNC_NAME "weak-table-for-each"
1044 SCM_VALIDATE_WEAK_TABLE (2, table
);
1045 SCM_VALIDATE_PROC (1, proc
);
1047 scm_c_weak_table_fold (for_each_trampoline
, SCM_UNPACK_POINTER (proc
), SCM_BOOL_F
, table
);
1049 return SCM_UNSPECIFIED
;
1054 map_trampoline (void *closure
, SCM k
, SCM v
, SCM seed
)
1056 return scm_cons (scm_call_2 (SCM_PACK_POINTER (closure
), k
, v
), seed
);
1060 scm_weak_table_map_to_list (SCM proc
, SCM table
)
1061 #define FUNC_NAME "weak-table-map->list"
1063 SCM_VALIDATE_WEAK_TABLE (2, table
);
1064 SCM_VALIDATE_PROC (1, proc
);
1066 return scm_c_weak_table_fold (map_trampoline
, SCM_UNPACK_POINTER (proc
), SCM_EOL
, table
);
1073 /* Legacy interface. */
1075 SCM_DEFINE (scm_make_weak_key_hash_table
, "make-weak-key-hash-table", 0, 1, 0,
1077 "@deffnx {Scheme Procedure} make-weak-value-hash-table size\n"
1078 "@deffnx {Scheme Procedure} make-doubly-weak-hash-table size\n"
1079 "Return a weak hash table with @var{size} buckets.\n"
1081 "You can modify weak hash tables in exactly the same way you\n"
1082 "would modify regular hash tables. (@pxref{Hash Tables})")
1083 #define FUNC_NAME s_scm_make_weak_key_hash_table
1085 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1086 SCM_WEAK_TABLE_KIND_KEY
);
1091 SCM_DEFINE (scm_make_weak_value_hash_table
, "make-weak-value-hash-table", 0, 1, 0,
1093 "Return a hash table with weak values with @var{size} buckets.\n"
1094 "(@pxref{Hash Tables})")
1095 #define FUNC_NAME s_scm_make_weak_value_hash_table
1097 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1098 SCM_WEAK_TABLE_KIND_VALUE
);
1103 SCM_DEFINE (scm_make_doubly_weak_hash_table
, "make-doubly-weak-hash-table", 1, 0, 0,
1105 "Return a hash table with weak keys and values with @var{size}\n"
1106 "buckets. (@pxref{Hash Tables})")
1107 #define FUNC_NAME s_scm_make_doubly_weak_hash_table
1109 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1110 SCM_WEAK_TABLE_KIND_BOTH
);
1115 SCM_DEFINE (scm_weak_key_hash_table_p
, "weak-key-hash-table?", 1, 0, 0,
1117 "@deffnx {Scheme Procedure} weak-value-hash-table? obj\n"
1118 "@deffnx {Scheme Procedure} doubly-weak-hash-table? obj\n"
1119 "Return @code{#t} if @var{obj} is the specified weak hash\n"
1120 "table. Note that a doubly weak hash table is neither a weak key\n"
1121 "nor a weak value hash table.")
1122 #define FUNC_NAME s_scm_weak_key_hash_table_p
1124 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1125 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_KEY
);
1130 SCM_DEFINE (scm_weak_value_hash_table_p
, "weak-value-hash-table?", 1, 0, 0,
1132 "Return @code{#t} if @var{obj} is a weak value hash table.")
1133 #define FUNC_NAME s_scm_weak_value_hash_table_p
1135 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1136 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_VALUE
);
1141 SCM_DEFINE (scm_doubly_weak_hash_table_p
, "doubly-weak-hash-table?", 1, 0, 0,
1143 "Return @code{#t} if @var{obj} is a doubly weak hash table.")
1144 #define FUNC_NAME s_scm_doubly_weak_hash_table_p
1146 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1147 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_BOTH
);
1156 scm_weak_table_prehistory (void)
1159 GC_new_kind (GC_new_free_list (),
1160 GC_MAKE_PROC (GC_new_proc (mark_weak_key_table
), 0),
1162 weak_value_gc_kind
=
1163 GC_new_kind (GC_new_free_list (),
1164 GC_MAKE_PROC (GC_new_proc (mark_weak_value_table
), 0),
1169 scm_init_weak_table ()
1171 #include "libguile/weak-table.x"