1 /* Copyright (C) 2011, 2012, 2013, 2014 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
30 #include "libguile/_scm.h"
31 #include "libguile/hash.h"
32 #include "libguile/eval.h"
33 #include "libguile/ports.h"
35 #include "libguile/validate.h"
36 #include "libguile/weak-table.h"
41 This file implements weak hash tables. Weak hash tables are
42 generally used when you want to augment some object with additional
43 data, but when you don't have space to store the data in the object.
44 For example, procedure properties are implemented with weak tables.
46 Weak tables are implemented using an open-addressed hash table.
47 Basically this means that there is an array of entries, and the item
48 is expected to be found in the slot corresponding to its hash code,
49 modulo the length of the array.
51 Collisions are handled using linear probing with the Robin Hood
52 technique. See Pedro Celis' paper, "Robin Hood Hashing":
54 http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
56 The vector of entries is allocated in such a way that the GC doesn't
57 trace the weak values. For doubly-weak tables, this means that the
58 entries are allocated as an "atomic" piece of memory. Key-weak and
59 value-weak tables use a special GC kind with a custom mark procedure.
60 When items are added weakly into a table, a disappearing link is
61 registered to their locations. If the referent is collected, then
62 that link will be zeroed out.
64 An entry in the table consists of the key and the value, together
65 with the hash code of the key. We munge hash codes so that they are
66 never 0. In this way we can detect removed entries (key of zero but
67 nonzero hash code), and can then reshuffle elements as needed to
68 maintain the robin hood ordering.
70 Compared to buckets-and-chains hash tables, open addressing has the
71 advantage that it is very cache-friendly. It also uses less memory.
73 Implementation-wise, there are two things to note.
75 1. We assume that hash codes are evenly distributed across the
76 range of unsigned longs. The actual hash code stored in the
77 entry is left-shifted by 1 bit (losing 1 bit of hash precision),
78 and then or'd with 1. In this way we ensure that the hash field
79 of an occupied entry is nonzero. To map to an index, we
80 right-shift the hash by one, divide by the size, and take the
83 2. Since the weak references are stored in an atomic region with
84 disappearing links, they need to be accessed with the GC alloc
85 lock. `copy_weak_entry' will do that for you. The hash code
86 itself can be read outside the lock, though.
97 struct weak_entry_data
{
99 scm_t_weak_entry
*out
;
103 do_copy_weak_entry (void *data
)
105 struct weak_entry_data
*e
= data
;
107 e
->out
->hash
= e
->in
->hash
;
108 e
->out
->key
= e
->in
->key
;
109 e
->out
->value
= e
->in
->value
;
115 copy_weak_entry (scm_t_weak_entry
*src
, scm_t_weak_entry
*dst
)
117 struct weak_entry_data data
;
122 GC_call_with_alloc_lock (do_copy_weak_entry
, &data
);
126 register_disappearing_links (scm_t_weak_entry
*entry
,
128 scm_t_weak_table_kind kind
)
130 if (SCM_UNPACK (k
) && SCM_HEAP_OBJECT_P (k
)
131 && (kind
== SCM_WEAK_TABLE_KIND_KEY
132 || kind
== SCM_WEAK_TABLE_KIND_BOTH
))
133 SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &entry
->key
,
136 if (SCM_UNPACK (v
) && SCM_HEAP_OBJECT_P (v
)
137 && (kind
== SCM_WEAK_TABLE_KIND_VALUE
138 || kind
== SCM_WEAK_TABLE_KIND_BOTH
))
139 SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &entry
->value
,
144 unregister_disappearing_links (scm_t_weak_entry
*entry
,
145 scm_t_weak_table_kind kind
)
147 if (kind
== SCM_WEAK_TABLE_KIND_KEY
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
148 GC_unregister_disappearing_link ((void **) &entry
->key
);
150 if (kind
== SCM_WEAK_TABLE_KIND_VALUE
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
151 GC_unregister_disappearing_link ((void **) &entry
->value
);
154 #ifndef HAVE_GC_MOVE_DISAPPEARING_LINK
/* Fallback for libgc versions lacking GC_move_disappearing_link.
   NOTE(review): unregister-then-reregister is not atomic with respect to
   the collector; assumed acceptable for this compatibility shim --
   confirm against the libgc version matrix.  */
static void
GC_move_disappearing_link (void **from, void **to)
{
  GC_unregister_disappearing_link (from);
  SCM_I_REGISTER_DISAPPEARING_LINK (to, *to);
}
164 move_disappearing_links (scm_t_weak_entry
*from
, scm_t_weak_entry
*to
,
165 SCM key
, SCM value
, scm_t_weak_table_kind kind
)
167 if ((kind
== SCM_WEAK_TABLE_KIND_KEY
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
168 && SCM_HEAP_OBJECT_P (key
))
169 GC_move_disappearing_link ((void **) &from
->key
, (void **) &to
->key
);
171 if ((kind
== SCM_WEAK_TABLE_KIND_VALUE
|| kind
== SCM_WEAK_TABLE_KIND_BOTH
)
172 && SCM_HEAP_OBJECT_P (value
))
173 GC_move_disappearing_link ((void **) &from
->value
, (void **) &to
->value
);
177 move_weak_entry (scm_t_weak_entry
*from
, scm_t_weak_entry
*to
,
178 scm_t_weak_table_kind kind
)
182 scm_t_weak_entry copy
;
184 copy_weak_entry (from
, ©
);
185 to
->hash
= copy
.hash
;
187 to
->value
= copy
.value
;
189 move_disappearing_links (from
, to
,
190 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
203 scm_t_weak_entry
*entries
; /* the data */
204 scm_i_pthread_mutex_t lock
; /* the lock */
205 scm_t_weak_table_kind kind
; /* what kind of table it is */
206 unsigned long size
; /* total number of slots. */
207 unsigned long n_items
; /* number of items in table */
208 unsigned long lower
; /* when to shrink */
209 unsigned long upper
; /* when to grow */
210 int size_index
; /* index into hashtable_size */
211 int min_size_index
; /* minimum size_index */
215 #define SCM_WEAK_TABLE_P(x) (SCM_HAS_TYP7 (x, scm_tc7_weak_table))
216 #define SCM_VALIDATE_WEAK_TABLE(pos, arg) \
217 SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_TABLE_P, "weak-table")
218 #define SCM_WEAK_TABLE(x) ((scm_t_weak_table *) SCM_CELL_WORD_1 (x))
/* Map a munged HASH (low bit set, see the file comment) to a slot index
   in a table of SIZE slots.  The low bit is dropped before the modulo
   because it is always 1 and carries no information.  */
static unsigned long
hash_to_index (unsigned long hash, unsigned long size)
{
  return (hash >> 1) % size;
}

/* How far slot K is from the "home" slot of an entry with hash code
   HASH, in a table of SIZE slots.  Used for the Robin Hood ordering.  */
static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = hash_to_index (hash, size);

  if (k >= origin)
    return k - origin;
  else
    /* The other key was displaced and wrapped around.  */
    return size - origin + k;
}
240 rob_from_rich (scm_t_weak_table
*table
, unsigned long k
)
242 unsigned long empty
, size
;
246 /* If we are to free up slot K in the table, we need room to do so. */
247 assert (table
->n_items
< size
);
251 empty
= (empty
+ 1) % size
;
252 while (table
->entries
[empty
].hash
);
256 unsigned long last
= empty
? (empty
- 1) : (size
- 1);
257 move_weak_entry (&table
->entries
[last
], &table
->entries
[empty
],
263 table
->entries
[empty
].hash
= 0;
264 table
->entries
[empty
].key
= 0;
265 table
->entries
[empty
].value
= 0;
269 give_to_poor (scm_t_weak_table
*table
, unsigned long k
)
271 /* Slot K was just freed up; possibly shuffle others down. */
272 unsigned long size
= table
->size
;
276 unsigned long next
= (k
+ 1) % size
;
278 scm_t_weak_entry copy
;
280 hash
= table
->entries
[next
].hash
;
282 if (!hash
|| hash_to_index (hash
, size
) == next
)
285 copy_weak_entry (&table
->entries
[next
], ©
);
287 if (!copy
.key
|| !copy
.value
)
288 /* Lost weak reference. */
290 give_to_poor (table
, next
);
295 move_weak_entry (&table
->entries
[next
], &table
->entries
[k
],
301 /* We have shuffled down any entries that should be shuffled down; now
303 table
->entries
[k
].hash
= 0;
304 table
->entries
[k
].key
= 0;
305 table
->entries
[k
].value
= 0;
311 /* The GC "kinds" for singly-weak tables. */
312 static int weak_key_gc_kind
;
313 static int weak_value_gc_kind
;
315 static struct GC_ms_entry
*
316 mark_weak_key_table (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
317 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
319 scm_t_weak_entry
*entries
= (scm_t_weak_entry
*) addr
;
320 unsigned long k
, size
= GC_size (addr
) / sizeof (scm_t_weak_entry
);
322 for (k
= 0; k
< size
; k
++)
323 if (entries
[k
].hash
&& entries
[k
].key
)
325 SCM value
= SCM_PACK (entries
[k
].value
);
326 mark_stack_ptr
= GC_MARK_AND_PUSH ((GC_word
*) SCM2PTR (value
),
327 mark_stack_ptr
, mark_stack_limit
,
331 return mark_stack_ptr
;
334 static struct GC_ms_entry
*
335 mark_weak_value_table (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
336 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
338 scm_t_weak_entry
*entries
= (scm_t_weak_entry
*) addr
;
339 unsigned long k
, size
= GC_size (addr
) / sizeof (scm_t_weak_entry
);
341 for (k
= 0; k
< size
; k
++)
342 if (entries
[k
].hash
&& entries
[k
].value
)
344 SCM key
= SCM_PACK (entries
[k
].key
);
345 mark_stack_ptr
= GC_MARK_AND_PUSH ((GC_word
*) SCM2PTR (key
),
346 mark_stack_ptr
, mark_stack_limit
,
350 return mark_stack_ptr
;
353 static scm_t_weak_entry
*
354 allocate_entries (unsigned long size
, scm_t_weak_table_kind kind
)
356 scm_t_weak_entry
*ret
;
357 size_t bytes
= size
* sizeof (*ret
);
361 case SCM_WEAK_TABLE_KIND_KEY
:
362 ret
= GC_generic_malloc (bytes
, weak_key_gc_kind
);
364 case SCM_WEAK_TABLE_KIND_VALUE
:
365 ret
= GC_generic_malloc (bytes
, weak_value_gc_kind
);
367 case SCM_WEAK_TABLE_KIND_BOTH
:
368 ret
= scm_gc_malloc_pointerless (bytes
, "weak-table");
374 memset (ret
, 0, bytes
);
/* Growing or shrinking is triggered when the load factor
 *
 *   L = N / S    (N: number of items in table, S: bucket vector length)
 *
 * passes an upper limit of 0.9 or a lower limit of 0.2.
 *
 * The implementation stores the upper and lower number of items which
 * trigger a resize in the hashtable object.
 *
 * Possible hash table sizes (primes) are stored in the array
 * hashtable_size.
 */

static unsigned long hashtable_size[] = {
  31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
  224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
  57524111, 115048217, 230096423
};

#define HASHTABLE_SIZE_N (sizeof(hashtable_size)/sizeof(unsigned long))
403 compute_size_index (scm_t_weak_table
*table
)
405 int i
= table
->size_index
;
407 if (table
->n_items
< table
->lower
)
409 /* rehashing is not triggered when i <= min_size */
412 while (i
> table
->min_size_index
413 && table
->n_items
< hashtable_size
[i
] / 5);
415 else if (table
->n_items
> table
->upper
)
418 if (i
>= HASHTABLE_SIZE_N
)
419 /* The biggest size currently is 230096423, which for a 32-bit
420 machine will occupy 2.3GB of memory at a load of 80%. There
421 is probably something better to do here, but if you have a
422 weak map of that size, you are hosed in any case. */
430 is_acceptable_size_index (scm_t_weak_table
*table
, int size_index
)
432 int computed
= compute_size_index (table
);
434 if (size_index
== computed
)
435 /* We were going to grow or shrink, and allocating the new vector
436 didn't change the target size. */
439 if (size_index
== computed
+ 1)
441 /* We were going to enlarge the table, but allocating the new
442 vector finalized some objects, making an enlargement
443 unnecessary. It might still be a good idea to use the larger
444 table, though. (This branch also gets hit if, while allocating
445 the vector, some other thread was actively removing items from
446 the table. That is less likely, though.) */
447 unsigned long new_lower
= hashtable_size
[size_index
] / 5;
449 return table
->size
> new_lower
;
452 if (size_index
== computed
- 1)
454 /* We were going to shrink the table, but when we dropped the lock
455 to allocate the new vector, some other thread added elements to
460 /* The computed size differs from our newly allocated size by more
461 than one size index -- recalculate. */
466 resize_table (scm_t_weak_table
*table
)
468 scm_t_weak_entry
*old_entries
, *new_entries
;
470 unsigned long old_size
, new_size
, old_k
;
474 new_size_index
= compute_size_index (table
);
475 if (new_size_index
== table
->size_index
)
477 new_size
= hashtable_size
[new_size_index
];
478 new_entries
= allocate_entries (new_size
, table
->kind
);
480 while (!is_acceptable_size_index (table
, new_size_index
));
482 old_entries
= table
->entries
;
483 old_size
= table
->size
;
485 table
->size_index
= new_size_index
;
486 table
->size
= new_size
;
487 if (new_size_index
<= table
->min_size_index
)
490 table
->lower
= new_size
/ 5;
491 table
->upper
= 9 * new_size
/ 10;
493 table
->entries
= new_entries
;
495 for (old_k
= 0; old_k
< old_size
; old_k
++)
497 scm_t_weak_entry copy
;
498 unsigned long new_k
, distance
;
500 if (!old_entries
[old_k
].hash
)
503 copy_weak_entry (&old_entries
[old_k
], ©
);
505 if (!copy
.key
|| !copy
.value
)
508 new_k
= hash_to_index (copy
.hash
, new_size
);
510 for (distance
= 0; ; distance
++, new_k
= (new_k
+ 1) % new_size
)
512 unsigned long other_hash
= new_entries
[new_k
].hash
;
515 /* Found an empty entry. */
518 /* Displace the entry if our distance is less, otherwise keep
520 if (entry_distance (other_hash
, new_k
, new_size
) < distance
)
522 rob_from_rich (table
, new_k
);
528 new_entries
[new_k
].hash
= copy
.hash
;
529 new_entries
[new_k
].key
= copy
.key
;
530 new_entries
[new_k
].value
= copy
.value
;
532 register_disappearing_links (&new_entries
[new_k
],
533 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
538 /* Run after GC via do_vacuum_weak_table, this function runs over the
539 whole table, removing lost weak references, reshuffling the table as it
540 goes. It might resize the table if it reaps enough entries. */
542 vacuum_weak_table (scm_t_weak_table
*table
)
544 scm_t_weak_entry
*entries
= table
->entries
;
545 unsigned long size
= table
->size
;
548 for (k
= 0; k
< size
; k
++)
550 unsigned long hash
= entries
[k
].hash
;
554 scm_t_weak_entry copy
;
556 copy_weak_entry (&entries
[k
], ©
);
558 if (!copy
.key
|| !copy
.value
)
559 /* Lost weak reference; reshuffle. */
561 give_to_poor (table
, k
);
567 if (table
->n_items
< table
->lower
)
568 resize_table (table
);
575 weak_table_ref (scm_t_weak_table
*table
, unsigned long hash
,
576 scm_t_table_predicate_fn pred
, void *closure
,
579 unsigned long k
, distance
, size
;
580 scm_t_weak_entry
*entries
;
583 entries
= table
->entries
;
585 hash
= (hash
<< 1) | 0x1;
586 k
= hash_to_index (hash
, size
);
588 for (distance
= 0; distance
< size
; distance
++, k
= (k
+ 1) % size
)
590 unsigned long other_hash
;
593 other_hash
= entries
[k
].hash
;
599 if (hash
== other_hash
)
601 scm_t_weak_entry copy
;
603 copy_weak_entry (&entries
[k
], ©
);
605 if (!copy
.key
|| !copy
.value
)
606 /* Lost weak reference; reshuffle. */
608 give_to_poor (table
, k
);
613 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
615 return SCM_PACK (copy
.value
);
618 /* If the entry's distance is less, our key is not in the table. */
619 if (entry_distance (other_hash
, k
, size
) < distance
)
623 /* If we got here, then we were unfortunate enough to loop through the
624 whole table. Shouldn't happen, but hey. */
630 weak_table_put_x (scm_t_weak_table
*table
, unsigned long hash
,
631 scm_t_table_predicate_fn pred
, void *closure
,
634 unsigned long k
, distance
, size
;
635 scm_t_weak_entry
*entries
;
638 entries
= table
->entries
;
640 hash
= (hash
<< 1) | 0x1;
641 k
= hash_to_index (hash
, size
);
643 for (distance
= 0; ; distance
++, k
= (k
+ 1) % size
)
645 unsigned long other_hash
;
648 other_hash
= entries
[k
].hash
;
651 /* Found an empty entry. */
654 if (other_hash
== hash
)
656 scm_t_weak_entry copy
;
658 copy_weak_entry (&entries
[k
], ©
);
660 if (!copy
.key
|| !copy
.value
)
661 /* Lost weak reference; reshuffle. */
663 give_to_poor (table
, k
);
668 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
669 /* Found an entry with this key. */
673 if (table
->n_items
> table
->upper
)
674 /* Full table, time to resize. */
676 resize_table (table
);
677 return weak_table_put_x (table
, hash
>> 1, pred
, closure
, key
, value
);
680 /* Displace the entry if our distance is less, otherwise keep
682 if (entry_distance (other_hash
, k
, size
) < distance
)
684 rob_from_rich (table
, k
);
690 unregister_disappearing_links (&entries
[k
], table
->kind
);
694 entries
[k
].hash
= hash
;
695 entries
[k
].key
= SCM_UNPACK (key
);
696 entries
[k
].value
= SCM_UNPACK (value
);
698 register_disappearing_links (&entries
[k
], key
, value
, table
->kind
);
703 weak_table_remove_x (scm_t_weak_table
*table
, unsigned long hash
,
704 scm_t_table_predicate_fn pred
, void *closure
)
706 unsigned long k
, distance
, size
;
707 scm_t_weak_entry
*entries
;
710 entries
= table
->entries
;
712 hash
= (hash
<< 1) | 0x1;
713 k
= hash_to_index (hash
, size
);
715 for (distance
= 0; distance
< size
; distance
++, k
= (k
+ 1) % size
)
717 unsigned long other_hash
;
720 other_hash
= entries
[k
].hash
;
726 if (other_hash
== hash
)
728 scm_t_weak_entry copy
;
730 copy_weak_entry (&entries
[k
], ©
);
732 if (!copy
.key
|| !copy
.value
)
733 /* Lost weak reference; reshuffle. */
735 give_to_poor (table
, k
);
740 if (pred (SCM_PACK (copy
.key
), SCM_PACK (copy
.value
), closure
))
741 /* Found an entry with this key. */
745 entries
[k
].value
= 0;
747 unregister_disappearing_links (&entries
[k
], table
->kind
);
749 if (--table
->n_items
< table
->lower
)
750 resize_table (table
);
752 give_to_poor (table
, k
);
758 /* If the entry's distance is less, our key is not in the table. */
759 if (entry_distance (other_hash
, k
, size
) < distance
)
767 make_weak_table (unsigned long k
, scm_t_weak_table_kind kind
)
769 scm_t_weak_table
*table
;
771 int i
= 0, n
= k
? k
: 31;
772 while (i
+ 1 < HASHTABLE_SIZE_N
&& n
> hashtable_size
[i
])
774 n
= hashtable_size
[i
];
776 table
= scm_gc_malloc (sizeof (*table
), "weak-table");
777 table
->entries
= allocate_entries (n
, kind
);
782 table
->upper
= 9 * n
/ 10;
783 table
->size_index
= i
;
784 table
->min_size_index
= i
;
785 scm_i_pthread_mutex_init (&table
->lock
, NULL
);
787 return scm_cell (scm_tc7_weak_table
, (scm_t_bits
)table
);
791 scm_i_weak_table_print (SCM exp
, SCM port
, scm_print_state
*pstate
)
793 scm_puts_unlocked ("#<", port
);
794 scm_puts_unlocked ("weak-table ", port
);
795 scm_uintprint (SCM_WEAK_TABLE (exp
)->n_items
, 10, port
);
796 scm_putc_unlocked ('/', port
);
797 scm_uintprint (SCM_WEAK_TABLE (exp
)->size
, 10, port
);
798 scm_puts_unlocked (">", port
);
802 do_vacuum_weak_table (SCM table
)
806 t
= SCM_WEAK_TABLE (table
);
808 /* Unlike weak sets, the weak table interface allows custom predicates
809 to call out to arbitrary Scheme. There are two ways that this code
810 can be re-entrant, then: calling weak hash procedures while in a
811 custom predicate, or via finalizers run explicitly by (gc) or in an
812 async (for non-threaded Guile). We add a restriction that
813 prohibits the first case, by convention. But since we can't
814 prohibit the second case, here we trylock instead of lock. Not so
816 if (scm_i_pthread_mutex_trylock (&t
->lock
) == 0)
818 vacuum_weak_table (t
);
819 scm_i_pthread_mutex_unlock (&t
->lock
);
826 scm_c_make_weak_table (unsigned long k
, scm_t_weak_table_kind kind
)
830 ret
= make_weak_table (k
, kind
);
832 scm_i_register_weak_gc_callback (ret
, do_vacuum_weak_table
);
838 scm_weak_table_p (SCM obj
)
840 return scm_from_bool (SCM_WEAK_TABLE_P (obj
));
844 scm_c_weak_table_ref (SCM table
, unsigned long raw_hash
,
845 scm_t_table_predicate_fn pred
,
846 void *closure
, SCM dflt
)
847 #define FUNC_NAME "weak-table-ref"
852 SCM_VALIDATE_WEAK_TABLE (1, table
);
854 t
= SCM_WEAK_TABLE (table
);
856 scm_i_pthread_mutex_lock (&t
->lock
);
858 ret
= weak_table_ref (t
, raw_hash
, pred
, closure
, dflt
);
860 scm_i_pthread_mutex_unlock (&t
->lock
);
867 scm_c_weak_table_put_x (SCM table
, unsigned long raw_hash
,
868 scm_t_table_predicate_fn pred
,
869 void *closure
, SCM key
, SCM value
)
870 #define FUNC_NAME "weak-table-put!"
874 SCM_VALIDATE_WEAK_TABLE (1, table
);
876 t
= SCM_WEAK_TABLE (table
);
878 scm_i_pthread_mutex_lock (&t
->lock
);
880 weak_table_put_x (t
, raw_hash
, pred
, closure
, key
, value
);
882 scm_i_pthread_mutex_unlock (&t
->lock
);
887 scm_c_weak_table_remove_x (SCM table
, unsigned long raw_hash
,
888 scm_t_table_predicate_fn pred
,
890 #define FUNC_NAME "weak-table-remove!"
894 SCM_VALIDATE_WEAK_TABLE (1, table
);
896 t
= SCM_WEAK_TABLE (table
);
898 scm_i_pthread_mutex_lock (&t
->lock
);
900 weak_table_remove_x (t
, raw_hash
, pred
, closure
);
902 scm_i_pthread_mutex_unlock (&t
->lock
);
907 assq_predicate (SCM x
, SCM y
, void *closure
)
909 return scm_is_eq (x
, SCM_PACK_POINTER (closure
));
913 scm_weak_table_refq (SCM table
, SCM key
, SCM dflt
)
915 if (SCM_UNBNDP (dflt
))
918 return scm_c_weak_table_ref (table
, scm_ihashq (key
, -1),
919 assq_predicate
, SCM_UNPACK_POINTER (key
),
924 scm_weak_table_putq_x (SCM table
, SCM key
, SCM value
)
926 scm_c_weak_table_put_x (table
, scm_ihashq (key
, -1),
927 assq_predicate
, SCM_UNPACK_POINTER (key
),
932 scm_weak_table_remq_x (SCM table
, SCM key
)
934 scm_c_weak_table_remove_x (table
, scm_ihashq (key
, -1),
935 assq_predicate
, SCM_UNPACK_POINTER (key
));
939 scm_weak_table_clear_x (SCM table
)
940 #define FUNC_NAME "weak-table-clear!"
944 SCM_VALIDATE_WEAK_TABLE (1, table
);
946 t
= SCM_WEAK_TABLE (table
);
948 scm_i_pthread_mutex_lock (&t
->lock
);
950 memset (t
->entries
, 0, sizeof (scm_t_weak_entry
) * t
->size
);
953 scm_i_pthread_mutex_unlock (&t
->lock
);
958 scm_c_weak_table_fold (scm_t_table_fold_fn proc
, void *closure
,
962 scm_t_weak_entry
*entries
;
963 unsigned long k
, size
;
965 t
= SCM_WEAK_TABLE (table
);
967 scm_i_pthread_mutex_lock (&t
->lock
);
970 entries
= t
->entries
;
972 for (k
= 0; k
< size
; k
++)
976 scm_t_weak_entry copy
;
978 copy_weak_entry (&entries
[k
], ©
);
980 if (copy
.key
&& copy
.value
)
982 /* Release table lock while we call the function. */
983 scm_i_pthread_mutex_unlock (&t
->lock
);
984 init
= proc (closure
,
985 SCM_PACK (copy
.key
), SCM_PACK (copy
.value
),
987 scm_i_pthread_mutex_lock (&t
->lock
);
992 scm_i_pthread_mutex_unlock (&t
->lock
);
998 fold_trampoline (void *closure
, SCM k
, SCM v
, SCM init
)
1000 return scm_call_3 (SCM_PACK_POINTER (closure
), k
, v
, init
);
1004 scm_weak_table_fold (SCM proc
, SCM init
, SCM table
)
1005 #define FUNC_NAME "weak-table-fold"
1007 SCM_VALIDATE_WEAK_TABLE (3, table
);
1008 SCM_VALIDATE_PROC (1, proc
);
1010 return scm_c_weak_table_fold (fold_trampoline
, SCM_UNPACK_POINTER (proc
), init
, table
);
1015 for_each_trampoline (void *closure
, SCM k
, SCM v
, SCM seed
)
1017 scm_call_2 (SCM_PACK_POINTER (closure
), k
, v
);
1022 scm_weak_table_for_each (SCM proc
, SCM table
)
1023 #define FUNC_NAME "weak-table-for-each"
1025 SCM_VALIDATE_WEAK_TABLE (2, table
);
1026 SCM_VALIDATE_PROC (1, proc
);
1028 scm_c_weak_table_fold (for_each_trampoline
, SCM_UNPACK_POINTER (proc
), SCM_BOOL_F
, table
);
1033 map_trampoline (void *closure
, SCM k
, SCM v
, SCM seed
)
1035 return scm_cons (scm_call_2 (SCM_PACK_POINTER (closure
), k
, v
), seed
);
1039 scm_weak_table_map_to_list (SCM proc
, SCM table
)
1040 #define FUNC_NAME "weak-table-map->list"
1042 SCM_VALIDATE_WEAK_TABLE (2, table
);
1043 SCM_VALIDATE_PROC (1, proc
);
1045 return scm_c_weak_table_fold (map_trampoline
, SCM_UNPACK_POINTER (proc
), SCM_EOL
, table
);
1052 /* Legacy interface. */
1054 SCM_DEFINE (scm_make_weak_key_hash_table
, "make-weak-key-hash-table", 0, 1, 0,
1056 "@deffnx {Scheme Procedure} make-weak-value-hash-table size\n"
1057 "@deffnx {Scheme Procedure} make-doubly-weak-hash-table size\n"
1058 "Return a weak hash table with @var{size} buckets.\n"
1060 "You can modify weak hash tables in exactly the same way you\n"
1061 "would modify regular hash tables. (@pxref{Hash Tables})")
1062 #define FUNC_NAME s_scm_make_weak_key_hash_table
1064 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1065 SCM_WEAK_TABLE_KIND_KEY
);
1070 SCM_DEFINE (scm_make_weak_value_hash_table
, "make-weak-value-hash-table", 0, 1, 0,
1072 "Return a hash table with weak values with @var{size} buckets.\n"
1073 "(@pxref{Hash Tables})")
1074 #define FUNC_NAME s_scm_make_weak_value_hash_table
1076 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1077 SCM_WEAK_TABLE_KIND_VALUE
);
1082 SCM_DEFINE (scm_make_doubly_weak_hash_table
, "make-doubly-weak-hash-table", 0, 1, 0,
1084 "Return a hash table with weak keys and values with @var{size}\n"
1085 "buckets. (@pxref{Hash Tables})")
1086 #define FUNC_NAME s_scm_make_doubly_weak_hash_table
1088 return scm_c_make_weak_table (SCM_UNBNDP (n
) ? 0 : scm_to_ulong (n
),
1089 SCM_WEAK_TABLE_KIND_BOTH
);
1094 SCM_DEFINE (scm_weak_key_hash_table_p
, "weak-key-hash-table?", 1, 0, 0,
1096 "@deffnx {Scheme Procedure} weak-value-hash-table? obj\n"
1097 "@deffnx {Scheme Procedure} doubly-weak-hash-table? obj\n"
1098 "Return @code{#t} if @var{obj} is the specified weak hash\n"
1099 "table. Note that a doubly weak hash table is neither a weak key\n"
1100 "nor a weak value hash table.")
1101 #define FUNC_NAME s_scm_weak_key_hash_table_p
1103 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1104 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_KEY
);
1109 SCM_DEFINE (scm_weak_value_hash_table_p
, "weak-value-hash-table?", 1, 0, 0,
1111 "Return @code{#t} if @var{obj} is a weak value hash table.")
1112 #define FUNC_NAME s_scm_weak_value_hash_table_p
1114 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1115 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_VALUE
);
1120 SCM_DEFINE (scm_doubly_weak_hash_table_p
, "doubly-weak-hash-table?", 1, 0, 0,
1122 "Return @code{#t} if @var{obj} is a doubly weak hash table.")
1123 #define FUNC_NAME s_scm_doubly_weak_hash_table_p
1125 return scm_from_bool (SCM_WEAK_TABLE_P (obj
) &&
1126 SCM_WEAK_TABLE (obj
)->kind
== SCM_WEAK_TABLE_KIND_BOTH
);
1135 scm_weak_table_prehistory (void)
1138 GC_new_kind (GC_new_free_list (),
1139 GC_MAKE_PROC (GC_new_proc (mark_weak_key_table
), 0),
1141 weak_value_gc_kind
=
1142 GC_new_kind (GC_new_free_list (),
1143 GC_MAKE_PROC (GC_new_proc (mark_weak_value_table
), 0),
1148 scm_init_weak_table ()
1150 #include "libguile/weak-table.x"