-/* Copyright (C) 1995,1996,1998,1999,2000,2001, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1998,1999,2000,2001, 2003, 2004, 2006 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
\f
+#include <stdio.h>
+
#include "libguile/_scm.h"
#include "libguile/alist.h"
#include "libguile/hash.h"
#include "libguile/validate.h"
#include "libguile/hashtab.h"
-\f
-static void
-loop (void)
-{
- int loop = 1;
- printf ("looping %d\n", getpid ());
- while (loop)
- ;
-}
-void
-scm_i_hashtable_decrement (SCM h)
-{
- scm_t_hashtable *t = SCM_HASHTABLE (h);
- if (t->n_items == 0)
- {
- printf ("hashtab underflow\n");
- loop ();
- }
- t->n_items--;
-}
+\f
/* NOTES
*
#define HASHTABLE_SIZE_N (sizeof(hashtable_size)/sizeof(unsigned long))
-/* Turn an empty vector hash table into an opaque resizable one. */
-
static char *s_hashtable = "hashtable";
-SCM weak_hashtables = SCM_EOL;
+\f
+/* Helper functions and macros to deal with weak pairs.
+
+ Weak pairs need to be accessed very carefully since their components can
+ be nullified by the GC when the object they refer to becomes unreachable.
+ Hence the macros and functions below that detect such weak pairs within
+ buckets and remove them. */
+
+
+/* Return a ``usable'' version of ALIST, an alist of weak pairs.  By
+   ``usable'', we mean that it contains only valid Scheme objects.  On
+   return, REMOVED_ITEMS is set to the number of pairs that have been
+   deleted.  */
+static SCM
+scm_fixup_weak_alist (SCM alist, size_t *removed_items)
+{
+  SCM result;
+  SCM prev = SCM_EOL;
+
+  *removed_items = 0;
+  for (result = alist;
+       scm_is_pair (alist);
+       alist = SCM_CDR (alist))
+    {
+      SCM pair = SCM_CAR (alist);
+
+      if (scm_is_pair (pair) && SCM_WEAK_PAIR_DELETED_P (pair))
+	{
+	  /* Remove from ALIST weak pair PAIR whose car/cdr has been
+	     nullified by the GC.  Note that PREV must only ever point
+	     to a cell that remains in the result list: advancing it
+	     past a removed cell would make consecutive removals splice
+	     through an already-unlinked cell.  */
+	  if (prev == SCM_EOL)
+	    result = SCM_CDR (alist);
+	  else
+	    SCM_SETCDR (prev, SCM_CDR (alist));
+
+	  (*removed_items)++;
+	}
+      else
+	/* This cell is kept; it becomes the predecessor of the next one.  */
+	prev = alist;
+    }
+
+  return result;
+}
+
+
+/* Helper macros. */
+
+/* Return true if OBJ is either a weak hash table or a weak alist vector (as
+   defined in `weaks.[ch]').
+   FIXME: We should eventually keep only weak hash tables.  Actually, the
+   procs in `weaks.c' already no longer return vectors.  */
+/* XXX: We assume that if OBJ is a vector, then it's a _weak_ alist vector.  */
+#define IS_WEAK_THING(_obj)					\
+  ((SCM_HASHTABLE_P (_obj) && (SCM_HASHTABLE_WEAK_P (_obj)))	\
+   || (SCM_I_IS_VECTOR (_obj)))
+
+
+
+/* Fixup BUCKET, an alist part of weak hash table OBJ. BUCKETS is the full
+ bucket vector for OBJ and IDX is the index of BUCKET within this
+ vector. See also `scm_internal_hash_fold ()'. */
+#define START_WEAK_BUCKET_FIXUP(_obj, _buckets, _idx, _bucket, _hashfn) \
+do \
+ { \
+ size_t _removed; \
+ \
+ /* Disable the GC so that BUCKET remains valid until ASSOC_FN has \
+ returned. */ \
+ /* FIXME: We could maybe trigger a rehash here depending on whether \
+ `scm_fixup_weak_alist ()' noticed some change. */ \
+ GC_disable (); \
+ (_bucket) = scm_fixup_weak_alist ((_bucket), &_removed); \
+ SCM_SIMPLE_VECTOR_SET ((_buckets), (_idx), (_bucket)); \
+ \
+ if ((_removed) && (SCM_HASHTABLE_P (_obj))) \
+ { \
+ SCM_SET_HASHTABLE_N_ITEMS ((_obj), \
+ SCM_HASHTABLE_N_ITEMS (_obj) - _removed); \
+ scm_i_rehash ((_obj), (_hashfn), \
+ NULL, "START_WEAK_BUCKET_FIXUP"); \
+ } \
+ } \
+while (0)
+
+/* Terminate a weak bucket fixup phase. */
+#define END_WEAK_BUCKET_FIXUP(_obj, _buckets, _idx, _bucket, _hashfn) \
+ do { GC_enable (); } while (0)
+
+
+\f
static SCM
-make_hash_table (int flags, unsigned long k, const char *func_name) {
+make_hash_table (int flags, unsigned long k, const char *func_name)
+{
SCM table, vector;
scm_t_hashtable *t;
int i = 0, n = k ? k : 31;
while (i < HASHTABLE_SIZE_N && n > hashtable_size[i])
++i;
n = hashtable_size[i];
- if (flags)
- /* The SCM_WVECTF_NOSCAN flag informs the weak vector code not to
- perform the final scan for broken references. Instead we do
- that ourselves in scan_weak_hashtables. */
- vector = scm_i_allocate_weak_vector (flags | SCM_WVECTF_NOSCAN,
- scm_from_int (n),
- SCM_EOL);
- else
- vector = scm_c_make_vector (n, SCM_EOL);
- t = scm_gc_malloc (sizeof (*t), s_hashtable);
+
+ /* In both cases, i.e., regardless of whether we are creating a weak hash
+ table, we return a non-weak vector. This is because the vector itself
+ is not weak in the case of a weak hash table: the alist pairs are. */
+ vector = scm_c_make_vector (n, SCM_EOL);
+
+ t = scm_gc_malloc_pointerless (sizeof (*t), s_hashtable);
t->min_size_index = t->size_index = i;
t->n_items = 0;
t->lower = 0;
t->upper = 9 * n / 10;
t->flags = flags;
- if (flags)
- {
- SCM_NEWSMOB3 (table, scm_tc16_hashtable, vector, t, weak_hashtables);
- weak_hashtables = table;
- }
- else
- SCM_NEWSMOB3 (table, scm_tc16_hashtable, vector, t, SCM_EOL);
+ t->hash_fn = NULL;
+
+ SCM_NEWSMOB2 (table, scm_tc16_hashtable, vector, t);
+
return table;
}
-
void
scm_i_rehash (SCM table,
unsigned long (*hash_fn)(),
if (i >= HASHTABLE_SIZE_N)
/* don't rehash */
return;
- /* store for use in rehash_after_gc */
- SCM_HASHTABLE (table)->hash_fn = hash_fn;
- SCM_HASHTABLE (table)->closure = closure;
+
+ /* Remember HASH_FN for rehash_after_gc, but only when CLOSURE
+ is not needed since CLOSURE can not be guaranteed to be valid
+ after this function returns.
+ */
+ if (closure == NULL)
+ SCM_HASHTABLE (table)->hash_fn = hash_fn;
}
SCM_HASHTABLE (table)->size_index = i;
-
+
new_size = hashtable_size[i];
if (i <= SCM_HASHTABLE (table)->min_size_index)
SCM_HASHTABLE (table)->lower = 0;
SCM_HASHTABLE (table)->lower = new_size / 4;
SCM_HASHTABLE (table)->upper = 9 * new_size / 10;
buckets = SCM_HASHTABLE_VECTOR (table);
-
- if (SCM_HASHTABLE_WEAK_P (table))
- new_buckets = scm_i_allocate_weak_vector (SCM_HASHTABLE_FLAGS (table)
- | SCM_WVECTF_NOSCAN,
- scm_from_ulong (new_size),
- SCM_EOL);
- else
- new_buckets = scm_c_make_vector (new_size, SCM_EOL);
+
+ new_buckets = scm_c_make_vector (new_size, SCM_EOL);
+
+ /* When this is a weak hashtable, running the GC might change it.
+ We need to cope with this while rehashing its elements. We do
+ this by first installing the new, empty bucket vector. Then we
+ remove the elements from the old bucket vector and insert them
+ into the new one.
+ */
+
+ SCM_SET_HASHTABLE_VECTOR (table, new_buckets);
+ SCM_SET_HASHTABLE_N_ITEMS (table, 0);
old_size = SCM_SIMPLE_VECTOR_LENGTH (buckets);
for (i = 0; i < old_size; ++i)
{
- SCM ls = SCM_SIMPLE_VECTOR_REF (buckets, i), handle;
- while (!scm_is_null (ls))
+ SCM ls, cell, handle;
+
+ ls = SCM_SIMPLE_VECTOR_REF (buckets, i);
+ SCM_SIMPLE_VECTOR_SET (buckets, i, SCM_EOL);
+
+ while (scm_is_pair (ls))
{
unsigned long h;
- handle = SCM_CAR (ls);
+
+ cell = ls;
+ handle = SCM_CAR (cell);
+ ls = SCM_CDR (ls);
+
+ if (SCM_WEAK_PAIR_DELETED_P (handle))
+ /* HANDLE is a nullified weak pair: skip it. */
+ continue;
+
h = hash_fn (SCM_CAR (handle), new_size, closure);
if (h >= new_size)
scm_out_of_range (func_name, scm_from_ulong (h));
- SCM_SIMPLE_VECTOR_SET
- (new_buckets, h,
- scm_cons (handle,
- SCM_SIMPLE_VECTOR_REF (new_buckets, h)));
- ls = SCM_CDR (ls);
+ SCM_SETCDR (cell, SCM_SIMPLE_VECTOR_REF (new_buckets, h));
+ SCM_SIMPLE_VECTOR_SET (new_buckets, h, cell);
+ SCM_HASHTABLE_INCREMENT (table);
}
}
- SCM_SET_HASHTABLE_VECTOR (table, new_buckets);
}
static int
hashtable_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
- scm_t_hashtable *t = SCM_HASHTABLE (exp);
scm_puts ("#<", port);
if (SCM_HASHTABLE_WEAK_KEY_P (exp))
scm_puts ("weak-key-", port);
else if (SCM_HASHTABLE_DOUBLY_WEAK_P (exp))
scm_puts ("doubly-weak-", port);
scm_puts ("hash-table ", port);
- scm_uintprint (t->n_items, 10, port);
+ scm_uintprint (SCM_HASHTABLE_N_ITEMS (exp), 10, port);
scm_putc ('/', port);
scm_uintprint (SCM_SIMPLE_VECTOR_LENGTH (SCM_HASHTABLE_VECTOR (exp)),
10, port);
return 1;
}
-#define UNMARKED_CELL_P(x) (SCM_NIMP(x) && !SCM_GC_MARK_P (x))
-
-/* keep track of hash tables that need to shrink after scan */
-static SCM to_rehash = SCM_EOL;
-
-/* scan hash tables for broken references, remove them, and update
- hash tables item count */
-static void *
-scan_weak_hashtables (void *dummy1 SCM_UNUSED,
- void *dummy2 SCM_UNUSED,
- void *dummy3 SCM_UNUSED)
-{
- SCM *next = &weak_hashtables;
- SCM h = *next;
- while (!scm_is_null (h))
- {
- if (!SCM_GC_MARK_P (h))
- *next = h = SCM_HASHTABLE_NEXT (h);
- else
- {
- SCM alist;
- int i, n = SCM_HASHTABLE_N_BUCKETS (h);
- int weak_car = SCM_HASHTABLE_FLAGS (h) & SCM_HASHTABLEF_WEAK_CAR;
- int weak_cdr = SCM_HASHTABLE_FLAGS (h) & SCM_HASHTABLEF_WEAK_CDR;
- int check_size_p = 0;
- for (i = 0; i < n; ++i)
- {
- SCM *next_spine = NULL;
- alist = SCM_HASHTABLE_BUCKET (h, i);
- while (scm_is_pair (alist))
- {
- if ((weak_car && UNMARKED_CELL_P (SCM_CAAR (alist)))
- || (weak_cdr && UNMARKED_CELL_P (SCM_CDAR (alist))))
- {
- if (next_spine)
- *next_spine = SCM_CDR (alist);
- else
- SCM_SET_HASHTABLE_BUCKET (h, i, SCM_CDR (alist));
- SCM_HASHTABLE_DECREMENT (h);
- check_size_p = 1;
- }
- else
- next_spine = SCM_CDRLOC (alist);
- alist = SCM_CDR (alist);
- }
- }
- if (check_size_p
- && SCM_HASHTABLE_N_ITEMS (h) < SCM_HASHTABLE_LOWER (h))
- {
- SCM tmp = SCM_HASHTABLE_NEXT (h);
- /* temporarily move table from weak_hashtables to to_rehash */
- SCM_SET_HASHTABLE_NEXT (h, to_rehash);
- to_rehash = h;
- *next = h = tmp;
- }
- else
- {
- next = SCM_HASHTABLE_NEXTLOC (h);
- h = SCM_HASHTABLE_NEXT (h);
- }
- }
- }
- return 0;
-}
-
-static void *
-rehash_after_gc (void *dummy1 SCM_UNUSED,
- void *dummy2 SCM_UNUSED,
- void *dummy3 SCM_UNUSED)
-{
- if (!scm_is_null (to_rehash))
- {
- SCM first = to_rehash, last, h;
- /* important to clear to_rehash here so that we don't get stuck
- in an infinite loop if scm_i_rehash causes GC */
- to_rehash = SCM_EOL;
- h = first;
- do
- {
- scm_i_rehash (h,
- /* use same hash_fn and closure as last time */
- SCM_HASHTABLE (h)->hash_fn,
- SCM_HASHTABLE (h)->closure,
- "rehash_after_gc");
- last = h;
- h = SCM_HASHTABLE_NEXT (h);
- } while (!scm_is_null (h));
- /* move tables back to weak_hashtables */
- SCM_SET_HASHTABLE_NEXT (last, weak_hashtables);
- weak_hashtables = first;
- }
- return 0;
-}
-
-static size_t
-hashtable_free (SCM obj)
-{
- scm_gc_free (SCM_HASHTABLE (obj), sizeof (scm_t_hashtable), s_hashtable);
- return 0;
-}
-
SCM
scm_c_make_hash_table (unsigned long k)
SCM_DEFINE (scm_make_hash_table, "make-hash-table", 0, 1, 0,
(SCM n),
- "Make a hash table with optional minimum number of buckets @var{n}\n")
+ "Make a new abstract hash table object with minimum number of buckets @var{n}\n")
#define FUNC_NAME s_scm_make_hash_table
{
if (SCM_UNBNDP (n))
(SCM n),
"@deffnx {Scheme Procedure} make-weak-value-hash-table size\n"
"@deffnx {Scheme Procedure} make-doubly-weak-hash-table size\n"
- "Return a weak hash table with @var{size} buckets. As with any\n"
- "hash table, choosing a good size for the table requires some\n"
- "caution.\n"
+ "Return a weak hash table with @var{size} buckets.\n"
"\n"
"You can modify weak hash tables in exactly the same way you\n"
"would modify regular hash tables. (@pxref{Hash Tables})")
SCM_DEFINE (scm_hash_table_p, "hash-table?", 1, 0, 0,
(SCM obj),
- "Return @code{#t} if @var{obj} is a hash table.")
+ "Return @code{#t} if @var{obj} is an abstract hash table object.")
#define FUNC_NAME s_scm_hash_table_p
{
return scm_from_bool (SCM_HASHTABLE_P (obj));
scm_hash_fn_get_handle (SCM table, SCM obj, unsigned long (*hash_fn)(), SCM (*assoc_fn)(), void * closure)
#define FUNC_NAME "scm_hash_fn_get_handle"
{
+ int weak = 0;
unsigned long k;
- SCM h;
+ SCM buckets, alist, h;
if (SCM_HASHTABLE_P (table))
- table = SCM_HASHTABLE_VECTOR (table);
+ buckets = SCM_HASHTABLE_VECTOR (table);
else
- SCM_VALIDATE_VECTOR (1, table);
- if (SCM_SIMPLE_VECTOR_LENGTH (table) == 0)
+ {
+ SCM_VALIDATE_VECTOR (1, table);
+ buckets = table;
+ }
+
+ if (SCM_SIMPLE_VECTOR_LENGTH (buckets) == 0)
return SCM_BOOL_F;
- k = hash_fn (obj, SCM_SIMPLE_VECTOR_LENGTH (table), closure);
- if (k >= SCM_SIMPLE_VECTOR_LENGTH (table))
+ k = hash_fn (obj, SCM_SIMPLE_VECTOR_LENGTH (buckets), closure);
+ if (k >= SCM_SIMPLE_VECTOR_LENGTH (buckets))
scm_out_of_range ("hash_fn_get_handle", scm_from_ulong (k));
- h = assoc_fn (obj, SCM_SIMPLE_VECTOR_REF (table, k), closure);
+
+ weak = IS_WEAK_THING (table);
+ alist = SCM_SIMPLE_VECTOR_REF (buckets, k);
+
+ if (weak)
+ START_WEAK_BUCKET_FIXUP (table, buckets, k, alist, hash_fn);
+
+ h = assoc_fn (obj, alist, closure);
+ if (weak)
+ END_WEAK_BUCKET_FIXUP (table, buckets, k, alist, hash_fn);
+
return h;
}
#undef FUNC_NAME
SCM (*assoc_fn)(), void * closure)
#define FUNC_NAME "scm_hash_fn_create_handle_x"
{
+ int weak = 0;
unsigned long k;
- SCM buckets, it;
+ SCM buckets, alist, it;
if (SCM_HASHTABLE_P (table))
buckets = SCM_HASHTABLE_VECTOR (table);
k = hash_fn (obj, SCM_SIMPLE_VECTOR_LENGTH (buckets), closure);
if (k >= SCM_SIMPLE_VECTOR_LENGTH (buckets))
scm_out_of_range ("hash_fn_create_handle_x", scm_from_ulong (k));
- it = assoc_fn (obj, SCM_SIMPLE_VECTOR_REF (buckets, k), closure);
+
+ weak = IS_WEAK_THING (table);
+ alist = SCM_SIMPLE_VECTOR_REF (buckets, k);
+ if (weak)
+ START_WEAK_BUCKET_FIXUP (table, buckets, k, alist, hash_fn);
+
+ it = assoc_fn (obj, alist, closure);
+ if (weak)
+ END_WEAK_BUCKET_FIXUP (table, buckets, k, alist, hash_fn);
+
if (scm_is_true (it))
return it;
else
{
- SCM old_bucket = SCM_SIMPLE_VECTOR_REF (buckets, k);
- SCM new_bucket = scm_acons (obj, init, old_bucket);
+ /* When this is a weak hashtable, running the GC can change it.
+ Thus, we must allocate the new cells first and can only then
+ access BUCKETS. Also, we need to fetch the bucket vector
+ again since the hashtable might have been rehashed. This
+ necessitates a new hash value as well.
+ */
+ SCM handle, new_bucket;
+
+ if ((SCM_HASHTABLE_P (table)) && (SCM_HASHTABLE_WEAK_P (table)))
+ {
+ /* FIXME: We don't support weak alist vectors. */
+ /* Use a weak cell. */
+ if (SCM_HASHTABLE_DOUBLY_WEAK_P (table))
+ handle = scm_doubly_weak_pair (obj, init);
+ else if (SCM_HASHTABLE_WEAK_KEY_P (table))
+ handle = scm_weak_car_pair (obj, init);
+ else
+ handle = scm_weak_cdr_pair (obj, init);
+ }
+ else
+ /* Use a regular, non-weak cell. */
+ handle = scm_cons (obj, init);
+
+ new_bucket = scm_cons (handle, SCM_EOL);
+
+ if (!scm_is_eq (table, buckets)
+ && !scm_is_eq (SCM_HASHTABLE_VECTOR (table), buckets))
+ {
+ buckets = SCM_HASHTABLE_VECTOR (table);
+ k = hash_fn (obj, SCM_SIMPLE_VECTOR_LENGTH (buckets), closure);
+ if (k >= SCM_SIMPLE_VECTOR_LENGTH (buckets))
+ scm_out_of_range ("hash_fn_create_handle_x", scm_from_ulong (k));
+ }
+ SCM_SETCDR (new_bucket, SCM_SIMPLE_VECTOR_REF (buckets, k));
SCM_SIMPLE_VECTOR_SET (buckets, k, new_bucket);
- if (table != buckets)
+ if (!scm_is_eq (table, buckets))
{
+ /* Update element count and maybe rehash the table. The
+ table might have too few entries here since weak hash
+ tables used with the hashx_* functions can not be
+ rehashed after GC.
+ */
SCM_HASHTABLE_INCREMENT (table);
- if (SCM_HASHTABLE_N_ITEMS (table) > SCM_HASHTABLE_UPPER (table))
+ if (SCM_HASHTABLE_N_ITEMS (table) < SCM_HASHTABLE_LOWER (table)
+ || SCM_HASHTABLE_N_ITEMS (table) > SCM_HASHTABLE_UPPER (table))
scm_i_rehash (table, hash_fn, closure, FUNC_NAME);
}
return SCM_CAR (new_bucket);
}
-
-
-
-SCM
-scm_hash_fn_remove_x (SCM table, SCM obj, unsigned long (*hash_fn)(), SCM (*assoc_fn)(),
- SCM (*delete_fn)(), void * closure)
+SCM
+scm_hash_fn_remove_x (SCM table, SCM obj,
+ unsigned long (*hash_fn)(),
+ SCM (*assoc_fn)(),
+ void *closure)
{
+ int weak = 0;
unsigned long k;
- SCM buckets, h;
+ SCM buckets, alist, h;
if (SCM_HASHTABLE_P (table))
buckets = SCM_HASHTABLE_VECTOR (table);
k = hash_fn (obj, SCM_SIMPLE_VECTOR_LENGTH (buckets), closure);
if (k >= SCM_SIMPLE_VECTOR_LENGTH (buckets))
scm_out_of_range ("hash_fn_remove_x", scm_from_ulong (k));
- h = assoc_fn (obj, SCM_SIMPLE_VECTOR_REF (buckets, k), closure);
+
+ weak = IS_WEAK_THING (table);
+ alist = SCM_SIMPLE_VECTOR_REF (buckets, k);
+ if (weak)
+ START_WEAK_BUCKET_FIXUP (table, buckets, k, alist, hash_fn);
+
+ h = assoc_fn (obj, alist, closure);
+ if (weak)
+ END_WEAK_BUCKET_FIXUP (table, buckets, k, alist, hash_fn);
+
if (scm_is_true (h))
{
SCM_SIMPLE_VECTOR_SET
- (buckets, k,
- delete_fn (h, SCM_SIMPLE_VECTOR_REF (buckets, k)));
- if (table != buckets)
+ (buckets, k, scm_delq_x (h, SCM_SIMPLE_VECTOR_REF (buckets, k)));
+ if (!scm_is_eq (table, buckets))
{
SCM_HASHTABLE_DECREMENT (table);
if (SCM_HASHTABLE_N_ITEMS (table) < SCM_HASHTABLE_LOWER (table))
SCM_DEFINE (scm_hash_clear_x, "hash-clear!", 1, 0, 0,
(SCM table),
- "Remove all items from TABLE (without triggering a resize).")
+ "Remove all items from @var{table} (without triggering a resize).")
#define FUNC_NAME s_scm_hash_clear_x
{
- SCM_VALIDATE_HASHTABLE (1, table);
- scm_vector_fill_x (SCM_HASHTABLE_VECTOR (table), SCM_EOL);
- SCM_SET_HASHTABLE_N_ITEMS (table, 0);
+ if (SCM_HASHTABLE_P (table))
+ {
+ scm_vector_fill_x (SCM_HASHTABLE_VECTOR (table), SCM_EOL);
+ SCM_SET_HASHTABLE_N_ITEMS (table, 0);
+ }
+ else
+ scm_vector_fill_x (table, SCM_EOL);
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
"@var{table}. Uses @code{eq?} for equality tests.")
#define FUNC_NAME s_scm_hashq_remove_x
{
- return scm_hash_fn_remove_x (table, key, scm_ihashq, scm_sloppy_assq,
- scm_delq_x, 0);
+ return scm_hash_fn_remove_x (table, key, scm_ihashq, scm_sloppy_assq, 0);
}
#undef FUNC_NAME
"@var{table}. Uses @code{eqv?} for equality tests.")
#define FUNC_NAME s_scm_hashv_remove_x
{
- return scm_hash_fn_remove_x (table, key, scm_ihashv, scm_sloppy_assv,
- scm_delv_x, 0);
+ return scm_hash_fn_remove_x (table, key, scm_ihashv, scm_sloppy_assv, 0);
}
#undef FUNC_NAME
"@var{table}. Uses @code{equal?} for equality tests.")
#define FUNC_NAME s_scm_hash_remove_x
{
- return scm_hash_fn_remove_x (table, key, scm_ihash, scm_sloppy_assoc,
- scm_delete_x, 0);
+ return scm_hash_fn_remove_x (table, key, scm_ihash, scm_sloppy_assoc, 0);
}
#undef FUNC_NAME
{
SCM hash;
SCM assoc;
- SCM delete;
} scm_t_ihashx_closure;
}
-
-
-static SCM
-scm_delx_x (SCM obj, SCM alist, scm_t_ihashx_closure *closure)
-{
- return scm_call_2 (closure->delete, obj, alist);
-}
-
-
-
SCM_DEFINE (scm_hashx_get_handle, "hashx-get-handle", 4, 0, 0,
(SCM hash, SCM assoc, SCM table, SCM key),
"This behaves the same way as the corresponding\n"
}
#undef FUNC_NAME
-
-
-SCM
-scm_hashx_remove_x (SCM hash, SCM assoc, SCM delete, SCM table, SCM obj)
+SCM_DEFINE (scm_hashx_remove_x, "hashx-remove!", 4, 0, 0,
+ (SCM hash, SCM assoc, SCM table, SCM obj),
+ "This behaves the same way as the corresponding @code{remove!}\n"
+ "function, but uses @var{hash} as a hash function and\n"
+ "@var{assoc} to compare keys. @code{hash} must be a function\n"
+ "that takes two arguments, a key to be hashed and a table size.\n"
+ "@code{assoc} must be an associator function, like @code{assoc},\n"
+ "@code{assq} or @code{assv}.\n"
+ "\n"
+ " By way of illustration, @code{hashq-remove! table key} is\n"
+ "equivalent to @code{hashx-remove! hashq assq #f table key}.")
+#define FUNC_NAME s_scm_hashx_remove_x
{
scm_t_ihashx_closure closure;
closure.hash = hash;
closure.assoc = assoc;
- closure.delete = delete;
- return scm_hash_fn_remove_x (table, obj, scm_ihashx, scm_sloppy_assx, scm_delx_x, 0);
+ return scm_hash_fn_remove_x (table, obj, scm_ihashx, scm_sloppy_assx,
+ (void *) &closure);
}
+#undef FUNC_NAME
/* Hash table iterators */
if (SCM_HASHTABLE_P (table))
buckets = SCM_HASHTABLE_VECTOR (table);
else
+ /* Weak alist vector. */
buckets = table;
n = SCM_SIMPLE_VECTOR_LENGTH (buckets);
for (i = 0; i < n; ++i)
{
- SCM ls = SCM_SIMPLE_VECTOR_REF (buckets, i), handle;
- while (!scm_is_null (ls))
+ SCM prev, ls;
+
+ for (prev = SCM_BOOL_F, ls = SCM_SIMPLE_VECTOR_REF (buckets, i);
+ !scm_is_null (ls);
+ prev = ls, ls = SCM_CDR (ls))
{
+ SCM handle;
+
if (!scm_is_pair (ls))
scm_wrong_type_arg (s_scm_hash_fold, SCM_ARG3, buckets);
+
handle = SCM_CAR (ls);
if (!scm_is_pair (handle))
scm_wrong_type_arg (s_scm_hash_fold, SCM_ARG3, buckets);
+
+ if (IS_WEAK_THING (table))
+ {
+ if (SCM_WEAK_PAIR_DELETED_P (handle))
+ {
+ /* We hit a weak pair whose car/cdr has become
+ unreachable: unlink it from the bucket. */
+ if (prev != SCM_BOOL_F)
+ SCM_SETCDR (prev, SCM_CDR (ls));
+ else
+ SCM_SIMPLE_VECTOR_SET (buckets, i, SCM_CDR (ls));
+
+ if (SCM_HASHTABLE_P (table))
+ /* Update the item count. */
+ SCM_HASHTABLE_DECREMENT (table);
+
+ continue;
+ }
+ }
+
result = fn (closure, SCM_CAR (handle), SCM_CDR (handle), result);
- ls = SCM_CDR (ls);
}
}
void
scm_hashtab_prehistory ()
{
+ /* Initialize the hashtab SMOB type. */
scm_tc16_hashtable = scm_make_smob_type (s_hashtable, 0);
- scm_set_smob_mark (scm_tc16_hashtable, scm_markcdr);
scm_set_smob_print (scm_tc16_hashtable, hashtable_print);
- scm_set_smob_free (scm_tc16_hashtable, hashtable_free);
- scm_c_hook_add (&scm_after_sweep_c_hook, scan_weak_hashtables, 0, 0);
- scm_c_hook_add (&scm_after_gc_c_hook, rehash_after_gc, 0, 0);
}
void