-/* Copyright (C) 2011 Free Software Foundation, Inc.
+/* Copyright (C) 2011, 2012, 2013 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
return size - origin + k;
}
+#ifndef HAVE_GC_MOVE_DISAPPEARING_LINK
+/* Compatibility shim for libgc versions that lack
+   GC_move_disappearing_link: emulate the move by unregistering the
+   disappearing link at FROM and registering a fresh one at TO,
+   pointing at the object TO currently holds.  NOTE(review): unlike
+   the real GC_move_disappearing_link this unregister/register pair is
+   not atomic with respect to the collector -- presumably acceptable
+   because callers hold the weak set's lock; confirm.  */
+static void
+GC_move_disappearing_link (void **from, void **to)
+{
+ GC_unregister_disappearing_link (from);
+ SCM_I_REGISTER_DISAPPEARING_LINK (to, *to);
+}
+#endif
+
static void
move_weak_entry (scm_t_weak_entry *from, scm_t_weak_entry *to)
{
to->key = copy.key;
if (copy.key && SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
- {
-#ifdef HAVE_GC_MOVE_DISAPPEARING_LINK
- GC_move_disappearing_link ((GC_PTR) &from->key, (GC_PTR) &to->key);
-#else
- GC_unregister_disappearing_link ((GC_PTR) &from->key);
- SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &to->key,
- (GC_PTR) to->key);
-#endif
- }
+ GC_move_disappearing_link ((void **) &from->key, (void **) &to->key);
}
else
{
#define HASHSET_SIZE_N (sizeof(hashset_size)/sizeof(unsigned long))
-static void
-resize_set (scm_t_weak_set *set)
+/* Pick the hashset_size[] index appropriate for SET's current item
+   count: step the index down while occupancy is below the shrink
+   threshold, step it up by one when the item count exceeds the upper
+   bound, and otherwise return the current index unchanged.  Pure
+   computation -- the actual reallocation happens in resize_set.  */
+static int
+compute_size_index (scm_t_weak_set *set)
{
- scm_t_weak_entry *old_entries, *new_entries;
- int i;
- unsigned long old_size, new_size, old_k;
+ int i = set->size_index;
- old_entries = set->entries;
- old_size = set->size;
-
if (set->n_items < set->lower)
{
/* rehashing is not triggered when i <= min_size */
- i = set->size_index;
do
--i;
while (i > set->min_size_index
- && set->n_items < hashset_size[i] / 4);
+ && set->n_items < hashset_size[i] / 5);
}
- else
+ else if (set->n_items > set->upper)
{
- i = set->size_index + 1;
+ ++i;
if (i >= HASHSET_SIZE_N)
/* The biggest size currently is 230096423, which for a 32-bit
machine will occupy 1.5GB of memory at a load of 80%. There
abort ();
}
- new_size = hashset_size[i];
- new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
- "weak set");
+ return i;
+}
+
+/* Decide whether SIZE_INDEX -- the index chosen before the new entry
+   vector was allocated -- is still a good size for SET now that the
+   allocation (which may have triggered GC and finalized entries) has
+   completed.  Returns nonzero to accept SIZE_INDEX, zero to make the
+   caller recompute and reallocate.  */
+static int
+is_acceptable_size_index (scm_t_weak_set *set, int size_index)
+{
+ int computed = compute_size_index (set);
+
+ if (size_index == computed)
+ /* We were going to grow or shrink, and allocating the new vector
+ didn't change the target size. */
+ return 1;
+
+ if (size_index == computed + 1)
+ {
+ /* We were going to enlarge the set, but allocating the new
+ vector finalized some objects, making an enlargement
+ unnecessary. It might still be a good idea to use the larger
+ set, though. (This branch also gets hit if, while allocating
+ the vector, some other thread was actively removing items from
+ the set. That is less likely, though.) */
+ unsigned long new_lower = hashset_size[size_index] / 5;
+
+ return set->size > new_lower;
+ }
+
+ if (size_index == computed - 1)
+ {
+ /* We were going to shrink the set, but when we dropped the lock
+ to allocate the new vector, some other thread added elements to
+ the set. */
+ return 0;
+ }
+
+ /* The computed size differs from our newly allocated size by more
+ than one size index -- recalculate. */
+ return 0;
+}
+
+static void
+resize_set (scm_t_weak_set *set)
+{
+ scm_t_weak_entry *old_entries, *new_entries;
+ int new_size_index;
+ unsigned long old_size, new_size, old_k;
+
+ do
+ {
+ new_size_index = compute_size_index (set);
+ if (new_size_index == set->size_index)
+ return;
+ new_size = hashset_size[new_size_index];
+ new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
+ "weak set");
+ }
+ while (!is_acceptable_size_index (set, new_size_index));
+
+ old_entries = set->entries;
+ old_size = set->size;
+
memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));
- set->size_index = i;
+ set->size_index = new_size_index;
set->size = new_size;
- if (i <= set->min_size_index)
+ if (new_size_index <= set->min_size_index)
set->lower = 0;
else
set->lower = new_size / 5;
new_entries[new_k].key = copy.key;
if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
- SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &new_entries[new_k].key,
- (GC_PTR) new_entries[new_k].key);
+ SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &new_entries[new_k].key,
+ (void *) new_entries[new_k].key);
}
}
-/* Run after GC via do_vacuum_weak_set, this function runs over the
- whole table, removing lost weak references, reshuffling the set as it
- goes. It might resize the set if it reaps enough entries. */
+/* Run from a finalizer via do_vacuum_weak_set, this function runs over
+ the whole table, removing lost weak references, reshuffling the set
+ as it goes. It might resize the set if it reaps enough entries. */
static void
vacuum_weak_set (scm_t_weak_set *set)
{
entries[k].key = SCM_UNPACK (obj);
if (SCM_HEAP_OBJECT_P (obj))
- SCM_I_REGISTER_DISAPPEARING_LINK ((GC_PTR) &entries[k].key,
- (GC_PTR) SCM_HEAP_OBJECT_BASE (obj));
+ SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &entries[k].key,
+ (void *) SCM2PTR (obj));
return obj;
}
entries[k].key = 0;
if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
- GC_unregister_disappearing_link ((GC_PTR) &entries[k].key);
+ GC_unregister_disappearing_link ((void **) &entries[k].key);
if (--set->n_items < set->lower)
resize_set (set);
s = SCM_WEAK_SET (set);
- if (scm_i_pthread_mutex_trylock (&s->lock) == 0)
- {
- vacuum_weak_set (s);
- scm_i_pthread_mutex_unlock (&s->lock);
- }
-
- return;
-}
-
-/* The before-gc C hook only runs if GC_set_start_callback is available,
- so if not, fall back on a finalizer-based implementation. */
-static int
-weak_gc_callback (void **weak)
-{
- void *val = weak[0];
- void (*callback) (SCM) = weak[1];
-
- if (!val)
- return 0;
-
- callback (SCM_PACK_POINTER (val));
-
- return 1;
-}
-
-#ifdef HAVE_GC_SET_START_CALLBACK
-static void*
-weak_gc_hook (void *hook_data, void *fn_data, void *data)
-{
- if (!weak_gc_callback (fn_data))
- scm_c_hook_remove (&scm_before_gc_c_hook, weak_gc_hook, fn_data);
-
- return NULL;
-}
-#else
-static void
-weak_gc_finalizer (void *ptr, void *data)
-{
- if (weak_gc_callback (ptr))
- GC_REGISTER_FINALIZER_NO_ORDER (ptr, weak_gc_finalizer, data, NULL, NULL);
-}
-#endif
-
-static void
-scm_c_register_weak_gc_callback (SCM obj, void (*callback) (SCM))
-{
- void **weak = GC_MALLOC_ATOMIC (sizeof (void*) * 2);
-
- weak[0] = SCM_UNPACK_POINTER (obj);
- weak[1] = (void*)callback;
- GC_GENERAL_REGISTER_DISAPPEARING_LINK (weak, SCM_HEAP_OBJECT_BASE (obj));
-
-#ifdef HAVE_GC_SET_START_CALLBACK
- scm_c_hook_add (&scm_after_gc_c_hook, weak_gc_hook, weak, 0);
-#else
- GC_REGISTER_FINALIZER_NO_ORDER (weak, weak_gc_finalizer, NULL, NULL, NULL);
-#endif
+ /* We should always be able to grab this lock, because we are run from
+ a finalizer, which runs in another thread (or an async, which is
+ mostly equivalent). */
+ scm_i_pthread_mutex_lock (&s->lock);
+ vacuum_weak_set (s);
+ scm_i_pthread_mutex_unlock (&s->lock);
}
SCM
ret = make_weak_set (k);
- scm_c_register_weak_gc_callback (ret, do_vacuum_weak_set);
+ scm_i_register_weak_gc_callback (ret, do_vacuum_weak_set);
return ret;
}