# define SCM_CHAR_CODE_LIMIT 256L
#endif
+#define SCM_I_UTYPE_MAX(type) ((type)-1)
+#define SCM_I_TYPE_MAX(type,umax) ((type)((umax)/2))
+#define SCM_I_TYPE_MIN(type,umax) (-((type)((umax)/2))-1)
+
+#define SCM_T_UINT8_MAX SCM_I_UTYPE_MAX(scm_t_uint8)
+#define SCM_T_INT8_MIN SCM_I_TYPE_MIN(scm_t_int8,SCM_T_UINT8_MAX)
+#define SCM_T_INT8_MAX SCM_I_TYPE_MAX(scm_t_int8,SCM_T_UINT8_MAX)
+
+#define SCM_T_UINT16_MAX SCM_I_UTYPE_MAX(scm_t_uint16)
+#define SCM_T_INT16_MIN SCM_I_TYPE_MIN(scm_t_int16,SCM_T_UINT16_MAX)
+#define SCM_T_INT16_MAX SCM_I_TYPE_MAX(scm_t_int16,SCM_T_UINT16_MAX)
+
+#define SCM_T_UINT32_MAX SCM_I_UTYPE_MAX(scm_t_uint32)
+#define SCM_T_INT32_MIN SCM_I_TYPE_MIN(scm_t_int32,SCM_T_UINT32_MAX)
+#define SCM_T_INT32_MAX SCM_I_TYPE_MAX(scm_t_int32,SCM_T_UINT32_MAX)
+
+#if SCM_HAVE_T_INT64
+#define SCM_T_UINT64_MAX SCM_I_UTYPE_MAX(scm_t_uint64)
+#define SCM_T_INT64_MIN SCM_I_TYPE_MIN(scm_t_int64,SCM_T_UINT64_MAX)
+#define SCM_T_INT64_MAX SCM_I_TYPE_MAX(scm_t_int64,SCM_T_UINT64_MAX)
+#endif
+
+#if SCM_SIZEOF_LONG_LONG
+#define SCM_I_ULLONG_MAX SCM_I_UTYPE_MAX(unsigned long long)
+#define SCM_I_LLONG_MIN SCM_I_TYPE_MIN(long long,SCM_I_ULLONG_MAX)
+#define SCM_I_LLONG_MAX SCM_I_TYPE_MAX(long long,SCM_I_ULLONG_MAX)
+#endif
+
+#define SCM_T_UINTMAX_MAX SCM_I_UTYPE_MAX(scm_t_uintmax)
+#define SCM_T_INTMAX_MIN SCM_I_TYPE_MIN(scm_t_intmax,SCM_T_UINTMAX_MAX)
+#define SCM_T_INTMAX_MAX SCM_I_TYPE_MAX(scm_t_intmax,SCM_T_UINTMAX_MAX)
+
+#define SCM_I_SIZE_MAX SCM_I_UTYPE_MAX(size_t)
+#define SCM_I_SSIZE_MIN SCM_I_TYPE_MIN(ssize_t,SCM_I_SIZE_MAX)
+#define SCM_I_SSIZE_MAX SCM_I_TYPE_MAX(ssize_t,SCM_I_SIZE_MAX)
+
\f
#include "libguile/tags.h"
#define SCM_FENCE
#endif
+/* In the old days, SCM_DEFER_INTS stopped signal handlers from running,
+ since in those days the handler directly ran scheme code, and that had to
+ be avoided when the heap was not in a consistent state etc. And since
+ the scheme code could do a stack swapping new continuation etc, signals
+ had to be deferred around various C library functions which were not safe
+ or not known to be safe to swap away, which was a lot of stuff.
+
+ These days signals are implemented with asyncs and don't directly run
+ scheme code in the handler, but hold it until an SCM_TICK etc where it
+ will be safe. This means interrupt protection is not needed and
+ SCM_DEFER_INTS / SCM_ALLOW_INTS is something of an anachronism.
+
+ What past SCM_DEFER_INTS usage also did though was indicate code that was
+   not reentrant, i.e. could not be reentered by signal handler code.  The
+ present definitions are a mutex lock, affording that reentrancy
+ protection against the new guile 1.8 free-running posix threads.
+
+   One big problem with the present definitions though is that code which
+ throws an error from within a DEFER/ALLOW region will leave the
+ defer_mutex locked and hence hang other threads that attempt to enter a
+ similar DEFER/ALLOW region.
+
+ The plan is to migrate reentrancy protection to an explicit mutex
+ (private or global, with unwind where necessary), and remove the
+ remaining DEFER/ALLOWs. */
+
#define SCM_DEFER_INTS scm_rec_mutex_lock (&scm_i_defer_mutex);
#define SCM_ALLOW_INTS scm_rec_mutex_unlock (&scm_i_defer_mutex);
? scm_apply_generic ((gf), (args)) \
: (scm_wrong_type_arg ((subr), (pos), \
scm_list_ref ((args), \
- SCM_MAKINUM ((pos) - 1))), \
+ scm_from_int ((pos) - 1))), \
SCM_UNSPECIFIED))
#define SCM_GASSERTn(cond, gf, args, pos, subr) \
if (!(cond)) SCM_WTA_DISPATCH_n((gf), (args), (pos), (subr))