-/* Copyright (C) 2001, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+/* Copyright (C) 2001, 2009-2014 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
#define ARGS2(a1,a2) SCM a1 = sp[-1], a2 = sp[0]; sp--; NULLSTACK (1);
#define ARGS3(a1,a2,a3) SCM a1 = sp[-2], a2 = sp[-1], a3 = sp[0]; sp -= 2; NULLSTACK (2);
-#define RETURN(x) do { *sp = x; } while (0)
+#define RETURN(x) do { *sp = x; NEXT; } while (0)
VM_DEFINE_FUNCTION (128, not, "not", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (scm_is_false (x)));
- NEXT;
}
VM_DEFINE_FUNCTION (129, not_not, "not-not", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (!scm_is_false (x)));
- NEXT;
}
VM_DEFINE_FUNCTION (130, eq, "eq?", 2)
{
ARGS2 (x, y);
RETURN (scm_from_bool (scm_is_eq (x, y)));
- NEXT;
}
VM_DEFINE_FUNCTION (131, not_eq, "not-eq?", 2)
{
ARGS2 (x, y);
RETURN (scm_from_bool (!scm_is_eq (x, y)));
- NEXT;
}
VM_DEFINE_FUNCTION (132, nullp, "null?", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (scm_is_null (x)));
- NEXT;
}
VM_DEFINE_FUNCTION (133, not_nullp, "not-null?", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (!scm_is_null (x)));
- NEXT;
}
VM_DEFINE_FUNCTION (134, eqv, "eqv?", 2)
ARGS2 (x, y);
if (scm_is_eq (x, y))
RETURN (SCM_BOOL_T);
- else if (SCM_IMP (x) || SCM_IMP (y))
+ if (SCM_IMP (x) || SCM_IMP (y))
RETURN (SCM_BOOL_F);
- else
- {
- SYNC_REGISTER ();
- RETURN (scm_eqv_p (x, y));
- }
- NEXT;
+ SYNC_REGISTER ();
+ RETURN (scm_eqv_p (x, y));
}
VM_DEFINE_FUNCTION (135, equal, "equal?", 2)
ARGS2 (x, y);
if (scm_is_eq (x, y))
RETURN (SCM_BOOL_T);
- else if (SCM_IMP (x) || SCM_IMP (y))
+ if (SCM_IMP (x) || SCM_IMP (y))
RETURN (SCM_BOOL_F);
- else
- {
- SYNC_REGISTER ();
- RETURN (scm_equal_p (x, y));
- }
- NEXT;
+ SYNC_REGISTER ();
+ RETURN (scm_equal_p (x, y));
}
VM_DEFINE_FUNCTION (136, pairp, "pair?", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (scm_is_pair (x)));
- NEXT;
}
VM_DEFINE_FUNCTION (137, listp, "list?", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (scm_ilength (x) >= 0));
- NEXT;
}
VM_DEFINE_FUNCTION (138, symbolp, "symbol?", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (scm_is_symbol (x)));
- NEXT;
}
VM_DEFINE_FUNCTION (139, vectorp, "vector?", 1)
{
ARGS1 (x);
RETURN (scm_from_bool (SCM_I_IS_VECTOR (x)));
- NEXT;
}
\f
ARGS2 (x, y);
CONS (x, x, y);
RETURN (x);
- NEXT;
}
#define VM_VALIDATE_CONS(x, proc) \
- if (SCM_UNLIKELY (!scm_is_pair (x))) \
- { \
- func_name = proc; \
- finish_args = x; \
- goto vm_error_not_a_pair; \
- }
+ VM_ASSERT (scm_is_pair (x), vm_error_not_a_pair (proc, x))
VM_DEFINE_FUNCTION (141, car, "car", 1)
{
ARGS1 (x);
VM_VALIDATE_CONS (x, "car");
RETURN (SCM_CAR (x));
- NEXT;
}
VM_DEFINE_FUNCTION (142, cdr, "cdr", 1)
ARGS1 (x);
VM_VALIDATE_CONS (x, "cdr");
RETURN (SCM_CDR (x));
- NEXT;
}
VM_DEFINE_INSTRUCTION (143, set_car, "set-car!", 0, 2, 0)
if (SCM_I_INUMP (x) && SCM_I_INUMP (y)) \
RETURN (scm_from_bool (((scm_t_signed_bits) SCM_UNPACK (x)) \
crel ((scm_t_signed_bits) SCM_UNPACK (y)))); \
- else \
- { \
- SYNC_REGISTER (); \
- RETURN (srel (x, y)); \
- } \
- NEXT; \
+ SYNC_REGISTER (); \
+ RETURN (srel (x, y)); \
}
VM_DEFINE_FUNCTION (145, ee, "ee?", 2)
/* The maximum/minimum tagged integers. */
#undef INUM_MAX
#undef INUM_MIN
-#define INUM_MAX (INTPTR_MAX - 1)
-#define INUM_MIN (INTPTR_MIN + scm_tc2_int)
+#undef INUM_STEP
+#define INUM_MAX \
+ ((scm_t_signed_bits) SCM_UNPACK (SCM_I_MAKINUM (SCM_MOST_POSITIVE_FIXNUM)))
+#define INUM_MIN \
+ ((scm_t_signed_bits) SCM_UNPACK (SCM_I_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM)))
+#define INUM_STEP \
+ ((scm_t_signed_bits) SCM_UNPACK (SCM_INUM1) \
+ - (scm_t_signed_bits) SCM_UNPACK (SCM_INUM0))
#undef FUNC2
-#define FUNC2(CFUNC,SFUNC) \
- { \
- ARGS2 (x, y); \
- if (SCM_I_INUMP (x) && SCM_I_INUMP (y)) \
- { \
- scm_t_int64 n = SCM_I_INUM (x) CFUNC SCM_I_INUM (y); \
- if (SCM_FIXABLE (n)) \
- { \
- RETURN (SCM_I_MAKINUM (n)); \
- NEXT; \
- } \
- } \
- SYNC_REGISTER (); \
- RETURN (SFUNC (x, y)); \
- NEXT; \
- }
+#define FUNC2(CFUNC,SFUNC) \
+{ \
+ ARGS2 (x, y); \
+ if (SCM_I_INUMP (x) && SCM_I_INUMP (y)) \
+ { \
+ scm_t_int64 n = SCM_I_INUM (x) CFUNC SCM_I_INUM (y);\
+ if (SCM_FIXABLE (n)) \
+ RETURN (SCM_I_MAKINUM (n)); \
+ } \
+ SYNC_REGISTER (); \
+ RETURN (SFUNC (x, y)); \
+}
/* Assembly tagged integer arithmetic routines. This code uses the
`asm goto' feature introduced in GCC 4.5. */
-#if defined __x86_64__ && SCM_GNUC_PREREQ (4, 5)
+#if SCM_GNUC_PREREQ (4, 5) && (defined __x86_64__ || defined __i386__)
+
+# undef _CX
+# if SIZEOF_VOID_P == 8
+# define _CX "rcx"
+# elif SIZEOF_VOID_P == 4
+# define _CX "ecx"
+# else
+# error unsupported word size
+# endif
/* The macros below check the CPU's overflow flag to improve fixnum
- arithmetic. The %rcx register is explicitly clobbered because `asm
- goto' can't have outputs, in which case the `r' constraint could be
- used to let the register allocator choose a register.
+ arithmetic. The _CX register (%rcx or %ecx) is explicitly
+ clobbered because `asm goto' can't have outputs, in which case the
+ `r' constraint could be used to let the register allocator choose a
+ register.
TODO: Use `cold' label attribute in GCC 4.6.
http://gcc.gnu.org/ml/gcc-patches/2010-10/msg01777.html */
# define ASM_ADD(x, y) \
{ \
- asm volatile goto ("mov %1, %%rcx; " \
- "test %[tag], %%cl; je %l[slow_add]; " \
- "test %[tag], %0; je %l[slow_add]; " \
- "add %0, %%rcx; jo %l[slow_add]; " \
- "sub %[tag], %%rcx; " \
- "mov %%rcx, (%[vsp])\n" \
+ asm volatile goto ("mov %1, %%"_CX"; " \
+ "test %[tag], %%cl; je %l[slow_add]; " \
+ "test %[tag], %0; je %l[slow_add]; " \
+ "sub %[tag], %%"_CX"; " \
+ "add %0, %%"_CX"; jo %l[slow_add]; " \
+ "mov %%"_CX", (%[vsp])\n" \
: /* no outputs */ \
: "r" (x), "r" (y), \
[vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
- : "rcx", "memory" \
+ : _CX, "memory", "cc" \
: slow_add); \
NEXT; \
} \
# define ASM_SUB(x, y) \
{ \
- asm volatile goto ("mov %0, %%rcx; " \
- "test %[tag], %%cl; je %l[slow_sub]; " \
- "test %[tag], %1; je %l[slow_sub]; " \
- "sub %1, %%rcx; jo %l[slow_sub]; " \
- "add %[tag], %%rcx; " \
- "mov %%rcx, (%[vsp])\n" \
+ asm volatile goto ("mov %0, %%"_CX"; " \
+ "test %[tag], %%cl; je %l[slow_sub]; " \
+ "test %[tag], %1; je %l[slow_sub]; " \
+ "sub %1, %%"_CX"; jo %l[slow_sub]; " \
+ "add %[tag], %%"_CX"; " \
+ "mov %%"_CX", (%[vsp])\n" \
: /* no outputs */ \
: "r" (x), "r" (y), \
[vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
- : "rcx", "memory" \
+ : _CX, "memory", "cc" \
: slow_sub); \
NEXT; \
} \
slow_sub: \
do { } while (0)
+# define ASM_MUL(x, y) \
+ { \
+ scm_t_signed_bits xx = SCM_I_INUM (x); \
+ asm volatile goto ("mov %1, %%"_CX"; " \
+ "test %[tag], %%cl; je %l[slow_mul]; " \
+ "sub %[tag], %%"_CX"; " \
+ "test %[tag], %0; je %l[slow_mul]; " \
+ "imul %2, %%"_CX"; jo %l[slow_mul]; " \
+ "add %[tag], %%"_CX"; " \
+ "mov %%"_CX", (%[vsp])\n" \
+ : /* no outputs */ \
+ : "r" (x), "r" (y), "r" (xx), \
+ [vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
+ : _CX, "memory", "cc" \
+ : slow_mul); \
+ NEXT; \
+ } \
+ slow_mul: \
+ do { } while (0)
+
#endif
+#if SCM_GNUC_PREREQ (4, 5) && defined __arm__
+
+# define ASM_ADD(x, y) \
+ if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
+ { \
+ asm volatile goto ("adds r0, %0, %1; bvs %l[slow_add]; " \
+ "str r0, [%[vsp]]\n" \
+ : /* no outputs */ \
+ : "r" (x), "r" (y - scm_tc2_int), \
+ [vsp] "r" (sp) \
+ : "r0", "memory", "cc" \
+ : slow_add); \
+ NEXT; \
+ } \
+ slow_add: \
+ do { } while (0)
+
+# define ASM_SUB(x, y) \
+ if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
+ { \
+ asm volatile goto ("subs r0, %0, %1; bvs %l[slow_sub]; " \
+ "str r0, [%[vsp]]\n" \
+ : /* no outputs */ \
+ : "r" (x), "r" (y - scm_tc2_int), \
+ [vsp] "r" (sp) \
+ : "r0", "memory", "cc" \
+ : slow_sub); \
+ NEXT; \
+ } \
+ slow_sub: \
+ do { } while (0)
+
+# if defined (__ARM_ARCH_3M__) || defined (__ARM_ARCH_4__) \
+ || defined (__ARM_ARCH_4T__) || defined (__ARM_ARCH_5__) \
+ || defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5E__) \
+ || defined (__ARM_ARCH_5TE__) || defined (__ARM_ARCH_5TEJ__) \
+ || defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) \
+ || defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6Z__) \
+ || defined (__ARM_ARCH_6ZK__) || defined (__ARM_ARCH_6T2__) \
+ || defined (__ARM_ARCH_6M__) || defined (__ARM_ARCH_7__) \
+ || defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__) \
+ || defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7EM__) \
+ || defined (__ARM_ARCH_8A__)
+
+/* The ARM architectures listed above support the SMULL instruction. */
+
+# define ASM_MUL(x, y) \
+ if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
+ { \
+ scm_t_signed_bits rlo, rhi; \
+ asm ("smull %0, %1, %2, %3\n" \
+ : "=&r" (rlo), "=&r" (rhi) \
+ : "r" (SCM_UNPACK (x) - scm_tc2_int), \
+ "r" (SCM_I_INUM (y))); \
+ if (SCM_LIKELY (SCM_SRS (rlo, 31) == rhi)) \
+ RETURN (SCM_PACK (rlo + scm_tc2_int)); \
+ } \
+ do { } while (0)
+
+# endif
+
+#endif
VM_DEFINE_FUNCTION (150, add, "add", 2)
{
ASM_ADD (x, y);
SYNC_REGISTER ();
RETURN (scm_sum (x, y));
- NEXT;
#endif
}
{
ARGS1 (x);
- /* Check for overflow. */
- if (SCM_LIKELY ((scm_t_intptr) SCM_UNPACK (x) < INUM_MAX))
+ /* Check for overflow. We must avoid overflow in the signed
+ addition below, even if X is not an inum. */
+ if (SCM_LIKELY ((scm_t_signed_bits) SCM_UNPACK (x) <= INUM_MAX - INUM_STEP))
{
SCM result;
- /* Add the integers without untagging. */
- result = SCM_PACK ((scm_t_intptr) SCM_UNPACK (x)
- + (scm_t_intptr) SCM_UNPACK (SCM_I_MAKINUM (1))
- - scm_tc2_int);
+ /* Add 1 to the integer without untagging. */
+ result = SCM_PACK ((scm_t_signed_bits) SCM_UNPACK (x) + INUM_STEP);
if (SCM_LIKELY (SCM_I_INUMP (result)))
- {
- RETURN (result);
- NEXT;
- }
+ RETURN (result);
}
SYNC_REGISTER ();
RETURN (scm_sum (x, SCM_I_MAKINUM (1)));
- NEXT;
}
VM_DEFINE_FUNCTION (152, sub, "sub", 2)
ASM_SUB (x, y);
SYNC_REGISTER ();
RETURN (scm_difference (x, y));
- NEXT;
#endif
}
{
ARGS1 (x);
- /* Check for underflow. */
- if (SCM_LIKELY ((scm_t_intptr) SCM_UNPACK (x) > INUM_MIN))
+ /* Check for overflow. We must avoid overflow in the signed
+ subtraction below, even if X is not an inum. */
+ if (SCM_LIKELY ((scm_t_signed_bits) SCM_UNPACK (x) >= INUM_MIN + INUM_STEP))
{
SCM result;
- /* Substract the integers without untagging. */
- result = SCM_PACK ((scm_t_intptr) SCM_UNPACK (x)
- - (scm_t_intptr) SCM_UNPACK (SCM_I_MAKINUM (1))
- + scm_tc2_int);
+ /* Subtract 1 from the integer without untagging. */
+ result = SCM_PACK ((scm_t_signed_bits) SCM_UNPACK (x) - INUM_STEP);
if (SCM_LIKELY (SCM_I_INUMP (result)))
- {
- RETURN (result);
- NEXT;
- }
+ RETURN (result);
}
SYNC_REGISTER ();
RETURN (scm_difference (x, SCM_I_MAKINUM (1)));
- NEXT;
}
-# undef ASM_ADD
-# undef ASM_SUB
-
VM_DEFINE_FUNCTION (154, mul, "mul", 2)
{
ARGS2 (x, y);
+#ifdef ASM_MUL
+ ASM_MUL (x, y);
+#endif
SYNC_REGISTER ();
RETURN (scm_product (x, y));
- NEXT;
}
+# undef ASM_ADD
+# undef ASM_SUB
+# undef ASM_MUL
+
VM_DEFINE_FUNCTION (155, div, "div", 2)
{
ARGS2 (x, y);
SYNC_REGISTER ();
RETURN (scm_divide (x, y));
- NEXT;
}
VM_DEFINE_FUNCTION (156, quo, "quo", 2)
ARGS2 (x, y);
SYNC_REGISTER ();
RETURN (scm_quotient (x, y));
- NEXT;
}
VM_DEFINE_FUNCTION (157, rem, "rem", 2)
ARGS2 (x, y);
SYNC_REGISTER ();
RETURN (scm_remainder (x, y));
- NEXT;
}
VM_DEFINE_FUNCTION (158, mod, "mod", 2)
ARGS2 (x, y);
SYNC_REGISTER ();
RETURN (scm_modulo (x, y));
- NEXT;
}
VM_DEFINE_FUNCTION (159, ash, "ash", 2)
{
if (SCM_I_INUM (y) < 0)
/* Right shift, will be a fixnum. */
- {
- RETURN (SCM_I_MAKINUM (SCM_I_INUM (x) >> -SCM_I_INUM (y)));
- NEXT;
- }
+ RETURN (SCM_I_MAKINUM
+ (SCM_SRS (SCM_I_INUM (x),
+ (-SCM_I_INUM (y) <= SCM_I_FIXNUM_BIT-1)
+ ? -SCM_I_INUM (y) : SCM_I_FIXNUM_BIT-1)));
else
/* Left shift. See comments in scm_ash. */
{
&& ((scm_t_bits)
(SCM_SRS (nn, (SCM_I_FIXNUM_BIT-1 - bits_to_shift)) + 1)
<= 1))
- {
- RETURN (SCM_I_MAKINUM (nn << bits_to_shift));
- NEXT;
- }
+ RETURN (SCM_I_MAKINUM (nn < 0
+ ? -(-nn << bits_to_shift)
+ : (nn << bits_to_shift)));
/* fall through */
}
/* fall through */
}
SYNC_REGISTER ();
RETURN (scm_ash (x, y));
- NEXT;
}
VM_DEFINE_FUNCTION (160, logand, "logand", 2)
{
ARGS2 (x, y);
- if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y)))
- RETURN (SCM_I_MAKINUM (SCM_I_INUM (x) & SCM_I_INUM (y)));
- else
- {
- SYNC_REGISTER ();
- RETURN (scm_logand (x, y));
- }
- NEXT;
+ if (SCM_I_INUMP (x) && SCM_I_INUMP (y))
+ /* Compute bitwise AND without untagging */
+ RETURN (SCM_PACK (SCM_UNPACK (x) & SCM_UNPACK (y)));
+ SYNC_REGISTER ();
+ RETURN (scm_logand (x, y));
}
VM_DEFINE_FUNCTION (161, logior, "logior", 2)
{
ARGS2 (x, y);
- if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y)))
- RETURN (SCM_I_MAKINUM (SCM_I_INUM (x) | SCM_I_INUM (y)));
- else
- {
- SYNC_REGISTER ();
- RETURN (scm_logior (x, y));
- }
- NEXT;
+ if (SCM_I_INUMP (x) && SCM_I_INUMP (y))
+ /* Compute bitwise OR without untagging */
+ RETURN (SCM_PACK (SCM_UNPACK (x) | SCM_UNPACK (y)));
+ SYNC_REGISTER ();
+ RETURN (scm_logior (x, y));
}
VM_DEFINE_FUNCTION (162, logxor, "logxor", 2)
{
ARGS2 (x, y);
- if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y)))
+ if (SCM_I_INUMP (x) && SCM_I_INUMP (y))
RETURN (SCM_I_MAKINUM (SCM_I_INUM (x) ^ SCM_I_INUM (y)));
- else
- {
- SYNC_REGISTER ();
- RETURN (scm_logxor (x, y));
- }
- NEXT;
+ SYNC_REGISTER ();
+ RETURN (scm_logxor (x, y));
}
\f
SYNC_REGISTER ();
RETURN (scm_vector_ref (vect, idx));
}
- NEXT;
}
VM_DEFINE_INSTRUCTION (164, vector_set, "vector-set", 0, 3, 0)
* Structs
*/
#define VM_VALIDATE_STRUCT(obj, proc) \
- if (SCM_UNLIKELY (!SCM_STRUCTP (obj))) \
- { \
- func_name = proc; \
- finish_args = (obj); \
- goto vm_error_not_a_struct; \
- }
+ VM_ASSERT (SCM_STRUCTP (obj), vm_error_not_a_struct (proc, obj))
VM_DEFINE_FUNCTION (166, struct_p, "struct?", 1)
{
ARGS1 (obj);
RETURN (scm_from_bool (SCM_STRUCTP (obj)));
- NEXT;
}
VM_DEFINE_FUNCTION (167, struct_vtable, "struct-vtable", 1)
ARGS1 (obj);
VM_VALIDATE_STRUCT (obj, "struct_vtable");
RETURN (SCM_STRUCT_VTABLE (obj));
- NEXT;
}
VM_DEFINE_INSTRUCTION (168, make_struct, "make-struct", 2, -1, 1)
if (SCM_LIKELY (index < len))
{
- scm_t_bits *data = SCM_STRUCT_DATA (obj);
- RETURN (SCM_PACK (data[index]));
- NEXT;
+ scm_t_bits *data = SCM_STRUCT_DATA (obj);
+ RETURN (SCM_PACK (data[index]));
}
}
SYNC_REGISTER ();
RETURN (scm_struct_ref (obj, pos));
- NEXT;
}
VM_DEFINE_FUNCTION (170, struct_set, "struct-set", 3)
len = SCM_STRUCT_DATA_REF (vtable, scm_vtable_index_size);
if (SCM_LIKELY (index < len))
{
- scm_t_bits *data = SCM_STRUCT_DATA (obj);
- data[index] = SCM_UNPACK (val);
- RETURN (val);
- NEXT;
+ scm_t_bits *data = SCM_STRUCT_DATA (obj);
+ data[index] = SCM_UNPACK (val);
+ RETURN (val);
}
}
SYNC_REGISTER ();
RETURN (scm_struct_set_x (obj, pos, val));
- NEXT;
}
\f
ARGS1 (obj);
if (SCM_INSTANCEP (obj))
RETURN (SCM_CLASS_OF (obj));
- else
- {
- SYNC_REGISTER ();
- RETURN (scm_class_of (obj));
- }
- NEXT;
+ SYNC_REGISTER ();
+ RETURN (scm_class_of (obj));
}
/* FIXME: No checking whatsoever. */
ARGS2 (instance, idx);
slot = SCM_I_INUM (idx);
RETURN (SCM_PACK (SCM_STRUCT_DATA (instance) [slot]));
- NEXT;
}
/* FIXME: No checking whatsoever. */
* Bytevectors
*/
#define VM_VALIDATE_BYTEVECTOR(x, proc) \
- do \
- { \
- if (SCM_UNLIKELY (!SCM_BYTEVECTOR_P (x))) \
- { \
- func_name = proc; \
- finish_args = x; \
- goto vm_error_not_a_bytevector; \
- } \
- } \
- while (0)
+ VM_ASSERT (SCM_BYTEVECTOR_P (x), vm_error_not_a_bytevector (proc, x))
#define BV_REF_WITH_ENDIANNESS(stem, fn_stem) \
{ \
ARGS2 (bv, idx); \
SYNC_REGISTER (); \
RETURN (scm_bytevector_##fn_stem##_ref (bv, idx, endianness)); \
- NEXT; \
} \
}
&& (i >= 0) \
&& (i + size <= SCM_BYTEVECTOR_LENGTH (bv)) \
&& (ALIGNED_P (int_ptr, scm_t_ ## type)))) \
- RETURN (SCM_I_MAKINUM (*int_ptr)); \
+ RETURN (SCM_I_MAKINUM (*int_ptr)); \
else \
{ \
SYNC_REGISTER (); \
- RETURN (scm_bytevector_ ## fn_stem ## _ref (bv, idx)); \
+ RETURN (scm_bytevector_ ## fn_stem ## _ref (bv, idx)); \
} \
- NEXT; \
}
#define BV_INT_REF(stem, type, size) \
SYNC_REGISTER (); \
RETURN (scm_bytevector_ ## stem ## _native_ref (bv, idx)); \
} \
- NEXT; \
}
#define BV_FLOAT_REF(stem, fn_stem, type, size) \
RETURN (scm_from_double (*float_ptr)); \
else \
RETURN (scm_bytevector_ ## fn_stem ## _native_ref (bv, idx)); \
- NEXT; \
}
VM_DEFINE_FUNCTION (182, bv_u8_ref, "bv-u8-ref", 2)