-/* Copyright (C) 2001, 2009, 2010, 2011 Free Software Foundation, Inc.
+/* Copyright (C) 2001, 2009-2014 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
}
#define VM_VALIDATE_CONS(x, proc) \
- if (SCM_UNLIKELY (!scm_is_pair (x))) \
- { func_name = proc; \
- finish_args = x; \
- goto vm_error_not_a_pair; \
- }
+ VM_ASSERT (scm_is_pair (x), vm_error_not_a_pair (proc, x))
VM_DEFINE_FUNCTION (141, car, "car", 1)
{
*/
#undef REL
-#define REL(crel,srel) \
-{ \
- ARGS2 (x, y); \
- if (SCM_I_INUMP (x) && SCM_I_INUMP (y)) \
- RETURN (scm_from_bool ((scm_t_signed_bits) (x) \
- crel (scm_t_signed_bits) (y))); \
- SYNC_REGISTER (); \
- RETURN (srel (x, y)); \
-}
+#define REL(crel,srel) \
+ { \
+ ARGS2 (x, y); \
+ if (SCM_I_INUMP (x) && SCM_I_INUMP (y)) \
+ RETURN (scm_from_bool (((scm_t_signed_bits) SCM_UNPACK (x)) \
+ crel ((scm_t_signed_bits) SCM_UNPACK (y)))); \
+ SYNC_REGISTER (); \
+ RETURN (srel (x, y)); \
+ }
VM_DEFINE_FUNCTION (145, ee, "ee?", 2)
{
/* The maximum/minimum tagged integers. */
#undef INUM_MAX
#undef INUM_MIN
-#define INUM_MAX (INTPTR_MAX - 1)
-#define INUM_MIN (INTPTR_MIN + scm_tc2_int)
+#undef INUM_STEP
+#define INUM_MAX \
+ ((scm_t_signed_bits) SCM_UNPACK (SCM_I_MAKINUM (SCM_MOST_POSITIVE_FIXNUM)))
+#define INUM_MIN \
+ ((scm_t_signed_bits) SCM_UNPACK (SCM_I_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM)))
+#define INUM_STEP \
+ ((scm_t_signed_bits) SCM_UNPACK (SCM_INUM1) \
+ - (scm_t_signed_bits) SCM_UNPACK (SCM_INUM0))
#undef FUNC2
#define FUNC2(CFUNC,SFUNC) \
/* Assembly tagged integer arithmetic routines. This code uses the
`asm goto' feature introduced in GCC 4.5. */
-#if defined __x86_64__ && SCM_GNUC_PREREQ (4, 5)
+#if SCM_GNUC_PREREQ (4, 5) && (defined __x86_64__ || defined __i386__)
+
+# undef _CX
+# if SIZEOF_VOID_P == 8
+# define _CX "rcx"
+# elif SIZEOF_VOID_P == 4
+# define _CX "ecx"
+# else
+# error unsupported word size
+# endif
/* The macros below check the CPU's overflow flag to improve fixnum
- arithmetic. The %rcx register is explicitly clobbered because `asm
- goto' can't have outputs, in which case the `r' constraint could be
- used to let the register allocator choose a register.
+ arithmetic. The _CX register (%rcx or %ecx) is explicitly
+ clobbered because `asm goto' can't have outputs, in which case the
+ `r' constraint could be used to let the register allocator choose a
+ register.
TODO: Use `cold' label attribute in GCC 4.6.
http://gcc.gnu.org/ml/gcc-patches/2010-10/msg01777.html */
# define ASM_ADD(x, y) \
{ \
- asm volatile goto ("mov %1, %%rcx; " \
- "test %[tag], %%cl; je %l[slow_add]; " \
- "test %[tag], %0; je %l[slow_add]; " \
- "add %0, %%rcx; jo %l[slow_add]; " \
- "sub %[tag], %%rcx; " \
- "mov %%rcx, (%[vsp])\n" \
+ asm volatile goto ("mov %1, %%"_CX"; " \
+ "test %[tag], %%cl; je %l[slow_add]; " \
+ "test %[tag], %0; je %l[slow_add]; " \
+ "sub %[tag], %%"_CX"; " \
+ "add %0, %%"_CX"; jo %l[slow_add]; " \
+ "mov %%"_CX", (%[vsp])\n" \
: /* no outputs */ \
: "r" (x), "r" (y), \
[vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
- : "rcx", "memory" \
+ : _CX, "memory", "cc" \
: slow_add); \
NEXT; \
} \
# define ASM_SUB(x, y) \
{ \
- asm volatile goto ("mov %0, %%rcx; " \
- "test %[tag], %%cl; je %l[slow_sub]; " \
- "test %[tag], %1; je %l[slow_sub]; " \
- "sub %1, %%rcx; jo %l[slow_sub]; " \
- "add %[tag], %%rcx; " \
- "mov %%rcx, (%[vsp])\n" \
+ asm volatile goto ("mov %0, %%"_CX"; " \
+ "test %[tag], %%cl; je %l[slow_sub]; " \
+ "test %[tag], %1; je %l[slow_sub]; " \
+ "sub %1, %%"_CX"; jo %l[slow_sub]; " \
+ "add %[tag], %%"_CX"; " \
+ "mov %%"_CX", (%[vsp])\n" \
: /* no outputs */ \
: "r" (x), "r" (y), \
[vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
- : "rcx", "memory" \
+ : _CX, "memory", "cc" \
: slow_sub); \
NEXT; \
} \
slow_sub: \
do { } while (0)
+# define ASM_MUL(x, y) \
+ { \
+ scm_t_signed_bits xx = SCM_I_INUM (x); \
+ asm volatile goto ("mov %1, %%"_CX"; " \
+ "test %[tag], %%cl; je %l[slow_mul]; " \
+ "sub %[tag], %%"_CX"; " \
+ "test %[tag], %0; je %l[slow_mul]; " \
+ "imul %2, %%"_CX"; jo %l[slow_mul]; " \
+ "add %[tag], %%"_CX"; " \
+ "mov %%"_CX", (%[vsp])\n" \
+ : /* no outputs */ \
+ : "r" (x), "r" (y), "r" (xx), \
+ [vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
+ : _CX, "memory", "cc" \
+ : slow_mul); \
+ NEXT; \
+ } \
+ slow_mul: \
+ do { } while (0)
+
#endif
+#if SCM_GNUC_PREREQ (4, 5) && defined __arm__
+
+# define ASM_ADD(x, y) \
+ if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
+ { \
+ asm volatile goto ("adds r0, %0, %1; bvs %l[slow_add]; " \
+ "str r0, [%[vsp]]\n" \
+ : /* no outputs */ \
+ : "r" (x), "r" (y - scm_tc2_int), \
+ [vsp] "r" (sp) \
+ : "r0", "memory", "cc" \
+ : slow_add); \
+ NEXT; \
+ } \
+ slow_add: \
+ do { } while (0)
+
+# define ASM_SUB(x, y) \
+ if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
+ { \
+ asm volatile goto ("subs r0, %0, %1; bvs %l[slow_sub]; " \
+ "str r0, [%[vsp]]\n" \
+ : /* no outputs */ \
+ : "r" (x), "r" (y - scm_tc2_int), \
+ [vsp] "r" (sp) \
+ : "r0", "memory", "cc" \
+ : slow_sub); \
+ NEXT; \
+ } \
+ slow_sub: \
+ do { } while (0)
+
+# if defined (__ARM_ARCH_3M__) || defined (__ARM_ARCH_4__) \
+ || defined (__ARM_ARCH_4T__) || defined (__ARM_ARCH_5__) \
+ || defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5E__) \
+ || defined (__ARM_ARCH_5TE__) || defined (__ARM_ARCH_5TEJ__) \
+ || defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) \
+ || defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6Z__) \
+ || defined (__ARM_ARCH_6ZK__) || defined (__ARM_ARCH_6T2__) \
+ || defined (__ARM_ARCH_6M__) || defined (__ARM_ARCH_7__) \
+ || defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__) \
+ || defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7EM__) \
+ || defined (__ARM_ARCH_8A__)
+
+/* The ARM architectures listed above support the SMULL instruction */
+
+# define ASM_MUL(x, y) \
+ if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
+ { \
+ scm_t_signed_bits rlo, rhi; \
+ asm ("smull %0, %1, %2, %3\n" \
+ : "=&r" (rlo), "=&r" (rhi) \
+ : "r" (SCM_UNPACK (x) - scm_tc2_int), \
+ "r" (SCM_I_INUM (y))); \
+ if (SCM_LIKELY (SCM_SRS (rlo, 31) == rhi)) \
+ RETURN (SCM_PACK (rlo + scm_tc2_int)); \
+ } \
+ do { } while (0)
+
+# endif
+
+#endif
VM_DEFINE_FUNCTION (150, add, "add", 2)
{
{
ARGS1 (x);
- /* Check for overflow. */
- if (SCM_LIKELY ((scm_t_intptr) x < INUM_MAX))
+ /* Check for overflow. We must avoid overflow in the signed
+ addition below, even if X is not an inum. */
+ if (SCM_LIKELY ((scm_t_signed_bits) SCM_UNPACK (x) <= INUM_MAX - INUM_STEP))
{
SCM result;
- /* Add the integers without untagging. */
- result = SCM_PACK ((scm_t_intptr) x
- + (scm_t_intptr) SCM_I_MAKINUM (1)
- - scm_tc2_int);
+ /* Add 1 to the integer without untagging. */
+ result = SCM_PACK ((scm_t_signed_bits) SCM_UNPACK (x) + INUM_STEP);
if (SCM_LIKELY (SCM_I_INUMP (result)))
RETURN (result);
{
ARGS1 (x);
- /* Check for underflow. */
- if (SCM_LIKELY ((scm_t_intptr) x > INUM_MIN))
+ /* Check for overflow. We must avoid overflow in the signed
+ subtraction below, even if X is not an inum. */
+ if (SCM_LIKELY ((scm_t_signed_bits) SCM_UNPACK (x) >= INUM_MIN + INUM_STEP))
{
SCM result;
- /* Substract the integers without untagging. */
- result = SCM_PACK ((scm_t_intptr) x
- - (scm_t_intptr) SCM_I_MAKINUM (1)
- + scm_tc2_int);
+ /* Subtract 1 from the integer without untagging. */
+ result = SCM_PACK ((scm_t_signed_bits) SCM_UNPACK (x) - INUM_STEP);
if (SCM_LIKELY (SCM_I_INUMP (result)))
RETURN (result);
RETURN (scm_difference (x, SCM_I_MAKINUM (1)));
}
-# undef ASM_ADD
-# undef ASM_SUB
-
VM_DEFINE_FUNCTION (154, mul, "mul", 2)
{
ARGS2 (x, y);
+#ifdef ASM_MUL
+ ASM_MUL (x, y);
+#endif
SYNC_REGISTER ();
RETURN (scm_product (x, y));
}
+# undef ASM_ADD
+# undef ASM_SUB
+# undef ASM_MUL
+
VM_DEFINE_FUNCTION (155, div, "div", 2)
{
ARGS2 (x, y);
{
if (SCM_I_INUM (y) < 0)
/* Right shift, will be a fixnum. */
- RETURN (SCM_I_MAKINUM (SCM_I_INUM (x) >> -SCM_I_INUM (y)));
+ RETURN (SCM_I_MAKINUM
+ (SCM_SRS (SCM_I_INUM (x),
+ (-SCM_I_INUM (y) <= SCM_I_FIXNUM_BIT-1)
+ ? -SCM_I_INUM (y) : SCM_I_FIXNUM_BIT-1)));
else
/* Left shift. See comments in scm_ash. */
{
&& ((scm_t_bits)
(SCM_SRS (nn, (SCM_I_FIXNUM_BIT-1 - bits_to_shift)) + 1)
<= 1))
- RETURN (SCM_I_MAKINUM (nn << bits_to_shift));
+ RETURN (SCM_I_MAKINUM (nn < 0
+ ? -(-nn << bits_to_shift)
+ : (nn << bits_to_shift)));
/* fall through */
}
/* fall through */
{
ARGS2 (x, y);
if (SCM_I_INUMP (x) && SCM_I_INUMP (y))
- RETURN (SCM_I_MAKINUM (SCM_I_INUM (x) & SCM_I_INUM (y)));
+ /* Compute bitwise AND without untagging */
+ RETURN (SCM_PACK (SCM_UNPACK (x) & SCM_UNPACK (y)));
SYNC_REGISTER ();
RETURN (scm_logand (x, y));
}
{
ARGS2 (x, y);
if (SCM_I_INUMP (x) && SCM_I_INUMP (y))
- RETURN (SCM_I_MAKINUM (SCM_I_INUM (x) | SCM_I_INUM (y)));
+ /* Compute bitwise OR without untagging */
+ RETURN (SCM_PACK (SCM_UNPACK (x) | SCM_UNPACK (y)));
SYNC_REGISTER ();
RETURN (scm_logior (x, y));
}
* Structs
*/
#define VM_VALIDATE_STRUCT(obj, proc) \
- if (SCM_UNLIKELY (!SCM_STRUCTP (obj))) \
- { \
- func_name = proc; \
- finish_args = (obj); \
- goto vm_error_not_a_struct; \
- }
+ VM_ASSERT (SCM_STRUCTP (obj), vm_error_not_a_struct (proc, obj))
VM_DEFINE_FUNCTION (166, struct_p, "struct?", 1)
{
* Bytevectors
*/
#define VM_VALIDATE_BYTEVECTOR(x, proc) \
- do \
- { \
- if (SCM_UNLIKELY (!SCM_BYTEVECTOR_P (x))) \
- { \
- func_name = proc; \
- finish_args = x; \
- goto vm_error_not_a_bytevector; \
- } \
- } \
- while (0)
+ VM_ASSERT (SCM_BYTEVECTOR_P (x), vm_error_not_a_bytevector (proc, x))
#define BV_REF_WITH_ENDIANNESS(stem, fn_stem) \
{ \
/* Return true (non-zero) if PTR has suitable alignment for TYPE. */
#define ALIGNED_P(ptr, type) \
- ((scm_t_uintptr) (ptr) % alignof (type) == 0)
+ ((scm_t_uintptr) (ptr) % alignof_type (type) == 0)
VM_DEFINE_FUNCTION (174, bv_u16_ref, "bv-u16-ref", 3)
BV_REF_WITH_ENDIANNESS (u16, u16)