/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

/* This file is included in vm_engine.c */
/* Operand-fetch helpers for the instruction bodies below.  ARGS1 just
   names the value on top of the stack; ARGS2/ARGS3 name the top items,
   drop the consumed slots, and NULLSTACK clears the vacated slots so
   the GC does not retain dead values.  (Fix: stray line-number
   artifacts from the extraction removed.)  */
#define ARGS1(a1)	SCM a1 = sp[0];
#define ARGS2(a1,a2)	SCM a1 = sp[-1], a2 = sp[0]; sp--; NULLSTACK (1);
#define ARGS3(a1,a2,a3)	SCM a1 = sp[-2], a2 = sp[-1], a3 = sp[0]; sp -= 2; NULLSTACK (2);

/* Store X in the (already reserved) top-of-stack slot and dispatch the
   next opcode.  */
#define RETURN(x)	do { *sp = x; NEXT; } while (0)
32 VM_DEFINE_FUNCTION (128, not, "not", 1)
35 RETURN (scm_from_bool (scm_is_false (x
)));
38 VM_DEFINE_FUNCTION (129, not_not
, "not-not", 1)
41 RETURN (scm_from_bool (!scm_is_false (x
)));
44 VM_DEFINE_FUNCTION (130, eq
, "eq?", 2)
47 RETURN (scm_from_bool (scm_is_eq (x
, y
)));
50 VM_DEFINE_FUNCTION (131, not_eq, "not-eq?", 2)
53 RETURN (scm_from_bool (!scm_is_eq (x
, y
)));
56 VM_DEFINE_FUNCTION (132, nullp
, "null?", 1)
59 RETURN (scm_from_bool (scm_is_null (x
)));
62 VM_DEFINE_FUNCTION (133, not_nullp
, "not-null?", 1)
65 RETURN (scm_from_bool (!scm_is_null (x
)));
68 VM_DEFINE_FUNCTION (134, eqv
, "eqv?", 2)
73 if (SCM_IMP (x
) || SCM_IMP (y
))
76 RETURN (scm_eqv_p (x
, y
));
79 VM_DEFINE_FUNCTION (135, equal
, "equal?", 2)
84 if (SCM_IMP (x
) || SCM_IMP (y
))
87 RETURN (scm_equal_p (x
, y
));
90 VM_DEFINE_FUNCTION (136, pairp
, "pair?", 1)
93 RETURN (scm_from_bool (scm_is_pair (x
)));
96 VM_DEFINE_FUNCTION (137, listp
, "list?", 1)
99 RETURN (scm_from_bool (scm_ilength (x
) >= 0));
102 VM_DEFINE_FUNCTION (138, symbolp
, "symbol?", 1)
105 RETURN (scm_from_bool (scm_is_symbol (x
)));
108 VM_DEFINE_FUNCTION (139, vectorp
, "vector?", 1)
111 RETURN (scm_from_bool (SCM_I_IS_VECTOR (x
)));
/* cons: build a fresh pair from the top two stack items.  Only the
   instruction header survived the extraction -- the body (original
   lines 120-125) is missing here; NOTE(review): restore it from the
   upstream vm-i-scheme.c.  */
119 VM_DEFINE_FUNCTION (140, cons
, "cons", 2)
/* Signal a wrong-type error on behalf of PROC unless X is a pair.  */
#define VM_VALIDATE_CONS(x, proc)			\
  VM_ASSERT (scm_is_pair (x), vm_error_not_a_pair (proc, x))
129 VM_DEFINE_FUNCTION (141, car
, "car", 1)
132 VM_VALIDATE_CONS (x
, "car");
133 RETURN (SCM_CAR (x
));
136 VM_DEFINE_FUNCTION (142, cdr
, "cdr", 1)
139 VM_VALIDATE_CONS (x
, "cdr");
140 RETURN (SCM_CDR (x
));
143 VM_DEFINE_INSTRUCTION (143, set_car
, "set-car!", 0, 2, 0)
147 VM_VALIDATE_CONS (x
, "set-car!");
152 VM_DEFINE_INSTRUCTION (144, set_cdr
, "set-cdr!", 0, 2, 0)
156 VM_VALIDATE_CONS (x
, "set-cdr!");
163 * Numeric relational tests
167 #define REL(crel,srel) \
170 if (SCM_I_INUMP (x) && SCM_I_INUMP (y)) \
171 RETURN (scm_from_bool (((scm_t_signed_bits) SCM_UNPACK (x)) \
172 crel ((scm_t_signed_bits) SCM_UNPACK (y)))); \
174 RETURN (srel (x, y)); \
177 VM_DEFINE_FUNCTION (145, ee
, "ee?", 2)
179 REL (==, scm_num_eq_p
);
182 VM_DEFINE_FUNCTION (146, lt
, "lt?", 2)
187 VM_DEFINE_FUNCTION (147, le
, "le?", 2)
192 VM_DEFINE_FUNCTION (148, gt
, "gt?", 2)
197 VM_DEFINE_FUNCTION (149, ge
, "ge?", 2)
/* The maximum/minimum tagged integers.  */
#define INUM_MAX \
  ((scm_t_signed_bits) SCM_UNPACK (SCM_I_MAKINUM (SCM_MOST_POSITIVE_FIXNUM)))
#define INUM_MIN \
  ((scm_t_signed_bits) SCM_UNPACK (SCM_I_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM)))
/* Difference between the tagged representations of N and N+1.  */
#define INUM_STEP				\
  ((scm_t_signed_bits) SCM_UNPACK (SCM_INUM1)	\
   - (scm_t_signed_bits) SCM_UNPACK (SCM_INUM0))

/* Binary arithmetic: when both operands are fixnums, do the operation
   in 64 bits (which cannot overflow for fixnum inputs) and keep the
   fast path if the result is still FIXABLE; otherwise fall back to the
   generic SFUNC.  (Fix: macro names INUM_MAX/INUM_MIN/INUM_STEP and
   the FUNC2 braces were missing from the extraction; restored --
   NOTE(review): names confirmed by their uses in add1/sub1 below.)  */
#define FUNC2(CFUNC,SFUNC)				\
{							\
  ARGS2 (x, y);						\
  if (SCM_I_INUMP (x) && SCM_I_INUMP (y))		\
    {							\
      scm_t_int64 n = SCM_I_INUM (x) CFUNC SCM_I_INUM (y);\
      if (SCM_FIXABLE (n))				\
	RETURN (SCM_I_MAKINUM (n));			\
    }							\
  SYNC_REGISTER ();					\
  RETURN (SFUNC (x, y));				\
}
/* NOTE(review): garbled extraction -- original line numbers are fused
   into the text and several lines of each macro (braces, output
   sections, the ": slow_add)" goto-label operand lines, NEXT, and the
   closing #endif of the word-size conditional) are missing.  Verify
   against upstream libguile/vm-i-scheme.c before compiling.  */
233 /* Assembly tagged integer arithmetic routines. This code uses the
234 `asm goto' feature introduced in GCC 4.5. */
236 #if SCM_GNUC_PREREQ (4, 5) && (defined __x86_64__ || defined __i386__)
/* _CX names the scratch register: %rcx on 64-bit, %ecx on 32-bit.
   The #define _CX "..." lines themselves are missing here.  */
239 # if SIZEOF_VOID_P == 8
241 # elif SIZEOF_VOID_P == 4
244 # error unsupported word size
247 /* The macros below check the CPU's overflow flag to improve fixnum
248 arithmetic. The _CX register (%rcx or %ecx) is explicitly
249 clobbered because `asm goto' can't have outputs, in which case the
250 `r' constraint could be used to let the register allocator choose a
253 TODO: Use `cold' label attribute in GCC 4.6.
254 http://gcc.gnu.org/ml/gcc-patches/2010-10/msg01777.html */
/* Fast fixnum add: bail to the slow_add label if either operand is not
   tag-marked as an inum, or if the add sets the overflow flag; on
   success the tagged sum is stored straight into the stack slot.  */
256 # define ASM_ADD(x, y) \
258 asm volatile goto ("mov %1, %%"_CX"; " \
259 "test %[tag], %%cl; je %l[slow_add]; " \
260 "test %[tag], %0; je %l[slow_add]; " \
261 "sub %[tag], %%"_CX"; " \
262 "add %0, %%"_CX"; jo %l[slow_add]; " \
263 "mov %%"_CX", (%[vsp])\n" \
265 : "r" (x), "r" (y), \
266 [vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
267 : _CX, "memory", "cc" \
/* Fast fixnum subtract: same scheme, jumping to slow_sub on non-inum
   operands or signed overflow.  */
274 # define ASM_SUB(x, y) \
276 asm volatile goto ("mov %0, %%"_CX"; " \
277 "test %[tag], %%cl; je %l[slow_sub]; " \
278 "test %[tag], %1; je %l[slow_sub]; " \
279 "sub %1, %%"_CX"; jo %l[slow_sub]; " \
280 "add %[tag], %%"_CX"; " \
281 "mov %%"_CX", (%[vsp])\n" \
283 : "r" (x), "r" (y), \
284 [vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
285 : _CX, "memory", "cc" \
/* Fast fixnum multiply: one operand is untagged in C (xx), the other
   untagged in asm, then imul with overflow check before retagging.  */
292 # define ASM_MUL(x, y) \
294 scm_t_signed_bits xx = SCM_I_INUM (x); \
295 asm volatile goto ("mov %1, %%"_CX"; " \
296 "test %[tag], %%cl; je %l[slow_mul]; " \
297 "sub %[tag], %%"_CX"; " \
298 "test %[tag], %0; je %l[slow_mul]; " \
299 "imul %2, %%"_CX"; jo %l[slow_mul]; " \
300 "add %[tag], %%"_CX"; " \
301 "mov %%"_CX", (%[vsp])\n" \
303 : "r" (x), "r" (y), "r" (xx), \
304 [vsp] "r" (sp), [tag] "i" (scm_tc2_int) \
305 : _CX, "memory", "cc" \
/* NOTE(review): ARM counterparts of the x86 macros above; also garbled
   (missing braces, goto-label operand lines, NEXT, #endif).  Verify
   against upstream before compiling.  */
314 #if SCM_GNUC_PREREQ (4, 5) && defined __arm__
/* adds/subs set the V (overflow) flag; bvs branches to the slow path.
   Subtracting scm_tc2_int from one operand makes the tagged add/sub
   come out already tagged.  */
316 # define ASM_ADD(x, y) \
317 if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
319 asm volatile goto ("adds r0, %0, %1; bvs %l[slow_add]; " \
320 "str r0, [%[vsp]]\n" \
322 : "r" (x), "r" (y - scm_tc2_int), \
324 : "r0", "memory", "cc" \
331 # define ASM_SUB(x, y) \
332 if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
334 asm volatile goto ("subs r0, %0, %1; bvs %l[slow_sub]; " \
335 "str r0, [%[vsp]]\n" \
337 : "r" (x), "r" (y - scm_tc2_int), \
339 : "r0", "memory", "cc" \
/* Multiply is only provided on ARM variants that have SMULL.  */
346 # if defined (__ARM_ARCH_3M__) || defined (__ARM_ARCH_4__) \
347 || defined (__ARM_ARCH_4T__) || defined (__ARM_ARCH_5__) \
348 || defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5E__) \
349 || defined (__ARM_ARCH_5TE__) || defined (__ARM_ARCH_5TEJ__) \
350 || defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) \
351 || defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6Z__) \
352 || defined (__ARM_ARCH_6ZK__) || defined (__ARM_ARCH_6T2__) \
353 || defined (__ARM_ARCH_6M__) || defined (__ARM_ARCH_7__) \
354 || defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__) \
355 || defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7EM__) \
356 || defined (__ARM_ARCH_8A__)
358 /* The ARM architectures listed above support the SMULL instruction */
/* 32x32->64 multiply; the result fits when the high word equals the
   sign-extension of the low word.  */
360 # define ASM_MUL(x, y) \
361 if (SCM_LIKELY (SCM_I_INUMP (x) && SCM_I_INUMP (y))) \
363 scm_t_signed_bits rlo, rhi; \
364 asm ("smull %0, %1, %2, %3\n" \
365 : "=r" (rlo), "=r" (rhi) \
366 : "r" (SCM_UNPACK (x) - scm_tc2_int), \
367 "r" (SCM_I_INUM (y))); \
368 if (SCM_LIKELY (SCM_SRS (rlo, 31) == rhi)) \
369 RETURN (SCM_PACK (rlo + scm_tc2_int)); \
/* NOTE(review): add/add1/sub/sub1/mul.  Heavily garbled: the
   #ifdef ASM_ADD/#else FUNC2(...)/#endif scaffolding, the slow_* labels,
   braces, and ARGS* lines are missing.  Verify against upstream.  */
377 VM_DEFINE_FUNCTION (150, add
, "add", 2)
/* Generic fallback (slow path): full numeric-tower addition.  */
385 RETURN (scm_sum (x
, y
));
389 VM_DEFINE_FUNCTION (151, add1
, "add1", 1)
393 /* Check for overflow. We must avoid overflow in the signed
394 addition below, even if X is not an inum. */
395 if (SCM_LIKELY ((scm_t_signed_bits
) SCM_UNPACK (x
) <= INUM_MAX
- INUM_STEP
))
399 /* Add 1 to the integer without untagging. */
400 result
= SCM_PACK ((scm_t_signed_bits
) SCM_UNPACK (x
) + INUM_STEP
);
/* If X was in fact an inum, RESULT is the tagged X+1; otherwise fall
   through to the generic scm_sum below.  */
402 if (SCM_LIKELY (SCM_I_INUMP (result
)))
407 RETURN (scm_sum (x
, SCM_I_MAKINUM (1)));
410 VM_DEFINE_FUNCTION (152, sub
, "sub", 2)
/* Non-asm configuration: fixnum fast path via FUNC2.  */
413 FUNC2 (-, scm_difference
);
418 RETURN (scm_difference (x
, y
));
422 VM_DEFINE_FUNCTION (153, sub1
, "sub1", 1)
426 /* Check for overflow. We must avoid overflow in the signed
427 subtraction below, even if X is not an inum. */
428 if (SCM_LIKELY ((scm_t_signed_bits
) SCM_UNPACK (x
) >= INUM_MIN
+ INUM_STEP
))
432 /* Substract 1 from the integer without untagging. */
433 result
= SCM_PACK ((scm_t_signed_bits
) SCM_UNPACK (x
) - INUM_STEP
);
435 if (SCM_LIKELY (SCM_I_INUMP (result
)))
440 RETURN (scm_difference (x
, SCM_I_MAKINUM (1)));
443 VM_DEFINE_FUNCTION (154, mul
, "mul", 2)
/* Generic fallback: full numeric-tower multiplication.  */
450 RETURN (scm_product (x
, y
));
457 VM_DEFINE_FUNCTION (155, div
, "div", 2)
461 RETURN (scm_divide (x
, y
));
464 VM_DEFINE_FUNCTION (156, quo
, "quo", 2)
468 RETURN (scm_quotient (x
, y
));
471 VM_DEFINE_FUNCTION (157, rem
, "rem", 2)
475 RETURN (scm_remainder (x
, y
));
478 VM_DEFINE_FUNCTION (158, mod
, "mod", 2)
482 RETURN (scm_modulo (x
, y
));
/* NOTE(review): arithmetic shift.  Garbled: braces, ARGS2, the
   assignment of nn, and part of the left-shift overflow condition are
   missing.  Verify against upstream before compiling.  */
485 VM_DEFINE_FUNCTION (159, ash
, "ash", 2)
488 if (SCM_I_INUMP (x
) && SCM_I_INUMP (y
))
490 if (SCM_I_INUM (y
) < 0)
491 /* Right shift, will be a fixnum. */
/* Clamp the shift count to SCM_I_FIXNUM_BIT-1: shifting a C integer by
   >= its width is undefined behavior, and a larger arithmetic right
   shift gives the same Scheme result anyway.  */
492 RETURN (SCM_I_MAKINUM
493 (SCM_SRS (SCM_I_INUM (x
),
494 (-SCM_I_INUM (y
) <= SCM_I_FIXNUM_BIT
-1)
495 ? -SCM_I_INUM (y
) : SCM_I_FIXNUM_BIT
-1)));
497 /* Left shift. See comments in scm_ash. */
499 scm_t_signed_bits nn
, bits_to_shift
;
502 bits_to_shift
= SCM_I_INUM (y
);
/* Fast path only when the count is small and the shifted value still
   fits in a fixnum (the SRS test checks that the discarded high bits
   are all equal to the sign bit); part of this condition is missing
   from the extraction.  */
504 if (bits_to_shift
< SCM_I_FIXNUM_BIT
-1
506 (SCM_SRS (nn
, (SCM_I_FIXNUM_BIT
-1 - bits_to_shift
)) + 1)
508 RETURN (SCM_I_MAKINUM (nn
<< bits_to_shift
));
/* Generic fallback: bignum-capable shift.  */
514 RETURN (scm_ash (x
, y
));
517 VM_DEFINE_FUNCTION (160, logand
, "logand", 2)
520 if (SCM_I_INUMP (x
) && SCM_I_INUMP (y
))
521 /* Compute bitwise AND without untagging */
522 RETURN (SCM_PACK (SCM_UNPACK (x
) & SCM_UNPACK (y
)));
524 RETURN (scm_logand (x
, y
));
527 VM_DEFINE_FUNCTION (161, logior
, "logior", 2)
530 if (SCM_I_INUMP (x
) && SCM_I_INUMP (y
))
531 /* Compute bitwise OR without untagging */
532 RETURN (SCM_PACK (SCM_UNPACK (x
) | SCM_UNPACK (y
)));
534 RETURN (scm_logior (x
, y
));
537 VM_DEFINE_FUNCTION (162, logxor
, "logxor", 2)
540 if (SCM_I_INUMP (x
) && SCM_I_INUMP (y
))
541 RETURN (SCM_I_MAKINUM (SCM_I_INUM (x
) ^ SCM_I_INUM (y
)));
543 RETURN (scm_logxor (x
, y
));
551 VM_DEFINE_FUNCTION (163, vector_ref
, "vector-ref", 2)
553 scm_t_signed_bits i
= 0;
555 if (SCM_LIKELY (SCM_I_IS_NONWEAK_VECTOR (vect
)
557 && ((i
= SCM_I_INUM (idx
)) >= 0)
558 && i
< SCM_I_VECTOR_LENGTH (vect
)))
559 RETURN (SCM_I_VECTOR_ELTS (vect
)[i
]);
563 RETURN (scm_vector_ref (vect
, idx
));
567 VM_DEFINE_INSTRUCTION (164, vector_set
, "vector-set", 0, 3, 0)
569 scm_t_signed_bits i
= 0;
571 POP3 (val
, idx
, vect
);
572 if (SCM_LIKELY (SCM_I_IS_NONWEAK_VECTOR (vect
)
574 && ((i
= SCM_I_INUM (idx
)) >= 0)
575 && i
< SCM_I_VECTOR_LENGTH (vect
)))
576 SCM_I_VECTOR_WELTS (vect
)[i
] = val
;
580 scm_vector_set_x (vect
, idx
, val
);
/* NOTE(review): make-array.  Garbled: the declarations of len/shape/
   ret, the first FETCH into len, the shape pop, SYNC_REGISTER, the
   DROPN/PUSH epilogue and NEXT are missing.  The three FETCHed bytes
   form a 24-bit element count; the elements are taken from the top of
   the stack.  Verify against upstream.  */
585 VM_DEFINE_INSTRUCTION (165, make_array
, "make-array", 3, -1, 1)
591 len
= (len
<< 8) + FETCH ();
592 len
= (len
<< 8) + FETCH ();
595 PRE_CHECK_UNDERFLOW (len
);
596 ret
= scm_from_contiguous_array (shape
, sp
- len
+ 1, len
);
606 #define VM_VALIDATE_STRUCT(obj, proc) \
607 VM_ASSERT (SCM_STRUCTP (obj), vm_error_not_a_struct (proc, obj))
609 VM_DEFINE_FUNCTION (166, struct_p
, "struct?", 1)
612 RETURN (scm_from_bool (SCM_STRUCTP (obj
)));
615 VM_DEFINE_FUNCTION (167, struct_vtable
, "struct-vtable", 1)
618 VM_VALIDATE_STRUCT (obj
, "struct_vtable");
619 RETURN (SCM_STRUCT_VTABLE (obj
));
/* NOTE(review): make-struct.  Garbled: SYNC_REGISTER, the full
   fast-path condition (the "+ 1 == n" comparison against the vtable
   size), the scm_words argument list tail, the else branch scaffolding
   and the DROPN/PUSH epilogue are missing.  The 16-bit operand N is
   the total argument count (vtable + N-1 initializers) taken from the
   stack.  Verify against upstream.  */
622 VM_DEFINE_INSTRUCTION (168, make_struct
, "make-struct", 2, -1, 1)
624 unsigned h
= FETCH ();
625 unsigned l
= FETCH ();
626 scm_t_bits n
= ((h
<< 8U) + l
);
627 SCM vtable
= sp
[-(n
- 1)];
628 const SCM
*inits
= sp
- n
+ 2;
/* Fast path: a "simple" vtable (fixed layout, all fields writable),
   exactly the right number of initializers, and no finalizer.  */
633 if (SCM_LIKELY (SCM_STRUCTP (vtable
)
634 && SCM_VTABLE_FLAG_IS_SET (vtable
, SCM_VTABLE_FLAG_SIMPLE
)
635 && (SCM_STRUCT_DATA_REF (vtable
, scm_vtable_index_size
) + 1
637 && !SCM_VTABLE_INSTANCE_FINALIZER (vtable
)))
639 /* Verily, we are making a simple struct with the right number of
640 initializers, and no finalizer. */
641 ret
= scm_words ((scm_t_bits
)SCM_STRUCT_DATA (vtable
) | scm_tc3_struct
,
/* Word 1 points at the struct's own data area (slot 2 onward).  */
643 SCM_SET_CELL_WORD_1 (ret
, (scm_t_bits
)SCM_CELL_OBJECT_LOC (ret
, 2));
644 memcpy (SCM_STRUCT_DATA (ret
), inits
, (n
- 1) * sizeof (SCM
));
/* Slow path: the general constructor handles everything else.  */
647 ret
= scm_c_make_structv (vtable
, 0, n
- 1, (scm_t_bits
*) inits
);
/* NOTE(review): struct-ref/struct-set.  Garbled: ARGS2, the vtable/
   index declarations' scaffolding, the else branches with
   SYNC_REGISTER, and (for struct-set) the final RETURN (val) of the
   fast path are missing.  Verify against upstream.  */
655 VM_DEFINE_FUNCTION (169, struct_ref
, "struct-ref", 2)
/* Fast path: a "simple" struct (every field is an unboxed-free SCM
   slot) indexed by a fixnum.  */
659 if (SCM_LIKELY (SCM_STRUCTP (obj
)
660 && SCM_STRUCT_VTABLE_FLAG_IS_SET (obj
,
661 SCM_VTABLE_FLAG_SIMPLE
)
662 && SCM_I_INUMP (pos
)))
665 scm_t_bits index
, len
;
667 /* True, an inum is a signed value, but cast to unsigned it will
668 certainly be more than the length, so we will fall through if
669 index is negative. */
670 index
= SCM_I_INUM (pos
);
671 vtable
= SCM_STRUCT_VTABLE (obj
);
672 len
= SCM_STRUCT_DATA_REF (vtable
, scm_vtable_index_size
);
674 if (SCM_LIKELY (index
< len
))
676 scm_t_bits
*data
= SCM_STRUCT_DATA (obj
);
677 RETURN (SCM_PACK (data
[index
]));
/* Slow path: checked, handles opaque/self slots and errors.  */
682 RETURN (scm_struct_ref (obj
, pos
));
685 VM_DEFINE_FUNCTION (170, struct_set
, "struct-set", 3)
687 ARGS3 (obj
, pos
, val
);
/* Fast path additionally requires the all-fields-writable flag.  */
689 if (SCM_LIKELY (SCM_STRUCTP (obj
)
690 && SCM_STRUCT_VTABLE_FLAG_IS_SET (obj
,
691 SCM_VTABLE_FLAG_SIMPLE
)
692 && SCM_STRUCT_VTABLE_FLAG_IS_SET (obj
,
693 SCM_VTABLE_FLAG_SIMPLE_RW
)
694 && SCM_I_INUMP (pos
)))
697 scm_t_bits index
, len
;
699 /* See above regarding index being >= 0. */
700 index
= SCM_I_INUM (pos
);
701 vtable
= SCM_STRUCT_VTABLE (obj
);
702 len
= SCM_STRUCT_DATA_REF (vtable
, scm_vtable_index_size
);
703 if (SCM_LIKELY (index
< len
))
705 scm_t_bits
*data
= SCM_STRUCT_DATA (obj
);
706 data
[index
] = SCM_UNPACK (val
);
712 RETURN (scm_struct_set_x (obj
, pos
, val
));
719 VM_DEFINE_FUNCTION (171, class_of
, "class-of", 1)
722 if (SCM_INSTANCEP (obj
))
723 RETURN (SCM_CLASS_OF (obj
));
725 RETURN (scm_class_of (obj
));
728 /* FIXME: No checking whatsoever. */
729 VM_DEFINE_FUNCTION (172, slot_ref
, "slot-ref", 2)
732 ARGS2 (instance
, idx
);
733 slot
= SCM_I_INUM (idx
);
734 RETURN (SCM_PACK (SCM_STRUCT_DATA (instance
) [slot
]));
737 /* FIXME: No checking whatsoever. */
738 VM_DEFINE_INSTRUCTION (173, slot_set
, "slot-set", 0, 3, 0)
740 SCM instance
, idx
, val
;
742 POP3 (val
, idx
, instance
);
743 slot
= SCM_I_INUM (idx
);
744 SCM_STRUCT_DATA (instance
) [slot
] = SCM_UNPACK (val
);
/* NOTE(review): bytevector access, endianness-dispatch side.  Garbled:
   BV_REF_WITH_ENDIANNESS is missing its braces, ARGS3, and the
   SYNC_REGISTER before the generic call.  Verify against upstream.  */
752 #define VM_VALIDATE_BYTEVECTOR(x, proc) \
753 VM_ASSERT (SCM_BYTEVECTOR_P (x), vm_error_not_a_bytevector (proc, x))
/* If the requested endianness is the native one, jump straight to the
   corresponding native-ref instruction; otherwise call the generic
   endianness-aware accessor.  */
755 #define BV_REF_WITH_ENDIANNESS(stem, fn_stem) \
759 if (scm_is_eq (endianness, scm_i_native_endianness)) \
760 goto VM_LABEL (bv_##stem##_native_ref); \
764 RETURN (scm_bytevector_##fn_stem##_ref (bv, idx, endianness)); \
768 /* Return true (non-zero) if PTR has suitable alignment for TYPE. */
769 #define ALIGNED_P(ptr, type) \
770 ((scm_t_uintptr) (ptr) % alignof_type (type) == 0)
/* Endianness-taking ref instructions, one per width/signedness.  */
772 VM_DEFINE_FUNCTION (174, bv_u16_ref
, "bv-u16-ref", 3)
773 BV_REF_WITH_ENDIANNESS (u16
, u16
)
774 VM_DEFINE_FUNCTION (175, bv_s16_ref
, "bv-s16-ref", 3)
775 BV_REF_WITH_ENDIANNESS (s16
, s16
)
776 VM_DEFINE_FUNCTION (176, bv_u32_ref
, "bv-u32-ref", 3)
777 BV_REF_WITH_ENDIANNESS (u32
, u32
)
778 VM_DEFINE_FUNCTION (177, bv_s32_ref
, "bv-s32-ref", 3)
779 BV_REF_WITH_ENDIANNESS (s32
, s32
)
780 VM_DEFINE_FUNCTION (178, bv_u64_ref
, "bv-u64-ref", 3)
781 BV_REF_WITH_ENDIANNESS (u64
, u64
)
782 VM_DEFINE_FUNCTION (179, bv_s64_ref
, "bv-s64-ref", 3)
783 BV_REF_WITH_ENDIANNESS (s64
, s64
)
784 VM_DEFINE_FUNCTION (180, bv_f32_ref
, "bv-f32-ref", 3)
785 BV_REF_WITH_ENDIANNESS (f32
, ieee_single
)
786 VM_DEFINE_FUNCTION (181, bv_f64_ref
, "bv-f64-ref", 3)
787 BV_REF_WITH_ENDIANNESS (f64
, ieee_double
)
789 #undef BV_REF_WITH_ENDIANNESS
/* NOTE(review): native-endianness bytevector refs.  Garbled: each
   macro is missing its braces, ARGS2, the "(i >= 0)" test line, the
   else branch with SYNC_REGISTER, and the #else/#endif lines of the
   SIZEOF_VOID_P conditionals below.  Verify against upstream.  */
/* Read an integer that always fits in a fixnum: fast path on aligned,
   in-range fixnum index; otherwise the checked library accessor.  */
791 #define BV_FIXABLE_INT_REF(stem, fn_stem, type, size) \
793 scm_t_signed_bits i; \
794 const scm_t_ ## type *int_ptr; \
797 VM_VALIDATE_BYTEVECTOR (bv, "bv-" #stem "-ref"); \
798 i = SCM_I_INUM (idx); \
799 int_ptr = (scm_t_ ## type *) (SCM_BYTEVECTOR_CONTENTS (bv) + i); \
801 if (SCM_LIKELY (SCM_I_INUMP (idx) \
803 && (i + size <= SCM_BYTEVECTOR_LENGTH (bv)) \
804 && (ALIGNED_P (int_ptr, scm_t_ ## type)))) \
805 RETURN (SCM_I_MAKINUM (*int_ptr)); \
809 RETURN (scm_bytevector_ ## fn_stem ## _ref (bv, idx)); \
/* Read an integer that may need a bignum: fixnum when FIXABLE, else
   scm_from_<type>.  */
813 #define BV_INT_REF(stem, type, size) \
815 scm_t_signed_bits i; \
816 const scm_t_ ## type *int_ptr; \
819 VM_VALIDATE_BYTEVECTOR (bv, "bv-" #stem "-ref"); \
820 i = SCM_I_INUM (idx); \
821 int_ptr = (scm_t_ ## type *) (SCM_BYTEVECTOR_CONTENTS (bv) + i); \
823 if (SCM_LIKELY (SCM_I_INUMP (idx) \
825 && (i + size <= SCM_BYTEVECTOR_LENGTH (bv)) \
826 && (ALIGNED_P (int_ptr, scm_t_ ## type)))) \
828 scm_t_ ## type x = *int_ptr; \
829 if (SCM_FIXABLE (x)) \
830 RETURN (SCM_I_MAKINUM (x)); \
834 RETURN (scm_from_ ## type (x)); \
840 RETURN (scm_bytevector_ ## stem ## _native_ref (bv, idx)); \
/* Read an IEEE float/double, boxed as a Scheme flonum.  */
844 #define BV_FLOAT_REF(stem, fn_stem, type, size) \
846 scm_t_signed_bits i; \
847 const type *float_ptr; \
850 VM_VALIDATE_BYTEVECTOR (bv, "bv-" #stem "-ref"); \
851 i = SCM_I_INUM (idx); \
852 float_ptr = (type *) (SCM_BYTEVECTOR_CONTENTS (bv) + i); \
855 if (SCM_LIKELY (SCM_I_INUMP (idx) \
857 && (i + size <= SCM_BYTEVECTOR_LENGTH (bv)) \
858 && (ALIGNED_P (float_ptr, type)))) \
859 RETURN (scm_from_double (*float_ptr)); \
861 RETURN (scm_bytevector_ ## fn_stem ## _native_ref (bv, idx)); \
/* Native ref instructions.  u32/s32 use the fixable variant only when
   pointers are 8 bytes wide (so a 32-bit value always fits a fixnum).  */
864 VM_DEFINE_FUNCTION (182, bv_u8_ref
, "bv-u8-ref", 2)
865 BV_FIXABLE_INT_REF (u8
, u8
, uint8
, 1)
866 VM_DEFINE_FUNCTION (183, bv_s8_ref
, "bv-s8-ref", 2)
867 BV_FIXABLE_INT_REF (s8
, s8
, int8
, 1)
868 VM_DEFINE_FUNCTION (184, bv_u16_native_ref
, "bv-u16-native-ref", 2)
869 BV_FIXABLE_INT_REF (u16
, u16_native
, uint16
, 2)
870 VM_DEFINE_FUNCTION (185, bv_s16_native_ref
, "bv-s16-native-ref", 2)
871 BV_FIXABLE_INT_REF (s16
, s16_native
, int16
, 2)
872 VM_DEFINE_FUNCTION (186, bv_u32_native_ref
, "bv-u32-native-ref", 2)
873 #if SIZEOF_VOID_P > 4
874 BV_FIXABLE_INT_REF (u32
, u32_native
, uint32
, 4)
876 BV_INT_REF (u32
, uint32
, 4)
878 VM_DEFINE_FUNCTION (187, bv_s32_native_ref
, "bv-s32-native-ref", 2)
879 #if SIZEOF_VOID_P > 4
880 BV_FIXABLE_INT_REF (s32
, s32_native
, int32
, 4)
882 BV_INT_REF (s32
, int32
, 4)
884 VM_DEFINE_FUNCTION (188, bv_u64_native_ref
, "bv-u64-native-ref", 2)
885 BV_INT_REF (u64
, uint64
, 8)
886 VM_DEFINE_FUNCTION (189, bv_s64_native_ref
, "bv-s64-native-ref", 2)
887 BV_INT_REF (s64
, int64
, 8)
888 VM_DEFINE_FUNCTION (190, bv_f32_native_ref
, "bv-f32-native-ref", 2)
889 BV_FLOAT_REF (f32
, ieee_single
, float, 4)
890 VM_DEFINE_FUNCTION (191, bv_f64_native_ref
, "bv-f64-native-ref", 2)
891 BV_FLOAT_REF (f64
, ieee_double
, double, 8)
893 #undef BV_FIXABLE_INT_REF
/* NOTE(review): bytevector writes, endianness-dispatch side.  Garbled:
   the macro is missing its braces, the endianness POP, SYNC_REGISTER,
   and NEXT.  Verify against upstream.  */
/* Mirror of BV_REF_WITH_ENDIANNESS for the set instructions: native
   endianness jumps to the native-set instruction, otherwise the
   generic endianness-aware setter is called.  */
899 #define BV_SET_WITH_ENDIANNESS(stem, fn_stem) \
903 if (scm_is_eq (endianness, scm_i_native_endianness)) \
904 goto VM_LABEL (bv_##stem##_native_set); \
906 SCM bv, idx, val; POP3 (val, idx, bv); \
908 scm_bytevector_##fn_stem##_set_x (bv, idx, val, endianness); \
913 VM_DEFINE_INSTRUCTION (192, bv_u16_set
, "bv-u16-set", 0, 4, 0)
914 BV_SET_WITH_ENDIANNESS (u16
, u16
)
915 VM_DEFINE_INSTRUCTION (193, bv_s16_set
, "bv-s16-set", 0, 4, 0)
916 BV_SET_WITH_ENDIANNESS (s16
, s16
)
917 VM_DEFINE_INSTRUCTION (194, bv_u32_set
, "bv-u32-set", 0, 4, 0)
918 BV_SET_WITH_ENDIANNESS (u32
, u32
)
919 VM_DEFINE_INSTRUCTION (195, bv_s32_set
, "bv-s32-set", 0, 4, 0)
920 BV_SET_WITH_ENDIANNESS (s32
, s32
)
921 VM_DEFINE_INSTRUCTION (196, bv_u64_set
, "bv-u64-set", 0, 4, 0)
922 BV_SET_WITH_ENDIANNESS (u64
, u64
)
923 VM_DEFINE_INSTRUCTION (197, bv_s64_set
, "bv-s64-set", 0, 4, 0)
924 BV_SET_WITH_ENDIANNESS (s64
, s64
)
925 VM_DEFINE_INSTRUCTION (198, bv_f32_set
, "bv-f32-set", 0, 4, 0)
926 BV_SET_WITH_ENDIANNESS (f32
, ieee_single
)
927 VM_DEFINE_INSTRUCTION (199, bv_f64_set
, "bv-f64-set", 0, 4, 0)
928 BV_SET_WITH_ENDIANNESS (f64
, ieee_double
)
930 #undef BV_SET_WITH_ENDIANNESS
/* NOTE(review): native-endianness bytevector writes.  Garbled: each
   macro is missing its braces, local SCM declarations, the
   "(i >= 0)" and "(j <= max)" condition lines, the else branch with
   SYNC_REGISTER, NEXT, the float_ptr declaration in BV_FLOAT_SET, and
   the #else/#endif lines of the SIZEOF_VOID_P conditionals below.
   Verify against upstream.  */
/* Store a fixnum value after range-checking it against [min, max];
   out-of-range or slow cases go through the checked library setter.  */
932 #define BV_FIXABLE_INT_SET(stem, fn_stem, type, min, max, size) \
934 scm_t_signed_bits i, j = 0; \
936 scm_t_ ## type *int_ptr; \
938 POP3 (val, idx, bv); \
939 VM_VALIDATE_BYTEVECTOR (bv, "bv-" #stem "-set"); \
940 i = SCM_I_INUM (idx); \
941 int_ptr = (scm_t_ ## type *) (SCM_BYTEVECTOR_CONTENTS (bv) + i); \
943 if (SCM_LIKELY (SCM_I_INUMP (idx) \
945 && (i + size <= SCM_BYTEVECTOR_LENGTH (bv)) \
946 && (ALIGNED_P (int_ptr, scm_t_ ## type)) \
947 && (SCM_I_INUMP (val)) \
948 && ((j = SCM_I_INUM (val)) >= min) \
950 *int_ptr = (scm_t_ ## type) j; \
954 scm_bytevector_ ## fn_stem ## _set_x (bv, idx, val); \
/* Store a 32/64-bit integer via scm_to_<type> (handles bignums and
   raises on range errors).  */
959 #define BV_INT_SET(stem, type, size) \
961 scm_t_signed_bits i = 0; \
963 scm_t_ ## type *int_ptr; \
965 POP3 (val, idx, bv); \
966 VM_VALIDATE_BYTEVECTOR (bv, "bv-" #stem "-set"); \
967 i = SCM_I_INUM (idx); \
968 int_ptr = (scm_t_ ## type *) (SCM_BYTEVECTOR_CONTENTS (bv) + i); \
970 if (SCM_LIKELY (SCM_I_INUMP (idx) \
972 && (i + size <= SCM_BYTEVECTOR_LENGTH (bv)) \
973 && (ALIGNED_P (int_ptr, scm_t_ ## type)))) \
974 *int_ptr = scm_to_ ## type (val); \
978 scm_bytevector_ ## stem ## _native_set_x (bv, idx, val); \
/* Store an IEEE float/double converted with scm_to_double.  */
983 #define BV_FLOAT_SET(stem, fn_stem, type, size) \
985 scm_t_signed_bits i = 0; \
989 POP3 (val, idx, bv); \
990 VM_VALIDATE_BYTEVECTOR (bv, "bv-" #stem "-set"); \
991 i = SCM_I_INUM (idx); \
992 float_ptr = (type *) (SCM_BYTEVECTOR_CONTENTS (bv) + i); \
994 if (SCM_LIKELY (SCM_I_INUMP (idx) \
996 && (i + size <= SCM_BYTEVECTOR_LENGTH (bv)) \
997 && (ALIGNED_P (float_ptr, type)))) \
998 *float_ptr = scm_to_double (val); \
1002 scm_bytevector_ ## fn_stem ## _native_set_x (bv, idx, val); \
/* Native set instructions; u32/s32 pick the fixable variant only on
   64-bit targets, as on the ref side.  */
1007 VM_DEFINE_INSTRUCTION (200, bv_u8_set
, "bv-u8-set", 0, 3, 0)
1008 BV_FIXABLE_INT_SET (u8
, u8
, uint8
, 0, SCM_T_UINT8_MAX
, 1)
1009 VM_DEFINE_INSTRUCTION (201, bv_s8_set
, "bv-s8-set", 0, 3, 0)
1010 BV_FIXABLE_INT_SET (s8
, s8
, int8
, SCM_T_INT8_MIN
, SCM_T_INT8_MAX
, 1)
1011 VM_DEFINE_INSTRUCTION (202, bv_u16_native_set
, "bv-u16-native-set", 0, 3, 0)
1012 BV_FIXABLE_INT_SET (u16
, u16_native
, uint16
, 0, SCM_T_UINT16_MAX
, 2)
1013 VM_DEFINE_INSTRUCTION (203, bv_s16_native_set
, "bv-s16-native-set", 0, 3, 0)
1014 BV_FIXABLE_INT_SET (s16
, s16_native
, int16
, SCM_T_INT16_MIN
, SCM_T_INT16_MAX
, 2)
1015 VM_DEFINE_INSTRUCTION (204, bv_u32_native_set
, "bv-u32-native-set", 0, 3, 0)
1016 #if SIZEOF_VOID_P > 4
1017 BV_FIXABLE_INT_SET (u32
, u32_native
, uint32
, 0, SCM_T_UINT32_MAX
, 4)
1019 BV_INT_SET (u32
, uint32
, 4)
1021 VM_DEFINE_INSTRUCTION (205, bv_s32_native_set
, "bv-s32-native-set", 0, 3, 0)
1022 #if SIZEOF_VOID_P > 4
1023 BV_FIXABLE_INT_SET (s32
, s32_native
, int32
, SCM_T_INT32_MIN
, SCM_T_INT32_MAX
, 4)
1025 BV_INT_SET (s32
, int32
, 4)
1027 VM_DEFINE_INSTRUCTION (206, bv_u64_native_set
, "bv-u64-native-set", 0, 3, 0)
1028 BV_INT_SET (u64
, uint64
, 8)
1029 VM_DEFINE_INSTRUCTION (207, bv_s64_native_set
, "bv-s64-native-set", 0, 3, 0)
1030 BV_INT_SET (s64
, int64
, 8)
1031 VM_DEFINE_INSTRUCTION (208, bv_f32_native_set
, "bv-f32-native-set", 0, 3, 0)
1032 BV_FLOAT_SET (f32
, ieee_single
, float, 4)
1033 VM_DEFINE_INSTRUCTION (209, bv_f64_native_set
, "bv-f64-native-set", 0, 3, 0)
1034 BV_FLOAT_SET (f64
, ieee_double
, double, 8)
1036 #undef BV_FIXABLE_INT_SET
/* Development helper (Emacs Lisp), kept inside a comment in the
   original source: renumbers the first argument of each VM_DEFINE_*
   form after an instruction is added or removed.  Parts of the
   function (the `interactive'/`save-excursion' lines and the
   `replace-match' tail) are missing from this extraction.

   (defun renumber-ops ()
     "start from top of buffer and renumber 'VM_DEFINE_FOO (\n' sequences"
     ...
     (let ((counter 127)) (goto-char (point-min))
       (while (re-search-forward "^VM_DEFINE_[^ ]+ (\\([^,]+\\)," (point-max) t)
         ... (number-to-string (setq counter (1+ counter))) ...)))
*/