1 /* Copyright (C) 1999 Free Software Foundation, Inc.
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this software; see the file COPYING. If not, write to
14 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
15 * Boston, MA 02111-1307 USA
17 * As a special exception, the Free Software Foundation gives permission
18 * for additional uses of the text contained in its release of GUILE.
20 * The exception is that, if you link the GUILE library with other files
21 * to produce an executable, this does not by itself cause the
22 * resulting executable to be covered by the GNU General Public License.
23 * Your use of that executable is in no way restricted on account of
24 * linking the GUILE library code into it.
26 * This exception does not however invalidate any other reasons why
27 * the executable file might be covered by the GNU General Public License.
29 * This exception applies only to the code released by the
30 * Free Software Foundation under the name GUILE. If you copy
31 * code from other Free Software Foundation releases into a copy of
32 * GUILE, as the General Public License permits, the exception does
33 * not apply to the code that you add in this way. To avoid misleading
34 * anyone as to the status of such modified files, you must delete
35 * this exception notice from them.
37 * If you write modifications of your own for GUILE, it is your choice
38 * whether to permit this exception to apply to your modifications.
39 * If you do not wish that, delete this exception notice. */
41 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
42 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* Written in December 1998 by Roland Orre <orre@nada.kth.se>
46 * This implements the same sort interface as slib/sort.scm
47 * for lists and vectors where slib defines:
48 * sorted?, merge, merge!, sort, sort!
49 * For scsh compatibility sort-list and sort-list! are also defined.
50 * In cases where a stable-sort is required use stable-sort or
51 * stable-sort!. An additional feature is
52 * (restricted-vector-sort! vector less? startpos endpos)
53 * which allows you to sort part of a vector.
54 * Thanks to Aubrey Jaffer for the slib/sort.scm library.
55 * Thanks to Richard A. O'Keefe (based on Prolog code by D.H.D.Warren)
56 * for the merge sort inspiration.
57 * Thanks to Douglas C. Schmidt (schmidt@ics.uci.edu) for the
 * quicksort source. */
61 /* We need this to get the definitions for HAVE_ALLOCA_H, etc. */
62 #include "scmconfig.h"
64 /* AIX requires this to be the first thing in the file. The #pragma
65 directive is indented so pre-ANSI compilers will ignore it, rather
74 # ifndef alloca /* predefined by HP cc +Olibcalls */
89 #include "scm_validate.h"
92 /* The routine quicksort was extracted from the GNU C Library qsort.c
93 written by Douglas C. Schmidt (schmidt@ics.uci.edu)
94 and adapted to guile by adding an extra pointer less
95 to quicksort by Roland Orre <orre@nada.kth.se>.
97 The reason to do this instead of using the library function qsort
98 was to avoid dependency of the ANSI-C extensions for local functions
99 and also to avoid obscure pool based solutions.
101 This sorting routine is not much more efficient than the stable
102 version but doesn't consume extra memory. */
/* Byte-wise swap two items of size SIZE. */
#define SWAP(a, b, size)                                                      \
  do                                                                          \
    {                                                                         \
      register size_t __size = (size);                                        \
      register char *__a = (a), *__b = (b);                                   \
      do                                                                      \
        {                                                                     \
          char __tmp = *__a;                                                  \
          *__a++ = *__b;                                                      \
          *__b++ = __tmp;                                                     \
        } while (--__size > 0);                                               \
    } while (0)

/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4

/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
  {
    char *lo;
    char *hi;
  } stack_node;

/* The next 4 #defines implement a very fast in-line stack abstraction. */
#define STACK_SIZE (8 * sizeof(unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)
138 /* Order size using quicksort. This implementation incorporates
139 four optimizations discussed in Sedgewick:
141 1. Non-recursive, using an explicit stack of pointer that store the
142 next array partition to sort. To save time, this maximum amount
143 of space required to store an array of MAX_INT is allocated on the
144 stack. Assuming a 32-bit integer, this needs only 32 *
145 sizeof(stack_node) == 136 bits. Pretty cheap, actually.
147 2. Chose the pivot element using a median-of-three decision tree.
148 This reduces the probability of selecting a bad pivot value and
149 eliminates certain extraneous comparisons.
151 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
152 insertion sort to order the MAX_THRESH items within each partition.
153 This is a big win, since insertion sort is faster for small, mostly
154 sorted array segments.
156 4. The larger of the two sub-partitions is always pushed onto the
157 stack first, with the algorithm then concentrating on the
158 smaller partition. This *guarantees* no more than log (n)
159 stack size is needed (actually O(1) in this case)! */
161 typedef int (*cmp_fun_t
) (SCM less
,
165 static const char s_buggy_less
[] = "buggy less predicate used when sorting";
168 quicksort (void *const pbase
,
174 register char *base_ptr
= (char *) pbase
;
176 /* Allocating SIZE bytes for a pivot buffer facilitates a better
177 algorithm below since we can do comparisons directly on the pivot. */
178 char *pivot_buffer
= (char *) alloca (size
);
179 const size_t max_thresh
= MAX_THRESH
* size
;
181 if (total_elems
== 0)
182 /* Avoid lossage with unsigned arithmetic below. */
185 if (total_elems
> MAX_THRESH
)
188 char *hi
= &lo
[size
* (total_elems
- 1)];
189 /* Largest size needed for 32-bit int!!! */
190 stack_node stack
[STACK_SIZE
];
191 stack_node
*top
= stack
+ 1;
193 while (STACK_NOT_EMPTY
)
198 char *pivot
= pivot_buffer
;
200 /* Select median value from among LO, MID, and HI. Rearrange
201 LO and HI so the three values are sorted. This lowers the
202 probability of picking a pathological pivot value and
203 skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
205 char *mid
= lo
+ size
* ((hi
- lo
) / size
>> 1);
207 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
208 SWAP (mid
, lo
, size
);
209 if ((*cmp
) (less
, (void *) hi
, (void *) mid
))
210 SWAP (mid
, hi
, size
);
213 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
214 SWAP (mid
, lo
, size
);
216 memcpy (pivot
, mid
, size
);
217 pivot
= pivot_buffer
;
219 left_ptr
= lo
+ size
;
220 right_ptr
= hi
- size
;
222 /* Here's the famous ``collapse the walls'' section of quicksort.
223 Gotta like those tight inner loops! They are the main reason
224 that this algorithm runs much faster than others. */
227 while ((*cmp
) (less
, (void *) left_ptr
, (void *) pivot
))
230 /* The comparison predicate may be buggy */
232 scm_misc_error (NULL
, s_buggy_less
, SCM_EOL
);
235 while ((*cmp
) (less
, (void *) pivot
, (void *) right_ptr
))
238 /* The comparison predicate may be buggy */
240 scm_misc_error (NULL
, s_buggy_less
, SCM_EOL
);
243 if (left_ptr
< right_ptr
)
245 SWAP (left_ptr
, right_ptr
, size
);
249 else if (left_ptr
== right_ptr
)
256 while (left_ptr
<= right_ptr
);
258 /* Set up pointers for next iteration. First determine whether
259 left and right partitions are below the threshold size. If so,
260 ignore one or both. Otherwise, push the larger partition's
261 bounds on the stack and continue sorting the smaller one. */
263 if ((size_t) (right_ptr
- lo
) <= max_thresh
)
265 if ((size_t) (hi
- left_ptr
) <= max_thresh
)
266 /* Ignore both small partitions. */
269 /* Ignore small left partition. */
272 else if ((size_t) (hi
- left_ptr
) <= max_thresh
)
273 /* Ignore small right partition. */
275 else if ((right_ptr
- lo
) > (hi
- left_ptr
))
277 /* Push larger left partition indices. */
278 PUSH (lo
, right_ptr
);
283 /* Push larger right partition indices. */
290 /* Once the BASE_PTR array is partially sorted by quicksort the rest
291 is completely sorted using insertion sort, since this is efficient
292 for partitions below MAX_THRESH size. BASE_PTR points to the beginning
293 of the array to sort, and END_PTR points at the very last element in
294 the array (*not* one beyond it!). */
297 char *const end_ptr
= &base_ptr
[size
* (total_elems
- 1)];
298 char *tmp_ptr
= base_ptr
;
299 char *thresh
= min (end_ptr
, base_ptr
+ max_thresh
);
300 register char *run_ptr
;
302 /* Find smallest element in first threshold and place it at the
303 array's beginning. This is the smallest array element,
304 and the operation speeds up insertion sort's inner loop. */
306 for (run_ptr
= tmp_ptr
+ size
; run_ptr
<= thresh
; run_ptr
+= size
)
307 if ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
310 if (tmp_ptr
!= base_ptr
)
311 SWAP (tmp_ptr
, base_ptr
, size
);
313 /* Insertion sort, running from left-hand-side up to right-hand-side. */
315 run_ptr
= base_ptr
+ size
;
316 while ((run_ptr
+= size
) <= end_ptr
)
318 tmp_ptr
= run_ptr
- size
;
319 while ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
322 /* The comparison predicate may be buggy */
323 if (tmp_ptr
< base_ptr
)
324 scm_misc_error (NULL
, s_buggy_less
, SCM_EOL
);
328 if (tmp_ptr
!= run_ptr
)
332 trav
= run_ptr
+ size
;
333 while (--trav
>= run_ptr
)
338 for (hi
= lo
= trav
; (lo
-= size
) >= tmp_ptr
; hi
= lo
)
348 /* comparison routines */
351 subr2less (SCM less
, const void *a
, const void *b
)
353 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
, *(SCM
*) b
));
357 subr2oless (SCM less
, const void *a
, const void *b
)
359 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
,
365 lsubrless (SCM less
, const void *a
, const void *b
)
367 return SCM_NFALSEP (SCM_SUBRF (less
)
368 (scm_cons (*(SCM
*) a
,
369 scm_cons (*(SCM
*) b
, SCM_EOL
))));
373 closureless (SCM code
, const void *a
, const void *b
)
375 SCM env
= SCM_EXTEND_ENV (SCM_CAR (SCM_CODE (code
)),
376 scm_cons (*(SCM
*) a
,
377 scm_cons (*(SCM
*) b
, SCM_EOL
)),
379 /* Evaluate the closure body */
380 return SCM_NFALSEP (scm_eval_body (SCM_CDR (SCM_CODE (code
)), env
));
384 applyless (SCM less
, const void *a
, const void *b
)
386 return SCM_NFALSEP (scm_apply ((SCM
) less
,
387 scm_cons (*(SCM
*) a
,
388 scm_cons (*(SCM
*) b
, SCM_EOL
)),
393 scm_cmp_function (SCM p
)
395 switch (SCM_TYP7 (p
))
401 case scm_tc7_subr_2o
:
405 case scm_tcs_closures
:
410 } /* scm_cmp_function */
413 /* Question: Is there any need to make this a more general array sort?
414 It is probably enough to manage the vector type. */
415 /* endpos equal as for substring, i.e. endpos is not included. */
416 /* More natural wih length? */
418 SCM_DEFINE (scm_restricted_vector_sort_x
, "restricted-vector-sort!", 4, 0, 0,
419 (SCM vec
, SCM less
, SCM startpos
, SCM endpos
),
421 #define FUNC_NAME s_scm_restricted_vector_sort_x
423 size_t vlen
, spos
, len
, size
= sizeof (SCM
);
426 SCM_VALIDATE_NIM (1,vec
);
427 SCM_VALIDATE_NIM (2,less
);
428 switch (SCM_TYP7 (vec
))
430 case scm_tc7_vector
: /* the only type we manage is vector */
432 #if 0 /* HAVE_ARRAYS */
433 case scm_tc7_ivect
: /* long */
434 case scm_tc7_uvect
: /* unsigned */
435 case scm_tc7_fvect
: /* float */
436 case scm_tc7_dvect
: /* double */
441 vp
= SCM_VELTS (vec
); /* vector pointer */
442 vlen
= SCM_LENGTH (vec
);
444 SCM_VALIDATE_INUM_COPY (3,startpos
,spos
);
445 SCM_ASSERT_RANGE (3,startpos
,(spos
>= 0) && (spos
<= vlen
));
446 SCM_VALIDATE_INUM_RANGE (4,endpos
,0,vlen
+1);
447 len
= SCM_INUM (endpos
) - spos
;
449 quicksort (&vp
[spos
], len
, size
, scm_cmp_function (less
), less
);
450 return SCM_UNSPECIFIED
;
455 /* (sorted? sequence less?)
456 * is true when sequence is a list (x0 x1 ... xm) or a vector #(x0 ... xm)
457 * such that for all 1 <= i <= m,
458 * (not (less? (list-ref list i) (list-ref list (- i 1)))). */
459 SCM_DEFINE (scm_sorted_p
, "sorted?", 2, 0, 0,
460 (SCM items
, SCM less
),
462 #define FUNC_NAME s_scm_sorted_p
464 long len
, j
; /* list/vector length, temp j */
465 SCM item
, rest
; /* rest of items loop variable */
467 cmp_fun_t cmp
= scm_cmp_function (less
);
469 if (SCM_NULLP (items
))
472 SCM_VALIDATE_NIM (1,items
);
473 SCM_VALIDATE_NIM (2,less
);
475 if (SCM_CONSP (items
))
477 len
= scm_ilength (items
); /* also checks that it's a pure list */
478 SCM_ASSERT_RANGE (1,items
,len
>= 0);
482 item
= SCM_CAR (items
);
483 rest
= SCM_CDR (items
);
487 if ((*cmp
) (less
, &SCM_CAR(rest
), &item
))
491 item
= SCM_CAR (rest
);
492 rest
= SCM_CDR (rest
);
500 switch (SCM_TYP7 (items
))
504 vp
= SCM_VELTS (items
); /* vector pointer */
505 len
= SCM_LENGTH (items
);
509 if ((*cmp
) (less
, &vp
[1], vp
))
520 #if 0 /* HAVE_ARRAYS */
521 case scm_tc7_ivect
: /* long */
522 case scm_tc7_uvect
: /* unsigned */
523 case scm_tc7_fvect
: /* float */
524 case scm_tc7_dvect
: /* double */
/* (merge a b less?)
   takes two lists a and b such that (sorted? a less?) and (sorted? b less?)
536 and returns a new list in which the elements of a and b have been stably
537 interleaved so that (sorted? (merge a b less?) less?).
538 Note: this does _not_ accept vectors. */
539 SCM_DEFINE (scm_merge
, "merge", 3, 0, 0,
540 (SCM alist
, SCM blist
, SCM less
),
542 #define FUNC_NAME s_scm_merge
544 long alen
, blen
; /* list lengths */
546 cmp_fun_t cmp
= scm_cmp_function (less
);
547 SCM_VALIDATE_NIM (3,less
);
549 if (SCM_NULLP (alist
))
551 else if (SCM_NULLP (blist
))
555 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (1,alist
,alen
);
556 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (2,blist
,blen
);
557 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
559 build
= scm_cons (SCM_CAR (blist
), SCM_EOL
);
560 blist
= SCM_CDR (blist
);
565 build
= scm_cons (SCM_CAR (alist
), SCM_EOL
);
566 alist
= SCM_CDR (alist
);
570 while ((alen
> 0) && (blen
> 0))
572 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
574 SCM_SETCDR (last
, scm_cons (SCM_CAR (blist
), SCM_EOL
));
575 blist
= SCM_CDR (blist
);
580 SCM_SETCDR (last
, scm_cons (SCM_CAR (alist
), SCM_EOL
));
581 alist
= SCM_CDR (alist
);
584 last
= SCM_CDR (last
);
586 if ((alen
> 0) && (blen
== 0))
587 SCM_SETCDR (last
, alist
);
588 else if ((alen
== 0) && (blen
> 0))
589 SCM_SETCDR (last
, blist
);
597 scm_merge_list_x (SCM alist
, SCM blist
,
598 long alen
, long blen
,
599 cmp_fun_t cmp
, SCM less
)
603 if (SCM_NULLP (alist
))
605 else if (SCM_NULLP (blist
))
609 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
612 blist
= SCM_CDR (blist
);
618 alist
= SCM_CDR (alist
);
622 while ((alen
> 0) && (blen
> 0))
624 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
626 SCM_SETCDR (last
, blist
);
627 blist
= SCM_CDR (blist
);
632 SCM_SETCDR (last
, alist
);
633 alist
= SCM_CDR (alist
);
636 last
= SCM_CDR (last
);
638 if ((alen
> 0) && (blen
== 0))
639 SCM_SETCDR (last
, alist
);
640 else if ((alen
== 0) && (blen
> 0))
641 SCM_SETCDR (last
, blist
);
644 } /* scm_merge_list_x */
646 SCM_DEFINE (scm_merge_x
, "merge!", 3, 0, 0,
647 (SCM alist
, SCM blist
, SCM less
),
649 #define FUNC_NAME s_scm_merge_x
651 long alen
, blen
; /* list lengths */
653 SCM_VALIDATE_NIM (3,less
);
654 if (SCM_NULLP (alist
))
656 else if (SCM_NULLP (blist
))
660 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (1,alist
,alen
);
661 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (2,blist
,blen
);
662 return scm_merge_list_x (alist
, blist
,
664 scm_cmp_function (less
),
670 /* This merge sort algorithm is same as slib's by Richard A. O'Keefe.
671 The algorithm is stable. We also tried to use the algorithm used by
672 scsh's merge-sort but that algorithm showed to not be stable, even
673 though it claimed to be. */
676 scm_merge_list_step (SCM
* seq
,
686 a
= scm_merge_list_step (seq
, cmp
, less
, mid
);
687 b
= scm_merge_list_step (seq
, cmp
, less
, n
- mid
);
688 return scm_merge_list_x (a
, b
, mid
, n
- mid
, cmp
, less
);
693 SCM rest
= SCM_CDR (*seq
);
694 SCM x
= SCM_CAR (*seq
);
695 SCM y
= SCM_CAR (SCM_CDR (*seq
));
696 *seq
= SCM_CDR (rest
);
697 SCM_SETCDR (rest
, SCM_EOL
);
698 if ((*cmp
) (less
, &y
, &x
))
709 SCM_SETCDR (p
, SCM_EOL
);
714 } /* scm_merge_list_step */
717 /* scm_sort_x manages lists and vectors, not stable sort */
718 SCM_DEFINE (scm_sort_x
, "sort!", 2, 0, 0,
719 (SCM items
, SCM less
),
721 #define FUNC_NAME s_scm_sort_x
723 long len
; /* list/vector length */
724 if (SCM_NULLP(items
))
726 SCM_VALIDATE_NIM (1,items
);
727 SCM_VALIDATE_NIM (2,less
);
729 if (SCM_CONSP (items
))
731 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
732 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
734 else if (SCM_VECTORP (items
))
736 len
= SCM_LENGTH (items
);
737 scm_restricted_vector_sort_x (items
,
744 RETURN_SCM_WTA (1,items
);
748 /* scm_sort manages lists and vectors, not stable sort */
750 SCM_DEFINE (scm_sort
, "sort", 2, 0, 0,
751 (SCM items
, SCM less
),
753 #define FUNC_NAME s_scm_sort
755 SCM sortvec
; /* the vector we actually sort */
756 long len
; /* list/vector length */
757 if (SCM_NULLP(items
))
759 SCM_VALIDATE_NIM (1,items
);
760 SCM_VALIDATE_NIM (2,less
);
761 if (SCM_CONSP (items
))
763 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
764 items
= scm_list_copy (items
);
765 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
768 /* support ordinary vectors even if arrays not available? */
769 else if (SCM_VECTORP (items
))
771 len
= SCM_LENGTH (items
);
772 sortvec
= scm_make_uve (len
, scm_array_prototype (items
));
773 scm_array_copy_x (items
, sortvec
);
774 scm_restricted_vector_sort_x (sortvec
,
782 RETURN_SCM_WTA (1,items
);
787 scm_merge_vector_x (void *const vecbase
,
788 void *const tempbase
,
795 register SCM
*vp
= (SCM
*) vecbase
;
796 register SCM
*temp
= (SCM
*) tempbase
;
797 long it
; /* Index for temp vector */
798 long i1
= low
; /* Index for lower vector segment */
799 long i2
= mid
+ 1; /* Index for upper vector segment */
801 /* Copy while both segments contain more characters */
802 for (it
= low
; (i1
<= mid
) && (i2
<= high
); ++it
)
803 if ((*cmp
) (less
, &vp
[i2
], &vp
[i1
]))
808 /* Copy while first segment contains more characters */
810 temp
[it
++] = vp
[i1
++];
812 /* Copy while second segment contains more characters */
814 temp
[it
++] = vp
[i2
++];
816 /* Copy back from temp to vp */
817 for (it
= low
; it
<= high
; ++it
)
819 } /* scm_merge_vector_x */
822 scm_merge_vector_step (void *const vp
,
831 long mid
= (low
+ high
) / 2;
832 scm_merge_vector_step (vp
, temp
, cmp
, less
, low
, mid
);
833 scm_merge_vector_step (vp
, temp
, cmp
, less
, mid
+1, high
);
834 scm_merge_vector_x (vp
, temp
, cmp
, less
, low
, mid
, high
);
836 } /* scm_merge_vector_step */
839 /* stable-sort! manages lists and vectors */
841 SCM_DEFINE (scm_stable_sort_x
, "stable-sort!", 2, 0, 0,
842 (SCM items
, SCM less
),
844 #define FUNC_NAME s_scm_stable_sort_x
846 long len
; /* list/vector length */
848 if (SCM_NULLP (items
))
850 SCM_VALIDATE_NIM (1,items
);
851 SCM_VALIDATE_NIM (2,less
);
852 if (SCM_CONSP (items
))
854 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
855 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
857 else if (SCM_VECTORP (items
))
860 len
= SCM_LENGTH (items
);
861 temp
= malloc (len
* sizeof(SCM
));
862 vp
= SCM_VELTS (items
);
863 scm_merge_vector_step (vp
,
865 scm_cmp_function (less
),
873 RETURN_SCM_WTA (1,items
);
877 /* stable_sort manages lists and vectors */
879 SCM_DEFINE (scm_stable_sort
, "stable-sort", 2, 0, 0,
880 (SCM items
, SCM less
),
882 #define FUNC_NAME s_scm_stable_sort
884 long len
; /* list/vector length */
885 if (SCM_NULLP (items
))
887 SCM_VALIDATE_NIM (1,items
);
888 SCM_VALIDATE_NIM (2,less
);
889 if (SCM_CONSP (items
))
891 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
892 items
= scm_list_copy (items
);
893 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
896 /* support ordinary vectors even if arrays not available? */
897 else if (SCM_VECTORP (items
))
901 len
= SCM_LENGTH (items
);
902 retvec
= scm_make_uve (len
, scm_array_prototype (items
));
903 scm_array_copy_x (items
, retvec
);
904 temp
= malloc (len
* sizeof (SCM
));
905 vp
= SCM_VELTS (retvec
);
906 scm_merge_vector_step (vp
,
908 scm_cmp_function (less
),
917 RETURN_SCM_WTA (1,items
);
922 SCM_DEFINE (scm_sort_list_x
, "sort-list!", 2, 0, 0,
923 (SCM items
, SCM less
),
925 #define FUNC_NAME s_scm_sort_list_x
928 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
929 SCM_VALIDATE_NIM (2,less
);
930 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
935 SCM_DEFINE (scm_sort_list
, "sort-list", 2, 0, 0,
936 (SCM items
, SCM less
),
938 #define FUNC_NAME s_scm_sort_list
941 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
942 SCM_VALIDATE_NIM (2,less
);
943 items
= scm_list_copy (items
);
944 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
953 scm_add_feature ("sort");