1 /* Copyright (C) 1999 Free Software Foundation, Inc.
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2, or (at your option)
5 * any later version.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this software; see the file COPYING. If not, write to
14 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
15 * Boston, MA 02111-1307 USA
17 * As a special exception, the Free Software Foundation gives permission
18 * for additional uses of the text contained in its release of GUILE.
20 * The exception is that, if you link the GUILE library with other files
21 * to produce an executable, this does not by itself cause the
22 * resulting executable to be covered by the GNU General Public License.
23 * Your use of that executable is in no way restricted on account of
24 * linking the GUILE library code into it.
26 * This exception does not however invalidate any other reasons why
27 * the executable file might be covered by the GNU General Public License.
29 * This exception applies only to the code released by the
30 * Free Software Foundation under the name GUILE. If you copy
31 * code from other Free Software Foundation releases into a copy of
32 * GUILE, as the General Public License permits, the exception does
33 * not apply to the code that you add in this way. To avoid misleading
34 * anyone as to the status of such modified files, you must delete
35 * this exception notice from them.
37 * If you write modifications of your own for GUILE, it is your choice
38 * whether to permit this exception to apply to your modifications.
39 * If you do not wish that, delete this exception notice. */
41 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
42 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* Written in December 1998 by Roland Orre <orre@nada.kth.se>
46 * This implements the same sort interface as slib/sort.scm
47 * for lists and vectors where slib defines:
48 * sorted?, merge, merge!, sort, sort!
49 * For scsh compatibility sort-list and sort-list! are also defined.
50 * In cases where a stable-sort is required use stable-sort or
51 * stable-sort!. An additional feature is
52 * (restricted-vector-sort! vector less? startpos endpos)
53 * which allows you to sort part of a vector.
54 * Thanks to Aubrey Jaffer for the slib/sort.scm library.
55 * Thanks to Richard A. O'Keefe (based on Prolog code by D.H.D.Warren)
56 * for the merge sort inspiration.
57 * Thanks to Douglas C. Schmidt (schmidt@ics.uci.edu) for the
58 * quicksort implementation. */
61 /* We need this to get the definitions for HAVE_ALLOCA_H, etc. */
62 #include "scmconfig.h"
64 /* AIX requires this to be the first thing in the file. The #pragma
65 directive is indented so pre-ANSI compilers will ignore it, rather
66 than report an error. */
74 # ifndef alloca /* predefined by HP cc +Olibcalls */
93 /* The routine quicksort was extracted from the GNU C Library qsort.c
94 written by Douglas C. Schmidt (schmidt@ics.uci.edu)
95 and adapted to guile by adding an extra pointer less
96 to quicksort by Roland Orre <orre@nada.kth.se>.
98 The reason to do this instead of using the library function qsort
99 was to avoid dependency of the ANSI-C extensions for local functions
100 and also to avoid obscure pool based solutions.
102 This sorting routine is not much more efficient than the stable
103 version but doesn't consume extra memory.
106 /* Byte-wise swap two items of size SIZE. */
/* NOTE(review): interior lines of this macro were lost in extraction
   (the do-loop opening and the per-byte exchange through a char
   temporary) — restore them from the upstream source before building. */
107 #define SWAP(a, b, size) \
110 register size_t __size = (size); \
111 register char *__a = (a), *__b = (b); \
117 } while (--__size > 0); \
120 /* Discontinue quicksort algorithm when partition gets below this size.
121 This particular magic number was chosen to work best on a Sun 4/260. */
124 /* Stack node declarations used to store unfulfilled partition obligations. */
132 /* The next 4 #defines implement a very fast in-line stack abstraction. */
/* One pending partition per bit of an unsigned long — enough slots to
   sort any array whose length fits in that type. */
133 #define STACK_SIZE (8 * sizeof(unsigned long int))
/* PUSH/POP/STACK_NOT_EMPTY assume locals `stack_node stack[]` and
   `stack_node *top` are in scope at every use site. */
134 #define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
135 #define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
136 #define STACK_NOT_EMPTY (stack < top)
139 /* Order size using quicksort. This implementation incorporates
140 four optimizations discussed in Sedgewick:
142 1. Non-recursive, using an explicit stack of pointer that store the
143 next array partition to sort. To save time, this maximum amount
144 of space required to store an array of MAX_INT is allocated on the
145 stack. Assuming a 32-bit integer, this needs only 32 *
146 sizeof(stack_node) == 136 bits. Pretty cheap, actually.
148 2. Chose the pivot element using a median-of-three decision tree.
149 This reduces the probability of selecting a bad pivot value and
150 eliminates certain extraneous comparisons.
152 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
153 insertion sort to order the MAX_THRESH items within each partition.
154 This is a big win, since insertion sort is faster for small, mostly
155 sorted array segments.
157 4. The larger of the two sub-partitions is always pushed onto the
158 stack first, with the algorithm then concentrating on the
159 smaller partition. This *guarantees* no more than log (n)
160 stack size is needed (actually O(1) in this case)! */
162 typedef int (*cmp_fun_t
) (SCM less
,
166 static const char s_buggy_less
[] = "buggy less predicate used when sorting";
169 quicksort (void *const pbase
,
175 register char *base_ptr
= (char *) pbase
;
177 /* Allocating SIZE bytes for a pivot buffer facilitates a better
178 algorithm below since we can do comparisons directly on the pivot. */
179 char *pivot_buffer
= (char *) alloca (size
);
180 const size_t max_thresh
= MAX_THRESH
* size
;
182 if (total_elems
== 0)
183 /* Avoid lossage with unsigned arithmetic below. */
186 if (total_elems
> MAX_THRESH
)
189 char *hi
= &lo
[size
* (total_elems
- 1)];
190 /* Largest size needed for 32-bit int!!! */
191 stack_node stack
[STACK_SIZE
];
192 stack_node
*top
= stack
+ 1;
194 while (STACK_NOT_EMPTY
)
199 char *pivot
= pivot_buffer
;
201 /* Select median value from among LO, MID, and HI. Rearrange
202 LO and HI so the three values are sorted. This lowers the
203 probability of picking a pathological pivot value and
204 skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
206 char *mid
= lo
+ size
* ((hi
- lo
) / size
>> 1);
208 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
209 SWAP (mid
, lo
, size
);
210 if ((*cmp
) (less
, (void *) hi
, (void *) mid
))
211 SWAP (mid
, hi
, size
);
214 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
215 SWAP (mid
, lo
, size
);
217 memcpy (pivot
, mid
, size
);
218 pivot
= pivot_buffer
;
220 left_ptr
= lo
+ size
;
221 right_ptr
= hi
- size
;
223 /* Here's the famous ``collapse the walls'' section of quicksort.
224 Gotta like those tight inner loops! They are the main reason
225 that this algorithm runs much faster than others. */
228 while ((*cmp
) (less
, (void *) left_ptr
, (void *) pivot
))
231 /* The comparison predicate may be buggy */
233 scm_misc_error (NULL
, s_buggy_less
, SCM_EOL
);
236 while ((*cmp
) (less
, (void *) pivot
, (void *) right_ptr
))
239 /* The comparison predicate may be buggy */
241 scm_misc_error (NULL
, s_buggy_less
, SCM_EOL
);
244 if (left_ptr
< right_ptr
)
246 SWAP (left_ptr
, right_ptr
, size
);
250 else if (left_ptr
== right_ptr
)
257 while (left_ptr
<= right_ptr
);
259 /* Set up pointers for next iteration. First determine whether
260 left and right partitions are below the threshold size. If so,
261 ignore one or both. Otherwise, push the larger partition's
262 bounds on the stack and continue sorting the smaller one. */
264 if ((size_t) (right_ptr
- lo
) <= max_thresh
)
266 if ((size_t) (hi
- left_ptr
) <= max_thresh
)
267 /* Ignore both small partitions. */
270 /* Ignore small left partition. */
273 else if ((size_t) (hi
- left_ptr
) <= max_thresh
)
274 /* Ignore small right partition. */
276 else if ((right_ptr
- lo
) > (hi
- left_ptr
))
278 /* Push larger left partition indices. */
279 PUSH (lo
, right_ptr
);
284 /* Push larger right partition indices. */
291 /* Once the BASE_PTR array is partially sorted by quicksort the rest
292 is completely sorted using insertion sort, since this is efficient
293 for partitions below MAX_THRESH size. BASE_PTR points to the beginning
294 of the array to sort, and END_PTR points at the very last element in
295 the array (*not* one beyond it!). */
298 char *const end_ptr
= &base_ptr
[size
* (total_elems
- 1)];
299 char *tmp_ptr
= base_ptr
;
300 char *thresh
= min (end_ptr
, base_ptr
+ max_thresh
);
301 register char *run_ptr
;
303 /* Find smallest element in first threshold and place it at the
304 array's beginning. This is the smallest array element,
305 and the operation speeds up insertion sort's inner loop. */
307 for (run_ptr
= tmp_ptr
+ size
; run_ptr
<= thresh
; run_ptr
+= size
)
308 if ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
311 if (tmp_ptr
!= base_ptr
)
312 SWAP (tmp_ptr
, base_ptr
, size
);
314 /* Insertion sort, running from left-hand-side up to right-hand-side. */
316 run_ptr
= base_ptr
+ size
;
317 while ((run_ptr
+= size
) <= end_ptr
)
319 tmp_ptr
= run_ptr
- size
;
320 while ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
323 /* The comparison predicate may be buggy */
324 if (tmp_ptr
< base_ptr
)
325 scm_misc_error (NULL
, s_buggy_less
, SCM_EOL
);
329 if (tmp_ptr
!= run_ptr
)
333 trav
= run_ptr
+ size
;
334 while (--trav
>= run_ptr
)
339 for (hi
= lo
= trav
; (lo
-= size
) >= tmp_ptr
; hi
= lo
)
349 /* comparison routines */
352 subr2less (SCM less
, const void *a
, const void *b
)
354 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
, *(SCM
*) b
));
358 subr2oless (SCM less
, const void *a
, const void *b
)
360 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
,
366 lsubrless (SCM less
, const void *a
, const void *b
)
368 return SCM_NFALSEP (SCM_SUBRF (less
)
369 (scm_cons (*(SCM
*) a
,
370 scm_cons (*(SCM
*) b
, SCM_EOL
))));
374 closureless (SCM code
, const void *a
, const void *b
)
376 SCM env
= SCM_EXTEND_ENV (SCM_CAR (SCM_CODE (code
)),
377 scm_cons (*(SCM
*) a
,
378 scm_cons (*(SCM
*) b
, SCM_EOL
)),
380 /* Evaluate the closure body */
381 return SCM_NFALSEP (scm_eval_body (SCM_CDR (SCM_CODE (code
)), env
));
385 applyless (SCM less
, const void *a
, const void *b
)
387 return SCM_NFALSEP (scm_apply ((SCM
) less
,
388 scm_cons (*(SCM
*) a
,
389 scm_cons (*(SCM
*) b
, SCM_EOL
)),
394 scm_cmp_function (SCM p
)
396 switch (SCM_TYP7 (p
))
402 case scm_tc7_subr_2o
:
406 case scm_tcs_closures
:
411 } /* scm_cmp_function */
414 /* Question: Is there any need to make this a more general array sort?
415 It is probably enough to manage the vector type. */
416 /* endpos equal as for substring, i.e. endpos is not included. */
417 /* More natural with length? */
419 SCM_DEFINE (scm_restricted_vector_sort_x
, "restricted-vector-sort!", 4, 0, 0,
420 (SCM vec
, SCM less
, SCM startpos
, SCM endpos
),
422 #define FUNC_NAME s_scm_restricted_vector_sort_x
424 size_t vlen
, spos
, len
, size
= sizeof (SCM
);
427 SCM_VALIDATE_NIM (1,vec
);
428 SCM_VALIDATE_NIM (2,less
);
429 switch (SCM_TYP7 (vec
))
431 case scm_tc7_vector
: /* the only type we manage is vector */
433 #if 0 /* HAVE_ARRAYS */
434 case scm_tc7_ivect
: /* long */
435 case scm_tc7_uvect
: /* unsigned */
436 case scm_tc7_fvect
: /* float */
437 case scm_tc7_dvect
: /* double */
442 vp
= SCM_VELTS (vec
); /* vector pointer */
443 vlen
= SCM_LENGTH (vec
);
445 SCM_VALIDATE_INUM_COPY (3,startpos
,spos
);
446 SCM_ASSERT_RANGE (3,startpos
,(spos
>= 0) && (spos
<= vlen
));
447 SCM_VALIDATE_INUM_RANGE (4,endpos
,0,vlen
+1);
448 len
= SCM_INUM (endpos
) - spos
;
450 quicksort (&vp
[spos
], len
, size
, scm_cmp_function (less
), less
);
451 return SCM_UNSPECIFIED
;
456 /* (sorted? sequence less?)
457 * is true when sequence is a list (x0 x1 ... xm) or a vector #(x0 ... xm)
458 * such that for all 1 <= i <= m,
459 * (not (less? (list-ref list i) (list-ref list (- i 1)))). */
460 SCM_DEFINE (scm_sorted_p
, "sorted?", 2, 0, 0,
461 (SCM items
, SCM less
),
463 #define FUNC_NAME s_scm_sorted_p
465 long len
, j
; /* list/vector length, temp j */
466 SCM item
, rest
; /* rest of items loop variable */
468 cmp_fun_t cmp
= scm_cmp_function (less
);
470 if (SCM_NULLP (items
))
473 SCM_VALIDATE_NIM (1,items
);
474 SCM_VALIDATE_NIM (2,less
);
476 if (SCM_CONSP (items
))
478 len
= scm_ilength (items
); /* also checks that it's a pure list */
479 SCM_ASSERT_RANGE (1,items
,len
>= 0);
483 item
= SCM_CAR (items
);
484 rest
= SCM_CDR (items
);
488 if ((*cmp
) (less
, &SCM_CAR(rest
), &item
))
492 item
= SCM_CAR (rest
);
493 rest
= SCM_CDR (rest
);
501 switch (SCM_TYP7 (items
))
505 vp
= SCM_VELTS (items
); /* vector pointer */
506 len
= SCM_LENGTH (items
);
510 if ((*cmp
) (less
, &vp
[1], vp
))
521 #if 0 /* HAVE_ARRAYS */
522 case scm_tc7_ivect
: /* long */
523 case scm_tc7_uvect
: /* unsigned */
524 case scm_tc7_fvect
: /* float */
525 case scm_tc7_dvect
: /* double */
536 takes two lists a and b such that (sorted? a less?) and (sorted? b less?)
537 and returns a new list in which the elements of a and b have been stably
538 interleaved so that (sorted? (merge a b less?) less?).
539 Note: this does _not_ accept vectors. */
540 SCM_DEFINE (scm_merge
, "merge", 3, 0, 0,
541 (SCM alist
, SCM blist
, SCM less
),
543 #define FUNC_NAME s_scm_merge
545 long alen
, blen
; /* list lengths */
547 cmp_fun_t cmp
= scm_cmp_function (less
);
548 SCM_VALIDATE_NIM (3,less
);
550 if (SCM_NULLP (alist
))
552 else if (SCM_NULLP (blist
))
556 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (1,alist
,alen
);
557 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (2,blist
,blen
);
558 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
560 build
= scm_cons (SCM_CAR (blist
), SCM_EOL
);
561 blist
= SCM_CDR (blist
);
566 build
= scm_cons (SCM_CAR (alist
), SCM_EOL
);
567 alist
= SCM_CDR (alist
);
571 while ((alen
> 0) && (blen
> 0))
573 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
575 SCM_SETCDR (last
, scm_cons (SCM_CAR (blist
), SCM_EOL
));
576 blist
= SCM_CDR (blist
);
581 SCM_SETCDR (last
, scm_cons (SCM_CAR (alist
), SCM_EOL
));
582 alist
= SCM_CDR (alist
);
585 last
= SCM_CDR (last
);
587 if ((alen
> 0) && (blen
== 0))
588 SCM_SETCDR (last
, alist
);
589 else if ((alen
== 0) && (blen
> 0))
590 SCM_SETCDR (last
, blist
);
598 scm_merge_list_x (SCM alist
, SCM blist
,
599 long alen
, long blen
,
600 cmp_fun_t cmp
, SCM less
)
604 if (SCM_NULLP (alist
))
606 else if (SCM_NULLP (blist
))
610 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
613 blist
= SCM_CDR (blist
);
619 alist
= SCM_CDR (alist
);
623 while ((alen
> 0) && (blen
> 0))
625 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
627 SCM_SETCDR (last
, blist
);
628 blist
= SCM_CDR (blist
);
633 SCM_SETCDR (last
, alist
);
634 alist
= SCM_CDR (alist
);
637 last
= SCM_CDR (last
);
639 if ((alen
> 0) && (blen
== 0))
640 SCM_SETCDR (last
, alist
);
641 else if ((alen
== 0) && (blen
> 0))
642 SCM_SETCDR (last
, blist
);
645 } /* scm_merge_list_x */
647 SCM_DEFINE (scm_merge_x
, "merge!", 3, 0, 0,
648 (SCM alist
, SCM blist
, SCM less
),
650 #define FUNC_NAME s_scm_merge_x
652 long alen
, blen
; /* list lengths */
654 SCM_VALIDATE_NIM (3,less
);
655 if (SCM_NULLP (alist
))
657 else if (SCM_NULLP (blist
))
661 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (1,alist
,alen
);
662 SCM_VALIDATE_NONEMPTYLIST_COPYLEN (2,blist
,blen
);
663 return scm_merge_list_x (alist
, blist
,
665 scm_cmp_function (less
),
671 /* This merge sort algorithm is same as slib's by Richard A. O'Keefe.
672 The algorithm is stable. We also tried to use the algorithm used by
673 scsh's merge-sort but that algorithm showed to not be stable, even
674 though it claimed to be.
677 scm_merge_list_step (SCM
* seq
,
687 a
= scm_merge_list_step (seq
, cmp
, less
, mid
);
688 b
= scm_merge_list_step (seq
, cmp
, less
, n
- mid
);
689 return scm_merge_list_x (a
, b
, mid
, n
- mid
, cmp
, less
);
694 SCM rest
= SCM_CDR (*seq
);
695 SCM x
= SCM_CAR (*seq
);
696 SCM y
= SCM_CAR (SCM_CDR (*seq
));
697 *seq
= SCM_CDR (rest
);
698 SCM_SETCDR (rest
, SCM_EOL
);
699 if ((*cmp
) (less
, &y
, &x
))
710 SCM_SETCDR (p
, SCM_EOL
);
715 } /* scm_merge_list_step */
718 /* scm_sort_x manages lists and vectors, not stable sort */
719 SCM_DEFINE (scm_sort_x
, "sort!", 2, 0, 0,
720 (SCM items
, SCM less
),
722 #define FUNC_NAME s_scm_sort_x
724 long len
; /* list/vector length */
725 if (SCM_NULLP(items
))
727 SCM_VALIDATE_NIM (1,items
);
728 SCM_VALIDATE_NIM (2,less
);
730 if (SCM_CONSP (items
))
732 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
733 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
735 else if (SCM_VECTORP (items
))
737 len
= SCM_LENGTH (items
);
738 scm_restricted_vector_sort_x (items
,
745 RETURN_SCM_WTA (1,items
);
749 /* scm_sort manages lists and vectors, not stable sort */
751 SCM_DEFINE (scm_sort
, "sort", 2, 0, 0,
752 (SCM items
, SCM less
),
754 #define FUNC_NAME s_scm_sort
756 SCM sortvec
; /* the vector we actually sort */
757 long len
; /* list/vector length */
758 if (SCM_NULLP(items
))
760 SCM_VALIDATE_NIM (1,items
);
761 SCM_VALIDATE_NIM (2,less
);
762 if (SCM_CONSP (items
))
764 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
765 items
= scm_list_copy (items
);
766 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
769 /* support ordinary vectors even if arrays not available? */
770 else if (SCM_VECTORP (items
))
772 len
= SCM_LENGTH (items
);
773 sortvec
= scm_make_uve (len
, scm_array_prototype (items
));
774 scm_array_copy_x (items
, sortvec
);
775 scm_restricted_vector_sort_x (sortvec
,
783 RETURN_SCM_WTA (1,items
);
788 scm_merge_vector_x (void *const vecbase
,
789 void *const tempbase
,
796 register SCM
*vp
= (SCM
*) vecbase
;
797 register SCM
*temp
= (SCM
*) tempbase
;
798 long it
; /* Index for temp vector */
799 long i1
= low
; /* Index for lower vector segment */
800 long i2
= mid
+ 1; /* Index for upper vector segment */
802 /* Copy while both segments contain more characters */
803 for (it
= low
; (i1
<= mid
) && (i2
<= high
); ++it
)
804 if ((*cmp
) (less
, &vp
[i2
], &vp
[i1
]))
809 /* Copy while first segment contains more characters */
811 temp
[it
++] = vp
[i1
++];
813 /* Copy while second segment contains more characters */
815 temp
[it
++] = vp
[i2
++];
817 /* Copy back from temp to vp */
818 for (it
= low
; it
<= high
; ++it
)
820 } /* scm_merge_vector_x */
823 scm_merge_vector_step (void *const vp
,
832 long mid
= (low
+ high
) / 2;
833 scm_merge_vector_step (vp
, temp
, cmp
, less
, low
, mid
);
834 scm_merge_vector_step (vp
, temp
, cmp
, less
, mid
+1, high
);
835 scm_merge_vector_x (vp
, temp
, cmp
, less
, low
, mid
, high
);
837 } /* scm_merge_vector_step */
840 /* stable-sort! manages lists and vectors */
842 SCM_DEFINE (scm_stable_sort_x
, "stable-sort!", 2, 0, 0,
843 (SCM items
, SCM less
),
845 #define FUNC_NAME s_scm_stable_sort_x
847 long len
; /* list/vector length */
849 if (SCM_NULLP (items
))
851 SCM_VALIDATE_NIM (1,items
);
852 SCM_VALIDATE_NIM (2,less
);
853 if (SCM_CONSP (items
))
855 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
856 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
858 else if (SCM_VECTORP (items
))
861 len
= SCM_LENGTH (items
);
862 temp
= malloc (len
* sizeof(SCM
));
863 vp
= SCM_VELTS (items
);
864 scm_merge_vector_step (vp
,
866 scm_cmp_function (less
),
874 RETURN_SCM_WTA (1,items
);
878 /* stable_sort manages lists and vectors */
880 SCM_DEFINE (scm_stable_sort
, "stable-sort", 2, 0, 0,
881 (SCM items
, SCM less
),
883 #define FUNC_NAME s_scm_stable_sort
885 long len
; /* list/vector length */
886 if (SCM_NULLP (items
))
888 SCM_VALIDATE_NIM (1,items
);
889 SCM_VALIDATE_NIM (2,less
);
890 if (SCM_CONSP (items
))
892 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
893 items
= scm_list_copy (items
);
894 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
897 /* support ordinary vectors even if arrays not available? */
898 else if (SCM_VECTORP (items
))
902 len
= SCM_LENGTH (items
);
903 retvec
= scm_make_uve (len
, scm_array_prototype (items
));
904 scm_array_copy_x (items
, retvec
);
905 temp
= malloc (len
* sizeof (SCM
));
906 vp
= SCM_VELTS (retvec
);
907 scm_merge_vector_step (vp
,
909 scm_cmp_function (less
),
918 RETURN_SCM_WTA (1,items
);
923 SCM_DEFINE (scm_sort_list_x
, "sort-list!", 2, 0, 0,
924 (SCM items
, SCM less
),
926 #define FUNC_NAME s_scm_sort_list_x
929 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
930 SCM_VALIDATE_NIM (2,less
);
931 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
936 SCM_DEFINE (scm_sort_list
, "sort-list", 2, 0, 0,
937 (SCM items
, SCM less
),
939 #define FUNC_NAME s_scm_sort_list
942 SCM_VALIDATE_LIST_COPYLEN (1,items
,len
);
943 SCM_VALIDATE_NIM (2,less
);
944 items
= scm_list_copy (items
);
945 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
954 scm_add_feature ("sort");