/* Copyright (C) 1999 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */
/* Written in December 1998 by Roland Orre <orre@nada.kth.se>
 * This implements the same sort interface as slib/sort.scm
 * for lists and vectors where slib defines:
 * sorted?, merge, merge!, sort, sort!
 * For scsh compatibility sort-list and sort-list! are also defined.
 * In cases where a stable-sort is required use stable-sort or
 * stable-sort!.  An additional feature is
 * (restricted-vector-sort! vector less? startpos endpos)
 * which allows you to sort part of a vector.
 * Thanks to Aubrey Jaffer for the slib/sort.scm library.
 * Thanks to Richard A. O'Keefe (based on Prolog code by D.H.D.Warren)
 * for the merge sort inspiration.
 * Thanks to Douglas C. Schmidt (schmidt@ics.uci.edu) for the
 * quicksort code.  */
57 /* We need this to get the definitions for HAVE_ALLOCA_H, etc. */
58 #include "scmconfig.h"
60 /* AIX requires this to be the first thing in the file. The #pragma
61 directive is indented so pre-ANSI compilers will ignore it, rather
70 # ifndef alloca /* predefined by HP cc +Olibcalls */
/* The routine quicksort was extracted from the GNU C Library qsort.c
   written by Douglas C. Schmidt (schmidt@ics.uci.edu)
   and adapted to guile by adding an extra pointer less
   to quicksort by Roland Orre <orre@nada.kth.se>.

   The reason to do this instead of using the library function qsort
   was to avoid dependency of the ANSI-C extensions for local functions
   and also to avoid obscure pool based solutions.

   This sorting routine is not much more efficient than the stable
   version but doesn't consume extra memory.  */
/* Byte-wise swap two items of size SIZE. */
#define SWAP(a, b, size)                                                      \
  do                                                                          \
    {                                                                         \
      register size_t __size = (size);                                        \
      register char *__a = (a), *__b = (b);                                   \
      do                                                                      \
        {                                                                     \
          char __tmp = *__a;                                                  \
          *__a++ = *__b;                                                      \
          *__b++ = __tmp;                                                     \
        } while (--__size > 0);                                               \
    } while (0)

/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4

/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
  {
    char *lo;
    char *hi;
  } stack_node;

/* The next 4 #defines implement a very fast in-line stack abstraction. */
#define STACK_SIZE      (8 * sizeof (unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high)  ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)
/* Order size using quicksort.  This implementation incorporates
   four optimizations discussed in Sedgewick:

   1. Non-recursive, using an explicit stack of pointers that store the
   next array partition to sort.  To save time, this maximum amount
   of space required to store an array of MAX_INT is allocated on the
   stack.  Assuming a 32-bit integer, this needs only 32 *
   sizeof(stack_node) == 136 bits.  Pretty cheap, actually.

   2. Chose the pivot element using a median-of-three decision tree.
   This reduces the probability of selecting a bad pivot value and
   eliminates certain extraneous comparisons.

   3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
   insertion sort to order the MAX_THRESH items within each partition.
   This is a big win, since insertion sort is faster for small, mostly
   sorted array segments.

   4. The larger of the two sub-partitions is always pushed onto the
   stack first, with the algorithm then concentrating on the
   smaller partition.  This *guarantees* no more than log (n)
   stack size is needed (actually O(1) in this case)!  */
156 typedef int (*cmp_fun_t
) (SCM less
,
160 static const char s_buggy_less
[] = "buggy less predicate used when sorting";
163 quicksort (void *const pbase
,
169 register char *base_ptr
= (char *) pbase
;
171 /* Allocating SIZE bytes for a pivot buffer facilitates a better
172 algorithm below since we can do comparisons directly on the pivot. */
173 char *pivot_buffer
= (char *) alloca (size
);
174 const size_t max_thresh
= MAX_THRESH
* size
;
176 if (total_elems
== 0)
177 /* Avoid lossage with unsigned arithmetic below. */
180 if (total_elems
> MAX_THRESH
)
183 char *hi
= &lo
[size
* (total_elems
- 1)];
184 /* Largest size needed for 32-bit int!!! */
185 stack_node stack
[STACK_SIZE
];
186 stack_node
*top
= stack
+ 1;
188 while (STACK_NOT_EMPTY
)
193 char *pivot
= pivot_buffer
;
195 /* Select median value from among LO, MID, and HI. Rearrange
196 LO and HI so the three values are sorted. This lowers the
197 probability of picking a pathological pivot value and
198 skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
200 char *mid
= lo
+ size
* ((hi
- lo
) / size
>> 1);
202 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
203 SWAP (mid
, lo
, size
);
204 if ((*cmp
) (less
, (void *) hi
, (void *) mid
))
205 SWAP (mid
, hi
, size
);
208 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
209 SWAP (mid
, lo
, size
);
211 memcpy (pivot
, mid
, size
);
212 pivot
= pivot_buffer
;
214 left_ptr
= lo
+ size
;
215 right_ptr
= hi
- size
;
217 /* Here's the famous ``collapse the walls'' section of quicksort.
218 Gotta like those tight inner loops! They are the main reason
219 that this algorithm runs much faster than others. */
222 while ((*cmp
) (less
, (void *) left_ptr
, (void *) pivot
))
225 /* The comparison predicate may be buggy */
227 scm_misc_error (0, s_buggy_less
, SCM_EOL
);
230 while ((*cmp
) (less
, (void *) pivot
, (void *) right_ptr
))
233 /* The comparison predicate may be buggy */
235 scm_misc_error (0, s_buggy_less
, SCM_EOL
);
238 if (left_ptr
< right_ptr
)
240 SWAP (left_ptr
, right_ptr
, size
);
244 else if (left_ptr
== right_ptr
)
251 while (left_ptr
<= right_ptr
);
253 /* Set up pointers for next iteration. First determine whether
254 left and right partitions are below the threshold size. If so,
255 ignore one or both. Otherwise, push the larger partition's
256 bounds on the stack and continue sorting the smaller one. */
258 if ((size_t) (right_ptr
- lo
) <= max_thresh
)
260 if ((size_t) (hi
- left_ptr
) <= max_thresh
)
261 /* Ignore both small partitions. */
264 /* Ignore small left partition. */
267 else if ((size_t) (hi
- left_ptr
) <= max_thresh
)
268 /* Ignore small right partition. */
270 else if ((right_ptr
- lo
) > (hi
- left_ptr
))
272 /* Push larger left partition indices. */
273 PUSH (lo
, right_ptr
);
278 /* Push larger right partition indices. */
285 /* Once the BASE_PTR array is partially sorted by quicksort the rest
286 is completely sorted using insertion sort, since this is efficient
287 for partitions below MAX_THRESH size. BASE_PTR points to the beginning
288 of the array to sort, and END_PTR points at the very last element in
289 the array (*not* one beyond it!). */
292 char *const end_ptr
= &base_ptr
[size
* (total_elems
- 1)];
293 char *tmp_ptr
= base_ptr
;
294 char *thresh
= min (end_ptr
, base_ptr
+ max_thresh
);
295 register char *run_ptr
;
297 /* Find smallest element in first threshold and place it at the
298 array's beginning. This is the smallest array element,
299 and the operation speeds up insertion sort's inner loop. */
301 for (run_ptr
= tmp_ptr
+ size
; run_ptr
<= thresh
; run_ptr
+= size
)
302 if ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
305 if (tmp_ptr
!= base_ptr
)
306 SWAP (tmp_ptr
, base_ptr
, size
);
308 /* Insertion sort, running from left-hand-side up to right-hand-side. */
310 run_ptr
= base_ptr
+ size
;
311 while ((run_ptr
+= size
) <= end_ptr
)
313 tmp_ptr
= run_ptr
- size
;
314 while ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
317 /* The comparison predicate may be buggy */
318 if (tmp_ptr
< base_ptr
)
319 scm_misc_error (0, s_buggy_less
, SCM_EOL
);
323 if (tmp_ptr
!= run_ptr
)
327 trav
= run_ptr
+ size
;
328 while (--trav
>= run_ptr
)
333 for (hi
= lo
= trav
; (lo
-= size
) >= tmp_ptr
; hi
= lo
)
343 /* comparison routines */
346 subr2less (SCM less
, const void *a
, const void *b
)
348 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
, *(SCM
*) b
));
352 subr2oless (SCM less
, const void *a
, const void *b
)
354 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
,
360 lsubrless (SCM less
, const void *a
, const void *b
)
362 return SCM_NFALSEP (SCM_SUBRF (less
)
363 (scm_cons (*(SCM
*) a
,
364 scm_cons (*(SCM
*) b
, SCM_EOL
))));
368 closureless (SCM code
, const void *a
, const void *b
)
370 SCM env
= SCM_EXTEND_ENV (SCM_CAR (SCM_CODE (code
)),
371 scm_cons (*(SCM
*) a
,
372 scm_cons (*(SCM
*) b
, SCM_EOL
)),
374 /* Evaluate the closure body */
375 return SCM_NFALSEP (scm_eval_body (SCM_CDR (SCM_CODE (code
)), env
));
379 applyless (SCM less
, const void *a
, const void *b
)
381 return SCM_NFALSEP (scm_apply ((SCM
) less
,
382 scm_cons (*(SCM
*) a
,
383 scm_cons (*(SCM
*) b
, SCM_EOL
)),
388 scm_cmp_function (SCM p
)
390 switch (SCM_TYP7 (p
))
396 case scm_tc7_subr_2o
:
400 case scm_tcs_closures
:
405 } /* scm_cmp_function */
407 SCM_PROC (s_restricted_vector_sort_x
, "restricted-vector-sort!", 4, 0, 0, scm_restricted_vector_sort_x
);
409 /* Question: Is there any need to make this a more general array sort?
410 It is probably enough to manage the vector type. */
411 /* endpos equal as for substring, i.e. endpos is not included. */
412 /* More natural wih length? */
414 scm_restricted_vector_sort_x (SCM vec
, SCM less
, SCM startpos
, SCM endpos
)
416 size_t vlen
, spos
, len
, size
= sizeof (SCM
);
419 SCM_ASSERT (SCM_NIMP (vec
), vec
, SCM_ARG1
, s_restricted_vector_sort_x
);
420 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_restricted_vector_sort_x
);
421 switch (SCM_TYP7 (vec
))
423 case scm_tc7_vector
: /* the only type we manage is vector */
425 case scm_tc7_ivect
: /* long */
426 case scm_tc7_uvect
: /* unsigned */
427 case scm_tc7_fvect
: /* float */
428 case scm_tc7_dvect
: /* double */
430 scm_wta (vec
, (char *) SCM_ARG1
, s_restricted_vector_sort_x
);
432 vp
= SCM_VELTS (vec
); /* vector pointer */
433 vlen
= SCM_LENGTH (vec
);
435 SCM_ASSERT (SCM_INUMP(startpos
),
436 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
437 spos
= SCM_INUM (startpos
);
438 SCM_ASSERT ((spos
>= 0) && (spos
<= vlen
),
439 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
440 SCM_ASSERT ((SCM_INUMP (endpos
)) && (SCM_INUM (endpos
) <= vlen
),
441 endpos
, SCM_ARG4
, s_restricted_vector_sort_x
);
442 len
= SCM_INUM (endpos
) - spos
;
444 quicksort (&vp
[spos
], len
, size
, scm_cmp_function (less
), less
);
445 return SCM_UNSPECIFIED
;
447 } /* scm_restricted_vector_sort_x */
449 /* (sorted? sequence less?)
450 * is true when sequence is a list (x0 x1 ... xm) or a vector #(x0 ... xm)
451 * such that for all 1 <= i <= m,
452 * (not (less? (list-ref list i) (list-ref list (- i 1)))). */
453 SCM_PROC (s_sorted_p
, "sorted?", 2, 0, 0, scm_sorted_p
);
456 scm_sorted_p (SCM items
, SCM less
)
458 long len
, j
; /* list/vector length, temp j */
459 SCM item
, rest
; /* rest of items loop variable */
461 cmp_fun_t cmp
= scm_cmp_function (less
);
463 if (SCM_NULLP (items
))
465 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sorted_p
);
466 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sorted_p
);
468 if (SCM_CONSP (items
))
470 len
= scm_ilength (items
); /* also checks that it's a pure list */
471 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sorted_p
);
475 item
= SCM_CAR (items
);
476 rest
= SCM_CDR (items
);
480 if ((*cmp
) (less
, &SCM_CAR(rest
), &item
))
484 item
= SCM_CAR (rest
);
485 rest
= SCM_CDR (rest
);
493 switch (SCM_TYP7 (items
))
497 vp
= SCM_VELTS (items
); /* vector pointer */
498 len
= SCM_LENGTH (items
);
502 if ((*cmp
) (less
, &vp
[1], vp
))
513 case scm_tc7_ivect
: /* long */
514 case scm_tc7_uvect
: /* unsigned */
515 case scm_tc7_fvect
: /* float */
516 case scm_tc7_dvect
: /* double */
518 scm_wta (items
, (char *) SCM_ARG1
, s_sorted_p
);
525 takes two lists a and b such that (sorted? a less?) and (sorted? b less?)
526 and returns a new list in which the elements of a and b have been stably
527 interleaved so that (sorted? (merge a b less?) less?).
528 Note: this does _not_ accept vectors. */
529 SCM_PROC (s_merge
, "merge", 3, 0, 0, scm_merge
);
532 scm_merge (SCM alist
, SCM blist
, SCM less
)
534 long alen
, blen
; /* list lengths */
536 cmp_fun_t cmp
= scm_cmp_function (less
);
537 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge
);
539 if (SCM_NULLP (alist
))
541 else if (SCM_NULLP (blist
))
545 alen
= scm_ilength (alist
); /* checks that it's a pure list */
546 blen
= scm_ilength (blist
); /* checks that it's a pure list */
547 SCM_ASSERT (alen
> 0, alist
, SCM_ARG1
, s_merge
);
548 SCM_ASSERT (blen
> 0, blist
, SCM_ARG2
, s_merge
);
549 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
551 build
= scm_cons (SCM_CAR (blist
), SCM_EOL
);
552 blist
= SCM_CDR (blist
);
557 build
= scm_cons (SCM_CAR (alist
), SCM_EOL
);
558 alist
= SCM_CDR (alist
);
562 while ((alen
> 0) && (blen
> 0))
564 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
566 SCM_SETCDR (last
, scm_cons (SCM_CAR (blist
), SCM_EOL
));
567 blist
= SCM_CDR (blist
);
572 SCM_SETCDR (last
, scm_cons (SCM_CAR (alist
), SCM_EOL
));
573 alist
= SCM_CDR (alist
);
576 last
= SCM_CDR (last
);
578 if ((alen
> 0) && (blen
== 0))
579 SCM_SETCDR (last
, alist
);
580 else if ((alen
== 0) && (blen
> 0))
581 SCM_SETCDR (last
, blist
);
587 scm_merge_list_x (SCM alist
, SCM blist
,
588 long alen
, long blen
,
589 cmp_fun_t cmp
, SCM less
)
593 if (SCM_NULLP (alist
))
595 else if (SCM_NULLP (blist
))
599 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
602 blist
= SCM_CDR (blist
);
608 alist
= SCM_CDR (alist
);
612 while ((alen
> 0) && (blen
> 0))
614 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
616 SCM_SETCDR (last
, blist
);
617 blist
= SCM_CDR (blist
);
622 SCM_SETCDR (last
, alist
);
623 alist
= SCM_CDR (alist
);
626 last
= SCM_CDR (last
);
628 if ((alen
> 0) && (blen
== 0))
629 SCM_SETCDR (last
, alist
);
630 else if ((alen
== 0) && (blen
> 0))
631 SCM_SETCDR (last
, blist
);
634 } /* scm_merge_list_x */
636 SCM_PROC (s_merge_x
, "merge!", 3, 0, 0, scm_merge_x
);
639 scm_merge_x (SCM alist
, SCM blist
, SCM less
)
641 long alen
, blen
; /* list lengths */
643 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge_x
);
644 if (SCM_NULLP (alist
))
646 else if (SCM_NULLP (blist
))
650 alen
= scm_ilength (alist
); /* checks that it's a pure list */
651 blen
= scm_ilength (blist
); /* checks that it's a pure list */
652 SCM_ASSERT (alen
>= 0, alist
, SCM_ARG1
, s_merge
);
653 SCM_ASSERT (blen
>= 0, blist
, SCM_ARG2
, s_merge
);
654 return scm_merge_list_x (alist
, blist
,
656 scm_cmp_function (less
),
661 /* This merge sort algorithm is same as slib's by Richard A. O'Keefe.
662 The algorithm is stable. We also tried to use the algorithm used by
663 scsh's merge-sort but that algorithm showed to not be stable, even
664 though it claimed to be.
667 scm_merge_list_step (SCM
* seq
,
677 a
= scm_merge_list_step (seq
, cmp
, less
, mid
);
678 b
= scm_merge_list_step (seq
, cmp
, less
, n
- mid
);
679 return scm_merge_list_x (a
, b
, mid
, n
- mid
, cmp
, less
);
684 SCM rest
= SCM_CDR (*seq
);
685 SCM x
= SCM_CAR (*seq
);
686 SCM y
= SCM_CAR (SCM_CDR (*seq
));
687 *seq
= SCM_CDR (rest
);
688 SCM_SETCDR (rest
, SCM_EOL
);
689 if ((*cmp
) (less
, &y
, &x
))
700 SCM_SETCDR (p
, SCM_EOL
);
705 } /* scm_merge_list_step */
708 SCM_PROC (s_sort_x
, "sort!", 2, 0, 0, scm_sort_x
);
710 /* scm_sort_x manages lists and vectors, not stable sort */
712 scm_sort_x (SCM items
, SCM less
)
714 long len
; /* list/vector length */
715 if (SCM_NULLP(items
))
717 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort_x
);
718 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_x
);
720 if (SCM_CONSP (items
))
722 len
= scm_ilength (items
);
723 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
724 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
726 else if (SCM_VECTORP (items
))
728 len
= SCM_LENGTH (items
);
729 scm_restricted_vector_sort_x (items
,
736 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
739 SCM_PROC (s_sort
, "sort", 2, 0, 0, scm_sort
);
741 /* scm_sort manages lists and vectors, not stable sort */
743 scm_sort (SCM items
, SCM less
)
745 SCM sortvec
; /* the vector we actually sort */
746 long len
; /* list/vector length */
747 if (SCM_NULLP(items
))
749 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort
);
750 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort
);
751 if (SCM_CONSP (items
))
753 len
= scm_ilength (items
);
754 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
755 items
= scm_list_copy (items
);
756 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
758 else if (SCM_VECTORP (items
))
760 len
= SCM_LENGTH (items
);
761 sortvec
= scm_make_uve (len
, scm_array_prototype (items
));
762 scm_array_copy_x (items
, sortvec
);
763 scm_restricted_vector_sort_x (sortvec
,
770 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
774 scm_merge_vector_x (void *const vecbase
,
775 void *const tempbase
,
782 register SCM
*vp
= (SCM
*) vecbase
;
783 register SCM
*temp
= (SCM
*) tempbase
;
784 long it
; /* Index for temp vector */
785 long i1
= low
; /* Index for lower vector segment */
786 long i2
= mid
+ 1; /* Index for upper vector segment */
788 /* Copy while both segments contain more characters */
789 for (it
= low
; (i1
<= mid
) && (i2
<= high
); ++it
)
790 if ((*cmp
) (less
, &vp
[i2
], &vp
[i1
]))
795 /* Copy while first segment contains more characters */
797 temp
[it
++] = vp
[i1
++];
799 /* Copy while second segment contains more characters */
801 temp
[it
++] = vp
[i2
++];
803 /* Copy back from temp to vp */
804 for (it
= low
; it
<= high
; ++it
)
806 } /* scm_merge_vector_x */
809 scm_merge_vector_step (void *const vp
,
818 long mid
= (low
+ high
) / 2;
819 scm_merge_vector_step (vp
, temp
, cmp
, less
, low
, mid
);
820 scm_merge_vector_step (vp
, temp
, cmp
, less
, mid
+1, high
);
821 scm_merge_vector_x (vp
, temp
, cmp
, less
, low
, mid
, high
);
823 } /* scm_merge_vector_step */
826 SCM_PROC (s_stable_sort_x
, "stable-sort!", 2, 0, 0, scm_stable_sort_x
);
827 /* stable-sort! manages lists and vectors */
830 scm_stable_sort_x (SCM items
, SCM less
)
832 long len
; /* list/vector length */
834 if (SCM_NULLP (items
))
836 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort_x
);
837 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort_x
);
838 if (SCM_CONSP (items
))
840 len
= scm_ilength (items
);
841 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
842 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
844 else if (SCM_VECTORP (items
))
847 len
= SCM_LENGTH (items
);
848 temp
= malloc (len
* sizeof(SCM
));
849 vp
= SCM_VELTS (items
);
850 scm_merge_vector_step (vp
,
852 scm_cmp_function (less
),
860 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort_x
);
861 } /* scm_stable_sort_x */
863 SCM_PROC (s_stable_sort
, "stable-sort", 2, 0, 0, scm_stable_sort
);
865 /* stable_sort manages lists and vectors */
867 scm_stable_sort (SCM items
, SCM less
)
869 long len
; /* list/vector length */
870 if (SCM_NULLP (items
))
872 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort
);
873 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort
);
874 if (SCM_CONSP (items
))
876 len
= scm_ilength (items
);
877 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
878 items
= scm_list_copy (items
);
879 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
881 else if (SCM_VECTORP (items
))
885 len
= SCM_LENGTH (items
);
886 retvec
= scm_make_uve (len
, scm_array_prototype (items
));
887 scm_array_copy_x (items
, retvec
);
888 temp
= malloc (len
* sizeof (SCM
));
889 vp
= SCM_VELTS (retvec
);
890 scm_merge_vector_step (vp
,
892 scm_cmp_function (less
),
900 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort
);
901 } /* scm_stable_sort */
903 SCM_PROC (s_sort_list_x
, "sort-list!", 2, 0, 0, scm_sort_list_x
);
906 scm_sort_list_x (SCM items
, SCM less
)
908 long len
= scm_ilength (items
);
909 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list_x
);
910 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list_x
);
911 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
912 } /* scm_sort_list_x */
914 SCM_PROC (s_sort_list
, "sort-list", 2, 0, 0, scm_sort_list
);
917 scm_sort_list (SCM items
, SCM less
)
919 long len
= scm_ilength (items
);
920 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list
);
921 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list
);
922 items
= scm_list_copy (items
);
923 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
924 } /* scm_sort_list_x */
931 scm_add_feature ("sort");