1 /* Copyright (C) 1999 Free Software Foundation, Inc.
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2, or (at your option)
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this software; see the file COPYING. If not, write to
14 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
15 * Boston, MA 02111-1307 USA
17 * As a special exception, the Free Software Foundation gives permission
18 * for additional uses of the text contained in its release of GUILE.
20 * The exception is that, if you link the GUILE library with other files
21 * to produce an executable, this does not by itself cause the
22 * resulting executable to be covered by the GNU General Public License.
23 * Your use of that executable is in no way restricted on account of
24 * linking the GUILE library code into it.
26 * This exception does not however invalidate any other reasons why
27 * the executable file might be covered by the GNU General Public License.
29 * This exception applies only to the code released by the
30 * Free Software Foundation under the name GUILE. If you copy
31 * code from other Free Software Foundation releases into a copy of
32 * GUILE, as the General Public License permits, the exception does
33 * not apply to the code that you add in this way. To avoid misleading
34 * anyone as to the status of such modified files, you must delete
35 * this exception notice from them.
37 * If you write modifications of your own for GUILE, it is your choice
38 * whether to permit this exception to apply to your modifications.
39 * If you do not wish that, delete this exception notice. */
41 /* Written in December 1998 by Roland Orre <orre@nada.kth.se>
42 * This implements the same sort interface as slib/sort.scm
43 * for lists and vectors where slib defines:
44 * sorted?, merge, merge!, sort, sort!
45 * For scsh compatibility sort-list and sort-list! are also defined.
46 * In cases where a stable-sort is required use stable-sort or
47 * stable-sort!. An additional feature is
48 * (restricted-vector-sort! vector less? startpos endpos)
49 * which allows you to sort part of a vector.
50 * Thanks to Aubrey Jaffer for the slib/sort.scm library.
51 * Thanks to Richard A. O'Keefe (based on Prolog code by D.H.D.Warren)
52 * for the merge sort inspiration.
 * Thanks to Douglas C. Schmidt (schmidt@ics.uci.edu) for the
 * quicksort source.  */
57 /* We need this to get the definitions for HAVE_ALLOCA_H, etc. */
58 #include "scmconfig.h"
60 /* AIX requires this to be the first thing in the file. The #pragma
61 directive is indented so pre-ANSI compilers will ignore it, rather
70 # ifndef alloca /* predefined by HP cc +Olibcalls */
87 /* The routine quicksort was extracted from the GNU C Library qsort.c
88 written by Douglas C. Schmidt (schmidt@ics.uci.edu)
89 and adapted to guile by adding an extra pointer less
90 to quicksort by Roland Orre <orre@nada.kth.se>.
92 The reason to do this instead of using the library function qsort
93 was to avoid dependency of the ANSI-C extensions for local functions
94 and also to avoid obscure pool based solutions.
96 This sorting routine is not much more efficient than the stable
   version but doesn't consume extra memory.  */
/* Byte-wise swap two items of size SIZE. */
#define SWAP(a, b, size)						\
  do									\
    {									\
      register size_t __size = (size);					\
      register char *__a = (a), *__b = (b);				\
      do								\
	{								\
	  char __tmp = *__a;						\
	  *__a++ = *__b;						\
	  *__b++ = __tmp;						\
	} while (--__size > 0);						\
    } while (0)

/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4

/* Smaller of two pointer/arithmetic expressions; used by the insertion
   sort pass in quicksort below. */
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif

/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
  {
    char *lo;
    char *hi;
  }
stack_node;

/* The next 4 #defines implement a very fast in-line stack abstraction.
   STACK_SIZE is one slot per bit of an index, enough for any partition
   schedule the algorithm can generate (see point 4 of the comment on
   quicksort below). */
#define STACK_SIZE (8 * sizeof(unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)
133 /* Order size using quicksort. This implementation incorporates
134 four optimizations discussed in Sedgewick:
136 1. Non-recursive, using an explicit stack of pointer that store the
137 next array partition to sort. To save time, this maximum amount
138 of space required to store an array of MAX_INT is allocated on the
139 stack. Assuming a 32-bit integer, this needs only 32 *
140 sizeof(stack_node) == 136 bits. Pretty cheap, actually.
142 2. Chose the pivot element using a median-of-three decision tree.
143 This reduces the probability of selecting a bad pivot value and
144 eliminates certain extraneous comparisons.
146 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
147 insertion sort to order the MAX_THRESH items within each partition.
148 This is a big win, since insertion sort is faster for small, mostly
149 sorted array segments.
151 4. The larger of the two sub-partitions is always pushed onto the
152 stack first, with the algorithm then concentrating on the
153 smaller partition. This *guarantees* no more than log (n)
154 stack size is needed (actually O(1) in this case)! */
156 typedef int (*cmp_fun_t
) (SCM less
,
160 static const char s_buggy_less
[] = "buggy less predicate used when sorting";
163 quicksort (void *const pbase
,
169 register char *base_ptr
= (char *) pbase
;
171 /* Allocating SIZE bytes for a pivot buffer facilitates a better
172 algorithm below since we can do comparisons directly on the pivot. */
173 char *pivot_buffer
= (char *) alloca (size
);
174 const size_t max_thresh
= MAX_THRESH
* size
;
176 if (total_elems
== 0)
177 /* Avoid lossage with unsigned arithmetic below. */
180 if (total_elems
> MAX_THRESH
)
183 char *hi
= &lo
[size
* (total_elems
- 1)];
184 /* Largest size needed for 32-bit int!!! */
185 stack_node stack
[STACK_SIZE
];
186 stack_node
*top
= stack
+ 1;
188 while (STACK_NOT_EMPTY
)
193 char *pivot
= pivot_buffer
;
195 /* Select median value from among LO, MID, and HI. Rearrange
196 LO and HI so the three values are sorted. This lowers the
197 probability of picking a pathological pivot value and
198 skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
200 char *mid
= lo
+ size
* ((hi
- lo
) / size
>> 1);
202 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
203 SWAP (mid
, lo
, size
);
204 if ((*cmp
) (less
, (void *) hi
, (void *) mid
))
205 SWAP (mid
, hi
, size
);
208 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
209 SWAP (mid
, lo
, size
);
211 memcpy (pivot
, mid
, size
);
212 pivot
= pivot_buffer
;
214 left_ptr
= lo
+ size
;
215 right_ptr
= hi
- size
;
217 /* Here's the famous ``collapse the walls'' section of quicksort.
218 Gotta like those tight inner loops! They are the main reason
219 that this algorithm runs much faster than others. */
222 while ((*cmp
) (less
, (void *) left_ptr
, (void *) pivot
))
225 /* The comparison predicate may be buggy */
227 scm_misc_error (0, s_buggy_less
, SCM_EOL
);
230 while ((*cmp
) (less
, (void *) pivot
, (void *) right_ptr
))
233 /* The comparison predicate may be buggy */
235 scm_misc_error (0, s_buggy_less
, SCM_EOL
);
238 if (left_ptr
< right_ptr
)
240 SWAP (left_ptr
, right_ptr
, size
);
244 else if (left_ptr
== right_ptr
)
251 while (left_ptr
<= right_ptr
);
253 /* Set up pointers for next iteration. First determine whether
254 left and right partitions are below the threshold size. If so,
255 ignore one or both. Otherwise, push the larger partition's
256 bounds on the stack and continue sorting the smaller one. */
258 if ((size_t) (right_ptr
- lo
) <= max_thresh
)
260 if ((size_t) (hi
- left_ptr
) <= max_thresh
)
261 /* Ignore both small partitions. */
264 /* Ignore small left partition. */
267 else if ((size_t) (hi
- left_ptr
) <= max_thresh
)
268 /* Ignore small right partition. */
270 else if ((right_ptr
- lo
) > (hi
- left_ptr
))
272 /* Push larger left partition indices. */
273 PUSH (lo
, right_ptr
);
278 /* Push larger right partition indices. */
285 /* Once the BASE_PTR array is partially sorted by quicksort the rest
286 is completely sorted using insertion sort, since this is efficient
287 for partitions below MAX_THRESH size. BASE_PTR points to the beginning
288 of the array to sort, and END_PTR points at the very last element in
289 the array (*not* one beyond it!). */
292 char *const end_ptr
= &base_ptr
[size
* (total_elems
- 1)];
293 char *tmp_ptr
= base_ptr
;
294 char *thresh
= min (end_ptr
, base_ptr
+ max_thresh
);
295 register char *run_ptr
;
297 /* Find smallest element in first threshold and place it at the
298 array's beginning. This is the smallest array element,
299 and the operation speeds up insertion sort's inner loop. */
301 for (run_ptr
= tmp_ptr
+ size
; run_ptr
<= thresh
; run_ptr
+= size
)
302 if ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
305 if (tmp_ptr
!= base_ptr
)
306 SWAP (tmp_ptr
, base_ptr
, size
);
308 /* Insertion sort, running from left-hand-side up to right-hand-side. */
310 run_ptr
= base_ptr
+ size
;
311 while ((run_ptr
+= size
) <= end_ptr
)
313 tmp_ptr
= run_ptr
- size
;
314 while ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
317 /* The comparison predicate may be buggy */
318 if (tmp_ptr
< base_ptr
)
319 scm_misc_error (0, s_buggy_less
, SCM_EOL
);
323 if (tmp_ptr
!= run_ptr
)
327 trav
= run_ptr
+ size
;
328 while (--trav
>= run_ptr
)
333 for (hi
= lo
= trav
; (lo
-= size
) >= tmp_ptr
; hi
= lo
)
343 /* comparison routines */
346 subr2less (SCM less
, const void *a
, const void *b
)
348 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
, *(SCM
*) b
));
352 subr2oless (SCM less
, const void *a
, const void *b
)
354 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
,
360 lsubrless (SCM less
, const void *a
, const void *b
)
362 return SCM_NFALSEP (SCM_SUBRF (less
)
363 (scm_cons (*(SCM
*) a
,
364 scm_cons (*(SCM
*) b
, SCM_EOL
))));
368 closureless (SCM code
, const void *a
, const void *b
)
371 env
= SCM_EXTEND_ENV (SCM_CAR (SCM_CODE (code
)),
372 scm_cons (*(SCM
*) a
,
373 scm_cons (*(SCM
*) b
, SCM_EOL
)),
375 /* Evaluate the closure body */
376 code
= SCM_CDR (SCM_CODE (code
));
377 while (SCM_IMP (SCM_CAR (code
)) && SCM_ISYMP (SCM_CAR (code
)))
378 code
= scm_m_expand_body (code
, env
);
380 while (SCM_NNULLP (next
= SCM_CDR (next
)))
382 if (SCM_NIMP (SCM_CAR (code
)))
383 SCM_XEVAL (SCM_CAR (code
), env
);
386 return SCM_NFALSEP (SCM_XEVALCAR (code
, env
));
390 applyless (SCM less
, const void *a
, const void *b
)
392 return SCM_NFALSEP (scm_apply ((SCM
) less
,
393 scm_cons (*(SCM
*) a
,
394 scm_cons (*(SCM
*) b
, SCM_EOL
)),
399 scm_cmp_function (SCM p
)
401 switch (SCM_TYP7 (p
))
407 case scm_tc7_subr_2o
:
411 case scm_tcs_closures
:
416 } /* scm_cmp_function */
418 SCM_PROC (s_restricted_vector_sort_x
, "restricted-vector-sort!", 4, 0, 0, scm_restricted_vector_sort_x
);
420 /* Question: Is there any need to make this a more general array sort?
421 It is probably enough to manage the vector type. */
422 /* endpos equal as for substring, i.e. endpos is not included. */
423 /* More natural wih length? */
425 scm_restricted_vector_sort_x (SCM vec
, SCM less
, SCM startpos
, SCM endpos
)
427 size_t vlen
, spos
, len
, size
= sizeof (SCM
);
430 SCM_ASSERT (SCM_NIMP (vec
), vec
, SCM_ARG1
, s_restricted_vector_sort_x
);
431 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_restricted_vector_sort_x
);
432 switch (SCM_TYP7 (vec
))
434 case scm_tc7_vector
: /* the only type we manage is vector */
436 case scm_tc7_ivect
: /* long */
437 case scm_tc7_uvect
: /* unsigned */
438 case scm_tc7_fvect
: /* float */
439 case scm_tc7_dvect
: /* double */
441 scm_wta (vec
, (char *) SCM_ARG1
, s_restricted_vector_sort_x
);
443 vp
= SCM_VELTS (vec
); /* vector pointer */
444 vlen
= SCM_LENGTH (vec
);
446 SCM_ASSERT (SCM_INUMP(startpos
),
447 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
448 spos
= SCM_INUM (startpos
);
449 SCM_ASSERT ((spos
>= 0) && (spos
<= vlen
),
450 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
451 SCM_ASSERT ((SCM_INUMP (endpos
)) && (SCM_INUM (endpos
) <= vlen
),
452 endpos
, SCM_ARG4
, s_restricted_vector_sort_x
);
453 len
= SCM_INUM (endpos
) - spos
;
455 quicksort (&vp
[spos
], len
, size
, scm_cmp_function (less
), less
);
456 return SCM_UNSPECIFIED
;
458 } /* scm_restricted_vector_sort_x */
460 /* (sorted? sequence less?)
461 * is true when sequence is a list (x0 x1 ... xm) or a vector #(x0 ... xm)
462 * such that for all 1 <= i <= m,
463 * (not (less? (list-ref list i) (list-ref list (- i 1)))). */
464 SCM_PROC (s_sorted_p
, "sorted?", 2, 0, 0, scm_sorted_p
);
467 scm_sorted_p (SCM items
, SCM less
)
469 long len
, j
; /* list/vector length, temp j */
470 SCM item
, rest
; /* rest of items loop variable */
472 cmp_fun_t cmp
= scm_cmp_function (less
);
474 if (SCM_NULLP (items
))
476 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sorted_p
);
477 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sorted_p
);
479 if (SCM_CONSP (items
))
481 len
= scm_ilength (items
); /* also checks that it's a pure list */
482 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sorted_p
);
486 item
= SCM_CAR (items
);
487 rest
= SCM_CDR (items
);
491 if ((*cmp
) (less
, &SCM_CAR(rest
), &item
))
495 item
= SCM_CAR (rest
);
496 rest
= SCM_CDR (rest
);
504 switch (SCM_TYP7 (items
))
508 vp
= SCM_VELTS (items
); /* vector pointer */
509 len
= SCM_LENGTH (items
);
513 if ((*cmp
) (less
, &vp
[1], vp
))
524 case scm_tc7_ivect
: /* long */
525 case scm_tc7_uvect
: /* unsigned */
526 case scm_tc7_fvect
: /* float */
527 case scm_tc7_dvect
: /* double */
529 scm_wta (items
, (char *) SCM_ARG1
, s_sorted_p
);
536 takes two lists a and b such that (sorted? a less?) and (sorted? b less?)
537 and returns a new list in which the elements of a and b have been stably
538 interleaved so that (sorted? (merge a b less?) less?).
539 Note: this does _not_ accept vectors. */
540 SCM_PROC (s_merge
, "merge", 3, 0, 0, scm_merge
);
543 scm_merge (SCM alist
, SCM blist
, SCM less
)
545 long alen
, blen
; /* list lengths */
547 cmp_fun_t cmp
= scm_cmp_function (less
);
548 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge
);
550 if (SCM_NULLP (alist
))
552 else if (SCM_NULLP (blist
))
556 alen
= scm_ilength (alist
); /* checks that it's a pure list */
557 blen
= scm_ilength (blist
); /* checks that it's a pure list */
558 SCM_ASSERT (alen
> 0, alist
, SCM_ARG1
, s_merge
);
559 SCM_ASSERT (blen
> 0, blist
, SCM_ARG2
, s_merge
);
560 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
562 build
= scm_cons (SCM_CAR (blist
), SCM_EOL
);
563 blist
= SCM_CDR (blist
);
568 build
= scm_cons (SCM_CAR (alist
), SCM_EOL
);
569 alist
= SCM_CDR (alist
);
573 while ((alen
> 0) && (blen
> 0))
575 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
577 SCM_SETCDR (last
, scm_cons (SCM_CAR (blist
), SCM_EOL
));
578 blist
= SCM_CDR (blist
);
583 SCM_SETCDR (last
, scm_cons (SCM_CAR (alist
), SCM_EOL
));
584 alist
= SCM_CDR (alist
);
587 last
= SCM_CDR (last
);
589 if ((alen
> 0) && (blen
== 0))
590 SCM_SETCDR (last
, alist
);
591 else if ((alen
== 0) && (blen
> 0))
592 SCM_SETCDR (last
, blist
);
598 scm_merge_list_x (SCM alist
, SCM blist
,
599 long alen
, long blen
,
600 cmp_fun_t cmp
, SCM less
)
604 if (SCM_NULLP (alist
))
606 else if (SCM_NULLP (blist
))
610 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
613 blist
= SCM_CDR (blist
);
619 alist
= SCM_CDR (alist
);
623 while ((alen
> 0) && (blen
> 0))
625 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
627 SCM_SETCDR (last
, blist
);
628 blist
= SCM_CDR (blist
);
633 SCM_SETCDR (last
, alist
);
634 alist
= SCM_CDR (alist
);
637 last
= SCM_CDR (last
);
639 if ((alen
> 0) && (blen
== 0))
640 SCM_SETCDR (last
, alist
);
641 else if ((alen
== 0) && (blen
> 0))
642 SCM_SETCDR (last
, blist
);
645 } /* scm_merge_list_x */
647 SCM_PROC (s_merge_x
, "merge!", 3, 0, 0, scm_merge_x
);
650 scm_merge_x (SCM alist
, SCM blist
, SCM less
)
652 long alen
, blen
; /* list lengths */
654 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge_x
);
655 if (SCM_NULLP (alist
))
657 else if (SCM_NULLP (blist
))
661 alen
= scm_ilength (alist
); /* checks that it's a pure list */
662 blen
= scm_ilength (blist
); /* checks that it's a pure list */
663 SCM_ASSERT (alen
>= 0, alist
, SCM_ARG1
, s_merge
);
664 SCM_ASSERT (blen
>= 0, blist
, SCM_ARG2
, s_merge
);
665 return scm_merge_list_x (alist
, blist
,
667 scm_cmp_function (less
),
672 /* This merge sort algorithm is same as slib's by Richard A. O'Keefe.
673 The algorithm is stable. We also tried to use the algorithm used by
674 scsh's merge-sort but that algorithm showed to not be stable, even
675 though it claimed to be.
678 scm_merge_list_step (SCM
* seq
,
688 a
= scm_merge_list_step (seq
, cmp
, less
, mid
);
689 b
= scm_merge_list_step (seq
, cmp
, less
, n
- mid
);
690 return scm_merge_list_x (a
, b
, mid
, n
- mid
, cmp
, less
);
695 SCM rest
= SCM_CDR (*seq
);
696 SCM x
= SCM_CAR (*seq
);
697 SCM y
= SCM_CAR (SCM_CDR (*seq
));
698 *seq
= SCM_CDR (rest
);
699 SCM_SETCDR (rest
, SCM_EOL
);
700 if ((*cmp
) (less
, &y
, &x
))
711 SCM_SETCDR (p
, SCM_EOL
);
716 } /* scm_merge_list_step */
719 SCM_PROC (s_sort_x
, "sort!", 2, 0, 0, scm_sort_x
);
721 /* scm_sort_x manages lists and vectors, not stable sort */
723 scm_sort_x (SCM items
, SCM less
)
725 long len
; /* list/vector length */
726 if (SCM_NULLP(items
))
728 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort_x
);
729 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_x
);
731 if (SCM_CONSP (items
))
733 len
= scm_ilength (items
);
734 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
735 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
737 else if (SCM_VECTORP (items
))
739 len
= SCM_LENGTH (items
);
740 scm_restricted_vector_sort_x (items
,
747 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
750 SCM_PROC (s_sort
, "sort", 2, 0, 0, scm_sort
);
752 /* scm_sort manages lists and vectors, not stable sort */
754 scm_sort (SCM items
, SCM less
)
756 SCM sortvec
; /* the vector we actually sort */
757 long len
; /* list/vector length */
758 if (SCM_NULLP(items
))
760 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort
);
761 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort
);
762 if (SCM_CONSP (items
))
764 len
= scm_ilength (items
);
765 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
766 items
= scm_list_copy (items
);
767 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
769 else if (SCM_VECTORP (items
))
771 len
= SCM_LENGTH (items
);
772 sortvec
= scm_make_uve (len
, scm_array_prototype (items
));
773 scm_array_copy_x (items
, sortvec
);
774 scm_restricted_vector_sort_x (sortvec
,
781 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
785 scm_merge_vector_x (void *const vecbase
,
786 void *const tempbase
,
793 register SCM
*vp
= (SCM
*) vecbase
;
794 register SCM
*temp
= (SCM
*) tempbase
;
795 long it
; /* Index for temp vector */
796 long i1
= low
; /* Index for lower vector segment */
797 long i2
= mid
+ 1; /* Index for upper vector segment */
799 /* Copy while both segments contain more characters */
800 for (it
= low
; (i1
<= mid
) && (i2
<= high
); ++it
)
801 if ((*cmp
) (less
, &vp
[i2
], &vp
[i1
]))
806 /* Copy while first segment contains more characters */
808 temp
[it
++] = vp
[i1
++];
810 /* Copy while second segment contains more characters */
812 temp
[it
++] = vp
[i2
++];
814 /* Copy back from temp to vp */
815 for (it
= low
; it
<= high
; ++it
)
817 } /* scm_merge_vector_x */
820 scm_merge_vector_step (void *const vp
,
829 long mid
= (low
+ high
) / 2;
830 scm_merge_vector_step (vp
, temp
, cmp
, less
, low
, mid
);
831 scm_merge_vector_step (vp
, temp
, cmp
, less
, mid
+1, high
);
832 scm_merge_vector_x (vp
, temp
, cmp
, less
, low
, mid
, high
);
834 } /* scm_merge_vector_step */
837 SCM_PROC (s_stable_sort_x
, "stable-sort!", 2, 0, 0, scm_stable_sort_x
);
838 /* stable-sort! manages lists and vectors */
841 scm_stable_sort_x (SCM items
, SCM less
)
843 long len
; /* list/vector length */
845 if (SCM_NULLP (items
))
847 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort_x
);
848 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort_x
);
849 if (SCM_CONSP (items
))
851 len
= scm_ilength (items
);
852 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
853 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
855 else if (SCM_VECTORP (items
))
858 len
= SCM_LENGTH (items
);
859 temp
= malloc (len
* sizeof(SCM
));
860 vp
= SCM_VELTS (items
);
861 scm_merge_vector_step (vp
,
863 scm_cmp_function (less
),
871 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort_x
);
872 } /* scm_stable_sort_x */
874 SCM_PROC (s_stable_sort
, "stable-sort", 2, 0, 0, scm_stable_sort
);
876 /* stable_sort manages lists and vectors */
878 scm_stable_sort (SCM items
, SCM less
)
880 long len
; /* list/vector length */
881 if (SCM_NULLP (items
))
883 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort
);
884 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort
);
885 if (SCM_CONSP (items
))
887 len
= scm_ilength (items
);
888 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
889 items
= scm_list_copy (items
);
890 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
892 else if (SCM_VECTORP (items
))
896 len
= SCM_LENGTH (items
);
897 retvec
= scm_make_uve (len
, scm_array_prototype (items
));
898 scm_array_copy_x (items
, retvec
);
899 temp
= malloc (len
* sizeof (SCM
));
900 vp
= SCM_VELTS (retvec
);
901 scm_merge_vector_step (vp
,
903 scm_cmp_function (less
),
911 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort
);
912 } /* scm_stable_sort */
914 SCM_PROC (s_sort_list_x
, "sort-list!", 2, 0, 0, scm_sort_list_x
);
917 scm_sort_list_x (SCM items
, SCM less
)
919 long len
= scm_ilength (items
);
920 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list_x
);
921 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list_x
);
922 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
923 } /* scm_sort_list_x */
925 SCM_PROC (s_sort_list
, "sort-list", 2, 0, 0, scm_sort_list
);
928 scm_sort_list (SCM items
, SCM less
)
930 long len
= scm_ilength (items
);
931 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list
);
932 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list
);
933 items
= scm_list_copy (items
);
934 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
935 } /* scm_sort_list_x */
942 scm_add_feature ("sort");