1 /* Copyright (C) 1999 Free Software Foundation, Inc.
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2, or (at your option)
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this software; see the file COPYING. If not, write to
14 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
15 * Boston, MA 02111-1307 USA
17 * As a special exception, the Free Software Foundation gives permission
18 * for additional uses of the text contained in its release of GUILE.
20 * The exception is that, if you link the GUILE library with other files
21 * to produce an executable, this does not by itself cause the
22 * resulting executable to be covered by the GNU General Public License.
23 * Your use of that executable is in no way restricted on account of
24 * linking the GUILE library code into it.
26 * This exception does not however invalidate any other reasons why
27 * the executable file might be covered by the GNU General Public License.
29 * This exception applies only to the code released by the
30 * Free Software Foundation under the name GUILE. If you copy
31 * code from other Free Software Foundation releases into a copy of
32 * GUILE, as the General Public License permits, the exception does
33 * not apply to the code that you add in this way. To avoid misleading
34 * anyone as to the status of such modified files, you must delete
35 * this exception notice from them.
37 * If you write modifications of your own for GUILE, it is your choice
38 * whether to permit this exception to apply to your modifications.
39 * If you do not wish that, delete this exception notice. */
41 /* Written in December 1998 by Roland Orre <orre@nada.kth.se>
42 * This implements the same sort interface as slib/sort.scm
43 * for lists and vectors where slib defines:
44 * sorted?, merge, merge!, sort, sort!
45 * For scsh compatibility are also sort-list and sort-list! defined.
46 * In cases where a stable-sort is required use stable-sort or
47 * stable-sort!. As additional feature is
48 * (restricted-vector-sort! vector less? startpos endpos)
49 * added, where startpos and endpos allows you to sort part of a vector.
50 * Thanks to Aubrey Jaffer for the slib/sort.scm library.
51 * Thanks to Richard A. O'Keefe (based on Prolog code by D.H.D.Warren)
52 * for the merge sort inspiration.
53 * Thanks to Douglas C. Schmidt (schmidt@ics.uci.edu) for the
54 * quicksort inspiration. */
57 /* We need this to get the definitions for HAVE_ALLOCA_H, etc. */
58 #include "scmconfig.h"
60 /* AIX requires this to be the first thing in the file. The #pragma
61 directive is indented so pre-ANSI compilers will ignore it, rather
70 # ifndef alloca /* predefined by HP cc +Olibcalls */
87 /* The routine quicksort was extracted from the GNU C Library qsort.c
88 written by Douglas C. Schmidt (schmidt@ics.uci.edu)
89 and adapted to guile by adding an extra pointer less
90 to quicksort by Roland Orre <orre@nada.kth.se>.
92 The reason to do this instead of using the library function qsort
93 was to avoid dependency of the ANSI-C extensions for local functions
94 and also to avoid obscure pool based solutions. */
/* Byte-wise swap two items of size SIZE. */
#define SWAP(a, b, size)                                                      \
  do                                                                          \
    {                                                                         \
      register size_t __size = (size);                                        \
      register char *__a = (a), *__b = (b);                                   \
      do                                                                      \
        {                                                                     \
          char __tmp = *__a;                                                  \
          *__a++ = *__b;                                                      \
          *__b++ = __tmp;                                                     \
        } while (--__size > 0);                                               \
    } while (0)

/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4

/* Smaller of two pointers/values; used by the insertion-sort phase. */
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif

/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
  {
    char *lo;                   /* first byte of the pending partition */
    char *hi;                   /* last element's first byte */
  } stack_node;

/* The next 4 #defines implement a very fast in-line stack abstraction.
   The stack starts with one sentinel slot (top = stack + 1), so
   STACK_NOT_EMPTY is false exactly when top has been popped back to
   the base.  */
#define STACK_SIZE (8 * sizeof(unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)
130 /* Order size using quicksort. This implementation incorporates
131 four optimizations discussed in Sedgewick:
133 1. Non-recursive, using an explicit stack of pointer that store the
134 next array partition to sort. To save time, this maximum amount
135 of space required to store an array of MAX_INT is allocated on the
136 stack. Assuming a 32-bit integer, this needs only 32 *
137 sizeof(stack_node) == 136 bits. Pretty cheap, actually.
139 2. Chose the pivot element using a median-of-three decision tree.
140 This reduces the probability of selecting a bad pivot value and
141 eliminates certain extraneous comparisons.
143 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
144 insertion sort to order the MAX_THRESH items within each partition.
145 This is a big win, since insertion sort is faster for small, mostly
146 sorted array segments.
148 4. The larger of the two sub-partitions is always pushed onto the
149 stack first, with the algorithm then concentrating on the
150 smaller partition. This *guarantees* no more than log (n)
151 stack size is needed (actually O(1) in this case)! */
153 typedef int (*cmp_fun_t
) (SCM less
,
158 quicksort (void *const pbase
,
164 register char *base_ptr
= (char *) pbase
;
166 /* Allocating SIZE bytes for a pivot buffer facilitates a better
167 algorithm below since we can do comparisons directly on the pivot. */
168 char *pivot_buffer
= (char *) alloca (size
);
169 const size_t max_thresh
= MAX_THRESH
* size
;
171 if (total_elems
== 0)
172 /* Avoid lossage with unsigned arithmetic below. */
175 if (total_elems
> MAX_THRESH
)
178 char *hi
= &lo
[size
* (total_elems
- 1)];
179 /* Largest size needed for 32-bit int!!! */
180 stack_node stack
[STACK_SIZE
];
181 stack_node
*top
= stack
+ 1;
183 while (STACK_NOT_EMPTY
)
188 char *pivot
= pivot_buffer
;
190 /* Select median value from among LO, MID, and HI. Rearrange
191 LO and HI so the three values are sorted. This lowers the
192 probability of picking a pathological pivot value and
193 skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
195 char *mid
= lo
+ size
* ((hi
- lo
) / size
>> 1);
197 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
198 SWAP (mid
, lo
, size
);
199 if ((*cmp
) (less
, (void *) hi
, (void *) mid
))
200 SWAP (mid
, hi
, size
);
203 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
204 SWAP (mid
, lo
, size
);
206 memcpy (pivot
, mid
, size
);
207 pivot
= pivot_buffer
;
209 left_ptr
= lo
+ size
;
210 right_ptr
= hi
- size
;
212 /* Here's the famous ``collapse the walls'' section of quicksort.
213 Gotta like those tight inner loops! They are the main reason
214 that this algorithm runs much faster than others. */
217 while ((*cmp
) (less
, (void *) left_ptr
, (void *) pivot
))
220 while ((*cmp
) (less
, (void *) pivot
, (void *) right_ptr
))
223 if (left_ptr
< right_ptr
)
225 SWAP (left_ptr
, right_ptr
, size
);
229 else if (left_ptr
== right_ptr
)
236 while (left_ptr
<= right_ptr
);
238 /* Set up pointers for next iteration. First determine whether
239 left and right partitions are below the threshold size. If so,
240 ignore one or both. Otherwise, push the larger partition's
241 bounds on the stack and continue sorting the smaller one. */
243 if ((size_t) (right_ptr
- lo
) <= max_thresh
)
245 if ((size_t) (hi
- left_ptr
) <= max_thresh
)
246 /* Ignore both small partitions. */
249 /* Ignore small left partition. */
252 else if ((size_t) (hi
- left_ptr
) <= max_thresh
)
253 /* Ignore small right partition. */
255 else if ((right_ptr
- lo
) > (hi
- left_ptr
))
257 /* Push larger left partition indices. */
258 PUSH (lo
, right_ptr
);
263 /* Push larger right partition indices. */
270 /* Once the BASE_PTR array is partially sorted by quicksort the rest
271 is completely sorted using insertion sort, since this is efficient
272 for partitions below MAX_THRESH size. BASE_PTR points to the beginning
273 of the array to sort, and END_PTR points at the very last element in
274 the array (*not* one beyond it!). */
277 char *const end_ptr
= &base_ptr
[size
* (total_elems
- 1)];
278 char *tmp_ptr
= base_ptr
;
279 char *thresh
= min (end_ptr
, base_ptr
+ max_thresh
);
280 register char *run_ptr
;
282 /* Find smallest element in first threshold and place it at the
283 array's beginning. This is the smallest array element,
284 and the operation speeds up insertion sort's inner loop. */
286 for (run_ptr
= tmp_ptr
+ size
; run_ptr
<= thresh
; run_ptr
+= size
)
287 if ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
290 if (tmp_ptr
!= base_ptr
)
291 SWAP (tmp_ptr
, base_ptr
, size
);
293 /* Insertion sort, running from left-hand-side up to right-hand-side. */
295 run_ptr
= base_ptr
+ size
;
296 while ((run_ptr
+= size
) <= end_ptr
)
298 tmp_ptr
= run_ptr
- size
;
299 while ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
303 if (tmp_ptr
!= run_ptr
)
307 trav
= run_ptr
+ size
;
308 while (--trav
>= run_ptr
)
313 for (hi
= lo
= trav
; (lo
-= size
) >= tmp_ptr
; hi
= lo
)
323 /* comparison routines */
326 subr2less (SCM less
, const void *a
, const void *b
)
328 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
, *(SCM
*) b
));
332 subr2oless (SCM less
, const void *a
, const void *b
)
334 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
,
340 lsubrless (SCM less
, const void *a
, const void *b
)
342 return SCM_NFALSEP (SCM_SUBRF (less
)
343 (scm_cons (*(SCM
*) a
,
344 scm_cons (*(SCM
*) b
, SCM_EOL
))));
348 closureless (SCM code
, const void *a
, const void *b
)
351 env
= SCM_EXTEND_ENV (SCM_CAR (SCM_CODE (code
)),
352 scm_cons (*(SCM
*) a
,
353 scm_cons (*(SCM
*) b
, SCM_EOL
)),
355 /* Evaluate the closure body */
356 code
= SCM_CDR (SCM_CODE (code
));
358 while (SCM_NNULLP (next
= SCM_CDR (next
)))
360 if (SCM_NIMP (SCM_CAR (code
)))
361 SCM_XEVAL (SCM_CAR (code
), env
);
364 return SCM_NFALSEP (SCM_XEVALCAR (code
, env
));
368 applyless (SCM less
, const void *a
, const void *b
)
370 return SCM_NFALSEP (scm_apply ((SCM
) less
,
371 scm_cons (*(SCM
*) a
,
372 scm_cons (*(SCM
*) b
, SCM_EOL
)),
377 scm_cmp_function (SCM p
)
379 switch (SCM_TYP7 (p
))
385 case scm_tc7_subr_2o
:
389 case scm_tcs_closures
:
394 } /* scm_cmp_function */
396 SCM_PROC (s_restricted_vector_sort_x
, "restricted-vector-sort!", 4, 0, 0, scm_restricted_vector_sort_x
);
398 /* Question: Is there any need to make this a more general array sort?
399 It is probably enough to manage the vector type. */
400 /* endpos equal as for substring, i.e. endpos is not included. */
401 /* More natural wih length? */
403 scm_restricted_vector_sort_x (SCM vec
, SCM less
, SCM startpos
, SCM endpos
)
405 size_t vlen
, spos
, len
, size
= sizeof (SCM
);
408 SCM_ASSERT (SCM_NIMP (vec
), vec
, SCM_ARG1
, s_restricted_vector_sort_x
);
409 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_restricted_vector_sort_x
);
410 switch (SCM_TYP7 (vec
))
412 case scm_tc7_vector
: /* the only type we manage is vector */
414 case scm_tc7_ivect
: /* long */
415 case scm_tc7_uvect
: /* unsigned */
416 case scm_tc7_fvect
: /* float */
417 case scm_tc7_dvect
: /* double */
419 scm_wta (vec
, (char *) SCM_ARG1
, s_restricted_vector_sort_x
);
421 vp
= SCM_VELTS (vec
); /* vector pointer */
422 vlen
= SCM_LENGTH (vec
);
424 SCM_ASSERT (SCM_INUMP(startpos
),
425 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
426 spos
= SCM_INUM (startpos
);
427 SCM_ASSERT ((spos
>= 0) && (spos
<= vlen
),
428 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
429 SCM_ASSERT ((SCM_INUMP (endpos
)) && (SCM_INUM (endpos
) <= vlen
),
430 endpos
, SCM_ARG4
, s_restricted_vector_sort_x
);
431 len
= SCM_INUM (endpos
) - spos
;
433 quicksort (&vp
[spos
], len
, size
, scm_cmp_function (less
), less
);
434 return SCM_UNSPECIFIED
;
436 } /* scm_restricted_vector_sort_x */
438 /* (sorted? sequence less?)
439 * is true when sequence is a list (x0 x1 ... xm) or a vector #(x0 ... xm)
440 * such that for all 1 <= i <= m,
441 * (not (less? (list-ref list i) (list-ref list (- i 1)))). */
442 SCM_PROC (s_sorted_p
, "sorted?", 2, 0, 0, scm_sorted_p
);
445 scm_sorted_p (SCM items
, SCM less
)
447 long len
, j
; /* list/vector length, temp j */
448 SCM item
, rest
; /* rest of items loop variable */
450 cmp_fun_t cmp
= scm_cmp_function (less
);
452 if (SCM_NULLP (items
))
454 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sorted_p
);
455 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sorted_p
);
457 if (SCM_CONSP (items
))
459 len
= scm_ilength (items
); /* also checks that it's a pure list */
460 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sorted_p
);
464 item
= SCM_CAR (items
);
465 rest
= SCM_CDR (items
);
469 if ((*cmp
) (less
, &SCM_CAR(rest
), &item
))
473 item
= SCM_CAR (rest
);
474 rest
= SCM_CDR (rest
);
482 switch (SCM_TYP7 (items
))
486 vp
= SCM_VELTS (items
); /* vector pointer */
487 len
= SCM_LENGTH (items
);
491 if ((*cmp
) (less
, &vp
[1], vp
))
502 case scm_tc7_ivect
: /* long */
503 case scm_tc7_uvect
: /* unsigned */
504 case scm_tc7_fvect
: /* float */
505 case scm_tc7_dvect
: /* double */
507 scm_wta (items
, (char *) SCM_ARG1
, s_sorted_p
);
514 takes two lists a and b such that (sorted? a less?) and (sorted? b less?)
515 and returns a new list in which the elements of a and b have been stably
516 interleaved so that (sorted? (merge a b less?) less?).
517 Note: this does _not_ accept vectors. */
518 SCM_PROC (s_merge
, "merge", 3, 0, 0, scm_merge
);
521 scm_merge (SCM alist
, SCM blist
, SCM less
)
523 long alen
, blen
; /* list lengths */
525 cmp_fun_t cmp
= scm_cmp_function (less
);
526 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge
);
528 if (SCM_NULLP (alist
))
530 else if (SCM_NULLP (blist
))
534 alen
= scm_ilength (alist
); /* checks that it's a pure list */
535 blen
= scm_ilength (blist
); /* checks that it's a pure list */
536 SCM_ASSERT (alen
> 0, alist
, SCM_ARG1
, s_merge
);
537 SCM_ASSERT (blen
> 0, blist
, SCM_ARG2
, s_merge
);
538 if ((*cmp
) (less
, &SCM_CAR (alist
), &SCM_CAR (blist
)))
540 build
= scm_cons (SCM_CAR (alist
), SCM_EOL
);
541 alist
= SCM_CDR (alist
);
546 build
= scm_cons (SCM_CAR (blist
), SCM_EOL
);
547 blist
= SCM_CDR (blist
);
551 while ((alen
> 0) && (blen
> 0))
553 if ((*cmp
) (less
, &SCM_CAR (alist
), &SCM_CAR (blist
)))
555 SCM_SETCDR (last
, scm_cons (SCM_CAR (alist
), SCM_EOL
));
556 alist
= SCM_CDR (alist
);
561 SCM_SETCDR (last
, scm_cons (SCM_CAR (blist
), SCM_EOL
));
562 blist
= SCM_CDR (blist
);
565 last
= SCM_CDR (last
);
567 if ((alen
> 0) && (blen
== 0))
568 SCM_SETCDR (last
, alist
);
569 else if ((alen
== 0) && (blen
> 0))
570 SCM_SETCDR (last
, blist
);
576 scm_merge_list_x (SCM alist
, SCM blist
,
577 long alen
, long blen
,
578 cmp_fun_t cmp
, SCM less
)
582 if (SCM_NULLP (alist
))
584 else if (SCM_NULLP (blist
))
588 if ((*cmp
) (less
, &SCM_CAR (alist
), &SCM_CAR (blist
)))
591 alist
= SCM_CDR (alist
);
597 blist
= SCM_CDR (blist
);
601 while ((alen
> 0) && (blen
> 0))
603 if ((*cmp
) (less
, &SCM_CAR (alist
), &SCM_CAR (blist
)))
605 SCM_SETCDR (last
, alist
);
606 alist
= SCM_CDR (alist
);
611 SCM_SETCDR (last
, blist
);
612 blist
= SCM_CDR (blist
);
615 last
= SCM_CDR (last
);
617 if ((alen
> 0) && (blen
== 0))
618 SCM_SETCDR (last
, alist
);
619 else if ((alen
== 0) && (blen
> 0))
620 SCM_SETCDR (last
, blist
);
623 } /* scm_merge_list_x */
625 SCM_PROC (s_merge_x
, "merge!", 3, 0, 0, scm_merge_x
);
628 scm_merge_x (SCM alist
, SCM blist
, SCM less
)
630 long alen
, blen
; /* list lengths */
632 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge_x
);
633 if (SCM_NULLP (alist
))
635 else if (SCM_NULLP (blist
))
639 alen
= scm_ilength (alist
); /* checks that it's a pure list */
640 blen
= scm_ilength (blist
); /* checks that it's a pure list */
641 SCM_ASSERT (alen
>= 0, alist
, SCM_ARG1
, s_merge
);
642 SCM_ASSERT (blen
>= 0, blist
, SCM_ARG2
, s_merge
);
643 return scm_merge_list_x (alist
, blist
,
645 scm_cmp_function (less
),
650 /* This merge sort algorithm is same as slib's by Richard A. O'Keefe.
651 The algorithm is stable. We also tried to use the algorithm used by
652 scsh's merge-sort but that algorithm showed to not be stable, even
653 though it claimed to be.
656 scm_merge_list_step (SCM
* seq
,
664 return scm_merge_list_x (scm_merge_list_step (seq
, cmp
, less
, mid
),
665 scm_merge_list_step (seq
, cmp
, less
, n
- mid
),
666 mid
, n
- mid
, cmp
, less
);
671 SCM rest
= SCM_CDR (*seq
);
672 SCM x
= SCM_CAR (*seq
);
673 SCM y
= SCM_CAR (SCM_CDR (*seq
));
674 *seq
= SCM_CDR (rest
);
675 SCM_SETCDR (rest
, SCM_EOL
);
676 if ((*cmp
) (less
, &y
, &x
))
687 SCM_SETCDR (p
, SCM_EOL
);
692 } /* scm_merge_list_step */
695 SCM_PROC (s_sort_x
, "sort!", 2, 0, 0, scm_sort_x
);
697 /* scm_sort_x manages lists and vectors, not stable sort */
699 scm_sort_x (SCM items
, SCM less
)
701 long len
; /* list/vector length */
702 if (SCM_NULLP(items
))
704 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort_x
);
705 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_x
);
707 if (SCM_CONSP (items
))
709 len
= scm_ilength (items
);
710 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
711 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
713 else if (SCM_VECTORP (items
))
715 len
= SCM_LENGTH (items
);
716 scm_restricted_vector_sort_x (items
,
723 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
726 SCM_PROC (s_sort
, "sort", 2, 0, 0, scm_sort
);
728 /* scm_sort manages lists and vectors, not stable sort */
730 scm_sort (SCM items
, SCM less
)
732 SCM sortvec
; /* the vector we actually sort */
733 long len
; /* list/vector length */
734 if (SCM_NULLP(items
))
736 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort
);
737 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort
);
738 if (SCM_CONSP (items
))
740 len
= scm_ilength (items
);
741 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
742 items
= scm_list_copy (items
);
743 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
745 else if (SCM_VECTORP (items
))
747 len
= SCM_LENGTH (items
);
748 sortvec
= scm_make_uve (len
, scm_array_prototype (items
));
749 scm_array_copy_x (items
, sortvec
);
750 scm_restricted_vector_sort_x (sortvec
,
757 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
761 scm_merge_vector_x (void *const vecbase
,
762 void *const tempbase
,
769 register SCM
*vp
= (SCM
*) vecbase
;
770 register SCM
*temp
= (SCM
*) tempbase
;
771 long it
; /* Index for temp vector */
772 long i1
= low
; /* Index for lower vector segment */
773 long i2
= mid
+ 1; /* Index for upper vector segment */
775 /* Copy while both segments contain more characters */
776 for (it
= low
; (i1
<= mid
) && (i2
<= high
); ++it
)
777 if ((*cmp
) (less
, &vp
[i2
], &vp
[i1
]))
782 /* Copy while first segment contains more characters */
784 temp
[it
++] = vp
[i1
++];
786 /* Copy while second segment contains more characters */
788 temp
[it
++] = vp
[i2
++];
790 /* Copy back from temp to vp */
791 for (it
= low
; it
<= high
; ++it
)
793 } /* scm_merge_vector_x */
796 scm_merge_vector_step (void *const vp
,
805 long mid
= (low
+ high
) / 2;
806 scm_merge_vector_step (vp
, temp
, cmp
, less
, low
, mid
);
807 scm_merge_vector_step (vp
, temp
, cmp
, less
, mid
+1, high
);
808 scm_merge_vector_x (vp
, temp
, cmp
, less
, low
, mid
, high
);
810 } /* scm_merge_vector_step */
813 SCM_PROC (s_stable_sort_x
, "stable-sort!", 2, 0, 0, scm_stable_sort_x
);
814 /* stable-sort! manages lists and vectors */
817 scm_stable_sort_x (SCM items
, SCM less
)
819 long len
; /* list/vector length */
821 if (SCM_NULLP (items
))
823 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort_x
);
824 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort_x
);
825 if (SCM_CONSP (items
))
827 len
= scm_ilength (items
);
828 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
829 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
831 else if (SCM_VECTORP (items
))
834 len
= SCM_LENGTH (items
);
835 temp
= malloc (len
* sizeof(SCM
));
836 vp
= SCM_VELTS (items
);
837 scm_merge_vector_step (vp
,
839 scm_cmp_function (less
),
847 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort_x
);
848 } /* scm_stable_sort_x */
850 SCM_PROC (s_stable_sort
, "stable-sort", 2, 0, 0, scm_stable_sort
);
852 /* stable_sort manages lists and vectors */
854 scm_stable_sort (SCM items
, SCM less
)
856 long len
; /* list/vector length */
857 if (SCM_NULLP (items
))
859 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort
);
860 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort
);
861 if (SCM_CONSP (items
))
863 len
= scm_ilength (items
);
864 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
865 items
= scm_list_copy (items
);
866 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
868 else if (SCM_VECTORP (items
))
872 len
= SCM_LENGTH (items
);
873 retvec
= scm_make_uve (len
, scm_array_prototype (items
));
874 scm_array_copy_x (items
, retvec
);
875 temp
= malloc (len
* sizeof (SCM
));
876 vp
= SCM_VELTS (retvec
);
877 scm_merge_vector_step (vp
,
879 scm_cmp_function (less
),
887 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort
);
888 } /* scm_stable_sort */
890 SCM_PROC (s_sort_list_x
, "sort-list!", 2, 0, 0, scm_sort_list_x
);
893 scm_sort_list_x (SCM items
, SCM less
)
895 long len
= scm_ilength (items
);
896 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list_x
);
897 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list_x
);
898 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
899 } /* scm_sort_list_x */
901 SCM_PROC (s_sort_list
, "sort-list", 2, 0, 0, scm_sort_list
);
904 scm_sort_list (SCM items
, SCM less
)
906 long len
= scm_ilength (items
);
907 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list
);
908 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list
);
909 items
= scm_list_copy (items
);
910 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
911 } /* scm_sort_list_x */
918 scm_add_feature ("sort");