1 /* Copyright (C) 1999 Free Software Foundation, Inc.
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2, or (at your option)
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this software; see the file COPYING. If not, write to
14 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
15 * Boston, MA 02111-1307 USA
17 * As a special exception, the Free Software Foundation gives permission
18 * for additional uses of the text contained in its release of GUILE.
20 * The exception is that, if you link the GUILE library with other files
21 * to produce an executable, this does not by itself cause the
22 * resulting executable to be covered by the GNU General Public License.
23 * Your use of that executable is in no way restricted on account of
24 * linking the GUILE library code into it.
26 * This exception does not however invalidate any other reasons why
27 * the executable file might be covered by the GNU General Public License.
29 * This exception applies only to the code released by the
30 * Free Software Foundation under the name GUILE. If you copy
31 * code from other Free Software Foundation releases into a copy of
32 * GUILE, as the General Public License permits, the exception does
33 * not apply to the code that you add in this way. To avoid misleading
34 * anyone as to the status of such modified files, you must delete
35 * this exception notice from them.
37 * If you write modifications of your own for GUILE, it is your choice
38 * whether to permit this exception to apply to your modifications.
39 * If you do not wish that, delete this exception notice. */
41 /* Written in December 1998 by Roland Orre <orre@nada.kth.se>
42 * This implements the same sort interface as slib/sort.scm
43 * for lists and vectors where slib defines:
44 * sorted?, merge, merge!, sort, sort!
45 * For scsh compatibility, sort-list and sort-list! are also defined.
46 * In cases where a stable-sort is required use stable-sort or
47 * stable-sort!. As additional feature is
48 * (restricted-vector-sort! vector less? startpos endpos)
49 * added, where startpos and endpos allows you to sort part of a vector.
50 * Thanks to Aubrey Jaffer for the slib/sort.scm library.
51 * Thanks to Richard A. O'Keefe (based on Prolog code by D.H.D.Warren)
52 * for the merge sort inspiration.
53 * Thanks to Douglas C. Schmidt (schmidt@ics.uci.edu) for the quicksort implementation. */
57 /* We need this to get the definitions for HAVE_ALLOCA_H, etc. */
58 #include "scmconfig.h"
60 /* AIX requires this to be the first thing in the file. The #pragma
61 directive is indented so pre-ANSI compilers will ignore it, rather
70 # ifndef alloca /* predefined by HP cc +Olibcalls */
87 /* The routine quicksort was extracted from the GNU C Library qsort.c
88 written by Douglas C. Schmidt (schmidt@ics.uci.edu)
89 and adapted to guile by adding an extra pointer less
90 to quicksort by Roland Orre <orre@nada.kth.se>.
92 The reason to do this instead of using the library function qsort
93 was to avoid dependency of the ANSI-C extensions for local functions
94 and also to avoid obscure pool based solutions.
/* Byte-wise swap two items of size SIZE. */
/* NOTE(review): inner lines of this macro were dropped in the mangled
   listing; the body below is the canonical glibc qsort.c SWAP that the
   surviving lines (the __size/__a declarations and the
   `while (--__size > 0)' tail) match — confirm against upstream. */
#define SWAP(a, b, size)                                                      \
  do                                                                          \
    {                                                                         \
      register size_t __size = (size);                                        \
      register char *__a = (a), *__b = (b);                                   \
      do                                                                      \
        {                                                                     \
          char __tmp = *__a;                                                  \
          *__a++ = *__b;                                                      \
          *__b++ = __tmp;                                                     \
        } while (--__size > 0);                                               \
    } while (0)
/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
/* NOTE(review): the #define and struct lines were dropped in the mangled
   listing; reconstructed from glibc qsort.c, from which this file's
   quicksort is derived — confirm against upstream. */
#define MAX_THRESH 4

/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
  {
    char *lo;   /* lowest address of a pending partition */
    char *hi;   /* highest address of a pending partition */
  } stack_node;
/* The next 4 #defines implement a very fast in-line stack abstraction.
   The caller supplies a `stack_node stack[STACK_SIZE]' array and a
   `top' pointer; PUSH/POP store and retrieve partition bounds through
   `top', and STACK_NOT_EMPTY tests whether anything is pending. */
#define STACK_SIZE (8 * sizeof(unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)
130 /* Order size using quicksort. This implementation incorporates
131 four optimizations discussed in Sedgewick:
133 1. Non-recursive, using an explicit stack of pointers that stores the
134 next array partition to sort. To save time, this maximum amount
135 of space required to store an array of MAX_INT is allocated on the
136 stack. Assuming a 32-bit integer, this needs only 32 *
137 sizeof(stack_node) == 136 bits. Pretty cheap, actually.
139 2. Chose the pivot element using a median-of-three decision tree.
140 This reduces the probability of selecting a bad pivot value and
141 eliminates certain extraneous comparisons.
143 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
144 insertion sort to order the MAX_THRESH items within each partition.
145 This is a big win, since insertion sort is faster for small, mostly
146 sorted array segments.
148 4. The larger of the two sub-partitions is always pushed onto the
149 stack first, with the algorithm then concentrating on the
150 smaller partition. This *guarantees* no more than log (n)
151 stack size is needed (actually O(1) in this case)! */
153 typedef int (*cmp_fun_t
) (SCM less
,
158 quicksort (void *const pbase
,
164 register char *base_ptr
= (char *) pbase
;
166 /* Allocating SIZE bytes for a pivot buffer facilitates a better
167 algorithm below since we can do comparisons directly on the pivot. */
168 char *pivot_buffer
= (char *) alloca (size
);
169 const size_t max_thresh
= MAX_THRESH
* size
;
171 if (total_elems
== 0)
172 /* Avoid lossage with unsigned arithmetic below. */
175 if (total_elems
> MAX_THRESH
)
178 char *hi
= &lo
[size
* (total_elems
- 1)];
179 /* Largest size needed for 32-bit int!!! */
180 stack_node stack
[STACK_SIZE
];
181 stack_node
*top
= stack
+ 1;
183 while (STACK_NOT_EMPTY
)
188 char *pivot
= pivot_buffer
;
190 /* Select median value from among LO, MID, and HI. Rearrange
191 LO and HI so the three values are sorted. This lowers the
192 probability of picking a pathological pivot value and
193 skips a comparison for both the LEFT_PTR and RIGHT_PTR. */
195 char *mid
= lo
+ size
* ((hi
- lo
) / size
>> 1);
197 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
198 SWAP (mid
, lo
, size
);
199 if ((*cmp
) (less
, (void *) hi
, (void *) mid
))
200 SWAP (mid
, hi
, size
);
203 if ((*cmp
) (less
, (void *) mid
, (void *) lo
))
204 SWAP (mid
, lo
, size
);
206 memcpy (pivot
, mid
, size
);
207 pivot
= pivot_buffer
;
209 left_ptr
= lo
+ size
;
210 right_ptr
= hi
- size
;
212 /* Here's the famous ``collapse the walls'' section of quicksort.
213 Gotta like those tight inner loops! They are the main reason
214 that this algorithm runs much faster than others. */
217 while ((*cmp
) (less
, (void *) left_ptr
, (void *) pivot
))
220 while ((*cmp
) (less
, (void *) pivot
, (void *) right_ptr
))
223 if (left_ptr
< right_ptr
)
225 SWAP (left_ptr
, right_ptr
, size
);
229 else if (left_ptr
== right_ptr
)
236 while (left_ptr
<= right_ptr
);
238 /* Set up pointers for next iteration. First determine whether
239 left and right partitions are below the threshold size. If so,
240 ignore one or both. Otherwise, push the larger partition's
241 bounds on the stack and continue sorting the smaller one. */
243 if ((size_t) (right_ptr
- lo
) <= max_thresh
)
245 if ((size_t) (hi
- left_ptr
) <= max_thresh
)
246 /* Ignore both small partitions. */
249 /* Ignore small left partition. */
252 else if ((size_t) (hi
- left_ptr
) <= max_thresh
)
253 /* Ignore small right partition. */
255 else if ((right_ptr
- lo
) > (hi
- left_ptr
))
257 /* Push larger left partition indices. */
258 PUSH (lo
, right_ptr
);
263 /* Push larger right partition indices. */
270 /* Once the BASE_PTR array is partially sorted by quicksort the rest
271 is completely sorted using insertion sort, since this is efficient
272 for partitions below MAX_THRESH size. BASE_PTR points to the beginning
273 of the array to sort, and END_PTR points at the very last element in
274 the array (*not* one beyond it!). */
277 char *const end_ptr
= &base_ptr
[size
* (total_elems
- 1)];
278 char *tmp_ptr
= base_ptr
;
279 char *thresh
= min (end_ptr
, base_ptr
+ max_thresh
);
280 register char *run_ptr
;
282 /* Find smallest element in first threshold and place it at the
283 array's beginning. This is the smallest array element,
284 and the operation speeds up insertion sort's inner loop. */
286 for (run_ptr
= tmp_ptr
+ size
; run_ptr
<= thresh
; run_ptr
+= size
)
287 if ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
290 if (tmp_ptr
!= base_ptr
)
291 SWAP (tmp_ptr
, base_ptr
, size
);
293 /* Insertion sort, running from left-hand-side up to right-hand-side. */
295 run_ptr
= base_ptr
+ size
;
296 while ((run_ptr
+= size
) <= end_ptr
)
298 tmp_ptr
= run_ptr
- size
;
299 while ((*cmp
) (less
, (void *) run_ptr
, (void *) tmp_ptr
))
303 if (tmp_ptr
!= run_ptr
)
307 trav
= run_ptr
+ size
;
308 while (--trav
>= run_ptr
)
313 for (hi
= lo
= trav
; (lo
-= size
) >= tmp_ptr
; hi
= lo
)
323 /* comparison routines */
326 subr2less (SCM less
, const void *a
, const void *b
)
328 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
, *(SCM
*) b
));
332 subr2oless (SCM less
, const void *a
, const void *b
)
334 return SCM_NFALSEP (SCM_SUBRF (less
) (*(SCM
*) a
,
340 lsubrless (SCM less
, const void *a
, const void *b
)
342 return SCM_NFALSEP (SCM_SUBRF (less
)
343 (scm_cons (*(SCM
*) a
,
344 scm_cons (*(SCM
*) b
, SCM_EOL
))));
348 closureless (SCM code
, const void *a
, const void *b
)
351 env
= SCM_EXTEND_ENV (SCM_CAR (SCM_CODE (code
)),
352 scm_cons (*(SCM
*) a
,
353 scm_cons (*(SCM
*) b
, SCM_EOL
)),
355 /* Evaluate the closure body */
356 code
= SCM_CDR (SCM_CODE (code
));
357 while (SCM_IMP (SCM_CAR (code
)) && SCM_ISYMP (SCM_CAR (code
)))
358 code
= scm_m_expand_body (code
, env
);
360 while (SCM_NNULLP (next
= SCM_CDR (next
)))
362 if (SCM_NIMP (SCM_CAR (code
)))
363 SCM_XEVAL (SCM_CAR (code
), env
);
366 return SCM_NFALSEP (SCM_XEVALCAR (code
, env
));
370 applyless (SCM less
, const void *a
, const void *b
)
372 return SCM_NFALSEP (scm_apply ((SCM
) less
,
373 scm_cons (*(SCM
*) a
,
374 scm_cons (*(SCM
*) b
, SCM_EOL
)),
379 scm_cmp_function (SCM p
)
381 switch (SCM_TYP7 (p
))
387 case scm_tc7_subr_2o
:
391 case scm_tcs_closures
:
396 } /* scm_cmp_function */
398 SCM_PROC (s_restricted_vector_sort_x
, "restricted-vector-sort!", 4, 0, 0, scm_restricted_vector_sort_x
);
400 /* Question: Is there any need to make this a more general array sort?
401 It is probably enough to manage the vector type. */
402 /* endpos equal as for substring, i.e. endpos is not included. */
403 /* More natural wih length? */
405 scm_restricted_vector_sort_x (SCM vec
, SCM less
, SCM startpos
, SCM endpos
)
407 size_t vlen
, spos
, len
, size
= sizeof (SCM
);
410 SCM_ASSERT (SCM_NIMP (vec
), vec
, SCM_ARG1
, s_restricted_vector_sort_x
);
411 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_restricted_vector_sort_x
);
412 switch (SCM_TYP7 (vec
))
414 case scm_tc7_vector
: /* the only type we manage is vector */
416 case scm_tc7_ivect
: /* long */
417 case scm_tc7_uvect
: /* unsigned */
418 case scm_tc7_fvect
: /* float */
419 case scm_tc7_dvect
: /* double */
421 scm_wta (vec
, (char *) SCM_ARG1
, s_restricted_vector_sort_x
);
423 vp
= SCM_VELTS (vec
); /* vector pointer */
424 vlen
= SCM_LENGTH (vec
);
426 SCM_ASSERT (SCM_INUMP(startpos
),
427 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
428 spos
= SCM_INUM (startpos
);
429 SCM_ASSERT ((spos
>= 0) && (spos
<= vlen
),
430 startpos
, SCM_ARG3
, s_restricted_vector_sort_x
);
431 SCM_ASSERT ((SCM_INUMP (endpos
)) && (SCM_INUM (endpos
) <= vlen
),
432 endpos
, SCM_ARG4
, s_restricted_vector_sort_x
);
433 len
= SCM_INUM (endpos
) - spos
;
435 quicksort (&vp
[spos
], len
, size
, scm_cmp_function (less
), less
);
436 return SCM_UNSPECIFIED
;
438 } /* scm_restricted_vector_sort_x */
440 /* (sorted? sequence less?)
441 * is true when sequence is a list (x0 x1 ... xm) or a vector #(x0 ... xm)
442 * such that for all 1 <= i <= m,
443 * (not (less? (list-ref list i) (list-ref list (- i 1)))). */
444 SCM_PROC (s_sorted_p
, "sorted?", 2, 0, 0, scm_sorted_p
);
447 scm_sorted_p (SCM items
, SCM less
)
449 long len
, j
; /* list/vector length, temp j */
450 SCM item
, rest
; /* rest of items loop variable */
452 cmp_fun_t cmp
= scm_cmp_function (less
);
454 if (SCM_NULLP (items
))
456 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sorted_p
);
457 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sorted_p
);
459 if (SCM_CONSP (items
))
461 len
= scm_ilength (items
); /* also checks that it's a pure list */
462 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sorted_p
);
466 item
= SCM_CAR (items
);
467 rest
= SCM_CDR (items
);
471 if ((*cmp
) (less
, &SCM_CAR(rest
), &item
))
475 item
= SCM_CAR (rest
);
476 rest
= SCM_CDR (rest
);
484 switch (SCM_TYP7 (items
))
488 vp
= SCM_VELTS (items
); /* vector pointer */
489 len
= SCM_LENGTH (items
);
493 if ((*cmp
) (less
, &vp
[1], vp
))
504 case scm_tc7_ivect
: /* long */
505 case scm_tc7_uvect
: /* unsigned */
506 case scm_tc7_fvect
: /* float */
507 case scm_tc7_dvect
: /* double */
509 scm_wta (items
, (char *) SCM_ARG1
, s_sorted_p
);
516 takes two lists a and b such that (sorted? a less?) and (sorted? b less?)
517 and returns a new list in which the elements of a and b have been stably
518 interleaved so that (sorted? (merge a b less?) less?).
519 Note: this does _not_ accept vectors. */
520 SCM_PROC (s_merge
, "merge", 3, 0, 0, scm_merge
);
523 scm_merge (SCM alist
, SCM blist
, SCM less
)
525 long alen
, blen
; /* list lengths */
527 cmp_fun_t cmp
= scm_cmp_function (less
);
528 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge
);
530 if (SCM_NULLP (alist
))
532 else if (SCM_NULLP (blist
))
536 alen
= scm_ilength (alist
); /* checks that it's a pure list */
537 blen
= scm_ilength (blist
); /* checks that it's a pure list */
538 SCM_ASSERT (alen
> 0, alist
, SCM_ARG1
, s_merge
);
539 SCM_ASSERT (blen
> 0, blist
, SCM_ARG2
, s_merge
);
540 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
542 build
= scm_cons (SCM_CAR (blist
), SCM_EOL
);
543 blist
= SCM_CDR (blist
);
548 build
= scm_cons (SCM_CAR (alist
), SCM_EOL
);
549 alist
= SCM_CDR (alist
);
553 while ((alen
> 0) && (blen
> 0))
555 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
557 SCM_SETCDR (last
, scm_cons (SCM_CAR (blist
), SCM_EOL
));
558 blist
= SCM_CDR (blist
);
563 SCM_SETCDR (last
, scm_cons (SCM_CAR (alist
), SCM_EOL
));
564 alist
= SCM_CDR (alist
);
567 last
= SCM_CDR (last
);
569 if ((alen
> 0) && (blen
== 0))
570 SCM_SETCDR (last
, alist
);
571 else if ((alen
== 0) && (blen
> 0))
572 SCM_SETCDR (last
, blist
);
578 scm_merge_list_x (SCM alist
, SCM blist
,
579 long alen
, long blen
,
580 cmp_fun_t cmp
, SCM less
)
584 if (SCM_NULLP (alist
))
586 else if (SCM_NULLP (blist
))
590 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
593 blist
= SCM_CDR (blist
);
599 alist
= SCM_CDR (alist
);
603 while ((alen
> 0) && (blen
> 0))
605 if ((*cmp
) (less
, &SCM_CAR (blist
), &SCM_CAR (alist
)))
607 SCM_SETCDR (last
, blist
);
608 blist
= SCM_CDR (blist
);
613 SCM_SETCDR (last
, alist
);
614 alist
= SCM_CDR (alist
);
617 last
= SCM_CDR (last
);
619 if ((alen
> 0) && (blen
== 0))
620 SCM_SETCDR (last
, alist
);
621 else if ((alen
== 0) && (blen
> 0))
622 SCM_SETCDR (last
, blist
);
625 } /* scm_merge_list_x */
627 SCM_PROC (s_merge_x
, "merge!", 3, 0, 0, scm_merge_x
);
630 scm_merge_x (SCM alist
, SCM blist
, SCM less
)
632 long alen
, blen
; /* list lengths */
634 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_merge_x
);
635 if (SCM_NULLP (alist
))
637 else if (SCM_NULLP (blist
))
641 alen
= scm_ilength (alist
); /* checks that it's a pure list */
642 blen
= scm_ilength (blist
); /* checks that it's a pure list */
643 SCM_ASSERT (alen
>= 0, alist
, SCM_ARG1
, s_merge
);
644 SCM_ASSERT (blen
>= 0, blist
, SCM_ARG2
, s_merge
);
645 return scm_merge_list_x (alist
, blist
,
647 scm_cmp_function (less
),
652 /* This merge sort algorithm is same as slib's by Richard A. O'Keefe.
653 The algorithm is stable. We also tried to use the algorithm used by
654 scsh's merge-sort but that algorithm showed to not be stable, even
655 though it claimed to be.
658 scm_merge_list_step (SCM
* seq
,
668 a
= scm_merge_list_step (seq
, cmp
, less
, mid
);
669 b
= scm_merge_list_step (seq
, cmp
, less
, n
- mid
);
670 return scm_merge_list_x (a
, b
, mid
, n
- mid
, cmp
, less
);
675 SCM rest
= SCM_CDR (*seq
);
676 SCM x
= SCM_CAR (*seq
);
677 SCM y
= SCM_CAR (SCM_CDR (*seq
));
678 *seq
= SCM_CDR (rest
);
679 SCM_SETCDR (rest
, SCM_EOL
);
680 if ((*cmp
) (less
, &y
, &x
))
691 SCM_SETCDR (p
, SCM_EOL
);
696 } /* scm_merge_list_step */
699 SCM_PROC (s_sort_x
, "sort!", 2, 0, 0, scm_sort_x
);
701 /* scm_sort_x manages lists and vectors, not stable sort */
703 scm_sort_x (SCM items
, SCM less
)
705 long len
; /* list/vector length */
706 if (SCM_NULLP(items
))
708 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort_x
);
709 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_x
);
711 if (SCM_CONSP (items
))
713 len
= scm_ilength (items
);
714 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
715 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
717 else if (SCM_VECTORP (items
))
719 len
= SCM_LENGTH (items
);
720 scm_restricted_vector_sort_x (items
,
727 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
730 SCM_PROC (s_sort
, "sort", 2, 0, 0, scm_sort
);
732 /* scm_sort manages lists and vectors, not stable sort */
734 scm_sort (SCM items
, SCM less
)
736 SCM sortvec
; /* the vector we actually sort */
737 long len
; /* list/vector length */
738 if (SCM_NULLP(items
))
740 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_sort
);
741 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort
);
742 if (SCM_CONSP (items
))
744 len
= scm_ilength (items
);
745 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
746 items
= scm_list_copy (items
);
747 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
749 else if (SCM_VECTORP (items
))
751 len
= SCM_LENGTH (items
);
752 sortvec
= scm_make_uve (len
, scm_array_prototype (items
));
753 scm_array_copy_x (items
, sortvec
);
754 scm_restricted_vector_sort_x (sortvec
,
761 return scm_wta (items
, (char *) SCM_ARG1
, s_sort_x
);
765 scm_merge_vector_x (void *const vecbase
,
766 void *const tempbase
,
773 register SCM
*vp
= (SCM
*) vecbase
;
774 register SCM
*temp
= (SCM
*) tempbase
;
775 long it
; /* Index for temp vector */
776 long i1
= low
; /* Index for lower vector segment */
777 long i2
= mid
+ 1; /* Index for upper vector segment */
779 /* Copy while both segments contain more characters */
780 for (it
= low
; (i1
<= mid
) && (i2
<= high
); ++it
)
781 if ((*cmp
) (less
, &vp
[i2
], &vp
[i1
]))
786 /* Copy while first segment contains more characters */
788 temp
[it
++] = vp
[i1
++];
790 /* Copy while second segment contains more characters */
792 temp
[it
++] = vp
[i2
++];
794 /* Copy back from temp to vp */
795 for (it
= low
; it
<= high
; ++it
)
797 } /* scm_merge_vector_x */
800 scm_merge_vector_step (void *const vp
,
809 long mid
= (low
+ high
) / 2;
810 scm_merge_vector_step (vp
, temp
, cmp
, less
, low
, mid
);
811 scm_merge_vector_step (vp
, temp
, cmp
, less
, mid
+1, high
);
812 scm_merge_vector_x (vp
, temp
, cmp
, less
, low
, mid
, high
);
814 } /* scm_merge_vector_step */
817 SCM_PROC (s_stable_sort_x
, "stable-sort!", 2, 0, 0, scm_stable_sort_x
);
818 /* stable-sort! manages lists and vectors */
821 scm_stable_sort_x (SCM items
, SCM less
)
823 long len
; /* list/vector length */
825 if (SCM_NULLP (items
))
827 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort_x
);
828 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort_x
);
829 if (SCM_CONSP (items
))
831 len
= scm_ilength (items
);
832 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_x
);
833 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
835 else if (SCM_VECTORP (items
))
838 len
= SCM_LENGTH (items
);
839 temp
= malloc (len
* sizeof(SCM
));
840 vp
= SCM_VELTS (items
);
841 scm_merge_vector_step (vp
,
843 scm_cmp_function (less
),
851 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort_x
);
852 } /* scm_stable_sort_x */
854 SCM_PROC (s_stable_sort
, "stable-sort", 2, 0, 0, scm_stable_sort
);
856 /* stable_sort manages lists and vectors */
858 scm_stable_sort (SCM items
, SCM less
)
860 long len
; /* list/vector length */
861 if (SCM_NULLP (items
))
863 SCM_ASSERT (SCM_NIMP (items
), items
, SCM_ARG1
, s_stable_sort
);
864 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_stable_sort
);
865 if (SCM_CONSP (items
))
867 len
= scm_ilength (items
);
868 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort
);
869 items
= scm_list_copy (items
);
870 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
872 else if (SCM_VECTORP (items
))
876 len
= SCM_LENGTH (items
);
877 retvec
= scm_make_uve (len
, scm_array_prototype (items
));
878 scm_array_copy_x (items
, retvec
);
879 temp
= malloc (len
* sizeof (SCM
));
880 vp
= SCM_VELTS (retvec
);
881 scm_merge_vector_step (vp
,
883 scm_cmp_function (less
),
891 return scm_wta (items
, (char *) SCM_ARG1
, s_stable_sort
);
892 } /* scm_stable_sort */
894 SCM_PROC (s_sort_list_x
, "sort-list!", 2, 0, 0, scm_sort_list_x
);
897 scm_sort_list_x (SCM items
, SCM less
)
899 long len
= scm_ilength (items
);
900 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list_x
);
901 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list_x
);
902 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
903 } /* scm_sort_list_x */
905 SCM_PROC (s_sort_list
, "sort-list", 2, 0, 0, scm_sort_list
);
908 scm_sort_list (SCM items
, SCM less
)
910 long len
= scm_ilength (items
);
911 SCM_ASSERT (len
>= 0, items
, SCM_ARG1
, s_sort_list
);
912 SCM_ASSERT (SCM_NIMP (less
), less
, SCM_ARG2
, s_sort_list
);
913 items
= scm_list_copy (items
);
914 return scm_merge_list_step (&items
, scm_cmp_function (less
), less
, len
);
915 } /* scm_sort_list_x */
/* NOTE(review): orphaned statement — the enclosing function (presumably
   the module's init routine) lost its header lines in this mangled
   chunk, so it is kept verbatim.  Registers the "sort" feature so
   Scheme code can test (provided? 'sort). */
922 scm_add_feature ("sort");