1999-08-19 Gary Houston <ghouston@easynet.co.uk>
[bpt/guile.git] / libguile / sort.c
/* Copyright (C) 1999 Free Software Foundation, Inc.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */

/* Written in December 1998 by Roland Orre <orre@nada.kth.se>
 * This implements the same sort interface as slib/sort.scm
 * for lists and vectors, where slib defines:
 * sorted?, merge, merge!, sort, sort!
 * For scsh compatibility, sort-list and sort-list! are also defined.
 * Where a stable sort is required, use stable-sort or stable-sort!.
 * As an additional feature,
 * (restricted-vector-sort! vector less? startpos endpos)
 * is provided, where startpos and endpos let you sort part of a vector.
 * Thanks to Aubrey Jaffer for the slib/sort.scm library.
 * Thanks to Richard A. O'Keefe (based on Prolog code by D.H.D.Warren)
 * for the merge sort inspiration.
 * Thanks to Douglas C. Schmidt (schmidt@ics.uci.edu) for the
 * quicksort code.
 */
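
/* Illustrative sketch, not part of the original interface above: how the
   list entry point is driven from C.  It uses only functions defined in
   this file and the core headers; `less' is any Scheme ordering predicate,
   e.g. the value of `<'.  Kept under `#if 0' because it is a sketch, not
   shipped code.  */
#if 0
static SCM
example_sort_fixnums (SCM less)
{
  /* Build the list (3 1 2) and sort it destructively, the C-level
     equivalent of (sort! '(3 1 2) less).  */
  SCM lst = scm_cons (SCM_MAKINUM (3L),
                      scm_cons (SCM_MAKINUM (1L),
                                scm_cons (SCM_MAKINUM (2L), SCM_EOL)));
  return scm_sort_x (lst, less);
}
#endif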

/* We need this to get the definitions for HAVE_ALLOCA_H, etc. */
#include "scmconfig.h"

/* AIX requires this to be the first thing in the file.  The #pragma
   directive is indented so pre-ANSI compilers will ignore it, rather
   than choke on it.  */
#ifndef __GNUC__
# if HAVE_ALLOCA_H
#  include <alloca.h>
# else
#  ifdef _AIX
 #pragma alloca
#  else
#   ifndef alloca /* predefined by HP cc +Olibcalls */
char *alloca ();
#   endif
#  endif
# endif
#endif

#include "_scm.h"

#include "eval.h"
#include "unif.h"
#include "ramap.h"
#include "alist.h"
#include "feature.h"

#include "sort.h"

/* The routine quicksort was extracted from the GNU C Library qsort.c,
   written by Douglas C. Schmidt (schmidt@ics.uci.edu), and adapted to
   Guile by Roland Orre <orre@nada.kth.se>, who added an extra parameter,
   less, to quicksort.

   The reason for doing this instead of using the library function qsort
   was to avoid depending on non-ANSI-C extensions for local (nested)
   functions and also to avoid obscure pool-based solutions.
 */
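
/* Illustrative contrast, not part of the original code: a standard qsort
   comparison cannot see the Scheme predicate without a nested function or
   a global variable, which is exactly what the extra `less' argument
   avoids.  A sketch of the two shapes, assuming nothing beyond ANSI C:  */
#if 0
typedef int (*ansi_cmp_sketch_t) (const void *a, const void *b);             /* qsort */
typedef int (*guile_cmp_sketch_t) (SCM less, const void *a, const void *b);  /* here  */
#endif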

/* Byte-wise swap two items of size SIZE. */
#define SWAP(a, b, size) \
  do \
    { \
      register size_t __size = (size); \
      register char *__a = (a), *__b = (b); \
      do \
        { \
          char __tmp = *__a; \
          *__a++ = *__b; \
          *__b++ = __tmp; \
        } while (--__size > 0); \
    } while (0)
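
/* Minimal sketch, not in the original file: SWAP moves raw bytes, so
   exchanging two SCM slots is just a SWAP over sizeof (SCM) bytes.
   Kept under `#if 0' since it is illustrative only.  */
#if 0
static void
example_swap_scm (SCM *a, SCM *b)
{
  SWAP ((char *) a, (char *) b, sizeof (SCM));
}
#endif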

/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4

/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
  {
    char *lo;
    char *hi;
  }
stack_node;

/* The next 4 #defines implement a very fast in-line stack abstraction. */
#define STACK_SIZE (8 * sizeof (unsigned long int))
#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
#define STACK_NOT_EMPTY (stack < top)


/* Order size using quicksort.  This implementation incorporates
   four optimizations discussed in Sedgewick:

   1. Non-recursive, using an explicit stack of pointers that stores the
      next array partition to sort.  To save time, the maximum amount
      of space required to store an array of MAX_INT is allocated on the
      stack.  Assuming a 32-bit integer and 4-byte pointers, this needs
      only 32 * sizeof (stack_node) == 256 bytes.  Pretty cheap, actually.

   2. Choose the pivot element using a median-of-three decision tree.
      This reduces the probability of selecting a bad pivot value and
      eliminates certain extraneous comparisons.

   3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
      insertion sort to order the MAX_THRESH items within each partition.
      This is a big win, since insertion sort is faster for small, mostly
      sorted array segments.

   4. The larger of the two sub-partitions is always pushed onto the
      stack first, with the algorithm then concentrating on the
      smaller partition.  This *guarantees* no more than log (n)
      stack size is needed (actually O(1) in this case)!  */
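
/* Sketch of point 4 only, not part of the original code: always descend
   into the smaller side first and loop on the larger one.  Every pending
   range left behind is then at least half of its parent, so at most about
   log2 (n) ranges are ever outstanding, which is why the fixed-size
   PUSH/POP stack above suffices.  The partitioning itself is elided here;
   see quicksort () below for the real thing.  */
#if 0
static void
example_smaller_side_first (size_t lo, size_t hi)
{
  while (lo < hi)
    {
      size_t mid = lo + (hi - lo) / 2;  /* stand-in for the pivot position */
      if (mid - lo < hi - mid)
        {
          example_smaller_side_first (lo, mid);   /* smaller side: recurse */
          lo = mid + 1;                           /* larger side: iterate  */
        }
      else
        {
          example_smaller_side_first (mid + 1, hi);
          hi = mid;
        }
    }
}
#endif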

typedef int (*cmp_fun_t) (SCM less,
                          const void*,
                          const void*);

static void
quicksort (void *const pbase,
           size_t total_elems,
           size_t size,
           cmp_fun_t cmp,
           SCM less)
{
  register char *base_ptr = (char *) pbase;

  /* Allocating SIZE bytes for a pivot buffer facilitates a better
     algorithm below since we can do comparisons directly on the pivot. */
  char *pivot_buffer = (char *) alloca (size);
  const size_t max_thresh = MAX_THRESH * size;

  if (total_elems == 0)
    /* Avoid lossage with unsigned arithmetic below. */
    return;

  if (total_elems > MAX_THRESH)
    {
      char *lo = base_ptr;
      char *hi = &lo[size * (total_elems - 1)];
      /* Largest size needed for 32-bit int!!! */
      stack_node stack[STACK_SIZE];
      stack_node *top = stack + 1;

      while (STACK_NOT_EMPTY)
        {
          char *left_ptr;
          char *right_ptr;

          char *pivot = pivot_buffer;

          /* Select median value from among LO, MID, and HI. Rearrange
             LO and HI so the three values are sorted. This lowers the
             probability of picking a pathological pivot value and
             skips a comparison for both the LEFT_PTR and RIGHT_PTR. */

          char *mid = lo + size * ((hi - lo) / size >> 1);

          if ((*cmp) (less, (void *) mid, (void *) lo))
            SWAP (mid, lo, size);
          if ((*cmp) (less, (void *) hi, (void *) mid))
            SWAP (mid, hi, size);
          else
            goto jump_over;
          if ((*cmp) (less, (void *) mid, (void *) lo))
            SWAP (mid, lo, size);
        jump_over:;
          memcpy (pivot, mid, size);
          pivot = pivot_buffer;

          left_ptr = lo + size;
          right_ptr = hi - size;

          /* Here's the famous ``collapse the walls'' section of quicksort.
             Gotta like those tight inner loops!  They are the main reason
             that this algorithm runs much faster than others. */
          do
            {
              while ((*cmp) (less, (void *) left_ptr, (void *) pivot))
                left_ptr += size;

              while ((*cmp) (less, (void *) pivot, (void *) right_ptr))
                right_ptr -= size;

              if (left_ptr < right_ptr)
                {
                  SWAP (left_ptr, right_ptr, size);
                  left_ptr += size;
                  right_ptr -= size;
                }
              else if (left_ptr == right_ptr)
                {
                  left_ptr += size;
                  right_ptr -= size;
                  break;
                }
            }
          while (left_ptr <= right_ptr);

          /* Set up pointers for next iteration.  First determine whether
             left and right partitions are below the threshold size.  If so,
             ignore one or both.  Otherwise, push the larger partition's
             bounds on the stack and continue sorting the smaller one. */

          if ((size_t) (right_ptr - lo) <= max_thresh)
            {
              if ((size_t) (hi - left_ptr) <= max_thresh)
                /* Ignore both small partitions. */
                POP (lo, hi);
              else
                /* Ignore small left partition. */
                lo = left_ptr;
            }
          else if ((size_t) (hi - left_ptr) <= max_thresh)
            /* Ignore small right partition. */
            hi = right_ptr;
          else if ((right_ptr - lo) > (hi - left_ptr))
            {
              /* Push larger left partition indices. */
              PUSH (lo, right_ptr);
              lo = left_ptr;
            }
          else
            {
              /* Push larger right partition indices. */
              PUSH (left_ptr, hi);
              hi = right_ptr;
            }
        }
    }

  /* Once the BASE_PTR array is partially sorted by quicksort the rest
     is completely sorted using insertion sort, since this is efficient
     for partitions below MAX_THRESH size. BASE_PTR points to the beginning
     of the array to sort, and END_PTR points at the very last element in
     the array (*not* one beyond it!). */

  {
    char *const end_ptr = &base_ptr[size * (total_elems - 1)];
    char *tmp_ptr = base_ptr;
    char *thresh = min (end_ptr, base_ptr + max_thresh);
    register char *run_ptr;

    /* Find smallest element in first threshold and place it at the
       array's beginning.  This is the smallest array element,
       and the operation speeds up insertion sort's inner loop. */

    for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
      if ((*cmp) (less, (void *) run_ptr, (void *) tmp_ptr))
        tmp_ptr = run_ptr;

    if (tmp_ptr != base_ptr)
      SWAP (tmp_ptr, base_ptr, size);

    /* Insertion sort, running from left-hand-side up to right-hand-side. */

    run_ptr = base_ptr + size;
    while ((run_ptr += size) <= end_ptr)
      {
        tmp_ptr = run_ptr - size;
        while ((*cmp) (less, (void *) run_ptr, (void *) tmp_ptr))
          tmp_ptr -= size;

        tmp_ptr += size;
        if (tmp_ptr != run_ptr)
          {
            char *trav;

            trav = run_ptr + size;
            while (--trav >= run_ptr)
              {
                char c = *trav;
                char *hi, *lo;

                for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
                  *hi = *lo;
                *hi = c;
              }
          }
      }
  }
} /* quicksort */


/* comparison routines */

static int
subr2less (SCM less, const void *a, const void *b)
{
  return SCM_NFALSEP (SCM_SUBRF (less) (*(SCM *) a, *(SCM *) b));
} /* subr2less */

static int
subr2oless (SCM less, const void *a, const void *b)
{
  return SCM_NFALSEP (SCM_SUBRF (less) (*(SCM *) a,
                                        *(SCM *) b,
                                        SCM_UNDEFINED));
} /* subr2oless */

static int
lsubrless (SCM less, const void *a, const void *b)
{
  return SCM_NFALSEP (SCM_SUBRF (less)
                      (scm_cons (*(SCM *) a,
                                 scm_cons (*(SCM *) b, SCM_EOL))));
} /* lsubrless */

static int
closureless (SCM code, const void *a, const void *b)
{
  SCM env, next;
  env = SCM_EXTEND_ENV (SCM_CAR (SCM_CODE (code)),
                        scm_cons (*(SCM *) a,
                                  scm_cons (*(SCM *) b, SCM_EOL)),
                        SCM_ENV (code));
  /* Evaluate the closure body */
  code = SCM_CDR (SCM_CODE (code));
  while (SCM_IMP (SCM_CAR (code)) && SCM_ISYMP (SCM_CAR (code)))
    code = scm_m_expand_body (code, env);
  next = code;
  while (SCM_NNULLP (next = SCM_CDR (next)))
    {
      if (SCM_NIMP (SCM_CAR (code)))
        SCM_XEVAL (SCM_CAR (code), env);
      code = next;
    }
  return SCM_NFALSEP (SCM_XEVALCAR (code, env));
} /* closureless */

static int
applyless (SCM less, const void *a, const void *b)
{
  return SCM_NFALSEP (scm_apply ((SCM) less,
                                 scm_cons (*(SCM *) a,
                                           scm_cons (*(SCM *) b, SCM_EOL)),
                                 SCM_EOL));
} /* applyless */

static cmp_fun_t
scm_cmp_function (SCM p)
{
  switch (SCM_TYP7 (p))
    {
    case scm_tc7_subr_2:
    case scm_tc7_rpsubr:
    case scm_tc7_asubr:
      return subr2less;
    case scm_tc7_subr_2o:
      return subr2oless;
    case scm_tc7_lsubr:
      return lsubrless;
    case scm_tcs_closures:
      return closureless;
    default:
      return applyless;
    }
} /* scm_cmp_function */
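
/* Minimal sketch, not in the original file: how the dispatcher above is
   meant to be used.  `less' is any Scheme ordering predicate; x and y are
   the SCM objects to compare.  Every sort entry point below follows this
   pattern.  */
#if 0
static int
example_call_less (SCM less, SCM x, SCM y)
{
  cmp_fun_t cmp = scm_cmp_function (less);
  return (*cmp) (less, (void *) &x, (void *) &y);
}
#endif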

SCM_PROC (s_restricted_vector_sort_x, "restricted-vector-sort!", 4, 0, 0, scm_restricted_vector_sort_x);

/* Question: Is there any need to make this a more general array sort?
   It is probably enough to manage the vector type. */
/* endpos is exclusive, as for substring: the element at endpos itself is
   not included. */
/* Would it be more natural to take a length instead? */
SCM
scm_restricted_vector_sort_x (SCM vec, SCM less, SCM startpos, SCM endpos)
{
  size_t vlen, spos, len, size = sizeof (SCM);
  SCM *vp;

  SCM_ASSERT (SCM_NIMP (vec), vec, SCM_ARG1, s_restricted_vector_sort_x);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_restricted_vector_sort_x);
  switch (SCM_TYP7 (vec))
    {
    case scm_tc7_vector:  /* the only type we manage is vector */
      break;
    case scm_tc7_ivect:   /* long */
    case scm_tc7_uvect:   /* unsigned */
    case scm_tc7_fvect:   /* float */
    case scm_tc7_dvect:   /* double */
    default:
      scm_wta (vec, (char *) SCM_ARG1, s_restricted_vector_sort_x);
    }
  vp = SCM_VELTS (vec);  /* vector pointer */
  vlen = SCM_LENGTH (vec);

  SCM_ASSERT (SCM_INUMP (startpos),
              startpos, SCM_ARG3, s_restricted_vector_sort_x);
  spos = SCM_INUM (startpos);
  SCM_ASSERT ((spos >= 0) && (spos <= vlen),
              startpos, SCM_ARG3, s_restricted_vector_sort_x);
  SCM_ASSERT ((SCM_INUMP (endpos)) && (SCM_INUM (endpos) <= vlen),
              endpos, SCM_ARG4, s_restricted_vector_sort_x);
  len = SCM_INUM (endpos) - spos;

  quicksort (&vp[spos], len, size, scm_cmp_function (less), less);
  return SCM_UNSPECIFIED;
  /* return vec; */
} /* scm_restricted_vector_sort_x */
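
/* Illustrative only, not part of the original interface: sort elements
   [1, 4) of a Scheme vector in place, the C-level equivalent of
   (restricted-vector-sort! v less 1 4).  Remember that endpos is
   exclusive.  */
#if 0
static SCM
example_sort_subrange (SCM v, SCM less)
{
  return scm_restricted_vector_sort_x (v, less,
                                       SCM_MAKINUM (1L),
                                       SCM_MAKINUM (4L));
}
#endif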

/* (sorted? sequence less?)
 * is true when sequence is a list (x0 x1 ... xm) or a vector #(x0 ... xm)
 * such that for all 1 <= i <= m,
 * (not (less? (list-ref list i) (list-ref list (- i 1)))). */
SCM_PROC (s_sorted_p, "sorted?", 2, 0, 0, scm_sorted_p);

SCM
scm_sorted_p (SCM items, SCM less)
{
  long len, j;     /* list/vector length, temp j */
  SCM item, rest;  /* rest of items loop variable */
  SCM *vp;
  cmp_fun_t cmp = scm_cmp_function (less);

  if (SCM_NULLP (items))
    return SCM_BOOL_T;
  SCM_ASSERT (SCM_NIMP (items), items, SCM_ARG1, s_sorted_p);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_sorted_p);

  if (SCM_CONSP (items))
    {
      len = scm_ilength (items);  /* also checks that it's a pure list */
      SCM_ASSERT (len >= 0, items, SCM_ARG1, s_sorted_p);
      if (len <= 1)
        return SCM_BOOL_T;

      item = SCM_CAR (items);
      rest = SCM_CDR (items);
      j = len - 1;
      while (j > 0)
        {
          if ((*cmp) (less, &SCM_CAR (rest), &item))
            return SCM_BOOL_F;
          else
            {
              item = SCM_CAR (rest);
              rest = SCM_CDR (rest);
              j--;
            }
        }
      return SCM_BOOL_T;
    }
  else
    {
      switch (SCM_TYP7 (items))
        {
        case scm_tc7_vector:
          {
            vp = SCM_VELTS (items);  /* vector pointer */
            len = SCM_LENGTH (items);
            j = len - 1;
            while (j > 0)
              {
                if ((*cmp) (less, &vp[1], vp))
                  return SCM_BOOL_F;
                else
                  {
                    vp++;
                    j--;
                  }
              }
            return SCM_BOOL_T;
          }
          break;
        case scm_tc7_ivect:  /* long */
        case scm_tc7_uvect:  /* unsigned */
        case scm_tc7_fvect:  /* float */
        case scm_tc7_dvect:  /* double */
        default:
          scm_wta (items, (char *) SCM_ARG1, s_sorted_p);
        }
    }
  return SCM_BOOL_F;
} /* scm_sorted_p */
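
/* Illustrative only, not in the original file: the C-level equivalent of
   (sorted? '(1 2 3) less), which should yield #t for any numeric `less'.  */
#if 0
static SCM
example_sorted_p (SCM less)
{
  SCM lst = scm_cons (SCM_MAKINUM (1L),
                      scm_cons (SCM_MAKINUM (2L),
                                scm_cons (SCM_MAKINUM (3L), SCM_EOL)));
  return scm_sorted_p (lst, less);
}
#endif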

/* (merge a b less?)
   takes two lists a and b such that (sorted? a less?) and (sorted? b less?)
   and returns a new list in which the elements of a and b have been stably
   interleaved so that (sorted? (merge a b less?) less?).
   Note: this does _not_ accept vectors. */
SCM_PROC (s_merge, "merge", 3, 0, 0, scm_merge);

SCM
scm_merge (SCM alist, SCM blist, SCM less)
{
  long alen, blen;  /* list lengths */
  SCM build, last;
  cmp_fun_t cmp = scm_cmp_function (less);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_merge);

  if (SCM_NULLP (alist))
    return blist;
  else if (SCM_NULLP (blist))
    return alist;
  else
    {
      alen = scm_ilength (alist);  /* checks that it's a pure list */
      blen = scm_ilength (blist);  /* checks that it's a pure list */
      SCM_ASSERT (alen > 0, alist, SCM_ARG1, s_merge);
      SCM_ASSERT (blen > 0, blist, SCM_ARG2, s_merge);
      if ((*cmp) (less, &SCM_CAR (blist), &SCM_CAR (alist)))
        {
          build = scm_cons (SCM_CAR (blist), SCM_EOL);
          blist = SCM_CDR (blist);
          blen--;
        }
      else
        {
          build = scm_cons (SCM_CAR (alist), SCM_EOL);
          alist = SCM_CDR (alist);
          alen--;
        }
      last = build;
      while ((alen > 0) && (blen > 0))
        {
          if ((*cmp) (less, &SCM_CAR (blist), &SCM_CAR (alist)))
            {
              SCM_SETCDR (last, scm_cons (SCM_CAR (blist), SCM_EOL));
              blist = SCM_CDR (blist);
              blen--;
            }
          else
            {
              SCM_SETCDR (last, scm_cons (SCM_CAR (alist), SCM_EOL));
              alist = SCM_CDR (alist);
              alen--;
            }
          last = SCM_CDR (last);
        }
      if ((alen > 0) && (blen == 0))
        SCM_SETCDR (last, alist);
      else if ((alen == 0) && (blen > 0))
        SCM_SETCDR (last, blist);
    }
  return build;
} /* scm_merge */
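
/* Illustrative only, not in the original file: merging the fixnum lists
   (1 3) and (2 4) under a numeric `less' yields (1 2 3 4), the C-level
   equivalent of (merge '(1 3) '(2 4) less).  */
#if 0
static SCM
example_merge_fixnums (SCM less)
{
  SCM a = scm_cons (SCM_MAKINUM (1L), scm_cons (SCM_MAKINUM (3L), SCM_EOL));
  SCM b = scm_cons (SCM_MAKINUM (2L), scm_cons (SCM_MAKINUM (4L), SCM_EOL));
  return scm_merge (a, b, less);
}
#endif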

static SCM
scm_merge_list_x (SCM alist, SCM blist,
                  long alen, long blen,
                  cmp_fun_t cmp, SCM less)
{
  SCM build, last;

  if (SCM_NULLP (alist))
    return blist;
  else if (SCM_NULLP (blist))
    return alist;
  else
    {
      if ((*cmp) (less, &SCM_CAR (blist), &SCM_CAR (alist)))
        {
          build = blist;
          blist = SCM_CDR (blist);
          blen--;
        }
      else
        {
          build = alist;
          alist = SCM_CDR (alist);
          alen--;
        }
      last = build;
      while ((alen > 0) && (blen > 0))
        {
          if ((*cmp) (less, &SCM_CAR (blist), &SCM_CAR (alist)))
            {
              SCM_SETCDR (last, blist);
              blist = SCM_CDR (blist);
              blen--;
            }
          else
            {
              SCM_SETCDR (last, alist);
              alist = SCM_CDR (alist);
              alen--;
            }
          last = SCM_CDR (last);
        }
      if ((alen > 0) && (blen == 0))
        SCM_SETCDR (last, alist);
      else if ((alen == 0) && (blen > 0))
        SCM_SETCDR (last, blist);
    }
  return build;
} /* scm_merge_list_x */

SCM_PROC (s_merge_x, "merge!", 3, 0, 0, scm_merge_x);

SCM
scm_merge_x (SCM alist, SCM blist, SCM less)
{
  long alen, blen;  /* list lengths */

  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_merge_x);
  if (SCM_NULLP (alist))
    return blist;
  else if (SCM_NULLP (blist))
    return alist;
  else
    {
      alen = scm_ilength (alist);  /* checks that it's a pure list */
      blen = scm_ilength (blist);  /* checks that it's a pure list */
      SCM_ASSERT (alen >= 0, alist, SCM_ARG1, s_merge_x);
      SCM_ASSERT (blen >= 0, blist, SCM_ARG2, s_merge_x);
      return scm_merge_list_x (alist, blist,
                               alen, blen,
                               scm_cmp_function (less),
                               less);
    }
} /* scm_merge_x */

/* This merge sort algorithm is the same as slib's, by Richard A. O'Keefe.
   The algorithm is stable.  We also tried to use the algorithm used by
   scsh's merge-sort, but that algorithm turned out not to be stable, even
   though it claimed to be.

   scm_merge_list_step destructively sorts the first n elements of *seq
   and returns them as a sorted list; *seq is advanced past those
   elements.  */
static SCM
scm_merge_list_step (SCM * seq,
                     cmp_fun_t cmp,
                     SCM less,
                     int n)
{
  SCM a, b;

  if (n > 2)
    {
      long mid = n / 2;
      a = scm_merge_list_step (seq, cmp, less, mid);
      b = scm_merge_list_step (seq, cmp, less, n - mid);
      return scm_merge_list_x (a, b, mid, n - mid, cmp, less);
    }
  else if (n == 2)
    {
      SCM p = *seq;
      SCM rest = SCM_CDR (*seq);
      SCM x = SCM_CAR (*seq);
      SCM y = SCM_CAR (SCM_CDR (*seq));
      *seq = SCM_CDR (rest);
      SCM_SETCDR (rest, SCM_EOL);
      if ((*cmp) (less, &y, &x))
        {
          SCM_CAR (p) = y;
          SCM_CAR (rest) = x;
        }
      return p;
    }
  else if (n == 1)
    {
      SCM p = *seq;
      *seq = SCM_CDR (p);
      SCM_SETCDR (p, SCM_EOL);
      return p;
    }
  else
    return SCM_EOL;
} /* scm_merge_list_step */


SCM_PROC (s_sort_x, "sort!", 2, 0, 0, scm_sort_x);

/* scm_sort_x handles lists and vectors; it is not guaranteed to be a
   stable sort.  */
SCM
scm_sort_x (SCM items, SCM less)
{
  long len;  /* list/vector length */
  if (SCM_NULLP (items))
    return SCM_EOL;
  SCM_ASSERT (SCM_NIMP (items), items, SCM_ARG1, s_sort_x);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_sort_x);

  if (SCM_CONSP (items))
    {
      len = scm_ilength (items);
      SCM_ASSERT (len >= 0, items, SCM_ARG1, s_sort_x);
      return scm_merge_list_step (&items, scm_cmp_function (less), less, len);
    }
  else if (SCM_VECTORP (items))
    {
      len = SCM_LENGTH (items);
      scm_restricted_vector_sort_x (items,
                                    less,
                                    SCM_MAKINUM (0L),
                                    SCM_MAKINUM (len));
      return items;
    }
  else
    return scm_wta (items, (char *) SCM_ARG1, s_sort_x);
} /* scm_sort_x */

SCM_PROC (s_sort, "sort", 2, 0, 0, scm_sort);

/* scm_sort handles lists and vectors; it is not guaranteed to be a
   stable sort.  */
SCM
scm_sort (SCM items, SCM less)
{
  SCM sortvec;  /* the vector we actually sort */
  long len;     /* list/vector length */
  if (SCM_NULLP (items))
    return SCM_EOL;
  SCM_ASSERT (SCM_NIMP (items), items, SCM_ARG1, s_sort);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_sort);
  if (SCM_CONSP (items))
    {
      len = scm_ilength (items);
      SCM_ASSERT (len >= 0, items, SCM_ARG1, s_sort);
      items = scm_list_copy (items);
      return scm_merge_list_step (&items, scm_cmp_function (less), less, len);
    }
  else if (SCM_VECTORP (items))
    {
      len = SCM_LENGTH (items);
      sortvec = scm_make_uve (len, scm_array_prototype (items));
      scm_array_copy_x (items, sortvec);
      scm_restricted_vector_sort_x (sortvec,
                                    less,
                                    SCM_MAKINUM (0L),
                                    SCM_MAKINUM (len));
      return sortvec;
    }
  else
    return scm_wta (items, (char *) SCM_ARG1, s_sort);
} /* scm_sort */

static void
scm_merge_vector_x (void *const vecbase,
                    void *const tempbase,
                    cmp_fun_t cmp,
                    SCM less,
                    long low,
                    long mid,
                    long high)
{
  register SCM *vp = (SCM *) vecbase;
  register SCM *temp = (SCM *) tempbase;
  long it;            /* Index for temp vector */
  long i1 = low;      /* Index for lower vector segment */
  long i2 = mid + 1;  /* Index for upper vector segment */

  /* Copy while both segments contain more elements */
  for (it = low; (i1 <= mid) && (i2 <= high); ++it)
    if ((*cmp) (less, &vp[i2], &vp[i1]))
      temp[it] = vp[i2++];
    else
      temp[it] = vp[i1++];

  /* Copy while first segment contains more elements */
  while (i1 <= mid)
    temp[it++] = vp[i1++];

  /* Copy while second segment contains more elements */
  while (i2 <= high)
    temp[it++] = vp[i2++];

  /* Copy back from temp to vp */
  for (it = low; it <= high; ++it)
    vp[it] = temp[it];
} /* scm_merge_vector_x */

static void
scm_merge_vector_step (void *const vp,
                       void *const temp,
                       cmp_fun_t cmp,
                       SCM less,
                       long low,
                       long high)
{
  if (high > low)
    {
      long mid = (low + high) / 2;
      scm_merge_vector_step (vp, temp, cmp, less, low, mid);
      scm_merge_vector_step (vp, temp, cmp, less, mid + 1, high);
      scm_merge_vector_x (vp, temp, cmp, less, low, mid, high);
    }
} /* scm_merge_vector_step */


SCM_PROC (s_stable_sort_x, "stable-sort!", 2, 0, 0, scm_stable_sort_x);
/* stable-sort! manages lists and vectors */

SCM
scm_stable_sort_x (SCM items, SCM less)
{
  long len;  /* list/vector length */

  if (SCM_NULLP (items))
    return SCM_EOL;
  SCM_ASSERT (SCM_NIMP (items), items, SCM_ARG1, s_stable_sort_x);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_stable_sort_x);
  if (SCM_CONSP (items))
    {
      len = scm_ilength (items);
      SCM_ASSERT (len >= 0, items, SCM_ARG1, s_stable_sort_x);
      return scm_merge_list_step (&items, scm_cmp_function (less), less, len);
    }
  else if (SCM_VECTORP (items))
    {
      SCM *temp, *vp;
      len = SCM_LENGTH (items);
      temp = malloc (len * sizeof (SCM));
      vp = SCM_VELTS (items);
      scm_merge_vector_step (vp,
                             temp,
                             scm_cmp_function (less),
                             less,
                             0,
                             len - 1);
      free (temp);
      return items;
    }
  else
    return scm_wta (items, (char *) SCM_ARG1, s_stable_sort_x);
} /* scm_stable_sort_x */

SCM_PROC (s_stable_sort, "stable-sort", 2, 0, 0, scm_stable_sort);

/* stable_sort manages lists and vectors */
SCM
scm_stable_sort (SCM items, SCM less)
{
  long len;  /* list/vector length */
  if (SCM_NULLP (items))
    return SCM_EOL;
  SCM_ASSERT (SCM_NIMP (items), items, SCM_ARG1, s_stable_sort);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_stable_sort);
  if (SCM_CONSP (items))
    {
      len = scm_ilength (items);
      SCM_ASSERT (len >= 0, items, SCM_ARG1, s_stable_sort);
      items = scm_list_copy (items);
      return scm_merge_list_step (&items, scm_cmp_function (less), less, len);
    }
  else if (SCM_VECTORP (items))
    {
      SCM retvec;
      SCM *temp, *vp;
      len = SCM_LENGTH (items);
      retvec = scm_make_uve (len, scm_array_prototype (items));
      scm_array_copy_x (items, retvec);
      temp = malloc (len * sizeof (SCM));
      vp = SCM_VELTS (retvec);
      scm_merge_vector_step (vp,
                             temp,
                             scm_cmp_function (less),
                             less,
                             0,
                             len - 1);
      free (temp);
      return retvec;
    }
  else
    return scm_wta (items, (char *) SCM_ARG1, s_stable_sort);
} /* scm_stable_sort */

SCM_PROC (s_sort_list_x, "sort-list!", 2, 0, 0, scm_sort_list_x);

SCM  /* stable */
scm_sort_list_x (SCM items, SCM less)
{
  long len = scm_ilength (items);
  SCM_ASSERT (len >= 0, items, SCM_ARG1, s_sort_list_x);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_sort_list_x);
  return scm_merge_list_step (&items, scm_cmp_function (less), less, len);
} /* scm_sort_list_x */

SCM_PROC (s_sort_list, "sort-list", 2, 0, 0, scm_sort_list);

SCM  /* stable */
scm_sort_list (SCM items, SCM less)
{
  long len = scm_ilength (items);
  SCM_ASSERT (len >= 0, items, SCM_ARG1, s_sort_list);
  SCM_ASSERT (SCM_NIMP (less), less, SCM_ARG2, s_sort_list);
  items = scm_list_copy (items);
  return scm_merge_list_step (&items, scm_cmp_function (less), less, len);
} /* scm_sort_list */

void
scm_init_sort ()
{
#include "sort.x"

  scm_add_feature ("sort");
}