/* loader.c --- Loading Guile's on-disk ELF images (libguile).  */
1 /* Copyright (C) 2001, 2009, 2010, 2011, 2012
2 * 2013, 2014 Free Software Foundation, Inc.
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public License
6 * as published by the Free Software Foundation; either version 3 of
7 * the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301 USA
18 */
19
20 #if HAVE_CONFIG_H
21 # include <config.h>
22 #endif
23
24 #include <string.h>
25 #include <fcntl.h>
26 #include <unistd.h>
27
28 #ifdef HAVE_SYS_MMAN_H
29 #include <sys/mman.h>
30 #endif
31
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include <assert.h>
35 #include <alignof.h>
36 #include <byteswap.h>
37 #include <verify.h>
38
39 #include <full-read.h>
40
41 #include "_scm.h"
42 #include "elf.h"
43 #include "programs.h"
44 #include "loader.h"
45
46 /* This file contains the loader for Guile's on-disk format: ELF with
47 some custom tags in the dynamic segment. */
48
49 #if SIZEOF_SCM_T_BITS == 4
50 #define Elf_Half Elf32_Half
51 #define Elf_Word Elf32_Word
52 #define Elf_Ehdr Elf32_Ehdr
53 #define ELFCLASS ELFCLASS32
54 #define Elf_Phdr Elf32_Phdr
55 #define Elf_Dyn Elf32_Dyn
56 #elif SIZEOF_SCM_T_BITS == 8
57 #define Elf_Half Elf64_Half
58 #define Elf_Word Elf64_Word
59 #define Elf_Ehdr Elf64_Ehdr
60 #define ELFCLASS ELFCLASS64
61 #define Elf_Phdr Elf64_Phdr
62 #define Elf_Dyn Elf64_Dyn
63 #else
64 #error
65 #endif
66
67 #define DT_LOGUILE 0x37146000 /* Start of Guile-specific */
68 #define DT_GUILE_GC_ROOT 0x37146000 /* Offset of GC roots */
69 #define DT_GUILE_GC_ROOT_SZ 0x37146001 /* Size in machine words of GC
70 roots */
71 #define DT_GUILE_ENTRY 0x37146002 /* Address of entry thunk */
72 #define DT_GUILE_VM_VERSION 0x37146003 /* Bytecode version */
73 #define DT_GUILE_FRAME_MAPS 0x37146004 /* Frame maps */
74 #define DT_HIGUILE 0x37146fff /* End of Guile-specific */
75
76 #ifdef WORDS_BIGENDIAN
77 #define ELFDATA ELFDATA2MSB
78 #else
79 #define ELFDATA ELFDATA2LSB
80 #endif
81
82 static void register_elf (char *data, size_t len, char *frame_maps);
83
84 enum bytecode_kind
85 {
86 BYTECODE_KIND_NONE,
87 BYTECODE_KIND_GUILE_2_2
88 };
89
90 static SCM
91 pointer_to_procedure (enum bytecode_kind bytecode_kind, char *ptr)
92 {
93 switch (bytecode_kind)
94 {
95 case BYTECODE_KIND_GUILE_2_2:
96 {
97 return scm_i_make_program ((scm_t_uint32 *) ptr);
98 }
99 case BYTECODE_KIND_NONE:
100 default:
101 abort ();
102 }
103 }
104
105 static const char*
106 check_elf_header (const Elf_Ehdr *header)
107 {
108 if (!(header->e_ident[EI_MAG0] == ELFMAG0
109 && header->e_ident[EI_MAG1] == ELFMAG1
110 && header->e_ident[EI_MAG2] == ELFMAG2
111 && header->e_ident[EI_MAG3] == ELFMAG3))
112 return "not an ELF file";
113
114 if (header->e_ident[EI_CLASS] != ELFCLASS)
115 return "ELF file does not have native word size";
116
117 if (header->e_ident[EI_DATA] != ELFDATA)
118 return "ELF file does not have native byte order";
119
120 if (header->e_ident[EI_VERSION] != EV_CURRENT)
121 return "bad ELF version";
122
123 if (header->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
124 return "unexpected OS ABI";
125
126 if (header->e_ident[EI_ABIVERSION] != 0)
127 return "unexpected ABI version";
128
129 if (header->e_type != ET_DYN)
130 return "unexpected ELF type";
131
132 if (header->e_machine != EM_NONE)
133 return "unexpected machine";
134
135 if (header->e_version != EV_CURRENT)
136 return "unexpected ELF version";
137
138 if (header->e_ehsize != sizeof *header)
139 return "unexpected header size";
140
141 if (header->e_phentsize != sizeof (Elf_Phdr))
142 return "unexpected program header size";
143
144 return NULL;
145 }
146
147 #define IS_ALIGNED(offset, alignment) \
148 (!((offset) & ((alignment) - 1)))
149 #define ALIGN(offset, alignment) \
150 ((offset + (alignment - 1)) & ~(alignment - 1))
151
152 /* Return the alignment required by the ELF at DATA, of LEN bytes. */
153 static size_t
154 elf_alignment (const char *data, size_t len)
155 {
156 Elf_Ehdr *header;
157 int i;
158 size_t alignment = 8;
159
160 if (len < sizeof(Elf_Ehdr))
161 return alignment;
162 header = (Elf_Ehdr *) data;
163 if (header->e_phoff + header->e_phnum * header->e_phentsize >= len)
164 return alignment;
165 for (i = 0; i < header->e_phnum; i++)
166 {
167 Elf_Phdr *phdr;
168 const char *phdr_addr = data + header->e_phoff + i * header->e_phentsize;
169
170 if (!IS_ALIGNED ((scm_t_uintptr) phdr_addr, alignof_type (Elf_Phdr)))
171 return alignment;
172 phdr = (Elf_Phdr *) phdr_addr;
173
174 if (phdr->p_align & (phdr->p_align - 1))
175 return alignment;
176
177 if (phdr->p_align > alignment)
178 alignment = phdr->p_align;
179 }
180
181 return alignment;
182 }
183
184 /* This function leaks the memory that it allocates. */
185 static char*
186 alloc_aligned (size_t len, unsigned alignment)
187 {
188 char *ret;
189
190 if (alignment == 8)
191 {
192 /* FIXME: Assert that we actually have an 8-byte-aligned malloc. */
193 ret = malloc (len);
194 }
195 #if defined(HAVE_SYS_MMAN_H) && defined(MMAP_ANONYMOUS)
196 else if (alignment == SCM_PAGE_SIZE)
197 {
198 ret = mmap (NULL, len, PROT_READ | PROT_WRITE, -1, 0);
199 if (ret == MAP_FAILED)
200 SCM_SYSERROR;
201 }
202 #endif
203 else
204 {
205 if (len + alignment < len)
206 abort ();
207
208 ret = malloc (len + alignment - 1);
209 if (!ret)
210 abort ();
211 ret = (char *) ALIGN ((scm_t_uintptr) ret, alignment);
212 }
213
214 return ret;
215 }
216
/* Return a copy of the LEN bytes at DATA, aligned as required by the
   ELF image itself.  The copy is leaked by design; see alloc_aligned.  */
static char*
copy_and_align_elf_data (const char *data, size_t len)
{
  char *aligned_copy = alloc_aligned (len, elf_alignment (data, len));

  memcpy (aligned_copy, data, len);

  return aligned_copy;
}
229
#ifdef HAVE_SYS_MMAN_H
/* Translate ELF segment permission bits (PF_*) into the
   corresponding mmap/mprotect protection bits (PROT_*).  */
static int
segment_flags_to_prot (Elf_Word flags)
{
  return ((flags & PF_X) ? PROT_EXEC : 0)
    | ((flags & PF_W) ? PROT_WRITE : 0)
    | ((flags & PF_R) ? PROT_READ : 0);
}
#endif
246
/* Walk the PT_DYNAMIC segment DYN_PHDR of the image loaded at BASE,
   picking out the Guile-specific dynamic tags.  On success, return
   NULL and set *INIT_OUT to the init thunk (or #f if none),
   *ENTRY_OUT to the entry thunk, and *FRAME_MAPS_OUT to the frame
   map data (or 0 if absent); any DT_GUILE_GC_ROOT region is
   registered with the GC.  On failure, return a static error
   string.  */
static char*
process_dynamic_segment (char *base, Elf_Phdr *dyn_phdr,
                         SCM *init_out, SCM *entry_out, char **frame_maps_out)
{
  char *dyn_addr = base + dyn_phdr->p_vaddr;
  Elf_Dyn *dyn = (Elf_Dyn *) dyn_addr;
  size_t i, dyn_size = dyn_phdr->p_memsz / sizeof (Elf_Dyn);
  char *init = 0, *gc_root = 0, *entry = 0, *frame_maps = 0;
  scm_t_ptrdiff gc_root_size = 0;
  enum bytecode_kind bytecode_kind = BYTECODE_KIND_NONE;

  for (i = 0; i < dyn_size; i++)
    {
      /* DT_NULL terminates the dynamic array.  */
      if (dyn[i].d_tag == DT_NULL)
        break;

      /* Each known tag may appear at most once; d_val entries are
         offsets relative to BASE unless noted otherwise.  */
      switch (dyn[i].d_tag)
        {
        case DT_INIT:
          if (init)
            return "duplicate DT_INIT";
          init = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT:
          if (gc_root)
            return "duplicate DT_GUILE_GC_ROOT";
          gc_root = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_GC_ROOT_SZ:
          if (gc_root_size)
            return "duplicate DT_GUILE_GC_ROOT_SZ";
          /* Size in machine words, per the DT_GUILE_GC_ROOT_SZ
             definition above.  */
          gc_root_size = dyn[i].d_un.d_val;
          break;
        case DT_GUILE_ENTRY:
          if (entry)
            return "duplicate DT_GUILE_ENTRY";
          entry = base + dyn[i].d_un.d_val;
          break;
        case DT_GUILE_VM_VERSION:
          if (bytecode_kind != BYTECODE_KIND_NONE)
            return "duplicate DT_GUILE_VM_VERSION";
          {
            /* d_val packs the version as major << 16 | minor.  */
            scm_t_uint16 major = dyn[i].d_un.d_val >> 16;
            scm_t_uint16 minor = dyn[i].d_un.d_val & 0xffff;
            switch (major)
              {
              case 0x0202:
                bytecode_kind = BYTECODE_KIND_GUILE_2_2;
                /* As we get closer to 2.2, we will allow for backwards
                   compatibility and we can change this test to ">"
                   instead of "!=".  However until then, to deal with VM
                   churn it's best to keep these things in
                   lock-step.  */
                if (minor != SCM_OBJCODE_MINOR_VERSION)
                  return "incompatible bytecode version";
                break;
              default:
                return "incompatible bytecode kind";
              }
            break;
          }
        case DT_GUILE_FRAME_MAPS:
          if (frame_maps)
            return "duplicate DT_GUILE_FRAME_MAPS";
          frame_maps = base + dyn[i].d_un.d_val;
          break;
        }
    }

  if (!entry)
    return "missing DT_GUILE_ENTRY";

  /* Bytecode must be 32-bit aligned for the 2.2 VM.  */
  switch (bytecode_kind)
    {
    case BYTECODE_KIND_GUILE_2_2:
      if ((scm_t_uintptr) init % 4)
        return "unaligned DT_INIT";
      if ((scm_t_uintptr) entry % 4)
        return "unaligned DT_GUILE_ENTRY";
      break;
    case BYTECODE_KIND_NONE:
    default:
      return "missing DT_GUILE_VM_VERSION";
    }

  if (gc_root)
    GC_add_roots (gc_root, gc_root + gc_root_size);

  *init_out = init ? pointer_to_procedure (bytecode_kind, init) : SCM_BOOL_F;
  *entry_out = pointer_to_procedure (bytecode_kind, entry);
  *frame_maps_out = frame_maps;

  return NULL;
}
341
/* Report an error from load_thunk_from_memory by jumping to its
   cleanup block with a static message.  */
#define ABORT(msg) do { err_msg = msg; goto cleanup; } while (0)

/* Load the ELF image at DATA (LEN bytes) and return its entry thunk.
   Validates the ELF and program headers, makes read-only mapped pages
   writable where the segment flags require it (when IS_READ_ONLY),
   processes the dynamic segment, runs the init thunk (passing
   CONSTANTS to it when CONSTANTS is true), and registers the image
   for later address lookups.  Raises a Scheme error on any failure.  */
static SCM
load_thunk_from_memory (char *data, size_t len, int is_read_only, SCM constants)
#define FUNC_NAME "load-thunk-from-memory"
{
  Elf_Ehdr *header;
  Elf_Phdr *ph;
  const char *err_msg = 0;
  size_t n, alignment = 8;
  int i;
  int dynamic_segment = -1;
  SCM init = SCM_BOOL_F, entry = SCM_BOOL_F;
  char *frame_maps = 0;

  if (len < sizeof *header)
    ABORT ("object file too small");

  header = (Elf_Ehdr*) data;

  if ((err_msg = check_elf_header (header)))
    goto cleanup;

  if (header->e_phnum == 0)
    ABORT ("no loadable segments");
  n = header->e_phnum;

  if (len < header->e_phoff + n * sizeof (Elf_Phdr))
    ABORT ("object file too small");

  ph = (Elf_Phdr*) (data + header->e_phoff);

  /* Check that the segment table is sane.  */
  for (i = 0; i < n; i++)
    {
      if (ph[i].p_filesz != ph[i].p_memsz)
        ABORT ("expected p_filesz == p_memsz");

      if (!ph[i].p_flags)
        ABORT ("expected nonzero segment flags");

      /* NOTE(review): this "<" looks inverted -- with ALIGNMENT
         starting at 8, a larger p_align (e.g. 4096) is never adopted,
         and any smaller power of two aborts.  Confirm against
         upstream whether ">" was intended.  */
      if (ph[i].p_align < alignment)
        {
          if (ph[i].p_align % alignment)
            ABORT ("expected new alignment to be multiple of old");
          alignment = ph[i].p_align;
        }

      if (ph[i].p_type == PT_DYNAMIC)
        {
          if (dynamic_segment >= 0)
            ABORT ("expected only one PT_DYNAMIC segment");
          dynamic_segment = i;
          continue;
        }

      if (ph[i].p_type != PT_LOAD)
        ABORT ("unknown segment type");

      if (i == 0)
        {
          if (ph[i].p_vaddr != 0)
            ABORT ("first loadable vaddr is not 0");
        }
      else
        {
          /* Segments must be sorted and disjoint, and must lie within
             the byte range we were given.  */
          if (ph[i].p_vaddr < ph[i-1].p_vaddr + ph[i-1].p_memsz)
            ABORT ("overlapping segments");

          if (ph[i].p_offset + ph[i].p_filesz > len)
            ABORT ("segment beyond end of byte array");
        }
    }

  if (dynamic_segment < 0)
    ABORT ("no PT_DYNAMIC segment");

  if (!IS_ALIGNED ((scm_t_uintptr) data, alignment))
    ABORT ("incorrectly aligned base");

  /* Allow writes to writable pages.  */
  if (is_read_only)
    {
#ifdef HAVE_SYS_MMAN_H
      for (i = 0; i < n; i++)
        {
          /* Only page-aligned loadable segments that are not
             read-only need their protection changed.  */
          if (ph[i].p_type != PT_LOAD)
            continue;
          if (ph[i].p_flags == PF_R)
            continue;
          if (ph[i].p_align != 4096)
            continue;

          /* On failure, errno is set and the cleanup block raises a
             system error.  */
          if (mprotect (data + ph[i].p_vaddr,
                        ph[i].p_memsz,
                        segment_flags_to_prot (ph[i].p_flags)))
            goto cleanup;
        }
#else
      ABORT ("expected writable pages");
#endif
    }

  if ((err_msg = process_dynamic_segment (data, &ph[dynamic_segment],
                                          &init, &entry, &frame_maps)))
    goto cleanup;

  if (scm_is_true (init))
    {
      if (scm_is_true (constants))
        scm_call_1 (init, constants);
      else
        scm_call_0 (init);
    }

  register_elf (data, len, frame_maps);

  /* Finally!  Return the thunk.  */
  return entry;

 cleanup:
  {
    if (errno)
      SCM_SYSERROR;
    scm_misc_error (FUNC_NAME, err_msg ? err_msg : "error loading ELF file",
                    SCM_EOL);
  }
}
#undef FUNC_NAME
471
472 #define SCM_PAGE_SIZE 4096
473
474 static char*
475 map_file_contents (int fd, size_t len, int *is_read_only)
476 #define FUNC_NAME "load-thunk-from-file"
477 {
478 char *data;
479
480 #ifdef HAVE_SYS_MMAN_H
481 data = mmap (NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
482 if (data == MAP_FAILED)
483 SCM_SYSERROR;
484 *is_read_only = 1;
485 #else
486 if (lseek (fd, 0, SEEK_START) < 0)
487 {
488 int errno_save = errno;
489 (void) close (fd);
490 errno = errno_save;
491 SCM_SYSERROR;
492 }
493
494 /* Given that we are using the read fallback, optimistically assume
495 that the .go files were made with 8-byte alignment.
496 alignment. */
497 data = malloc (end);
498 if (!data)
499 {
500 (void) close (fd);
501 scm_misc_error (FUNC_NAME, "failed to allocate ~A bytes",
502 scm_list_1 (scm_from_size_t (end)));
503 }
504
505 if (full_read (fd, data, end) != end)
506 {
507 int errno_save = errno;
508 (void) close (fd);
509 errno = errno_save;
510 if (errno)
511 SCM_SYSERROR;
512 scm_misc_error (FUNC_NAME, "short read while loading objcode",
513 SCM_EOL);
514 }
515
516 /* If our optimism failed, fall back. */
517 {
518 unsigned alignment = sniff_elf_alignment (data, end);
519
520 if (alignment != 8)
521 {
522 char *copy = copy_and_align_elf_data (data, end, alignment);
523 free (data);
524 data = copy;
525 }
526 }
527
528 *is_read_only = 0;
529 #endif
530
531 return data;
532 }
533 #undef FUNC_NAME
534
/* Scheme binding: (load-thunk-from-file FILENAME).  Map or read the
   whole file, load it as an ELF image, and return its entry thunk.  */
SCM_DEFINE (scm_load_thunk_from_file, "load-thunk-from-file", 1, 0, 0,
            (SCM filename),
            "")
#define FUNC_NAME s_scm_load_thunk_from_file
{
  char *c_filename;
  int fd, is_read_only;
  off_t end;
  char *data;

  SCM_VALIDATE_STRING (1, filename);

  c_filename = scm_to_locale_string (filename);
  fd = open (c_filename, O_RDONLY | O_BINARY | O_CLOEXEC);
  free (c_filename);
  if (fd < 0) SCM_SYSERROR;

  /* Seek to the end to learn the file's size in bytes.  */
  end = lseek (fd, 0, SEEK_END);
  if (end < 0)
    SCM_SYSERROR;

  data = map_file_contents (fd, end, &is_read_only);

  /* The mapping (or copied buffer) outlives the descriptor.  */
  (void) close (fd);

  return load_thunk_from_memory (data, end, is_read_only, SCM_BOOL_F);
}
#undef FUNC_NAME
563
/* Scheme binding: (load-thunk-from-memory (BYTEVECTOR . CONSTANTS)).
   Load the ELF image in BYTEVECTOR and return its entry thunk;
   CONSTANTS is either a vector passed to the image's init thunk, or
   #f.  */
SCM_DEFINE (scm_load_thunk_from_memory, "load-thunk-from-memory", 1, 0, 0,
            (SCM obj),
            "")
#define FUNC_NAME s_scm_load_thunk_from_memory
{
  char *data;
  size_t len;
  SCM bv, constants;

  SCM_VALIDATE_CONS (1, obj);
  bv = scm_car (obj);
  constants = scm_cdr (obj);
  SCM_ASSERT (scm_is_bytevector (bv)
              && (scm_is_vector (constants) || scm_is_false (constants)),
              obj, 1, FUNC_NAME);

  data = (char *) SCM_BYTEVECTOR_CONTENTS (bv);
  len = SCM_BYTEVECTOR_LENGTH (bv);

  /* Copy data in order to align it, to trace its GC roots and
     writable sections, and to keep it in memory.  */

  data = copy_and_align_elf_data (data, len);

  return load_thunk_from_memory (data, len, 0, constants);
}
#undef FUNC_NAME
591
592 \f
593
/* A record of one loaded ELF image, so that an instruction pointer
   can be mapped back to the image containing it.  */
struct mapped_elf_image
{
  char *start;       /* First byte of the image.  */
  char *end;         /* One past the last byte of the image.  */
  char *frame_maps;  /* DT_GUILE_FRAME_MAPS data, or NULL.  */
};

/* Array of all registered images, sorted by address and protected by
   scm_i_misc_mutex; see register_elf.  */
static struct mapped_elf_image *mapped_elf_images = NULL;
static size_t mapped_elf_images_count = 0;
static size_t mapped_elf_images_allocated = 0;
604
605 static size_t
606 find_mapped_elf_insertion_index (char *ptr)
607 {
608 /* "mapped_elf_images_count" must never be dereferenced. */
609 size_t start = 0, end = mapped_elf_images_count;
610
611 while (start < end)
612 {
613 size_t n = start + (end - start) / 2;
614
615 if (ptr < mapped_elf_images[n].end)
616 end = n;
617 else
618 start = n + 1;
619 }
620
621 return start;
622 }
623
/* Record the image [DATA, DATA + LEN) with its FRAME_MAPS pointer in
   the sorted MAPPED_ELF_IMAGES array, growing the array as needed.
   Takes scm_i_misc_mutex for the duration.  */
static void
register_elf (char *data, size_t len, char *frame_maps)
{
  scm_i_pthread_mutex_lock (&scm_i_misc_mutex);
  {
    /* My kingdom for a generic growable sorted vector library.  */
    if (mapped_elf_images_count == mapped_elf_images_allocated)
      {
        struct mapped_elf_image *prev;
        size_t n;

        /* Double the capacity, starting from 16.  */
        if (mapped_elf_images_allocated)
          mapped_elf_images_allocated *= 2;
        else
          mapped_elf_images_allocated = 16;

        prev = mapped_elf_images;
        /* Pointerless allocation: the GC must not scan this array,
           as it only holds addresses into separately rooted images.  */
        mapped_elf_images =
          scm_gc_malloc_pointerless (sizeof (*mapped_elf_images)
                                     * mapped_elf_images_allocated,
                                     "mapped elf images");

        for (n = 0; n < mapped_elf_images_count; n++)
          {
            mapped_elf_images[n].start = prev[n].start;
            mapped_elf_images[n].end = prev[n].end;
            mapped_elf_images[n].frame_maps = prev[n].frame_maps;
          }
      }

    {
      size_t end;
      size_t n = find_mapped_elf_insertion_index (data);

      /* Shift entries at and after index N one slot to the right,
         working from the tail so nothing is overwritten.  */
      for (end = mapped_elf_images_count; n < end; end--)
        {
          const struct mapped_elf_image *prev = &mapped_elf_images[end - 1];
          mapped_elf_images[end].start = prev->start;
          mapped_elf_images[end].end = prev->end;
          mapped_elf_images[end].frame_maps = prev->frame_maps;
        }
      mapped_elf_images_count++;

      /* Slot N is now free for the new image.  */
      mapped_elf_images[n].start = data;
      mapped_elf_images[n].end = data + len;
      mapped_elf_images[n].frame_maps = frame_maps;
    }
  }
  scm_i_pthread_mutex_unlock (&scm_i_misc_mutex);
}
674
675 static struct mapped_elf_image *
676 find_mapped_elf_image_unlocked (char *ptr)
677 {
678 size_t n = find_mapped_elf_insertion_index ((char *) ptr);
679
680 if (n < mapped_elf_images_count
681 && mapped_elf_images[n].start <= ptr
682 && ptr < mapped_elf_images[n].end)
683 return &mapped_elf_images[n];
684
685 return NULL;
686 }
687
688 static int
689 find_mapped_elf_image (char *ptr, struct mapped_elf_image *image)
690 {
691 int result;
692
693 scm_i_pthread_mutex_lock (&scm_i_misc_mutex);
694 {
695 struct mapped_elf_image *img = find_mapped_elf_image_unlocked (ptr);
696 if (img)
697 {
698 memcpy (image, img, sizeof (*image));
699 result = 1;
700 }
701 else
702 result = 0;
703 }
704 scm_i_pthread_mutex_unlock (&scm_i_misc_mutex);
705
706 return result;
707 }
708
709 static SCM
710 scm_find_mapped_elf_image (SCM ip)
711 {
712 struct mapped_elf_image image;
713
714 if (find_mapped_elf_image ((char *) scm_to_uintptr_t (ip), &image))
715 {
716 signed char *data = (signed char *) image.start;
717 size_t len = image.end - image.start;
718
719 return scm_c_take_gc_bytevector (data, len, SCM_BOOL_F);
720 }
721
722 return SCM_BOOL_F;
723 }
724
725 static SCM
726 scm_all_mapped_elf_images (void)
727 {
728 SCM result = SCM_EOL;
729
730 scm_i_pthread_mutex_lock (&scm_i_misc_mutex);
731 {
732 size_t n;
733 for (n = 0; n < mapped_elf_images_count; n++)
734 {
735 signed char *data = (signed char *) mapped_elf_images[n].start;
736 size_t len = mapped_elf_images[n].end - mapped_elf_images[n].start;
737 result = scm_cons (scm_c_take_gc_bytevector (data, len, SCM_BOOL_F),
738 result);
739 }
740 }
741 scm_i_pthread_mutex_unlock (&scm_i_misc_mutex);
742
743 return result;
744 }
745
/* On-disk layout of the DT_GUILE_FRAME_MAPS data: a prefix followed
   by a sorted array of headers, followed by the maps themselves.  */
struct frame_map_prefix
{
  scm_t_uint32 text_offset;  /* Offset of text within the image.  */
  scm_t_uint32 maps_offset;  /* Offset of map data within this blob.  */
};

struct frame_map_header
{
  scm_t_uint32 addr;        /* Address relative to the text start.  */
  scm_t_uint32 map_offset;  /* Offset of this address's map.  */
};

/* The binary search below depends on these exact packed sizes.  */
verify (sizeof (struct frame_map_prefix) == 8);
verify (sizeof (struct frame_map_header) == 8);
760
/* Return the dead-slot map for the return address IP, or NULL if IP
   is not in a registered image or has no map.  NOTE(review): "unlocked"
   suggests the caller is expected to hold scm_i_misc_mutex -- confirm
   against callers.  */
const scm_t_uint8 *
scm_find_dead_slot_map_unlocked (const scm_t_uint32 *ip)
{
  struct mapped_elf_image *image;
  char *base;
  struct frame_map_prefix *prefix;
  struct frame_map_header *headers;
  scm_t_uintptr addr = (scm_t_uintptr) ip;
  size_t start, end;

  image = find_mapped_elf_image_unlocked ((char *) ip);
  if (!image || !image->frame_maps)
    return NULL;

  /* The frame maps blob: prefix first, then the header array.  */
  base = image->frame_maps;
  prefix = (struct frame_map_prefix *) base;
  headers = (struct frame_map_header *) (base + sizeof (*prefix));

  /* Reject addresses before the text section, then make ADDR relative
     to the text start, matching headers[].addr.  */
  if (addr < ((scm_t_uintptr) image->start) + prefix->text_offset)
    return NULL;
  addr -= ((scm_t_uintptr) image->start) + prefix->text_offset;

  start = 0;
  /* Number of headers: everything between the prefix and the maps.  */
  end = (prefix->maps_offset - sizeof (*prefix)) / sizeof (*headers);

  if (end == 0 || addr > headers[end - 1].addr)
    return NULL;

  /* Binary search for an exact address match.  */
  while (start < end)
    {
      size_t n = start + (end - start) / 2;

      if (addr == headers[n].addr)
        return (const scm_t_uint8*) (base + headers[n].map_offset);
      else if (addr < headers[n].addr)
        end = n;
      else
        start = n + 1;
    }

  return NULL;
}
803
804 \f
/* Register the loader extension so that scm_init_loader runs when the
   libguile extension is initialized.  */
void
scm_bootstrap_loader (void)
{
  scm_c_register_extension ("libguile-" SCM_EFFECTIVE_VERSION,
                            "scm_init_loader",
                            (scm_t_extension_init_func)scm_init_loader, NULL);
}
812
/* Define this file's Scheme procedures: the snarfed SCM_DEFINEs plus
   the two internal debugging accessors.  */
void
scm_init_loader (void)
{
#ifndef SCM_MAGIC_SNARFER
#include "libguile/loader.x"
#endif

  scm_c_define_gsubr ("find-mapped-elf-image", 1, 0, 0,
                      (scm_t_subr) scm_find_mapped_elf_image);
  scm_c_define_gsubr ("all-mapped-elf-images", 0, 0, 0,
                      (scm_t_subr) scm_all_mapped_elf_images);
}
825
826 /*
827 Local Variables:
828 c-file-style: "gnu"
829 End:
830 */