1 /* vi: set sw=4 ts=4: */
3 * This file contains the helper routines to load an ELF shared
4 * library into memory and add the symbol table info to the chain.
6 * Copyright (C) 2000-2006 by Erik Andersen <andersen@codepoet.org>
7 * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8 * David Engel, Hongjiu Lu and Mitch D'Souza
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. The name of the above contributors may not be
16 * used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #ifdef __LDSO_CACHE_SUPPORT__

/* Mapped image of the ld.so cache file and its size.
 * _dl_cache_addr is NULL before the first _dl_map_cache() call,
 * (caddr_t)-1 after a failed attempt (so we never retry a bad cache),
 * and otherwise points at the read-only mmap of LDSO_CACHE. */
37 static caddr_t _dl_cache_addr = NULL;
38 static size_t _dl_cache_size = 0;
/* Map the ld.so cache file (LDSO_CACHE) read-only into memory and
 * sanity-check it: header magic/version, declared library count vs.
 * file size, NUL-terminated string table, and each entry's string
 * offsets.  On any corruption the mapping is torn down and
 * _dl_cache_addr is poisoned with (caddr_t)-1 so later calls fail fast.
 * NOTE(review): this listing elides several lines of the original
 * (local declarations, returns, closing braces) — confirm against the
 * full source before editing. */
40 int _dl_map_cache(void)
/* Fast paths: a previous attempt already failed, or the cache is
 * already mapped. */
48 if (_dl_cache_addr == (caddr_t) - 1)
50 else if (_dl_cache_addr != NULL)
53 if (_dl_stat(LDSO_CACHE, &st)
54 || (fd = _dl_open(LDSO_CACHE, O_RDONLY, 0)) < 0) {
55 _dl_cache_addr = (caddr_t) - 1; /* so we won't try again */
59 _dl_cache_size = st.st_size;
60 _dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ, LDSO_CACHE_MMAP_FLAGS, fd, 0);
62 if (_dl_mmap_check_error(_dl_cache_addr)) {
63 _dl_dprintf(2, "%s:%i: can't map '%s'\n",
64 _dl_progname, __LINE__, LDSO_CACHE);
68 header = (header_t *) _dl_cache_addr;
/* Validate the cache image: big enough for a header, correct magic and
 * version strings, nlibs entries actually fit, and the trailing string
 * table ends with a NUL so string reads can't run off the mapping. */
70 if (_dl_cache_size < sizeof(header_t) ||
71 _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
72 || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
74 (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
75 || _dl_cache_addr[_dl_cache_size - 1] != '\0')
77 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
/* Each entry's soname/path offsets must lie inside the string table
 * that follows the libentry array. */
82 strtabsize = _dl_cache_size - sizeof(header_t) -
83 header->nlibs * sizeof(libentry_t);
84 libent = (libentry_t *) & header[1];
86 for (i = 0; i < header->nlibs; i++) {
87 if (libent[i].sooffset >= strtabsize ||
88 libent[i].liboffset >= strtabsize)
90 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
/* Corruption path: release the mapping and poison the pointer. */
98 _dl_munmap(_dl_cache_addr, _dl_cache_size);
99 _dl_cache_addr = (caddr_t) - 1;
/* Release the ld.so cache mapping created by _dl_map_cache().
 * A NULL (never mapped) or (caddr_t)-1 (failed map) address is left
 * alone; otherwise the mapping is unmapped and the pointer reset so
 * the cache could be mapped again later. */
103 int _dl_unmap_cache(void)
105 if (_dl_cache_addr == NULL || _dl_cache_addr == (caddr_t) - 1)
109 _dl_munmap(_dl_cache_addr, _dl_cache_size);
110 _dl_cache_addr = NULL;
/* Apply PT_GNU_RELRO protection: after relocation, re-protect the
 * module's relro region read-only.  start is the relro base rounded
 * down to a page boundary; end is also rounded *down*, so only whole
 * pages fully inside the region become read-only.
 * NOTE(review): the `if (... &&` line preceding the _dl_mprotect call
 * is elided in this listing. */
119 _dl_protect_relro (struct elf_resolve *l)
121 ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
122 ElfW(Addr) start = (base & ~(_dl_pagesize - 1));
123 ElfW(Addr) end = ((base + l->relro_size) & ~(_dl_pagesize - 1));
124 _dl_if_debug_dprint("RELRO protecting %s: start:%x, end:%x\n", l->libname, start, end);
126 _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
127 _dl_dprintf(2, "%s: cannot apply additional memory protection after relocation", l->libname);
132 /* This function's behavior must exactly match that
133 * in uClibc/ldso/util/ldd.c */
/* Search a colon-separated directory list for `name`.  For each
 * directory the candidate "<dir>/<name>" is built in a stack buffer
 * and handed to _dl_load_elf_shared_library(); the first successful
 * load wins.  `secure` is passed through to the loader (set-uid
 * handling).  NOTE(review): the loop over the colon-split components
 * is partially elided in this listing. */
134 static struct elf_resolve *
135 search_for_named_library(const char *name, int secure, const char *path_list,
136 struct dyn_elf **rpnt)
138 char *path, *path_n, *mylibname;
139 struct elf_resolve *tpnt;
145 /* We need a writable copy of this string, but we don't
146 * need this allocated permanently since we don't want
147 * to leak memory, so use alloca to put path on the stack */
148 done = _dl_strlen(path_list);
149 path = alloca(done + 1);
151 /* another bit of local storage */
152 mylibname = alloca(2050);
154 /* gcc inlines alloca using a single instruction adjusting
155 * the stack pointer and no stack overflow check and thus
156 * no NULL error return. No point leaving in dead code... */
158 if (!path || !mylibname) {
159 _dl_dprintf(2, "Out of memory!\n");
164 _dl_memcpy(path, path_list, done+1);
166 /* Unlike ldd.c, don't bother to eliminate double //s */
168 /* Replace colons with zeros in path_list */
169 /* : at the beginning or end of path maps to CWD */
170 /* :: anywhere maps CWD */
/* Build "<dir>/<name>" and try to load it; an empty component means
 * the current directory.  mylibname is 2050 bytes, which is safe
 * because the caller caps the path/libname lengths at 1024. */
182 _dl_strcpy(mylibname, path_n);
184 _dl_strcpy(mylibname, "."); /* Assume current dir if empty path */
185 _dl_strcat(mylibname, "/");
186 _dl_strcat(mylibname, name);
187 if ((tpnt = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL)
196 /* Used to return error codes back to dlopen et. al. */
/* _dl_error_number is the externally visible code; the loader records
 * failures in _dl_internal_error_number first and copies it over only
 * when the whole lookup ultimately fails (see _dl_load_shared_library). */
197 unsigned long _dl_error_number;
198 unsigned long _dl_internal_error_number;
/* Resolve `full_libname` to a loaded module.  Search order:
 *   1. the literal path, if the name contains a '/'
 *   2. DT_RPATH of the requesting object (if __LDSO_RUNPATH__)
 *   3. LD_LIBRARY_PATH (unless disallowed)
 *   4. DT_RUNPATH of the requesting object (if __LDSO_RUNPATH__)
 *   5. the ld.so cache (if __LDSO_CACHE_SUPPORT__)
 *   6. the directory the dynamic loader itself lives in
 *   7. the hard-coded default path list (must match ldd.c)
 * On total failure the recorded internal error (or LD_ERROR_NOFILE)
 * is published in _dl_error_number.
 * NOTE(review): returns and some declarations are elided in this
 * listing. */
200 struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt,
201 struct elf_resolve *tpnt, char *full_libname, int __attribute__((unused)) trace_loaded_objects)
204 struct elf_resolve *tpnt1;
207 _dl_internal_error_number = 0;
208 libname = full_libname;
210 /* quick hack to ensure mylibname buffer doesn't overflow. don't
211 allow full_libname or any directory to be longer than 1024. */
212 if (_dl_strlen(full_libname) > 1024)
215 /* Skip over any initial initial './' and '/' stuff to
216 * get the short form libname with no path garbage */
217 pnt = _dl_strrchr(libname, '/');
222 _dl_if_debug_dprint("\tfind library='%s'; searching\n", libname);
223 /* If the filename has any '/', try it straight and leave it at that.
224 For IBCS2 compatibility under linux, we substitute the string
225 /usr/i486-sysv4/lib for /usr/lib in library names. */
227 if (libname != full_libname) {
228 _dl_if_debug_dprint("\ttrying file='%s'\n", full_libname);
229 tpnt1 = _dl_load_elf_shared_library(secure, rpnt, full_libname);
236 * The ABI specifies that RPATH is searched before LD_LIBRARY_PATH or
237 * the default path of /usr/lib. Check in rpath directories.
239 #ifdef __LDSO_RUNPATH__
/* DT_RPATH is stored as an offset; add DT_STRTAB to get the string. */
240 pnt = (tpnt ? (char *) tpnt->dynamic_info[DT_RPATH] : NULL);
242 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
243 _dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt);
244 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
249 /* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
250 if (_dl_library_path) {
251 _dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
252 if ((tpnt1 = search_for_named_library(libname, secure, _dl_library_path, rpnt)) != NULL)
259 * The ABI specifies that RUNPATH is searched after LD_LIBRARY_PATH.
261 #ifdef __LDSO_RUNPATH__
262 pnt = (tpnt ? (char *)tpnt->dynamic_info[DT_RUNPATH] : NULL);
264 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
265 _dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt);
266 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
272 * Where should the cache be searched? There is no such concept in the
273 * ABI, so we have some flexibility here. For now, search it before
274 * the hard coded paths that follow (i.e before /lib and /usr/lib).
276 #ifdef __LDSO_CACHE_SUPPORT__
277 if (_dl_cache_addr != NULL && _dl_cache_addr != (caddr_t) - 1) {
279 header_t *header = (header_t *) _dl_cache_addr;
280 libentry_t *libent = (libentry_t *) & header[1];
281 char *strs = (char *) &libent[header->nlibs];
283 _dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE);
/* Match on soname; accept any of the recognized ELF flavors. */
284 for (i = 0; i < header->nlibs; i++) {
285 if ((libent[i].flags == LIB_ELF ||
286 libent[i].flags == LIB_ELF_LIBC0 ||
287 libent[i].flags == LIB_ELF_LIBC5) &&
288 _dl_strcmp(libname, strs + libent[i].sooffset) == 0 &&
289 (tpnt1 = _dl_load_elf_shared_library(secure,
290 rpnt, strs + libent[i].liboffset)))
296 /* Look for libraries wherever the shared library loader
298 _dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
299 if ((tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt)) != NULL)
305 /* Lastly, search the standard list of paths for the library.
306 This list must exactly match the list in uClibc/ldso/util/ldd.c */
307 _dl_if_debug_dprint("\tsearching full lib path list\n");
308 if ((tpnt1 = search_for_named_library(libname, secure,
309 UCLIBC_RUNTIME_PREFIX "lib:"
310 UCLIBC_RUNTIME_PREFIX "usr/lib"
311 #ifndef __LDSO_CACHE_SUPPORT__
312 ":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"
321 /* Well, we shot our wad on that one. All we can do now is punt */
322 if (_dl_internal_error_number)
323 _dl_error_number = _dl_internal_error_number;
325 _dl_error_number = LD_ERROR_NOFILE;
326 _dl_if_debug_dprint("Bummer: could not find '%s'!\n", libname);
332 * Read one ELF library into memory, mmap it into the correct locations and
333 * add the symbol info to the symbol chain. Perform any relocations that
/* NOTE(review): this is the core loader.  This listing elides many
 * lines (error paths, braces, some declarations); the comments below
 * annotate only what is visible — confirm against the full source. */
337 struct elf_resolve *_dl_load_elf_shared_library(int secure,
338 struct dyn_elf **rpnt, char *libname)
341 unsigned long dynamic_addr = 0;
343 struct elf_resolve *tpnt;
345 char *status, *header;
346 unsigned long dynamic_info[DYNAMIC_SIZE];
348 unsigned long libaddr;
349 unsigned long minvma = 0xffffffff, maxvma = 0;
350 int i, flags, piclib, infile;
351 ElfW(Addr) relro_addr = 0;
352 size_t relro_size = 0;
354 DL_LOADADDR_TYPE lib_loadaddr;
355 DL_INIT_LOADADDR_EXTRA_DECLS
/* Open and stat the candidate file; either failure records
 * LD_ERROR_NOFILE. */
358 infile = _dl_open(libname, O_RDONLY, 0);
360 _dl_internal_error_number = LD_ERROR_NOFILE;
364 if (_dl_fstat(infile, &st) < 0) {
365 _dl_internal_error_number = LD_ERROR_NOFILE;
369 /* If we are in secure mode (i.e. a setu/gid binary using LD_PRELOAD),
370 we don't load the library if it isn't setuid. */
372 if (!(st.st_mode & S_ISUID)) {
377 /* Check if file is already loaded */
378 for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
379 if (tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {
/* Read the first page of the file into a scratch anonymous mapping so
 * the ELF and program headers can be examined. */
386 header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
387 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
388 if (_dl_mmap_check_error(header)) {
389 _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
390 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
395 _dl_read(infile, header, _dl_pagesize);
396 epnt = (ElfW(Ehdr) *) (intptr_t) header;
/* Verify the ELF magic "\177ELF". */
397 if (epnt->e_ident[0] != 0x7f ||
398 epnt->e_ident[1] != 'E' ||
399 epnt->e_ident[2] != 'L' ||
400 epnt->e_ident[3] != 'F')
402 _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
404 _dl_internal_error_number = LD_ERROR_NOTELF;
406 _dl_munmap(header, _dl_pagesize);
/* Must be ET_DYN and built for one of this target's machine codes. */
410 if ((epnt->e_type != ET_DYN) || (epnt->e_machine != MAGIC1
412 && epnt->e_machine != MAGIC2
416 _dl_internal_error_number =
417 (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
418 _dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
419 "\n", _dl_progname, libname);
421 _dl_munmap(header, _dl_pagesize);
/* First pass over the program headers: find PT_DYNAMIC and compute the
 * total virtual address span [minvma, maxvma) of the PT_LOAD segments. */
425 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
428 for (i = 0; i < epnt->e_phnum; i++) {
430 if (ppnt->p_type == PT_DYNAMIC) {
432 _dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
433 _dl_progname, libname);
434 dynamic_addr = ppnt->p_vaddr;
437 if (ppnt->p_type == PT_LOAD) {
438 /* See if this is a PIC library. */
439 if (i == 0 && ppnt->p_vaddr > 0x1000000) {
441 minvma = ppnt->p_vaddr;
443 if (piclib && ppnt->p_vaddr < minvma) {
444 minvma = ppnt->p_vaddr;
446 if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
447 maxvma = ppnt->p_vaddr + ppnt->p_memsz;
453 DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
/* Round the span out: maxvma up to the next page, minvma down to 64K. */
455 maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
456 minvma = minvma & ~0xffffU;
458 flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
/* Reserve the whole span with a PROT_NONE anonymous mapping so later
 * per-segment MAP_FIXED mappings land at consistent offsets.  For a
 * non-PIC library (piclib == 0) the reservation is at minvma itself. */
462 if (piclib == 0 || piclib == 1) {
463 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
464 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
465 if (_dl_mmap_check_error(status)) {
466 _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
467 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
469 _dl_munmap(header, _dl_pagesize);
472 libaddr = (unsigned long) status;
476 /* Get the memory to store the library */
477 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
479 DL_INIT_LOADADDR(lib_loadaddr, libaddr, ppnt, epnt->e_phnum);
/* Second pass: map each segment for real.  PT_GNU_RELRO is only
 * recorded here; _dl_protect_relro() applies it after relocation. */
481 for (i = 0; i < epnt->e_phnum; i++) {
482 if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
485 addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
489 DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
493 if (ppnt->p_type == PT_GNU_RELRO) {
494 relro_addr = ppnt->p_vaddr;
495 relro_size = ppnt->p_memsz;
497 if (ppnt->p_type == PT_LOAD) {
501 /* See if this is a PIC library. */
502 if (i == 0 && ppnt->p_vaddr > 0x1000000) {
504 /* flags |= MAP_FIXED; */
/* Writable segment: needs special care because the BSS tail (memsz >
 * filesz) must be zeroed and may need extra anonymous pages. */
507 if (ppnt->p_flags & PF_W) {
508 unsigned long map_size;
510 char *piclib2map = 0;
513 /* We might be able to avoid this
514 call if memsz doesn't require
515 an additional page, but this
516 would require mmap to always
517 return page-aligned addresses
518 and a whole number of pages
519 allocated. Unfortunately on
520 uClinux may return misaligned
521 addresses and may allocate
522 partial pages, so we may end up
523 doing unnecessary mmap calls.
525 This is what we could do if we
526 knew mmap would always return
529 ((ppnt->p_vaddr + ppnt->p_filesz
532 < ppnt->p_vaddr + ppnt->p_memsz)
534 Instead, we have to do this: */
535 ppnt->p_filesz < ppnt->p_memsz)
537 piclib2map = (char *)
538 _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
540 LXFLAGS(ppnt->p_flags),
541 flags | MAP_ANONYMOUS, -1, 0);
542 if (_dl_mmap_check_error(piclib2map))
545 (lib_loadaddr, piclib2map
546 + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
549 tryaddr = piclib == 2 ? piclib2map
550 : ((char*) (piclib ? libaddr : 0) +
551 (ppnt->p_vaddr & PAGE_ALIGN));
553 size = (ppnt->p_vaddr & ADDR_ALIGN)
556 /* For !MMU, mmap to fixed address will fail.
557 So instead of desperately call mmap and fail,
558 we set status to MAP_FAILED to save a call
560 #ifndef __ARCH_USE_MMU__
563 status = (char *) _dl_mmap
564 (tryaddr, size, LXFLAGS(ppnt->p_flags),
565 flags | (piclib2map ? MAP_FIXED : 0),
566 infile, ppnt->p_offset & OFFS_ALIGN);
567 #ifndef __ARCH_USE_MMU__
/* no-MMU fallback: if the file mmap failed but we have the anonymous
 * piclib2map region, read the segment contents into it instead. */
572 if (_dl_mmap_check_error(status) && piclib2map
573 && (_DL_PREAD (infile, tryaddr, size,
574 ppnt->p_offset & OFFS_ALIGN)
578 if (_dl_mmap_check_error(status)
579 || (tryaddr && tryaddr != status)) {
581 _dl_dprintf(2, "%s:%i: can't map '%s'\n",
582 _dl_progname, __LINE__, libname);
583 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
584 DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
586 _dl_munmap(header, _dl_pagesize);
592 (lib_loadaddr, status
593 + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
595 /* Now we want to allocate and
596 zero-out any data from the end of
597 the region we mapped in from the
598 file (filesz) to the end of the
599 loadable segment (memsz). We may
600 need additional pages for memsz,
601 that we map in below, and we can
602 count on the kernel to zero them
603 out, but we have to zero out stuff
604 in the last page that we mapped in
605 from the file. However, we can't
606 assume to have actually obtained
607 full pages from the kernel, since
608 we didn't ask for them, and uClibc
609 may not give us full pages for
610 small allocations. So only zero
611 out up to memsz or the end of the
612 page, whichever comes first. */
614 /* CPNT is the beginning of the memsz
615 portion not backed by filesz. */
616 cpnt = (char *) (status + size);
618 /* MAP_SIZE is the address of the
619 beginning of the next page. */
620 map_size = (ppnt->p_vaddr + ppnt->p_filesz
621 + ADDR_ALIGN) & PAGE_ALIGN;
624 # define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Map the remaining BSS pages anonymously (kernel zeroes them). */
633 if (map_size < ppnt->p_vaddr + ppnt->p_memsz
635 tryaddr = map_size + (char*)(piclib ? libaddr : 0);
636 status = (char *) _dl_mmap(tryaddr,
637 ppnt->p_vaddr + ppnt->p_memsz - map_size,
638 LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
639 if (_dl_mmap_check_error(status)
640 || tryaddr != status)
/* Read-only / executable segment: a plain file-backed mapping. */
644 tryaddr = (piclib == 2 ? 0
645 : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
646 + (piclib ? libaddr : 0));
647 size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
648 status = (char *) _dl_mmap
649 (tryaddr, size, LXFLAGS(ppnt->p_flags),
650 flags | (piclib == 2 ? MAP_EXECUTABLE
651 | MAP_DENYWRITE : 0),
652 infile, ppnt->p_offset & OFFS_ALIGN);
653 if (_dl_mmap_check_error(status)
654 || (tryaddr && tryaddr != status))
657 (lib_loadaddr, status
658 + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
661 /* if (libaddr == 0 && piclib) {
662 libaddr = (unsigned long) status;
670 /* For a non-PIC library, the addresses are all absolute */
672 dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr);
676 * OK, the ELF library is now loaded into VM in the correct locations
677 * The next step is to go through and do the dynamic linking (if needed).
680 /* Start by scanning the dynamic section to get all of the pointers */
683 _dl_internal_error_number = LD_ERROR_NODYNAMIC;
684 _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
685 _dl_progname, libname);
686 _dl_munmap(header, _dl_pagesize);
690 dpnt = (ElfW(Dyn) *) dynamic_addr;
691 _dl_memset(dynamic_info, 0, sizeof(dynamic_info));
692 _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
693 /* If the TEXTREL is set, this means that we need to make the pages
694 writable before we perform relocations. Do this now. They get set
697 if (dynamic_info[DT_TEXTREL]) {
698 #ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__
699 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
700 for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
701 if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W))
702 _dl_mprotect((void *) ((piclib ? libaddr : 0) +
703 (ppnt->p_vaddr & PAGE_ALIGN)),
704 (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
705 PROT_READ | PROT_WRITE | PROT_EXEC);
708 _dl_dprintf(_dl_debug_file, "Can't modify %s's text section. Use GCC option -fPIC for shared objects, please.\n",libname);
/* Register the module in the global hash table and fill in the
 * bookkeeping callers rely on (relro range, dev/ino for the duplicate
 * check above, program-header pointer/count). */
713 tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
715 tpnt->relro_addr = relro_addr;
716 tpnt->relro_size = relro_size;
717 tpnt->st_dev = st.st_dev;
718 tpnt->st_ino = st.st_ino;
719 tpnt->ppnt = (ElfW(Phdr) *) DL_RELOC_ADDR(tpnt->loadaddr, epnt->e_phoff);
720 tpnt->n_phent = epnt->e_phnum;
723 * Add this object into the symbol chain
726 (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
727 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
728 (*rpnt)->next->prev = (*rpnt);
729 *rpnt = (*rpnt)->next;
732 /* When statically linked, the first time we dlopen a DSO
733 * the *rpnt is NULL, so we need to allocate memory for it,
734 * and initialize the _dl_symbol_table.
737 *rpnt = _dl_symbol_tables = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
738 _dl_memset(*rpnt, 0, sizeof(struct dyn_elf));
742 tpnt->symbol_scope = _dl_symbol_tables;
744 tpnt->libtype = elf_lib;
747 * OK, the next thing we need to do is to insert the dynamic linker into
748 * the proper entry in the GOT so that the PLT symbols can be properly
752 lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
755 lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT]);
756 INIT_GOT(lpnt, tpnt);
759 _dl_if_debug_dprint("\n\tfile='%s'; generating link map\n", libname);
760 _dl_if_debug_dprint("\t\tdynamic: %x base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr));
761 _dl_if_debug_dprint("\t\t entry: %x phdr: %x phnum: %x\n\n",
762 DL_RELOC_ADDR(lib_loadaddr, epnt->e_entry), tpnt->ppnt, tpnt->n_phent);
/* The scratch header page is no longer needed. */
764 _dl_munmap(header, _dl_pagesize);
769 /* now_flag must be RTLD_NOW or zero */
/* Walk the dyn_elf chain recursively (deepest dependency first, via
 * the recursive call on rpnt->next) and perform relocations for each
 * module: first the normal relocation table, with XX_RELATIVE entries
 * batch-processed by elf_machine_relative(), then the PLT (DT_JMPREL)
 * entries either lazily or eagerly depending on now_flag/DT_BIND_NOW.
 * Accumulates the number of unresolved symbols in `goof`.
 * NOTE(review): some lines (goof init, tpnt assignment, return) are
 * elided in this listing. */
770 int _dl_fixup(struct dyn_elf *rpnt, int now_flag)
773 struct elf_resolve *tpnt;
774 ElfW(Word) reloc_size, relative_count;
775 ElfW(Addr) reloc_addr;
778 goof = _dl_fixup(rpnt->next, now_flag);
783 if (!(tpnt->init_flag & RELOCS_DONE)) {
784 _dl_if_debug_dprint("relocation processing: %s\n", tpnt->libname);
786 if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
787 _dl_if_debug_dprint("%s: can't handle %s relocation records\n",
788 _dl_progname, UNSUPPORTED_RELOC_STR);
793 reloc_size = tpnt->dynamic_info[DT_RELOC_TABLE_SIZE];
794 /* On some machines, notably SPARC & PPC, DT_REL* includes DT_JMPREL in its
795 range. Note that according to the ELF spec, this is completely legal! */
796 #ifdef ELF_MACHINE_PLTREL_OVERLAP
797 reloc_size -= tpnt->dynamic_info [DT_PLTRELSZ];
799 if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR] &&
800 !(tpnt->init_flag & RELOCS_DONE)) {
801 reloc_addr = tpnt->dynamic_info[DT_RELOC_TABLE_ADDR];
802 relative_count = tpnt->dynamic_info[DT_RELCONT_IDX];
803 if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */
804 reloc_size -= relative_count * sizeof(ELF_RELOC);
805 elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
806 reloc_addr += relative_count * sizeof(ELF_RELOC);
808 goof += _dl_parse_relocation_information(rpnt,
811 tpnt->init_flag |= RELOCS_DONE;
813 if (tpnt->dynamic_info[DT_BIND_NOW])
/* PLT relocations: process if not yet done, or if upgrading a lazily
 * bound object to RTLD_NOW. */
815 if (tpnt->dynamic_info[DT_JMPREL] &&
816 (!(tpnt->init_flag & JMP_RELOCS_DONE) ||
817 (now_flag && !(tpnt->rtld_flags & now_flag)))) {
818 tpnt->rtld_flags |= now_flag;
819 if (!(tpnt->rtld_flags & RTLD_NOW)) {
820 _dl_parse_lazy_relocation_information(rpnt,
821 tpnt->dynamic_info[DT_JMPREL],
822 tpnt->dynamic_info [DT_PLTRELSZ]);
824 goof += _dl_parse_relocation_information(rpnt,
825 tpnt->dynamic_info[DT_JMPREL],
826 tpnt->dynamic_info[DT_PLTRELSZ]);
828 tpnt->init_flag |= JMP_RELOCS_DONE;
833 /* Minimal printf which handles only %s, %d, and %x */
/* Writes directly to `fd` with _dl_write(); usable before/without
 * stdio.  The format string is copied into a private anonymous page
 * (the loader avoids malloc here) and tokenized at '%' markers; the
 * page is unmapped on exit.  Formats longer than a page are rejected.
 * NOTE(review): the conversion-dispatch switch and the 'l' length
 * handling lines are partially elided in this listing. */
834 void _dl_dprintf(int fd, const char *fmt, ...)
842 char *start, *ptr, *string;
848 buf = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
849 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
850 if (_dl_mmap_check_error(buf)) {
851 _dl_write(fd, "mmap of a spare page failed!\n", 29);
857 if (_dl_strlen(fmt) >= (_dl_pagesize - 1)) {
858 _dl_write(fd, "overflow\n", 11);
862 _dl_strcpy(buf, fmt);
/* Emit the literal run up to the next '%', then handle one conversion. */
866 while (*ptr != '%' && *ptr) {
872 _dl_write(fd, start, _dl_strlen(start));
876 string = va_arg(args, char *);
879 _dl_write(fd, "(null)", 6);
881 _dl_write(fd, string, _dl_strlen(string));
/* %d: fetch long or int depending on a length modifier, then print
 * via _dl_simple_ltoa into a small scratch buffer. */
889 num = va_arg(args, long int);
891 num = va_arg(args, int);
893 string = _dl_simple_ltoa(tmp, num);
894 _dl_write(fd, string, _dl_strlen(string));
/* %x: same as %d but hexadecimal via _dl_simple_ltoahex. */
902 num = va_arg(args, long int);
904 num = va_arg(args, int);
906 string = _dl_simple_ltoahex(tmp, num);
907 _dl_write(fd, string, _dl_strlen(string));
911 _dl_write(fd, "(null)", 6);
917 _dl_write(fd, start, _dl_strlen(start));
921 _dl_munmap(buf, _dl_pagesize);
/* Loader-private strdup(): allocate len+1 bytes with _dl_malloc and
 * copy the string.  Caller owns the returned buffer.
 * NOTE(review): the return statement and local declarations are elided
 * in this listing; allocation failure handling is not visible here. */
925 char *_dl_strdup(const char *string)
930 len = _dl_strlen(string);
931 retval = _dl_malloc(len + 1);
932 _dl_strcpy(retval, string);
/* Thin out-of-line wrapper around the (typically inlined)
 * __dl_parse_dynamic_info() helper that fills dynamic_info[] from the
 * DT_* entries of a module's .dynamic section. */
936 void _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[],
937 void *debug_addr, DL_LOADADDR_TYPE load_off)
939 __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);
942 /* we want this in ldso.so and libdl.a but nowhere else */
944 #if defined IS_IN_rtld || (defined IS_IN_libdl && ! defined SHARED)
945 extern __typeof(dl_iterate_phdr) __dl_iterate_phdr;
/* dl_iterate_phdr() implementation: call `callback` once per loaded
 * module with its load address, name, and program-header table.
 * NOTE(review): the loop-termination handling of `ret` (stop when the
 * callback returns non-zero) and the return statement are elided in
 * this listing. */
947 __dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info, size_t size, void *data), void *data)
949 struct elf_resolve *l;
950 struct dl_phdr_info info;
953 for (l = _dl_loaded_modules; l != NULL; l = l->next) {
954 info.dlpi_addr = l->loadaddr;
955 info.dlpi_name = l->libname;
956 info.dlpi_phdr = l->ppnt;
957 info.dlpi_phnum = l->n_phent;
958 ret = callback (&info, sizeof (struct dl_phdr_info), data);
964 strong_alias(__dl_iterate_phdr, dl_iterate_phdr)