1 /* vi: set sw=4 ts=4: */
3 * This file contains the helper routines to load an ELF shared
4 * library into memory and add the symbol table info to the chain.
6 * Copyright (C) 2000-2006 by Erik Andersen <andersen@codepoet.org>
7 * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8 * David Engel, Hongjiu Lu and Mitch D'Souza
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. The name of the above contributors may not be
16 * used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #ifdef __LDSO_CACHE_SUPPORT__
/* State of the ld.so.cache mapping: NULL before the first attempt,
 * MAP_FAILED after a failed or corrupt load (so we never retry),
 * otherwise the live read-only mapping of the cache file. */
37 static caddr_t _dl_cache_addr = NULL;
38 static size_t _dl_cache_size = 0;
/* Map LDSO_CACHE read-only and validate its header and entries.
 * On success _dl_cache_addr/_dl_cache_size describe the mapping; on any
 * failure _dl_cache_addr is poisoned with MAP_FAILED so later calls
 * bail out immediately. */
40 int _dl_map_cache(void)
48 if (_dl_cache_addr == MAP_FAILED)
50 else if (_dl_cache_addr != NULL)
/* Stat first to learn the file size, then open read-only. */
53 if (_dl_stat(LDSO_CACHE, &st)
54 || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
55 _dl_cache_addr = MAP_FAILED; /* so we won't try again */
59 _dl_cache_size = st.st_size;
60 _dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ, LDSO_CACHE_MMAP_FLAGS, fd, 0);
62 if (_dl_mmap_check_error(_dl_cache_addr)) {
63 _dl_dprintf(2, "%s:%i: can't map '%s'\n",
64 _dl_progname, __LINE__, LDSO_CACHE);
/* Sanity-check the cache image: minimum size, magic, version, room for
 * the advertised number of entries, and a NUL-terminated string table
 * (last byte of the file must be '\0'). */
68 header = (header_t *) _dl_cache_addr;
70 if (_dl_cache_size < sizeof(header_t) ||
71 _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
72 || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
74 (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
75 || _dl_cache_addr[_dl_cache_size - 1] != '\0')
77 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
/* Each entry's soname/libname offsets must land inside the string
 * table that follows the entry array. */
82 strtabsize = _dl_cache_size - sizeof(header_t) -
83 header->nlibs * sizeof(libentry_t);
84 libent = (libentry_t *) & header[1];
86 for (i = 0; i < header->nlibs; i++) {
87 if (libent[i].sooffset >= strtabsize ||
88 libent[i].liboffset >= strtabsize)
90 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
/* Corrupt cache: drop the mapping and poison the pointer. */
98 _dl_munmap(_dl_cache_addr, _dl_cache_size);
99 _dl_cache_addr = MAP_FAILED;
/* Release the cache mapping (no-op when never mapped or the map
 * attempt failed) and reset the bookkeeping to the initial state. */
103 int _dl_unmap_cache(void)
105 if (_dl_cache_addr == NULL || _dl_cache_addr == MAP_FAILED)
109 _dl_munmap(_dl_cache_addr, _dl_cache_size);
110 _dl_cache_addr = NULL;
/* Apply PT_GNU_RELRO protection: once relocations are done, remap the
 * module's relro region read-only.  start/end are derived from the
 * relocated relro address and size with the sub-page bits masked off
 * (PAGE_ALIGN is used as a rounding mask here). */
119 _dl_protect_relro (struct elf_resolve *l)
121 ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
122 ElfW(Addr) start = (base & PAGE_ALIGN);
123 ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);
124 _dl_if_debug_dprint("RELRO protecting %s: start:%x, end:%x\n", l->libname, start, end);
/* A failed mprotect() is reported but the library name is the only
 * context available at this point. */
126 _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
127 _dl_dprintf(2, "%s: cannot apply additional memory protection after relocation", l->libname);
132 /* This function's behavior must exactly match that
133 * in uClibc/ldso/util/ldd.c */
/* Try to load 'name' from each directory in the colon-separated
 * 'path_list'.  Each candidate is built as "<dir>/<name>" in a local
 * buffer and handed to _dl_load_elf_shared_library(); the first
 * successful load wins.  Returns NULL when no directory contains the
 * library. */
134 static struct elf_resolve *
135 search_for_named_library(const char *name, unsigned rflags, const char *path_list,
136 struct dyn_elf **rpnt)
138 char *path, *path_n, *mylibname;
139 struct elf_resolve *tpnt;
145 /* We need a writable copy of this string, but we don't
146 * need this allocated permanently since we don't want
147 * to leak memory, so use alloca to put path on the stack */
148 done = _dl_strlen(path_list);
149 path = alloca(done + 1);
151 /* another bit of local storage; 2050 bytes bounds dir (<=1024,
152 * enforced by the caller) + '/' + name (<=1024) + NUL */
152 mylibname = alloca(2050);
154 _dl_memcpy(path, path_list, done+1);
156 /* Unlike ldd.c, don't bother to eliminate double //s */
158 /* Replace colons with zeros in path_list */
159 /* : at the beginning or end of path maps to CWD */
160 /* :: anywhere maps to CWD */
172 _dl_strcpy(mylibname, path_n);
174 _dl_strcpy(mylibname, "."); /* Assume current dir if empty path */
175 _dl_strcat(mylibname, "/");
176 _dl_strcat(mylibname, name);
177 #ifdef __LDSO_SAFE_RUNPATH__
/* With safe-runpath, only absolute candidate paths are accepted. */
178 if (*mylibname == '/')
180 if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL)
189 /* Used to return error codes back to dlopen et al.
 * _dl_error_number is the externally visible code; the internal one is
 * accumulated during a load attempt and promoted on final failure. */
190 unsigned long _dl_error_number;
191 unsigned long _dl_internal_error_number;
/* Resolve 'full_libname' to a loaded module.  Search order, as coded
 * below: an explicit path (name contains '/') is tried verbatim; then
 * DT_RPATH of the requesting object, LD_LIBRARY_PATH, DT_RUNPATH, the
 * ld.so cache, the interpreter's own directory, and finally the
 * hard-coded default path list.  Returns NULL on failure with
 * _dl_error_number set. */
193 struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rpnt,
194 struct elf_resolve *tpnt, char *full_libname, int attribute_unused trace_loaded_objects)
197 struct elf_resolve *tpnt1;
200 _dl_internal_error_number = 0;
201 libname = full_libname;
203 /* quick hack to ensure mylibname buffer doesn't overflow. don't
204 allow full_libname or any directory to be longer than 1024. */
205 if (_dl_strlen(full_libname) > 1024)
208 /* Skip over any initial './' and '/' stuff to
209 * get the short form libname with no path garbage */
210 pnt = _dl_strrchr(libname, '/');
215 _dl_if_debug_dprint("\tfind library='%s'; searching\n", libname);
216 /* If the filename has any '/', try it straight and leave it at that.
217 For IBCS2 compatibility under linux, we substitute the string
218 /usr/i486-sysv4/lib for /usr/lib in library names. */
220 if (libname != full_libname) {
221 _dl_if_debug_dprint("\ttrying file='%s'\n", full_libname);
222 tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, full_libname);
/*
229 * The ABI specifies that RPATH is searched before LD_LIBRARY_PATH or
230 * the default path of /usr/lib. Check in rpath directories.
 */
232 #ifdef __LDSO_RUNPATH__
/* DT_RPATH is a string-table offset; add DT_STRTAB to get the string. */
233 pnt = (tpnt ? (char *) tpnt->dynamic_info[DT_RPATH] : NULL);
235 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
236 _dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt);
237 if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
242 #ifdef __LDSO_LD_LIBRARY_PATH__
243 /* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
244 if (_dl_library_path) {
245 _dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
246 if ((tpnt1 = search_for_named_library(libname, rflags, _dl_library_path, rpnt)) != NULL)
/*
253 * The ABI specifies that RUNPATH is searched after LD_LIBRARY_PATH.
 */
255 #ifdef __LDSO_RUNPATH__
256 pnt = (tpnt ? (char *)tpnt->dynamic_info[DT_RUNPATH] : NULL);
258 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
259 _dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt);
260 if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
/*
266 * Where should the cache be searched? There is no such concept in the
267 * ABI, so we have some flexibility here. For now, search it before
268 * the hard coded paths that follow (i.e before /lib and /usr/lib).
 */
270 #ifdef __LDSO_CACHE_SUPPORT__
271 if (_dl_cache_addr != NULL && _dl_cache_addr != MAP_FAILED) {
273 header_t *header = (header_t *) _dl_cache_addr;
274 libentry_t *libent = (libentry_t *) & header[1];
275 char *strs = (char *) &libent[header->nlibs];
277 _dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE);
/* Match by soname; accept any of the ELF flavors the cache records. */
278 for (i = 0; i < header->nlibs; i++) {
279 if ((libent[i].flags == LIB_ELF
280 || libent[i].flags == LIB_ELF_LIBC0
281 || libent[i].flags == LIB_ELF_LIBC5)
282 && _dl_strcmp(libname, strs + libent[i].sooffset) == 0
283 && (tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, strs + libent[i].liboffset))
290 #if defined SHARED && defined __LDSO_SEARCH_INTERP_PATH__
291 /* Look for libraries wherever the shared library loader
 * itself was installed. */
293 _dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
294 tpnt1 = search_for_named_library(libname, rflags, _dl_ldsopath, rpnt);
298 /* Lastly, search the standard list of paths for the library.
299 This list must exactly match the list in uClibc/ldso/util/ldd.c */
300 _dl_if_debug_dprint("\tsearching full lib path list\n");
301 tpnt1 = search_for_named_library(libname, rflags,
302 UCLIBC_RUNTIME_PREFIX "lib:"
303 UCLIBC_RUNTIME_PREFIX "usr/lib"
304 #ifndef __LDSO_CACHE_SUPPORT__
305 ":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"
312 /* Well, we shot our wad on that one. All we can do now is punt */
313 if (_dl_internal_error_number)
314 _dl_error_number = _dl_internal_error_number;
316 _dl_error_number = LD_ERROR_NOFILE;
317 _dl_if_debug_dprint("Bummer: could not find '%s'!\n", libname);
321 /* Define the _dl_library_offset for the architectures that need it */
/*
325 * Make a writeable mapping of a segment, regardless of whether PF_W is
 * set in its program header.  Used both for ordinary PF_W data
 * segments and (by the DT_TEXTREL path) to remap read-only text.
 * Returns the mapped address, or an mmap error value on failure.
 */
329 map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags,
330 unsigned long libaddr)
332 int prot_flags = ppnt->p_flags | PF_W;
333 char *status, *retval;
336 unsigned long map_size;
338 char *piclib2map = NULL;
341 /* We might be able to avoid this call if memsz doesn't
342 require an additional page, but this would require mmap
343 to always return page-aligned addresses and a whole
344 number of pages allocated. Unfortunately on uClinux
345 mmap may return misaligned addresses and may allocate
346 partial pages, so we may end up doing unnecessary mmap
 calls.
349 This is what we could do if we knew mmap would always
350 return aligned pages:
352 ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) &
353 PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz)
355 Instead, we have to do this: */
356 ppnt->p_filesz < ppnt->p_memsz)
/* Anonymous backing covering the full memsz span; the file contents
 * are mapped (or read) over the front of it below. */
358 piclib2map = (char *)
359 _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz,
360 LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0);
361 if (_dl_mmap_check_error(piclib2map))
365 tryaddr = piclib == 2 ? piclib2map
366 : ((char *) (piclib ? libaddr : DL_GET_LIB_OFFSET()) +
367 (ppnt->p_vaddr & PAGE_ALIGN));
369 size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
371 /* For !MMU, mmap to fixed address will fail.
372 So instead of desperately call mmap and fail,
373 we set status to MAP_FAILED to save a call
 to mmap(). */
375 #ifndef __ARCH_USE_MMU__
378 status = (char *) _dl_mmap
379 (tryaddr, size, LXFLAGS(prot_flags),
380 flags | (piclib2map ? MAP_FIXED : 0),
381 infile, ppnt->p_offset & OFFS_ALIGN);
382 #ifndef __ARCH_USE_MMU__
/* !MMU fallback: copy the file contents into the anonymous mapping
 * with pread instead of a file-backed MAP_FIXED mmap. */
387 if (_dl_mmap_check_error(status) && piclib2map
388 && (_DL_PREAD (infile, tryaddr, size,
389 ppnt->p_offset & OFFS_ALIGN) == size))
392 if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status))
400 /* Now we want to allocate and zero-out any data from the end
401 of the region we mapped in from the file (filesz) to the
402 end of the loadable segment (memsz). We may need
403 additional pages for memsz, that we map in below, and we
404 can count on the kernel to zero them out, but we have to
405 zero out stuff in the last page that we mapped in from the
406 file. However, we can't assume to have actually obtained
407 full pages from the kernel, since we didn't ask for them,
408 and uClibc may not give us full pages for small
409 allocations. So only zero out up to memsz or the end of
410 the page, whichever comes first. */
412 /* CPNT is the beginning of the memsz portion not backed by
 file data. */
414 cpnt = (char *) (status + size);
416 /* MAP_SIZE is the address of the
417 beginning of the next page. */
418 map_size = (ppnt->p_vaddr + ppnt->p_filesz
419 + ADDR_ALIGN) & PAGE_ALIGN;
/* Map any remaining whole pages of the bss anonymously (the kernel
 * zeroes them); not needed when piclib2map already covers memsz. */
428 if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) {
429 tryaddr = map_size + (char*)(piclib ? libaddr : 0);
430 status = (char *) _dl_mmap(tryaddr,
431 ppnt->p_vaddr + ppnt->p_memsz - map_size,
433 flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
434 if (_dl_mmap_check_error(status) || tryaddr != status)
/*
441 * Read one ELF library into memory, mmap it into the correct locations and
442 * add the symbol info to the symbol chain. Perform any relocations that
 * are required.  Returns the new module, or NULL with
 * _dl_internal_error_number set on failure.
 */
446 struct elf_resolve *_dl_load_elf_shared_library(unsigned rflags,
447 struct dyn_elf **rpnt, const char *libname)
450 unsigned long dynamic_addr = 0;
452 struct elf_resolve *tpnt;
454 #if defined(USE_TLS) && USE_TLS
455 ElfW(Phdr) *tlsppnt = NULL;
457 char *status, *header;
458 unsigned long dynamic_info[DYNAMIC_SIZE];
460 unsigned long libaddr;
461 unsigned long minvma = 0xffffffff, maxvma = 0;
462 unsigned int rtld_flags;
463 int i, flags, piclib, infile;
464 ElfW(Addr) relro_addr = 0;
465 size_t relro_size = 0;
468 DL_LOADADDR_TYPE lib_loadaddr;
469 DL_INIT_LOADADDR_EXTRA_DECLS
/* Phase 1: open and stat the file. */
472 infile = _dl_open(libname, O_RDONLY, 0);
474 _dl_internal_error_number = LD_ERROR_NOFILE;
478 if (_dl_fstat(infile, &st) < 0) {
479 _dl_internal_error_number = LD_ERROR_NOFILE;
483 /* If we are in secure mode (i.e. a setuid/gid binary using LD_PRELOAD),
484 we don't load the library if it isn't setuid. */
485 if (rflags & DL_RESOLVE_SECURE) {
486 if (!(st.st_mode & S_ISUID)) {
492 /* Check if file is already loaded (identity = device + inode). */
493 for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
494 if (tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {
501 if (rflags & DL_RESOLVE_NOLOAD) {
/* Phase 2: read the first page (ELF header + program headers) into a
 * scratch page; it is unmapped again on every exit path below. */
505 header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
506 MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
507 if (_dl_mmap_check_error(header)) {
508 _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
509 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
514 _dl_read(infile, header, _dl_pagesize);
515 epnt = (ElfW(Ehdr) *) (intptr_t) header;
/* Compare the 4-byte ELF magic in one 32-bit load. */
516 p32 = (uint32_t*)&epnt->e_ident;
517 if (*p32 != ELFMAG_U32) {
518 _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
520 _dl_internal_error_number = LD_ERROR_NOTELF;
522 _dl_munmap(header, _dl_pagesize);
/* Reject wrong object type / wrong target machine. */
526 if ((epnt->e_type != ET_DYN
527 #ifdef __LDSO_STANDALONE_SUPPORT__
528 && epnt->e_type != ET_EXEC
530 ) || (epnt->e_machine != MAGIC1
532 && epnt->e_machine != MAGIC2
536 _dl_internal_error_number =
537 (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
538 _dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
539 "\n", _dl_progname, libname);
541 _dl_munmap(header, _dl_pagesize);
/* Phase 3: first program-header pass — locate PT_DYNAMIC, compute the
 * [minvma, maxvma) span of the PT_LOAD segments, detect PIC vs
 * fixed-address libraries, and remember any PT_TLS header. */
545 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
548 for (i = 0; i < epnt->e_phnum; i++) {
550 if (ppnt->p_type == PT_DYNAMIC) {
552 _dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
553 _dl_progname, libname);
554 dynamic_addr = ppnt->p_vaddr;
557 if (ppnt->p_type == PT_LOAD) {
558 /* See if this is a PIC library. */
559 if (minvma == 0xffffffff && ppnt->p_vaddr > 0x1000000) {
561 minvma = ppnt->p_vaddr;
563 if (piclib && ppnt->p_vaddr < minvma) {
564 minvma = ppnt->p_vaddr;
566 if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
567 maxvma = ppnt->p_vaddr + ppnt->p_memsz;
570 if (ppnt->p_type == PT_TLS) {
571 #if defined(USE_TLS) && USE_TLS
572 if (ppnt->p_memsz == 0)
573 /* Nothing to do for an empty segment. */
576 /* Save for after 'tpnt' is actually allocated. */
/*
580 * Yup, the user was an idiot and tried to sneak in a library with
581 * TLS in it and we don't support it. Let's fall on our own sword
582 * and scream at the luser while we die.
 */
584 _dl_dprintf(2, "%s: '%s' library contains unsupported TLS\n",
585 _dl_progname, libname);
586 _dl_internal_error_number = LD_ERROR_TLS_FAILED;
588 _dl_munmap(header, _dl_pagesize);
595 #ifdef __LDSO_STANDALONE_SUPPORT__
596 if (epnt->e_type == ET_EXEC)
600 DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
/* Round the VMA span out to whole pages. */
602 maxvma = (maxvma + ADDR_ALIGN) & PAGE_ALIGN;
603 minvma = minvma & ~ADDR_ALIGN;
605 flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
/* Phase 4: reserve the whole address range with a PROT_NONE mapping
 * (piclib 0/1 only); segments are then mapped over it below. */
607 if (piclib == 0 || piclib == 1) {
608 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
609 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
610 if (_dl_mmap_check_error(status)) {
612 _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
613 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
615 _dl_munmap(header, _dl_pagesize);
618 libaddr = (unsigned long) status;
622 /* Get the memory to store the library */
623 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
625 DL_INIT_LOADADDR(lib_loadaddr, libaddr - minvma, ppnt, epnt->e_phnum);
626 /* Set _dl_library_offset to lib_loadaddr or 0. */
627 DL_SET_LIB_OFFSET(lib_loadaddr);
/* Phase 5: second program-header pass — map each segment. */
629 for (i = 0; i < epnt->e_phnum; i++) {
630 if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
633 addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
636 DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
640 DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
644 if (ppnt->p_type == PT_GNU_RELRO) {
645 relro_addr = ppnt->p_vaddr;
646 relro_size = ppnt->p_memsz;
648 if (ppnt->p_type == PT_LOAD) {
/* Writable segments need bss handling; read-only ones map the
 * file directly. */
652 if (ppnt->p_flags & PF_W) {
653 status = map_writeable (infile, ppnt, piclib, flags, libaddr);
657 tryaddr = (piclib == 2 ? 0
658 : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
659 + (piclib ? libaddr : DL_GET_LIB_OFFSET()));
660 size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
661 status = (char *) _dl_mmap
662 (tryaddr, size, LXFLAGS(ppnt->p_flags),
663 flags | (piclib == 2 ? MAP_EXECUTABLE
664 | MAP_DENYWRITE : 0),
665 infile, ppnt->p_offset & OFFS_ALIGN);
666 if (_dl_mmap_check_error(status)
667 || (tryaddr && tryaddr != status))
670 DL_INIT_LOADADDR_HDR(lib_loadaddr,
671 status + (ppnt->p_vaddr & ADDR_ALIGN),
674 /* if (libaddr == 0 && piclib) {
675 libaddr = (unsigned long) status;
/*
683 * dynamic_addr must take the lib_loadaddr value into account; note
684 * it is zero when the SO has been mapped to the elf's physical addr
 */
686 #ifdef __LDSO_PRELINK_SUPPORT__
687 if (DL_GET_LIB_OFFSET()) {
691 dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr);
/*
695 * OK, the ELF library is now loaded into VM in the correct locations
696 * The next step is to go through and do the dynamic linking (if needed).
 */
699 /* Start by scanning the dynamic section to get all of the pointers */
702 _dl_internal_error_number = LD_ERROR_NODYNAMIC;
703 _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
704 _dl_progname, libname);
705 _dl_munmap(header, _dl_pagesize);
710 dpnt = (ElfW(Dyn) *) dynamic_addr;
711 _dl_memset(dynamic_info, 0, sizeof(dynamic_info));
712 rtld_flags = _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
713 /* If the TEXTREL is set, this means that we need to make the pages
714 writable before we perform relocations. Do this now. They get set
 back again later. */
717 if (dynamic_info[DT_TEXTREL]) {
718 #ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__
719 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
720 for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
721 if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
722 #ifdef __ARCH_USE_MMU__
723 _dl_mprotect((void *) ((piclib ? libaddr : DL_GET_LIB_OFFSET()) +
724 (ppnt->p_vaddr & PAGE_ALIGN)),
725 (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
726 PROT_READ | PROT_WRITE | PROT_EXEC);
/* !MMU: can't mprotect, so remap the text segment writable. */
729 new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr);
731 _dl_dprintf(_dl_debug_file, "Can't modify %s's text section.",
735 DL_UPDATE_LOADADDR_HDR(lib_loadaddr,
736 new_addr + (ppnt->p_vaddr & ADDR_ALIGN),
738 /* This has invalidated all pointers into the previously readonly segment.
739 Update them to point into the remapped segment. */
740 _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
745 _dl_dprintf(2, "Can't modify %s's text section."
746 " Use GCC option -fPIC for shared objects, please.\n",
/* Phase 6: register the module and fill in its bookkeeping. */
754 tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
756 tpnt->mapaddr = libaddr;
757 tpnt->relro_addr = relro_addr;
758 tpnt->relro_size = relro_size;
759 tpnt->st_dev = st.st_dev;
760 tpnt->st_ino = st.st_ino;
761 tpnt->ppnt = (ElfW(Phdr) *)
762 DL_RELOC_ADDR(DL_GET_RUN_ADDR(tpnt->loadaddr, tpnt->mapaddr),
764 tpnt->n_phent = epnt->e_phnum;
765 tpnt->rtld_flags |= rtld_flags;
766 #ifdef __LDSO_STANDALONE_SUPPORT__
767 tpnt->l_entry = epnt->e_entry;
/* Phase 7: record TLS template info from the PT_TLS header saved
 * earlier and assign a module ID. */
770 #if defined(USE_TLS) && USE_TLS
772 _dl_debug_early("Found TLS header for %s\n", libname);
773 # if NO_TLS_OFFSET != 0
774 tpnt->l_tls_offset = NO_TLS_OFFSET;
776 tpnt->l_tls_blocksize = tlsppnt->p_memsz;
777 tpnt->l_tls_align = tlsppnt->p_align;
778 if (tlsppnt->p_align == 0)
779 tpnt->l_tls_firstbyte_offset = 0;
781 tpnt->l_tls_firstbyte_offset = tlsppnt->p_vaddr &
782 (tlsppnt->p_align - 1);
783 tpnt->l_tls_initimage_size = tlsppnt->p_filesz;
784 tpnt->l_tls_initimage = (void *) tlsppnt->p_vaddr;
786 /* Assign the next available module ID. */
787 tpnt->l_tls_modid = _dl_next_tls_modid ();
789 /* We know the load address, so add it to the offset. */
790 #ifdef __LDSO_STANDALONE_SUPPORT__
791 if ((tpnt->l_tls_initimage != NULL) && piclib)
793 if (tpnt->l_tls_initimage != NULL)
796 # ifdef __SUPPORT_LD_DEBUG_EARLY__
797 char *tmp = (char *) tpnt->l_tls_initimage;
798 tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
799 _dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", tmp, tpnt->l_tls_initimage, tpnt->l_tls_initimage_size);
802 tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
/*
809 * Add this object into the symbol chain
 */
812 #ifdef __LDSO_STANDALONE_SUPPORT__
813 /* Do not create a new chain entry for the main executable */
817 (*rpnt)->next = _dl_malloc(sizeof(struct dyn_elf));
818 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
819 (*rpnt)->next->prev = (*rpnt);
820 *rpnt = (*rpnt)->next;
823 /* When statically linked, the first time we dlopen a DSO
824 * the *rpnt is NULL, so we need to allocate memory for it,
825 * and initialize the _dl_symbol_table.
 */
828 *rpnt = _dl_symbol_tables = _dl_malloc(sizeof(struct dyn_elf));
829 _dl_memset(*rpnt, 0, sizeof(struct dyn_elf));
834 #ifdef __LDSO_STANDALONE_SUPPORT__
835 tpnt->libtype = (epnt->e_type == ET_DYN) ? elf_lib : elf_executable;
837 tpnt->libtype = elf_lib;
/*
841 * OK, the next thing we need to do is to insert the dynamic linker into
842 * the proper entry in the GOT so that the PLT symbols can be properly
 * resolved.
 */
846 lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
849 lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT]);
850 INIT_GOT(lpnt, tpnt);
854 /* Handle DSBT initialization */
856 struct elf_resolve *t, *ref;
857 int idx = tpnt->dsbt_index;
858 void **dsbt = tpnt->dsbt_table;
/*
861 * It is okay (required actually) to have zero idx for an executable.
862 * This is the case when running ldso standalone and the program
863 * is being mapped in via _dl_load_shared_library().
 */
865 if (idx == 0 && tpnt->libtype != elf_executable) {
866 if (!dynamic_info[DT_TEXTREL]) {
867 /* This DSO has not been assigned an index. */
868 _dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n",
869 _dl_progname, libname);
872 /* Find a dsbt table from another module. */
874 for (t = _dl_loaded_modules; t; t = t->next) {
875 if (ref == NULL && t != tpnt) {
880 idx = tpnt->dsbt_size;
882 if (!ref || ref->dsbt_table[idx] == NULL)
885 _dl_dprintf(2, "%s: '%s' caused DSBT table overflow!\n",
886 _dl_progname, libname);
889 _dl_if_debug_dprint("\n\tfile='%s'; assigned index %d\n",
891 tpnt->dsbt_index = idx;
894 /* make sure index is not already used */
895 if (_dl_ldso_dsbt[idx]) {
896 struct elf_resolve *dup;
897 const char *dup_name;
899 for (dup = _dl_loaded_modules; dup; dup = dup->next)
900 if (dup != tpnt && dup->dsbt_index == idx)
903 dup_name = dup->libname;
905 dup_name = "runtime linker";
907 dup_name = "unknown library";
908 _dl_dprintf(2, "%s: '%s' dsbt index %d already used by %s!\n",
909 _dl_progname, libname, idx, dup_name);
/*
914 * Setup dsbt slot for this module in dsbt of all modules.
 */
916 for (t = _dl_loaded_modules; t; t = t->next)
917 t->dsbt_table[idx] = dsbt;
918 _dl_ldso_dsbt[idx] = dsbt;
919 _dl_memcpy(dsbt, _dl_ldso_dsbt,
920 tpnt->dsbt_size * sizeof(tpnt->dsbt_table[0]));
923 _dl_if_debug_dprint("\n\tfile='%s'; generating link map\n", libname);
924 _dl_if_debug_dprint("\t\tdynamic: %x base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr));
925 _dl_if_debug_dprint("\t\t entry: %x phdr: %x phnum: %x\n\n",
926 DL_RELOC_ADDR(lib_loadaddr, epnt->e_entry), tpnt->ppnt, tpnt->n_phent);
/* Release the scratch header page before returning. */
928 _dl_munmap(header, _dl_pagesize);
933 /* now_flag must be RTLD_NOW or zero */
/* Recursively relocate every module on the 'rpnt' chain (deepest
 * first), honoring RELOCS_DONE / JMP_RELOCS_DONE flags so work is not
 * repeated.  Returns the accumulated error count ('goof'); 0 means all
 * relocations succeeded. */
934 int _dl_fixup(struct dyn_elf *rpnt, struct r_scope_elem *scope, int now_flag)
937 struct elf_resolve *tpnt;
938 ElfW(Word) reloc_size, relative_count;
939 ElfW(Addr) reloc_addr;
/* Process dependencies first so symbols they define are resolvable. */
942 goof = _dl_fixup(rpnt->next, scope, now_flag);
947 if (!(tpnt->init_flag & RELOCS_DONE))
948 _dl_if_debug_dprint("relocation processing: %s\n", tpnt->libname);
950 if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
951 _dl_if_debug_dprint("%s: can't handle %s relocation records\n",
952 _dl_progname, UNSUPPORTED_RELOC_STR);
957 reloc_size = tpnt->dynamic_info[DT_RELOC_TABLE_SIZE];
958 /* On some machines, notably SPARC & PPC, DT_REL* includes DT_JMPREL in its
959 range. Note that according to the ELF spec, this is completely legal! */
960 #ifdef ELF_MACHINE_PLTREL_OVERLAP
961 reloc_size -= tpnt->dynamic_info [DT_PLTRELSZ];
/* Non-PLT relocations, done once per module. */
963 if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR] &&
964 !(tpnt->init_flag & RELOCS_DONE)) {
965 reloc_addr = tpnt->dynamic_info[DT_RELOC_TABLE_ADDR];
966 relative_count = tpnt->dynamic_info[DT_RELCONT_IDX];
967 if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */
968 reloc_size -= relative_count * sizeof(ELF_RELOC);
969 #ifdef __LDSO_PRELINK_SUPPORT__
/* Prelinked-at-base modules already have RELATIVE relocs applied. */
970 if (tpnt->loadaddr || (!tpnt->dynamic_info[DT_GNU_PRELINKED_IDX]))
972 elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
973 reloc_addr += relative_count * sizeof(ELF_RELOC);
975 goof += _dl_parse_relocation_information(rpnt, scope,
978 tpnt->init_flag |= RELOCS_DONE;
980 if (tpnt->dynamic_info[DT_BIND_NOW])
/* PLT relocations: lazy stubs by default, eager resolution when
 * RTLD_NOW is in effect; may be revisited to upgrade lazy -> now. */
982 if (tpnt->dynamic_info[DT_JMPREL] &&
983 (!(tpnt->init_flag & JMP_RELOCS_DONE) ||
984 (now_flag && !(tpnt->rtld_flags & now_flag)))) {
985 tpnt->rtld_flags |= now_flag;
986 if (!(tpnt->rtld_flags & RTLD_NOW)) {
987 _dl_parse_lazy_relocation_information(rpnt,
988 tpnt->dynamic_info[DT_JMPREL],
989 tpnt->dynamic_info [DT_PLTRELSZ]);
991 goof += _dl_parse_relocation_information(rpnt, scope,
992 tpnt->dynamic_info[DT_JMPREL],
993 tpnt->dynamic_info[DT_PLTRELSZ]);
995 tpnt->init_flag |= JMP_RELOCS_DONE;
999 /* _dl_add_to_slotinfo is called by init_tls() for initial DSO
1000 or by dlopen() for dynamically loaded DSO. */
1001 #if defined(USE_TLS) && USE_TLS
1002 /* Add object to slot information data if necessary. */
1003 if (tpnt->l_tls_blocksize != 0 && tls_init_tp_called)
1004 _dl_add_to_slotinfo ((struct link_map *) tpnt);
1011 /* Minimal printf which handles only %s, %d, and %x */
/* Self-contained diagnostic printf for the loader: formats into a
 * freshly mmap'd scratch page (no malloc, no stdio) and emits each
 * piece with _dl_write().  The page is unmapped before returning. */
1012 void _dl_dprintf(int fd, const char *fmt, ...)
1020 char *start, *ptr, *string;
1026 buf = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
1027 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1028 if (_dl_mmap_check_error(buf)) {
1029 _dl_write(fd, "mmap of a spare page failed!\n", 29);
/* Format string must fit in the scratch page. */
1035 if (_dl_strlen(fmt) >= (_dl_pagesize - 1)) {
/* NOTE(review): length 11 exceeds the 9-byte "overflow\n" literal —
 * this writes 2 bytes past the string; should be 9. */
1036 _dl_write(fd, "overflow\n", 11);
1040 _dl_strcpy(buf, fmt);
1041 va_start(args, fmt);
/* Scan to the next '%' (or end), then emit the literal run. */
1044 while (*ptr != '%' && *ptr) {
1050 _dl_write(fd, start, _dl_strlen(start));
/* %s: write the argument, or "(null)" for a NULL pointer. */
1054 string = va_arg(args, char *);
1057 _dl_write(fd, "(null)", 6);
1059 _dl_write(fd, string, _dl_strlen(string));
/* %d (with optional 'l' length modifier): decimal conversion. */
1067 num = va_arg(args, long int);
1069 num = va_arg(args, int);
1071 string = _dl_simple_ltoa(tmp, num);
1072 _dl_write(fd, string, _dl_strlen(string));
/* %x (with optional 'l' length modifier): hex conversion. */
1080 num = va_arg(args, long int);
1082 num = va_arg(args, int);
1084 string = _dl_simple_ltoahex(tmp, num);
1085 _dl_write(fd, string, _dl_strlen(string));
1089 _dl_write(fd, "(null)", 6);
/* Trailing literal text after the last conversion. */
1095 _dl_write(fd, start, _dl_strlen(start));
1099 _dl_munmap(buf, _dl_pagesize);
/* Loader-internal strdup: allocate len+1 bytes with _dl_malloc and
 * copy 'string' (including its NUL terminator).
 * NOTE(review): the _dl_malloc result is used unchecked — presumably
 * _dl_malloc aborts on failure rather than returning NULL; confirm. */
1103 char *_dl_strdup(const char *string)
1108 len = _dl_strlen(string);
1109 retval = _dl_malloc(len + 1);
1110 _dl_strcpy(retval, string);
/* Out-of-line wrapper around the __dl_parse_dynamic_info() worker:
 * walks the PT_DYNAMIC entries at 'dpnt' into the dynamic_info[] array
 * (relocating pointer-valued tags by 'load_off') and returns the
 * accumulated RTLD_* flags. */
1115 unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[],
1116 void *debug_addr, DL_LOADADDR_TYPE load_off)
1118 return __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);