1 /* vi: set sw=4 ts=4: */
3 * This file contains the helper routines to load an ELF shared
4 * library into memory and add the symbol table info to the chain.
6 * Copyright (C) 2000-2006 by Erik Andersen <andersen@codepoet.org>
7 * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8 * David Engel, Hongjiu Lu and Mitch D'Souza
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. The name of the above contributors may not be
16 * used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#ifdef __LDSO_CACHE_SUPPORT__

/* Mapped image of the ld.so cache file.
 * NULL       = not mapped yet,
 * MAP_FAILED = a previous mapping attempt failed; never retried. */
static caddr_t _dl_cache_addr = NULL;
static size_t _dl_cache_size = 0;

/* Map LDSO_CACHE read-only and validate its header, magic/version
 * strings and per-library entry table before it is trusted.
 * Idempotent: repeat calls short-circuit on _dl_cache_addr.
 * NOTE(review): this listing is elided — the early-return and
 * error-path statements are not all visible; verify against the
 * full source. */
int _dl_map_cache(void)
	/* Already tried and failed — don't try again. */
	if (_dl_cache_addr == MAP_FAILED)
	/* Already mapped successfully. */
	else if (_dl_cache_addr != NULL)

	if (_dl_stat(LDSO_CACHE, &st)
	    || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
		_dl_cache_addr = MAP_FAILED;	/* so we won't try again */

	_dl_cache_size = st.st_size;
	_dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ, LDSO_CACHE_MMAP_FLAGS, fd, 0);
	if (_dl_mmap_check_error(_dl_cache_addr)) {
		_dl_dprintf(2, "%s:%i: can't map '%s'\n",
			    _dl_progname, __LINE__, LDSO_CACHE);

	header = (header_t *) _dl_cache_addr;

	/* Sanity-check size, magic, version, the entry-table bound, and
	 * that the trailing string table is NUL-terminated. */
	if (_dl_cache_size < sizeof(header_t) ||
	    _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
	    || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
	    (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
	    || _dl_cache_addr[_dl_cache_size - 1] != '\0')
		_dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,

	strtabsize = _dl_cache_size - sizeof(header_t) -
		header->nlibs * sizeof(libentry_t);
	libent = (libentry_t *) & header[1];

	/* Every entry's string offsets must lie inside the string table. */
	for (i = 0; i < header->nlibs; i++) {
		if (libent[i].sooffset >= strtabsize ||
		    libent[i].liboffset >= strtabsize)
			_dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);

	/* Corrupt cache: drop the mapping and poison the pointer so we
	 * never consult it again. */
	_dl_munmap(_dl_cache_addr, _dl_cache_size);
	_dl_cache_addr = MAP_FAILED;
/* Release the mapping made by _dl_map_cache(), if any.  Safe to call
 * when the cache was never mapped or the mapping attempt failed. */
int _dl_unmap_cache(void)
	if (_dl_cache_addr == NULL || _dl_cache_addr == MAP_FAILED)

	_dl_munmap(_dl_cache_addr, _dl_cache_size);
	_dl_cache_addr = NULL;
/* Apply PT_GNU_RELRO protection: once relocations are done, re-protect
 * the RELRO region of module 'l' as read-only.  start/end are aligned
 * with the PAGE_ALIGN mask (presumably ~(pagesize-1) — TODO confirm
 * against the arch headers). */
_dl_protect_relro (struct elf_resolve *l)
	ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
	ElfW(Addr) start = (base & PAGE_ALIGN);
	ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);
	_dl_if_debug_dprint("RELRO protecting %s: start:%x, end:%x\n", l->libname, start, end);
	    _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
		_dl_dprintf(2, "%s: cannot apply additional memory protection after relocation", l->libname);
/* This function's behavior must exactly match that
 * in uClibc/ldso/util/ldd.c */
/* Walk the colon-separated 'path_list', expanding a leading "$ORIGIN"
 * component against 'origin', and try each candidate directory for
 * 'name' via _dl_load_elf_shared_library().  Returns the first module
 * successfully loaded, or NULL.
 * NOTE(review): listing is elided — several branch bodies and the
 * final return are not visible. */
static struct elf_resolve *
search_for_named_library(const char *name, unsigned rflags, const char *path_list,
			 struct dyn_elf **rpnt, const char* origin)
	struct elf_resolve *tpnt;

	/* another bit of local storage */
	mylibname = alloca(2050);

	/* Unlike ldd.c, don't bother to eliminate double //s */

	/* Replace colons with zeros in path_list */
	/* : at the beginning or end of path maps to CWD */
	/* :: anywhere maps CWD */
	for (p = path_list; p != NULL; p = pn) {
		pn = _dl_strchr(p + 1, ':');

		plen = _dl_strlen(p);

		/* A "$ORIGIN" component expands to the directory that
		 * contains 'origin'. */
		if (plen >= 7 && _dl_memcmp(p, "$ORIGIN", 7) == 0) {
			/* In secure mode, reject "$ORIGIN<suffix>" forms. */
			if (rflags && plen != 7)
			/* Scan back to the last '/' to isolate origin's dir. */
			for (olen = _dl_strlen(origin) - 1; olen >= 0 && origin[olen] != '/'; olen--)
			_dl_memcpy(&mylibname[0], origin, olen);
			_dl_memcpy(&mylibname[olen], p + 7, plen - 7);
			mylibname[olen + plen - 7] = 0;
		} else if (plen != 0) {
			_dl_memcpy(mylibname, p, plen);
			/* Empty component maps to the current directory. */
			_dl_strcpy(mylibname, ".");
		_dl_strcat(mylibname, "/");
		_dl_strcat(mylibname, name);
#ifdef __LDSO_SAFE_RUNPATH__
		/* Safe-runpath builds only accept absolute candidates. */
		if (*mylibname == '/')
		if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL)
/* Used to return error codes back to dlopen et. al.  */
unsigned long _dl_error_number;
/* Scratch error code recorded while searching/loading; copied into
 * _dl_error_number when the search finally fails. */
unsigned long _dl_internal_error_number;
/* Locate and load 'full_libname', trying locations in ABI order:
 * explicit path (if the name contains '/'), DT_RPATH, LD_LIBRARY_PATH,
 * DT_RUNPATH, the ld.so cache, the interpreter's own directory, then
 * the hard-coded default list.  Returns the loaded module or NULL
 * with _dl_error_number set.
 * NOTE(review): listing is elided — success returns and several
 * branch bodies are not visible. */
struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rpnt,
	struct elf_resolve *tpnt, char *full_libname, int attribute_unused trace_loaded_objects)
	struct elf_resolve *tpnt1;

	_dl_internal_error_number = 0;
	libname = full_libname;

	/* quick hack to ensure mylibname buffer doesn't overflow.  don't
	   allow full_libname or any directory to be longer than 1024. */
	if (_dl_strlen(full_libname) > 1024)

	/* Skip over any initial initial './' and '/' stuff to
	 * get the short form libname with no path garbage */
	pnt = _dl_strrchr(libname, '/');

	_dl_if_debug_dprint("\tfind library='%s'; searching\n", libname);
	/* If the filename has any '/', try it straight and leave it at that.
	   For IBCS2 compatibility under linux, we substitute the string
	   /usr/i486-sysv4/lib for /usr/lib in library names. */

	if (libname != full_libname) {
		_dl_if_debug_dprint("\ttrying file='%s'\n", full_libname);
		tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, full_libname);

	/*
	 * The ABI specifies that RPATH is searched before LD_LIBRARY_PATH or
	 * the default path of /usr/lib.  Check in rpath directories.
	 */
#ifdef __LDSO_RUNPATH__
	pnt = (tpnt ? (char *) tpnt->dynamic_info[DT_RPATH] : NULL);
		/* DT_RPATH holds a string-table offset; rebase via DT_STRTAB. */
		pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
		_dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt);
		if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt,
						      tpnt->libname)) != NULL)

#ifdef __LDSO_LD_LIBRARY_PATH__
	/* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
	if (_dl_library_path) {
		_dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
		if ((tpnt1 = search_for_named_library(libname, rflags, _dl_library_path, rpnt, NULL)) != NULL)

	/*
	 * The ABI specifies that RUNPATH is searched after LD_LIBRARY_PATH.
	 */
#ifdef __LDSO_RUNPATH__
	pnt = (tpnt ? (char *)tpnt->dynamic_info[DT_RUNPATH] : NULL);
		pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
		_dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt);
		if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt, NULL)) != NULL)

#ifdef __LDSO_RUNPATH_OF_EXECUTABLE__
	/*
	 * Try the DT_RPATH of the executable itself.
	 */
	pnt = (char *) _dl_loaded_modules->dynamic_info[DT_RPATH];
		pnt += (unsigned long) _dl_loaded_modules->dynamic_info[DT_STRTAB];
		_dl_if_debug_dprint("\tsearching exe's RPATH='%s'\n", pnt);
		if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)

	/*
	 * Where should the cache be searched?  There is no such concept in the
	 * ABI, so we have some flexibility here.  For now, search it before
	 * the hard coded paths that follow (i.e before /lib and /usr/lib).
	 */
#ifdef __LDSO_CACHE_SUPPORT__
	if (_dl_cache_addr != NULL && _dl_cache_addr != MAP_FAILED) {
		header_t *header = (header_t *) _dl_cache_addr;
		libentry_t *libent = (libentry_t *) & header[1];
		char *strs = (char *) &libent[header->nlibs];

		_dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE);
		for (i = 0; i < header->nlibs; i++) {
			/* Only ELF-flavoured entries whose soname matches;
			 * then load by the recorded path. */
			if ((libent[i].flags == LIB_ELF
			     || libent[i].flags == LIB_ELF_LIBC0
			     || libent[i].flags == LIB_ELF_LIBC5)
			    && _dl_strcmp(libname, strs + libent[i].sooffset) == 0
			    && (tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, strs + libent[i].liboffset))

#if defined SHARED && defined __LDSO_SEARCH_INTERP_PATH__
	/* Look for libraries wherever the shared library loader
	 * itself was installed. */
	_dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
	tpnt1 = search_for_named_library(libname, rflags, _dl_ldsopath, rpnt, NULL);

	/* Lastly, search the standard list of paths for the library.
	   This list must exactly match the list in uClibc/ldso/util/ldd.c */
	_dl_if_debug_dprint("\tsearching full lib path list\n");
	tpnt1 = search_for_named_library(libname, rflags,
					 UCLIBC_RUNTIME_PREFIX "lib:"
					 UCLIBC_RUNTIME_PREFIX "usr/lib"
#ifndef __LDSO_CACHE_SUPPORT__
					 ":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"

#ifdef __LDSO_RUNPATH_OF_EXECUTABLE__
	/* Very last resort, try the executable's DT_RUNPATH and DT_RPATH */
	/* http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#shobj_dependencies
	 * The set of directories specified by a given DT_RUNPATH entry is
	 * used to find only the immediate dependencies of the executable or
	 * shared object containing the DT_RUNPATH entry.  That is, it is
	 * used only for those dependencies contained in the DT_NEEDED
	 * entries of the dynamic structure containing the DT_RUNPATH entry,
	 * itself.  One object's DT_RUNPATH entry does not affect the search
	 * for any other object's dependencies.
	 *
	 * glibc (around 2.19) violates this and the usual suspects are
	 * abusing this bug^Wrelaxed, user-friendly behaviour.
	 */
	pnt = (char *) _dl_loaded_modules->dynamic_info[DT_RUNPATH];
		pnt += (unsigned long) _dl_loaded_modules->dynamic_info[DT_STRTAB];
		_dl_if_debug_dprint("\tsearching exe's RUNPATH='%s'\n", pnt);
		if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
	pnt = (char *) _dl_loaded_modules->dynamic_info[DT_RPATH];
		pnt += (unsigned long) _dl_loaded_modules->dynamic_info[DT_STRTAB];
		_dl_if_debug_dprint("\tsearching exe's RPATH='%s'\n", pnt);
		if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)

	/* Well, we shot our wad on that one.  All we can do now is punt */
	if (_dl_internal_error_number)
		_dl_error_number = _dl_internal_error_number;
		_dl_error_number = LD_ERROR_NOFILE;
	_dl_if_debug_dprint("Bummer: could not find '%s'!\n", libname);
/* Define the _dl_library_offset for the architectures that need it */
/*
 * Make a writeable mapping of a segment, regardless of whether PF_W is
 * set in its flags.  Returns the mapped address (checked by the caller
 * via _dl_mmap_check_error).
 * NOTE(review): listing is elided — the condition heads and return
 * statements are not all visible here.
 */
map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags,
	       unsigned long libaddr)
	/* Force PROT_WRITE on top of whatever the segment requested. */
	int prot_flags = ppnt->p_flags | PF_W;
	char *status, *retval;
	unsigned long map_size;
	char *piclib2map = NULL;

	/* We might be able to avoid this call if memsz doesn't
	   require an additional page, but this would require mmap
	   to always return page-aligned addresses and a whole
	   number of pages allocated.  Unfortunately on uClinux
	   may return misaligned addresses and may allocate
	   partial pages, so we may end up doing unnecessary mmap

	   This is what we could do if we knew mmap would always
	   return aligned pages:

	   ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) &
	   PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz)

	   Instead, we have to do this: */
	    ppnt->p_filesz < ppnt->p_memsz)
		/* piclib == 2 path: reserve an anonymous region spanning
		 * the whole segment (filesz + bss). */
		piclib2map = (char *)
			_dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz,
				 LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0);
		if (_dl_mmap_check_error(piclib2map))

	tryaddr = piclib == 2 ? piclib2map
		: ((char *) (piclib ? libaddr : DL_GET_LIB_OFFSET()) +
		   (ppnt->p_vaddr & PAGE_ALIGN));

	size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;

	/* For !MMU, mmap to fixed address will fail.
	   So instead of desperately call mmap and fail,
	   we set status to MAP_FAILED to save a call
	 */
#ifndef __ARCH_USE_MMU__
		status = (char *) _dl_mmap
			(tryaddr, size, LXFLAGS(prot_flags),
			 flags | (piclib2map ? MAP_FIXED : 0),
			 infile, ppnt->p_offset & OFFS_ALIGN);
#ifndef __ARCH_USE_MMU__
	/* No-MMU fallback: pread the file data into the anonymous
	 * region instead of mapping the file directly. */
	if (_dl_mmap_check_error(status) && piclib2map
	    && (_DL_PREAD (infile, tryaddr, size,
			   ppnt->p_offset & OFFS_ALIGN) == size))

	if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status))

	/* Now we want to allocate and zero-out any data from the end
	   of the region we mapped in from the file (filesz) to the
	   end of the loadable segment (memsz).  We may need
	   additional pages for memsz, that we map in below, and we
	   can count on the kernel to zero them out, but we have to
	   zero out stuff in the last page that we mapped in from the
	   file.  However, we can't assume to have actually obtained
	   full pages from the kernel, since we didn't ask for them,
	   and uClibc may not give us full pages for small
	   allocations.  So only zero out up to memsz or the end of
	   the page, whichever comes first. */

	/* CPNT is the beginning of the memsz portion not backed by
	   the file. */
	cpnt = (char *) (status + size);

	/* MAP_SIZE is the address of the
	   beginning of the next page. */
	map_size = (ppnt->p_vaddr + ppnt->p_filesz
		    + ADDR_ALIGN) & PAGE_ALIGN;

	/* Map the remaining bss pages anonymously, fixed, right after
	 * the file-backed part (not needed on the piclib2map path,
	 * which already reserved the whole segment). */
	if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) {
		tryaddr = map_size + (char*)(piclib ? libaddr : 0);
		status = (char *) _dl_mmap(tryaddr,
					   ppnt->p_vaddr + ppnt->p_memsz - map_size,
					   flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (_dl_mmap_check_error(status) || tryaddr != status)
/*
 * Read one ELF library into memory, mmap it into the correct locations and
 * add the symbol info to the symbol chain.  Perform any relocations that
 * are required.
 *
 * NOTE(review): this listing is heavily elided — error returns, loop
 * bodies and several branch heads are not visible; the comments below
 * describe only what the visible lines establish.
 */
struct elf_resolve *_dl_load_elf_shared_library(unsigned rflags,
	struct dyn_elf **rpnt, const char *libname)
	unsigned long dynamic_addr = 0;
	struct elf_resolve *tpnt;
#if defined(USE_TLS) && USE_TLS
	ElfW(Phdr) *tlsppnt = NULL;
	char *status, *header;
	unsigned long dynamic_info[DYNAMIC_SIZE];
	unsigned long libaddr;
	unsigned long minvma = 0xffffffff, maxvma = 0;
	unsigned int rtld_flags;
	int i, flags, piclib, infile;
	ElfW(Addr) relro_addr = 0;
	size_t relro_size = 0;
	DL_LOADADDR_TYPE lib_loadaddr;
	DL_INIT_LOADADDR_EXTRA_DECLS

	infile = _dl_open(libname, O_RDONLY, 0);
		_dl_internal_error_number = LD_ERROR_NOFILE;

	if (_dl_fstat(infile, &st) < 0) {
		_dl_internal_error_number = LD_ERROR_NOFILE;

	/* If we are in secure mode (i.e. a setuid/gid binary using LD_PRELOAD),
	   we don't load the library if it isn't setuid. */
	if (rflags & DL_RESOLVE_SECURE) {
		if (!(st.st_mode & S_ISUID)) {

	/* Check if file is already loaded */
	for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
		if (tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {

	if (rflags & DL_RESOLVE_NOLOAD) {

	/* Grab a scratch page to read the ELF header into. */
	header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
	if (_dl_mmap_check_error(header)) {
		_dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
		_dl_internal_error_number = LD_ERROR_MMAP_FAILED;

	_dl_read(infile, header, _dl_pagesize);
	epnt = (ElfW(Ehdr) *) (intptr_t) header;
	/* Compare the first four e_ident bytes against the ELF magic as
	 * one 32-bit word. */
	p32 = (uint32_t*)&epnt->e_ident;
	if (*p32 != ELFMAG_U32) {
		_dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
		_dl_internal_error_number = LD_ERROR_NOTELF;
		_dl_munmap(header, _dl_pagesize);

	/* Require ET_DYN (or ET_EXEC in standalone mode) and a machine
	 * type this loader was built for. */
	if ((epnt->e_type != ET_DYN
#ifdef __LDSO_STANDALONE_SUPPORT__
	     && epnt->e_type != ET_EXEC
	    ) || (epnt->e_machine != MAGIC1
	    && epnt->e_machine != MAGIC2
		_dl_internal_error_number =
			(epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
		_dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
			    "\n", _dl_progname, libname);
		_dl_munmap(header, _dl_pagesize);

	ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];

	/* First pass over the program headers: locate PT_DYNAMIC,
	 * compute the [minvma, maxvma) extent of the PT_LOAD segments,
	 * detect PIC vs fixed-address libraries, and remember a PT_TLS
	 * header if present. */
	for (i = 0; i < epnt->e_phnum; i++) {
		if (ppnt->p_type == PT_DYNAMIC) {
			_dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
				    _dl_progname, libname);
			dynamic_addr = ppnt->p_vaddr;

		if (ppnt->p_type == PT_LOAD) {
			/* See if this is a PIC library. */
			if (minvma == 0xffffffff && ppnt->p_vaddr > 0x1000000) {
				minvma = ppnt->p_vaddr;
			if (piclib && ppnt->p_vaddr < minvma) {
				minvma = ppnt->p_vaddr;
			if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
				maxvma = ppnt->p_vaddr + ppnt->p_memsz;
		if (ppnt->p_type == PT_TLS) {
#if defined(USE_TLS) && USE_TLS
			if (ppnt->p_memsz == 0)
				/* Nothing to do for an empty segment.  */

			/* Save for after 'tpnt' is actually allocated. */
			/*
			 * Yup, the user was an idiot and tried to sneak in a library with
			 * TLS in it and we don't support it. Let's fall on our own sword
			 * and scream at the luser while we die.
			 */
			_dl_dprintf(2, "%s: '%s' library contains unsupported TLS\n",
				    _dl_progname, libname);
			_dl_internal_error_number = LD_ERROR_TLS_FAILED;
			_dl_munmap(header, _dl_pagesize);

#ifdef __LDSO_STANDALONE_SUPPORT__
	if (epnt->e_type == ET_EXEC)

	DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);

	/* Round the load extent out to whole pages. */
	maxvma = (maxvma + ADDR_ALIGN) & PAGE_ALIGN;
	minvma = minvma & ~ADDR_ALIGN;

	flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;

	/* Reserve the whole address range first (PROT_NONE, anonymous);
	 * individual segments are mapped over it below. */
	if (piclib == 0 || piclib == 1) {
		status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
					   maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
		if (_dl_mmap_check_error(status)) {
			_dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
			_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
			_dl_munmap(header, _dl_pagesize);
		libaddr = (unsigned long) status;

	/* Get the memory to store the library */
	ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];

	DL_INIT_LOADADDR(lib_loadaddr, libaddr - minvma, ppnt, epnt->e_phnum);
	/* Set _dl_library_offset to lib_loadaddr or 0. */
	DL_SET_LIB_OFFSET(lib_loadaddr);

	/* Second pass: map each PT_LOAD segment into place and record
	 * the PT_GNU_RELRO region for later _dl_protect_relro(). */
	for (i = 0; i < epnt->e_phnum; i++) {
		if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
			addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
				DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);

			DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);

		if (ppnt->p_type == PT_GNU_RELRO) {
			relro_addr = ppnt->p_vaddr;
			relro_size = ppnt->p_memsz;

		if (ppnt->p_type == PT_LOAD) {
			/* Writable segments go through map_writeable();
			 * the rest map straight from the file. */
			if (ppnt->p_flags & PF_W) {
				status = map_writeable (infile, ppnt, piclib, flags, libaddr);
				tryaddr = (piclib == 2 ? 0
					   : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
					   + (piclib ? libaddr : DL_GET_LIB_OFFSET()));
				size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
				status = (char *) _dl_mmap
					(tryaddr, size, LXFLAGS(ppnt->p_flags),
					 flags | (piclib == 2 ? MAP_EXECUTABLE
						  | MAP_DENYWRITE : 0),
					 infile, ppnt->p_offset & OFFS_ALIGN);
				if (_dl_mmap_check_error(status)
				    || (tryaddr && tryaddr != status))
			DL_INIT_LOADADDR_HDR(lib_loadaddr,
					     status + (ppnt->p_vaddr & ADDR_ALIGN),

			/* if (libaddr == 0 && piclib) {
			   libaddr = (unsigned long) status;

	/*
	 * The dynamic_addr must be take into acount lib_loadaddr value, to note
	 * it is zero when the SO has been mapped to the elf's physical addr
	 */
#ifdef __LDSO_PRELINK_SUPPORT__
	if (DL_GET_LIB_OFFSET()) {
	dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr);

	/*
	 * OK, the ELF library is now loaded into VM in the correct locations
	 * The next step is to go through and do the dynamic linking (if needed).
	 */

	/* Start by scanning the dynamic section to get all of the pointers */
		_dl_internal_error_number = LD_ERROR_NODYNAMIC;
		_dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
			    _dl_progname, libname);
		_dl_munmap(header, _dl_pagesize);

	dpnt = (ElfW(Dyn) *) dynamic_addr;
	_dl_memset(dynamic_info, 0, sizeof(dynamic_info));
	rtld_flags = _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
	/* If the TEXTREL is set, this means that we need to make the pages
	   writable before we perform relocations.  Do this now. They get set
	   back again later. */
	if (dynamic_info[DT_TEXTREL]) {
#ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__
		ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
		for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
			if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
#ifdef __ARCH_USE_MMU__
				_dl_mprotect((void *) ((piclib ? libaddr : DL_GET_LIB_OFFSET()) +
						       (ppnt->p_vaddr & PAGE_ALIGN)),
					     (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
					     PROT_READ | PROT_WRITE | PROT_EXEC);
				/* No MMU: remap the text segment writeable. */
				new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr);
					_dl_dprintf(2, "Can't modify %s's text section.",
				DL_UPDATE_LOADADDR_HDR(lib_loadaddr,
						       new_addr + (ppnt->p_vaddr & ADDR_ALIGN),
				/* This has invalidated all pointers into the previously readonly segment.
				   Update any them to point into the remapped segment.  */
				_dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
		_dl_dprintf(2, "Can't modify %s's text section."
			    " Use GCC option -fPIC for shared objects, please.\n",

	/* Register the module in the global hash table and fill in its
	 * bookkeeping fields. */
	tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
	tpnt->mapaddr = libaddr;
	tpnt->relro_addr = relro_addr;
	tpnt->relro_size = relro_size;
	tpnt->st_dev = st.st_dev;
	tpnt->st_ino = st.st_ino;
	tpnt->ppnt = (ElfW(Phdr) *)
		DL_RELOC_ADDR(DL_GET_RUN_ADDR(tpnt->loadaddr, tpnt->mapaddr),
	tpnt->n_phent = epnt->e_phnum;
	tpnt->rtld_flags |= rtld_flags;
#ifdef __LDSO_STANDALONE_SUPPORT__
	tpnt->l_entry = epnt->e_entry;

#if defined(USE_TLS) && USE_TLS
		_dl_debug_early("Found TLS header for %s\n", libname);
# if NO_TLS_OFFSET != 0
		tpnt->l_tls_offset = NO_TLS_OFFSET;
		tpnt->l_tls_blocksize = tlsppnt->p_memsz;
		tpnt->l_tls_align = tlsppnt->p_align;
		if (tlsppnt->p_align == 0)
			tpnt->l_tls_firstbyte_offset = 0;
			tpnt->l_tls_firstbyte_offset = tlsppnt->p_vaddr &
				(tlsppnt->p_align - 1);
		tpnt->l_tls_initimage_size = tlsppnt->p_filesz;
		tpnt->l_tls_initimage = (void *) tlsppnt->p_vaddr;

		/* Assign the next available module ID.  */
		tpnt->l_tls_modid = _dl_next_tls_modid ();

		/* We know the load address, so add it to the offset. */
#ifdef __LDSO_STANDALONE_SUPPORT__
		if ((tpnt->l_tls_initimage != NULL) && piclib)
		if (tpnt->l_tls_initimage != NULL)
# ifdef __SUPPORT_LD_DEBUG_EARLY__
			char *tmp = (char *) tpnt->l_tls_initimage;
			tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
			_dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", tmp, tpnt->l_tls_initimage, tpnt->l_tls_initimage_size);
			tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;

	/*
	 * Add this object into the symbol chain
	 */
#ifdef __LDSO_STANDALONE_SUPPORT__
	/* Do not create a new chain entry for the main executable */
		(*rpnt)->next = _dl_malloc(sizeof(struct dyn_elf));
		_dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
		(*rpnt)->next->prev = (*rpnt);
		*rpnt = (*rpnt)->next;

	/* When statically linked, the first time we dlopen a DSO
	 * the *rpnt is NULL, so we need to allocate memory for it,
	 * and initialize the _dl_symbol_table.
	 */
		*rpnt = _dl_symbol_tables = _dl_malloc(sizeof(struct dyn_elf));
		_dl_memset(*rpnt, 0, sizeof(struct dyn_elf));

#ifdef __LDSO_STANDALONE_SUPPORT__
	tpnt->libtype = (epnt->e_type == ET_DYN) ? elf_lib : elf_executable;
	tpnt->libtype = elf_lib;

	/*
	 * OK, the next thing we need to do is to insert the dynamic linker into
	 * the proper entry in the GOT so that the PLT symbols can be properly
	 * resolved.
	 */
	lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
		lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT]);
		INIT_GOT(lpnt, tpnt);

	/* Handle DSBT initialization */
		struct elf_resolve *t, *ref;
		int idx = tpnt->dsbt_index;
		void **dsbt = tpnt->dsbt_table;

		/*
		 * It is okay (required actually) to have zero idx for an executable.
		 * This is the case when running ldso standalone and the program
		 * is being mapped in via _dl_load_shared_library().
		 */
		if (idx == 0 && tpnt->libtype != elf_executable) {
			if (!dynamic_info[DT_TEXTREL]) {
				/* This DSO has not been assigned an index. */
				_dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n",
					    _dl_progname, libname);

			/* Find a dsbt table from another module. */
			for (t = _dl_loaded_modules; t; t = t->next) {
				if (ref == NULL && t != tpnt) {

			idx = tpnt->dsbt_size;
				if (!ref || ref->dsbt_table[idx] == NULL)
				_dl_dprintf(2, "%s: '%s' caused DSBT table overflow!\n",
					    _dl_progname, libname);
			_dl_if_debug_dprint("\n\tfile='%s'; assigned index %d\n",
			tpnt->dsbt_index = idx;

		/* make sure index is not already used */
		if (_dl_ldso_dsbt[idx]) {
			struct elf_resolve *dup;
			const char *dup_name;

			for (dup = _dl_loaded_modules; dup; dup = dup->next)
				if (dup != tpnt && dup->dsbt_index == idx)
				dup_name = dup->libname;
				dup_name = "runtime linker";
				dup_name = "unknown library";
			_dl_dprintf(2, "%s: '%s' dsbt index %d already used by %s!\n",
				    _dl_progname, libname, idx, dup_name);

		/*
		 * Setup dsbt slot for this module in dsbt of all modules.
		 */
		for (t = _dl_loaded_modules; t; t = t->next)
			t->dsbt_table[idx] = dsbt;
		_dl_ldso_dsbt[idx] = dsbt;
		_dl_memcpy(dsbt, _dl_ldso_dsbt,
			   tpnt->dsbt_size * sizeof(tpnt->dsbt_table[0]));

	_dl_if_debug_dprint("\n\tfile='%s'; generating link map\n", libname);
	_dl_if_debug_dprint("\t\tdynamic: %x base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr));
	_dl_if_debug_dprint("\t\t entry: %x phdr: %x phnum: %x\n\n",
			    DL_RELOC_ADDR(lib_loadaddr, epnt->e_entry), tpnt->ppnt, tpnt->n_phent);

	_dl_munmap(header, _dl_pagesize);
/* now_flag must be RTLD_NOW or zero */
/* Recursively walk the rpnt chain (deepest entries first) and process
 * each module's relocations: the RELATIVE batch, the main relocation
 * table, then the PLT (DT_JMPREL) relocations — lazily unless bound
 * RTLD_NOW.  Returns the accumulated error ("goof") count. */
int _dl_fixup(struct dyn_elf *rpnt, struct r_scope_elem *scope, int now_flag)
	struct elf_resolve *tpnt;
	ElfW(Word) reloc_size, relative_count;
	ElfW(Addr) reloc_addr;

	/* Fix up deeper entries in the chain before this one. */
	goof = _dl_fixup(rpnt->next, scope, now_flag);

	if (!(tpnt->init_flag & RELOCS_DONE))
		_dl_if_debug_dprint("relocation processing: %s\n", tpnt->libname);

	if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
		_dl_if_debug_dprint("%s: can't handle %s relocation records\n",
				    _dl_progname, UNSUPPORTED_RELOC_STR);

	reloc_size = tpnt->dynamic_info[DT_RELOC_TABLE_SIZE];
	/* On some machines, notably SPARC & PPC, DT_REL* includes DT_JMPREL in its
	   range.  Note that according to the ELF spec, this is completely legal! */
#ifdef ELF_MACHINE_PLTREL_OVERLAP
	reloc_size -= tpnt->dynamic_info [DT_PLTRELSZ];

	if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR] &&
	    !(tpnt->init_flag & RELOCS_DONE)) {
		reloc_addr = tpnt->dynamic_info[DT_RELOC_TABLE_ADDR];
		relative_count = tpnt->dynamic_info[DT_RELCONT_IDX];
		if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */
			reloc_size -= relative_count * sizeof(ELF_RELOC);
#ifdef __LDSO_PRELINK_SUPPORT__
			/* Prelinked objects loaded at their link address
			 * can skip the RELATIVE batch. */
			if (tpnt->loadaddr || (!tpnt->dynamic_info[DT_GNU_PRELINKED_IDX]))
				elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
			reloc_addr += relative_count * sizeof(ELF_RELOC);

		goof += _dl_parse_relocation_information(rpnt, scope,
		tpnt->init_flag |= RELOCS_DONE;

	if (tpnt->dynamic_info[DT_BIND_NOW])
		now_flag = RTLD_NOW;
	/* PLT relocs: process if not yet done, or if this call upgrades
	 * the module from lazy binding to RTLD_NOW. */
	if (tpnt->dynamic_info[DT_JMPREL] &&
	    (!(tpnt->init_flag & JMP_RELOCS_DONE) ||
	     (now_flag && !(tpnt->rtld_flags & now_flag)))) {
		tpnt->rtld_flags |= now_flag;
		if (!(tpnt->rtld_flags & RTLD_NOW)) {
			_dl_parse_lazy_relocation_information(rpnt,
							      tpnt->dynamic_info[DT_JMPREL],
							      tpnt->dynamic_info [DT_PLTRELSZ]);
			goof += _dl_parse_relocation_information(rpnt, scope,
								 tpnt->dynamic_info[DT_JMPREL],
								 tpnt->dynamic_info[DT_PLTRELSZ]);
		tpnt->init_flag |= JMP_RELOCS_DONE;

	/* _dl_add_to_slotinfo is called by init_tls() for initial DSO
	   or by dlopen() for dynamically loaded DSO. */
#if defined(USE_TLS) && USE_TLS
	/* Add object to slot information data if necessasy. */
	if (tpnt->l_tls_blocksize != 0 && tls_init_tp_called)
		_dl_add_to_slotinfo ((struct link_map *) tpnt);
/* Minimal printf which handles only %s, %d, and %x */
/* Writes straight to 'fd' with _dl_write(); the format string is
 * copied into a scratch page obtained from mmap so no allocator is
 * needed this early in startup. */
void _dl_dprintf(int fd, const char *fmt, ...)
	char *start, *ptr, *string;

	buf = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (_dl_mmap_check_error(buf)) {
		_dl_write(fd, "mmap of a spare page failed!\n", 29);

	/* Reject formats that would not fit the scratch page. */
	if (_dl_strlen(fmt) >= (_dl_pagesize - 1)) {
		/* NOTE(review): length 11 exceeds the 10 bytes of the
		 * "overflow\n" literal (incl. NUL) — looks like an
		 * off-by-two over-read; verify against the full source. */
		_dl_write(fd, "overflow\n", 11);

	_dl_strcpy(buf, fmt);
	va_start(args, fmt);

	/* Scan to the next '%' conversion; text before it is emitted
	 * literally. */
	while (*ptr != '%' && *ptr) {

	_dl_write(fd, start, _dl_strlen(start));

	/* %s: print the string argument, or "(null)". */
	string = va_arg(args, char *);
		_dl_write(fd, "(null)", 6);
		_dl_write(fd, string, _dl_strlen(string));

	/* %d: decimal via _dl_simple_ltoa into the tmp buffer. */
	num = va_arg(args, long int);
	num = va_arg(args, int);
	string = _dl_simple_ltoa(tmp, num);
	_dl_write(fd, string, _dl_strlen(string));

	/* %x: hexadecimal via _dl_simple_ltoahex. */
	num = va_arg(args, long int);
	num = va_arg(args, int);
	string = _dl_simple_ltoahex(tmp, num);
	_dl_write(fd, string, _dl_strlen(string));

	_dl_write(fd, "(null)", 6);

	/* Emit any trailing literal text after the last conversion. */
	_dl_write(fd, start, _dl_strlen(start));

	_dl_munmap(buf, _dl_pagesize);
/* Duplicate 'string' into _dl_malloc'd storage (early-boot strdup;
 * caller owns the returned buffer).
 * NOTE(review): the visible code does not check the _dl_malloc result
 * before _dl_strcpy — an OOM here would fault; confirm against the
 * full source. */
char *_dl_strdup(const char *string)
	len = _dl_strlen(string);
	retval = _dl_malloc(len + 1);
	_dl_strcpy(retval, string);
/* Public thin wrapper around __dl_parse_dynamic_info(): scan the
 * dynamic section at 'dpnt' into dynamic_info[], relocating pointer
 * entries by 'load_off'; returns the rtld flags it derives. */
unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[],
				    void *debug_addr, DL_LOADADDR_TYPE load_off)
	return __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);