1 /* vi: set sw=4 ts=4: */
3 * This file contains the helper routines to load an ELF shared
4 * library into memory and add the symbol table info to the chain.
6 * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org>
7 * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8 * David Engel, Hongjiu Lu and Mitch D'Souza
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. The name of the above contributors may not be
16 * used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Mapped image of the ld.so library cache (LDSO_CACHE).
 * NULL          => not mapped yet
 * (caddr_t)-1   => a previous mapping attempt failed; don't retry */
37 static caddr_t _dl_cache_addr = NULL;
/* Size in bytes of the mapped cache image (valid only when mapped). */
38 static size_t _dl_cache_size = 0;
/* Map the shared-library cache file LDSO_CACHE read-only into memory and
 * validate it (magic, version, size, entry offsets).  On any failure
 * _dl_cache_addr is set to (caddr_t)-1 so later calls fail fast instead
 * of retrying.
 * NOTE(review): this view of the file omits several source lines (early
 * returns, closing braces); comments below describe only the visible code. */
40 int _dl_map_cache(void)
/* A previous attempt already failed -- don't try again. */
48 if (_dl_cache_addr == (caddr_t) - 1)
/* Cache is already mapped -- nothing to do. */
50 else if (_dl_cache_addr != NULL)
/* Stat the cache (for its size) and open it; either failing means there
 * is no usable cache. */
53 if (_dl_stat(LDSO_CACHE, &st)
54 || (fd = _dl_open(LDSO_CACHE, O_RDONLY, 0)) < 0) {
55 _dl_dprintf(2, "%s: can't open cache '%s'\n", _dl_progname, LDSO_CACHE);
56 _dl_cache_addr = (caddr_t) - 1; /* so we won't try again */
/* Map the whole file read-only, shared. */
60 _dl_cache_size = st.st_size;
61 _dl_cache_addr = (caddr_t) _dl_mmap(0, _dl_cache_size, PROT_READ, MAP_SHARED, fd, 0);
63 if (_dl_mmap_check_error(_dl_cache_addr)) {
64 _dl_dprintf(2, "%s: can't map cache '%s'\n",
65 _dl_progname, LDSO_CACHE);
/* Sanity-check the header: magic, version, that the file is big enough
 * for the advertised number of entries, and that the trailing string
 * table is NUL-terminated. */
69 header = (header_t *) _dl_cache_addr;
71 if (_dl_cache_size < sizeof(header_t) ||
72 _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
73 || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
75 (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
76 || _dl_cache_addr[_dl_cache_size - 1] != '\0')
78 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
/* Verify every entry's string offsets fall inside the string table,
 * which sits after the header and the libentry_t array. */
83 strtabsize = _dl_cache_size - sizeof(header_t) -
84 header->nlibs * sizeof(libentry_t);
85 libent = (libentry_t *) & header[1];
87 for (i = 0; i < header->nlibs; i++) {
88 if (libent[i].sooffset >= strtabsize ||
89 libent[i].liboffset >= strtabsize)
91 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
/* Error path: drop the mapping and poison the address so we don't retry. */
99 _dl_munmap(_dl_cache_addr, _dl_cache_size);
100 _dl_cache_addr = (caddr_t) - 1;
/* Unmap the ld.so cache if it is currently mapped.  A NULL (never mapped)
 * or (caddr_t)-1 (failed mapping) cache address is left untouched.
 * NOTE(review): return statements between these lines are elided in this
 * view of the file. */
104 int _dl_unmap_cache(void)
106 if (_dl_cache_addr == NULL || _dl_cache_addr == (caddr_t) - 1)
110 _dl_munmap(_dl_cache_addr, _dl_cache_size);
111 _dl_cache_addr = NULL;
/* Search a colon-separated directory list for the library `name`,
 * attempting to load it from each directory in turn with
 * _dl_load_elf_shared_library().  `secure` is passed through to the
 * loader; *rpnt is the symbol chain being built.
 * NOTE(review): colon-splitting/counting code and the return paths are
 * partially elided in this view of the file. */
118 /* This function's behavior must exactly match that
119 * in uClibc/ldso/util/ldd.c */
120 static struct elf_resolve *
121 search_for_named_library(const char *name, int secure, const char *path_list,
122 struct dyn_elf **rpnt)
/* mylibname holds "<dir>/<name>"; callers cap each component at 1024
 * bytes, so 2050 is sized for dir + '/' + name + NUL. */
126 char mylibname[2050];
127 struct elf_resolve *tpnt1;
132 /* We need a writable copy of this string */
133 path = _dl_strdup(path_list);
135 _dl_dprintf(2, "Out of memory!\n");
140 /* Unlike ldd.c, don't bother to eliminate double //s */
143 /* Replace colons with zeros in path_list and count them */
144 for(i=_dl_strlen(path); i > 0; i--) {
/* Try each NUL-separated directory in turn: build "<dir>/<name>" and
 * attempt the load; first success wins. */
152 for (i = 0; i < count; i++) {
153 _dl_strcpy(mylibname, path_n);
154 _dl_strcat(mylibname, "/");
155 _dl_strcat(mylibname, name);
156 if ((tpnt1 = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL)
/* Advance past this directory's terminating NUL to the next one. */
160 path_n += (_dl_strlen(path_n) + 1);
/* Check whether `full_libname` (compared by basename) is already in the
 * list of loaded modules, and reject attempts to load a foreign C library
 * (glibc/libc5) in place of uClibc.  Returns the matching module on a
 * duplicate; the other return paths are elided in this view of the file. */
165 /* Check if the named library is already loaded... */
166 struct elf_resolve *_dl_check_if_named_library_is_loaded(const char *full_libname,
167 int trace_loaded_objects)
169 const char *pnt, *pnt1;
170 struct elf_resolve *tpnt1;
171 const char *libname, *libname2;
172 static const char libc[] = "libc.so.";
173 static const char aborted_wrong_lib[] = "%s: aborted attempt to load %s!\n";
175 pnt = libname = full_libname;
177 #if defined (__SUPPORT_LD_DEBUG__)
179 _dl_dprintf(_dl_debug_file, "Checking if '%s' is already loaded\n", full_libname);
181 /* quick hack to ensure mylibname buffer doesn't overflow. don't
182 allow full_libname or any directory to be longer than 1024. */
183 if (_dl_strlen(full_libname) > 1024)
186 /* Skip over any initial initial './' and '/' stuff to
187 * get the short form libname with no path garbage */
188 pnt1 = _dl_strrchr(pnt, '/');
193 /* Make sure they are not trying to load the wrong C library!
194 * This sometimes happens esp with shared libraries when the
195 * library path is somehow wrong! */
196 #define isdigit(c) (c >= '0' && c <= '9')
/* Matches names beginning "libc.so." -- the version digit after the
 * prefix distinguishes uClibc ("libc.so.0") from other C libraries. */
197 if ((_dl_strncmp(libname, libc, 8) == 0) && _dl_strlen(libname) >=8 &&
200 /* Abort attempts to load glibc, libc5, etc */
201 if ( libname[8]!='0') {
202 if (!trace_loaded_objects) {
/* NOTE(review): the format string is "%s: aborted attempt to load %s!\n"
 * -- arguments here are (libname, _dl_progname), which looks swapped
 * relative to other messages in this file; confirm intended order. */
203 _dl_dprintf(2, aborted_wrong_lib, libname, _dl_progname);
210 /* Critical step! Weed out duplicates early to avoid
211 * function aliasing, which wastes memory, and causes
212 * really bad things to happen with weaks and globals. */
213 for (tpnt1 = _dl_loaded_modules; tpnt1; tpnt1 = tpnt1->next) {
215 /* Skip over any initial initial './' and '/' stuff to
216 * get the short form libname with no path garbage */
217 libname2 = tpnt1->libname;
218 pnt1 = _dl_strrchr(libname2, '/');
/* Compare basenames; a match means the library is already loaded. */
223 if (_dl_strcmp(libname2, libname) == 0) {
224 /* Well, that was certainly easy */
233 /* Used to return error codes back to dlopen et. al. */
/* Last error reported to callers (dlopen/dlsym wrappers). */
234 unsigned long _dl_error_number;
/* Scratch error code accumulated while searching several paths; promoted
 * to _dl_error_number only when the whole search fails. */
235 unsigned long _dl_internal_error_number;
/* Locate and load a shared library, trying each search location in the
 * SVR4-ABI order: an explicit path (if the name contains '/'), then the
 * executable's DT_RPATH, LD_LIBRARY_PATH, the ld.so cache, the directory
 * the loader itself lives in, and finally the hard-coded default paths.
 * Duplicates are weeded out first via
 * _dl_check_if_named_library_is_loaded().
 * NOTE(review): many interior lines (returns, braces) are elided in this
 * view of the file; comments describe only the visible code. */
237 struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt,
238 struct elf_resolve *tpnt, char *full_libname, int trace_loaded_objects)
241 struct elf_resolve *tpnt1;
244 _dl_internal_error_number = 0;
245 libname = full_libname;
247 /* quick hack to ensure mylibname buffer doesn't overflow. don't
248 allow full_libname or any directory to be longer than 1024. */
249 if (_dl_strlen(full_libname) > 1024)
252 /* Skip over any initial initial './' and '/' stuff to
253 * get the short form libname with no path garbage */
254 pnt1 = _dl_strrchr(libname, '/');
259 /* Critical step! Weed out duplicates early to avoid
260 * function aliasing, which wastes memory, and causes
261 * really bad things to happen with weaks and globals. */
262 if ((tpnt1=_dl_check_if_named_library_is_loaded(libname, trace_loaded_objects))!=NULL)
266 #if defined (__SUPPORT_LD_DEBUG__)
267 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tfind library='%s'; searching\n", libname);
269 /* If the filename has any '/', try it straight and leave it at that.
270 For IBCS2 compatibility under linux, we substitute the string
271 /usr/i486-sysv4/lib for /usr/lib in library names. */
/* libname != full_libname means the name contained a '/', i.e. an
 * explicit path was given -- try it directly. */
273 if (libname != full_libname) {
274 #if defined (__SUPPORT_LD_DEBUG__)
275 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\ttrying file='%s'\n", full_libname);
277 tpnt1 = _dl_load_elf_shared_library(secure, rpnt, full_libname);
285 * The ABI specifies that RPATH is searched before LD_*_PATH or
286 * the default path of /usr/lib. Check in rpath directories.
/* Only the main executable's DT_RPATH is consulted here. */
288 for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
289 if (tpnt->libtype == elf_executable) {
290 pnt = (char *) tpnt->dynamic_info[DT_RPATH];
/* DT_RPATH holds a string-table offset; turn it into a pointer by
 * adding the load address and DT_STRTAB. */
292 pnt += (unsigned long) tpnt->loadaddr + tpnt->dynamic_info[DT_STRTAB];
293 #if defined (__SUPPORT_LD_DEBUG__)
294 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching RPATH='%s'\n", pnt);
296 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
304 /* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
305 if (_dl_library_path) {
306 #if defined (__SUPPORT_LD_DEBUG__)
307 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
309 if ((tpnt1 = search_for_named_library(libname, secure, _dl_library_path, rpnt)) != NULL)
316 * Where should the cache be searched? There is no such concept in the
317 * ABI, so we have some flexibility here. For now, search it before
318 * the hard coded paths that follow (i.e before /lib and /usr/lib).
/* Cache must be mapped and valid (see _dl_map_cache). */
321 if (_dl_cache_addr != NULL && _dl_cache_addr != (caddr_t) - 1) {
323 header_t *header = (header_t *) _dl_cache_addr;
324 libentry_t *libent = (libentry_t *) & header[1];
/* The string table follows the libentry_t array. */
325 char *strs = (char *) &libent[header->nlibs];
327 #if defined (__SUPPORT_LD_DEBUG__)
328 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching cache='%s'\n", LDSO_CACHE);
/* Match by soname, then load from the recorded full path. */
330 for (i = 0; i < header->nlibs; i++) {
331 if ((libent[i].flags == LIB_ELF ||
332 libent[i].flags == LIB_ELF_LIBC5) &&
333 _dl_strcmp(libname, strs + libent[i].sooffset) == 0 &&
334 (tpnt1 = _dl_load_elf_shared_library(secure,
335 rpnt, strs + libent[i].liboffset)))
341 /* Look for libraries wherever the shared library loader
343 #if defined (__SUPPORT_LD_DEBUG__)
344 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching ldso dir='%s'\n", _dl_ldsopath);
346 if ((tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt)) != NULL)
352 /* Lastly, search the standard list of paths for the library.
353 This list must exactly match the list in uClibc/ldso/util/ldd.c */
354 #if defined (__SUPPORT_LD_DEBUG__)
355 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching full lib path list\n");
357 if ((tpnt1 = search_for_named_library(libname, secure,
358 UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib:"
359 UCLIBC_RUNTIME_PREFIX "usr/lib:"
360 UCLIBC_RUNTIME_PREFIX "lib:"
369 /* Well, we shot our wad on that one. All we can do now is punt */
/* Prefer the most specific error seen during the search, otherwise
 * report a generic "file not found". */
370 if (_dl_internal_error_number)
371 _dl_error_number = _dl_internal_error_number;
373 _dl_error_number = LD_ERROR_NOFILE;
374 #if defined (__SUPPORT_LD_DEBUG__)
375 if(_dl_debug) _dl_dprintf(2, "Bummer: could not find '%s'!\n", libname);
/* Core loader: mmap one ELF shared object into memory at the proper
 * addresses, parse its dynamic section, add it to the loader's hash table
 * and symbol chain, and initialize its GOT.  Returns the new (or already
 * existing) elf_resolve module descriptor.
 * NOTE(review): a large number of interior lines (returns, braces, some
 * declarations) are elided in this view of the file; comments describe
 * only the visible code. */
382 * Read one ELF library into memory, mmap it into the correct locations and
383 * add the symbol info to the symbol chain. Perform any relocations that
387 struct elf_resolve *_dl_load_elf_shared_library(int secure,
388 struct dyn_elf **rpnt, char *libname)
391 unsigned long dynamic_addr = 0;
392 unsigned long dynamic_size = 0;
394 struct elf_resolve *tpnt;
396 char *status, *header;
/* Indexed by DT_* tag; 24 slots covers the tags up to DT_JMPREL used
 * below. */
397 unsigned long dynamic_info[24];
399 unsigned long libaddr;
/* Track the lowest and highest virtual addresses of the PT_LOAD
 * segments to size the reservation mapping. */
400 unsigned long minvma = 0xffffffff, maxvma = 0;
401 int i, flags, piclib, infile;
403 /* If this file is already loaded, skip this step */
404 tpnt = _dl_check_hashed_files(libname);
/* Already loaded: still link a new dyn_elf node into the symbol chain
 * for this request. */
407 (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
408 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
409 (*rpnt)->next->prev = (*rpnt);
410 *rpnt = (*rpnt)->next;
412 tpnt->symbol_scope = _dl_symbol_tables;
415 tpnt->libtype = elf_lib;
416 #if defined (__SUPPORT_LD_DEBUG__)
417 if(_dl_debug) _dl_dprintf(2, "file='%s'; already loaded\n", libname);
422 /* If we are in secure mode (i.e. a setu/gid binary using LD_PRELOAD),
423 we don't load the library if it isn't setuid. */
428 if (_dl_stat(libname, &st) || !(st.st_mode & S_ISUID))
433 infile = _dl_open(libname, O_RDONLY, 0);
437 * NO! When we open shared libraries we may search several paths.
438 * it is inappropriate to generate an error here.
440 _dl_dprintf(2, "%s: can't open '%s'\n", _dl_progname, libname);
442 _dl_internal_error_number = LD_ERROR_NOFILE;
/* Read the first page of the file into an anonymous buffer so we can
 * inspect the ELF and program headers. */
446 header = _dl_mmap((void *) 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
447 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
448 if (_dl_mmap_check_error(header)) {
449 _dl_dprintf(2, "%s: can't map '%s'\n", _dl_progname, libname);
450 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
455 _dl_read(infile, header, PAGE_SIZE);
456 epnt = (ElfW(Ehdr) *) (intptr_t) header;
/* Verify the ELF magic "\x7fELF". */
457 if (epnt->e_ident[0] != 0x7f ||
458 epnt->e_ident[1] != 'E' ||
459 epnt->e_ident[2] != 'L' ||
460 epnt->e_ident[3] != 'F')
462 _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
464 _dl_internal_error_number = LD_ERROR_NOTELF;
466 _dl_munmap(header, PAGE_SIZE);
/* Must be a shared object (ET_DYN) built for this machine. */
470 if ((epnt->e_type != ET_DYN) || (epnt->e_machine != MAGIC1
472 && epnt->e_machine != MAGIC2
476 _dl_internal_error_number =
477 (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
478 _dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
479 "\n", _dl_progname, libname);
481 _dl_munmap(header, PAGE_SIZE);
/* First pass over the program headers: find PT_DYNAMIC and compute the
 * [minvma, maxvma) span of the PT_LOAD segments. */
485 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
488 for (i = 0; i < epnt->e_phnum; i++) {
490 if (ppnt->p_type == PT_DYNAMIC) {
492 _dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
493 _dl_progname, libname);
494 dynamic_addr = ppnt->p_vaddr;
495 dynamic_size = ppnt->p_filesz;
498 if (ppnt->p_type == PT_LOAD) {
499 /* See if this is a PIC library. */
/* Heuristic: a first segment linked above 0x1000000 is taken to be a
 * non-PIC (fixed-address) library. */
500 if (i == 0 && ppnt->p_vaddr > 0x1000000) {
502 minvma = ppnt->p_vaddr;
504 if (piclib && ppnt->p_vaddr < minvma) {
505 minvma = ppnt->p_vaddr;
507 if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
508 maxvma = ppnt->p_vaddr + ppnt->p_memsz;
/* Round the span out to page-ish boundaries. */
514 maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
515 minvma = minvma & ~0xffffU;
517 flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
/* Reserve the whole address range with an inaccessible anonymous
 * mapping; segments are then mapped over it.  PIC libraries let the
 * kernel choose the base, non-PIC go at minvma. */
521 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
522 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
523 if (_dl_mmap_check_error(status)) {
524 _dl_dprintf(2, "%s: can't map %s\n", _dl_progname, libname);
525 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
527 _dl_munmap(header, PAGE_SIZE);
530 libaddr = (unsigned long) status;
533 /* Get the memory to store the library */
/* Second pass: map each PT_LOAD segment from the file over the
 * reservation. */
534 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
536 for (i = 0; i < epnt->e_phnum; i++) {
537 if (ppnt->p_type == PT_LOAD) {
539 /* See if this is a PIC library. */
540 if (i == 0 && ppnt->p_vaddr > 0x1000000) {
542 /* flags |= MAP_FIXED; */
/* Writable segments need their file-backed part mapped, the tail of
 * the last file page zeroed, and any remaining .bss pages mapped
 * anonymously. */
547 if (ppnt->p_flags & PF_W) {
548 unsigned long map_size;
551 status = (char *) _dl_mmap((char *) ((piclib ? libaddr : 0) +
552 (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN)
553 + ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags, infile,
554 ppnt->p_offset & OFFS_ALIGN);
556 if (_dl_mmap_check_error(status)) {
557 _dl_dprintf(2, "%s: can't map '%s'\n",
558 _dl_progname, libname);
559 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
560 _dl_munmap((char *) libaddr, maxvma - minvma);
562 _dl_munmap(header, PAGE_SIZE);
566 /* Pad the last page with zeroes. */
567 cpnt = (char *) (status + (ppnt->p_vaddr & ADDR_ALIGN) +
569 while (((unsigned long) cpnt) & ADDR_ALIGN)
572 /* I am not quite sure if this is completely
573 * correct to do or not, but the basic way that
574 * we handle bss segments is that we mmap
575 * /dev/zero if there are any pages left over
576 * that are not mapped as part of the file */
578 map_size = (ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & PAGE_ALIGN;
580 if (map_size < ppnt->p_vaddr + ppnt->p_memsz)
581 status = (char *) _dl_mmap((char *) map_size +
582 (piclib ? libaddr : 0),
583 ppnt->p_vaddr + ppnt->p_memsz - map_size,
584 LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS, -1, 0);
/* Read-only segments map the file region directly. */
586 status = (char *) _dl_mmap((char *) (ppnt->p_vaddr & PAGE_ALIGN)
587 + (piclib ? libaddr : 0), (ppnt->p_vaddr & ADDR_ALIGN) +
588 ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags,
589 infile, ppnt->p_offset & OFFS_ALIGN);
590 if (_dl_mmap_check_error(status)) {
591 _dl_dprintf(2, "%s: can't map '%s'\n", _dl_progname, libname);
592 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
593 _dl_munmap((char *) libaddr, maxvma - minvma);
595 _dl_munmap(header, PAGE_SIZE);
599 /* if(libaddr == 0 && piclib) {
600 libaddr = (unsigned long) status;
608 /* For a non-PIC library, the addresses are all absolute */
/* NOTE(review): the guarding condition for this adjustment is elided in
 * this view; presumably it applies only to PIC libraries -- confirm. */
610 dynamic_addr += (unsigned long) libaddr;
614 * OK, the ELF library is now loaded into VM in the correct locations
615 * The next step is to go through and do the dynamic linking (if needed).
618 /* Start by scanning the dynamic section to get all of the pointers */
621 _dl_internal_error_number = LD_ERROR_NODYNAMIC;
622 _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
623 _dl_progname, libname);
624 _dl_munmap(header, PAGE_SIZE);
628 dpnt = (Elf32_Dyn *) dynamic_addr;
/* Convert the dynamic segment's byte size into an entry count. */
630 dynamic_size = dynamic_size / sizeof(Elf32_Dyn);
631 _dl_memset(dynamic_info, 0, sizeof(dynamic_info));
633 #if defined(__mips__)
637 Elf32_Dyn *dpnt = (Elf32_Dyn *) dynamic_addr;
/* Record each DT_* entry's value; tags above DT_JMPREL don't fit in
 * dynamic_info and are skipped.  DT_TEXTREL is collapsed to a flag. */
650 for (indx = 0; indx < dynamic_size; indx++)
652 if (dpnt->d_tag > DT_JMPREL) {
656 dynamic_info[dpnt->d_tag] = dpnt->d_un.d_val;
657 if (dpnt->d_tag == DT_TEXTREL)
658 dynamic_info[DT_TEXTREL] = 1;
663 /* If the TEXTREL is set, this means that we need to make the pages
664 writable before we perform relocations. Do this now. They get set
667 if (dynamic_info[DT_TEXTREL]) {
668 #ifndef FORCE_SHAREABLE_TEXT_SEGMENTS
669 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
670 for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
671 if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W))
672 _dl_mprotect((void *) ((piclib ? libaddr : 0) +
673 (ppnt->p_vaddr & PAGE_ALIGN)),
674 (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
675 PROT_READ | PROT_WRITE | PROT_EXEC);
678 _dl_dprintf(_dl_debug_file, "Can't modify %s's text section. Use GCC option -fPIC for shared objects, please.\n",libname);
/* Register the library with the loader's hash table and remember where
 * its program headers live in the mapped image. */
683 tpnt = _dl_add_elf_hash_table(libname, (char *) libaddr, dynamic_info,
684 dynamic_addr, dynamic_size);
686 tpnt->ppnt = (ElfW(Phdr) *)(intptr_t) (tpnt->loadaddr + epnt->e_phoff);
687 tpnt->n_phent = epnt->e_phnum;
690 * Add this object into the symbol chain
693 (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
694 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
695 (*rpnt)->next->prev = (*rpnt);
696 *rpnt = (*rpnt)->next;
698 tpnt->symbol_scope = _dl_symbol_tables;
701 tpnt->libtype = elf_lib;
704 * OK, the next thing we need to do is to insert the dynamic linker into
705 * the proper entry in the GOT so that the PLT symbols can be properly
709 lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
/* NOTE(review): the branch selecting between these two GOT address
 * computations is elided in this view; the second form relocates the
 * DT_PLTGOT value by the load address. */
712 lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT] +
714 INIT_GOT(lpnt, tpnt);
717 #if defined (__SUPPORT_LD_DEBUG__)
719 _dl_dprintf(2, "\n\tfile='%s'; generating link map\n", libname);
720 _dl_dprintf(2, "\t\tdynamic: %x base: %x size: %x\n",
721 dynamic_addr, libaddr, dynamic_size);
722 _dl_dprintf(2, "\t\t entry: %x phdr: %x phnum: %d\n\n",
723 epnt->e_entry + libaddr, tpnt->ppnt, tpnt->n_phent);
/* Done with the scratch copy of the first page. */
727 _dl_munmap(header, PAGE_SIZE);
/* Recursively process relocations for the module list starting at rpnt
 * (deepest dependency first, via the recursive call on rpnt->next).
 * `flag` carries RTLD_LAZY/RTLD_NOW semantics; lazy PLT relocations are
 * only prepared, not resolved.  Returns an accumulated error count.
 * NOTE(review): several interior lines are elided in this view of the
 * file; comments describe only the visible code. */
732 int _dl_fixup(struct dyn_elf *rpnt, int flag)
735 struct elf_resolve *tpnt;
/* Fix up dependencies first so their symbols are available. */
738 goof += _dl_fixup(rpnt->next, flag);
741 #if defined (__SUPPORT_LD_DEBUG__)
742 if(_dl_debug) _dl_dprintf(_dl_debug_file,"\nrelocation processing: %s", tpnt->libname);
/* Refuse objects using a relocation type this target doesn't handle. */
745 if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
746 #if defined (__SUPPORT_LD_DEBUG__)
748 _dl_dprintf(2, "%s: can't handle %s relocation records\n",
749 _dl_progname, UNSUPPORTED_RELOC_STR);
/* Ordinary (non-PLT) relocations; the init_flag bits make each phase
 * idempotent per module. */
756 if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR]) {
757 if (tpnt->init_flag & RELOCS_DONE)
759 tpnt->init_flag |= RELOCS_DONE;
760 goof += _dl_parse_relocation_information(rpnt,
761 tpnt->dynamic_info[DT_RELOC_TABLE_ADDR],
762 tpnt->dynamic_info[DT_RELOC_TABLE_SIZE], 0);
/* PLT relocations: deferred (lazy) or resolved now, per `flag`. */
765 if (tpnt->dynamic_info[DT_JMPREL]) {
766 if (tpnt->init_flag & JMP_RELOCS_DONE)
768 tpnt->init_flag |= JMP_RELOCS_DONE;
769 if (flag & RTLD_LAZY) {
770 _dl_parse_lazy_relocation_information(rpnt,
771 tpnt->dynamic_info[DT_JMPREL],
772 tpnt->dynamic_info [DT_PLTRELSZ], 0);
774 goof += _dl_parse_relocation_information(rpnt,
775 tpnt->dynamic_info[DT_JMPREL],
776 tpnt->dynamic_info[DT_PLTRELSZ], 0);
/* COPY relocations (guard condition elided in this view). */
780 if (tpnt->init_flag & COPY_RELOCS_DONE)
782 tpnt->init_flag |= COPY_RELOCS_DONE;
783 goof += _dl_parse_copy_information(rpnt,
784 tpnt->dynamic_info[DT_RELOC_TABLE_ADDR],
785 tpnt->dynamic_info[DT_RELOC_TABLE_SIZE], 0);
787 #if defined (__SUPPORT_LD_DEBUG__)
789 _dl_dprintf(_dl_debug_file,"\nrelocation processing: %s", tpnt->libname);
790 _dl_dprintf(_dl_debug_file,"; finished\n\n");
/* Minimal, allocation-free printf replacement for the loader: copies the
 * format into a scratch page, walks it splitting on '%', and writes each
 * piece with _dl_write().  Only %s, %d and %x are understood.
 * NOTE(review): several interior lines are elided in this view of the
 * file; comments describe only the visible code. */
797 /* Minimal printf which handles only %s, %d, and %x */
798 void _dl_dprintf(int fd, const char *fmt, ...)
802 char *start, *ptr, *string;
/* Scratch page for the writable copy of fmt (we can't use malloc here). */
805 buf = _dl_mmap((void *) 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
806 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
807 if (_dl_mmap_check_error(buf)) {
808 _dl_write(fd, "mmap of a spare page failed!\n", 29);
817 if (_dl_strlen(fmt) >= (PAGE_SIZE - 1)) {
/* NOTE(review): "overflow\n" is 9 bytes but 11 are written here, which
 * reads 2 bytes past the literal -- looks like a genuine bug; fix to 9. */
818 _dl_write(fd, "overflow\n", 11);
822 _dl_strcpy(buf, fmt);
/* Emit the literal text up to the next '%' conversion. */
826 while (*ptr != '%' && *ptr) {
832 _dl_write(fd, start, _dl_strlen(start));
/* %s: write the string argument, or "(null)" for a NULL pointer. */
836 string = va_arg(args, char *);
839 _dl_write(fd, "(null)", 6);
841 _dl_write(fd, string, _dl_strlen(string));
/* %d: format via _dl_simple_ltoa into a small temp buffer. */
848 num = va_arg(args, int);
850 string = _dl_simple_ltoa(tmp, num);
851 _dl_write(fd, string, _dl_strlen(string));
/* %x: hexadecimal, via _dl_simple_ltoahex. */
858 num = va_arg(args, int);
860 string = _dl_simple_ltoahex(tmp, num);
861 _dl_write(fd, string, _dl_strlen(string));
865 _dl_write(fd, "(null)", 6);
/* Trailing literal text after the last conversion. */
871 _dl_write(fd, start, _dl_strlen(start));
875 _dl_munmap(buf, PAGE_SIZE);
/* strdup() equivalent using the loader's internal allocator.
 * NOTE(review): the _dl_malloc() result is used unchecked here; whether a
 * NULL return is possible depends on _dl_malloc's elided failure path. */
879 char *_dl_strdup(const char *string)
884 len = _dl_strlen(string);
885 retval = _dl_malloc(len + 1);
886 _dl_strcpy(retval, string);
/* Hook allowing libc to supply a real malloc once it is initialized;
 * until then _dl_malloc below hands out memory from an mmap'd page. */
890 void *(*_dl_malloc_function) (size_t size) = NULL;
/* Trivial bump allocator for the loader's early allocations.  Memory is
 * carved from a page obtained with mmap and is never freed.
 * NOTE(review): the function continues past the last line visible here;
 * comments describe only the visible code. */
891 void *_dl_malloc(int size)
896 #ifdef __SUPPORT_LD_DEBUG_EARLY__
897 _dl_dprintf(2, "malloc: request for %d bytes\n", size);
/* Once libc's allocator is registered, defer to it. */
901 if (_dl_malloc_function)
902 return (*_dl_malloc_function) (size);
/* Not enough room left in the current page: grab a fresh mapping.
 * NOTE(review): the new mapping is sized as `size`, not PAGE_SIZE, yet
 * the capacity check above assumes a PAGE_SIZE pool -- confirm intent. */
904 if (_dl_malloc_addr - _dl_mmap_zero + size > PAGE_SIZE) {
905 #ifdef __SUPPORT_LD_DEBUG_EARLY__
906 _dl_dprintf(2, "malloc: mmapping more memory\n");
908 _dl_mmap_zero = _dl_malloc_addr = _dl_mmap((void *) 0, size,
909 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
910 if (_dl_mmap_check_error(_dl_mmap_zero)) {
911 _dl_dprintf(2, "%s: mmap of a spare page failed!\n", _dl_progname);
/* Bump the cursor past this allocation. */
915 retval = _dl_malloc_addr;
916 _dl_malloc_addr += size;
919 * Align memory to 4 byte boundary. Some platforms require this, others
920 * simply get better performance.
922 _dl_malloc_addr = (unsigned char *) (((unsigned long) _dl_malloc_addr + 3) & ~(3));