1 /* vi: set sw=4 ts=4: */
3 * This file contains the helper routines to load an ELF shared
4 * library into memory and add the symbol table info to the chain.
6 * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org>
7 * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8 * David Engel, Hongjiu Lu and Mitch D'Souza
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. The name of the above contributors may not be
16 * used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Cached mapping of the ld.so library cache file (LDSO_CACHE):
 * base address and byte size of the read-only mmap.
 * _dl_cache_addr is NULL before any map attempt and is poisoned to
 * (caddr_t)-1 after a failed attempt so we never retry (see
 * _dl_map_cache below). */
37 static caddr_t _dl_cache_addr = NULL;
38 static size_t _dl_cache_size = 0;
/* Map the library cache file LDSO_CACHE read-only into memory and
 * validate it: magic, version, entry-table size, and NUL termination
 * of the string table.  On any failure _dl_cache_addr is set to
 * (caddr_t)-1 so subsequent calls fail fast without retrying.
 * NOTE(review): this excerpt elides several original lines (returns,
 * braces, declarations of st/fd/header/libent/strtabsize/i) — confirm
 * against the full file before relying on exact control flow. */
40 int _dl_map_cache(void)
/* Previous attempt failed: give up immediately.  Already mapped: reuse. */
48 if (_dl_cache_addr == (caddr_t) - 1)
50 else if (_dl_cache_addr != NULL)
/* stat() first to learn the file size, then open for the mmap below. */
53 if (_dl_stat(LDSO_CACHE, &st)
54 || (fd = _dl_open(LDSO_CACHE, O_RDONLY, 0)) < 0) {
55 _dl_dprintf(2, "%s: can't open cache '%s'\n", _dl_progname, LDSO_CACHE);
56 _dl_cache_addr = (caddr_t) - 1; /* so we won't try again */
60 _dl_cache_size = st.st_size;
/* Map the whole cache file read-only and shared. */
61 _dl_cache_addr = (caddr_t) _dl_mmap(0, _dl_cache_size, PROT_READ, MAP_SHARED, fd, 0);
63 if (_dl_mmap_check_error(_dl_cache_addr)) {
64 _dl_dprintf(2, "%s: can't map cache '%s'\n",
65 _dl_progname, LDSO_CACHE);
/* Sanity-check the cache header: size, magic, version, and that the
 * trailing string table is NUL-terminated. */
69 header = (header_t *) _dl_cache_addr;
71 if (_dl_cache_size < sizeof(header_t) ||
72 _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
73 || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
75 (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
76 || _dl_cache_addr[_dl_cache_size - 1] != '\0')
78 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
/* The string table follows the header and the nlibs entry records. */
83 strtabsize = _dl_cache_size - sizeof(header_t) -
84 header->nlibs * sizeof(libentry_t);
85 libent = (libentry_t *) & header[1];
/* Every per-library entry must point inside the string table. */
87 for (i = 0; i < header->nlibs; i++) {
88 if (libent[i].sooffset >= strtabsize ||
89 libent[i].liboffset >= strtabsize)
91 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
/* Validation failed: unmap and poison the address so we never retry. */
99 _dl_munmap(_dl_cache_addr, _dl_cache_size);
100 _dl_cache_addr = (caddr_t) - 1;
/* Release the mapping created by _dl_map_cache.  A NULL address (never
 * mapped) or (caddr_t)-1 (map failed) means there is nothing to undo.
 * NOTE(review): return statements are elided in this excerpt. */
104 int _dl_unmap_cache(void)
106 if (_dl_cache_addr == NULL || _dl_cache_addr == (caddr_t) - 1)
110 _dl_munmap(_dl_cache_addr, _dl_cache_size);
111 _dl_cache_addr = NULL;
118 /* This function's behavior must exactly match that
119 * in uClibc/ldso/util/ldd.c */
/* Walk a colon-separated directory list and try to load "name" from
 * each directory in turn via _dl_load_elf_shared_library.  Works on a
 * writable copy of path_list with colons replaced by NULs.
 * NOTE(review): the colon-splitting loop body, the `count` computation,
 * the path_n initialization, and the return paths are elided in this
 * excerpt — confirm against the full file. */
120 static struct elf_resolve *
121 search_for_named_library(const char *name, int secure, const char *path_list,
122 struct dyn_elf **rpnt)
/* 2050 bytes: room for a <=1024-byte directory, '/', and a <=1024-byte
 * name (callers enforce the 1024 limit) — presumably; verify callers. */
126 char mylibname[2050];
127 struct elf_resolve *tpnt1;
132 /* We need a writable copy of this string */
133 path = _dl_strdup(path_list);
135 _dl_dprintf(2, "Out of memory!\n");
140 /* Unlike ldd.c, don't bother to eliminate double //s */
143 /* Replace colons with zeros in path_list and count them */
144 for(i=_dl_strlen(path); i > 0; i--) {
/* Try each NUL-separated directory component in order. */
152 for (i = 0; i < count; i++) {
153 _dl_strcpy(mylibname, path_n);
154 _dl_strcat(mylibname, "/");
155 _dl_strcat(mylibname, name);
156 if ((tpnt1 = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL)
/* Advance past this component and its NUL terminator. */
160 path_n += (_dl_strlen(path_n) + 1);
165 /* Check if the named library is already loaded... */
/* Return the already-loaded module matching full_libname (compared by
 * basename, path stripped), or flag an attempt to load a foreign libc.
 * NOTE(review): several lines (returns, basename-adjustment after
 * _dl_strrchr, parts of the libc version check) are elided in this
 * excerpt — confirm exact semantics against the full file. */
166 struct elf_resolve *_dl_check_if_named_library_is_loaded(const char *full_libname,
167 int trace_loaded_objects)
169 const char *pnt, *pnt1;
170 struct elf_resolve *tpnt1;
171 const char *libname, *libname2;
172 static const char libc[] = "libc.so.";
173 static const char aborted_wrong_lib[] = "%s: aborted attempt to load %s!\n";
175 pnt = libname = full_libname;
177 #if defined (__SUPPORT_LD_DEBUG__)
179 _dl_dprintf(_dl_debug_file, "Checking if '%s' is already loaded\n", full_libname);
181 /* quick hack to ensure mylibname buffer doesn't overflow. don't
182 allow full_libname or any directory to be longer than 1024. */
183 if (_dl_strlen(full_libname) > 1024)
186 /* Skip over any initial initial './' and '/' stuff to
187 * get the short form libname with no path garbage */
188 pnt1 = _dl_strrchr(pnt, '/');
193 /* Make sure they are not trying to load the wrong C library!
194 * This sometimes happens esp with shared libraries when the
195 * library path is somehow wrong! */
196 #define isdigit(c) (c >= '0' && c <= '9')
/* Name starts with "libc.so." — check the version digit that follows. */
197 if ((_dl_strncmp(libname, libc, 8) == 0) && _dl_strlen(libname) >=8 &&
200 /* Abort attempts to load glibc, libc5, etc */
201 if ( libname[8]!='0') {
202 if (!trace_loaded_objects) {
/* NOTE(review): argument order here passes libname before _dl_progname,
 * while the format is "%s: aborted attempt to load %s!" — the program
 * name is conventionally first; verify against the full file. */
203 _dl_dprintf(2, aborted_wrong_lib, libname, _dl_progname);
210 /* Critical step! Weed out duplicates early to avoid
211 * function aliasing, which wastes memory, and causes
212 * really bad things to happen with weaks and globals. */
213 for (tpnt1 = _dl_loaded_modules; tpnt1; tpnt1 = tpnt1->next) {
215 /* Skip over any initial initial './' and '/' stuff to
216 * get the short form libname with no path garbage */
217 libname2 = tpnt1->libname;
218 pnt1 = _dl_strrchr(libname2, '/');
/* Basenames match: this module is the one being asked for. */
223 if (_dl_strcmp(libname2, libname) == 0) {
224 /* Well, that was certainly easy */
233 /* Used to return error codes back to dlopen et. al. */
/* _dl_internal_error_number collects LD_ERROR_* codes while probing
 * multiple candidate paths; the final value is copied into
 * _dl_error_number when the search ultimately fails (see
 * _dl_load_shared_library). */
234 unsigned long _dl_error_number;
235 unsigned long _dl_internal_error_number;
/* Resolve a library name to a loaded module, searching in order:
 * duplicate check, literal path (if the name contains '/'), the
 * executable's DT_RPATH, LD_LIBRARY_PATH, the ld.so cache, the
 * loader's own directory, then the hard-coded standard path list.
 * Sets _dl_error_number on total failure.
 * NOTE(review): many lines (returns, some braces, basename fixup after
 * _dl_strrchr) are elided in this excerpt — confirm control flow
 * against the full file. */
237 struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt,
238 struct elf_resolve *tpnt, char *full_libname, int trace_loaded_objects)
241 struct elf_resolve *tpnt1;
244 _dl_internal_error_number = 0;
245 libname = full_libname;
247 /* quick hack to ensure mylibname buffer doesn't overflow. don't
248 allow full_libname or any directory to be longer than 1024. */
249 if (_dl_strlen(full_libname) > 1024)
252 /* Skip over any initial initial './' and '/' stuff to
253 * get the short form libname with no path garbage */
254 pnt1 = _dl_strrchr(libname, '/');
259 /* Critical step! Weed out duplicates early to avoid
260 * function aliasing, which wastes memory, and causes
261 * really bad things to happen with weaks and globals. */
262 if ((tpnt1=_dl_check_if_named_library_is_loaded(libname, trace_loaded_objects))!=NULL)
266 #if defined (__SUPPORT_LD_DEBUG__)
267 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tfind library='%s'; searching\n", libname);
269 /* If the filename has any '/', try it straight and leave it at that.
270 For IBCS2 compatibility under linux, we substitute the string
271 /usr/i486-sysv4/lib for /usr/lib in library names. */
/* libname != full_libname implies a '/' was found above: try the
 * caller-supplied path verbatim. */
273 if (libname != full_libname) {
274 #if defined (__SUPPORT_LD_DEBUG__)
275 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\ttrying file='%s'\n", full_libname);
277 tpnt1 = _dl_load_elf_shared_library(secure, rpnt, full_libname);
285 * The ABI specifies that RPATH is searched before LD_*_PATH or
286 * the default path of /usr/lib. Check in rpath directories.
/* Only the main executable's DT_RPATH is consulted here. */
288 for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
289 if (tpnt->libtype == elf_executable) {
290 pnt = (char *) tpnt->dynamic_info[DT_RPATH];
/* DT_RPATH is a string-table offset; convert it to a pointer. */
292 pnt += (unsigned long) tpnt->loadaddr + tpnt->dynamic_info[DT_STRTAB];
293 #if defined (__SUPPORT_LD_DEBUG__)
294 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching RPATH='%s'\n", pnt);
296 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
304 /* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
305 if (_dl_library_path) {
306 #if defined (__SUPPORT_LD_DEBUG__)
307 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
309 if ((tpnt1 = search_for_named_library(libname, secure, _dl_library_path, rpnt)) != NULL)
316 * Where should the cache be searched? There is no such concept in the
317 * ABI, so we have some flexibility here. For now, search it before
318 * the hard coded paths that follow (i.e before /lib and /usr/lib).
/* Cache is usable only if _dl_map_cache succeeded earlier. */
321 if (_dl_cache_addr != NULL && _dl_cache_addr != (caddr_t) - 1) {
323 header_t *header = (header_t *) _dl_cache_addr;
324 libentry_t *libent = (libentry_t *) & header[1];
325 char *strs = (char *) &libent[header->nlibs];
327 #if defined (__SUPPORT_LD_DEBUG__)
328 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching cache='%s'\n", LDSO_CACHE);
/* Match by soname, then load by the recorded full path. */
330 for (i = 0; i < header->nlibs; i++) {
331 if ((libent[i].flags == LIB_ELF ||
332 libent[i].flags == LIB_ELF_LIBC5) &&
333 _dl_strcmp(libname, strs + libent[i].sooffset) == 0 &&
334 (tpnt1 = _dl_load_elf_shared_library(secure,
335 rpnt, strs + libent[i].liboffset)))
341 /* Look for libraries wherever the shared library loader
343 #if defined (__SUPPORT_LD_DEBUG__)
344 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching ldso dir='%s'\n", _dl_ldsopath);
346 if ((tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt)) != NULL)
352 /* Lastly, search the standard list of paths for the library.
353 This list must exactly match the list in uClibc/ldso/util/ldd.c */
354 #if defined (__SUPPORT_LD_DEBUG__)
355 if(_dl_debug) _dl_dprintf(_dl_debug_file, "\tsearching full lib path list\n");
357 if ((tpnt1 = search_for_named_library(libname, secure,
358 UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib:"
359 UCLIBC_RUNTIME_PREFIX "usr/lib:"
360 UCLIBC_RUNTIME_PREFIX "lib:"
370 /* Well, we shot our wad on that one. All we can do now is punt */
/* Propagate the most recent specific error, or a generic "not found". */
371 if (_dl_internal_error_number)
372 _dl_error_number = _dl_internal_error_number;
374 _dl_error_number = LD_ERROR_NOFILE;
375 #if defined (__SUPPORT_LD_DEBUG__)
376 if(_dl_debug) _dl_dprintf(2, "Bummer: could not find '%s'!\n", libname);
383 * Read one ELF library into memory, mmap it into the correct locations and
384 * add the symbol info to the symbol chain. Perform any relocations that
/* Loads one ELF shared object from `libname`:
 *   1. duplicate check via the hashed-files table;
 *   2. secure-mode setuid check;
 *   3. read and validate the ELF header (magic, ET_DYN, machine);
 *   4. first PT_LOAD pass to size the address range (minvma..maxvma);
 *   5. reserve the range, then map each PT_LOAD segment (writable
 *      segments get zero-padding and anonymous bss pages);
 *   6. parse the PT_DYNAMIC entries into dynamic_info[];
 *   7. handle DT_TEXTREL by making text pages writable;
 *   8. register in the hash table / symbol chain and init the GOT.
 * Returns the new elf_resolve entry (on the elided success path).
 * NOTE(review): a large number of lines (returns, braces, some
 * declarations such as epnt/ppnt/cpnt/lpnt/st) are elided in this
 * excerpt — all comments below describe only the visible lines. */
388 struct elf_resolve *_dl_load_elf_shared_library(int secure,
389 struct dyn_elf **rpnt, char *libname)
392 unsigned long dynamic_addr = 0;
393 unsigned long dynamic_size = 0;
395 struct elf_resolve *tpnt;
397 char *status, *header;
/* Indexed by DT_* tag; tags above DT_JMPREL are skipped below. */
398 unsigned long dynamic_info[24];
400 unsigned long libaddr;
401 unsigned long minvma = 0xffffffff, maxvma = 0;
402 int i, flags, piclib, infile;
404 /* If this file is already loaded, skip this step */
405 tpnt = _dl_check_hashed_files(libname);
/* Already loaded: still append a fresh dyn_elf node to the chain. */
408 (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
409 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
410 (*rpnt)->next->prev = (*rpnt);
411 *rpnt = (*rpnt)->next;
413 tpnt->symbol_scope = _dl_symbol_tables;
416 tpnt->libtype = elf_lib;
417 #if defined (__SUPPORT_LD_DEBUG__)
418 if(_dl_debug) _dl_dprintf(2, "file='%s'; already loaded\n", libname);
423 /* If we are in secure mode (i.e. a setu/gid binary using LD_PRELOAD),
424 we don't load the library if it isn't setuid. */
429 if (_dl_stat(libname, &st) || !(st.st_mode & S_ISUID))
434 infile = _dl_open(libname, O_RDONLY, 0);
438 * NO! When we open shared libraries we may search several paths.
439 * it is inappropriate to generate an error here.
441 _dl_dprintf(2, "%s: can't open '%s'\n", _dl_progname, libname);
443 _dl_internal_error_number = LD_ERROR_NOFILE;
/* Scratch page to hold the first _dl_pagesize bytes of the file
 * (ELF header + program headers); unmapped on every exit path. */
447 header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
448 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
449 if (_dl_mmap_check_error(header)) {
450 _dl_dprintf(2, "%s: can't map '%s'\n", _dl_progname, libname);
451 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
456 _dl_read(infile, header, _dl_pagesize);
457 epnt = (ElfW(Ehdr) *) (intptr_t) header;
/* Verify the ELF magic "\177ELF". */
458 if (epnt->e_ident[0] != 0x7f ||
459 epnt->e_ident[1] != 'E' ||
460 epnt->e_ident[2] != 'L' ||
461 epnt->e_ident[3] != 'F')
463 _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
465 _dl_internal_error_number = LD_ERROR_NOTELF;
467 _dl_munmap(header, _dl_pagesize);
/* Must be a shared object built for this target architecture. */
471 if ((epnt->e_type != ET_DYN) || (epnt->e_machine != MAGIC1
473 && epnt->e_machine != MAGIC2
477 _dl_internal_error_number =
478 (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
479 _dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
480 "\n", _dl_progname, libname);
482 _dl_munmap(header, _dl_pagesize);
/* First pass over the program headers: find PT_DYNAMIC and compute
 * the total virtual address span of the PT_LOAD segments. */
486 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
489 for (i = 0; i < epnt->e_phnum; i++) {
491 if (ppnt->p_type == PT_DYNAMIC) {
493 _dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
494 _dl_progname, libname);
495 dynamic_addr = ppnt->p_vaddr;
496 dynamic_size = ppnt->p_filesz;
499 if (ppnt->p_type == PT_LOAD) {
500 /* See if this is a PIC library. */
/* Heuristic: a first segment above 16MB implies a fixed-address
 * (non-PIC) library — presumably sets piclib=0; lines elided. */
501 if (i == 0 && ppnt->p_vaddr > 0x1000000) {
503 minvma = ppnt->p_vaddr;
505 if (piclib && ppnt->p_vaddr < minvma) {
506 minvma = ppnt->p_vaddr;
508 if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
509 maxvma = ppnt->p_vaddr + ppnt->p_memsz;
/* Round the span out to page/64K boundaries. */
515 maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
516 minvma = minvma & ~0xffffU;
518 flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
/* Reserve the whole range (PROT_NONE); PIC libraries let the kernel
 * choose the base, non-PIC ones request their linked address. */
522 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
523 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
524 if (_dl_mmap_check_error(status)) {
525 _dl_dprintf(2, "%s: can't map %s\n", _dl_progname, libname);
526 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
528 _dl_munmap(header, _dl_pagesize);
531 libaddr = (unsigned long) status;
534 /* Get the memory to store the library */
/* Second pass: map each PT_LOAD segment over the reservation. */
535 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
537 for (i = 0; i < epnt->e_phnum; i++) {
538 if (ppnt->p_type == PT_LOAD) {
540 /* See if this is a PIC library. */
541 if (i == 0 && ppnt->p_vaddr > 0x1000000) {
543 /* flags |= MAP_FIXED; */
/* Writable segment: map the file-backed part, zero-pad the tail
 * of the last file page, then map anonymous pages for any bss. */
548 if (ppnt->p_flags & PF_W) {
549 unsigned long map_size;
552 status = (char *) _dl_mmap((char *) ((piclib ? libaddr : 0) +
553 (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN)
554 + ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags, infile,
555 ppnt->p_offset & OFFS_ALIGN);
557 if (_dl_mmap_check_error(status)) {
558 _dl_dprintf(2, "%s: can't map '%s'\n",
559 _dl_progname, libname);
560 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
561 _dl_munmap((char *) libaddr, maxvma - minvma);
563 _dl_munmap(header, _dl_pagesize);
567 /* Pad the last page with zeroes. */
568 cpnt = (char *) (status + (ppnt->p_vaddr & ADDR_ALIGN) +
570 while (((unsigned long) cpnt) & ADDR_ALIGN)
573 /* I am not quite sure if this is completely
574 * correct to do or not, but the basic way that
575 * we handle bss segments is that we mmap
576 * /dev/zero if there are any pages left over
577 * that are not mapped as part of the file */
579 map_size = (ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & PAGE_ALIGN;
581 if (map_size < ppnt->p_vaddr + ppnt->p_memsz)
582 status = (char *) _dl_mmap((char *) map_size +
583 (piclib ? libaddr : 0),
584 ppnt->p_vaddr + ppnt->p_memsz - map_size,
585 LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS, -1, 0);
/* Non-writable segment: plain file-backed mapping, no bss work. */
587 status = (char *) _dl_mmap((char *) (ppnt->p_vaddr & PAGE_ALIGN)
588 + (piclib ? libaddr : 0), (ppnt->p_vaddr & ADDR_ALIGN) +
589 ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags,
590 infile, ppnt->p_offset & OFFS_ALIGN);
591 if (_dl_mmap_check_error(status)) {
592 _dl_dprintf(2, "%s: can't map '%s'\n", _dl_progname, libname);
593 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
594 _dl_munmap((char *) libaddr, maxvma - minvma);
596 _dl_munmap(header, _dl_pagesize);
600 /* if(libaddr == 0 && piclib) {
601 libaddr = (unsigned long) status;
609 /* For a non-PIC library, the addresses are all absolute */
/* Relocate the dynamic section address by the load base (PIC case —
 * presumably guarded by a piclib test on an elided line; verify). */
611 dynamic_addr += (unsigned long) libaddr;
615 * OK, the ELF library is now loaded into VM in the correct locations
616 * The next step is to go through and do the dynamic linking (if needed).
619 /* Start by scanning the dynamic section to get all of the pointers */
622 _dl_internal_error_number = LD_ERROR_NODYNAMIC;
623 _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
624 _dl_progname, libname);
625 _dl_munmap(header, _dl_pagesize);
629 dpnt = (Elf32_Dyn *) dynamic_addr;
/* Convert byte size to entry count for the loop below. */
631 dynamic_size = dynamic_size / sizeof(Elf32_Dyn);
632 _dl_memset(dynamic_info, 0, sizeof(dynamic_info));
634 #if defined(__mips__)
638 Elf32_Dyn *dpnt = (Elf32_Dyn *) dynamic_addr;
/* Record each DT_* value, skipping tags beyond the table's range;
 * DT_TEXTREL is folded to a simple boolean flag. */
651 for (indx = 0; indx < dynamic_size; indx++)
653 if (dpnt->d_tag > DT_JMPREL) {
657 dynamic_info[dpnt->d_tag] = dpnt->d_un.d_val;
658 if (dpnt->d_tag == DT_TEXTREL)
659 dynamic_info[DT_TEXTREL] = 1;
664 /* If the TEXTREL is set, this means that we need to make the pages
665 writable before we perform relocations. Do this now. They get set
668 if (dynamic_info[DT_TEXTREL]) {
669 #ifndef FORCE_SHAREABLE_TEXT_SEGMENTS
/* Make every read-only PT_LOAD segment writable for text relocs. */
670 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
671 for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
672 if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W))
673 _dl_mprotect((void *) ((piclib ? libaddr : 0) +
674 (ppnt->p_vaddr & PAGE_ALIGN)),
675 (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
676 PROT_READ | PROT_WRITE | PROT_EXEC);
679 _dl_dprintf(_dl_debug_file, "Can't modify %s's text section. Use GCC option -fPIC for shared objects, please.\n",libname);
/* Register the module and record its program-header location. */
684 tpnt = _dl_add_elf_hash_table(libname, (char *) libaddr, dynamic_info,
685 dynamic_addr, dynamic_size);
687 tpnt->ppnt = (ElfW(Phdr) *)(intptr_t) (tpnt->loadaddr + epnt->e_phoff);
688 tpnt->n_phent = epnt->e_phnum;
691 * Add this object into the symbol chain
694 (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
695 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
696 (*rpnt)->next->prev = (*rpnt);
697 *rpnt = (*rpnt)->next;
699 tpnt->symbol_scope = _dl_symbol_tables;
702 tpnt->libtype = elf_lib;
705 * OK, the next thing we need to do is to insert the dynamic linker into
706 * the proper entry in the GOT so that the PLT symbols can be properly
/* DT_PLTGOT is absolute for non-PIC, base-relative for PIC
 * (the adjusted form on the second line — guard elided). */
710 lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
713 lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT] +
715 INIT_GOT(lpnt, tpnt);
718 #if defined (__SUPPORT_LD_DEBUG__)
720 _dl_dprintf(2, "\n\tfile='%s'; generating link map\n", libname);
721 _dl_dprintf(2, "\t\tdynamic: %x base: %x size: %x\n",
722 dynamic_addr, libaddr, dynamic_size);
723 _dl_dprintf(2, "\t\t entry: %x phdr: %x phnum: %x\n\n",
724 epnt->e_entry + libaddr, tpnt->ppnt, tpnt->n_phent);
/* Done with the scratch header page. */
728 _dl_munmap(header, _dl_pagesize);
/* Recursively process relocations for a chain of loaded objects:
 * normal relocations, PLT/jump relocations (lazy or eager per
 * RTLD_LAZY in `flag`), and copy relocations.  init_flag bits ensure
 * each relocation class runs at most once per module.  Returns the
 * accumulated error ("goof") count.
 * NOTE(review): several lines (goof init, tpnt assignment from rpnt,
 * the copy-reloc guard condition, returns) are elided in this
 * excerpt — confirm against the full file. */
733 int _dl_fixup(struct dyn_elf *rpnt, int flag)
736 struct elf_resolve *tpnt;
/* Recurse to the end of the chain first so dependencies are
 * relocated before their dependents. */
739 goof += _dl_fixup(rpnt->next, flag);
742 #if defined (__SUPPORT_LD_DEBUG__)
743 if(_dl_debug) _dl_dprintf(_dl_debug_file,"\nrelocation processing: %s", tpnt->libname);
/* Bail out on relocation types this target's parser cannot handle. */
746 if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
747 #if defined (__SUPPORT_LD_DEBUG__)
749 _dl_dprintf(2, "%s: can't handle %s relocation records\n",
750 _dl_progname, UNSUPPORTED_RELOC_STR);
/* Ordinary (non-PLT) relocations, processed once per module. */
757 if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR]) {
758 if (tpnt->init_flag & RELOCS_DONE)
760 tpnt->init_flag |= RELOCS_DONE;
761 goof += _dl_parse_relocation_information(rpnt,
762 tpnt->dynamic_info[DT_RELOC_TABLE_ADDR],
763 tpnt->dynamic_info[DT_RELOC_TABLE_SIZE], 0);
/* PLT relocations: defer (lazy stubs) or resolve now. */
766 if (tpnt->dynamic_info[DT_JMPREL]) {
767 if (tpnt->init_flag & JMP_RELOCS_DONE)
769 tpnt->init_flag |= JMP_RELOCS_DONE;
770 if (flag & RTLD_LAZY) {
771 _dl_parse_lazy_relocation_information(rpnt,
772 tpnt->dynamic_info[DT_JMPREL],
773 tpnt->dynamic_info [DT_PLTRELSZ], 0);
775 goof += _dl_parse_relocation_information(rpnt,
776 tpnt->dynamic_info[DT_JMPREL],
777 tpnt->dynamic_info[DT_PLTRELSZ], 0);
/* Copy relocations (guard condition elided in this excerpt). */
781 if (tpnt->init_flag & COPY_RELOCS_DONE)
783 tpnt->init_flag |= COPY_RELOCS_DONE;
784 goof += _dl_parse_copy_information(rpnt,
785 tpnt->dynamic_info[DT_RELOC_TABLE_ADDR],
786 tpnt->dynamic_info[DT_RELOC_TABLE_SIZE], 0);
788 #if defined (__SUPPORT_LD_DEBUG__)
790 _dl_dprintf(_dl_debug_file,"\nrelocation processing: %s", tpnt->libname);
791 _dl_dprintf(_dl_debug_file,"; finished\n\n");
798 /* Minimal printf which handles only %s, %d, and %x */
/* Writes a formatted message to fd using only _dl_write, for use
 * before/without stdio.  The format is copied into a scratch page and
 * tokenized; each literal run and each converted argument is written
 * with a separate _dl_write call.
 * NOTE(review): the %-dispatch switch, va_start/va_end, and the tmp
 * buffer declaration are elided in this excerpt.  Also note the
 * "overflow\n" write passes length 11 for a 9/10-char string —
 * verify the correct byte count against the full file. */
799 void _dl_dprintf(int fd, const char *fmt, ...)
803 char *start, *ptr, *string;
/* One page of scratch space; unmapped before returning. */
806 buf = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
807 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
808 if (_dl_mmap_check_error(buf)) {
809 _dl_write(fd, "mmap of a spare page failed!\n", 29);
/* Refuse formats that would not fit in the scratch page. */
818 if (_dl_strlen(fmt) >= (_dl_pagesize - 1)) {
819 _dl_write(fd, "overflow\n", 11);
823 _dl_strcpy(buf, fmt);
/* Scan to the next '%' (or end), emitting the literal run. */
827 while (*ptr != '%' && *ptr) {
833 _dl_write(fd, start, _dl_strlen(start));
/* %s: NULL prints "(null)". */
837 string = va_arg(args, char *);
840 _dl_write(fd, "(null)", 6);
842 _dl_write(fd, string, _dl_strlen(string));
/* %d: signed decimal via _dl_simple_ltoa. */
849 num = va_arg(args, int);
851 string = _dl_simple_ltoa(tmp, num);
852 _dl_write(fd, string, _dl_strlen(string));
/* %x: hexadecimal via _dl_simple_ltoahex. */
859 num = va_arg(args, int);
861 string = _dl_simple_ltoahex(tmp, num);
862 _dl_write(fd, string, _dl_strlen(string));
866 _dl_write(fd, "(null)", 6);
/* Trailing literal text after the last conversion. */
872 _dl_write(fd, start, _dl_strlen(start));
876 _dl_munmap(buf, _dl_pagesize);
/* Duplicate a NUL-terminated string into _dl_malloc'd storage.
 * NOTE(review): the _dl_malloc result is used unchecked here, and the
 * declarations/return are elided in this excerpt — confirm the full
 * file's error handling.  Caller owns the returned buffer. */
880 char *_dl_strdup(const char *string)
885 len = _dl_strlen(string);
886 retval = _dl_malloc(len + 1);
887 _dl_strcpy(retval, string);
/* Hook: once the C library is up it installs its real malloc here, and
 * _dl_malloc (below) forwards to it instead of the bump allocator. */
891 void *(*_dl_malloc_function) (size_t size) = NULL;
/* NOTE(review): the body guarded by this conditional (hard-float
 * related) is elided in this excerpt; its matching #endif is not
 * visible here. */
897 #if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__
/* Early-boot allocator: a simple bump allocator over anonymous mmap'd
 * pages (_dl_mmap_zero/_dl_malloc_addr).  Once _dl_malloc_function is
 * installed (by the C library), all requests forward to it.  Memory
 * obtained here is never freed.
 * NOTE(review): several lines (declarations of retval/rounded_size,
 * the else branch of the rounding, returns) are elided in this
 * excerpt.  Also note line 929 masks with `& _dl_pagesize` where
 * page-rounding conventionally uses `& ~(_dl_pagesize - 1)` — verify
 * against the full file. */
902 void *_dl_malloc(int size)
907 #ifdef __SUPPORT_LD_DEBUG_EARLY__
908 _dl_dprintf(2, "malloc: request for %d bytes\n", size);
/* After libc init, delegate to the real allocator. */
912 if (_dl_malloc_function)
913 return (*_dl_malloc_function) (size);
/* Current page exhausted: grab a fresh anonymous mapping. */
915 if ((int)(_dl_malloc_addr - _dl_mmap_zero + size) > (int)_dl_pagesize) {
918 /* Since the above assumes we get a full page even if
919 we request less than that, make sure we request a
920 full page, since uClinux may give us less than than
921 a full page. We might round even
922 larger-than-a-page sizes, but we end up never
923 reusing _dl_mmap_zero/_dl_malloc_addr in that case,
926 The actual page size doesn't really matter; as long
927 as we're self-consistent here, we're safe. */
928 if (size < (int)_dl_pagesize)
929 rounded_size = (size + _dl_pagesize - 1) & _dl_pagesize;
934 #ifdef __SUPPORT_LD_DEBUG_EARLY__
935 _dl_dprintf(2, "malloc: mmapping more memory\n");
937 _dl_mmap_zero = _dl_malloc_addr = _dl_mmap((void *) 0, rounded_size,
938 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
939 if (_dl_mmap_check_error(_dl_mmap_zero)) {
940 _dl_dprintf(2, "%s: mmap of a spare page failed!\n", _dl_progname);
/* Bump-allocate: hand out the current pointer, then advance and
 * round the next pointer up to the platform alignment. */
944 retval = _dl_malloc_addr;
945 _dl_malloc_addr += size;
947 /* Align memory to 4 byte boundary. Some platforms require this,
948 * others simply get better performance. */
949 _dl_malloc_addr = (unsigned char *) (((unsigned long) _dl_malloc_addr +
950 __alignof__(union __align_type) - 1) & ~(__alignof__(union __align_type) - 1));
/* Hook for the real free(), installed alongside _dl_malloc_function.
 * Before it is set, _dl_free is a no-op — memory from the early bump
 * allocator is never reclaimed. */
954 void (*_dl_free_function) (void *p) = NULL;
955 void _dl_free (void *p) {
956 if (_dl_free_function)
957 (*_dl_free_function) (p);