OSDN Git Service

b9de19910a164ae4a9ed13d214a6a63097eea8a8
[uclinux-h8/uClibc.git] / ldso / ldso / dl-elf.c
1 /* vi: set sw=4 ts=4: */
2 /*
3  * This file contains the helper routines to load an ELF shared
4  * library into memory and add the symbol table info to the chain.
5  *
6  * Copyright (C) 2000-2006 by Erik Andersen <andersen@codepoet.org>
7  * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8  *                              David Engel, Hongjiu Lu and Mitch D'Souza
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. The name of the above contributors may not be
16  *    used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32
33 #include "ldso.h"
34
35 #ifdef __LDSO_CACHE_SUPPORT__
36
/* Mapped image of the ld.so cache file.  NULL means "not mapped yet";
 * MAP_FAILED is used as a poison value after a failed attempt so we
 * never retry (see _dl_map_cache below). */
static caddr_t _dl_cache_addr = NULL;
/* Byte size of the mapping above; only meaningful while mapped. */
static size_t _dl_cache_size = 0;
39
40 int _dl_map_cache(void)
41 {
42         int fd;
43         struct stat st;
44         header_t *header;
45         libentry_t *libent;
46         int i, strtabsize;
47
48         if (_dl_cache_addr == MAP_FAILED)
49                 return -1;
50         else if (_dl_cache_addr != NULL)
51                 return 0;
52
53         if (_dl_stat(LDSO_CACHE, &st)
54             || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
55                 _dl_cache_addr = MAP_FAILED;    /* so we won't try again */
56                 return -1;
57         }
58
59         _dl_cache_size = st.st_size;
60         _dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ, LDSO_CACHE_MMAP_FLAGS, fd, 0);
61         _dl_close(fd);
62         if (_dl_mmap_check_error(_dl_cache_addr)) {
63                 _dl_dprintf(2, "%s:%i: can't map '%s'\n",
64                                 _dl_progname, __LINE__, LDSO_CACHE);
65                 return -1;
66         }
67
68         header = (header_t *) _dl_cache_addr;
69
70         if (_dl_cache_size < sizeof(header_t) ||
71                         _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
72                         || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
73                         || _dl_cache_size <
74                         (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
75                         || _dl_cache_addr[_dl_cache_size - 1] != '\0')
76         {
77                 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
78                                 LDSO_CACHE);
79                 goto fail;
80         }
81
82         strtabsize = _dl_cache_size - sizeof(header_t) -
83                 header->nlibs * sizeof(libentry_t);
84         libent = (libentry_t *) & header[1];
85
86         for (i = 0; i < header->nlibs; i++) {
87                 if (libent[i].sooffset >= strtabsize ||
88                                 libent[i].liboffset >= strtabsize)
89                 {
90                         _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
91                         goto fail;
92                 }
93         }
94
95         return 0;
96
97 fail:
98         _dl_munmap(_dl_cache_addr, _dl_cache_size);
99         _dl_cache_addr = MAP_FAILED;
100         return -1;
101 }
102
103 int _dl_unmap_cache(void)
104 {
105         if (_dl_cache_addr == NULL || _dl_cache_addr == MAP_FAILED)
106                 return -1;
107
108 #if 1
109         _dl_munmap(_dl_cache_addr, _dl_cache_size);
110         _dl_cache_addr = NULL;
111 #endif
112
113         return 0;
114 }
115 #endif
116
117
118 void
119 _dl_protect_relro (struct elf_resolve *l)
120 {
121         ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
122         ElfW(Addr) start = (base & PAGE_ALIGN);
123         ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);
124         _dl_if_debug_dprint("RELRO protecting %s:  start:%x, end:%x\n", l->libname, start, end);
125         if (start != end &&
126             _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
127                 _dl_dprintf(2, "%s: cannot apply additional memory protection after relocation", l->libname);
128                 _dl_exit(0);
129         }
130 }
131
/* This function's behavior must exactly match that
 * in uClibc/ldso/util/ldd.c */
/*
 * Walk the colon-separated directories in path_list and try to load
 * "name" from each in turn via _dl_load_elf_shared_library().
 * Returns the loaded module on first success, NULL when every
 * directory has been tried.  An empty component (leading/trailing
 * ':', "::", or "") means the current directory.
 */
static struct elf_resolve *
search_for_named_library(const char *name, unsigned rflags, const char *path_list,
	struct dyn_elf **rpnt)
{
	char *path, *path_n, *mylibname;
	struct elf_resolve *tpnt;
	int done;

	if (path_list==NULL)
		return NULL;

	/* We need a writable copy of this string, but we don't
	 * need this allocated permanently since we don't want
	 * to leak memory, so use alloca to put path on the stack */
	done = _dl_strlen(path_list);
	path = alloca(done + 1);

	/* another bit of local storage */
	/* NOTE(review): fixed 2050-byte scratch buffer for "dir/name";
	 * assumes no directory component plus the library name exceeds it
	 * (the caller caps names at 1024) -- confirm for very long
	 * path_list entries. */
	mylibname = alloca(2050);

	_dl_memcpy(path, path_list, done+1);

	/* Unlike ldd.c, don't bother to eliminate double //s */

	/* Replace colons with zeros in path_list */
	/* : at the beginning or end of path maps to CWD */
	/* :: anywhere maps CWD */
	/* "" maps to CWD */
	done = 0;	/* 'done' is reused here as an end-of-string flag */
	path_n = path;	/* start of the current directory component */
	do {
		if (*path == 0) {
			/* Treat the terminating NUL like a final ':' so the
			 * last component is processed, then stop the loop. */
			*path = ':';
			done = 1;
		}
		if (*path == ':') {
			*path = 0;	/* NUL-terminate this component in place */
			if (*path_n)
				_dl_strcpy(mylibname, path_n);
			else
				_dl_strcpy(mylibname, "."); /* Assume current dir if empty path */
			_dl_strcat(mylibname, "/");
			_dl_strcat(mylibname, name);
			if ((tpnt = _dl_load_elf_shared_library(rflags, rpnt, mylibname)) != NULL)
				return tpnt;
			path_n = path+1;	/* next component starts past the separator */
		}
		path++;
	} while (!done);
	return NULL;
}
185
/* Used to return error codes back to dlopen et. al.  */
unsigned long _dl_error_number;		/* last error exposed to dlopen/dlerror callers */
unsigned long _dl_internal_error_number;	/* error recorded while searching/loading, copied out on failure */
189
/*
 * Resolve and load full_libname, honoring the ELF search order:
 * 1. the literal path, if the name contains a '/';
 * 2. DT_RPATH of the requesting object (tpnt), if RUNPATH support is on;
 * 3. LD_LIBRARY_PATH (when permitted);
 * 4. DT_RUNPATH of the requesting object;
 * 5. the ld.so cache (when cache support is compiled in);
 * 6. the directory the dynamic loader itself lives in;
 * 7. the hard-coded default directories.
 * Returns the module's elf_resolve on success; on failure returns NULL
 * with _dl_error_number set for dlerror() reporting.
 */
struct elf_resolve *_dl_load_shared_library(unsigned rflags, struct dyn_elf **rpnt,
	struct elf_resolve *tpnt, char *full_libname, int attribute_unused trace_loaded_objects)
{
	char *pnt;
	struct elf_resolve *tpnt1;
	char *libname;

	_dl_internal_error_number = 0;
	libname = full_libname;

	/* quick hack to ensure mylibname buffer doesn't overflow.  don't
	   allow full_libname or any directory to be longer than 1024. */
	if (_dl_strlen(full_libname) > 1024)
		goto goof;

	/* Skip over any initial './' and '/' stuff to
	 * get the short form libname with no path garbage */
	pnt = _dl_strrchr(libname, '/');
	if (pnt) {
		libname = pnt + 1;
	}

	_dl_if_debug_dprint("\tfind library='%s'; searching\n", libname);
	/* If the filename has any '/', try it straight and leave it at that.
	   For IBCS2 compatibility under linux, we substitute the string
	   /usr/i486-sysv4/lib for /usr/lib in library names. */

	if (libname != full_libname) {
		_dl_if_debug_dprint("\ttrying file='%s'\n", full_libname);
		tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, full_libname);
		if (tpnt1) {
			return tpnt1;
		}
	}

	/*
	 * The ABI specifies that RPATH is searched before LD_LIBRARY_PATH or
	 * the default path of /usr/lib.  Check in rpath directories.
	 */
#ifdef __LDSO_RUNPATH__
	/* DT_RPATH holds an offset into the requester's string table. */
	pnt = (tpnt ? (char *) tpnt->dynamic_info[DT_RPATH] : NULL);
	if (pnt) {
		pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
		_dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt);
		if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
			return tpnt1;
	}
#endif

#ifdef __LDSO_LD_LIBRARY_PATH__
	/* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
	if (_dl_library_path) {
		_dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
		if ((tpnt1 = search_for_named_library(libname, rflags, _dl_library_path, rpnt)) != NULL)
		{
			return tpnt1;
		}
	}
#endif
	/*
	 * The ABI specifies that RUNPATH is searched after LD_LIBRARY_PATH.
	 */
#ifdef __LDSO_RUNPATH__
	pnt = (tpnt ? (char *)tpnt->dynamic_info[DT_RUNPATH] : NULL);
	if (pnt) {
		pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
		_dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt);
		if ((tpnt1 = search_for_named_library(libname, rflags, pnt, rpnt)) != NULL)
			return tpnt1;
	}
#endif

	/*
	 * Where should the cache be searched?  There is no such concept in the
	 * ABI, so we have some flexibility here.  For now, search it before
	 * the hard coded paths that follow (i.e before /lib and /usr/lib).
	 */
#ifdef __LDSO_CACHE_SUPPORT__
	if (_dl_cache_addr != NULL && _dl_cache_addr != MAP_FAILED) {
		int i;
		header_t *header = (header_t *) _dl_cache_addr;
		libentry_t *libent = (libentry_t *) & header[1];
		char *strs = (char *) &libent[header->nlibs];

		_dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE);
		/* Match by soname, then try loading the recorded path. */
		for (i = 0; i < header->nlibs; i++) {
			if ((libent[i].flags == LIB_ELF
			     || libent[i].flags == LIB_ELF_LIBC0
			     || libent[i].flags == LIB_ELF_LIBC5)
			 && _dl_strcmp(libname, strs + libent[i].sooffset) == 0
			 && (tpnt1 = _dl_load_elf_shared_library(rflags, rpnt, strs + libent[i].liboffset))
			) {
				return tpnt1;
			}
		}
	}
#endif
#if defined SHARED && defined __LDSO_SEARCH_INTERP_PATH__
	/* Look for libraries wherever the shared library loader
	 * was installed */
	_dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
	tpnt1 = search_for_named_library(libname, rflags, _dl_ldsopath, rpnt);
	if (tpnt1 != NULL)
		return tpnt1;
#endif
	/* Lastly, search the standard list of paths for the library.
	   This list must exactly match the list in uClibc/ldso/util/ldd.c */
	_dl_if_debug_dprint("\tsearching full lib path list\n");
	tpnt1 = search_for_named_library(libname, rflags,
					UCLIBC_RUNTIME_PREFIX "lib:"
					UCLIBC_RUNTIME_PREFIX "usr/lib"
#ifndef __LDSO_CACHE_SUPPORT__
					":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"
#endif
					, rpnt);
	if (tpnt1 != NULL)
		return tpnt1;

goof:
	/* Well, we shot our wad on that one.  All we can do now is punt */
	if (_dl_internal_error_number)
		_dl_error_number = _dl_internal_error_number;
	else
		_dl_error_number = LD_ERROR_NOFILE;
	_dl_if_debug_dprint("Bummer: could not find '%s'!\n", libname);
	return NULL;
}
317
/*
 * Make a writeable mapping of a segment, regardless of whether PF_W is
 * set or not.
 *
 * infile   - open fd of the ELF file being loaded
 * ppnt     - the PT_LOAD program header to map
 * piclib   - 0: fixed-address, 1: PIC, 2: FDPIC-style (map anywhere)
 * flags    - base mmap flags (MAP_PRIVATE, possibly MAP_FIXED)
 * libaddr  - load bias used for piclib == 1
 *
 * Returns the start of the mapped region (piclib2map base for
 * piclib == 2, otherwise the mmap result), or NULL/0 on failure.
 */
static void *
map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags,
	       unsigned long libaddr)
{
	int prot_flags = ppnt->p_flags | PF_W;	/* force writability */
	char *status, *retval;
	char *tryaddr;
	ssize_t size;
	unsigned long map_size;
	char *cpnt;
	char *piclib2map = NULL;

	if (piclib == 2 &&
	    /* We might be able to avoid this call if memsz doesn't
	       require an additional page, but this would require mmap
	       to always return page-aligned addresses and a whole
	       number of pages allocated.  Unfortunately on uClinux
	       may return misaligned addresses and may allocate
	       partial pages, so we may end up doing unnecessary mmap
	       calls.

	       This is what we could do if we knew mmap would always
	       return aligned pages:

	       ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) &
	       PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz)

	       Instead, we have to do this:  */
	    ppnt->p_filesz < ppnt->p_memsz)
	{
		/* Reserve anonymous memory covering the whole memsz span so
		 * the BSS tail beyond filesz has somewhere to live. */
		piclib2map = (char *)
			_dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz,
				 LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0);
		if (_dl_mmap_check_error(piclib2map))
			return 0;
	}

	/* Target address: the anonymous reservation for piclib == 2,
	 * otherwise the segment's page-aligned vaddr plus load bias. */
	tryaddr = piclib == 2 ? piclib2map
		: ((char*) (piclib ? libaddr : 0) +
		   (ppnt->p_vaddr & PAGE_ALIGN));

	size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;

	/* For !MMU, mmap to fixed address will fail.
	   So instead of desperately call mmap and fail,
	   we set status to MAP_FAILED to save a call
	   to mmap ().  */
#ifndef __ARCH_USE_MMU__
	if (piclib2map == 0)
#endif
		status = (char *) _dl_mmap
			(tryaddr, size, LXFLAGS(prot_flags),
			 flags | (piclib2map ? MAP_FIXED : 0),
			 infile, ppnt->p_offset & OFFS_ALIGN);
#ifndef __ARCH_USE_MMU__
	else
		status = MAP_FAILED;
#endif
#ifdef _DL_PREAD
	/* Fallback: read the file data directly into the anonymous
	 * reservation when the file-backed mmap failed. */
	if (_dl_mmap_check_error(status) && piclib2map
	    && (_DL_PREAD (infile, tryaddr, size,
			   ppnt->p_offset & OFFS_ALIGN) == size))
		status = tryaddr;
#endif
	if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status))
		return 0;

	if (piclib2map)
		retval = piclib2map;
	else
		retval = status;

	/* Now we want to allocate and zero-out any data from the end
	   of the region we mapped in from the file (filesz) to the
	   end of the loadable segment (memsz).  We may need
	   additional pages for memsz, that we map in below, and we
	   can count on the kernel to zero them out, but we have to
	   zero out stuff in the last page that we mapped in from the
	   file.  However, we can't assume to have actually obtained
	   full pages from the kernel, since we didn't ask for them,
	   and uClibc may not give us full pages for small
	   allocations.  So only zero out up to memsz or the end of
	   the page, whichever comes first.  */

	/* CPNT is the beginning of the memsz portion not backed by
	   filesz.  */
	cpnt = (char *) (status + size);

	/* MAP_SIZE is the address of the
	   beginning of the next page.  */
	map_size = (ppnt->p_vaddr + ppnt->p_filesz
		    + ADDR_ALIGN) & PAGE_ALIGN;

	_dl_memset (cpnt, 0,
		    MIN (map_size
			 - (ppnt->p_vaddr
			    + ppnt->p_filesz),
			 ppnt->p_memsz
			 - ppnt->p_filesz));

	/* Map any remaining whole pages of BSS anonymously (the kernel
	 * zero-fills them); not needed when piclib2map already covers
	 * the full memsz span. */
	if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) {
		tryaddr = map_size + (char*)(piclib ? libaddr : 0);
		status = (char *) _dl_mmap(tryaddr,
					   ppnt->p_vaddr + ppnt->p_memsz - map_size,
					   LXFLAGS(prot_flags),
					   flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (_dl_mmap_check_error(status) || tryaddr != status)
			return NULL;
	}
	return retval;
}
433
434 /*
435  * Read one ELF library into memory, mmap it into the correct locations and
436  * add the symbol info to the symbol chain.  Perform any relocations that
437  * are required.
438  */
439
440 struct elf_resolve *_dl_load_elf_shared_library(unsigned rflags,
441         struct dyn_elf **rpnt, const char *libname)
442 {
443         ElfW(Ehdr) *epnt;
444         unsigned long dynamic_addr = 0;
445         ElfW(Dyn) *dpnt;
446         struct elf_resolve *tpnt;
447         ElfW(Phdr) *ppnt;
448 #if defined(USE_TLS) && USE_TLS
449         ElfW(Phdr) *tlsppnt = NULL;
450 #endif
451         char *status, *header;
452         unsigned long dynamic_info[DYNAMIC_SIZE];
453         unsigned long *lpnt;
454         unsigned long libaddr;
455         unsigned long minvma = 0xffffffff, maxvma = 0;
456         unsigned int rtld_flags;
457         int i, flags, piclib, infile;
458         ElfW(Addr) relro_addr = 0;
459         size_t relro_size = 0;
460         struct stat st;
461         uint32_t *p32;
462         DL_LOADADDR_TYPE lib_loadaddr = 0;
463         DL_INIT_LOADADDR_EXTRA_DECLS
464
465         libaddr = 0;
466         infile = _dl_open(libname, O_RDONLY, 0);
467         if (infile < 0) {
468                 _dl_internal_error_number = LD_ERROR_NOFILE;
469                 return NULL;
470         }
471
472         if (_dl_fstat(infile, &st) < 0) {
473                 _dl_internal_error_number = LD_ERROR_NOFILE;
474                 _dl_close(infile);
475                 return NULL;
476         }
477         /* If we are in secure mode (i.e. a setuid/gid binary using LD_PRELOAD),
478            we don't load the library if it isn't setuid. */
479         if (rflags & DL_RESOLVE_SECURE) {
480                 if (!(st.st_mode & S_ISUID)) {
481                         _dl_close(infile);
482                         return NULL;
483                 }
484         }
485
486         /* Check if file is already loaded */
487         for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
488                 if (tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {
489                         /* Already loaded */
490                         tpnt->usage_count++;
491                         _dl_close(infile);
492                         return tpnt;
493                 }
494         }
495         if (rflags & DL_RESOLVE_NOLOAD) {
496                 _dl_close(infile);
497                 return NULL;
498         }
499         header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
500                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZE, -1, 0);
501         if (_dl_mmap_check_error(header)) {
502                 _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
503                 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
504                 _dl_close(infile);
505                 return NULL;
506         }
507
508         _dl_read(infile, header, _dl_pagesize);
509         epnt = (ElfW(Ehdr) *) (intptr_t) header;
510         p32 = (uint32_t*)&epnt->e_ident;
511         if (*p32 != ELFMAG_U32) {
512                 _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
513                                 libname);
514                 _dl_internal_error_number = LD_ERROR_NOTELF;
515                 _dl_close(infile);
516                 _dl_munmap(header, _dl_pagesize);
517                 return NULL;
518         }
519
520         if ((epnt->e_type != ET_DYN
521 #ifdef __LDSO_STANDALONE_SUPPORT__
522                 && epnt->e_type != ET_EXEC
523 #endif
524                 ) || (epnt->e_machine != MAGIC1
525 #ifdef MAGIC2
526                                 && epnt->e_machine != MAGIC2
527 #endif
528                         ))
529         {
530                 _dl_internal_error_number =
531                         (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
532                 _dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
533                                 "\n", _dl_progname, libname);
534                 _dl_close(infile);
535                 _dl_munmap(header, _dl_pagesize);
536                 return NULL;
537         }
538
539         ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
540
541         piclib = 1;
542         for (i = 0; i < epnt->e_phnum; i++) {
543
544                 if (ppnt->p_type == PT_DYNAMIC) {
545                         if (dynamic_addr)
546                                 _dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
547                                                 _dl_progname, libname);
548                         dynamic_addr = ppnt->p_vaddr;
549                 }
550
551                 if (ppnt->p_type == PT_LOAD) {
552                         /* See if this is a PIC library. */
553                         if (minvma == 0xffffffff && ppnt->p_vaddr > 0x1000000) {
554                                 piclib = 0;
555                                 minvma = ppnt->p_vaddr;
556                         }
557                         if (piclib && ppnt->p_vaddr < minvma) {
558                                 minvma = ppnt->p_vaddr;
559                         }
560                         if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
561                                 maxvma = ppnt->p_vaddr + ppnt->p_memsz;
562                         }
563                 }
564                 if (ppnt->p_type == PT_TLS) {
565 #if defined(USE_TLS) && USE_TLS
566                         if (ppnt->p_memsz == 0)
567                                 /* Nothing to do for an empty segment.  */
568                                 continue;
569                         else
570                                 /* Save for after 'tpnt' is actually allocated. */
571                                 tlsppnt = ppnt;
572 #else
573                         /*
574                          * Yup, the user was an idiot and tried to sneak in a library with
575                          * TLS in it and we don't support it. Let's fall on our own sword
576                          * and scream at the luser while we die.
577                          */
578                         _dl_dprintf(2, "%s: '%s' library contains unsupported TLS\n",
579                                 _dl_progname, libname);
580                         _dl_internal_error_number = LD_ERROR_TLS_FAILED;
581                         _dl_close(infile);
582                         _dl_munmap(header, _dl_pagesize);
583                         return NULL;
584 #endif
585                 }
586                 ppnt++;
587         }
588
589 #ifdef __LDSO_STANDALONE_SUPPORT__
590         if (epnt->e_type == ET_EXEC)
591                 piclib = 0;
592 #endif
593
594         DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
595
596         maxvma = (maxvma + ADDR_ALIGN) & PAGE_ALIGN;
597         minvma = minvma & ~ADDR_ALIGN;
598
599         flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
600
601         if (piclib == 0 || piclib == 1) {
602                 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
603                                 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
604                 if (_dl_mmap_check_error(status)) {
605                 cant_map:
606                         _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
607                         _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
608                         _dl_close(infile);
609                         _dl_munmap(header, _dl_pagesize);
610                         return NULL;
611                 }
612                 libaddr = (unsigned long) status;
613                 flags |= MAP_FIXED;
614         }
615
616         /* Get the memory to store the library */
617         ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
618
619         DL_INIT_LOADADDR(lib_loadaddr, libaddr - minvma, ppnt, epnt->e_phnum);
620
621         for (i = 0; i < epnt->e_phnum; i++) {
622                 if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
623                         char *addr;
624
625                         addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
626                         if (addr == NULL) {
627                         cant_map1:
628                                 DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
629                                 goto cant_map;
630                         }
631
632                         DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
633                         ppnt++;
634                         continue;
635                 }
636                 if (ppnt->p_type == PT_GNU_RELRO) {
637                         relro_addr = ppnt->p_vaddr;
638                         relro_size = ppnt->p_memsz;
639                 }
640                 if (ppnt->p_type == PT_LOAD) {
641                         char *tryaddr;
642                         ssize_t size;
643
644                         if (ppnt->p_flags & PF_W) {
645                                 status = map_writeable (infile, ppnt, piclib, flags, libaddr);
646                                 if (status == NULL)
647                                         goto cant_map1;
648                         } else {
649                                 tryaddr = (piclib == 2 ? 0
650                                            : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
651                                            + (piclib ? libaddr : lib_loadaddr));
652                                 size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
653                                 status = (char *) _dl_mmap
654                                            (tryaddr, size, LXFLAGS(ppnt->p_flags),
655                                             flags | (piclib == 2 ? MAP_EXECUTABLE
656                                                      | MAP_DENYWRITE : 0),
657                                             infile, ppnt->p_offset & OFFS_ALIGN);
658                                 if (_dl_mmap_check_error(status)
659                                     || (tryaddr && tryaddr != status))
660                                   goto cant_map1;
661                         }
662                         DL_INIT_LOADADDR_HDR(lib_loadaddr,
663                                              status + (ppnt->p_vaddr & ADDR_ALIGN),
664                                              ppnt);
665
666                         /* if (libaddr == 0 && piclib) {
667                            libaddr = (unsigned long) status;
668                            flags |= MAP_FIXED;
669                            } */
670                 }
671                 ppnt++;
672         }
673
674         /*
675          * The dynamic_addr must be take into acount lib_loadaddr value, to note
676          * it is zero when the SO has been mapped to the elf's physical addr
677          */
678         if (lib_loadaddr) {
679                 dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr);
680         }
681
682         /*
683          * OK, the ELF library is now loaded into VM in the correct locations
684          * The next step is to go through and do the dynamic linking (if needed).
685          */
686
687         /* Start by scanning the dynamic section to get all of the pointers */
688
689         if (!dynamic_addr) {
690                 _dl_internal_error_number = LD_ERROR_NODYNAMIC;
691                 _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
692                                 _dl_progname, libname);
693                 _dl_munmap(header, _dl_pagesize);
694                 _dl_close(infile);
695                 return NULL;
696         }
697
698         dpnt = (ElfW(Dyn) *) dynamic_addr;
699         _dl_memset(dynamic_info, 0, sizeof(dynamic_info));
700         rtld_flags = _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
701         /* If the TEXTREL is set, this means that we need to make the pages
702            writable before we perform relocations.  Do this now. They get set
703            back again later. */
704
705         if (dynamic_info[DT_TEXTREL]) {
706 #ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__
707                 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
708                 for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
709                         if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
710 #ifdef __ARCH_USE_MMU__
711                                 _dl_mprotect((void *) ((piclib ? libaddr : lib_loadaddr) +
712                                                         (ppnt->p_vaddr & PAGE_ALIGN)),
713                                                 (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
714                                                 PROT_READ | PROT_WRITE | PROT_EXEC);
715 #else
716                                 void *new_addr;
717                                 new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr);
718                                 if (!new_addr) {
719                                         _dl_dprintf(_dl_debug_file, "Can't modify %s's text section.",
720                                                     libname);
721                                         _dl_exit(1);
722                                 }
723                                 DL_UPDATE_LOADADDR_HDR(lib_loadaddr,
724                                                        new_addr + (ppnt->p_vaddr & ADDR_ALIGN),
725                                                        ppnt);
726                                 /* This has invalidated all pointers into the previously readonly segment.
727                                    Update any them to point into the remapped segment.  */
728                                 _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
729 #endif
730                         }
731                 }
732 #else
733                 _dl_dprintf(_dl_debug_file, "Can't modify %s's text section."
734                         " Use GCC option -fPIC for shared objects, please.\n",
735                         libname);
736                 _dl_exit(1);
737 #endif
738         }
739
740         _dl_close(infile);
741
742         tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
743                         dynamic_addr, 0);
744         tpnt->mapaddr = libaddr;
745         tpnt->relro_addr = relro_addr;
746         tpnt->relro_size = relro_size;
747         tpnt->st_dev = st.st_dev;
748         tpnt->st_ino = st.st_ino;
749         tpnt->ppnt = (ElfW(Phdr) *) DL_RELOC_ADDR(tpnt->mapaddr, epnt->e_phoff);
750         tpnt->n_phent = epnt->e_phnum;
751         tpnt->rtld_flags |= rtld_flags;
752 #ifdef __LDSO_STANDALONE_SUPPORT__
753         tpnt->l_entry = epnt->e_entry;
754 #endif
755
756 #if defined(USE_TLS) && USE_TLS
757         if (tlsppnt) {
758                 _dl_debug_early("Found TLS header for %s\n", libname);
759 # if NO_TLS_OFFSET != 0
760                 tpnt->l_tls_offset = NO_TLS_OFFSET;
761 # endif
762                 tpnt->l_tls_blocksize = tlsppnt->p_memsz;
763                 tpnt->l_tls_align = tlsppnt->p_align;
764                 if (tlsppnt->p_align == 0)
765                         tpnt->l_tls_firstbyte_offset = 0;
766                 else
767                         tpnt->l_tls_firstbyte_offset = tlsppnt->p_vaddr &
768                                 (tlsppnt->p_align - 1);
769                 tpnt->l_tls_initimage_size = tlsppnt->p_filesz;
770                 tpnt->l_tls_initimage = (void *) tlsppnt->p_vaddr;
771
772                 /* Assign the next available module ID.  */
773                 tpnt->l_tls_modid = _dl_next_tls_modid ();
774
775                 /* We know the load address, so add it to the offset. */
776 #ifdef __LDSO_STANDALONE_SUPPORT__
777                 if ((tpnt->l_tls_initimage != NULL) && piclib)
778 #else
779                 if (tpnt->l_tls_initimage != NULL)
780 #endif
781                 {
782 # ifdef __SUPPORT_LD_DEBUG_EARLY__
783                         unsigned int tmp = (unsigned int) tpnt->l_tls_initimage;
784                         tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
785                         _dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", tmp, tpnt->l_tls_initimage, tpnt->l_tls_initimage_size);
786                         tmp = 0;
787 # else
788                         tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
789 # endif
790                 }
791         }
792 #endif
793
794         /*
795          * Add this object into the symbol chain
796          */
797         if (*rpnt
798 #ifdef __LDSO_STANDALONE_SUPPORT__
799                 /* Do not create a new chain entry for the main executable */
800                 && (*rpnt)->dyn
801 #endif
802                 ) {
803                 (*rpnt)->next = _dl_malloc(sizeof(struct dyn_elf));
804                 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
805                 (*rpnt)->next->prev = (*rpnt);
806                 *rpnt = (*rpnt)->next;
807         }
808 #ifndef SHARED
809         /* When statically linked, the first time we dlopen a DSO
810          * the *rpnt is NULL, so we need to allocate memory for it,
811          * and initialize the _dl_symbol_table.
812          */
813         else {
814                 *rpnt = _dl_symbol_tables = _dl_malloc(sizeof(struct dyn_elf));
815                 _dl_memset(*rpnt, 0, sizeof(struct dyn_elf));
816         }
817 #endif
818         (*rpnt)->dyn = tpnt;
819         tpnt->usage_count++;
820 #ifdef __LDSO_STANDALONE_SUPPORT__
821         tpnt->libtype = (epnt->e_type == ET_DYN) ? elf_lib : elf_executable;
822 #else
823         tpnt->libtype = elf_lib;
824 #endif
825
826         /*
827          * OK, the next thing we need to do is to insert the dynamic linker into
828          * the proper entry in the GOT so that the PLT symbols can be properly
829          * resolved.
830          */
831
832         lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
833
834         if (lpnt) {
835                 lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT]);
836                 INIT_GOT(lpnt, tpnt);
837         }
838
839 #ifdef __DSBT__
840         /* Handle DSBT initialization */
841         {
842                 struct elf_resolve *t, *ref;
843                 int idx = tpnt->loadaddr.map->dsbt_index;
844                 unsigned *dsbt = tpnt->loadaddr.map->dsbt_table;
845
846                 if (idx == 0) {
847                         if (!dynamic_info[DT_TEXTREL]) {
848                                 /* This DSO has not been assigned an index. */
849                                 _dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n",
850                                             _dl_progname, libname);
851                                 _dl_exit(1);
852                         }
853                         /* Find a dsbt table from another module. */
854                         ref = NULL;
855                         for (t = _dl_loaded_modules; t; t = t->next) {
856                                 if (ref == NULL && t != tpnt) {
857                                         ref = t;
858                                         break;
859                                 }
860                         }
861                         idx = tpnt->loadaddr.map->dsbt_size;
862                         while (idx-- > 0)
863                                 if (!ref || ref->loadaddr.map->dsbt_table[idx] == NULL)
864                                         break;
865                         if (idx <= 0) {
866                                 _dl_dprintf(2, "%s: '%s' caused DSBT table overflow!\n",
867                                             _dl_progname, libname);
868                                 _dl_exit(1);
869                         }
870                         _dl_if_debug_dprint("\n\tfile='%s';  assigned index %d\n",
871                                             libname, idx);
872                         tpnt->loadaddr.map->dsbt_index = idx;
873
874                 }
875
876                 /*
877                  * Setup dsbt slot for this module in dsbt of all modules.
878                  */
879                 ref = NULL;
880                 for (t = _dl_loaded_modules; t; t = t->next) {
881                         /* find a dsbt table from another module */
882                         if (ref == NULL && t != tpnt) {
883                                 ref = t;
884
885                                 /* make sure index is not already used */
886                                 if (t->loadaddr.map->dsbt_table[idx]) {
887                                         struct elf_resolve *dup;
888                                         char *dup_name;
889
890                                         for (dup = _dl_loaded_modules; dup; dup = dup->next)
891                                                 if (dup != tpnt && dup->loadaddr.map->dsbt_index == idx)
892                                                         break;
893                                         if (dup)
894                                                 dup_name = dup->libname;
895                                         else if (idx == 1)
896                                                 dup_name = "runtime linker";
897                                         else
898                                                 dup_name = "unknown library";
899                                         _dl_dprintf(2, "%s: '%s' dsbt index %d already used by %s!\n",
900                                                     _dl_progname, libname, idx, dup_name);
901                                         _dl_exit(1);
902                                 }
903                         }
904                         t->loadaddr.map->dsbt_table[idx] = (unsigned)dsbt;
905                 }
906                 if (ref)
907                         _dl_memcpy(dsbt, ref->loadaddr.map->dsbt_table,
908                                    tpnt->loadaddr.map->dsbt_size * sizeof(unsigned *));
909         }
910 #endif
911         _dl_if_debug_dprint("\n\tfile='%s';  generating link map\n", libname);
912         _dl_if_debug_dprint("\t\tdynamic: %x  base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr));
913         _dl_if_debug_dprint("\t\t  entry: %x  phdr: %x  phnum: %x\n\n",
914                         DL_RELOC_ADDR(lib_loadaddr, epnt->e_entry), tpnt->ppnt, tpnt->n_phent);
915
916         _dl_munmap(header, _dl_pagesize);
917
918         return tpnt;
919 }
920
/* Perform (or finish) relocation processing for the shared object in
   RPNT->dyn and, recursively, for every object further down the dyn_elf
   chain.  now_flag must be RTLD_NOW or zero.  Returns the number of
   relocation failures encountered (0 on success). */
int _dl_fixup(struct dyn_elf *rpnt, struct r_scope_elem *scope, int now_flag)
{
	int goof = 0;
	struct elf_resolve *tpnt;
	ElfW(Word) reloc_size, relative_count;
	ElfW(Addr) reloc_addr;

	/* Recurse to the end of the chain first, so that dependencies are
	   relocated before the objects that rely on them.  Abort on the
	   first failure. */
	if (rpnt->next)
		goof = _dl_fixup(rpnt->next, scope, now_flag);
	if (goof)
		return goof;
	tpnt = rpnt->dyn;

	if (!(tpnt->init_flag & RELOCS_DONE))
		_dl_if_debug_dprint("relocation processing: %s\n", tpnt->libname);

	/* Refuse objects containing relocation types this target's ldso
	   was not built to handle. */
	if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
		_dl_if_debug_dprint("%s: can't handle %s relocation records\n",
				_dl_progname, UNSUPPORTED_RELOC_STR);
		goof++;
		return goof;
	}

	reloc_size = tpnt->dynamic_info[DT_RELOC_TABLE_SIZE];
/* On some machines, notably SPARC & PPC, DT_REL* includes DT_JMPREL in its
   range.  Note that according to the ELF spec, this is completely legal! */
#ifdef ELF_MACHINE_PLTREL_OVERLAP
	reloc_size -= tpnt->dynamic_info [DT_PLTRELSZ];
#endif
	/* Non-PLT relocations: process them once, then mark the object so
	   a second _dl_fixup pass skips this table. */
	if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR] &&
	    !(tpnt->init_flag & RELOCS_DONE)) {
		reloc_addr = tpnt->dynamic_info[DT_RELOC_TABLE_ADDR];
		relative_count = tpnt->dynamic_info[DT_RELCONT_IDX];
		if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */
			reloc_size -= relative_count * sizeof(ELF_RELOC);
			/* RELATIVE relocs need no symbol lookup, only the load
			   bias; with prelink support they may already be
			   applied when the object was prelinked. */
			if (tpnt->loadaddr
#ifdef __LDSO_PRELINK_SUPPORT__
				|| (!tpnt->dynamic_info[DT_GNU_PRELINKED_IDX])
#endif
				)
				elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
			reloc_addr += relative_count * sizeof(ELF_RELOC);
		}
		goof += _dl_parse_relocation_information(rpnt, scope,
				reloc_addr,
				reloc_size);
		tpnt->init_flag |= RELOCS_DONE;
	}
	/* DT_BIND_NOW in the object forces eager binding regardless of the
	   caller's now_flag. */
	if (tpnt->dynamic_info[DT_BIND_NOW])
		now_flag = RTLD_NOW;
	/* PLT (jump-slot) relocations: process them if not yet done, or
	   redo them eagerly when a stricter binding mode is now requested
	   than the one recorded in rtld_flags. */
	if (tpnt->dynamic_info[DT_JMPREL] &&
	    (!(tpnt->init_flag & JMP_RELOCS_DONE) ||
	     (now_flag && !(tpnt->rtld_flags & now_flag)))) {
		tpnt->rtld_flags |= now_flag;
		if (!(tpnt->rtld_flags & RTLD_NOW)) {
			/* Lazy binding: just point the slots at the resolver. */
			_dl_parse_lazy_relocation_information(rpnt,
					tpnt->dynamic_info[DT_JMPREL],
					tpnt->dynamic_info [DT_PLTRELSZ]);
		} else {
			/* Eager binding: resolve every PLT entry right away. */
			goof += _dl_parse_relocation_information(rpnt, scope,
					tpnt->dynamic_info[DT_JMPREL],
					tpnt->dynamic_info[DT_PLTRELSZ]);
		}
		tpnt->init_flag |= JMP_RELOCS_DONE;
	}

#if 0
/* _dl_add_to_slotinfo is called by init_tls() for initial DSO
   or by dlopen() for dynamically loaded DSO. */
#if defined(USE_TLS) && USE_TLS
	/* Add object to slot information data if necessary. */
	if (tpnt->l_tls_blocksize != 0 && tls_init_tp_called)
		_dl_add_to_slotinfo ((struct link_map *) tpnt);
#endif
#endif
	return goof;
}
999
1000 /* Minimal printf which handles only %s, %d, and %x */
1001 void _dl_dprintf(int fd, const char *fmt, ...)
1002 {
1003 #if __WORDSIZE > 32
1004         long int num;
1005 #else
1006         int num;
1007 #endif
1008         va_list args;
1009         char *start, *ptr, *string;
1010         char *buf;
1011
1012         if (!fmt)
1013                 return;
1014
1015         buf = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
1016                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1017         if (_dl_mmap_check_error(buf)) {
1018                 _dl_write(fd, "mmap of a spare page failed!\n", 29);
1019                 _dl_exit(20);
1020         }
1021
1022         start = ptr = buf;
1023
1024         if (_dl_strlen(fmt) >= (_dl_pagesize - 1)) {
1025                 _dl_write(fd, "overflow\n", 11);
1026                 _dl_exit(20);
1027         }
1028
1029         _dl_strcpy(buf, fmt);
1030         va_start(args, fmt);
1031
1032         while (start) {
1033                 while (*ptr != '%' && *ptr) {
1034                         ptr++;
1035                 }
1036
1037                 if (*ptr == '%') {
1038                         *ptr++ = '\0';
1039                         _dl_write(fd, start, _dl_strlen(start));
1040
1041                         switch (*ptr++) {
1042                                 case 's':
1043                                         string = va_arg(args, char *);
1044
1045                                         if (!string)
1046                                                 _dl_write(fd, "(null)", 6);
1047                                         else
1048                                                 _dl_write(fd, string, _dl_strlen(string));
1049                                         break;
1050
1051                                 case 'i':
1052                                 case 'd':
1053                                         {
1054                                                 char tmp[22];
1055 #if __WORDSIZE > 32
1056                                                 num = va_arg(args, long int);
1057 #else
1058                                                 num = va_arg(args, int);
1059 #endif
1060                                                 string = _dl_simple_ltoa(tmp, num);
1061                                                 _dl_write(fd, string, _dl_strlen(string));
1062                                                 break;
1063                                         }
1064                                 case 'x':
1065                                 case 'X':
1066                                         {
1067                                                 char tmp[22];
1068 #if __WORDSIZE > 32
1069                                                 num = va_arg(args, long int);
1070 #else
1071                                                 num = va_arg(args, int);
1072 #endif
1073                                                 string = _dl_simple_ltoahex(tmp, num);
1074                                                 _dl_write(fd, string, _dl_strlen(string));
1075                                                 break;
1076                                         }
1077                                 default:
1078                                         _dl_write(fd, "(null)", 6);
1079                                         break;
1080                         }
1081
1082                         start = ptr;
1083                 } else {
1084                         _dl_write(fd, start, _dl_strlen(start));
1085                         start = NULL;
1086                 }
1087         }
1088         _dl_munmap(buf, _dl_pagesize);
1089         return;
1090 }
1091
/* Duplicate STRING into memory obtained from the linker-private
   allocator (the result is never freed by ldso).  Equivalent to
   strdup(3).
   Fixes: length held in size_t instead of int (no truncation for huge
   strings), and the bytes are copied with _dl_memcpy including the
   terminating NUL, avoiding a second scan of the string. */
char *_dl_strdup(const char *string)
{
	char *retval;
	size_t len;

	len = _dl_strlen(string);
	retval = _dl_malloc(len + 1);
	/* Copy len+1 bytes so the NUL terminator comes along. */
	_dl_memcpy(retval, string, len + 1);
	return retval;
}
1102
1103 unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[],
1104                                     void *debug_addr, DL_LOADADDR_TYPE load_off)
1105 {
1106         return __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);
1107 }