
ldso: use ADDR_ALIGN instead of hard-coded value
uclinux-h8/uClibc.git: ldso/ldso/dl-elf.c
1 /* vi: set sw=4 ts=4: */
2 /*
3  * This file contains the helper routines to load an ELF shared
4  * library into memory and add the symbol table info to the chain.
5  *
6  * Copyright (C) 2000-2006 by Erik Andersen <andersen@codepoet.org>
7  * Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
8  *                              David Engel, Hongjiu Lu and Mitch D'Souza
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. The name of the above contributors may not be
16  *    used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32
33 #include "ldso.h"
34
35 #ifdef __LDSO_CACHE_SUPPORT__
36
37 static caddr_t _dl_cache_addr = NULL;
38 static size_t _dl_cache_size = 0;
39
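/* Map the ld.so cache file (LDSO_CACHE) read-only and sanity-check it: the file
 * must start with a header_t (magic, version, nlibs), followed by nlibs
 * libentry_t records and a NUL-terminated string table holding the library
 * names and paths.  Returns 0 on success; on any failure _dl_cache_addr is set
 * to MAP_FAILED so later calls fail fast, and -1 is returned.  */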
40 int _dl_map_cache(void)
41 {
42         int fd;
43         struct stat st;
44         header_t *header;
45         libentry_t *libent;
46         int i, strtabsize;
47
48         if (_dl_cache_addr == MAP_FAILED)
49                 return -1;
50         else if (_dl_cache_addr != NULL)
51                 return 0;
52
53         if (_dl_stat(LDSO_CACHE, &st)
54             || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
55                 _dl_cache_addr = MAP_FAILED;    /* so we won't try again */
56                 return -1;
57         }
58
59         _dl_cache_size = st.st_size;
60         _dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ, LDSO_CACHE_MMAP_FLAGS, fd, 0);
61         _dl_close(fd);
62         if (_dl_mmap_check_error(_dl_cache_addr)) {
63                 _dl_dprintf(2, "%s:%i: can't map '%s'\n",
64                                 _dl_progname, __LINE__, LDSO_CACHE);
65                 return -1;
66         }
67
68         header = (header_t *) _dl_cache_addr;
69
70         if (_dl_cache_size < sizeof(header_t) ||
71                         _dl_memcmp(header->magic, LDSO_CACHE_MAGIC, LDSO_CACHE_MAGIC_LEN)
72                         || _dl_memcmp(header->version, LDSO_CACHE_VER, LDSO_CACHE_VER_LEN)
73                         || _dl_cache_size <
74                         (sizeof(header_t) + header->nlibs * sizeof(libentry_t))
75                         || _dl_cache_addr[_dl_cache_size - 1] != '\0')
76         {
77                 _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname,
78                                 LDSO_CACHE);
79                 goto fail;
80         }
81
82         strtabsize = _dl_cache_size - sizeof(header_t) -
83                 header->nlibs * sizeof(libentry_t);
84         libent = (libentry_t *) & header[1];
85
86         for (i = 0; i < header->nlibs; i++) {
87                 if (libent[i].sooffset >= strtabsize ||
88                                 libent[i].liboffset >= strtabsize)
89                 {
90                         _dl_dprintf(2, "%s: cache '%s' is corrupt\n", _dl_progname, LDSO_CACHE);
91                         goto fail;
92                 }
93         }
94
95         return 0;
96
97 fail:
98         _dl_munmap(_dl_cache_addr, _dl_cache_size);
99         _dl_cache_addr = MAP_FAILED;
100         return -1;
101 }
102
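/* Undo _dl_map_cache(): unmap the cache image and reset _dl_cache_addr so the
 * cache can be mapped again later.  Returns -1 if no cache is mapped.  */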
103 int _dl_unmap_cache(void)
104 {
105         if (_dl_cache_addr == NULL || _dl_cache_addr == MAP_FAILED)
106                 return -1;
107
108 #if 1
109         _dl_munmap(_dl_cache_addr, _dl_cache_size);
110         _dl_cache_addr = NULL;
111 #endif
112
113         return 0;
114 }
115 #endif
116
117
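/* Re-protect the PT_GNU_RELRO region of L read-only once relocations are done.
 * The start address is rounded down and the end truncated to page boundaries;
 * if nothing page-sized remains (start == end) the mprotect call is skipped.  */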
118 void
119 _dl_protect_relro (struct elf_resolve *l)
120 {
121         ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
122         ElfW(Addr) start = (base & PAGE_ALIGN);
123         ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);
124         _dl_if_debug_dprint("RELRO protecting %s:  start:%x, end:%x\n", l->libname, start, end);
125         if (start != end &&
126             _dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
127                 _dl_dprintf(2, "%s: cannot apply additional memory protection after relocation", l->libname);
128                 _dl_exit(0);
129         }
130 }
131
132 /* This function's behavior must exactly match that
133  * in uClibc/ldso/util/ldd.c */
134 static struct elf_resolve *
135 search_for_named_library(const char *name, int secure, const char *path_list,
136         struct dyn_elf **rpnt)
137 {
138         char *path, *path_n, *mylibname;
139         struct elf_resolve *tpnt;
140         int done;
141
142         if (path_list==NULL)
143                 return NULL;
144
145         /* We need a writable copy of this string, but we don't
146          * need this allocated permanently since we don't want
147          * to leak memory, so use alloca to put path on the stack */
148         done = _dl_strlen(path_list);
149         path = alloca(done + 1);
150
151         /* another bit of local storage */
152         mylibname = alloca(2050);
153
154         _dl_memcpy(path, path_list, done+1);
155
156         /* Unlike ldd.c, don't bother to eliminate double //s */
157
158         /* Replace colons with zeros in path_list */
159         /* : at the beginning or end of path maps to CWD */
160         /* :: anywhere maps to CWD */
161         /* "" maps to CWD */
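        /* For example, "/lib::/usr/lib:" is searched as /lib, then ".", then
         * /usr/lib, then "." again.  */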
162         done = 0;
163         path_n = path;
164         do {
165                 if (*path == 0) {
166                         *path = ':';
167                         done = 1;
168                 }
169                 if (*path == ':') {
170                         *path = 0;
171                         if (*path_n)
172                                 _dl_strcpy(mylibname, path_n);
173                         else
174                                 _dl_strcpy(mylibname, "."); /* Assume current dir if empty path */
175                         _dl_strcat(mylibname, "/");
176                         _dl_strcat(mylibname, name);
177                         if ((tpnt = _dl_load_elf_shared_library(secure, rpnt, mylibname)) != NULL)
178                                 return tpnt;
179                         path_n = path+1;
180                 }
181                 path++;
182         } while (!done);
183         return NULL;
184 }
185
186 /* Used to return error codes back to dlopen et al.  */
187 unsigned long _dl_error_number;
188 unsigned long _dl_internal_error_number;
189
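/* Resolve LIBNAME to a loaded module.  A name containing '/' is tried as given;
 * otherwise directories are searched in this order: DT_RPATH, LD_LIBRARY_PATH,
 * DT_RUNPATH, the ld.so cache (when cache support is built in), the directory
 * the loader itself lives in, and finally the built-in default path list.  */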
190 struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt,
191         struct elf_resolve *tpnt, char *full_libname, int __attribute__((unused)) trace_loaded_objects)
192 {
193         char *pnt;
194         struct elf_resolve *tpnt1;
195         char *libname;
196
197         _dl_internal_error_number = 0;
198         libname = full_libname;
199
200         /* quick hack to ensure mylibname buffer doesn't overflow.  don't
201            allow full_libname or any directory to be longer than 1024. */
202         if (_dl_strlen(full_libname) > 1024)
203                 goto goof;
204
205         /* Skip over any initial './' and '/' stuff to
206          * get the short form libname with no path garbage */
207         pnt = _dl_strrchr(libname, '/');
208         if (pnt) {
209                 libname = pnt + 1;
210         }
211
212         _dl_if_debug_dprint("\tfind library='%s'; searching\n", libname);
213         /* If the filename has any '/', try it straight and leave it at that.
214            For IBCS2 compatibility under linux, we substitute the string
215            /usr/i486-sysv4/lib for /usr/lib in library names. */
216
217         if (libname != full_libname) {
218                 _dl_if_debug_dprint("\ttrying file='%s'\n", full_libname);
219                 tpnt1 = _dl_load_elf_shared_library(secure, rpnt, full_libname);
220                 if (tpnt1) {
221                         return tpnt1;
222                 }
223         }
224
225         /*
226          * The ABI specifies that RPATH is searched before LD_LIBRARY_PATH or
227          * the default path of /usr/lib.  Check in rpath directories.
228          */
229 #ifdef __LDSO_RUNPATH__
230         pnt = (tpnt ? (char *) tpnt->dynamic_info[DT_RPATH] : NULL);
231         if (pnt) {
232                 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
233                 _dl_if_debug_dprint("\tsearching RPATH='%s'\n", pnt);
234                 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
235                         return tpnt1;
236         }
237 #endif
238
239         /* Check in LD_{ELF_}LIBRARY_PATH, if specified and allowed */
240         if (_dl_library_path) {
241                 _dl_if_debug_dprint("\tsearching LD_LIBRARY_PATH='%s'\n", _dl_library_path);
242                 if ((tpnt1 = search_for_named_library(libname, secure, _dl_library_path, rpnt)) != NULL)
243                 {
244                         return tpnt1;
245                 }
246         }
247
248         /*
249          * The ABI specifies that RUNPATH is searched after LD_LIBRARY_PATH.
250          */
251 #ifdef __LDSO_RUNPATH__
252         pnt = (tpnt ? (char *)tpnt->dynamic_info[DT_RUNPATH] : NULL);
253         if (pnt) {
254                 pnt += (unsigned long) tpnt->dynamic_info[DT_STRTAB];
255                 _dl_if_debug_dprint("\tsearching RUNPATH='%s'\n", pnt);
256                 if ((tpnt1 = search_for_named_library(libname, secure, pnt, rpnt)) != NULL)
257                         return tpnt1;
258         }
259 #endif
260
261         /*
262          * Where should the cache be searched?  There is no such concept in the
263          * ABI, so we have some flexibility here.  For now, search it before
264  * the hard-coded paths that follow (i.e. before /lib and /usr/lib).
265          */
266 #ifdef __LDSO_CACHE_SUPPORT__
267         if (_dl_cache_addr != NULL && _dl_cache_addr != MAP_FAILED) {
268                 int i;
269                 header_t *header = (header_t *) _dl_cache_addr;
270                 libentry_t *libent = (libentry_t *) & header[1];
271                 char *strs = (char *) &libent[header->nlibs];
272
273                 _dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE);
274                 for (i = 0; i < header->nlibs; i++) {
275                         if ((libent[i].flags == LIB_ELF
276                              || libent[i].flags == LIB_ELF_LIBC0
277                              || libent[i].flags == LIB_ELF_LIBC5)
278                          && _dl_strcmp(libname, strs + libent[i].sooffset) == 0
279                          && (tpnt1 = _dl_load_elf_shared_library(secure, rpnt, strs + libent[i].liboffset))
280                         ) {
281                                 return tpnt1;
282                         }
283                 }
284         }
285 #endif
286
287         /* Look for libraries wherever the shared library loader
288          * was installed */
289         _dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
290         tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt);
291         if (tpnt1 != NULL)
292                 return tpnt1;
293
294         /* Lastly, search the standard list of paths for the library.
295            This list must exactly match the list in uClibc/ldso/util/ldd.c */
296         _dl_if_debug_dprint("\tsearching full lib path list\n");
297         tpnt1 = search_for_named_library(libname, secure,
298                                         UCLIBC_RUNTIME_PREFIX "lib:"
299                                         UCLIBC_RUNTIME_PREFIX "usr/lib"
300 #ifndef __LDSO_CACHE_SUPPORT__
301                                         ":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"
302 #endif
303                                         , rpnt);
304         if (tpnt1 != NULL)
305                 return tpnt1;
306
307 goof:
308         /* Well, we shot our wad on that one.  All we can do now is punt */
309         if (_dl_internal_error_number)
310                 _dl_error_number = _dl_internal_error_number;
311         else
312                 _dl_error_number = LD_ERROR_NOFILE;
313         _dl_if_debug_dprint("Bummer: could not find '%s'!\n", libname);
314         return NULL;
315 }
316
317
318 /*
319  * Read one ELF library into memory, mmap it into the correct locations and
320  * add the symbol info to the symbol chain.  Perform any relocations that
321  * are required.
322  */
323
324 struct elf_resolve *_dl_load_elf_shared_library(int secure,
325         struct dyn_elf **rpnt, char *libname)
326 {
327         ElfW(Ehdr) *epnt;
328         unsigned long dynamic_addr = 0;
329         ElfW(Dyn) *dpnt;
330         struct elf_resolve *tpnt;
331         ElfW(Phdr) *ppnt;
332 #if defined(USE_TLS) && USE_TLS
333         ElfW(Phdr) *tlsppnt = NULL;
334 #endif
335         char *status, *header;
336         unsigned long dynamic_info[DYNAMIC_SIZE];
337         unsigned long *lpnt;
338         unsigned long libaddr;
339         unsigned long minvma = 0xffffffff, maxvma = 0;
340         unsigned int rtld_flags;
341         int i, flags, piclib, infile;
342         ElfW(Addr) relro_addr = 0;
343         size_t relro_size = 0;
344         struct stat st;
345         uint32_t *p32;
346         DL_LOADADDR_TYPE lib_loadaddr;
347         DL_INIT_LOADADDR_EXTRA_DECLS
348
349         libaddr = 0;
350         infile = _dl_open(libname, O_RDONLY, 0);
351         if (infile < 0) {
352                 _dl_internal_error_number = LD_ERROR_NOFILE;
353                 return NULL;
354         }
355
356         if (_dl_fstat(infile, &st) < 0) {
357                 _dl_internal_error_number = LD_ERROR_NOFILE;
358                 _dl_close(infile);
359                 return NULL;
360         }
361         /* If we are in secure mode (i.e. a setuid/setgid binary using LD_PRELOAD),
362            we don't load the library if it isn't setuid. */
363         if (secure) {
364                 if (!(st.st_mode & S_ISUID)) {
365                         _dl_close(infile);
366                         return NULL;
367                 }
368         }
369
370         /* Check if file is already loaded */
371         for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
372                 if (tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {
373                         /* Already loaded */
374                         tpnt->usage_count++;
375                         _dl_close(infile);
376                         return tpnt;
377                 }
378         }
379         header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
380                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZE, -1, 0);
381         if (_dl_mmap_check_error(header)) {
382                 _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
383                 _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
384                 _dl_close(infile);
385                 return NULL;
386         }
387
388         _dl_read(infile, header, _dl_pagesize);
389         epnt = (ElfW(Ehdr) *) (intptr_t) header;
390         p32 = (uint32_t*)&epnt->e_ident;
391         if (*p32 != ELFMAG_U32) {
392                 _dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
393                                 libname);
394                 _dl_internal_error_number = LD_ERROR_NOTELF;
395                 _dl_close(infile);
396                 _dl_munmap(header, _dl_pagesize);
397                 return NULL;
398         }
399
400         if ((epnt->e_type != ET_DYN) || (epnt->e_machine != MAGIC1
401 #ifdef MAGIC2
402                                 && epnt->e_machine != MAGIC2
403 #endif
404                                 ))
405         {
406                 _dl_internal_error_number =
407                         (epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
408                 _dl_dprintf(2, "%s: '%s' is not an ELF executable for " ELF_TARGET
409                                 "\n", _dl_progname, libname);
410                 _dl_close(infile);
411                 _dl_munmap(header, _dl_pagesize);
412                 return NULL;
413         }
414
415         ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
416
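        /* piclib selects the mapping strategy: 1 (the default) is a normal
         * position-independent library mapped as one contiguous block; 0 is a
         * library linked at an absolute address, which must be mapped at its
         * link-time vaddrs; 2 (set by DL_CHECK_LIB_TYPE on some targets) appears
         * to allow each segment to be placed independently.  */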
417         piclib = 1;
418         for (i = 0; i < epnt->e_phnum; i++) {
419
420                 if (ppnt->p_type == PT_DYNAMIC) {
421                         if (dynamic_addr)
422                                 _dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
423                                                 _dl_progname, libname);
424                         dynamic_addr = ppnt->p_vaddr;
425                 }
426
427                 if (ppnt->p_type == PT_LOAD) {
428                         /* See if this is a PIC library. */
429                         if (i == 0 && ppnt->p_vaddr > 0x1000000) {
430                                 piclib = 0;
431                                 minvma = ppnt->p_vaddr;
432                         }
433                         if (piclib && ppnt->p_vaddr < minvma) {
434                                 minvma = ppnt->p_vaddr;
435                         }
436                         if (((unsigned long) ppnt->p_vaddr + ppnt->p_memsz) > maxvma) {
437                                 maxvma = ppnt->p_vaddr + ppnt->p_memsz;
438                         }
439                 }
440                 if (ppnt->p_type == PT_TLS) {
441 #if defined(USE_TLS) && USE_TLS
442                         if (ppnt->p_memsz == 0)
443                                 /* Nothing to do for an empty segment.  */
444                                 continue;
445                         else
446                                 /* Save for after 'tpnt' is actually allocated. */
447                                 tlsppnt = ppnt;
448 #else
449                         /*
450                          * Yup, the user was an idiot and tried to sneak in a library with
451                          * TLS in it and we don't support it. Let's fall on our own sword
452                          * and scream at the luser while we die.
453                          */
454                         _dl_dprintf(2, "%s: '%s' library contains unsupported TLS\n",
455                                 _dl_progname, libname);
456                         _dl_internal_error_number = LD_ERROR_TLS_FAILED;
457                         _dl_close(infile);
458                         _dl_munmap(header, _dl_pagesize);
459                         return NULL;
460 #endif
461                 }
462                 ppnt++;
463         }
464
465         DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
466
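        /* Round the highest segment end up and the lowest start down to page
         * boundaries (assuming ADDR_ALIGN is the page mask, _dl_pagesize - 1,
         * and PAGE_ALIGN its complement) so the whole vaddr range can be
         * reserved below.  */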
467         maxvma = (maxvma + ADDR_ALIGN) & PAGE_ALIGN;
468         minvma = minvma & ~ADDR_ALIGN;
469
470         flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
471         if (!piclib)
472                 flags |= MAP_FIXED;
473
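        /* For contiguous libraries, reserve the whole minvma..maxvma range with
         * an anonymous PROT_NONE mapping first; the PT_LOAD segments are then
         * mapped over it with MAP_FIXED so they cannot collide with other
         * allocations.  */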
474         if (piclib == 0 || piclib == 1) {
475                 status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
476                                 maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
477                 if (_dl_mmap_check_error(status)) {
478                         _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
479                         _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
480                         _dl_close(infile);
481                         _dl_munmap(header, _dl_pagesize);
482                         return NULL;
483                 }
484                 libaddr = (unsigned long) status;
485                 flags |= MAP_FIXED;
486         }
487
488         /* Get the memory to store the library */
489         ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
490
491         DL_INIT_LOADADDR(lib_loadaddr, libaddr, ppnt, epnt->e_phnum);
492
493         for (i = 0; i < epnt->e_phnum; i++) {
494                 if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
495                         char *addr;
496
497                         addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
498                         if (addr == NULL)
499                                 goto cant_map;
500
501                         DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
502                         ppnt++;
503                         continue;
504                 }
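                /* Remember the RELRO region; _dl_protect_relro() re-protects it
                 * read-only after relocation.  */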
505                 if (ppnt->p_type == PT_GNU_RELRO) {
506                         relro_addr = ppnt->p_vaddr;
507                         relro_size = ppnt->p_memsz;
508                 }
509                 if (ppnt->p_type == PT_LOAD) {
510                         char *tryaddr;
511                         ssize_t size;
512
513                         /* See if this is a PIC library. */
514                         if (i == 0 && ppnt->p_vaddr > 0x1000000) {
515                                 piclib = 0;
516                                 /* flags |= MAP_FIXED; */
517                         }
518
519                         if (ppnt->p_flags & PF_W) {
520                                 unsigned long map_size;
521                                 char *cpnt;
522                                 char *piclib2map = 0;
523
524                                 if (piclib == 2 &&
525                                     /* We might be able to avoid this
526                                        call if memsz doesn't require
527                                        an additional page, but this
528                                        would require mmap to always
529                                        return page-aligned addresses
530                                        and a whole number of pages
531                                        allocated.  Unfortunately on
532                                        uClinux mmap may return misaligned
533                                        addresses and may allocate
534                                        partial pages, so we may end up
535                                        doing unnecessary mmap calls.
536
537                                        This is what we could do if we
538                                        knew mmap would always return
539                                        aligned pages:
540
541                                     ((ppnt->p_vaddr + ppnt->p_filesz
542                                       + ADDR_ALIGN)
543                                      & PAGE_ALIGN)
544                                     < ppnt->p_vaddr + ppnt->p_memsz)
545
546                                        Instead, we have to do this:  */
547                                     ppnt->p_filesz < ppnt->p_memsz)
548                                   {
549                                     piclib2map = (char *)
550                                       _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
551                                                + ppnt->p_memsz,
552                                                LXFLAGS(ppnt->p_flags),
553                                                flags | MAP_ANONYMOUS, -1, 0);
554                                     if (_dl_mmap_check_error(piclib2map))
555                                       goto cant_map;
556                                     DL_INIT_LOADADDR_HDR
557                                       (lib_loadaddr, piclib2map
558                                        + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
559                                   }
560
561                                 tryaddr = piclib == 2 ? piclib2map
562                                   : ((char*) (piclib ? libaddr : 0) +
563                                      (ppnt->p_vaddr & PAGE_ALIGN));
564
565                                 size = (ppnt->p_vaddr & ADDR_ALIGN)
566                                   + ppnt->p_filesz;
567
568                                 /* For !MMU, mmap to a fixed address will fail.
569                                    So instead of desperately calling mmap and failing,
570                                    we set status to MAP_FAILED to save a call
571                                    to mmap ().  */
572 #ifndef __ARCH_USE_MMU__
573                                 if (piclib2map == 0)
574 #endif
575                                   status = (char *) _dl_mmap
576                                     (tryaddr, size, LXFLAGS(ppnt->p_flags),
577                                      flags | (piclib2map ? MAP_FIXED : 0),
578                                      infile, ppnt->p_offset & OFFS_ALIGN);
579 #ifndef __ARCH_USE_MMU__
580                                 else
581                                   status = MAP_FAILED;
582 #endif
583 #ifdef _DL_PREAD
584                                 if (_dl_mmap_check_error(status) && piclib2map
585                                     && (_DL_PREAD (infile, tryaddr, size,
586                                                    ppnt->p_offset & OFFS_ALIGN)
587                                         == size))
588                                   status = tryaddr;
589 #endif
590                                 if (_dl_mmap_check_error(status)
591                                     || (tryaddr && tryaddr != status)) {
592                                 cant_map:
593                                         _dl_dprintf(2, "%s:%i: can't map '%s'\n",
594                                                         _dl_progname, __LINE__, libname);
595                                         _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
596                                         DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
597                                         _dl_close(infile);
598                                         _dl_munmap(header, _dl_pagesize);
599                                         return NULL;
600                                 }
601
602                                 if (! piclib2map) {
603                                   DL_INIT_LOADADDR_HDR
604                                     (lib_loadaddr, status
605                                      + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
606                                 }
607                                 /* Now we want to allocate and
608                                    zero-out any data from the end of
609                                    the region we mapped in from the
610                                    file (filesz) to the end of the
611                                    loadable segment (memsz).  We may
612                                    need additional pages for memsz,
613                                    that we map in below, and we can
614                                    count on the kernel to zero them
615                                    out, but we have to zero out stuff
616                                    in the last page that we mapped in
617                                    from the file.  However, we can't
618                                    assume to have actually obtained
619                                    full pages from the kernel, since
620                                    we didn't ask for them, and uClibc
621                                    may not give us full pages for
622                                    small allocations.  So only zero
623                                    out up to memsz or the end of the
624                                    page, whichever comes first.  */
625
626                                 /* CPNT is the beginning of the memsz
627                                    portion not backed by filesz.  */
628                                 cpnt = (char *) (status + size);
629
630                                 /* MAP_SIZE is the address of the
631                                    beginning of the next page.  */
632                                 map_size = (ppnt->p_vaddr + ppnt->p_filesz
633                                             + ADDR_ALIGN) & PAGE_ALIGN;
634
635 #ifndef MIN
636 # define MIN(a,b) ((a) < (b) ? (a) : (b))
637 #endif
638                                 _dl_memset (cpnt, 0,
639                                             MIN (map_size
640                                                  - (ppnt->p_vaddr
641                                                     + ppnt->p_filesz),
642                                                  ppnt->p_memsz
643                                                  - ppnt->p_filesz));
644
645                                 if (map_size < ppnt->p_vaddr + ppnt->p_memsz
646                                     && !piclib2map) {
647                                         tryaddr = map_size + (char*)(piclib ? libaddr : 0);
648                                         status = (char *) _dl_mmap(tryaddr,
649                                                 ppnt->p_vaddr + ppnt->p_memsz - map_size,
650                                                 LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
651                                         if (_dl_mmap_check_error(status)
652                                             || tryaddr != status)
653                                                 goto cant_map;
654                                 }
655                         } else {
656                                 tryaddr = (piclib == 2 ? 0
657                                            : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
658                                            + (piclib ? libaddr : 0));
659                                 size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
660                                 status = (char *) _dl_mmap
661                                            (tryaddr, size, LXFLAGS(ppnt->p_flags),
662                                             flags | (piclib == 2 ? MAP_EXECUTABLE
663                                                      | MAP_DENYWRITE : 0),
664                                             infile, ppnt->p_offset & OFFS_ALIGN);
665                                 if (_dl_mmap_check_error(status)
666                                     || (tryaddr && tryaddr != status))
667                                   goto cant_map;
668                                 DL_INIT_LOADADDR_HDR
669                                   (lib_loadaddr, status
670                                    + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
671                         }
672
673                         /* if (libaddr == 0 && piclib) {
674                            libaddr = (unsigned long) status;
675                            flags |= MAP_FIXED;
676                            } */
677                 }
678                 ppnt++;
679         }
680         _dl_close(infile);
681
682         /* For a non-PIC library, the addresses are all absolute */
683         if (piclib) {
684                 dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr);
685         }
686
687         /*
688          * OK, the ELF library is now loaded into VM in the correct locations
689          * The next step is to go through and do the dynamic linking (if needed).
690          */
691
692         /* Start by scanning the dynamic section to get all of the pointers */
693
694         if (!dynamic_addr) {
695                 _dl_internal_error_number = LD_ERROR_NODYNAMIC;
696                 _dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
697                                 _dl_progname, libname);
698                 _dl_munmap(header, _dl_pagesize);
699                 return NULL;
700         }
701
702         dpnt = (ElfW(Dyn) *) dynamic_addr;
703         _dl_memset(dynamic_info, 0, sizeof(dynamic_info));
704         rtld_flags = _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
705         /* If the TEXTREL is set, this means that we need to make the pages
706            writable before we perform relocations.  Do this now. They get set
707            back again later. */
708
709         if (dynamic_info[DT_TEXTREL]) {
710 #ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__
711                 ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
712                 for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
713                         if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
714                                 _dl_mprotect((void *) ((piclib ? libaddr : 0) +
715                                                         (ppnt->p_vaddr & PAGE_ALIGN)),
716                                                 (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
717                                                 PROT_READ | PROT_WRITE | PROT_EXEC);
718                         }
719                 }
720 #else
721                 _dl_dprintf(_dl_debug_file, "Can't modify %s's text section."
722                         " Use GCC option -fPIC for shared objects, please.\n",
723                         libname);
724                 _dl_exit(1);
725 #endif
726         }
727
728         tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
729                         dynamic_addr, 0);
730         tpnt->relro_addr = relro_addr;
731         tpnt->relro_size = relro_size;
732         tpnt->st_dev = st.st_dev;
733         tpnt->st_ino = st.st_ino;
734         tpnt->ppnt = (ElfW(Phdr) *) DL_RELOC_ADDR(tpnt->loadaddr, epnt->e_phoff);
735         tpnt->n_phent = epnt->e_phnum;
736         tpnt->rtld_flags |= rtld_flags;
737
738 #if defined(USE_TLS) && USE_TLS
739         if (tlsppnt) {
740                 _dl_debug_early("Found TLS header for %s\n", libname);
741 # if NO_TLS_OFFSET != 0
742                 tpnt->l_tls_offset = NO_TLS_OFFSET;
743 # endif
744                 tpnt->l_tls_blocksize = tlsppnt->p_memsz;
745                 tpnt->l_tls_align = tlsppnt->p_align;
746                 if (tlsppnt->p_align == 0)
747                         tpnt->l_tls_firstbyte_offset = 0;
748                 else
749                         tpnt->l_tls_firstbyte_offset = tlsppnt->p_vaddr &
750                                 (tlsppnt->p_align - 1);
751                 tpnt->l_tls_initimage_size = tlsppnt->p_filesz;
752                 tpnt->l_tls_initimage = (void *) tlsppnt->p_vaddr;
753
754                 /* Assign the next available module ID.  */
755                 tpnt->l_tls_modid = _dl_next_tls_modid ();
756
757                 /* We know the load address, so add it to the offset. */
758                 if (tpnt->l_tls_initimage != NULL)
759                 {
760 # ifdef __SUPPORT_LD_DEBUG_EARLY__
761                         unsigned int tmp = (unsigned int) tpnt->l_tls_initimage;
762                         tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
763                         _dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", tmp, tpnt->l_tls_initimage, tpnt->l_tls_initimage_size);
764                         tmp = 0;
765 # else
766                         tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
767 # endif
768                 }
769         }
770 #endif
771
772         /*
773          * Add this object into the symbol chain
774          */
775         if (*rpnt) {
776                 (*rpnt)->next = _dl_malloc(sizeof(struct dyn_elf));
777                 _dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
778                 (*rpnt)->next->prev = (*rpnt);
779                 *rpnt = (*rpnt)->next;
780         }
781 #ifndef SHARED
782         /* When statically linked, the first time we dlopen a DSO
783          * the *rpnt is NULL, so we need to allocate memory for it,
784          * and initialize the _dl_symbol_table.
785          */
786         else {
787                 *rpnt = _dl_symbol_tables = _dl_malloc(sizeof(struct dyn_elf));
788                 _dl_memset(*rpnt, 0, sizeof(struct dyn_elf));
789         }
790 #endif
791         (*rpnt)->dyn = tpnt;
792         tpnt->symbol_scope = _dl_symbol_tables;
793         tpnt->usage_count++;
794         tpnt->libtype = elf_lib;
795
796         /*
797          * OK, the next thing we need to do is to insert the dynamic linker into
798          * the proper entry in the GOT so that the PLT symbols can be properly
799          * resolved.
800          */
801
802         lpnt = (unsigned long *) dynamic_info[DT_PLTGOT];
803
804         if (lpnt) {
805                 lpnt = (unsigned long *) (dynamic_info[DT_PLTGOT]);
806                 INIT_GOT(lpnt, tpnt);
807         }
808
809         _dl_if_debug_dprint("\n\tfile='%s';  generating link map\n", libname);
810         _dl_if_debug_dprint("\t\tdynamic: %x  base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr));
811         _dl_if_debug_dprint("\t\t  entry: %x  phdr: %x  phnum: %x\n\n",
812                         DL_RELOC_ADDR(lib_loadaddr, epnt->e_entry), tpnt->ppnt, tpnt->n_phent);
813
814         _dl_munmap(header, _dl_pagesize);
815
816         return tpnt;
817 }
818
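/* Recursively relocate the rest of the dyn_elf chain first, then this object:
 * the ordinary relocation table is processed (with the leading XX_RELATIVE
 * entries batched through elf_machine_relative()), followed by the PLT
 * relocations, handled either eagerly or lazily depending on RTLD_NOW and
 * DT_BIND_NOW.  */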
819 /* now_flag must be RTLD_NOW or zero */
820 int _dl_fixup(struct dyn_elf *rpnt, int now_flag)
821 {
822         int goof = 0;
823         struct elf_resolve *tpnt;
824         ElfW(Word) reloc_size, relative_count;
825         ElfW(Addr) reloc_addr;
826
827         if (rpnt->next)
828                 goof = _dl_fixup(rpnt->next, now_flag);
829         if (goof)
830                 return goof;
831         tpnt = rpnt->dyn;
832
833         if (!(tpnt->init_flag & RELOCS_DONE))
834                 _dl_if_debug_dprint("relocation processing: %s\n", tpnt->libname);
835
836         if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
837                 _dl_if_debug_dprint("%s: can't handle %s relocation records\n",
838                                 _dl_progname, UNSUPPORTED_RELOC_STR);
839                 goof++;
840                 return goof;
841         }
842
843         reloc_size = tpnt->dynamic_info[DT_RELOC_TABLE_SIZE];
844 /* On some machines, notably SPARC & PPC, DT_REL* includes DT_JMPREL in its
845    range.  Note that according to the ELF spec, this is completely legal! */
846 #ifdef ELF_MACHINE_PLTREL_OVERLAP
847         reloc_size -= tpnt->dynamic_info [DT_PLTRELSZ];
848 #endif
849         if (tpnt->dynamic_info[DT_RELOC_TABLE_ADDR] &&
850             !(tpnt->init_flag & RELOCS_DONE)) {
851                 reloc_addr = tpnt->dynamic_info[DT_RELOC_TABLE_ADDR];
852                 relative_count = tpnt->dynamic_info[DT_RELCONT_IDX];
853                 if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */
854                         reloc_size -= relative_count * sizeof(ELF_RELOC);
855                         elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
856                         reloc_addr += relative_count * sizeof(ELF_RELOC);
857                 }
858                 goof += _dl_parse_relocation_information(rpnt,
859                                 reloc_addr,
860                                 reloc_size);
861                 tpnt->init_flag |= RELOCS_DONE;
862         }
863         if (tpnt->dynamic_info[DT_BIND_NOW])
864                 now_flag = RTLD_NOW;
865         if (tpnt->dynamic_info[DT_JMPREL] &&
866             (!(tpnt->init_flag & JMP_RELOCS_DONE) ||
867              (now_flag && !(tpnt->rtld_flags & now_flag)))) {
868                 tpnt->rtld_flags |= now_flag;
869                 if (!(tpnt->rtld_flags & RTLD_NOW)) {
870                         _dl_parse_lazy_relocation_information(rpnt,
871                                         tpnt->dynamic_info[DT_JMPREL],
872                                         tpnt->dynamic_info [DT_PLTRELSZ]);
873                 } else {
874                         goof += _dl_parse_relocation_information(rpnt,
875                                         tpnt->dynamic_info[DT_JMPREL],
876                                         tpnt->dynamic_info[DT_PLTRELSZ]);
877                 }
878                 tpnt->init_flag |= JMP_RELOCS_DONE;
879         }
880
881 #if 0
882 /* _dl_add_to_slotinfo is called by init_tls() for the initial DSO
883    or by dlopen() for dynamically loaded DSOs. */
884 #if defined(USE_TLS) && USE_TLS
885         /* Add object to slot information data if necessary. */
886         if (tpnt->l_tls_blocksize != 0 && tls_init_tp_called)
887                 _dl_add_to_slotinfo ((struct link_map *) tpnt);
888 #endif
889 #endif
890         return goof;
891 }
892
893 /* Minimal printf which handles only %s, %i/%d, and %x/%X */
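/* Typical use: _dl_dprintf(2, "%s: can't map '%s'\n", _dl_progname, libname); */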
894 void _dl_dprintf(int fd, const char *fmt, ...)
895 {
896 #if __WORDSIZE > 32
897         long int num;
898 #else
899         int num;
900 #endif
901         va_list args;
902         char *start, *ptr, *string;
903         char *buf;
904
905         if (!fmt)
906                 return;
907
908         buf = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
909                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
910         if (_dl_mmap_check_error(buf)) {
911                 _dl_write(fd, "mmap of a spare page failed!\n", 29);
912                 _dl_exit(20);
913         }
914
915         start = ptr = buf;
916
917         if (_dl_strlen(fmt) >= (_dl_pagesize - 1)) {
918                 _dl_write(fd, "overflow\n", 9);
919                 _dl_exit(20);
920         }
921
922         _dl_strcpy(buf, fmt);
923         va_start(args, fmt);
924
925         while (start) {
926                 while (*ptr != '%' && *ptr) {
927                         ptr++;
928                 }
929
930                 if (*ptr == '%') {
931                         *ptr++ = '\0';
932                         _dl_write(fd, start, _dl_strlen(start));
933
934                         switch (*ptr++) {
935                                 case 's':
936                                         string = va_arg(args, char *);
937
938                                         if (!string)
939                                                 _dl_write(fd, "(null)", 6);
940                                         else
941                                                 _dl_write(fd, string, _dl_strlen(string));
942                                         break;
943
944                                 case 'i':
945                                 case 'd':
946                                         {
947                                                 char tmp[22];
948 #if __WORDSIZE > 32
949                                                 num = va_arg(args, long int);
950 #else
951                                                 num = va_arg(args, int);
952 #endif
953                                                 string = _dl_simple_ltoa(tmp, num);
954                                                 _dl_write(fd, string, _dl_strlen(string));
955                                                 break;
956                                         }
957                                 case 'x':
958                                 case 'X':
959                                         {
960                                                 char tmp[22];
961 #if __WORDSIZE > 32
962                                                 num = va_arg(args, long int);
963 #else
964                                                 num = va_arg(args, int);
965 #endif
966                                                 string = _dl_simple_ltoahex(tmp, num);
967                                                 _dl_write(fd, string, _dl_strlen(string));
968                                                 break;
969                                         }
970                                 default:
971                                         _dl_write(fd, "(null)", 6);
972                                         break;
973                         }
974
975                         start = ptr;
976                 } else {
977                         _dl_write(fd, start, _dl_strlen(start));
978                         start = NULL;
979                 }
980         }
981         _dl_munmap(buf, _dl_pagesize);
982         return;
983 }
984
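/* Duplicate STRING into memory obtained from the loader's private _dl_malloc(). */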
985 char *_dl_strdup(const char *string)
986 {
987         char *retval;
988         int len;
989
990         len = _dl_strlen(string);
991         retval = _dl_malloc(len + 1);
992         _dl_strcpy(retval, string);
993         return retval;
994 }
995
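/* Out-of-line wrapper around __dl_parse_dynamic_info() (presumably a static
 * inline from a header) so the dynamic-section parser has an externally
 * callable entry point.  */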
996 unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[],
997                                     void *debug_addr, DL_LOADADDR_TYPE load_off)
998 {
999         return __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);
1000 }