* This file contains the helper routines to load an ELF shared
* library into memory and add the symbol table info to the chain.
*
- * Copyright (C) 2000-2004 by Erik Andersen <andersen@codepoet.org>
+ * Copyright (C) 2000-2006 by Erik Andersen <andersen@codepoet.org>
* Copyright (c) 1994-2000 Eric Youngdale, Peter MacDonald,
* David Engel, Hongjiu Lu and Mitch D'Souza
*
libentry_t *libent;
int i, strtabsize;
- if (_dl_cache_addr == (caddr_t) - 1)
+ if (_dl_cache_addr == MAP_FAILED)
return -1;
else if (_dl_cache_addr != NULL)
return 0;
if (_dl_stat(LDSO_CACHE, &st)
- || (fd = _dl_open(LDSO_CACHE, O_RDONLY, 0)) < 0) {
- _dl_cache_addr = (caddr_t) - 1; /* so we won't try again */
+ || (fd = _dl_open(LDSO_CACHE, O_RDONLY|O_CLOEXEC, 0)) < 0) {
+ _dl_cache_addr = MAP_FAILED; /* so we won't try again */
return -1;
}
_dl_cache_size = st.st_size;
- _dl_cache_addr = (caddr_t) _dl_mmap(0, _dl_cache_size, PROT_READ, MAP_SHARED, fd, 0);
+ _dl_cache_addr = _dl_mmap(0, _dl_cache_size, PROT_READ, LDSO_CACHE_MMAP_FLAGS, fd, 0);
_dl_close(fd);
if (_dl_mmap_check_error(_dl_cache_addr)) {
- _dl_dprintf(2, "%s: can't map cache '%s'\n",
- _dl_progname, LDSO_CACHE);
+ _dl_dprintf(2, "%s:%i: can't map '%s'\n",
+ _dl_progname, __LINE__, LDSO_CACHE);
return -1;
}
fail:
_dl_munmap(_dl_cache_addr, _dl_cache_size);
- _dl_cache_addr = (caddr_t) - 1;
+ _dl_cache_addr = MAP_FAILED;
return -1;
}
int _dl_unmap_cache(void)
{
- if (_dl_cache_addr == NULL || _dl_cache_addr == (caddr_t) - 1)
+ if (_dl_cache_addr == NULL || _dl_cache_addr == MAP_FAILED)
return -1;
#if 1
#endif
-void
+void
_dl_protect_relro (struct elf_resolve *l)
{
- ElfW(Addr) start = ((l->loadaddr + l->relro_addr)
- & ~(_dl_pagesize - 1));
- ElfW(Addr) end = ((l->loadaddr + l->relro_addr + l->relro_size)
- & ~(_dl_pagesize - 1));
+ ElfW(Addr) base = (ElfW(Addr)) DL_RELOC_ADDR(l->loadaddr, l->relro_addr);
+ ElfW(Addr) start = (base & PAGE_ALIGN);
+ ElfW(Addr) end = ((base + l->relro_size) & PAGE_ALIGN);
_dl_if_debug_dprint("RELRO protecting %s: start:%x, end:%x\n", l->libname, start, end);
if (start != end &&
_dl_mprotect ((void *) start, end - start, PROT_READ) < 0) {
search_for_named_library(const char *name, int secure, const char *path_list,
struct dyn_elf **rpnt)
{
- char *path, *path_n;
- char mylibname[2050];
+ char *path, *path_n, *mylibname;
struct elf_resolve *tpnt;
- int done = 0;
+ int done;
if (path_list==NULL)
return NULL;
- /* We need a writable copy of this string */
- path = _dl_strdup(path_list);
- if (!path) {
- _dl_dprintf(2, "Out of memory!\n");
- _dl_exit(0);
- }
+ /* We need a writable copy of this string, but we don't
+ * need this allocated permanently since we don't want
+ * to leak memory, so use alloca to put path on the stack */
+ done = _dl_strlen(path_list);
+ path = alloca(done + 1);
+
+ /* another bit of local storage */
+ mylibname = alloca(2050);
+
+ _dl_memcpy(path, path_list, done+1);
/* Unlike ldd.c, don't bother to eliminate double //s */
/* Replace colons with zeros in path_list */
/* : at the beginning or end of path maps to CWD */
/* :: anywhere maps CWD */
- /* "" maps to CWD */
+ /* "" maps to CWD */
+ done = 0;
path_n = path;
do {
if (*path == 0) {
unsigned long _dl_internal_error_number;
struct elf_resolve *_dl_load_shared_library(int secure, struct dyn_elf **rpnt,
- struct elf_resolve *tpnt, char *full_libname, int __attribute__((unused)) trace_loaded_objects)
+ struct elf_resolve *tpnt, char *full_libname, int attribute_unused trace_loaded_objects)
{
char *pnt;
struct elf_resolve *tpnt1;
* the hard coded paths that follow (i.e before /lib and /usr/lib).
*/
#ifdef __LDSO_CACHE_SUPPORT__
- if (_dl_cache_addr != NULL && _dl_cache_addr != (caddr_t) - 1) {
+ if (_dl_cache_addr != NULL && _dl_cache_addr != MAP_FAILED) {
int i;
header_t *header = (header_t *) _dl_cache_addr;
libentry_t *libent = (libentry_t *) & header[1];
_dl_if_debug_dprint("\tsearching cache='%s'\n", LDSO_CACHE);
for (i = 0; i < header->nlibs; i++) {
- if ((libent[i].flags == LIB_ELF ||
- libent[i].flags == LIB_ELF_LIBC0 ||
- libent[i].flags == LIB_ELF_LIBC5) &&
- _dl_strcmp(libname, strs + libent[i].sooffset) == 0 &&
- (tpnt1 = _dl_load_elf_shared_library(secure,
- rpnt, strs + libent[i].liboffset)))
+ if ((libent[i].flags == LIB_ELF
+ || libent[i].flags == LIB_ELF_LIBC0
+ || libent[i].flags == LIB_ELF_LIBC5)
+ && _dl_strcmp(libname, strs + libent[i].sooffset) == 0
+ && (tpnt1 = _dl_load_elf_shared_library(secure, rpnt, strs + libent[i].liboffset))
+ ) {
return tpnt1;
+ }
}
}
#endif
/* Look for libraries wherever the shared library loader
* was installed */
_dl_if_debug_dprint("\tsearching ldso dir='%s'\n", _dl_ldsopath);
- if ((tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt)) != NULL)
- {
+ tpnt1 = search_for_named_library(libname, secure, _dl_ldsopath, rpnt);
+ if (tpnt1 != NULL)
return tpnt1;
- }
-
/* Lastly, search the standard list of paths for the library.
This list must exactly match the list in uClibc/ldso/util/ldd.c */
_dl_if_debug_dprint("\tsearching full lib path list\n");
- if ((tpnt1 = search_for_named_library(libname, secure,
+ tpnt1 = search_for_named_library(libname, secure,
UCLIBC_RUNTIME_PREFIX "lib:"
UCLIBC_RUNTIME_PREFIX "usr/lib"
#ifndef __LDSO_CACHE_SUPPORT__
":" UCLIBC_RUNTIME_PREFIX "usr/X11R6/lib"
#endif
- , rpnt)
- ) != NULL)
- {
+ , rpnt);
+ if (tpnt1 != NULL)
return tpnt1;
- }
goof:
/* Well, we shot our wad on that one. All we can do now is punt */
return NULL;
}
+/*
+ * Make a writeable mapping of a segment, regardless of whether PF_W is
+ * set or not.  Returns the address the segment was mapped at (for
+ * piclib == 2, the base of the anonymous region backing it), or
+ * NULL/0 on failure.
+ */
+static void *
+map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags,
+ unsigned long libaddr)
+{
+ int prot_flags = ppnt->p_flags | PF_W;
+ char *status, *retval;
+ char *tryaddr;
+ ssize_t size;
+ unsigned long map_size;
+ char *cpnt;
+ char *piclib2map = NULL;
+
+ if (piclib == 2 &&
+ /* We might be able to avoid this call if memsz doesn't
+ require an additional page, but this would require mmap
+ to always return page-aligned addresses and a whole
+ number of pages allocated. Unfortunately on uClinux
+ mmap may return misaligned addresses and may allocate
+ partial pages, so we may end up doing unnecessary mmap
+ calls.
+
+ This is what we could do if we knew mmap would always
+ return aligned pages:
+
+ ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) &
+ PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz)
+
+ Instead, we have to do this: */
+ ppnt->p_filesz < ppnt->p_memsz)
+ {
+ piclib2map = (char *)
+ _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz,
+ LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0);
+ if (_dl_mmap_check_error(piclib2map))
+ return 0;
+ }
+
+ tryaddr = piclib == 2 ? piclib2map
+ : ((char*) (piclib ? libaddr : 0) +
+ (ppnt->p_vaddr & PAGE_ALIGN));
+
+ size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+
+ /* For !MMU, mmap to a fixed address will fail.
+ So instead of desperately calling mmap and failing,
+ we set status to MAP_FAILED to save a call
+ to mmap (). */
+#ifndef __ARCH_USE_MMU__
+ if (piclib2map == 0)
+#endif
+ status = (char *) _dl_mmap
+ (tryaddr, size, LXFLAGS(prot_flags),
+ flags | (piclib2map ? MAP_FIXED : 0),
+ infile, ppnt->p_offset & OFFS_ALIGN);
+#ifndef __ARCH_USE_MMU__
+ else
+ status = MAP_FAILED;
+#endif
+#ifdef _DL_PREAD
+ /* Fall back to reading the file contents into the anonymous
+ region when the file-backed mmap failed. */
+ if (_dl_mmap_check_error(status) && piclib2map
+ && (_DL_PREAD (infile, tryaddr, size,
+ ppnt->p_offset & OFFS_ALIGN) == size))
+ status = tryaddr;
+#endif
+ if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status))
+ return 0;
+
+ if (piclib2map)
+ retval = piclib2map;
+ else
+ retval = status;
+
+ /* Now we want to allocate and zero-out any data from the end
+ of the region we mapped in from the file (filesz) to the
+ end of the loadable segment (memsz). We may need
+ additional pages for memsz, that we map in below, and we
+ can count on the kernel to zero them out, but we have to
+ zero out stuff in the last page that we mapped in from the
+ file. However, we can't assume to have actually obtained
+ full pages from the kernel, since we didn't ask for them,
+ and uClibc may not give us full pages for small
+ allocations. So only zero out up to memsz or the end of
+ the page, whichever comes first. */
+
+ /* CPNT is the beginning of the memsz portion not backed by
+ filesz. */
+ cpnt = (char *) (status + size);
+
+ /* MAP_SIZE is the address of the
+ beginning of the next page. */
+ map_size = (ppnt->p_vaddr + ppnt->p_filesz
+ + ADDR_ALIGN) & PAGE_ALIGN;
+
+ _dl_memset (cpnt, 0,
+ MIN (map_size
+ - (ppnt->p_vaddr
+ + ppnt->p_filesz),
+ ppnt->p_memsz
+ - ppnt->p_filesz));
+
+ /* Map any whole pages of bss beyond the file-backed pages. */
+ if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) {
+ tryaddr = map_size + (char*)(piclib ? libaddr : 0);
+ status = (char *) _dl_mmap(tryaddr,
+ ppnt->p_vaddr + ppnt->p_memsz - map_size,
+ LXFLAGS(prot_flags),
+ flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (_dl_mmap_check_error(status) || tryaddr != status)
+ return NULL;
+ }
+ return retval;
+}
/*
* Read one ELF library into memory, mmap it into the correct locations and
*/
struct elf_resolve *_dl_load_elf_shared_library(int secure,
- struct dyn_elf **rpnt, char *libname)
+ struct dyn_elf **rpnt, const char *libname)
{
ElfW(Ehdr) *epnt;
unsigned long dynamic_addr = 0;
ElfW(Dyn) *dpnt;
struct elf_resolve *tpnt;
ElfW(Phdr) *ppnt;
+#if defined(USE_TLS) && USE_TLS
+ ElfW(Phdr) *tlsppnt = NULL;
+#endif
char *status, *header;
unsigned long dynamic_info[DYNAMIC_SIZE];
unsigned long *lpnt;
unsigned long libaddr;
unsigned long minvma = 0xffffffff, maxvma = 0;
+ unsigned int rtld_flags;
int i, flags, piclib, infile;
ElfW(Addr) relro_addr = 0;
size_t relro_size = 0;
struct stat st;
+ uint32_t *p32;
+ DL_LOADADDR_TYPE lib_loadaddr = 0;
+ DL_INIT_LOADADDR_EXTRA_DECLS
libaddr = 0;
infile = _dl_open(libname, O_RDONLY, 0);
}
/* If we are in secure mode (i.e. a setu/gid binary using LD_PRELOAD),
we don't load the library if it isn't setuid. */
- if (secure)
+ if (secure) {
if (!(st.st_mode & S_ISUID)) {
_dl_close(infile);
return NULL;
}
+ }
/* Check if file is already loaded */
for (tpnt = _dl_loaded_modules; tpnt; tpnt = tpnt->next) {
- if(tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {
+ if (tpnt->st_dev == st.st_dev && tpnt->st_ino == st.st_ino) {
/* Already loaded */
tpnt->usage_count++;
_dl_close(infile);
}
}
header = _dl_mmap((void *) 0, _dl_pagesize, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZE, -1, 0);
if (_dl_mmap_check_error(header)) {
- _dl_dprintf(2, "%s: can't map '%s'\n", _dl_progname, libname);
+ _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
_dl_close(infile);
return NULL;
- };
+ }
_dl_read(infile, header, _dl_pagesize);
epnt = (ElfW(Ehdr) *) (intptr_t) header;
- if (epnt->e_ident[0] != 0x7f ||
- epnt->e_ident[1] != 'E' ||
- epnt->e_ident[2] != 'L' ||
- epnt->e_ident[3] != 'F')
- {
+ p32 = (uint32_t*)&epnt->e_ident;
+ if (*p32 != ELFMAG_U32) {
_dl_dprintf(2, "%s: '%s' is not an ELF file\n", _dl_progname,
libname);
_dl_internal_error_number = LD_ERROR_NOTELF;
_dl_close(infile);
_dl_munmap(header, _dl_pagesize);
return NULL;
- };
+ }
- if ((epnt->e_type != ET_DYN) || (epnt->e_machine != MAGIC1
+ if ((epnt->e_type != ET_DYN
+#ifdef __LDSO_STANDALONE_SUPPORT__
+ && epnt->e_type != ET_EXEC
+#endif
+ ) || (epnt->e_machine != MAGIC1
#ifdef MAGIC2
&& epnt->e_machine != MAGIC2
#endif
- ))
+ ))
{
_dl_internal_error_number =
(epnt->e_type != ET_DYN ? LD_ERROR_NOTDYN : LD_ERROR_NOTMAGIC);
_dl_close(infile);
_dl_munmap(header, _dl_pagesize);
return NULL;
- };
+ }
ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
_dl_dprintf(2, "%s: '%s' has more than one dynamic section\n",
_dl_progname, libname);
dynamic_addr = ppnt->p_vaddr;
- };
+ }
if (ppnt->p_type == PT_LOAD) {
/* See if this is a PIC library. */
- if (i == 0 && ppnt->p_vaddr > 0x1000000) {
+ if (minvma == 0xffffffff && ppnt->p_vaddr > 0x1000000) {
piclib = 0;
minvma = ppnt->p_vaddr;
}
maxvma = ppnt->p_vaddr + ppnt->p_memsz;
}
}
+ if (ppnt->p_type == PT_TLS) {
+#if defined(USE_TLS) && USE_TLS
+ if (ppnt->p_memsz == 0)
+ /* Nothing to do for an empty segment. */
+ continue;
+ else
+ /* Save for after 'tpnt' is actually allocated. */
+ tlsppnt = ppnt;
+#else
+ /*
+ * Yup, the user was an idiot and tried to sneak in a library with
+ * TLS in it and we don't support it. Let's fall on our own sword
+ * and scream at the luser while we die.
+ */
+ _dl_dprintf(2, "%s: '%s' library contains unsupported TLS\n",
+ _dl_progname, libname);
+ _dl_internal_error_number = LD_ERROR_TLS_FAILED;
+ _dl_close(infile);
+ _dl_munmap(header, _dl_pagesize);
+ return NULL;
+#endif
+ }
ppnt++;
- };
+ }
+
+#ifdef __LDSO_STANDALONE_SUPPORT__
+ if (epnt->e_type == ET_EXEC)
+ piclib = 0;
+#endif
+
+ DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
- maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
- minvma = minvma & ~0xffffU;
+ maxvma = (maxvma + ADDR_ALIGN) & PAGE_ALIGN;
+ minvma = minvma & ~ADDR_ALIGN;
flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
- if (!piclib)
- flags |= MAP_FIXED;
- status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
- maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
- if (_dl_mmap_check_error(status)) {
- _dl_dprintf(2, "%s: can't map %s\n", _dl_progname, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
- };
- libaddr = (unsigned long) status;
- flags |= MAP_FIXED;
+ if (piclib == 0 || piclib == 1) {
+ status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
+ maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
+ if (_dl_mmap_check_error(status)) {
+ cant_map:
+ _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
+ _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
+ _dl_close(infile);
+ _dl_munmap(header, _dl_pagesize);
+ return NULL;
+ }
+ libaddr = (unsigned long) status;
+ flags |= MAP_FIXED;
+ }
/* Get the memory to store the library */
ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
+ DL_INIT_LOADADDR(lib_loadaddr, libaddr - minvma, ppnt, epnt->e_phnum);
+
for (i = 0; i < epnt->e_phnum; i++) {
+ if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
+ char *addr;
+
+ addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
+ if (addr == NULL) {
+ cant_map1:
+ DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
+ goto cant_map;
+ }
+
+ DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
+ ppnt++;
+ continue;
+ }
if (ppnt->p_type == PT_GNU_RELRO) {
relro_addr = ppnt->p_vaddr;
relro_size = ppnt->p_memsz;
}
if (ppnt->p_type == PT_LOAD) {
+ char *tryaddr;
+ ssize_t size;
- /* See if this is a PIC library. */
- if (i == 0 && ppnt->p_vaddr > 0x1000000) {
- piclib = 0;
- /* flags |= MAP_FIXED; */
+ if (ppnt->p_flags & PF_W) {
+ status = map_writeable (infile, ppnt, piclib, flags, libaddr);
+ if (status == NULL)
+ goto cant_map1;
+ } else {
+ tryaddr = (piclib == 2 ? 0
+ : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
+ + (piclib ? libaddr : lib_loadaddr));
+ size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+ status = (char *) _dl_mmap
+ (tryaddr, size, LXFLAGS(ppnt->p_flags),
+ flags | (piclib == 2 ? MAP_EXECUTABLE
+ | MAP_DENYWRITE : 0),
+ infile, ppnt->p_offset & OFFS_ALIGN);
+ if (_dl_mmap_check_error(status)
+ || (tryaddr && tryaddr != status))
+ goto cant_map1;
}
+ DL_INIT_LOADADDR_HDR(lib_loadaddr,
+ status + (ppnt->p_vaddr & ADDR_ALIGN),
+ ppnt);
-
-
- if (ppnt->p_flags & PF_W) {
- unsigned long map_size;
- char *cpnt;
-
- status = (char *) _dl_mmap((char *) ((piclib ? libaddr : 0) +
- (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN)
- + ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags, infile,
- ppnt->p_offset & OFFS_ALIGN);
-
- if (_dl_mmap_check_error(status)) {
- _dl_dprintf(2, "%s: can't map '%s'\n",
- _dl_progname, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_munmap((char *) libaddr, maxvma - minvma);
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
- };
-
- /* Pad the last page with zeroes. */
- cpnt = (char *) (status + (ppnt->p_vaddr & ADDR_ALIGN) +
- ppnt->p_filesz);
- while (((unsigned long) cpnt) & ADDR_ALIGN)
- *cpnt++ = 0;
-
- /* I am not quite sure if this is completely
- * correct to do or not, but the basic way that
- * we handle bss segments is that we mmap
- * /dev/zero if there are any pages left over
- * that are not mapped as part of the file */
-
- map_size = (ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & PAGE_ALIGN;
-
- if (map_size < ppnt->p_vaddr + ppnt->p_memsz)
- status = (char *) _dl_mmap((char *) map_size +
- (piclib ? libaddr : 0),
- ppnt->p_vaddr + ppnt->p_memsz - map_size,
- LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS, -1, 0);
- } else
- status = (char *) _dl_mmap((char *) (ppnt->p_vaddr & PAGE_ALIGN)
- + (piclib ? libaddr : 0), (ppnt->p_vaddr & ADDR_ALIGN) +
- ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags,
- infile, ppnt->p_offset & OFFS_ALIGN);
- if (_dl_mmap_check_error(status)) {
- _dl_dprintf(2, "%s: can't map '%s'\n", _dl_progname, libname);
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
- _dl_munmap((char *) libaddr, maxvma - minvma);
- _dl_close(infile);
- _dl_munmap(header, _dl_pagesize);
- return NULL;
- };
-
- /* if(libaddr == 0 && piclib) {
+ /* if (libaddr == 0 && piclib) {
libaddr = (unsigned long) status;
flags |= MAP_FIXED;
- }; */
- };
+ } */
+ }
ppnt++;
- };
- _dl_close(infile);
+ }
- /* For a non-PIC library, the addresses are all absolute */
- if (piclib) {
- dynamic_addr += (unsigned long) libaddr;
+ /*
+ * The dynamic_addr must take the lib_loadaddr value into account;
+ * note it is zero when the SO has been mapped at the ELF's physical addr
+ */
+ if (lib_loadaddr) {
+ dynamic_addr = (unsigned long) DL_RELOC_ADDR(lib_loadaddr, dynamic_addr);
}
/*
_dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
_dl_progname, libname);
_dl_munmap(header, _dl_pagesize);
+ _dl_close(infile);
return NULL;
}
dpnt = (ElfW(Dyn) *) dynamic_addr;
_dl_memset(dynamic_info, 0, sizeof(dynamic_info));
- _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, libaddr);
+ rtld_flags = _dl_parse_dynamic_info(dpnt, dynamic_info, NULL, lib_loadaddr);
/* If the TEXTREL is set, this means that we need to make the pages
writable before we perform relocations. Do this now. They get set
back again later. */
#ifndef __FORCE_SHAREABLE_TEXT_SEGMENTS__
ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
- if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W))
- _dl_mprotect((void *) ((piclib ? libaddr : 0) +
+ if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
+#ifdef __ARCH_USE_MMU__
+ _dl_mprotect((void *) ((piclib ? libaddr : lib_loadaddr) +
(ppnt->p_vaddr & PAGE_ALIGN)),
(ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
PROT_READ | PROT_WRITE | PROT_EXEC);
+#else
+ void *new_addr;
+ new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr);
+ if (!new_addr) {
+ _dl_dprintf(_dl_debug_file, "Can't modify %s's text section.",
+ libname);
+ _dl_exit(1);
+ }
+ DL_UPDATE_LOADADDR_HDR(lib_loadaddr,
+ new_addr + (ppnt->p_vaddr & ADDR_ALIGN),
+ ppnt);
+#endif
+ }
}
#else
- _dl_dprintf(_dl_debug_file, "Can't modify %s's text section. Use GCC option -fPIC for shared objects, please.\n",libname);
+ _dl_dprintf(_dl_debug_file, "Can't modify %s's text section."
+ " Use GCC option -fPIC for shared objects, please.\n",
+ libname);
_dl_exit(1);
#endif
}
- tpnt = _dl_add_elf_hash_table(libname, (char *) libaddr, dynamic_info,
+ _dl_close(infile);
+
+ tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
dynamic_addr, 0);
+ tpnt->mapaddr = libaddr;
tpnt->relro_addr = relro_addr;
tpnt->relro_size = relro_size;
tpnt->st_dev = st.st_dev;
tpnt->st_ino = st.st_ino;
- tpnt->ppnt = (ElfW(Phdr) *)(intptr_t) (tpnt->loadaddr + epnt->e_phoff);
+ tpnt->ppnt = (ElfW(Phdr) *) DL_RELOC_ADDR(tpnt->mapaddr, epnt->e_phoff);
tpnt->n_phent = epnt->e_phnum;
+ tpnt->rtld_flags |= rtld_flags;
+#ifdef __LDSO_STANDALONE_SUPPORT__
+ tpnt->l_entry = epnt->e_entry;
+#endif
+
+#if defined(USE_TLS) && USE_TLS
+ if (tlsppnt) {
+ _dl_debug_early("Found TLS header for %s\n", libname);
+# if NO_TLS_OFFSET != 0
+ tpnt->l_tls_offset = NO_TLS_OFFSET;
+# endif
+ tpnt->l_tls_blocksize = tlsppnt->p_memsz;
+ tpnt->l_tls_align = tlsppnt->p_align;
+ if (tlsppnt->p_align == 0)
+ tpnt->l_tls_firstbyte_offset = 0;
+ else
+ tpnt->l_tls_firstbyte_offset = tlsppnt->p_vaddr &
+ (tlsppnt->p_align - 1);
+ tpnt->l_tls_initimage_size = tlsppnt->p_filesz;
+ tpnt->l_tls_initimage = (void *) tlsppnt->p_vaddr;
+
+ /* Assign the next available module ID. */
+ tpnt->l_tls_modid = _dl_next_tls_modid ();
+
+ /* We know the load address, so add it to the offset. */
+#ifdef __LDSO_STANDALONE_SUPPORT__
+ if ((tpnt->l_tls_initimage != NULL) && piclib)
+#else
+ if (tpnt->l_tls_initimage != NULL)
+#endif
+ {
+# ifdef __SUPPORT_LD_DEBUG_EARLY__
+ unsigned int tmp = (unsigned int) tpnt->l_tls_initimage;
+ tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
+ _dl_debug_early("Relocated TLS initial image from %x to %x (size = %x)\n", tmp, tpnt->l_tls_initimage, tpnt->l_tls_initimage_size);
+ tmp = 0;
+# else
+ tpnt->l_tls_initimage = (char *) tlsppnt->p_vaddr + tpnt->loadaddr;
+# endif
+ }
+ }
+#endif
/*
* Add this object into the symbol chain
*/
- if (*rpnt) {
- (*rpnt)->next = (struct dyn_elf *) _dl_malloc(sizeof(struct dyn_elf));
+ if (*rpnt
+#ifdef __LDSO_STANDALONE_SUPPORT__
+ /* Do not create a new chain entry for the main executable */
+ && (*rpnt)->dyn
+#endif
+ ) {
+ (*rpnt)->next = _dl_malloc(sizeof(struct dyn_elf));
_dl_memset((*rpnt)->next, 0, sizeof(struct dyn_elf));
(*rpnt)->next->prev = (*rpnt);
*rpnt = (*rpnt)->next;
- (*rpnt)->dyn = tpnt;
- tpnt->symbol_scope = _dl_symbol_tables;
}
+#ifndef SHARED
+ /* When statically linked, the first time we dlopen a DSO
+ * the *rpnt is NULL, so we need to allocate memory for it,
+ * and initialize the _dl_symbol_table.
+ */
+ else {
+ *rpnt = _dl_symbol_tables = _dl_malloc(sizeof(struct dyn_elf));
+ _dl_memset(*rpnt, 0, sizeof(struct dyn_elf));
+ }
+#endif
+ (*rpnt)->dyn = tpnt;
tpnt->usage_count++;
+#ifdef __LDSO_STANDALONE_SUPPORT__
+ tpnt->libtype = (epnt->e_type == ET_DYN) ? elf_lib : elf_executable;
+#else
tpnt->libtype = elf_lib;
+#endif
/*
* OK, the next thing we need to do is to insert the dynamic linker into
INIT_GOT(lpnt, tpnt);
}
+#ifdef __DSBT__
+ /* Handle DSBT initialization */
+ {
+ struct elf_resolve *t, *ref;
+ int idx = tpnt->loadaddr.map->dsbt_index;
+ unsigned *dsbt = tpnt->loadaddr.map->dsbt_table;
+
+ if (idx == 0) {
+ if (!dynamic_info[DT_TEXTREL]) {
+ /* This DSO has not been assigned an index. */
+ _dl_dprintf(2, "%s: '%s' is missing a dsbt index assignment!\n",
+ _dl_progname, libname);
+ _dl_exit(1);
+ }
+ /* Find a dsbt table from another module. */
+ ref = NULL;
+ for (t = _dl_loaded_modules; t; t = t->next) {
+ if (ref == NULL && t != tpnt) {
+ ref = t;
+ break;
+ }
+ }
+ idx = tpnt->loadaddr.map->dsbt_size;
+ while (idx-- > 0)
+ if (!ref || ref->loadaddr.map->dsbt_table[idx] == NULL)
+ break;
+ if (idx <= 0) {
+ _dl_dprintf(2, "%s: '%s' caused DSBT table overflow!\n",
+ _dl_progname, libname);
+ _dl_exit(1);
+ }
+ _dl_if_debug_dprint("\n\tfile='%s'; assigned index %d\n",
+ libname, idx);
+ tpnt->loadaddr.map->dsbt_index = idx;
+
+ }
+
+ /*
+ * Setup dsbt slot for this module in dsbt of all modules.
+ */
+ ref = NULL;
+ for (t = _dl_loaded_modules; t; t = t->next) {
+ /* find a dsbt table from another module */
+ if (ref == NULL && t != tpnt) {
+ ref = t;
+
+ /* make sure index is not already used */
+ if (t->loadaddr.map->dsbt_table[idx]) {
+ struct elf_resolve *dup;
+ char *dup_name;
+
+ for (dup = _dl_loaded_modules; dup; dup = dup->next)
+ if (dup != tpnt && dup->loadaddr.map->dsbt_index == idx)
+ break;
+ if (dup)
+ dup_name = dup->libname;
+ else if (idx == 1)
+ dup_name = "runtime linker";
+ else
+ dup_name = "unknown library";
+ _dl_dprintf(2, "%s: '%s' dsbt index %d already used by %s!\n",
+ _dl_progname, libname, idx, dup_name);
+ _dl_exit(1);
+ }
+ }
+ t->loadaddr.map->dsbt_table[idx] = (unsigned)dsbt;
+ }
+ if (ref)
+ _dl_memcpy(dsbt, ref->loadaddr.map->dsbt_table,
+ tpnt->loadaddr.map->dsbt_size * sizeof(unsigned *));
+ }
+#endif
_dl_if_debug_dprint("\n\tfile='%s'; generating link map\n", libname);
- _dl_if_debug_dprint("\t\tdynamic: %x base: %x\n", dynamic_addr, libaddr);
+ _dl_if_debug_dprint("\t\tdynamic: %x base: %x\n", dynamic_addr, DL_LOADADDR_BASE(lib_loadaddr));
_dl_if_debug_dprint("\t\t entry: %x phdr: %x phnum: %x\n\n",
- epnt->e_entry + libaddr, tpnt->ppnt, tpnt->n_phent);
+ DL_RELOC_ADDR(lib_loadaddr, epnt->e_entry), tpnt->ppnt, tpnt->n_phent);
_dl_munmap(header, _dl_pagesize);
}
/* now_flag must be RTLD_NOW or zero */
-int _dl_fixup(struct dyn_elf *rpnt, int now_flag)
+int _dl_fixup(struct dyn_elf *rpnt, struct r_scope_elem *scope, int now_flag)
{
int goof = 0;
struct elf_resolve *tpnt;
ElfW(Addr) reloc_addr;
if (rpnt->next)
- goof = _dl_fixup(rpnt->next, now_flag);
+ goof = _dl_fixup(rpnt->next, scope, now_flag);
if (goof)
return goof;
tpnt = rpnt->dyn;
- if(!(tpnt->init_flag & RELOCS_DONE))
+ if (!(tpnt->init_flag & RELOCS_DONE))
_dl_if_debug_dprint("relocation processing: %s\n", tpnt->libname);
if (unlikely(tpnt->dynamic_info[UNSUPPORTED_RELOC_TYPE])) {
relative_count = tpnt->dynamic_info[DT_RELCONT_IDX];
if (relative_count) { /* Optimize the XX_RELATIVE relocations if possible */
reloc_size -= relative_count * sizeof(ELF_RELOC);
- elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
+ if (tpnt->loadaddr
+#ifdef __LDSO_PRELINK_SUPPORT__
+ || (!tpnt->dynamic_info[DT_GNU_PRELINKED_IDX])
+#endif
+ )
+ elf_machine_relative(tpnt->loadaddr, reloc_addr, relative_count);
reloc_addr += relative_count * sizeof(ELF_RELOC);
}
- goof += _dl_parse_relocation_information(rpnt,
+ goof += _dl_parse_relocation_information(rpnt, scope,
reloc_addr,
reloc_size);
tpnt->init_flag |= RELOCS_DONE;
if (tpnt->dynamic_info[DT_JMPREL] &&
(!(tpnt->init_flag & JMP_RELOCS_DONE) ||
(now_flag && !(tpnt->rtld_flags & now_flag)))) {
- tpnt->rtld_flags |= now_flag;
+ tpnt->rtld_flags |= now_flag;
if (!(tpnt->rtld_flags & RTLD_NOW)) {
_dl_parse_lazy_relocation_information(rpnt,
tpnt->dynamic_info[DT_JMPREL],
tpnt->dynamic_info [DT_PLTRELSZ]);
} else {
- goof += _dl_parse_relocation_information(rpnt,
+ goof += _dl_parse_relocation_information(rpnt, scope,
tpnt->dynamic_info[DT_JMPREL],
tpnt->dynamic_info[DT_PLTRELSZ]);
}
tpnt->init_flag |= JMP_RELOCS_DONE;
}
+
+#if 0
+/* _dl_add_to_slotinfo is called by init_tls() for initial DSO
+ or by dlopen() for dynamically loaded DSO. */
+#if defined(USE_TLS) && USE_TLS
+ /* Add object to slot information data if necessary. */
+ if (tpnt->l_tls_blocksize != 0 && tls_init_tp_called)
+ _dl_add_to_slotinfo ((struct link_map *) tpnt);
+#endif
+#endif
return goof;
}
#endif
va_list args;
char *start, *ptr, *string;
- static char *buf;
+ char *buf;
if (!fmt)
return;
return retval;
}
-void _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[], void *debug_addr, ElfW(Addr) load_off)
+/* Thin wrapper around __dl_parse_dynamic_info(); now propagates its
+ * return value (the flags callers fold into the DSO's rtld_flags). */
+unsigned int _dl_parse_dynamic_info(ElfW(Dyn) *dpnt, unsigned long dynamic_info[],
+		void *debug_addr, DL_LOADADDR_TYPE load_off)
{
-	__dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);
+	return __dl_parse_dynamic_info(dpnt, dynamic_info, debug_addr, load_off);
}
-
-/* we want this in ldso.so and libdl.a but nowhere else */
-#ifdef __USE_GNU
-#if defined IS_IN_rtld || (defined IS_IN_libdl && ! defined SHARED)
-extern __typeof(dl_iterate_phdr) __dl_iterate_phdr;
-int
-__dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info, size_t size, void *data), void *data)
-{
- struct elf_resolve *l;
- struct dl_phdr_info info;
- int ret = 0;
-
- for (l = _dl_loaded_modules; l != NULL; l = l->next) {
- info.dlpi_addr = l->loadaddr;
- info.dlpi_name = l->libname;
- info.dlpi_phdr = l->ppnt;
- info.dlpi_phnum = l->n_phent;
- ret = callback (&info, sizeof (struct dl_phdr_info), data);
- if (ret)
- break;
- }
- return ret;
-}
-strong_alias(__dl_iterate_phdr, dl_iterate_phdr)
-#endif
-#endif