1 /* Copyright (C) 2003, 2004 Red Hat, Inc.
2 * Contributed by Alexandre Oliva <aoliva@redhat.com>
3 * Copyright (C) 2006-2011 Analog Devices, Inc.
5 * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
9 # define _dl_assert(expr)
12 /* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete
/* NOTE(review): this chunk is an elided dump -- braces and several
   statements (the error paths after the SEND_EARLY_STDERR calls, and the
   segment-count check guarding the second message) are missing between
   the visible lines.  Comments describe only what is shown. */
14 static __always_inline void
15 __dl_init_loadaddr_map (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer,
16 struct elf32_fdpic_loadmap *map)
/* Only loadmap version 0 is understood; anything else is reported via
   SEND_EARLY_STDERR, which works before normal stdio is set up. */
18 if (map->version != 0)
20 SEND_EARLY_STDERR ("Invalid loadmap version number\n");
/* Presumably preceded by an nsegs validity check -- lines elided. */
25 SEND_EARLY_STDERR ("Invalid segment count in loadmap\n");
/* Record the boot-time GOT pointer as this module's got_value. */
28 loadaddr->got_value = (void *)dl_boot_got_pointer;
32 /* Figure out how many LOAD segments there are in the given headers,
33 and allocate a block for the load map big enough for them.
34 got_value will be properly initialized later on, with INIT_GOT. */
35 static __always_inline int
36 __dl_init_loadaddr (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt,
/* (parameter list continues on elided lines; the declarations of the
   segment counter and loop index are elided as well) */
/* Count PT_LOAD program headers -- only those get loadmap slots. */
42 for (i = 0; i < pcnt; i++)
43 if (ppnt[i].p_type == PT_LOAD)
/* Left 0 here; filled in later with INIT_GOT (see header comment). */
46 loadaddr->got_value = 0;
/* One loadmap header plus one loadseg per PT_LOAD segment. */
48 size = sizeof (struct elf32_fdpic_loadmap)
49 + sizeof (struct elf32_fdpic_loadseg) * count;
50 loadaddr->map = _dl_malloc (size);
/* Fresh map: version 0, no segments yet.  __dl_init_loadaddr_hdr adds
   segments incrementally as they are mapped. */
54 loadaddr->map->version = 0;
55 loadaddr->map->nsegs = 0;
60 /* Incrementally initialize a load map. */
61 static __always_inline void
62 __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
63 Elf32_Phdr *phdr, int maxsegs)
/* NOTE(review): elided dump -- the overflow handling under the maxsegs
   test and the closing braces are not visible here. */
65 struct elf32_fdpic_loadseg *segdata;
/* Refuse to grow past the slot count sized by __dl_init_loadaddr
   (overflow path elided). */
67 if (loadaddr.map->nsegs == maxsegs)
/* Claim the next free slot and record where this PT_LOAD landed. */
70 segdata = &loadaddr.map->segs[loadaddr.map->nsegs++];
71 segdata->addr = (Elf32_Addr) addr;
72 segdata->p_vaddr = phdr->p_vaddr;
73 segdata->p_memsz = phdr->p_memsz;
75 #if defined (__SUPPORT_LD_DEBUG__)
77 _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
78 loadaddr.map->nsegs-1,
79 segdata->p_vaddr, segdata->addr, segdata->p_memsz);
83 /* Replace an existing entry in the load map. */
84 static __always_inline void
85 __dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
/* (the phdr parameter and the declarations of i/oldaddr sit on elided
   lines) */
88 struct elf32_fdpic_loadseg *segdata;
/* Locate the segment matching this phdr by its (p_vaddr, p_memsz) pair. */
92 for (i = 0; i < loadaddr.map->nsegs; i++)
93 if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr
94 && loadaddr.map->segs[i].p_memsz == phdr->p_memsz)
/* No match found; the error path here is elided from this dump. */
96 if (i == loadaddr.map->nsegs)
/* Unmap the old mapping before recording the replacement address. */
99 segdata = loadaddr.map->segs + i;
100 oldaddr = (void *)segdata->addr;
101 _dl_munmap (oldaddr, segdata->p_memsz);
102 segdata->addr = (Elf32_Addr) addr;
104 #if defined (__SUPPORT_LD_DEBUG__)
106 _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n",
107 loadaddr.map->nsegs-1,
108 segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz);
112 static __always_inline void __dl_loadaddr_unmap
113 (struct elf32_fdpic_loadaddr loadaddr, struct funcdesc_ht *funcdesc_ht);
115 /* Figure out whether the given address is in one of the mapped
117 static __always_inline int
118 __dl_addr_in_loadaddr (void *p, struct elf32_fdpic_loadaddr loadaddr)
120 struct elf32_fdpic_loadmap *map = loadaddr.map;
/* Linear scan over the load segments; hits when p falls inside
   [addr, addr + p_memsz).  The return statements are elided from this
   dump. */
123 for (c = 0; c < map->nsegs; c++)
124 if ((void*)map->segs[c].addr <= p
125 && (char*)p < (char*)map->segs[c].addr + map->segs[c].p_memsz)
131 static __always_inline void * _dl_funcdesc_for (void *entry_point, void *got_value);
133 /* The hashcode handling code below is heavily inspired by libiberty's
134 hashtab code, but with most adaptation points and support for
135 deleting elements removed.
137 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
138 Contributed by Vladimir Makarov (vmakarov@cygnus.com). */
/* Return a prime from the table below strictly larger than N, found by
   binary search.  Used to size the function-descriptor hash table. */
140 static __always_inline unsigned long
141 higher_prime_number (unsigned long n)
143 /* These are primes that are near, but slightly smaller than, a
145 static const unsigned long primes[] = {
153 (unsigned long) 1021,
154 (unsigned long) 2039,
155 (unsigned long) 4093,
156 (unsigned long) 8191,
157 (unsigned long) 16381,
158 (unsigned long) 32749,
159 (unsigned long) 65521,
160 (unsigned long) 131071,
161 (unsigned long) 262139,
162 (unsigned long) 524287,
163 (unsigned long) 1048573,
164 (unsigned long) 2097143,
165 (unsigned long) 4194301,
166 (unsigned long) 8388593,
167 (unsigned long) 16777213,
168 (unsigned long) 33554393,
169 (unsigned long) 67108859,
170 (unsigned long) 134217689,
171 (unsigned long) 268435399,
172 (unsigned long) 536870909,
173 (unsigned long) 1073741789,
174 (unsigned long) 2147483647,
/* Sum expression avoids writing a literal > LONG_MAX on 32-bit longs. */
176 ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
179 const unsigned long *low = &primes[0];
180 const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])];
/* Binary search; the comparison/narrowing steps are on elided lines. */
184 const unsigned long *mid = low + (high - low) / 2;
192 /* If we've run out of primes, abort. */
195 fprintf (stderr, "Cannot find prime bigger than %lu\n", n);
/* Fields of the function-descriptor hash table (struct funcdesc_ht);
   the struct header and any remaining members are elided from this
   dump. */
/* Open-addressed slot array of funcdesc_value pointers. */
206 struct funcdesc_value **entries;
208 /* Current size (in entries) of the hash table */
211 /* Current number of elements. */
/* Hash a code address.  Dropping the low 3 bits looks deliberate:
   entry points appear to be 8-byte aligned here (cf. the "& 7"
   descriptor test in _dl_lookup_address) -- TODO(review) confirm. */
215 static __always_inline int
216 hash_pointer (const void *p)
218 return (int) ((long)p >> 3);
/* Allocate and zero a fresh funcdesc hash table.  The function name
   line and the initialization of ht->size are elided from this dump
   (presumably htab_create, per the htab_* naming used below). */
221 static __always_inline struct funcdesc_ht *
224 struct funcdesc_ht *ht = _dl_malloc (sizeof (struct funcdesc_ht));
/* NOTE(review): sizeof names "struct funcdesc_ht_value *" while the
   table stores "struct funcdesc_value *".  Both are pointers so the
   computed size is identical, but the tag looks like a typo. */
229 ht->entries = _dl_malloc (sizeof (struct funcdesc_ht_value *) * ht->size);
235 _dl_memset (ht->entries, 0, sizeof (struct funcdesc_ht_value *) * ht->size);
240 /* This is only called from _dl_loadaddr_unmap, so it's safe to call
241 _dl_free(). See the discussion below. */
242 static __always_inline void
243 htab_delete (struct funcdesc_ht *htab)
/* Free every live funcdesc_value, then the slot array itself
   (remaining lines elided from this dump). */
247 for (i = htab->size - 1; i >= 0; i--)
248 if (htab->entries[i])
249 _dl_free (htab->entries[i]);
251 _dl_free (htab->entries);
255 /* Similar to htab_find_slot, but without several unwanted side effects:
256 - Does not call htab->eq_f when it finds an existing entry.
257 - Does not change the count of elements/searches/collisions in the
259 This function also assumes there are no deleted entries in the table.
260 HASH is the hash value for the element to be inserted. */
262 static __always_inline struct funcdesc_value **
263 find_empty_slot_for_expand (struct funcdesc_ht *htab, int hash)
265 size_t size = htab->size;
266 unsigned int index = hash % size;
267 struct funcdesc_value **slot = htab->entries + index;
/* Double hashing: secondary stride 1 + hash % (size - 2), probing until
   an empty slot turns up.  The probe loop body is partly elided. */
273 hash2 = 1 + hash % (size - 2);
280 slot = htab->entries + index;
286 /* The following function changes size of memory allocated for the
287 entries and repeatedly inserts the table elements. The occupancy
288 of the table after the call will be about 50%. Naturally the hash
289 table must already exist. Remember also that the place of the
290 table entries is changed. If memory allocation failures are allowed,
291 this function will return zero, indicating that the table could not be
292 expanded. If all goes well, it will return a non-zero value. */
294 static __always_inline int
295 htab_expand (struct funcdesc_ht *htab)
297 struct funcdesc_value **oentries;
298 struct funcdesc_value **olimit;
299 struct funcdesc_value **p;
300 struct funcdesc_value **nentries;
303 oentries = htab->entries;
304 olimit = oentries + htab->size;
306 /* Resize only when table after removal of unused elements is either
307 too full or too empty. */
308 if (htab->n_elements * 2 > htab->size)
309 nsize = higher_prime_number (htab->n_elements * 2);
/* BUG(review): the memset runs before the NULL check, so an allocation
   failure dereferences NULL here instead of returning 0 as the header
   comment promises.  The "if (nentries == NULL)" test should precede
   the _dl_memset call. */
313 nentries = _dl_malloc (sizeof (struct funcdesc_value *) * nsize);
314 _dl_memset (nentries, 0, sizeof (struct funcdesc_value *) * nsize);
315 if (nentries == NULL)
317 htab->entries = nentries;
/* Rehash each old entry into the new array (loop header elided). */
324 *find_empty_slot_for_expand (htab, hash_pointer ((*p)->entry_point))
331 #if 0 /* We can't tell whether this was allocated by the _dl_malloc()
332 built into ld.so or malloc() in the main executable or libc,
333 and calling free() for something that wasn't malloc()ed could
334 do Very Bad Things (TM). Take the conservative approach
335 here, potentially wasting as much memory as actually used by
336 the hash table, even if multiple growths occur. That's not
337 so bad as to require some overengineered solution that would
338 enable us to keep track of how it was allocated. */
344 /* This function searches for a hash table slot containing an entry
345 equal to the given element. To delete an entry, call this with
346 INSERT = 0, then call htab_clear_slot on the slot returned (possibly
347 after doing some checks). To insert an entry, call this with
348 INSERT = 1, then write the value you want into the returned slot.
349 When inserting an entry, NULL may be returned if memory allocation
352 static __always_inline struct funcdesc_value **
353 htab_find_slot (struct funcdesc_ht *htab, void *ptr, int insert)
358 struct funcdesc_value **entry;
/* Grow when the table reaches 3/4 occupancy; bail if expansion fails. */
360 if (htab->size * 3 <= htab->n_elements * 4
361 && htab_expand (htab) == 0)
364 hash = hash_pointer (ptr);
/* First probe at hash % size (index/size setup elided)... */
369 entry = &htab->entries[index];
372 else if ((*entry)->entry_point == ptr)
/* ...then double-hash probing with the same stride formula used by
   find_empty_slot_for_expand. */
375 hash2 = 1 + hash % (size - 2);
382 entry = &htab->entries[index];
385 else if ((*entry)->entry_point == ptr)
/* Return a (stabilized) function descriptor for ENTRY_POINT in the
   module owning GOT_VALUE, creating and caching one on first request.
   The return-type line and the lazy table-creation lines are elided. */
398 _dl_funcdesc_for (void *entry_point, void *got_value)
/* The module's struct elf_resolve appears to be stashed at GOT[2] --
   TODO(review) confirm against the FDPIC GOT layout. */
400 struct elf_resolve *tpnt = ((void**)got_value)[2];
401 struct funcdesc_ht *ht = tpnt->funcdesc_ht;
402 struct funcdesc_value **entry;
404 _dl_assert (got_value == tpnt->loadaddr.got_value);
/* First call for this module: remember the freshly created table. */
411 tpnt->funcdesc_ht = ht;
/* insert=1 reserves a slot even when the descriptor doesn't exist yet. */
414 entry = htab_find_slot (ht, entry_point, 1);
/* Cache hit: hand back the existing descriptor. */
417 _dl_assert ((*entry)->entry_point == entry_point);
418 return _dl_stabilize_funcdesc (*entry);
/* Miss: allocate and fill a new descriptor in the reserved slot. */
421 *entry = _dl_malloc (sizeof (struct funcdesc_value));
422 (*entry)->entry_point = entry_point;
423 (*entry)->got_value = got_value;
425 return _dl_stabilize_funcdesc (*entry);
/* Map a function-descriptor pointer back to its code entry point; a
   value that is not a known descriptor passes through unchanged
   (return statements elided from this dump). */
428 static __always_inline void const *
429 _dl_lookup_address (void const *address)
431 struct elf_resolve *rpnt;
432 struct funcdesc_value const *fd;
434 /* Make sure we don't make assumptions about its alignment. */
435 __asm__ ("" : "+r" (address));
/* Descriptors are 8-byte aligned; a misaligned pointer can't be one. */
437 if ((Elf32_Addr)address & 7)
438 /* It's not a function descriptor. */
441 fd = (struct funcdesc_value const *)address;
/* Scan every loaded module's descriptor table for FD. */
443 for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next)
445 if (! rpnt->funcdesc_ht)
/* A descriptor minted by a module carries that module's got_value;
   skip modules that can't own FD. */
448 if (fd->got_value != rpnt->loadaddr.got_value)
451 address = htab_find_slot (rpnt->funcdesc_ht, (void*)fd->entry_point, 0);
/* Confirm the slot really holds FD before translating it. */
453 if (address && *(struct funcdesc_value *const*)address == fd)
455 address = (*(struct funcdesc_value *const*)address)->entry_point;
465 #ifndef __dl_loadaddr_unmap
/* Unmap every segment of a module and release its load map and its
   function-descriptor table.  NOTE(review): the function head line,
   closing brace and matching #endif continue past the end of this
   chunk. */
467 __dl_loadaddr_unmap (struct elf32_fdpic_loadaddr loadaddr,
468 struct funcdesc_ht *funcdesc_ht)
472 for (i = 0; i < loadaddr.map->nsegs; i++)
473 _dl_munmap ((void*)loadaddr.map->segs[i].addr,
474 loadaddr.map->segs[i].p_memsz);
476 /* _dl_unmap is only called for dlopen()ed libraries, for which
477 calling free() is safe, or before we've completed the initial
478 relocation, in which case calling free() is probably pointless,
480 _dl_free (loadaddr.map);
/* (any NULL guard around this call is elided from the dump) */
482 htab_delete (funcdesc_ht);