/* Copyright (C) 2003, 2004 Red Hat, Inc.
 * Contributed by Alexandre Oliva <aoliva@redhat.com>
 * Copyright (C) 2006-2011 Analog Devices, Inc.
 *
 * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
 */
10 #define __dl_loadaddr_unmap __dl_loadaddr_unmap
12 #include "../fdpic/dl-inlines.h"
14 static __always_inline void
15 __dl_loadaddr_unmap(struct elf32_fdpic_loadaddr loadaddr,
16 struct funcdesc_ht *funcdesc_ht)
20 for (i = 0; i < loadaddr.map->nsegs; i++) {
21 struct elf32_fdpic_loadseg *segdata;
23 segdata = loadaddr.map->segs + i;
26 * A more cleaner way is to add type for struct elf32_fdpic_loadseg,
27 * and release the memory according to the type.
28 * Currently, we hardcode the memory address of L1 SRAM.
30 if ((segdata->addr & 0xff800000) == 0xff800000) {
31 _dl_sram_free((void *)segdata->addr);
35 offs = (segdata->p_vaddr & ADDR_ALIGN);
36 _dl_munmap((void*)segdata->addr - offs,
37 segdata->p_memsz + offs);
41 * _dl_unmap is only called for dlopen()ed libraries, for which
42 * calling free() is safe, or before we've completed the initial
43 * relocation, in which case calling free() is probably pointless,
46 _dl_free(loadaddr.map);
48 htab_delete(funcdesc_ht);
51 static __always_inline int
52 __dl_is_special_segment(Elf32_Ehdr *epnt, Elf32_Phdr *ppnt)
54 if (ppnt->p_type != PT_LOAD)
57 /* Allow read-only executable segments to be loaded into L1 inst */
58 if ((epnt->e_flags & EF_BFIN_CODE_IN_L1) &&
59 !(ppnt->p_flags & PF_W) && (ppnt->p_flags & PF_X))
62 /* Allow writable non-executable segments to be loaded into L1 data */
63 if ((epnt->e_flags & EF_BFIN_DATA_IN_L1) &&
64 (ppnt->p_flags & PF_W) && !(ppnt->p_flags & PF_X))
68 * These L1 memory addresses are also used in GNU ld and linux kernel.
69 * They need to be kept synchronized.
71 switch (ppnt->p_vaddr) {
84 static __always_inline char *
85 __dl_map_segment(Elf32_Ehdr *epnt, Elf32_Phdr *ppnt, int infile, int flags)
89 /* Handle L1 inst mappings */
90 if (((epnt->e_flags & EF_BFIN_CODE_IN_L1) || ppnt->p_vaddr == 0xffa00000) &&
91 !(ppnt->p_flags & PF_W) && (ppnt->p_flags & PF_X))
93 size_t size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
94 void *status = _dl_mmap(NULL, size, LXFLAGS(ppnt->p_flags),
95 flags | MAP_EXECUTABLE | MAP_DENYWRITE,
96 infile, ppnt->p_offset & OFFS_ALIGN);
97 if (_dl_mmap_check_error(status))
100 addr = _dl_sram_alloc(ppnt->p_filesz, L1_INST_SRAM);
102 _dl_dma_memcpy(addr, status + (ppnt->p_vaddr & ADDR_ALIGN), ppnt->p_filesz);
104 _dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__);
106 _dl_munmap(status, size);
110 /* Handle L1 data mappings */
111 if (((epnt->e_flags & EF_BFIN_DATA_IN_L1) ||
112 ppnt->p_vaddr == 0xff700000 ||
113 ppnt->p_vaddr == 0xff800000 ||
114 ppnt->p_vaddr == 0xff900000) &&
115 (ppnt->p_flags & PF_W) && !(ppnt->p_flags & PF_X))
117 if (ppnt->p_vaddr == 0xff800000)
118 addr = _dl_sram_alloc(ppnt->p_memsz, L1_DATA_A_SRAM);
119 else if (ppnt->p_vaddr == 0xff900000)
120 addr = _dl_sram_alloc(ppnt->p_memsz, L1_DATA_B_SRAM);
122 addr = _dl_sram_alloc (ppnt->p_memsz, L1_DATA_SRAM);
125 if (_DL_PREAD(infile, addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) {
129 if (ppnt->p_filesz < ppnt->p_memsz)
130 _dl_memset(addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz);
132 _dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__);
136 /* Handle L2 mappings */
137 if (ppnt->p_vaddr == 0xfeb00000 || ppnt->p_vaddr == 0xfec00000) {
138 addr = _dl_sram_alloc(ppnt->p_memsz, L2_SRAM);
140 if (_DL_PREAD(infile, addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) {
144 if (ppnt->p_filesz < ppnt->p_memsz)
145 _dl_memset(addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz);
147 _dl_dprintf(2, "%s:%i: L2 allocation failed\n", _dl_progname, __LINE__);