From b1bac94fd40897ee9a468d84a85251d57fcf08cc Mon Sep 17 00:00:00 2001
From: amodra
Date: Wed, 25 Aug 2010 06:49:54 +0000
Subject: [PATCH] * elf.c (_bfd_elf_map_sections_to_segments): Don't load
 program headers if any loaded section wraps the address space. Simplify
 ~(m-1) to -m. Use lma rather than vma when determining whether note
 sections are adjacent.

---
 bfd/ChangeLog |  7 +++++++
 bfd/elf.c     | 25 ++++++++++++++++++-------
 2 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index 7ae83002a1..b2608ad22c 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,3 +1,10 @@
+2010-08-25  Alan Modra
+
+	* elf.c (_bfd_elf_map_sections_to_segments): Don't load program
+	headers if any loaded section wraps the address space. Simplify
+	~(m-1) to -m. Use lma rather than vma when determining whether
+	note sections are adjacent.
+
 2010-08-22  H.J. Lu
 
 	PR ld/11933
diff --git a/bfd/elf.c b/bfd/elf.c
index f9f2dadb59..9c56e2e7b1 100644
--- a/bfd/elf.c
+++ b/bfd/elf.c
@@ -3624,6 +3624,7 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
   asection *first_tls = NULL;
   asection *dynsec, *eh_frame_hdr;
   bfd_size_type amt;
+  bfd_vma addr_mask, wrap_to = 0;
 
   /* Select the allocated sections, and sort them.  */
 
@@ -3632,6 +3633,12 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
       if (sections == NULL)
 	goto error_return;
 
+      /* Calculate top address, avoiding undefined behaviour of shift
+	 left operator when shift count is equal to size of type
+	 being shifted.  */
+      addr_mask = ((bfd_vma) 1 << (bfd_arch_bits_per_address (abfd) - 1)) - 1;
+      addr_mask = (addr_mask << 1) + 1;
+
       i = 0;
       for (s = abfd->sections; s != NULL; s = s->next)
 	{
@@ -3639,6 +3646,9 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
 	    {
 	      sections[i] = s;
 	      ++i;
+	      /* A wrapping section potentially clashes with header.  */
+	      if (((s->lma + s->size) & addr_mask) < (s->lma & addr_mask))
+		wrap_to = (s->lma + s->size) & addr_mask;
 	    }
 	}
       BFD_ASSERT (i <= bfd_count_sections (abfd));
@@ -3708,8 +3718,10 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
       if (phdr_size == (bfd_size_type) -1)
 	phdr_size = get_program_header_size (abfd, info);
       if ((abfd->flags & D_PAGED) == 0
-	  || sections[0]->lma < phdr_size
-	  || sections[0]->lma % maxpagesize < phdr_size % maxpagesize)
+	  || (sections[0]->lma & addr_mask) < phdr_size
+	  || ((sections[0]->lma & addr_mask) % maxpagesize
+	      < phdr_size % maxpagesize)
+	  || (sections[0]->lma & addr_mask & -maxpagesize) < wrap_to)
 	phdr_in_segment = FALSE;
     }
 
@@ -3774,9 +3786,8 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
 	    }
 	  else if (! writable
 		   && (hdr->flags & SEC_READONLY) == 0
-		   && (((last_hdr->lma + last_size - 1)
-			& ~(maxpagesize - 1))
-		       != (hdr->lma & ~(maxpagesize - 1))))
+		   && (((last_hdr->lma + last_size - 1) & -maxpagesize)
+		       != (hdr->lma & -maxpagesize)))
 	    {
 	      /* We don't want to put a writable section in a read only
 		 segment, unless they are on the same page in memory
@@ -3883,8 +3894,8 @@ _bfd_elf_map_sections_to_segments (bfd *abfd, struct bfd_link_info *info)
 	      if (s2->next->alignment_power == 2
 		  && (s2->next->flags & SEC_LOAD) != 0
 		  && CONST_STRNEQ (s2->next->name, ".note")
-		  && align_power (s2->vma + s2->size, 2)
-		     == s2->next->vma)
+		  && align_power (s2->lma + s2->size, 2)
+		     == s2->next->lma)
 		count++;
 	      else
 		break;
-- 
2.11.0
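
The arithmetic the diff relies on can be checked in isolation. Below is a
minimal standalone C sketch, not BFD code: it uses plain uint64_t instead of
bfd_vma, and the variable "bits" merely stands in for what
bfd_arch_bits_per_address returns. It shows why ~(m-1) equals -m for an
unsigned power-of-two page size, how the two-step shift builds an all-ones
address mask without shifting by the full width of the type (which would be
undefined behaviour), and how a section that wraps the address space is
detected.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* For unsigned power-of-two m, ~(m - 1) == -m, so the page mask
     ~(maxpagesize - 1) can be written as -maxpagesize.  */
  uint64_t maxpagesize = 0x1000;
  assert (~(maxpagesize - 1) == -maxpagesize);

  /* Build an all-ones mask of 'bits' bits in two steps so that no shift
     count ever equals the width of the type.  'bits' is a stand-in for
     bfd_arch_bits_per_address (abfd) in the patch.  */
  unsigned int bits = 32;
  uint64_t addr_mask = ((uint64_t) 1 << (bits - 1)) - 1;
  addr_mask = (addr_mask << 1) + 1;	/* 0xffffffff when bits == 32.  */

  /* A section wraps the address space when its end, reduced modulo the
     address size, lands below its start.  */
  uint64_t lma = 0xfffff000, size = 0x2000;
  uint64_t end = (lma + size) & addr_mask;	/* 0x1000 here.  */
  if (end < (lma & addr_mask))
    printf ("section wraps, ends at %#llx\n", (unsigned long long) end);
  return 0;
}

Compiled with any C99 compiler (file name is arbitrary, e.g. "cc wrap.c &&
./a.out"), the sketch prints the wrapped end address, which is the value the
patch records in wrap_to and later compares against the page-aligned start of
the first section before allowing the program headers into the segment.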