
Add support for .bss section in oat files.
author     Vladimir Marko <vmarko@google.com>    Wed, 25 Feb 2015 12:02:49 +0000 (12:02 +0000)
committer  Vladimir Marko <vmarko@google.com>    Mon, 2 Mar 2015 18:07:03 +0000 (18:07 +0000)
Change-Id: I779b80b8139d9afdc28373f8c68edff5df7726ce

30 files changed:
compiler/common_compiler_test.cc
compiler/elf_builder.h
compiler/elf_writer_quick.cc
compiler/image_writer.cc
compiler/oat_writer.cc
compiler/oat_writer.h
oatdump/oatdump.cc
runtime/base/arena_allocator.cc
runtime/elf_file.cc
runtime/gc/accounting/atomic_stack.h
runtime/gc/accounting/bitmap.cc
runtime/gc/accounting/card_table.cc
runtime/gc/accounting/read_barrier_table.h
runtime/gc/accounting/space_bitmap.cc
runtime/gc/allocator/rosalloc.cc
runtime/gc/collector/mark_sweep.cc
runtime/gc/heap.cc
runtime/gc/space/bump_pointer_space.cc
runtime/gc/space/large_object_space.cc
runtime/gc/space/malloc_space.cc
runtime/gc/space/region_space.cc
runtime/indirect_reference_table.cc
runtime/jit/jit_code_cache.cc
runtime/mem_map.cc
runtime/mem_map.h
runtime/mem_map_test.cc
runtime/oat_file.cc
runtime/oat_file.h
runtime/thread_pool.cc
runtime/zip_archive.cc

index e8354b2..09be437 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -253,6 +253,7 @@ void CommonCompilerTest::ReserveImageSpace() {
                                                 (size_t)100 * 1024 * 1024,  // 100MB
                                                 PROT_NONE,
                                                 false /* no need for 4gb flag with fixed mmap*/,
+                                                false /* not reusing existing reservation */,
                                                 &error_msg));
   CHECK(image_reservation_.get() != nullptr) << error_msg;
 }
index 94268de..9ab3602 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -538,6 +538,8 @@ class ElfBuilder FINAL {
              Elf_Word rodata_size,
              Elf_Word text_relative_offset,
              Elf_Word text_size,
+             Elf_Word bss_relative_offset,
+             Elf_Word bss_size,
              const bool add_symbols,
              bool debug = false)
     : oat_writer_(oat_writer),
@@ -547,6 +549,7 @@ class ElfBuilder FINAL {
       text_builder_(".text", text_size, text_relative_offset, SHT_PROGBITS,
                     SHF_ALLOC | SHF_EXECINSTR),
       rodata_builder_(".rodata", rodata_size, rodata_relative_offset, SHT_PROGBITS, SHF_ALLOC),
+      bss_builder_(".bss", bss_size, bss_relative_offset, SHT_NOBITS, SHF_ALLOC),
       dynsym_builder_(".dynsym", SHT_DYNSYM, ".dynstr", SHT_STRTAB, true),
       symtab_builder_(".symtab", SHT_SYMTAB, ".strtab", SHT_STRTAB, false),
       hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
@@ -569,6 +572,11 @@ class ElfBuilder FINAL {
   }
 
   bool Init() {
+    // Since the .text section of an oat file contains relative references to .rodata
+    // and (optionally) .bss, we keep these 2 or 3 sections together. This creates
+    // a non-traditional layout where the .bss section is mapped independently of the
+    // .dynamic section and needs its own program header with LOAD RW.
+    //
     // The basic layout of the elf file. Order may be different in final output.
     // +-------------------------+
     // | Elf_Ehdr                |
@@ -576,6 +584,7 @@ class ElfBuilder FINAL {
     // | Elf_Phdr PHDR           |
     // | Elf_Phdr LOAD R         | .dynsym .dynstr .hash .rodata
     // | Elf_Phdr LOAD R X       | .text
+    // | Elf_Phdr LOAD RW        | .bss (Optional)
     // | Elf_Phdr LOAD RW        | .dynamic
     // | Elf_Phdr DYNAMIC        | .dynamic
     // +-------------------------+
@@ -584,6 +593,8 @@ class ElfBuilder FINAL {
     // | Elf_Sym  oatdata        |
     // | Elf_Sym  oatexec        |
     // | Elf_Sym  oatlastword    |
+    // | Elf_Sym  oatbss         | (Optional)
+    // | Elf_Sym  oatbsslastword | (Optional)
     // +-------------------------+
     // | .dynstr                 |
     // | \0                      |
@@ -631,6 +642,7 @@ class ElfBuilder FINAL {
     // | .hash\0                 |
     // | .rodata\0               |
     // | .text\0                 |
+    // | .bss\0                  |  (Optional)
     // | .shstrtab\0             |
     // | .symtab\0               |  (Optional)
     // | .strtab\0               |  (Optional)
@@ -654,8 +666,9 @@ class ElfBuilder FINAL {
     // | Elf_Shdr .dynsym        |
     // | Elf_Shdr .dynstr        |
     // | Elf_Shdr .hash          |
-    // | Elf_Shdr .text          |
     // | Elf_Shdr .rodata        |
+    // | Elf_Shdr .text          |
+    // | Elf_Shdr .bss           |  (Optional)
     // | Elf_Shdr .dynamic       |
     // | Elf_Shdr .shstrtab      |
     // | Elf_Shdr .debug_info    |  (Optional)
@@ -694,8 +707,11 @@ class ElfBuilder FINAL {
     program_headers_[PH_LOAD_R_X].p_type    = PT_LOAD;
     program_headers_[PH_LOAD_R_X].p_flags   = PF_R | PF_X;
 
-    program_headers_[PH_LOAD_RW_].p_type    = PT_LOAD;
-    program_headers_[PH_LOAD_RW_].p_flags   = PF_R | PF_W;
+    program_headers_[PH_LOAD_RW_BSS].p_type    = PT_LOAD;
+    program_headers_[PH_LOAD_RW_BSS].p_flags   = PF_R | PF_W;
+
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_type    = PT_LOAD;
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_flags   = PF_R | PF_W;
 
     program_headers_[PH_DYNAMIC].p_type    = PT_DYNAMIC;
     program_headers_[PH_DYNAMIC].p_flags   = PF_R | PF_W;
@@ -760,6 +776,14 @@ class ElfBuilder FINAL {
     text_builder_.SetSectionIndex(section_index_);
     section_index_++;
 
+    // Setup .bss
+    if (bss_builder_.GetSize() != 0u) {
+      section_ptrs_.push_back(bss_builder_.GetSection());
+      AssignSectionStr(&bss_builder_, &shstrtab_);
+      bss_builder_.SetSectionIndex(section_index_);
+      section_index_++;
+    }
+
     // Setup .dynamic
     section_ptrs_.push_back(dynamic_builder_.GetSection());
     AssignSectionStr(&dynamic_builder_, &shstrtab_);
@@ -820,10 +844,20 @@ class ElfBuilder FINAL {
     CHECK_ALIGNED(rodata_builder_.GetSection()->sh_offset +
                   rodata_builder_.GetSection()->sh_size, kPageSize);
 
+    // Get the layout of the .bss section.
+    bss_builder_.GetSection()->sh_offset =
+        NextOffset<Elf_Word, Elf_Shdr>(*bss_builder_.GetSection(),
+                                       *text_builder_.GetSection());
+    bss_builder_.GetSection()->sh_addr = bss_builder_.GetSection()->sh_offset;
+    bss_builder_.GetSection()->sh_size = bss_builder_.GetSize();
+    bss_builder_.GetSection()->sh_link = bss_builder_.GetLink();
+
     // Get the layout of the dynamic section.
-    dynamic_builder_.GetSection()->sh_offset =
-        NextOffset<Elf_Word, Elf_Shdr>(*dynamic_builder_.GetSection(), *text_builder_.GetSection());
-    dynamic_builder_.GetSection()->sh_addr = dynamic_builder_.GetSection()->sh_offset;
+    CHECK(IsAlignedParam(bss_builder_.GetSection()->sh_offset,
+                         dynamic_builder_.GetSection()->sh_addralign));
+    dynamic_builder_.GetSection()->sh_offset = bss_builder_.GetSection()->sh_offset;
+    dynamic_builder_.GetSection()->sh_addr =
+        NextOffset<Elf_Word, Elf_Shdr>(*dynamic_builder_.GetSection(), *bss_builder_.GetSection());
     dynamic_builder_.GetSection()->sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
     dynamic_builder_.GetSection()->sh_link = dynamic_builder_.GetLink();
 
@@ -987,16 +1021,23 @@ class ElfBuilder FINAL {
     program_headers_[PH_LOAD_R_X].p_memsz  = load_rx_size;
     program_headers_[PH_LOAD_R_X].p_align  = text_builder_.GetSection()->sh_addralign;
 
-    program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.GetSection()->sh_offset;
-    program_headers_[PH_LOAD_RW_].p_vaddr  = dynamic_builder_.GetSection()->sh_offset;
-    program_headers_[PH_LOAD_RW_].p_paddr  = dynamic_builder_.GetSection()->sh_offset;
-    program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.GetSection()->sh_size;
-    program_headers_[PH_LOAD_RW_].p_memsz  = dynamic_builder_.GetSection()->sh_size;
-    program_headers_[PH_LOAD_RW_].p_align  = dynamic_builder_.GetSection()->sh_addralign;
+    program_headers_[PH_LOAD_RW_BSS].p_offset = bss_builder_.GetSection()->sh_offset;
+    program_headers_[PH_LOAD_RW_BSS].p_vaddr  = bss_builder_.GetSection()->sh_offset;
+    program_headers_[PH_LOAD_RW_BSS].p_paddr  = bss_builder_.GetSection()->sh_offset;
+    program_headers_[PH_LOAD_RW_BSS].p_filesz = 0;
+    program_headers_[PH_LOAD_RW_BSS].p_memsz  = bss_builder_.GetSection()->sh_size;
+    program_headers_[PH_LOAD_RW_BSS].p_align  = bss_builder_.GetSection()->sh_addralign;
+
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_vaddr  = dynamic_builder_.GetSection()->sh_addr;
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_paddr  = dynamic_builder_.GetSection()->sh_addr;
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_memsz  = dynamic_builder_.GetSection()->sh_size;
+    program_headers_[PH_LOAD_RW_DYNAMIC].p_align  = dynamic_builder_.GetSection()->sh_addralign;
 
     program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
-    program_headers_[PH_DYNAMIC].p_vaddr  = dynamic_builder_.GetSection()->sh_offset;
-    program_headers_[PH_DYNAMIC].p_paddr  = dynamic_builder_.GetSection()->sh_offset;
+    program_headers_[PH_DYNAMIC].p_vaddr  = dynamic_builder_.GetSection()->sh_addr;
+    program_headers_[PH_DYNAMIC].p_paddr  = dynamic_builder_.GetSection()->sh_addr;
     program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
     program_headers_[PH_DYNAMIC].p_memsz  = dynamic_builder_.GetSection()->sh_size;
     program_headers_[PH_DYNAMIC].p_align  = dynamic_builder_.GetSection()->sh_addralign;
@@ -1004,15 +1045,29 @@ class ElfBuilder FINAL {
     // Finish setup of the Ehdr values.
     elf_header_.e_phoff = PHDR_OFFSET;
     elf_header_.e_shoff = sections_offset;
-    elf_header_.e_phnum = PH_NUM;
+    elf_header_.e_phnum = (bss_builder_.GetSection()->sh_size != 0u) ? PH_NUM : PH_NUM - 1;
     elf_header_.e_shnum = section_ptrs_.size();
     elf_header_.e_shstrndx = shstrtab_builder_.GetSectionIndex();
 
     // Add the rest of the pieces to the list.
     pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Elf Header", 0, &elf_header_,
                                                       sizeof(elf_header_)));
-    pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
-                                                      &program_headers_, sizeof(program_headers_)));
+    if (bss_builder_.GetSection()->sh_size != 0u) {
+      pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
+                                                        &program_headers_[0],
+                                                        elf_header_.e_phnum * sizeof(Elf_Phdr)));
+    } else {
+      // Skip PH_LOAD_RW_BSS.
+      Elf_Word part1_size = PH_LOAD_RW_BSS * sizeof(Elf_Phdr);
+      Elf_Word part2_size = (PH_NUM - PH_LOAD_RW_BSS - 1) * sizeof(Elf_Phdr);
+      CHECK_EQ(part1_size + part2_size, elf_header_.e_phnum * sizeof(Elf_Phdr));
+      pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
+                                                        &program_headers_[0], part1_size));
+      pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers part 2",
+                                                        PHDR_OFFSET + part1_size,
+                                                        &program_headers_[PH_LOAD_RW_BSS + 1],
+                                                        part2_size));
+    }
     pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynamic",
                                                       dynamic_builder_.GetSection()->sh_offset,
                                                       dynamic.data(),
@@ -1175,6 +1230,12 @@ class ElfBuilder FINAL {
                               text_builder_.GetSize(), STB_GLOBAL, STT_OBJECT);
     dynsym_builder_.AddSymbol("oatlastword", &text_builder_, text_builder_.GetSize() - 4,
                               true, 4, STB_GLOBAL, STT_OBJECT);
+    if (bss_builder_.GetSize() != 0u) {
+      dynsym_builder_.AddSymbol("oatbss", &bss_builder_, 0, true,
+                                bss_builder_.GetSize(), STB_GLOBAL, STT_OBJECT);
+      dynsym_builder_.AddSymbol("oatbsslastword", &bss_builder_, bss_builder_.GetSize() - 4,
+                                true, 4, STB_GLOBAL, STT_OBJECT);
+    }
   }
 
   void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* builder,
@@ -1213,12 +1274,13 @@ class ElfBuilder FINAL {
   // What phdr is.
   static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
   enum : uint8_t {
-    PH_PHDR     = 0,
-        PH_LOAD_R__ = 1,
-        PH_LOAD_R_X = 2,
-        PH_LOAD_RW_ = 3,
-        PH_DYNAMIC  = 4,
-        PH_NUM      = 5,
+    PH_PHDR             = 0,
+    PH_LOAD_R__         = 1,
+    PH_LOAD_R_X         = 2,
+    PH_LOAD_RW_BSS      = 3,
+    PH_LOAD_RW_DYNAMIC  = 4,
+    PH_DYNAMIC          = 5,
+    PH_NUM              = 6,
   };
   static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
   Elf_Phdr program_headers_[PH_NUM];
@@ -1236,6 +1298,7 @@ class ElfBuilder FINAL {
 
   ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
   ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
+  ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> bss_builder_;
   ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
   ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
   ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
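The layout comment in the hunk above boils down to giving .bss a NOBITS LOAD RW segment: nothing is stored in the file (p_filesz is zero) while p_memsz carries the runtime size. For illustration only, and not part of this change, a small standalone C++ sketch of that program-header shape, assuming <elf.h> and the 4 KiB page size used here (kPage and RoundUpToPage are stand-ins for ART's kPageSize/RoundUp):

// Illustration only: how the .bss LOAD RW entry described above is typically populated.
#include <elf.h>
#include <cstdint>

constexpr uint32_t kPage = 4096;
constexpr uint32_t RoundUpToPage(uint32_t x) { return (x + kPage - 1) & ~(kPage - 1); }

Elf32_Phdr MakeBssPhdr(uint32_t rodata_size, uint32_t text_size, uint32_t bss_size) {
  Elf32_Phdr bss = {};
  bss.p_type   = PT_LOAD;
  bss.p_flags  = PF_R | PF_W;
  bss.p_vaddr  = RoundUpToPage(rodata_size + text_size);  // page after .rodata + .text
  bss.p_paddr  = bss.p_vaddr;
  bss.p_offset = bss.p_vaddr;   // offset == address, as in the builder above
  bss.p_filesz = 0;             // SHT_NOBITS: nothing stored in the file
  bss.p_memsz  = bss_size;      // zero-initialized memory reserved at load time
  bss.p_align  = kPage;
  return bss;
}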
index 401d5a9..a822b24 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -229,6 +229,7 @@ bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
   const OatHeader& oat_header = oat_writer->GetOatHeader();
   Elf_Word oat_data_size = oat_header.GetExecutableOffset();
   uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
+  uint32_t oat_bss_size = oat_writer->GetBssSize();
 
   OatWriterWrapper wrapper(oat_writer);
 
@@ -243,6 +244,8 @@ bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
           oat_data_size,
           oat_data_size,
           oat_exec_size,
+          RoundUp(oat_data_size + oat_exec_size, kPageSize),
+          oat_bss_size,
           compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
           debug));
 
index c588e1a..f5f9320 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -408,8 +408,8 @@ ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const
 bool ImageWriter::AllocMemory() {
   size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
   std::string error_msg;
-  image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE,
-                                    false, &error_msg));
+  image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length, PROT_READ | PROT_WRITE,
+                                    false, false, &error_msg));
   if (UNLIKELY(image_.get() == nullptr)) {
     LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
     return false;
index 8411091..c32a992 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -403,6 +403,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
     image_writer_(image_writer),
     dex_files_(&dex_files),
     size_(0u),
+    bss_size_(0u),
     oat_data_offset_(0u),
     image_file_location_oat_checksum_(image_file_location_oat_checksum),
     image_file_location_oat_begin_(image_file_location_oat_begin),
index 980611f..fd2ccae 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -96,6 +96,10 @@ class OatWriter {
     return size_;
   }
 
+  size_t GetBssSize() const {
+    return bss_size_;
+  }
+
   const std::vector<uintptr_t>& GetAbsolutePatchLocations() const {
     return absolute_patch_locations_;
   }
@@ -266,6 +270,9 @@ class OatWriter {
   // Size required for Oat data structures.
   size_t size_;
 
+  // The size of the required .bss section holding the DexCache data.
+  size_t bss_size_;
+
   // Offset of the oat data from the start of the mmapped region of the elf file.
   size_t oat_data_offset_;
 
index 3ce86d8..aab4f8b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -88,6 +88,7 @@ class OatSymbolizer FINAL : public CodeOutput {
 
     uint32_t diff = static_cast<uint32_t>(oat_file_->End() - oat_file_->Begin());
     uint32_t oat_exec_size = diff - oat_data_size;
+    uint32_t oat_bss_size = oat_file_->BssSize();
 
     elf_output_ = OS::CreateEmptyFile(output_name_.c_str());
 
@@ -100,6 +101,8 @@ class OatSymbolizer FINAL : public CodeOutput {
         oat_data_size,
         oat_data_size,
         oat_exec_size,
+        RoundUp(oat_data_size + oat_exec_size, kPageSize),
+        oat_bss_size,
         true,
         false));
 
index b3f812e..e6380bf 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -129,7 +129,7 @@ Arena::Arena(size_t size)
       next_(nullptr) {
   if (kUseMemMap) {
     std::string error_msg;
-    map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE, false,
+    map_ = MemMap::MapAnonymous("dalvik-arena", nullptr, size, PROT_READ | PROT_WRITE, false, false,
                                 &error_msg);
     CHECK(map_ != nullptr) << error_msg;
     memory_ = map_->Begin();
index a22e274..3490bcf 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1370,7 +1370,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
       reservation_name += file_->GetPath();
       std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
                                                            reserve_base_override,
-                                                           GetLoadedSize(), PROT_NONE, false,
+                                                           GetLoadedSize(), PROT_NONE, false, false,
                                                            error_msg));
       if (reserve.get() == nullptr) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
@@ -1411,32 +1411,72 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
     } else {
       flags |= MAP_PRIVATE;
     }
-    if (file_length < (program_header->p_offset + program_header->p_memsz)) {
-      *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
-                                "%d of %" PRIu64 " bytes: '%s'", file_length, i,
-                                static_cast<uint64_t>(program_header->p_offset + program_header->p_memsz),
+    if (program_header->p_filesz > program_header->p_memsz) {
+      *error_msg = StringPrintf("Invalid p_filesz > p_memsz (%" PRIu64 " > %" PRIu64 "): %s",
+                                static_cast<uint64_t>(program_header->p_filesz),
+                                static_cast<uint64_t>(program_header->p_memsz),
                                 file_->GetPath().c_str());
       return false;
     }
-    std::unique_ptr<MemMap> segment(MemMap::MapFileAtAddress(p_vaddr,
-                                                       program_header->p_memsz,
-                                                       prot, flags, file_->Fd(),
-                                                       program_header->p_offset,
-                                                       true,  // implies MAP_FIXED
-                                                       file_->GetPath().c_str(),
-                                                       error_msg));
-    if (segment.get() == nullptr) {
-      *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
-                                i, file_->GetPath().c_str(), error_msg->c_str());
+    if (program_header->p_filesz < program_header->p_memsz &&
+        !IsAligned<kPageSize>(program_header->p_filesz)) {
+      *error_msg = StringPrintf("Unsupported unaligned p_filesz < p_memsz (%" PRIu64
+                                " < %" PRIu64 "): %s",
+                                static_cast<uint64_t>(program_header->p_filesz),
+                                static_cast<uint64_t>(program_header->p_memsz),
+                                file_->GetPath().c_str());
       return false;
     }
-    if (segment->Begin() != p_vaddr) {
-      *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
-                                "instead mapped to %p",
-                                i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+    if (file_length < (program_header->p_offset + program_header->p_filesz)) {
+      *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
+                                "%d of %" PRIu64 " bytes: '%s'", file_length, i,
+                                static_cast<uint64_t>(program_header->p_offset + program_header->p_filesz),
+                                file_->GetPath().c_str());
       return false;
     }
-    segments_.push_back(segment.release());
+    if (program_header->p_filesz != 0u) {
+      std::unique_ptr<MemMap> segment(
+          MemMap::MapFileAtAddress(p_vaddr,
+                                   program_header->p_filesz,
+                                   prot, flags, file_->Fd(),
+                                   program_header->p_offset,
+                                   true,  // implies MAP_FIXED
+                                   file_->GetPath().c_str(),
+                                   error_msg));
+      if (segment.get() == nullptr) {
+        *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
+                                  i, file_->GetPath().c_str(), error_msg->c_str());
+        return false;
+      }
+      if (segment->Begin() != p_vaddr) {
+        *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
+                                  "instead mapped to %p",
+                                  i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+        return false;
+      }
+      segments_.push_back(segment.release());
+    }
+    if (program_header->p_filesz < program_header->p_memsz) {
+      std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
+                                      static_cast<uint64_t>(i), file_->GetPath().c_str());
+      std::unique_ptr<MemMap> segment(
+          MemMap::MapAnonymous(name.c_str(),
+                               p_vaddr + program_header->p_filesz,
+                               program_header->p_memsz - program_header->p_filesz,
+                               prot, false, true /* reuse */, error_msg));
+      if (segment == nullptr) {
+        *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
+                                  i, file_->GetPath().c_str(), error_msg->c_str());
+        return false;
+      }
+      if (segment->Begin() != p_vaddr) {
+        *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
+                                  "at expected address %p, instead mapped to %p",
+                                  i, file_->GetPath().c_str(), p_vaddr, segment->Begin());
+        return false;
+      }
+      segments_.push_back(segment.release());
+    }
   }
 
   // Now that we are done loading, .dynamic should be in memory to find .dynstr, .dynsym, .hash
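The new loading path above splits a PT_LOAD segment with p_filesz < p_memsz into a file-backed mapping plus an anonymous, zero-filled mapping for the remainder, which is what backs the .bss data at runtime. For illustration only (not part of this change), a minimal standalone sketch of the same technique with plain POSIX mmap, assuming the page-alignment precondition checked above:

// Illustration only: load one PT_LOAD segment whose p_memsz exceeds p_filesz.
// prot is fixed to RW here; the real loader derives it from p_flags.
#include <elf.h>
#include <sys/mman.h>
#include <cstdint>

bool MapLoadSegment(int fd, const Elf32_Phdr& ph, uint8_t* load_base) {
  uint8_t* vaddr = load_base + ph.p_vaddr;
  if (ph.p_filesz != 0u) {
    // File-backed part of the segment.
    void* m = mmap(vaddr, ph.p_filesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_FIXED, fd, ph.p_offset);
    if (m == MAP_FAILED) return false;
  }
  if (ph.p_filesz < ph.p_memsz) {
    // Zero-initialized tail (the .bss part); p_filesz must be page-aligned,
    // mirroring the check added above.
    void* m = mmap(vaddr + ph.p_filesz, ph.p_memsz - ph.p_filesz,
                   PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (m == MAP_FAILED) return false;
  }
  return true;
}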
index 72734e9..5224d64 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -236,8 +236,8 @@ class AtomicStack {
   // Size in number of elements.
   void Init() {
     std::string error_msg;
-    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(begin_[0]),
-                                        PROT_READ | PROT_WRITE, false, &error_msg));
+    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
+                                        PROT_READ | PROT_WRITE, false, false, &error_msg));
     CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
     uint8_t* addr = mem_map_->Begin();
     CHECK(addr != NULL);
index de47f60..20984fd 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -40,7 +40,8 @@ MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
       RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
-                                                       PROT_READ | PROT_WRITE, false, &error_msg));
+                                                       PROT_READ | PROT_WRITE, false, false,
+                                                       &error_msg));
   if (UNLIKELY(mem_map.get() == nullptr)) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
     return nullptr;
index ca1e7c1..ad1f192 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -62,7 +62,7 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(
       MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
-                           false, &error_msg));
+                           false, false, &error_msg));
   CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
   // don't clear the card table to avoid unnecessary pages being allocated
index 84d5da3..bb9aae7 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -37,7 +37,7 @@ class ReadBarrierTable {
               static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
     std::string error_msg;
     MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
-                                           PROT_READ | PROT_WRITE, false, &error_msg);
+                                           PROT_READ | PROT_WRITE, false, false, &error_msg);
     CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
         << "couldn't allocate read barrier table: " << error_msg;
     mem_map_.reset(mem_map);
index f5d3b47..ad8d988 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -63,7 +63,8 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
-                                                       PROT_READ | PROT_WRITE, false, &error_msg));
+                                                       PROT_READ | PROT_WRITE, false, false,
+                                                       &error_msg));
   if (UNLIKELY(mem_map.get() == nullptr)) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
     return nullptr;
index 72aacf5..f51093a 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -80,8 +80,9 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
   size_t num_of_pages = footprint_ / kPageSize;
   size_t max_num_of_pages = max_capacity_ / kPageSize;
   std::string error_msg;
-  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL, RoundUp(max_num_of_pages, kPageSize),
-                                               PROT_READ | PROT_WRITE, false, &error_msg));
+  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
+                                               RoundUp(max_num_of_pages, kPageSize),
+                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
   CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
   page_map_ = page_map_mem_map_->Begin();
   page_map_size_ = num_of_pages;
index cd63d26..8aac484 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -106,7 +106,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
   MemMap* mem_map = MemMap::MapAnonymous(
       "mark sweep sweep array free buffer", nullptr,
       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
-      PROT_READ | PROT_WRITE, false, &error_msg);
+      PROT_READ | PROT_WRITE, false, false, &error_msg);
   CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
   sweep_array_free_buffer_mem_map_.reset(mem_map);
 }
index a4bc941..9343622 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -284,7 +284,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
     // address.
     non_moving_space_mem_map.reset(
         MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
-                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
+                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
+                             &error_str));
     CHECK(non_moving_space_mem_map != nullptr) << error_str;
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
     request_begin = reinterpret_cast<uint8_t*>(300 * MB);
@@ -476,7 +477,7 @@ MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_be
                                            size_t capacity, std::string* out_error_str) {
   while (true) {
     MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
-                                       PROT_READ | PROT_WRITE, true, out_error_str);
+                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
     if (map != nullptr || request_begin == nullptr) {
       return map;
     }
index 9675ba6..fbfc449 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -29,7 +29,8 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
-                                                 PROT_READ | PROT_WRITE, true, &error_msg));
+                                                       PROT_READ | PROT_WRITE, true, false,
+                                                       &error_msg));
   if (mem_map.get() == nullptr) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
index c0c6444..7523de5 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -110,8 +110,8 @@ LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
 mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                            size_t* bytes_allocated, size_t* usable_size) {
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
-                                         PROT_READ | PROT_WRITE, true, &error_msg);
+  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
+                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
   if (UNLIKELY(mem_map == NULL)) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
     return NULL;
@@ -291,7 +291,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
-                                         PROT_READ | PROT_WRITE, true, &error_msg);
+                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
   CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
   return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
 }
@@ -305,9 +305,10 @@ FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t*
   CHECK_ALIGNED(space_capacity, kAlignment);
   const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
   std::string error_msg;
-  allocation_info_map_.reset(MemMap::MapAnonymous("large object free list space allocation info map",
-                                                  nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
-                                                  false, &error_msg));
+  allocation_info_map_.reset(
+      MemMap::MapAnonymous("large object free list space allocation info map",
+                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
+                           false, false, &error_msg));
   CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
       << error_msg;
   allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
index 9bbbb3c..67e8847 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -90,7 +90,7 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
 
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
-                                         PROT_READ | PROT_WRITE, true, &error_msg);
+                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
   if (mem_map == nullptr) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
                << PrettySize(*capacity) << ": " << error_msg;
index 2c556d9..8bb73d6 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -33,7 +33,8 @@ RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
   capacity = RoundUp(capacity, kRegionSize);
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
-                                                       PROT_READ | PROT_WRITE, true, &error_msg));
+                                                       PROT_READ | PROT_WRITE, true, false,
+                                                       &error_msg));
   if (mem_map.get() == nullptr) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
index aa2a6b5..1a3f107 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -74,7 +74,7 @@ IndirectReferenceTable::IndirectReferenceTable(size_t initialCount,
   std::string error_str;
   const size_t table_bytes = maxCount * sizeof(IrtEntry);
   table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
-                                            PROT_READ | PROT_WRITE, false, &error_str));
+                                            PROT_READ | PROT_WRITE, false, false, &error_str));
   CHECK(table_mem_map_.get() != nullptr) << error_str;
   CHECK_EQ(table_mem_map_->Size(), table_bytes);
   table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
index 8d4965e..4ae4d57 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -31,7 +31,7 @@ JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
   std::string error_str;
   // Map name specific for android_os_Debug.cpp accounting.
   MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
-                                     PROT_READ | PROT_WRITE | PROT_EXEC, false, &error_str);
+                                     PROT_READ | PROT_WRITE | PROT_EXEC, false, false, &error_str);
   if (map == nullptr) {
     std::ostringstream oss;
     oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
index 4b85469..588615f 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -138,9 +138,10 @@ uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
 #endif
 
 // Return true if the address range is contained in a single /proc/self/map entry.
-static bool ContainedWithinExistingMap(uintptr_t begin,
-                                       uintptr_t end,
+static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size,
                                        std::string* error_msg) {
+  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
+  uintptr_t end = begin + size;
   std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
   if (map.get() == nullptr) {
     *error_msg = StringPrintf("Failed to build process map");
@@ -240,7 +241,7 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
 }
 
 MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
-                             bool low_4gb, std::string* error_msg) {
+                             bool low_4gb, bool reuse, std::string* error_msg) {
 #ifndef __LP64__
   UNUSED(low_4gb);
 #endif
@@ -250,6 +251,15 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
   size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
 
   int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+  if (reuse) {
+    // reuse means it is okay that it overlaps an existing page mapping.
+    // Only use this if you actually made the page reservation yourself.
+    CHECK(expected_ptr != nullptr);
+
+    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << error_msg;
+    flags |= MAP_FIXED;
+  }
+
   ScopedFd fd(-1);
 
 #ifdef USE_ASHMEM
@@ -273,7 +283,7 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
       *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
       return nullptr;
     }
-    flags = MAP_PRIVATE;
+    flags &= ~MAP_ANONYMOUS;
   }
 #endif
 
@@ -393,8 +403,6 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int p
                                  std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
-  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
-  uintptr_t limit = expected + byte_count;
 
   // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
   // expect this mapping to be contained within an existing map.
@@ -403,7 +411,7 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int p
     // Only use this if you actually made the page reservation yourself.
     CHECK(expected_ptr != nullptr);
 
-    DCHECK(ContainedWithinExistingMap(expected, limit, error_msg));
+    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << error_msg;
     flags |= MAP_FIXED;
   } else {
     CHECK_EQ(0, flags & MAP_FIXED);
index dc337e0..11b2569 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -54,6 +54,7 @@ class MemMap {
  public:
   // Request an anonymous region of length 'byte_count' and a requested base address.
   // Use NULL as the requested base address if you don't care.
+  // "reuse" allows re-mapping an address range from an existing mapping.
   //
   // The word "anonymous" in this context means "not backed by a file". The supplied
   // 'ashmem_name' will be used -- on systems that support it -- to give the mapping
@@ -61,7 +62,7 @@ class MemMap {
   //
   // On success, returns a MemMap instance. On failure, returns null.
   static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
-                              bool low_4gb, std::string* error_msg);
+                              bool low_4gb, bool reuse, std::string* error_msg);
 
   // Map part of a file, taking care of non-page aligned offsets.  The
   // "start" offset is absolute, not relative.
index 14a72b9..f635b5d 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -43,6 +43,7 @@ class MemMapTest : public testing::Test {
                                       2 * page_size,
                                       PROT_READ | PROT_WRITE,
                                       low_4gb,
+                                      false,
                                       &error_msg);
     // Check its state and write to it.
     uint8_t* base0 = m0->Begin();
@@ -129,11 +130,12 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
   CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                             nullptr,
-                                             0,
-                                             PROT_READ,
-                                             false,
-                                             &error_msg));
+                                                   nullptr,
+                                                   0,
+                                                   PROT_READ,
+                                                   false,
+                                                   false,
+                                                   &error_msg));
   ASSERT_TRUE(map.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
@@ -141,6 +143,7 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
                                  kPageSize,
                                  PROT_READ | PROT_WRITE,
                                  false,
+                                 false,
                                  &error_msg));
   ASSERT_TRUE(map.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -151,11 +154,12 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
   CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                             nullptr,
-                                             kPageSize,
-                                             PROT_READ | PROT_WRITE,
-                                             true,
-                                             &error_msg));
+                                                   nullptr,
+                                                   kPageSize,
+                                                   PROT_READ | PROT_WRITE,
+                                                   true,
+                                                   false,
+                                                   &error_msg));
   ASSERT_TRUE(map.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
@@ -167,31 +171,34 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
   std::string error_msg;
   // Map at an address that should work, which should succeed.
   std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
-                                              reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
-                                              kPageSize,
-                                              PROT_READ | PROT_WRITE,
-                                              false,
-                                              &error_msg));
+                                                    reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    false,
+                                                    &error_msg));
   ASSERT_TRUE(map0.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
   // Map at an unspecified address, which should succeed.
   std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
-                                              nullptr,
-                                              kPageSize,
-                                              PROT_READ | PROT_WRITE,
-                                              false,
-                                              &error_msg));
+                                                    nullptr,
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    false,
+                                                    &error_msg));
   ASSERT_TRUE(map1.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   ASSERT_TRUE(map1->BaseBegin() != nullptr);
   // Attempt to map at the same address, which should fail.
   std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
-                                              reinterpret_cast<uint8_t*>(map1->BaseBegin()),
-                                              kPageSize,
-                                              PROT_READ | PROT_WRITE,
-                                              false,
-                                              &error_msg));
+                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    false,
+                                                    &error_msg));
   ASSERT_TRUE(map2.get() == nullptr) << error_msg;
   ASSERT_TRUE(!error_msg.empty());
 }
@@ -217,6 +224,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
                                                      0x21000000,
                                                      PROT_READ | PROT_WRITE,
                                                      true,
+                                                     false,
                                                      &error_msg));
     ASSERT_TRUE(map.get() != nullptr) << error_msg;
     ASSERT_TRUE(error_msg.empty());
@@ -230,11 +238,12 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
   uintptr_t ptr = 0;
   ptr -= kPageSize;  // Now it's close to the top.
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
-                                             reinterpret_cast<uint8_t*>(ptr),
-                                             2 * kPageSize,  // brings it over the top.
-                                             PROT_READ | PROT_WRITE,
-                                             false,
-                                             &error_msg));
+                                                   reinterpret_cast<uint8_t*>(ptr),
+                                                   2 * kPageSize,  // brings it over the top.
+                                                   PROT_READ | PROT_WRITE,
+                                                   false,
+                                                   false,
+                                                   &error_msg));
   ASSERT_EQ(nullptr, map.get());
   ASSERT_FALSE(error_msg.empty());
 }
@@ -243,12 +252,14 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
 TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
-                                             reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
-                                             kPageSize,
-                                             PROT_READ | PROT_WRITE,
-                                             true,
-                                             &error_msg));
+  std::unique_ptr<MemMap> map(
+      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
+                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
+                           kPageSize,
+                           PROT_READ | PROT_WRITE,
+                           true,
+                           false,
+                           &error_msg));
   ASSERT_EQ(nullptr, map.get());
   ASSERT_FALSE(error_msg.empty());
 }
@@ -257,16 +268,40 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
   CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
-                                             reinterpret_cast<uint8_t*>(0xF0000000),
-                                             0x20000000,
-                                             PROT_READ | PROT_WRITE,
-                                             true,
-                                             &error_msg));
+                                                   reinterpret_cast<uint8_t*>(0xF0000000),
+                                                   0x20000000,
+                                                   PROT_READ | PROT_WRITE,
+                                                   true,
+                                                   false,
+                                                   &error_msg));
   ASSERT_EQ(nullptr, map.get());
   ASSERT_FALSE(error_msg.empty());
 }
 #endif
 
+TEST_F(MemMapTest, MapAnonymousReuse) {
+  CommonInit();
+  std::string error_msg;
+  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
+                                                   nullptr,
+                                                   0x20000,
+                                                   PROT_READ | PROT_WRITE,
+                                                   false,
+                                                   false,
+                                                   &error_msg));
+  ASSERT_NE(nullptr, map.get());
+  ASSERT_TRUE(error_msg.empty());
+  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
+                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
+                                                    0x10000,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    true,
+                                                    &error_msg));
+  ASSERT_NE(nullptr, map2.get());
+  ASSERT_TRUE(error_msg.empty());
+}
+
 TEST_F(MemMapTest, CheckNoGaps) {
   CommonInit();
   std::string error_msg;
@@ -277,6 +312,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
                                                    kPageSize * kNumPages,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
+                                                   false,
                                                    &error_msg));
   ASSERT_TRUE(map.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -292,6 +328,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
                                                     kPageSize,
                                                     PROT_READ | PROT_WRITE,
                                                     false,
+                                                    false,
                                                     &error_msg));
   ASSERT_TRUE(map0.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -300,6 +337,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
                                                     kPageSize,
                                                     PROT_READ | PROT_WRITE,
                                                     false,
+                                                    false,
                                                     &error_msg));
   ASSERT_TRUE(map1.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -308,6 +346,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
                                                     kPageSize,
                                                     PROT_READ | PROT_WRITE,
                                                     false,
+                                                    false,
                                                     &error_msg));
   ASSERT_TRUE(map2.get() != nullptr) << error_msg;
   ASSERT_TRUE(error_msg.empty());
index 739d62c..356e3d2 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -52,17 +52,7 @@ OatFile* OatFile::OpenWithElfFile(ElfFile* elf_file,
   CHECK(has_section);
   oat_file->begin_ = elf_file->Begin() + offset;
   oat_file->end_ = elf_file->Begin() + size + offset;
-  return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
-}
-
-OatFile* OatFile::OpenMemory(std::vector<uint8_t>& oat_contents,
-                             const std::string& location,
-                             std::string* error_msg) {
-  CHECK(!oat_contents.empty()) << location;
-  CheckLocation(location);
-  std::unique_ptr<OatFile> oat_file(new OatFile(location, false));
-  oat_file->begin_ = &oat_contents[0];
-  oat_file->end_ = &oat_contents[oat_contents.size()];
+  // Ignore the optional .bss section when opening non-executable.
   return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
 }
 
@@ -108,18 +98,6 @@ OatFile* OatFile::OpenReadable(File* file, const std::string& location, std::str
   return OpenElfFile(file, location, nullptr, nullptr, false, false, error_msg);
 }
 
-OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
-                             const std::string& location,
-                             uint8_t* requested_base,
-                             std::string* error_msg) {
-  std::unique_ptr<OatFile> oat_file(new OatFile(location, true));
-  bool success = oat_file->Dlopen(elf_filename, requested_base, error_msg);
-  if (!success) {
-    return nullptr;
-  }
-  return oat_file.release();
-}
-
 OatFile* OatFile::OpenElfFile(File* file,
                               const std::string& location,
                               uint8_t* requested_base,
@@ -138,8 +116,8 @@ OatFile* OatFile::OpenElfFile(File* file,
 }
 
 OatFile::OatFile(const std::string& location, bool is_executable)
-    : location_(location), begin_(NULL), end_(NULL), is_executable_(is_executable),
-      dlopen_handle_(NULL),
+    : location_(location), begin_(NULL), end_(NULL), bss_begin_(nullptr), bss_end_(nullptr),
+      is_executable_(is_executable), dlopen_handle_(NULL),
       secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
   CHECK(!location_.empty());
 }
@@ -151,43 +129,6 @@ OatFile::~OatFile() {
   }
 }
 
-bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
-                     std::string* error_msg) {
-  char* absolute_path = realpath(elf_filename.c_str(), NULL);
-  if (absolute_path == NULL) {
-    *error_msg = StringPrintf("Failed to find absolute path for '%s'", elf_filename.c_str());
-    return false;
-  }
-  dlopen_handle_ = dlopen(absolute_path, RTLD_NOW);
-  free(absolute_path);
-  if (dlopen_handle_ == NULL) {
-    *error_msg = StringPrintf("Failed to dlopen '%s': %s", elf_filename.c_str(), dlerror());
-    return false;
-  }
-  begin_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatdata"));
-  if (begin_ == NULL) {
-    *error_msg = StringPrintf("Failed to find oatdata symbol in '%s': %s", elf_filename.c_str(),
-                              dlerror());
-    return false;
-  }
-  if (requested_base != NULL && begin_ != requested_base) {
-    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
-    *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
-                              "oatdata=%p != expected=%p. See process maps in the log.",
-                              begin_, requested_base);
-    return false;
-  }
-  end_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatlastword"));
-  if (end_ == NULL) {
-    *error_msg = StringPrintf("Failed to find oatlastword symbol in '%s': %s", elf_filename.c_str(),
-                              dlerror());
-    return false;
-  }
-  // Readjust to be non-inclusive upper bound.
-  end_ += sizeof(uint32_t);
-  return Setup(error_msg);
-}
-
 bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file_begin,
                           bool writable, bool executable,
                           std::string* error_msg) {
@@ -222,6 +163,23 @@ bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file
   }
   // Readjust to be non-inclusive upper bound.
   end_ += sizeof(uint32_t);
+
+  bss_begin_ = elf_file_->FindDynamicSymbolAddress("oatbss");
+  if (bss_begin_ == nullptr) {
+    // No .bss section. Clear dlerror().
+    bss_end_ = nullptr;
+    dlerror();
+  } else {
+    bss_end_ = elf_file_->FindDynamicSymbolAddress("oatbsslastword");
+    if (bss_end_ == nullptr) {
+      *error_msg = StringPrintf("Failed to find oatbasslastword symbol in '%s'",
+                                file->GetPath().c_str());
+      return false;
+    }
+    // Readjust to be non-inclusive upper bound.
+    bss_end_ += sizeof(uint32_t);
+  }
+
   return Setup(error_msg);
 }
 
@@ -363,6 +321,14 @@ const uint8_t* OatFile::End() const {
   return end_;
 }
 
+const uint8_t* OatFile::BssBegin() const {
+  return bss_begin_;
+}
+
+const uint8_t* OatFile::BssEnd() const {
+  return bss_end_;
+}
+
 const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
                                                   const uint32_t* dex_location_checksum,
                                                   bool warn_if_not_found) const {
index 5e68439..564185c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -62,11 +62,6 @@ class OatFile {
   // Opens an oat file from an already opened File. Maps it PROT_READ, MAP_PRIVATE.
   static OatFile* OpenReadable(File* file, const std::string& location, std::string* error_msg);
 
-  // Open an oat file backed by a std::vector with the given location.
-  static OatFile* OpenMemory(std::vector<uint8_t>& oat_contents,
-                             const std::string& location,
-                             std::string* error_msg);
-
   ~OatFile();
 
   bool IsExecutable() const {
@@ -274,17 +269,19 @@ class OatFile {
     return End() - Begin();
   }
 
+  size_t BssSize() const {
+    return BssEnd() - BssBegin();
+  }
+
   const uint8_t* Begin() const;
   const uint8_t* End() const;
 
+  const uint8_t* BssBegin() const;
+  const uint8_t* BssEnd() const;
+
  private:
   static void CheckLocation(const std::string& location);
 
-  static OatFile* OpenDlopen(const std::string& elf_filename,
-                             const std::string& location,
-                             uint8_t* requested_base,
-                             std::string* error_msg);
-
   static OatFile* OpenElfFile(File* file,
                               const std::string& location,
                               uint8_t* requested_base,
@@ -294,7 +291,6 @@ class OatFile {
                               std::string* error_msg);
 
   explicit OatFile(const std::string& filename, bool executable);
-  bool Dlopen(const std::string& elf_filename, uint8_t* requested_base, std::string* error_msg);
   bool ElfFileOpen(File* file, uint8_t* requested_base,
                    uint8_t* oat_file_begin,  // Override where the file is loaded to if not null
                    bool writable, bool executable,
@@ -312,6 +308,12 @@ class OatFile {
   // Pointer to end of oat region for bounds checking.
   const uint8_t* end_;
 
+  // Pointer to the .bss section, if present, otherwise nullptr.
+  const uint8_t* bss_begin_;
+
+  // Pointer to the end of the .bss section, if present, otherwise nullptr.
+  const uint8_t* bss_end_;
+
   // Was this oat_file loaded executable?
   const bool is_executable_;
 
index 587eb32..2a82285 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -31,7 +31,7 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
       name_(name) {
   std::string error_msg;
   stack_.reset(MemMap::MapAnonymous(name.c_str(), nullptr, stack_size, PROT_READ | PROT_WRITE,
-                                    false, &error_msg));
+                                    false, false, &error_msg));
   CHECK(stack_.get() != nullptr) << error_msg;
   const char* reason = "new thread pool worker thread";
   pthread_attr_t attr;
index 63bfc44..ffab674 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -57,7 +57,8 @@ MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_fi
   name += zip_filename;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
                                                    NULL, GetUncompressedLength(),
-                                                   PROT_READ | PROT_WRITE, false, error_msg));
+                                                   PROT_READ | PROT_WRITE, false, false,
+                                                   error_msg));
   if (map.get() == nullptr) {
     DCHECK(!error_msg->empty());
     return nullptr;