Fix valgrind tests: mark allocated space as defined.
author     Vladimir Marko <vmarko@google.com>
           Mon, 25 Apr 2016 18:40:34 +0000 (19:40 +0100)
committer  Vladimir Marko <vmarko@google.com>
           Mon, 25 Apr 2016 19:28:54 +0000 (20:28 +0100)
Follow-up to
    https://android-review.googlesource.com/219700

Bug: 28173563
Bug: 28256882
Change-Id: I4e8e8d3202fd01ab48d86b3a6b92302524df66bb
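
For context: when running under a memory tool, ART's arena allocator pads each allocation with a red zone and uses Memcheck client requests to control which bytes the tool treats as accessible and initialized. Below is a minimal sketch of that pattern using the raw Valgrind macros rather than ART's MEMORY_TOOL_* wrappers; ToyArena is a hypothetical stand-in, not the actual ArenaAllocator:

    #include <valgrind/memcheck.h>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical bump-pointer arena, heavily simplified from the ART design.
    struct ToyArena {
      alignas(8) uint8_t buffer[4096] = {};  // Pre-zeroed backing storage.
      uint8_t* ptr = buffer;

      void* Alloc(size_t bytes) {
        constexpr size_t kRedZoneBytes = 8;
        // Round up to keep 8-byte alignment (no capacity check: sketch only).
        size_t rounded = (bytes + kRedZoneBytes + 7u) & ~size_t(7u);
        uint8_t* ret = ptr;
        ptr += rounded;
        // Keep the red zone and padding inaccessible so that an overrun of
        // this allocation is reported as an invalid access.
        VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded - bytes);
        // The returned bytes are pre-zeroed, so mark them defined. Skipping
        // this on some path is exactly the class of bug this commit fixes.
        VALGRIND_MAKE_MEM_DEFINED(ret, bytes);
        return ret;
      }
    };

Outside Valgrind the client-request macros are effectively no-ops, so the same binary runs unchanged without the tool.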

runtime/base/arena_allocator.cc
runtime/base/arena_allocator.h
runtime/base/arena_allocator_test.cc

diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 5b6801f..b84e29f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -321,24 +321,26 @@ void* ArenaAllocator::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
   // and padding between allocations marked as inaccessible.
   size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
   ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
+  uint8_t* ret;
   if (UNLIKELY(rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
-    void* ret = AllocFromNewArena(rounded_bytes);
+    ret = AllocFromNewArena(rounded_bytes);
+    uint8_t* noaccess_begin = ret + bytes;
+    uint8_t* noaccess_end;
     if (ret == arena_head_->Begin()) {
       DCHECK(ptr_ - rounded_bytes == ret);
-      uint8_t* noaccess_begin = ptr_ - rounded_bytes + bytes;
-      MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, end_ - noaccess_begin);
+      noaccess_end = end_;
     } else {
       // We're still using the old arena but `ret` comes from a new one just after it.
       DCHECK(arena_head_->next_ != nullptr);
       DCHECK(ret == arena_head_->next_->Begin());
       DCHECK_EQ(rounded_bytes, arena_head_->next_->GetBytesAllocated());
-      uint8_t* noaccess_begin = arena_head_->next_->Begin() + bytes;
-      MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, arena_head_->next_->End() - noaccess_begin);
+      noaccess_end = arena_head_->next_->End();
     }
-    return ret;
+    MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, noaccess_end - noaccess_begin);
+  } else {
+    ret = ptr_;
+    ptr_ += rounded_bytes;
   }
-  uint8_t* ret = ptr_;
-  ptr_ += rounded_bytes;
   MEMORY_TOOL_MAKE_DEFINED(ret, bytes);
   // Check that the memory is already zeroed out.
   DCHECK(std::all_of(ret, ret + bytes, [](uint8_t val) { return val == 0u; }));
@@ -351,7 +353,7 @@ ArenaAllocator::~ArenaAllocator() {
   pool_->FreeArenaChain(arena_head_);
 }
 
-void* ArenaAllocator::AllocFromNewArena(size_t bytes) {
+uint8_t* ArenaAllocator::AllocFromNewArena(size_t bytes) {
   Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, bytes));
   DCHECK(new_arena != nullptr);
   DCHECK_LE(bytes, new_arena->Size());
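
Read as a whole, the restructured allocation path looks roughly like this; a consolidation of the first hunk above, with the slow-path DCHECKs elided. The point of the change is that both branches now fall through to MEMORY_TOOL_MAKE_DEFINED, whereas the old slow path returned early and skipped it:

    void* ArenaAllocator::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
      size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
      ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
      uint8_t* ret;
      if (UNLIKELY(rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
        // Slow path: the current arena is full, take space from a new one.
        ret = AllocFromNewArena(rounded_bytes);
        uint8_t* noaccess_begin = ret + bytes;
        uint8_t* noaccess_end =
            (ret == arena_head_->Begin()) ? end_ : arena_head_->next_->End();
        MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, noaccess_end - noaccess_begin);
      } else {
        // Fast path: bump-pointer allocation within the current arena.
        ret = ptr_;
        ptr_ += rounded_bytes;
      }
      MEMORY_TOOL_MAKE_DEFINED(ret, bytes);  // Now reached unconditionally.
      // Check that the memory is already zeroed out.
      DCHECK(std::all_of(ret, ret + bytes, [](uint8_t val) { return val == 0u; }));
      return ret;
    }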
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 4b745f0..6c1a898 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -364,7 +364,7 @@ class ArenaAllocator
 
  private:
   void* AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind);
-  void* AllocFromNewArena(size_t bytes);
+  uint8_t* AllocFromNewArena(size_t bytes);
 
   static constexpr size_t kAlignment = 8;
 
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index c07c0ed..9de3cc4 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -41,6 +41,28 @@ TEST_F(ArenaAllocatorTest, Test) {
   EXPECT_EQ(2U, bv.GetStorageSize());
 }
 
+TEST_F(ArenaAllocatorTest, MakeDefined) {
+  // Regression test to make sure we mark the allocated area defined.
+  ArenaPool pool;
+  static constexpr size_t kSmallArraySize = 10;
+  static constexpr size_t kLargeArraySize = 50;
+  uint32_t* small_array;
+  {
+    // Allocate a small array from an arena and release it.
+    ArenaAllocator arena(&pool);
+    small_array = arena.AllocArray<uint32_t>(kSmallArraySize);
+    ASSERT_EQ(0u, small_array[kSmallArraySize - 1u]);
+  }
+  {
+    // Reuse the previous arena and allocate more than previous allocation including red zone.
+    ArenaAllocator arena(&pool);
+    uint32_t* large_array = arena.AllocArray<uint32_t>(kLargeArraySize);
+    ASSERT_EQ(0u, large_array[kLargeArraySize - 1u]);
+    // Verify that the allocation was made on the same arena.
+    ASSERT_EQ(small_array, large_array);
+  }
+}
+
 TEST_F(ArenaAllocatorTest, LargeAllocations) {
   {
     ArenaPool pool;
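
Note that the new MakeDefined test only fails under a memory tool: before the fix, memory in the arena reused from the pool was left marked inaccessible or undefined past the first allocation, so the read of large_array[kLargeArraySize - 1u] trips Memcheck even though a plain run passes. A hypothetical standalone invocation (the test binary name is illustrative; ART's actual test runner differs):

    valgrind --error-exitcode=1 ./arena_allocator_test \
        --gtest_filter=ArenaAllocatorTest.MakeDefined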