
Add valgrind support to large object map space.
author     Mathieu Chartier <mathieuc@google.com>
           Wed, 26 Mar 2014 19:53:19 +0000 (12:53 -0700)
committer  Mathieu Chartier <mathieuc@google.com>
           Wed, 26 Mar 2014 23:15:19 +0000 (16:15 -0700)
Added valgrind support to large object map space.

Bug: 7392044
Change-Id: I1456f46414e1fa59ebcc2190ec00576dae26d623
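
For context on the scheme the diffs below implement: when the runtime is running
under Valgrind, each large object is allocated with one inaccessible page on
either side, so any access that strays past the object's bounds is reported
immediately. A minimal, self-contained sketch of that idea, assuming plain mmap
in place of ART's MemMap and using hypothetical helper names:

    // redzone_sketch.cc - illustration only, not ART code.
    #include <valgrind/memcheck.h>
    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    static constexpr size_t kRedZoneBytes = 4096;  // One page, like kValgrindRedZoneBytes.

    // Returns a num_bytes payload framed by two poisoned redzone pages.
    void* AllocWithRedZones(size_t num_bytes) {
      const size_t total = num_bytes + 2 * kRedZoneBytes;
      void* base = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) {
        return nullptr;  // Bail out on failure before poisoning anything.
      }
      uint8_t* payload = static_cast<uint8_t*>(base) + kRedZoneBytes;
      // Valgrind will now flag any read or write that touches a redzone.
      VALGRIND_MAKE_MEM_NOACCESS(base, kRedZoneBytes);
      VALGRIND_MAKE_MEM_NOACCESS(payload + num_bytes, kRedZoneBytes);
      return payload;
    }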

runtime/gc/heap.cc
runtime/gc/space/large_object_space.cc
runtime/gc/space/large_object_space.h

diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7827261..02e7e3f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -708,9 +708,11 @@ Heap::~Heap() {
   allocation_stack_->Reset();
   live_stack_->Reset();
   STLDeleteValues(&mod_union_tables_);
+  STLDeleteValues(&remembered_sets_);
   STLDeleteElements(&continuous_spaces_);
   STLDeleteElements(&discontinuous_spaces_);
   delete gc_complete_lock_;
+  delete heap_trim_request_lock_;
   VLOG(heap) << "Finished ~Heap()";
 }
 
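An aside on the heap.cc hunk above: it plugs two shutdown leaks by also deleting
the remembered sets and the heap-trim lock in ~Heap(). STLDeleteValues is ART's
stl_util helper; a sketch of its assumed behavior (delete every mapped value,
then clear the container):

    // Sketch of what an STLDeleteValues-style helper does (assumed semantics,
    // for illustration; not ART's exact implementation).
    template <typename Map>
    void DeleteValuesSketch(Map* m) {
      for (auto& kv : *m) {
        delete kv.second;  // Free each owned value...
      }
      m->clear();          // ...then drop the now-dangling pointers.
    }
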
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 1ca132e..2fc67ec 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -29,6 +29,50 @@ namespace art {
 namespace gc {
 namespace space {
 
+class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+ public:
+  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
+  }
+
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                size_t* usable_size) OVERRIDE {
+    mirror::Object* obj =
+        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
+                                   usable_size);
+    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
+    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
+    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
+                               kValgrindRedZoneBytes);
+    if (usable_size != nullptr) {
+      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
+    }
+    return object_without_rdz;
+  }
+
+  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+    return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
+  }
+
+  virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
+    return LargeObjectMapSpace::Free(self, object_with_rdz);
+  }
+
+  bool Contains(const mirror::Object* obj) const OVERRIDE {
+    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+    return LargeObjectMapSpace::Contains(object_with_rdz);
+  }
+
+ private:
+  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
+};
+
 void LargeObjectSpace::SwapBitmaps() {
   live_objects_.swap(mark_objects_);
   // Swap names to get more descriptive diagnostics.
@@ -53,7 +97,11 @@ LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
       lock_("large object map space lock", kAllocSpaceLock) {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
-  return new LargeObjectMapSpace(name);
+  if (RUNNING_ON_VALGRIND > 0) {
+    return new ValgrindLargeObjectMapSpace(name);
+  } else {
+    return new LargeObjectMapSpace(name);
+  }
 }
 
 mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
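
Two details worth noting in the hunks above: RUNNING_ON_VALGRIND (from
valgrind/valgrind.h) evaluates to non-zero only when the process is actually
running under Valgrind, so the redzone-adding subclass costs nothing in normal
runs; and Free() re-exposes the whole mapping with VALGRIND_MAKE_MEM_UNDEFINED
before delegating, so the parent class can walk the memory without triggering
reports. The matching teardown step for the sketch in the commit message above
(same includes and kRedZoneBytes as the allocation sketch):

    // Companion to AllocWithRedZones() - illustration only.
    void FreeWithRedZones(void* payload, size_t num_bytes) {
      uint8_t* base = static_cast<uint8_t*>(payload) - kRedZoneBytes;
      const size_t total = num_bytes + 2 * kRedZoneBytes;
      // Make the mapping addressable-but-undefined again before unmapping,
      // mirroring the VALGRIND_MAKE_MEM_UNDEFINED call in Free() above.
      VALGRIND_MAKE_MEM_UNDEFINED(base, total);
      munmap(base, total);
    }
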
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b1b0c3c..eb01325 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -91,7 +91,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 };
 
 // A discontinuous large object space implemented by individual mmap/munmap calls.
-class LargeObjectMapSpace FINAL : public LargeObjectSpace {
+class LargeObjectMapSpace : public LargeObjectSpace {
  public:
   // Creates a large object space. Allocations into the large object space use memory maps instead
   // of malloc.
@@ -106,7 +106,7 @@ class LargeObjectMapSpace FINAL : public LargeObjectSpace {
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
- private:
+ protected:
   explicit LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
 
@@ -115,7 +115,7 @@ class LargeObjectMapSpace FINAL : public LargeObjectSpace {
   std::vector<mirror::Object*,
       accounting::GcAllocator<mirror::Object*> > large_objects_ GUARDED_BY(lock_);
   typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
-      accounting::GcAllocator<std::pair<const mirror::Object*, MemMap*> > > MemMaps;
+      accounting::GcAllocator<std::pair<mirror::Object*, MemMap*> > > MemMaps;
   MemMaps mem_maps_ GUARDED_BY(lock_);
 };
 
@@ -150,7 +150,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
 
   void Dump(std::ostream& os) const;
 
- private:
+ protected:
   static const size_t kAlignment = kPageSize;
 
   class AllocationHeader {
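
The header hunks above exist to make the subclassing legal: FINAL comes off
LargeObjectMapSpace because a final class cannot be derived from, and its
constructor moves from private to protected so the Valgrind subclass can invoke
it; FreeListSpace's members get the same private-to-protected change. The
MemMaps typedef is also corrected so the allocator's pair type matches the
map's actual key type (mirror::Object* rather than const mirror::Object*). A
two-line illustration of the FINAL rule:

    class Sealed final {};            // 'final' forbids derivation;
    // class Sub : public Sealed {};  // this line would not compile.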