Fix memory ordering issues; document IMemory peculiarities
author    Hans Boehm <hboehm@google.com>    Wed, 13 Jul 2016 01:05:49 +0000 (18:05 -0700)
committer Hans Boehm <hboehm@google.com>    Wed, 24 Aug 2016 22:13:39 +0000 (15:13 -0700)
Convert to standard atomics.

Correct mHeapId memory ordering. Required acquire ordering on loads
was missing in several places.
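
For illustration only (the type and values below are invented, not taken from IMemory.cpp), the pattern being fixed is double-checked lazy initialization: the publishing store needs release ordering and the first, unlocked load needs acquire ordering, otherwise a reader that sees the published value may still observe stale dependent fields:

    // Minimal sketch of the acquire/release pairing; all names are hypothetical.
    #include <atomic>
    #include <mutex>

    struct LazyFd {
        std::atomic<int> fd{-1};      // -1 means "not yet initialized"
        std::mutex lock;
        int payload = 0;              // stands in for mBase/mSize/mOffset

        int get() {
            int f = fd.load(std::memory_order_acquire);      // acquire: makes payload visible
            if (f == -1) {
                std::lock_guard<std::mutex> guard(lock);
                f = fd.load(std::memory_order_relaxed);      // relaxed is enough under the lock
                if (f == -1) {
                    payload = 42;                             // write dependent state first
                    f = 3;                                    // stand-in for a real dup()/mmap() result
                    fd.store(f, std::memory_order_release);   // release: publish fd after payload
                }
            }
            return f;
        }
    };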

Remove atomic updates to count, since it is only updated with lock
held. (And would be missing fences if this were not true.)
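
A sketch of the resulting invariant (names invented, not the patch itself): once every read and write of the counter happens with the same mutex held, a plain int is sufficient, because the mutex already orders the accesses.

    // Hypothetical reference-count sketch: the mutex, not an atomic, provides ordering.
    #include <mutex>

    struct CachedEntry {
        std::mutex lock;   // every access to count is made with this held
        int count = 0;     // plain int; no atomic read-modify-write needed

        void retain() {
            std::lock_guard<std::mutex> guard(lock);
            ++count;
        }
        bool release() {   // returns true when the entry should be freed
            std::lock_guard<std::mutex> guard(lock);
            return --count == 0;
        }
    };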

Document the peculiar use of copy-on-write vectors in a context in
which copy-on-write is unsafe.
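
The diff below only documents the hazard; the TODO it adds suggests moving to a standard container. A rough sketch of that direction (hypothetical types, not part of the patch) keeps the cache behind the same mutex in a std::map, whose entries are never shared copy-on-write:

    // Sketch of the TODO's suggestion; HeapEntry and the int key are stand-ins.
    #include <map>
    #include <mutex>

    struct HeapCacheSketch {
        struct HeapEntry { int heapFd = -1; int count = 0; };

        std::mutex lock;                  // protects the whole map, as mHeapCacheLock does
        std::map<int, HeapEntry> cache;   // std::map never shares storage between copies

        void retain(int key) {
            std::lock_guard<std::mutex> guard(lock);
            ++cache[key].count;           // safe: this entry cannot alias another cache's entry
        }
    };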

While we're here, consistently check dup() for errors.
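
The pattern is visible in the hunks below; reduced to a standalone sketch (the helper name is invented), it is simply: check dup() for -1 and log errno before storing the descriptor.

    // Hypothetical helper mirroring the error check added around dup().
    #include <unistd.h>
    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    static int dup_checked(int fd) {
        int copy = dup(fd);
        if (copy == -1) {
            fprintf(stderr, "cannot dup fd=%d: %s\n", fd, strerror(errno));
        }
        return copy;   // callers still see -1 and must handle the failure
    }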

Bug: 28816986

Merged-in: I05b9f96e3867fa2e0abe6f319be8c56b89624c41

Change-Id: I05b9f96e3867fa2e0abe6f319be8c56b89624c41

libs/binder/IMemory.cpp

index 55ad6cc..790fa8c 100644
@@ -16,6 +16,7 @@
 
 #define LOG_TAG "IMemory"
 
+#include <atomic>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -29,7 +30,6 @@
 #include <cutils/log.h>
 #include <utils/KeyedVector.h>
 #include <utils/threads.h>
-#include <utils/Atomic.h>
 #include <binder/Parcel.h>
 #include <utils/CallStack.h>
 
@@ -56,12 +56,15 @@ private:
     struct heap_info_t {
         sp<IMemoryHeap> heap;
         int32_t         count;
+        // Note that this cannot be meaningfully copied.
     };
 
     void free_heap(const wp<IBinder>& binder);
 
-    Mutex mHeapCacheLock;
+    Mutex mHeapCacheLock;  // Protects entire vector below.
     KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
+    // We do not use the copy-on-write capabilities of KeyedVector.
+    // TODO: Reimplement based on standard C++ container?
 };
 
 static sp<HeapCache> gHeapCache = new HeapCache();
@@ -105,7 +108,7 @@ private:
     void assertMapped() const;
     void assertReallyMapped() const;
 
-    mutable volatile int32_t mHeapId;
+    mutable std::atomic<int32_t> mHeapId;
     mutable void*       mBase;
     mutable size_t      mSize;
     mutable uint32_t    mFlags;
@@ -248,8 +251,9 @@ BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
 }
 
 BpMemoryHeap::~BpMemoryHeap() {
-    if (mHeapId != -1) {
-        close(mHeapId);
+    int32_t heapId = mHeapId.load(memory_order_relaxed);
+    if (heapId != -1) {
+        close(heapId);
         if (mRealHeap) {
             // by construction we're the last one
             if (mBase != MAP_FAILED) {
@@ -257,7 +261,7 @@ BpMemoryHeap::~BpMemoryHeap() {
 
                 if (VERBOSE) {
                     ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
-                            binder.get(), this, mSize, mHeapId);
+                            binder.get(), this, mSize, heapId);
                     CallStack stack(LOG_TAG);
                 }
 
@@ -273,17 +277,21 @@ BpMemoryHeap::~BpMemoryHeap() {
 
 void BpMemoryHeap::assertMapped() const
 {
-    if (mHeapId == -1) {
+    int32_t heapId = mHeapId.load(memory_order_acquire);
+    if (heapId == -1) {
         sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
         sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
         heap->assertReallyMapped();
         if (heap->mBase != MAP_FAILED) {
             Mutex::Autolock _l(mLock);
-            if (mHeapId == -1) {
+            if (mHeapId.load(memory_order_relaxed) == -1) {
                 mBase   = heap->mBase;
                 mSize   = heap->mSize;
                 mOffset = heap->mOffset;
-                android_atomic_write( dup( heap->mHeapId ), &mHeapId );
+                int fd = dup(heap->mHeapId.load(memory_order_relaxed));
+                ALOGE_IF(fd==-1, "cannot dup fd=%d",
+                        heap->mHeapId.load(memory_order_relaxed));
+                mHeapId.store(fd, memory_order_release);
             }
         } else {
             // something went wrong
@@ -294,7 +302,8 @@ void BpMemoryHeap::assertMapped() const
 
 void BpMemoryHeap::assertReallyMapped() const
 {
-    if (mHeapId == -1) {
+    int32_t heapId = mHeapId.load(memory_order_acquire);
+    if (heapId == -1) {
 
         // remote call without mLock held, worse case scenario, we end up
         // calling transact() from multiple threads, but that's not a problem,
@@ -313,7 +322,7 @@ void BpMemoryHeap::assertReallyMapped() const
                 parcel_fd, size, err, strerror(-err));
 
         Mutex::Autolock _l(mLock);
-        if (mHeapId == -1) {
+        if (mHeapId.load(memory_order_relaxed) == -1) {
             int fd = dup( parcel_fd );
             ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, err=%d (%s)",
                     parcel_fd, size, err, strerror(errno));
@@ -322,7 +331,6 @@ void BpMemoryHeap::assertReallyMapped() const
             if (!(flags & READ_ONLY)) {
                 access |= PROT_WRITE;
             }
-
             mRealHeap = true;
             mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
             if (mBase == MAP_FAILED) {
@@ -333,7 +341,7 @@ void BpMemoryHeap::assertReallyMapped() const
                 mSize = size;
                 mFlags = flags;
                 mOffset = offset;
-                android_atomic_write(fd, &mHeapId);
+                mHeapId.store(fd, memory_order_release);
             }
         }
     }
@@ -341,7 +349,8 @@ void BpMemoryHeap::assertReallyMapped() const
 
 int BpMemoryHeap::getHeapID() const {
     assertMapped();
-    return mHeapId;
+    // We either stored mHeapId ourselves, or loaded it with acquire semantics.
+    return mHeapId.load(memory_order_relaxed);
 }
 
 void* BpMemoryHeap::getBase() const {
@@ -418,9 +427,10 @@ sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
                 "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                 binder.get(), info.heap.get(),
                 static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
-                static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
+                static_cast<BpMemoryHeap*>(info.heap.get())
+                    ->mHeapId.load(memory_order_relaxed),
                 info.count);
-        android_atomic_inc(&info.count);
+        ++info.count;
         return info.heap;
     } else {
         heap_info_t info;
@@ -445,13 +455,13 @@ void HeapCache::free_heap(const wp<IBinder>& binder)
         ssize_t i = mHeapCache.indexOfKey(binder);
         if (i>=0) {
             heap_info_t& info(mHeapCache.editValueAt(i));
-            int32_t c = android_atomic_dec(&info.count);
-            if (c == 1) {
+            if (--info.count == 0) {
                 ALOGD_IF(VERBOSE,
                         "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                         binder.unsafe_get(), info.heap.get(),
                         static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
-                        static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
+                        static_cast<BpMemoryHeap*>(info.heap.get())
+                            ->mHeapId.load(memory_order_relaxed),
                         info.count);
                 rel = mHeapCache.valueAt(i).heap;
                 mHeapCache.removeItemsAt(i);
@@ -482,7 +492,7 @@ void HeapCache::dump_heaps()
         ALOGD("hey=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
                 mHeapCache.keyAt(i).unsafe_get(),
                 info.heap.get(), info.count,
-                h->mHeapId, h->mBase, h->mSize);
+                h->mHeapId.load(memory_order_relaxed), h->mBase, h->mSize);
     }
 }