
Address some comments and clean up
author     Mathieu Chartier <mathieuc@google.com>
           Tue, 4 Aug 2015 22:19:41 +0000 (15:19 -0700)
committer  Mathieu Chartier <mathieuc@google.com>
           Wed, 5 Aug 2015 01:56:54 +0000 (18:56 -0700)
Change-Id: I538cf204f1c89d5fc81f8fc5e5800fcf1cf87359

14 files changed:
compiler/image_writer.cc
runtime/barrier.h
runtime/base/mutex.h
runtime/class_table.h
runtime/debugger.cc
runtime/gc/weak_root_state.h
runtime/intern_table.cc
runtime/intern_table.h
runtime/interpreter/interpreter_common.cc
runtime/jdwp/jdwp.h
runtime/native/java_lang_Thread.cc
runtime/runtime.cc
runtime/thread.cc
runtime/thread_state.h

diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 17d75a3..953b1de 100644
@@ -719,7 +719,7 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) {
     }
     // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
     // we are guaranteed to not have GC during image writing.
-    mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+    mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
         obj->AsString());
     if (obj != interned) {
       if (!IsImageBinSlotAssigned(interned)) {
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 02f9f58..94977fb 100644
@@ -51,7 +51,7 @@ class Barrier {
   // to sleep, resulting in a deadlock.
 
   // Increment the count by delta, wait on condition if count is non zero.
-  void Increment(Thread* self, int delta) REQUIRES(!lock_);;
+  void Increment(Thread* self, int delta) REQUIRES(!lock_);
 
   // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
   // true if time out occurred.
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d0504d9..2801fb7 100644
@@ -332,36 +332,40 @@ class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
   bool IsExclusiveHeld(const Thread* self) const;
 
   // Assert the current thread has exclusive access to the ReaderWriterMutex.
-  void AssertExclusiveHeld(const Thread* self) {
+  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(IsExclusiveHeld(self)) << *this;
     }
   }
-  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }
+  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
 
   // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
-  void AssertNotExclusiveHeld(const Thread* self) {
+  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsExclusiveHeld(self)) << *this;
     }
   }
-  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }
+  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
+    AssertNotExclusiveHeld(self);
+  }
 
   // Is the current thread a shared holder of the ReaderWriterMutex.
   bool IsSharedHeld(const Thread* self) const;
 
   // Assert the current thread has shared access to the ReaderWriterMutex.
-  void AssertSharedHeld(const Thread* self) {
+  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
     if (kDebugLocking && (gAborting == 0)) {
       // TODO: we can only assert this well when self != null.
       CHECK(IsSharedHeld(self) || self == nullptr) << *this;
     }
   }
-  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
+  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+    AssertSharedHeld(self);
+  }
 
   // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
   // mode.
-  void AssertNotHeld(const Thread* self) {
+  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
     if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsSharedHeld(self)) << *this;
     }
@@ -679,6 +683,7 @@ class Locks {
 
 class Roles {
  public:
+  // Uninterruptible means that the thread may not become suspended.
   static Uninterruptible uninterruptible_;
 };
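
The ASSERT_CAPABILITY / ASSERT_SHARED_CAPABILITY annotations added above come from Clang's -Wthread-safety analysis: a method annotated this way promises the analysis that the capability is held (exclusively or shared) once the call returns, so a runtime assertion can satisfy GUARDED_BY checks without the annotated scope actually acquiring the lock. A minimal standalone sketch of the idea, using illustrative macros and names rather than ART's real ones:

    #include <cassert>
    #include <mutex>

    // Illustrative shorthands for Clang's thread-safety attributes (not ART's macros).
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define GUARDED_BY(x)        __attribute__((guarded_by(x)))
    #define ACQUIRE()            __attribute__((acquire_capability()))
    #define RELEASE()            __attribute__((release_capability()))
    #define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))

    class CAPABILITY("mutex") SimpleMutex {
     public:
      void Lock() ACQUIRE() { mu_.lock(); held_ = true; }
      void Unlock() RELEASE() { held_ = false; mu_.unlock(); }
      // Runtime check; tells the static analysis the lock is held from here on.
      void AssertHeld() ASSERT_CAPABILITY(this) { assert(held_); }
     private:
      std::mutex mu_;
      bool held_ = false;
    };

    SimpleMutex gLock;
    int gCounter GUARDED_BY(gLock) = 0;

    // The caller is expected to hold gLock. Instead of annotating REQUIRES(gLock),
    // the runtime assertion satisfies -Wthread-safety for the access below.
    void BumpAlreadyLocked() {
      gLock.AssertHeld();
      ++gCounter;
    }

    int main() {
      gLock.Lock();
      BumpAlreadyLocked();
      gLock.Unlock();
      return 0;
    }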
 
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 252a47d..4182954 100644
@@ -107,13 +107,14 @@ class ClassTable {
       return item.IsNull();
     }
   };
-  // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results
+  // hash set which hashes class descriptor, and compares descriptors and class loaders. Results
   // should be compared for a matching Class descriptor and class loader.
   typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
       ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
       ClassSet;
 
   // TODO: shard lock to have one per class loader.
+  // We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
   std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
 };
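
To illustrate the comment above: the table is a vector of sets so that the zygote can freeze the current set before forking and push a fresh one for subsequent insertions; the frozen snapshot's pages are then never written again and stay clean and shareable across processes. A simplified sketch of the idea with illustrative names, not ART's ClassTable:

    #include <string>
    #include <unordered_set>
    #include <vector>

    // Simplified model of the "vector of sets" idea (illustrative, not ART's code).
    class FreezableTable {
     public:
      FreezableTable() { sets_.emplace_back(); }

      // Called around the zygote fork: the existing set is never written to again.
      void FreezeSnapshot() { sets_.emplace_back(); }

      // All writes go to the newest (unfrozen) set, so only its pages get dirtied.
      void Insert(const std::string& descriptor) { sets_.back().insert(descriptor); }

      // Lookups still consult every snapshot, oldest first.
      bool Contains(const std::string& descriptor) const {
        for (const auto& set : sets_) {
          if (set.count(descriptor) != 0) {
            return true;
          }
        }
        return false;
      }

     private:
      std::vector<std::unordered_set<std::string>> sets_;
    };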
 
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1865516..e1e130b 100644
@@ -2094,7 +2094,7 @@ JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
     case kWaitingInMainDebuggerLoop:
     case kWaitingInMainSignalCatcherLoop:
     case kWaitingPerformingGc:
-    case kWaitingWeakRootRead:
+    case kWaitingWeakGcRootRead:
     case kWaiting:
       return JDWP::TS_WAIT;
       // Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h
index b66f19d..e3cefc4 100644
@@ -28,6 +28,8 @@ enum WeakRootState {
   // Need to wait until we can read weak roots.
   kWeakRootStateNoReadsOrWrites,
   // Need to mark new weak roots to make sure they don't get swept.
+  // kWeakRootStateMarkNewRoots is currently unused but I was planning on using it to allow
+  // adding new weak roots during the CMS reference processing phase.
   kWeakRootStateMarkNewRoots,
 };
 
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index ae521b1..2be570a 100644
@@ -90,7 +90,6 @@ mirror::String* InternTable::LookupStrong(mirror::String* s) {
 }
 
 mirror::String* InternTable::LookupWeak(mirror::String* s) {
-  // TODO: Return only if marked.
   return weak_interns_.Find(s);
 }
 
@@ -229,7 +228,7 @@ void InternTable::BroadcastForNewInterns() {
 
 void InternTable::WaitUntilAccessible(Thread* self) {
   Locks::intern_table_lock_->ExclusiveUnlock(self);
-  self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+  self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
   Locks::intern_table_lock_->ExclusiveLock(self);
   while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
     weak_intern_condition_.Wait(self);
@@ -250,24 +249,35 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool hold
     CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
   }
   while (true) {
+    if (holding_locks) {
+      if (!kUseReadBarrier) {
+        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+      } else {
+        CHECK(self->GetWeakRefAccessEnabled());
+      }
+    }
     // Check the strong table for a match.
     mirror::String* strong = LookupStrong(s);
     if (strong != nullptr) {
       return strong;
     }
+    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
+        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
+      break;
+    }
     // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
     // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
     // cleared.
-    if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
-      break;
-    }
     CHECK(!holding_locks);
     StackHandleScope<1> hs(self);
     auto h = hs.NewHandleWrapper(&s);
     WaitUntilAccessible(self);
   }
-  CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
-  DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
+  if (!kUseReadBarrier) {
+    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+  } else {
+    CHECK(self->GetWeakRefAccessEnabled());
+  }
   // There is no match in the strong table, check the weak table.
   mirror::String* weak = LookupWeak(s);
   if (weak != nullptr) {
@@ -298,7 +308,7 @@ mirror::String* InternTable::InternStrong(const char* utf8_data) {
   return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
 }
 
-mirror::String* InternTable::InternImageString(mirror::String* s) {
+mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
   // May be holding the heap bitmap lock.
   return Insert(s, true, true);
 }
@@ -319,8 +329,6 @@ bool InternTable::ContainsWeak(mirror::String* s) {
 void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
   weak_interns_.SweepWeaks(visitor);
-  // Done sweeping, back to a normal state.
-  ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
 }
 
 void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
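
The change to Insert() above relies on the protocol described in the comments: during a GC pause weak-root reads are disallowed (or, with read barriers, per-thread weak-ref access is disabled), and a mutator that needs a weak root must wait until the GC re-enables access after sweeping. A bare-bones sketch of that wait/broadcast protocol, with illustrative names rather than ART's Thread and ConditionVariable machinery:

    #include <condition_variable>
    #include <mutex>

    enum WeakRootState { kWeakRootStateNormal, kWeakRootStateNoReadsOrWrites };

    std::mutex gInternLock;                   // stands in for intern_table_lock_
    std::condition_variable gWeakAccessCond;  // stands in for weak_intern_condition_
    WeakRootState gWeakRootState = kWeakRootStateNormal;

    // Mutator side: block until weak roots may be read again.
    void WaitUntilWeakRootsAccessible() {
      std::unique_lock<std::mutex> lk(gInternLock);
      gWeakAccessCond.wait(lk, [] { return gWeakRootState != kWeakRootStateNoReadsOrWrites; });
    }

    // GC side: disallow access during the pause...
    void DisallowWeakRootAccess() {
      std::lock_guard<std::mutex> lk(gInternLock);
      gWeakRootState = kWeakRootStateNoReadsOrWrites;
    }

    // ...and re-enable it once sweeping has finished, waking any waiters.
    void AllowWeakRootAccess() {
      {
        std::lock_guard<std::mutex> lk(gInternLock);
        gWeakRootState = kWeakRootStateNormal;
      }
      gWeakAccessCond.notify_all();
    }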
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 0be6675..ae9f7a7 100644
@@ -61,8 +61,10 @@ class InternTable {
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   // Only used by image writer. Special version that may not cause thread suspension since the GC
-  // can not be running while we are doing image writing.
-  mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
+  // can not be running while we are doing image writing. May be called while holding a lock
+  // since there will not be thread suspension.
+  mirror::String* InternStrongImageString(mirror::String* s)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
   mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
@@ -184,7 +186,9 @@ class InternTable {
     UnorderedSet post_zygote_table_;
   };
 
-  // Insert if non null, otherwise return null.
+  // Insert if non null, otherwise return null. Must be called holding the mutator lock.
+  // If holding_locks is true, then we may also hold other locks; in that case the GC must not be
+  // running, since it is not safe to wait while holding locks.
   mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
       REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 9de9e8a..f923b84 100644
@@ -884,7 +884,7 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
 
 // Explicit DoCall template function declarations.
 #define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check)                      \
-  template SHARED_REQUIRES(Locks::mutator_lock_)                                          \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self,              \
                                                   ShadowFrame& shadow_frame,                    \
                                                   const Instruction* inst, uint16_t inst_data,  \
@@ -897,7 +897,7 @@ EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
 
 // Explicit DoLambdaCall template function declarations.
 #define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check)               \
-  template SHARED_REQUIRES(Locks::mutator_lock_)                                          \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                \
   bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self,        \
                                                         ShadowFrame& shadow_frame,              \
                                                         const Instruction* inst,                \
@@ -911,7 +911,7 @@ EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, true);
 
 // Explicit DoFilledNewArray template function declarations.
 #define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active)       \
-  template SHARED_REQUIRES(Locks::mutator_lock_)                                            \
+  template SHARED_REQUIRES(Locks::mutator_lock_)                                                  \
   bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst,         \
                                                                  const ShadowFrame& shadow_frame, \
                                                                  Thread* self, JValue* result)
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index f5ac9d0..ae02fe6 100644
@@ -128,6 +128,9 @@ struct JdwpState {
    * the debugger.
    *
    * Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
+   *
+   * NO_THREAD_SAFETY_ANALYSIS since we can't annotate that we do not have
+   * state->thread_start_lock_ held.
    */
   static JdwpState* Create(const JdwpOptions* options)
       REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
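
For context on the comment added above: NO_THREAD_SAFETY_ANALYSIS is needed here because the lock in question (state->thread_start_lock_) belongs to an object that only exists inside the function, so the negative requirement cannot be expressed on the declaration. A standalone sketch of that situation with made-up names, not the JDWP code:

    #include <mutex>

    // Illustrative shorthands for Clang's thread-safety attributes (not ART's macros).
    #define CAPABILITY(x)             __attribute__((capability(x)))
    #define ACQUIRE()                 __attribute__((acquire_capability()))
    #define RELEASE()                 __attribute__((release_capability()))
    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    class CAPABILITY("mutex") StartLock {
     public:
      void Acquire() ACQUIRE() { mu_.lock(); }
      void Release() RELEASE() { mu_.unlock(); }
     private:
      std::mutex mu_;
    };

    struct State {
      StartLock thread_start_lock_;
      bool started = false;
    };

    // The lock acquired here is a member of the State we are about to create, so a
    // REQUIRES(!state->thread_start_lock_) annotation cannot be written on the
    // declaration; the analysis is suppressed for the function instead.
    State* CreateState() NO_THREAD_SAFETY_ANALYSIS {
      State* state = new State();
      state->thread_start_lock_.Acquire();
      state->started = true;
      state->thread_start_lock_.Release();
      return state;
    }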
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b40d94a..7118f36 100644
@@ -90,7 +90,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
     case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
     case kWaitingForMethodTracingStart:   return kJavaWaiting;
     case kWaitingForVisitObjects:         return kJavaWaiting;
-    case kWaitingWeakRootRead:            return kJavaWaiting;
+    case kWaitingWeakGcRootRead:          return kJavaWaiting;
     case kSuspended:                      return kJavaRunnable;
     // Don't add a 'default' here so the compiler can spot incompatible enum changes.
   }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a27acb2..b08e521 100644
@@ -1516,7 +1516,7 @@ void Runtime::DisallowNewSystemWeaks() {
 
 void Runtime::AllowNewSystemWeaks() {
   monitor_list_->AllowNewMonitors();
-  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping?
+  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping.
   java_vm_->AllowNewWeakGlobals();
   heap_->AllowNewAllocationRecords();
   lambda_box_table_->AllowNewWeakBoxedLambdas();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3efad0..7433600 100644
@@ -2736,7 +2736,7 @@ void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
 size_t Thread::NumberOfHeldMutexes() const {
   size_t count = 0;
   for (BaseMutex* mu : tlsPtr_.held_mutexes) {
-    count += static_cast<size_t>(mu != nullptr);
+    count += mu != nullptr ? 1 : 0;
   }
   return count;
 }
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index c000e61..a11d213 100644
@@ -43,7 +43,7 @@ enum ThreadState {
   kWaitingForMethodTracingStart,    // WAITING        TS_WAIT      waiting for method tracing to start
   kWaitingForVisitObjects,          // WAITING        TS_WAIT      waiting for visiting objects
   kWaitingForGetObjectsAllocated,   // WAITING        TS_WAIT      waiting for getting the number of allocated objects
-  kWaitingWeakRootRead,             // WAITING        TS_WAIT      waiting to read a weak root
+  kWaitingWeakGcRootRead,           // WAITING        TS_WAIT      waiting on the GC to read a weak root
   kStarting,                        // NEW            TS_WAIT      native thread started, not yet ready to run managed code
   kNative,                          // RUNNABLE       TS_RUNNING   running in a JNI native method
   kSuspended,                       // RUNNABLE       TS_RUNNING   suspended by GC or debugger