DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrong(
- obj->AsString()->Intern());
+ // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
+ // we are guaranteed to not have GC during image writing.
+ mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+ obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
// interned obj is after us, allocate its location early
dex_instruction.h \
dex_instruction_utils.h \
gc_root.h \
- gc/allocator/rosalloc.h \
- gc/collector/gc_type.h \
gc/allocator_type.h \
+ gc/allocator/rosalloc.h \
gc/collector_type.h \
+ gc/collector/gc_type.h \
+ gc/heap.h \
gc/space/region_space.h \
gc/space/space.h \
- gc/heap.h \
+ gc/weak_root_state.h \
image.h \
instrumentation.h \
indirect_reference_table.h \
case kWaitingInMainDebuggerLoop:
case kWaitingInMainSignalCatcherLoop:
case kWaitingPerformingGc:
+ case kWaitingWeakRootRead:
case kWaiting:
return JDWP::TS_WAIT;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
void MarkSweep::SweepSystemWeaks(Thread* self) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Runtime::Current()->SweepSystemWeaks(this);
}
--- /dev/null
+++ b/runtime/gc/weak_root_state.h
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
+#define ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
+
+#include <iosfwd>
+
+namespace art {
+namespace gc {
+
+enum WeakRootState {
+ // Can read or add weak roots.
+ kWeakRootStateNormal,
+ // Need to wait until we can read weak roots.
+ kWeakRootStateNoReadsOrWrites,
+ // Need to mark new weak roots to make sure they don't get swept.
+ kWeakRootStateMarkNewRoots,
+};
+
+std::ostream& operator<<(std::ostream& os, const WeakRootState&);
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
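
Aside: the protocol this enum drives is small enough to model standalone. The sketch below uses std::mutex and std::condition_variable instead of ART's Mutex/ConditionVariable, and the WeakRootGate name is invented for illustration, so treat it as a sketch of the idea rather than runtime code. During a GC cycle the collector closes the gate at the pause, sweeps system weaks, then reopens the gate and broadcasts; mutators that need a weak root wait while the state is kWeakRootStateNoReadsOrWrites.

#include <condition_variable>
#include <mutex>

enum WeakRootState {
  kWeakRootStateNormal,
  kWeakRootStateNoReadsOrWrites,
  kWeakRootStateMarkNewRoots,
};

class WeakRootGate {
 public:
  // GC pause: forbid weak root reads until sweeping completes
  // (cf. Runtime::DisallowNewSystemWeaks).
  void Disallow() {
    std::lock_guard<std::mutex> mu(lock_);
    state_ = kWeakRootStateNoReadsOrWrites;
  }

  // After sweeping: allow reads again and wake all waiters
  // (cf. InternTable::ChangeWeakRootStateLocked and its Broadcast).
  void Allow() {
    {
      std::lock_guard<std::mutex> mu(lock_);
      state_ = kWeakRootStateNormal;
    }
    cond_.notify_all();
  }

  // Mutator: block until weak roots may be read
  // (cf. InternTable::WaitUntilAccessible).
  void WaitUntilReadable() {
    std::unique_lock<std::mutex> mu(lock_);
    cond_.wait(mu, [this] { return state_ != kWeakRootStateNoReadsOrWrites; });
  }

 private:
  std::mutex lock_;
  std::condition_variable cond_;
  WeakRootState state_ = kWeakRootStateNormal;
};

Note that kWeakRootStateMarkNewRoots is carried in the enum but not exercised here; Insert() below DCHECKs that it is unsupported.
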
#include "gc_root-inl.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
+#include "gc/weak_root_state.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
InternTable::InternTable()
: image_added_to_intern_table_(false), log_new_roots_(false),
- allow_new_interns_(true),
- new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
+ weak_intern_condition_("Weak intern condition", *Locks::intern_table_lock_),
+ weak_root_state_(gc::kWeakRootStateNormal) {
}
size_t InternTable::Size() const {
}
mirror::String* InternTable::LookupWeak(mirror::String* s) {
+ // TODO: Return only if marked.
return weak_interns_.Find(s);
}
}
}
-mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
if (image_added_to_intern_table_) {
return nullptr;
}
return nullptr;
}
-void InternTable::AllowNewInterns() {
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::intern_table_lock_);
- allow_new_interns_ = true;
- new_intern_condition_.Broadcast(self);
-}
-
-void InternTable::DisallowNewInterns() {
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::intern_table_lock_);
- allow_new_interns_ = false;
-}
-
-void InternTable::EnsureNewInternsDisallowed() {
+void InternTable::EnsureNewWeakInternsDisallowed() {
// Lock and unlock once to ensure that no threads are still in the
// middle of adding new interns.
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- CHECK(!allow_new_interns_);
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
}
void InternTable::BroadcastForNewInterns() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::intern_table_lock_);
- new_intern_condition_.Broadcast(self);
+ weak_intern_condition_.Broadcast(self);
+}
+
+void InternTable::WaitUntilAccessible(Thread* self) {
+ Locks::intern_table_lock_->ExclusiveUnlock(self);
+ self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+ Locks::intern_table_lock_->ExclusiveLock(self);
+ while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
+ weak_intern_condition_.Wait(self);
+ }
+ Locks::intern_table_lock_->ExclusiveUnlock(self);
+ self->TransitionFromSuspendedToRunnable();
+ Locks::intern_table_lock_->ExclusiveLock(self);
}
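
The ordering in WaitUntilAccessible is deliberate: the thread drops intern_table_lock_ before each thread-state transition (the GC needs that lock to flip the state and broadcast), and it stops being runnable before it blocks, because a stop-the-world pause only waits on runnable threads; a thread parked on the condition variable while still runnable would deadlock the very pause that re-enables weak root reads. A stripped-down restatement using std primitives, with SimThread and weak_reads_allowed as invented stand-ins for ART's Thread API and weak_root_state_:

#include <atomic>
#include <condition_variable>
#include <mutex>

enum class SimState { kRunnable, kWaitingWeakRootRead };
struct SimThread { std::atomic<SimState> state{SimState::kRunnable}; };

std::mutex table_lock;              // plays the role of intern_table_lock_
std::condition_variable weak_cond;  // plays the role of weak_intern_condition_
bool weak_reads_allowed = false;    // guarded by table_lock

// Precondition: lk owns table_lock (mirrors EXCLUSIVE_LOCKS_REQUIRED).
void WaitUntilAccessibleSketch(SimThread& self, std::unique_lock<std::mutex>& lk) {
  // 1) Release the table lock first: the GC needs it to change the state and
  //    broadcast, so we must not hold it across a thread-state transition.
  lk.unlock();
  // 2) Leave the runnable state *before* blocking, so the pause that will
  //    eventually re-enable weak root reads does not wait on this thread.
  self.state.store(SimState::kWaitingWeakRootRead);
  // 3) Re-take the lock and wait for the broadcast.
  lk.lock();
  weak_cond.wait(lk, [] { return weak_reads_allowed; });
  // 4) Become runnable again without holding the lock (the transition can
  //    itself block on a pause), then re-lock for the caller.
  lk.unlock();
  self.state.store(SimState::kRunnable);
  lk.lock();
}
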
-mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
+mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
if (s == nullptr) {
return nullptr;
}
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
MutexLock mu(self, *Locks::intern_table_lock_);
- while (UNLIKELY((!kUseReadBarrier && !allow_new_interns_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
- new_intern_condition_.WaitHoldingLocks(self);
+ if (kDebugLocking && !holding_locks) {
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
}
- // Check the strong table for a match.
- mirror::String* strong = LookupStrong(s);
- if (strong != nullptr) {
- return strong;
+ while (true) {
+ // Check the strong table for a match.
+ mirror::String* strong = LookupStrong(s);
+ if (strong != nullptr) {
+ return strong;
+ }
+ // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
+ // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
+ // cleared.
+ if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
+ break;
+ }
+ CHECK(!holding_locks);
+ StackHandleScope<1> hs(self);
+ auto h = hs.NewHandleWrapper(&s);
+ WaitUntilAccessible(self);
}
+ CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
+ DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
// There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
if (weak != nullptr) {
return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}
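
Easy to miss in the wait loop above: before parking, Insert() wraps s in a NewHandleWrapper. A moving GC may relocate the string while the thread waits, so the reference must sit in a slot the collector can find and fix up, with the local refreshed from it afterwards. A toy illustration of that double indirection (Obj, root_slot, and the manual fix-up are all invented; none of this is ART's Handle machinery):

#include <cstdio>

struct Obj { int value; };

Obj from_space{42};
Obj to_space{0};
Obj* root_slot = &from_space;  // a GC-visible slot, as a handle scope provides

// A pretend moving collection: copy the object and fix up the known root.
void SimulatedMovingGc() {
  to_space = *root_slot;
  root_slot = &to_space;
}

int main() {
  Obj** handle = &root_slot;   // cf. hs.NewHandleWrapper(&s): hold the slot,
                               // not a raw pointer that would go stale
  SimulatedMovingGc();         // may happen while the thread is waiting
  std::printf("%d\n", (*handle)->value);  // still sees the live copy: 42
  return 0;
}
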
+mirror::String* InternTable::InternImageString(mirror::String* s) {
+ // May be holding the heap bitmap lock.
+ return Insert(s, true, true);
+}
+
mirror::String* InternTable::InternStrong(mirror::String* s) {
- return Insert(s, true);
+ return Insert(s, true, false);
}
mirror::String* InternTable::InternWeak(mirror::String* s) {
- return Insert(s, false);
+ return Insert(s, false, false);
}
bool InternTable::ContainsWeak(mirror::String* s) {
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
weak_interns_.SweepWeaks(visitor);
+ // Done sweeping, back to a normal state.
+ ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
}
void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
return pre_zygote_table_.Size() + post_zygote_table_.Size();
}
+void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ ChangeWeakRootStateLocked(new_state);
+}
+
+void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
+ weak_root_state_ = new_state;
+ if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
+ weak_intern_condition_.Broadcast(Thread::Current());
+ }
+}
+
} // namespace art
#include <unordered_set>
+#include "atomic.h"
#include "base/allocator.h"
#include "base/hash_set.h"
#include "base/mutex.h"
#include "gc_root.h"
+#include "gc/weak_root_state.h"
#include "object_callbacks.h"
namespace art {
public:
InternTable();
- // Interns a potentially new string in the 'strong' table. (See above.)
+ // Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Interns a potentially new string in the 'strong' table. (See above.)
+ // Only used by image writer.
+ mirror::String* InternImageString(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(const char* utf8_data)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Interns a potentially new string in the 'strong' table. (See above.)
+ // Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Interns a potentially new string in the 'weak' table. (See above.)
+ // Interns a potentially new string in the 'weak' table. May cause thread suspension.
mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SweepInternTableWeaks(IsMarkedVisitor* visitor)
- void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DisallowNewInterns() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void BroadcastForNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnsureNewWeakInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Adds all of the resolved image strings from the image space into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls.
size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Change the weak root state. May broadcast to waiters.
+ void ChangeWeakRootState(gc::WeakRootState new_state)
+ LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
private:
class StringHashEquals {
public:
};
// Insert if non-null, otherwise return null.
- mirror::String* Insert(mirror::String* s, bool is_strong)
+ mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Change the weak root state. May broadcast to waiters.
+ void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+
+ // Wait until we can read weak roots.
+ void WaitUntilAccessible(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
- bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
- ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
+ ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (strong) roots, they need a read barrier to
// enable concurrent intern table (strong) root scan. Do not
// directly access the strings in it. Use functions that contain
// read barriers.
Table strong_interns_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (weak) roots, they need a read barrier. Do
// not directly access the strings in it. Use functions that contain
// read barriers.
Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
+ // Weak root state, used for concurrent system weak processing and more.
+ gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
};
} // namespace art
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
+ case kWaitingWeakRootRead: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
void Runtime::DisallowNewSystemWeaks() {
monitor_list_->DisallowNewMonitors();
- intern_table_->DisallowNewInterns();
+ intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
heap_->DisallowNewAllocationRecords();
}
void Runtime::AllowNewSystemWeaks() {
monitor_list_->AllowNewMonitors();
- intern_table_->AllowNewInterns();
+ intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping?
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
}
// Lock and unlock the system weak locks once to ensure that no
// threads are still in the middle of adding new system weaks.
monitor_list_->EnsureNewMonitorsDisallowed();
- intern_table_->EnsureNewInternsDisallowed();
+ intern_table_->EnsureNewWeakInternsDisallowed();
java_vm_->EnsureNewWeakGlobalsDisallowed();
}
tlsPtr_.method_verifier = verifier->link_;
}
+size_t Thread::NumberOfHeldMutexes() const {
+ size_t count = 0;
+ for (BaseMutex* mu : tlsPtr_.held_mutexes) {
+ count += static_cast<size_t>(mu != nullptr);
+ }
+ return count;
+}
+
} // namespace art
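
For context on the new Thread::NumberOfHeldMutexes(): ART tracks held locks per lock level in thread-local storage, which is what the loop over tlsPtr_.held_mutexes walks. A rough self-contained model (BaseMutexSim and the 64-slot count are assumptions, not ART's real LockLevel definitions):

#include <array>
#include <cstddef>

constexpr std::size_t kLockLevelCount = 64;  // assumed size, one per lock level
struct BaseMutexSim { std::size_t level; };

// One slot per lock level; acquiring a lock records it, releasing clears it.
thread_local std::array<BaseMutexSim*, kLockLevelCount> held_mutexes{};

void OnLockAcquired(BaseMutexSim* mu) { held_mutexes[mu->level] = mu; }
void OnLockReleased(BaseMutexSim* mu) { held_mutexes[mu->level] = nullptr; }

std::size_t NumberOfHeldMutexesSketch() {
  std::size_t count = 0;
  for (BaseMutexSim* mu : held_mutexes) {
    count += static_cast<std::size_t>(mu != nullptr);  // count occupied slots
  }
  return count;
}

This is what makes the new debug check in Insert() cheap: with holding_locks false, exactly two slots should be occupied, the mutator lock and the just-acquired intern_table_lock_.
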
return tls32_.daemon;
}
+ size_t NumberOfHeldMutexes() const;
+
bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects
+ kWaitingWeakRootRead, // WAITING TS_WAIT waiting to read a weak root
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger