}
- // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
- // we are guaranteed to not have GC during image writing.
+ // InternStrongImageString allows us to intern while holding the heap bitmap lock. This is safe
+ // since we are guaranteed to not have GC during image writing.
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+ mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
// to sleep, resulting in a deadlock.
// Increment the count by delta, wait on condition if count is non zero.
- void Increment(Thread* self, int delta) REQUIRES(!lock_);;
+ void Increment(Thread* self, int delta) REQUIRES(!lock_);
// Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
// true if time out occurred.
bool IsExclusiveHeld(const Thread* self) const;
// Assert the current thread has exclusive access to the ReaderWriterMutex.
- void AssertExclusiveHeld(const Thread* self) {
+ void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(IsExclusiveHeld(self)) << *this;
}
}
- void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }
+ void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }
// Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
- void AssertNotExclusiveHeld(const Thread* self) {
+ void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsExclusiveHeld(self)) << *this;
}
}
- void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }
+ void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
+ AssertNotExclusiveHeld(self);
+ }
// Is the current thread a shared holder of the ReaderWriterMutex.
bool IsSharedHeld(const Thread* self) const;
// Assert the current thread has shared access to the ReaderWriterMutex.
- void AssertSharedHeld(const Thread* self) {
+ void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
if (kDebugLocking && (gAborting == 0)) {
// TODO: we can only assert this well when self != null.
CHECK(IsSharedHeld(self) || self == nullptr) << *this;
}
}
- void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
+ void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
+ AssertSharedHeld(self);
+ }
// Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
// mode.
- void AssertNotHeld(const Thread* self) {
+ void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsSharedHeld(self)) << *this;
}
class Roles {
public:
+ // Uninterruptible means that the thread may not become suspended.
static Uninterruptible uninterruptible_;
};
return item.IsNull();
}
};
- // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results
+ // hash set which hashes class descriptor, and compares descriptors and class loaders. Results
// should be compared for a matching Class descriptor and class loader.
typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
ClassSet;
// TODO: shard lock to have one per class loader.
+ // We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
};
case kWaitingInMainDebuggerLoop:
case kWaitingInMainSignalCatcherLoop:
case kWaitingPerformingGc:
- case kWaitingWeakRootRead:
+ case kWaitingWeakGcRootRead:
case kWaiting:
return JDWP::TS_WAIT;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
// Need to wait until we can read weak roots.
kWeakRootStateNoReadsOrWrites,
// Need to mark new weak roots to make sure they don't get swept.
+ // kWeakRootStateMarkNewRoots is currently unused; it is intended to allow adding new
+ // weak roots during the CMS reference processing phase.
kWeakRootStateMarkNewRoots,
};
}
mirror::String* InternTable::LookupWeak(mirror::String* s) {
- // TODO: Return only if marked.
return weak_interns_.Find(s);
}
void InternTable::WaitUntilAccessible(Thread* self) {
Locks::intern_table_lock_->ExclusiveUnlock(self);
- self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+ self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
Locks::intern_table_lock_->ExclusiveLock(self);
while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
weak_intern_condition_.Wait(self);
CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
}
while (true) {
+ if (holding_locks) {
+ if (!kUseReadBarrier) {
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+ } else {
+ CHECK(self->GetWeakRefAccessEnabled());
+ }
+ }
// Check the strong table for a match.
mirror::String* strong = LookupStrong(s);
if (strong != nullptr) {
return strong;
}
+ if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
+ (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
+ break;
+ }
// weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
// cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
// cleared.
- if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
- break;
- }
CHECK(!holding_locks);
StackHandleScope<1> hs(self);
auto h = hs.NewHandleWrapper(&s);
WaitUntilAccessible(self);
}
- CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
- DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
+ if (!kUseReadBarrier) {
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
+ } else {
+ CHECK(self->GetWeakRefAccessEnabled());
+ }
// There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
if (weak != nullptr) {
return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}
-mirror::String* InternTable::InternImageString(mirror::String* s) {
+mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
// May be holding the heap bitmap lock.
return Insert(s, true, true);
}
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
weak_interns_.SweepWeaks(visitor);
- // Done sweeping, back to a normal state.
- ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
}
void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Only used by image writer. Special version that may not cause thread suspension since the GC
- // can not be running while we are doing image writing.
- mirror::String* InternImageString(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_);
+ // can not be running while we are doing image writing. May be called while holding a
+ // lock since there will not be thread suspension.
+ mirror::String* InternStrongImageString(mirror::String* s)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
UnorderedSet post_zygote_table_;
};
- // Insert if non null, otherwise return null.
+ // Insert if non null, otherwise return null. Must be called holding the mutator lock.
+ // If holding_locks is true, then we may also hold other locks. If holding_locks is true, then we
+ // require GC is not running since it is not safe to wait while holding locks.
mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
// Explicit DoLambdaCall template function declarations.
#define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, \
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template SHARED_REQUIRES(Locks::mutator_lock_) \
bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
const ShadowFrame& shadow_frame, \
Thread* self, JValue* result)
* the debugger.
*
* Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
+ *
+ * NO_THREAD_SAFETY_ANALYSIS since we can't annotate that we do not have
+ * state->thread_start_lock_ held.
*/
static JdwpState* Create(const JdwpOptions* options)
REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
- case kWaitingWeakRootRead: return kJavaWaiting;
+ case kWaitingWeakGcRootRead: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
void Runtime::AllowNewSystemWeaks() {
monitor_list_->AllowNewMonitors();
- intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping?
+ intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
lambda_box_table_->AllowNewWeakBoxedLambdas();
size_t Thread::NumberOfHeldMutexes() const {
size_t count = 0;
for (BaseMutex* mu : tlsPtr_.held_mutexes) {
- count += static_cast<size_t>(mu != nullptr);
+ count += mu != nullptr ? 1 : 0;
}
return count;
}
kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects
- kWaitingWeakRootRead, // WAITING TS_WAIT waiting to read a weak root
+ kWaitingWeakGcRootRead, // WAITING TS_WAIT waiting on the GC to read a weak root
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger