void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
InlineCache* cache = GetInlineCache(dex_pc);
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- mirror::Class* existing = cache->classes_[i].Read();
- if (existing == cls) {
+ mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
+ mirror::Class* marked = ReadBarrier::IsMarked(existing);
+ if (marked == cls) {
// Receiver type is already in the cache, nothing else to do.
return;
- } else if (existing == nullptr) {
+ } else if (marked == nullptr) {
// Cache entry is empty, try to put `cls` in it.
- GcRoot<mirror::Class> expected_root(nullptr);
+ // Note: it's ok to spin on 'existing' here: if 'existing' is not null, that means
+ // it is a stale heap address, which will only be cleared during SweepSystemWeaks,
+ // *after* this thread hits a suspend point.
+ GcRoot<mirror::Class> expected_root(existing);
GcRoot<mirror::Class> desired_root(cls);
if (!reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i])->
CompareExchangeStrongSequentiallyConsistent(expected_root, desired_root)) {
}
}
+// Returns `ref` unchanged when no marking check is needed (non-read-barrier
+// configuration, null `ref`, or the GC is not currently marking). Otherwise
+// asks the concurrent copying collector whether `ref` is marked, returning
+// the marked mirror object, or null if it is not marked. This only queries
+// the mark state; it never marks `ref` itself.
+template <typename MirrorType>
+inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
+  // Only read-barrier configurations can have mutators run while
+  // the GC is marking.
+  if (!kUseReadBarrier) {
+    return ref;
+  }
+  // IsMarked does not handle null, so handle it here.
+  if (ref == nullptr) {
+    return nullptr;
+  }
+  // IsMarked should only be called when the GC is marking.
+  if (!Thread::Current()->GetIsGcMarking()) {
+    return ref;
+  }
+
+  // Delegate to the concurrent copying collector's IsMarked; the result is
+  // cast back to the caller's MirrorType.
+  return reinterpret_cast<MirrorType*>(
+      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
+}
+
inline bool ReadBarrier::IsDuringStartup() {
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap == nullptr) {
GcRootSource* gc_root_source = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Return the mirror Object if it is marked, or null if not.
+ template <typename MirrorType>
+ ALWAYS_INLINE static MirrorType* IsMarked(MirrorType* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
static bool IsDuringStartup();
// Without the holder object.