From 3049324f4ef71b5d7a3de49bd77c75f07dbf8f3a Mon Sep 17 00:00:00 2001
From: Hiroshi Yamauchi
Date: Thu, 3 Nov 2016 13:06:52 -0700
Subject: [PATCH] Make empty checkpoint work while weak ref access is disabled.

Fix a potential race on PushOntoMarkStack for CC by running an empty
checkpoint (while weak ref access is disabled).

Bug: 32508093
Bug: 12687968
Test: test-art-host with CC/CMS, libartd boot with N9, Ritz EAAC.

Change-Id: I3749bb525e7734804307ee16262355f3fc730312
---
 runtime/gc/allocation_record.cc                    |  4 ++-
 runtime/gc/allocation_record.h                     |  1 -
 runtime/gc/collector/concurrent_copying.cc         | 32 +++++------------
 runtime/gc/heap.cc                                 |  1 -
 runtime/gc/heap.h                                  |  1 -
 runtime/gc/reference_processor.cc                  |  7 +++-
 runtime/gc/system_weak.h                           | 14 +++++---
 runtime/gc/system_weak_test.cc                     | 10 +++---
 runtime/generated/asm_support_gen.h                |  4 +++
 runtime/intern_table.cc                            |  1 -
 runtime/intern_table.h                             |  2 +-
 runtime/interpreter/mterp/arm/footer.S             |  2 +-
 runtime/interpreter/mterp/arm/op_return.S          |  2 +-
 runtime/interpreter/mterp/arm/op_return_void.S     |  2 +-
 .../mterp/arm/op_return_void_no_barrier.S          |  2 +-
 runtime/interpreter/mterp/arm/op_return_wide.S     |  2 +-
 runtime/interpreter/mterp/arm64/footer.S           |  6 ++--
 runtime/interpreter/mterp/arm64/op_return.S        |  2 +-
 runtime/interpreter/mterp/arm64/op_return_void.S   |  2 +-
 .../mterp/arm64/op_return_void_no_barrier.S        |  2 +-
 runtime/interpreter/mterp/arm64/op_return_wide.S   |  2 +-
 runtime/interpreter/mterp/mips/footer.S            |  2 +-
 runtime/interpreter/mterp/mips/op_return.S         |  2 +-
 runtime/interpreter/mterp/mips/op_return_void.S    |  2 +-
 .../mterp/mips/op_return_void_no_barrier.S         |  2 +-
 runtime/interpreter/mterp/mips/op_return_wide.S    |  2 +-
 runtime/interpreter/mterp/mips64/footer.S          |  4 +--
 runtime/interpreter/mterp/mips64/op_return.S       |  2 +-
 runtime/interpreter/mterp/mips64/op_return_void.S  |  2 +-
 .../mterp/mips64/op_return_void_no_barrier.S       |  2 +-
 runtime/interpreter/mterp/mips64/op_return_wide.S  |  2 +-
 runtime/interpreter/mterp/mterp.cc                 |  2 ++
 runtime/interpreter/mterp/out/mterp_arm.S          | 12 +++---
 runtime/interpreter/mterp/out/mterp_arm64.S        | 16 ++++----
 runtime/interpreter/mterp/out/mterp_mips.S         | 12 +++---
 runtime/interpreter/mterp/out/mterp_mips64.S       | 14 ++++---
 runtime/interpreter/mterp/out/mterp_x86.S          | 12 +++---
 runtime/interpreter/mterp/out/mterp_x86_64.S       | 12 +++---
 runtime/interpreter/mterp/x86/footer.S             |  2 +-
 runtime/interpreter/mterp/x86/op_return.S          |  2 +-
 runtime/interpreter/mterp/x86/op_return_void.S     |  2 +-
 .../mterp/x86/op_return_void_no_barrier.S          |  2 +-
 runtime/interpreter/mterp/x86/op_return_wide.S     |  2 +-
 runtime/interpreter/mterp/x86_64/footer.S          |  2 +-
 runtime/interpreter/mterp/x86_64/op_return.S       |  2 +-
 runtime/interpreter/mterp/x86_64/op_return_void.S  |  2 +-
 .../mterp/x86_64/op_return_void_no_barrier.S       |  2 +-
 runtime/interpreter/mterp/x86_64/op_return_wide.S  |  2 +-
 runtime/java_vm_ext.cc                             | 10 +++++-
 runtime/java_vm_ext.h                              |  1 -
 runtime/monitor.cc                                 |  4 ++-
 runtime/runtime.cc                                 |  8 ++---
 runtime/runtime.h                                  |  5 ++-
 runtime/thread-inl.h                               | 24 +++++++++++--
 runtime/thread.cc                                  | 31 +++++++++++++++-
 runtime/thread.h                                   |  9 ++++-
 runtime/thread_list.cc                             | 41 +++++++++++++++++++++-
 runtime/thread_list.h                              | 15 ++++++++
 tools/cpp-define-generator/constant_thread.def     |  2 ++
 59 files changed, 248 insertions(+), 125 deletions(-)

diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index d92190093..e18a95525 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -181,7 +181,6 @@ void AllocRecordObjectMap::DisallowNewAllocationRecords() {
 }
 
 void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
-  CHECK(kUseReadBarrier);
   new_record_condition_.Broadcast(Thread::Current());
 }
 
@@ -291,6 +290,9 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self,
   // Wait for GC's sweeping to complete and allow new records
   while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                   (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     new_record_condition_.WaitHoldingLocks(self);
   }
 
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index c8b2b8970..90cff6a8c 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -261,7 +261,6 @@ class AllocRecordObjectMap {
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::alloc_tracker_lock_);
   void BroadcastForNewAllocationRecords()
-      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::alloc_tracker_lock_);
 
   // TODO: Is there a better way to hide the entries_'s type?
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 6dfab8b56..1e1b05c68 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -514,26 +514,6 @@ void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
   live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
 }
 
-class EmptyCheckpoint : public Closure {
- public:
-  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
-      : concurrent_copying_(concurrent_copying) {
-  }
-
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
-    // Note: self is not necessarily equal to thread since thread may be suspended.
-    Thread* self = Thread::Current();
-    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
-        << thread->GetState() << " thread " << thread << " self " << self;
-    // If thread is a running mutator, then act on behalf of the garbage collector.
-    // See the code in ThreadList::RunCheckpoint.
-    concurrent_copying_->GetBarrier().Pass(self);
-  }
-
- private:
-  ConcurrentCopying* const concurrent_copying_;
-};
-
 // Used to visit objects in the immune spaces.
 inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
   DCHECK(obj != nullptr);
@@ -835,10 +815,10 @@ void ConcurrentCopying::ProcessFalseGrayStack() {
 
 void ConcurrentCopying::IssueEmptyCheckpoint() {
   Thread* self = Thread::Current();
-  EmptyCheckpoint check_point(this);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  gc_barrier_->Init(self, 0);
-  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+  Barrier* barrier = thread_list->EmptyCheckpointBarrier();
+  barrier->Init(self, 0);
+  size_t barrier_count = thread_list->RunEmptyCheckpoint();
   // If there are no threads to wait for, which implies that all the checkpoint functions are
   // finished, then there is no need to release the mutator lock.
   if (barrier_count == 0) {
@@ -848,7 +828,7 @@ void ConcurrentCopying::IssueEmptyCheckpoint() {
   Locks::mutator_lock_->SharedUnlock(self);
   {
     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
-    gc_barrier_->Increment(self, barrier_count);
+    barrier->Increment(self, barrier_count);
   }
   Locks::mutator_lock_->SharedLock(self);
 }
@@ -1253,6 +1233,10 @@ bool ConcurrentCopying::ProcessMarkStackOnce() {
     }
     gc_mark_stack_->Reset();
   } else if (mark_stack_mode == kMarkStackModeShared) {
+    // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
+    // barrier but before pushing onto the mark stack. b/32508093. Note that weak ref access is
+    // disabled at this point.
+    IssueEmptyCheckpoint();
     // Process the shared GC mark stack with a lock.
     {
       MutexLock mu(self, mark_stack_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5de004b7a..447e06e70 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4069,7 +4069,6 @@ void Heap::DisallowNewAllocationRecords() const {
 }
 
 void Heap::BroadcastForNewAllocationRecords() const {
-  CHECK(kUseReadBarrier);
   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
   // be set to false while some threads are waiting for system weak access in
   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e8eb69e35..0c671d269 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -797,7 +797,6 @@ class Heap {
       REQUIRES(!Locks::alloc_tracker_lock_);
 
   void BroadcastForNewAllocationRecords() const
-      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::alloc_tracker_lock_);
 
   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 798ecd3d8..2cde7d573 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -55,7 +55,6 @@ void ReferenceProcessor::DisableSlowPath(Thread* self) {
 }
 
 void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
-  CHECK(kUseReadBarrier);
   MutexLock mu(self, *Locks::reference_processor_lock_);
   condition_.Broadcast(self);
 }
@@ -99,6 +98,9 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
         }
       }
     }
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     condition_.WaitHoldingLocks(self);
   }
   return reference->GetReferent();
@@ -270,6 +272,9 @@ bool ReferenceProcessor::MakeCircularListIfUnenqueued(
   // Wait until we are done processing the reference.
   while ((!kUseReadBarrier && SlowPathEnabled()) ||
          (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     condition_.WaitHoldingLocks(self);
   }
   // At this point, since the sentinel of the reference is live, it is guaranteed to not be
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 3910a280c..884e90b6d 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -30,7 +30,8 @@ class AbstractSystemWeakHolder {
 
   virtual void Allow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   virtual void Disallow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-  virtual void Broadcast() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+  // See Runtime::BroadcastForNewSystemWeaks for the broadcast_for_checkpoint definition.
+  virtual void Broadcast(bool broadcast_for_checkpoint) = 0;
 
   virtual void Sweep(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
@@ -61,19 +62,22 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
     allow_new_system_weak_ = false;
   }
 
-  void Broadcast() OVERRIDE
-      REQUIRES_SHARED(Locks::mutator_lock_)
+  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
       REQUIRES(!allow_disallow_lock_) {
-    CHECK(kUseReadBarrier);
     MutexLock mu(Thread::Current(), allow_disallow_lock_);
     new_weak_condition_.Broadcast(Thread::Current());
   }
 
  protected:
-  void Wait(Thread* self) REQUIRES_SHARED(allow_disallow_lock_) {
+  void Wait(Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_) {
     // Wait for GC's sweeping to complete and allow new records
     while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) ||
                     (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+      // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+      // presence of threads blocking for weak ref access.
+      self->CheckEmptyCheckpoint();
       new_weak_condition_.WaitHoldingLocks(self);
     }
   }
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index af8a44490..9b601c075 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -58,12 +58,14 @@ struct CountingSystemWeakHolder : public SystemWeakHolder {
     disallow_count_++;
   }
 
-  void Broadcast() OVERRIDE
-      REQUIRES_SHARED(Locks::mutator_lock_)
+  void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
       REQUIRES(!allow_disallow_lock_) {
-    SystemWeakHolder::Broadcast();
+    SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
 
-    allow_count_++;
+    if (!broadcast_for_checkpoint) {
+      // Don't count the broadcasts for running checkpoints.
+      allow_count_++;
+    }
   }
 
   void Sweep(IsMarkedVisitor* visitor) OVERRIDE
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 03f5bf6dd..bea20f167 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -134,6 +134,10 @@ DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_SLOT_NEXT_OFFSET), (static_cast<int32_t>
 #define THREAD_SUSPEND_REQUEST 1
 DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_REQUEST), (static_cast<int32_t>((art::kSuspendRequest))))
 #define THREAD_CHECKPOINT_REQUEST 2
 DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kCheckpointRequest))))
+#define THREAD_EMPTY_CHECKPOINT_REQUEST 4
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_EMPTY_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kEmptyCheckpointRequest))))
+#define THREAD_SUSPEND_OR_CHECKPOINT_REQUEST 7
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest))))
 #define JIT_CHECK_OSR (-1)
 DEFINE_CHECK_EQ(static_cast<int32_t>(JIT_CHECK_OSR), (static_cast<int32_t>((art::jit::kJitCheckForOSR))))
 #define JIT_HOTNESS_DISABLE (-2)
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index a61a1878a..577c488e5 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -185,7 +185,6 @@ void InternTable::AddImagesStringsToTable(const std::vectorOpcode(inst_data);
   } else if (flags & kSuspendRequest) {
     LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
+  } else if (flags & kEmptyCheckpointRequest) {
+    LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
   }
 }
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 78a90af54..4d540d768 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -619,7 +619,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck           @ (self)
     mov     r0, #0
     mov     r1, #0
@@ -639,7 +639,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck           @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG r0, r2                     @ r0<- vAA
@@ -658,7 +658,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck           @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[AA]
@@ -680,7 +680,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck           @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG r0, r2                     @ r0<- vAA
@@ -3149,7 +3149,7 @@ artMterpAsmInstructionStart = .L_op_nop
 /* File: arm/op_return_void_no_barrier.S */
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck           @ (self)
     mov     r0, #0
     mov     r1, #0
@@ -11989,7 +11989,7 @@ MterpCommonTakenBranch:
     REFRESH_IBASE
     add     r2, rINST, rINST            @ r2<- byte offset
     FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bne     .L_suspend_request_pending
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index dafcc3ef6..42f8c1b08 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -616,7 +616,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
    ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_void_check
 .Lop_return_void_return:
     mov     x0, #0
@@ -639,7 +639,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_check
 .Lop_return_return:
     lsr     w2, wINST, #8               // r2<- AA
@@ -662,7 +662,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_wide_check
 .Lop_return_wide_return:
     lsr     w2, wINST, #8               // w2<- AA
@@ -687,7 +687,7 @@ artMterpAsmInstructionStart = .L_op_nop
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_object_check
 .Lop_return_object_return:
     lsr     w2, wINST, #8               // r2<- AA
@@ -3033,7 +3033,7 @@ artMterpAsmInstructionStart = .L_op_nop
 /* File: arm64/op_return_void_no_barrier.S */
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_void_no_barrier_check
 .Lop_return_void_no_barrier_return:
     mov     x0, #0
@@ -7082,7 +7082,7 @@ MterpCommonTakenBranchNoFlags:
     add     w2, wINST, wINST            // w2<- byte offset
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     REFRESH_IBASE
-    ands    lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .L_suspend_request_pending
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
@@ -7156,7 +7156,7 @@ MterpCommonTakenBranchNoFlags:
  */
 MterpCheckSuspendAndContinue:
     ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]  // refresh xIBASE
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    check1
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
@@ -7211,7 +7211,7 @@ MterpReturn:
     ldr     lr, [xSELF, #THREAD_FLAGS_OFFSET]
     str     x0, [x2]
     mov     x0, xSELF
-    ands    lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.eq    check2
     bl      MterpSuspendCheck           // (self)
 check2:
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index c1ba79422..e17187aa4 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -818,7 +818,7 @@ artMterpAsmInstructionStart = .L_op_nop
     JAL(MterpThreadFenceForConstructor)
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz    ra, 1f
     JAL(MterpSuspendCheck)              # (self)
 1:
@@ -840,7 +840,7 @@ artMterpAsmInstructionStart = .L_op_nop
     JAL(MterpThreadFenceForConstructor)
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz    ra, 1f
     JAL(MterpSuspendCheck)              # (self)
 1:
@@ -861,7 +861,7 @@ artMterpAsmInstructionStart = .L_op_nop
     JAL(MterpThreadFenceForConstructor)
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz    ra, 1f
     JAL(MterpSuspendCheck)              # (self)
 1:
@@ -885,7 +885,7 @@ artMterpAsmInstructionStart = .L_op_nop
     JAL(MterpThreadFenceForConstructor)
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz    ra, 1f
     JAL(MterpSuspendCheck)              # (self)
 1:
@@ -3344,7 +3344,7 @@ artMterpAsmInstructionStart = .L_op_nop
 /* File: mips/op_return_void_no_barrier.S */
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz    ra, 1f
     JAL(MterpSuspendCheck)              # (self)
 1:
@@ -12791,7 +12791,7 @@ MterpCommonTakenBranchNoFlags:
     REFRESH_IBASE()
     addu    a2, rINST, rINST            # a2<- byte offset
     FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bnez    ra, .L_suspend_request_pending
     GET_INST_OPCODE(t0)                 # extract opcode from rINST
     GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 143aeb034..037787f6b 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -637,7 +637,7 @@ artMterpAsmInstructionStart = .L_op_nop
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -659,7 +659,7 @@ artMterpAsmInstructionStart = .L_op_nop
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -681,7 +681,7 @@ artMterpAsmInstructionStart = .L_op_nop
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -705,7 +705,7 @@ artMterpAsmInstructionStart = .L_op_nop
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -3121,7 +3121,7 @@ artMterpAsmInstructionStart = .L_op_nop
     .extern MterpSuspendCheck
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -12179,7 +12179,7 @@ MterpCommonTakenBranchNoFlags:
     REFRESH_IBASE
     daddu   a2, rINST, rINST            # a2<- byte offset
     FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bnezc   ra, .L_suspend_request_pending
     GET_INST_OPCODE v0                  # extract opcode from rINST
     GOTO_OPCODE v0                      # jump to next instruction
@@ -12296,7 +12296,7 @@ MterpReturn:
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     sd      a0, 0(a2)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, check2
     jal     MterpSuspendCheck           # (self)
 check2:
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index d676fdab9..695d1e497 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -612,7 +612,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -634,7 +634,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -654,7 +654,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -677,7 +677,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -3104,7 +3104,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
 .L_op_return_void_no_barrier: /* 0x73 */
 /* File: x86/op_return_void_no_barrier.S */
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -12678,7 +12678,7 @@ MterpCommonTakenBranch:
     je      .L_add_batch                # counted down to zero - report
 .L_resume_backward_branch:
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     leal    (rPC, rINST, 2), rPC
     FETCH_INST
     jnz     .L_suspend_request_pending
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index df88499a6..2eab58c05 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -587,7 +587,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -607,7 +607,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -625,7 +625,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -646,7 +646,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -2972,7 +2972,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
 .L_op_return_void_no_barrier: /* 0x73 */
 /* File: x86_64/op_return_void_no_barrier.S */
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -11915,7 +11915,7 @@ MterpCommonTakenBranch:
     je      .L_add_batch                # counted down to zero - report
 .L_resume_backward_branch:
     movq    rSELF, %rax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
     REFRESH_IBASE
     leaq    (rPC, rINSTq, 2), rPC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index e8c8ca8d7..088cb127d 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -167,7 +167,7 @@ MterpCommonTakenBranch:
     je      .L_add_batch                # counted down to zero - report
 .L_resume_backward_branch:
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     leal    (rPC, rINST, 2), rPC
     FETCH_INST
     jnz     .L_suspend_request_pending
diff --git a/runtime/interpreter/mterp/x86/op_return.S b/runtime/interpreter/mterp/x86/op_return.S
index 8e3cfad38..a8ebbed64 100644
--- a/runtime/interpreter/mterp/x86/op_return.S
+++ b/runtime/interpreter/mterp/x86/op_return.S
@@ -7,7 +7,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86/op_return_void.S b/runtime/interpreter/mterp/x86/op_return_void.S
index a14a4f639..d9eddf39f 100644
--- a/runtime/interpreter/mterp/x86/op_return_void.S
+++ b/runtime/interpreter/mterp/x86/op_return_void.S
@@ -1,7 +1,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
index 1d0e93331..2fbda6bfe 100644
--- a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
@@ -1,5 +1,5 @@
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86/op_return_wide.S b/runtime/interpreter/mterp/x86/op_return_wide.S
index 7d1850a96..5fff62620 100644
--- a/runtime/interpreter/mterp/x86/op_return_wide.S
+++ b/runtime/interpreter/mterp/x86/op_return_wide.S
@@ -5,7 +5,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
index f78f16357..ed5e5eabf 100644
--- a/runtime/interpreter/mterp/x86_64/footer.S
+++ b/runtime/interpreter/mterp/x86_64/footer.S
@@ -151,7 +151,7 @@ MterpCommonTakenBranch:
     je      .L_add_batch                # counted down to zero - report
 .L_resume_backward_branch:
     movq    rSELF, %rax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
     REFRESH_IBASE
     leaq    (rPC, rINSTq, 2), rPC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/x86_64/op_return.S b/runtime/interpreter/mterp/x86_64/op_return.S
index 07e0e5357..8cb6cbaee 100644
--- a/runtime/interpreter/mterp/x86_64/op_return.S
+++ b/runtime/interpreter/mterp/x86_64/op_return.S
@@ -7,7 +7,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void.S b/runtime/interpreter/mterp/x86_64/op_return_void.S
index 6a12df318..ba68e7e44 100644
--- a/runtime/interpreter/mterp/x86_64/op_return_void.S
+++ b/runtime/interpreter/mterp/x86_64/op_return_void.S
@@ -1,7 +1,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
index 822b2e85e..6799da1db 100644
--- a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
@@ -1,5 +1,5 @@
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
diff --git a/runtime/interpreter/mterp/x86_64/op_return_wide.S b/runtime/interpreter/mterp/x86_64/op_return_wide.S
index 288eb96f8..d6d6d1bf5 100644
--- a/runtime/interpreter/mterp/x86_64/op_return_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_return_wide.S
@@ -5,7 +5,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 8e76aeb7c..caf705a9c 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -562,6 +562,9 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
   }
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
   while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
@@ -648,7 +651,6 @@ void JavaVMExt::AllowNewWeakGlobals() {
 }
 
 void JavaVMExt::BroadcastForNewWeakGlobals() {
-  CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
   weak_globals_add_condition_.Broadcast(self);
@@ -694,6 +696,9 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalLocked(Thread* self, IndirectR
     Locks::jni_weak_globals_lock_->AssertHeld(self);
   }
   while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   return weak_globals_.Get(ref);
@@ -716,6 +721,9 @@ bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
   DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
   while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   // When just checking a weak ref has been cleared, avoid triggering the read barrier in decode
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 9e37f1178..7374920f2 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -136,7 +136,6 @@ class JavaVMExt : public JavaVM {
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jni_weak_globals_lock_);
   void BroadcastForNewWeakGlobals()
-      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jni_weak_globals_lock_);
 
   jobject AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index eb74fcf7f..5a7165a16 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1330,7 +1330,6 @@ void MonitorList::AllowNewMonitors() {
 }
 
 void MonitorList::BroadcastForNewMonitors() {
-  CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, monitor_list_lock_);
   monitor_add_condition_.Broadcast(self);
@@ -1341,6 +1340,9 @@ void MonitorList::Add(Monitor* m) {
   MutexLock mu(self, monitor_list_lock_);
   while (UNLIKELY((!kUseReadBarrier && !allow_new_monitors_) ||
                   (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     monitor_add_condition_.WaitHoldingLocks(self);
   }
   list_.push_front(m);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d645c5a39..530b1ab8b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1756,10 +1756,10 @@ void Runtime::AllowNewSystemWeaks() {
   }
 }
 
-void Runtime::BroadcastForNewSystemWeaks() {
+void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
   // This is used for the read barrier case that uses the thread-local
-  // Thread::GetWeakRefAccessEnabled() flag.
-  CHECK(kUseReadBarrier);
+  // Thread::GetWeakRefAccessEnabled() flag and the checkpoint while weak ref access is disabled
+  // (see ThreadList::RunEmptyCheckpoint).
   monitor_list_->BroadcastForNewMonitors();
   intern_table_->BroadcastForNewInterns();
   java_vm_->BroadcastForNewWeakGlobals();
@@ -1767,7 +1767,7 @@
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
-    holder->Broadcast();
+    holder->Broadcast(broadcast_for_checkpoint);
   }
 }
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 043ff5d73..02a6bb697 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -322,7 +322,10 @@ class Runtime {
   void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
   void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
-  void BroadcastForNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
+  // broadcast_for_checkpoint is true when we broadcast to make blocking threads respond to
+  // checkpoint requests, and false when we broadcast to unblock blocking threads after system
+  // weak access is re-enabled.
+  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
 
   // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
   // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
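The same three-line pattern recurs at every weak-ref wait site in this patch (allocation records, references, system weaks, JNI weak globals, monitors): check and run a pending empty checkpoint *before* blocking on the condition variable. Below is a minimal standalone sketch of why that ordering matters, using std::condition_variable and std::atomic as stand-ins for ART's Thread flags, Barrier, and ConditionVariable; all names here are illustrative, not ART's real API:

```cpp
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Illustrative stand-ins for ART's per-thread flag, barrier, and condition.
std::atomic<bool> empty_checkpoint_requested{false};
std::atomic<bool> weak_ref_access_enabled{false};
std::atomic<int>  barrier_pending{0};
std::mutex weak_access_lock;
std::condition_variable weak_access_cond;

// Analogous to Thread::CheckEmptyCheckpoint(): consume a pending request and
// pass the barrier on the collector's behalf before going (back) to sleep.
void CheckEmptyCheckpoint() {
  if (empty_checkpoint_requested.exchange(false)) {
    barrier_pending.fetch_sub(1);
  }
}

// Analogous to the patched wait loops: a thread blocked for weak ref access
// must keep answering empty checkpoint requests while it blocks.
void WaitForWeakRefAccess() {
  std::unique_lock<std::mutex> lock(weak_access_lock);
  while (!weak_ref_access_enabled.load()) {
    CheckEmptyCheckpoint();  // Without this, RunEmptyCheckpoint() below hangs.
    weak_access_cond.wait_for(lock, std::chrono::milliseconds(1));
  }
}

// Analogous to ThreadList::RunEmptyCheckpoint(): request, wake sleepers, wait.
void RunEmptyCheckpoint() {
  barrier_pending.fetch_add(1);
  empty_checkpoint_requested.store(true);
  weak_access_cond.notify_all();  // Wake threads blocked for weak ref access.
  while (barrier_pending.load() != 0) {
    std::this_thread::yield();    // The real code waits on a Barrier instead.
  }
}

int main() {
  std::thread mutator(WaitForWeakRefAccess);
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  RunEmptyCheckpoint();  // Completes even though the mutator is still blocked.
  weak_ref_access_enabled.store(true);
  weak_access_cond.notify_all();
  mutator.join();
  std::cout << "empty checkpoint completed while weak ref access was disabled\n";
}
```

The key design point the sketch demonstrates: the blocked thread counts as "not in the middle of a heap access" only if it acknowledges the request itself, which is why the broadcast in RunEmptyCheckpoint and the CheckEmptyCheckpoint call in each wait loop must both exist.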
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 5fa935320..c92305f37 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -72,6 +72,19 @@ inline void Thread::CheckSuspend() {
       RunCheckpointFunction();
     } else if (ReadFlag(kSuspendRequest)) {
       FullSuspendCheck();
+    } else if (ReadFlag(kEmptyCheckpointRequest)) {
+      RunEmptyCheckpoint();
+    } else {
+      break;
+    }
+  }
+}
+
+inline void Thread::CheckEmptyCheckpoint() {
+  DCHECK_EQ(Thread::Current(), this);
+  for (;;) {
+    if (ReadFlag(kEmptyCheckpointRequest)) {
+      RunEmptyCheckpoint();
     } else {
       break;
     }
@@ -145,8 +158,13 @@ inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state
       RunCheckpointFunction();
       continue;
     }
+    if (UNLIKELY((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest) != 0)) {
+      RunEmptyCheckpoint();
+      continue;
+    }
     // Change the state but keep the current flags (kCheckpointRequest is clear).
     DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
+    DCHECK_EQ((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest), 0);
     new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
     new_state_and_flags.as_struct.state = new_state;
 
@@ -163,7 +181,8 @@ inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state
 inline void Thread::PassActiveSuspendBarriers() {
   while (true) {
     uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
-    if (LIKELY((current_flags & (kCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
+    if (LIKELY((current_flags &
+                (kCheckpointRequest | kEmptyCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
       break;
     } else if ((current_flags & kActiveSuspendBarrier) != 0) {
       PassActiveSuspendBarriers(this);
@@ -211,7 +230,8 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
       }
     } else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
       PassActiveSuspendBarriers(this);
-    } else if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
+    } else if ((old_state_and_flags.as_struct.flags &
+                (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
       // Impossible
       LOG(FATAL) << "Transitioning to runnable with checkpoint flag, "
                  << " flags=" << old_state_and_flags.as_struct.flags
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3f7d0868b..2434ee258 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1148,6 +1148,12 @@ void Thread::RunCheckpointFunction() {
   } while (!done);
 }
 
+void Thread::RunEmptyCheckpoint() {
+  DCHECK_EQ(Thread::Current(), this);
+  AtomicClearFlag(kEmptyCheckpointRequest);
+  Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
+}
+
 bool Thread::RequestCheckpoint(Closure* function) {
   union StateAndFlags old_state_and_flags;
   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
@@ -1175,6 +1181,28 @@ bool Thread::RequestCheckpoint(Closure* function) {
   return success;
 }
 
+bool Thread::RequestEmptyCheckpoint() {
+  union StateAndFlags old_state_and_flags;
+  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
+  if (old_state_and_flags.as_struct.state != kRunnable) {
+    // If it's not runnable, we don't need to do anything because it won't be in the middle of a
+    // heap access (e.g. the read barrier).
+    return false;
+  }
+
+  // We must be runnable to request a checkpoint.
+  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
+  union StateAndFlags new_state_and_flags;
+  new_state_and_flags.as_int = old_state_and_flags.as_int;
+  new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
+  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+      old_state_and_flags.as_int, new_state_and_flags.as_int);
+  if (success) {
+    TriggerSuspend();
+  }
+  return success;
+}
+
 class BarrierClosure : public Closure {
  public:
   explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
@@ -1833,7 +1861,8 @@ Thread::~Thread() {
     tlsPtr_.jni_env = nullptr;
   }
   CHECK_NE(GetState(), kRunnable);
-  CHECK_NE(ReadFlag(kCheckpointRequest), true);
+  CHECK(!ReadFlag(kCheckpointRequest));
+  CHECK(!ReadFlag(kEmptyCheckpointRequest));
   CHECK(tlsPtr_.checkpoint_function == nullptr);
   CHECK_EQ(checkpoint_overflow_.size(), 0u);
   CHECK(tlsPtr_.flip_function == nullptr);
diff --git a/runtime/thread.h b/runtime/thread.h
index 75b5b123d..12b80a907 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -105,7 +105,8 @@ enum ThreadFlag {
   kSuspendRequest = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                         // safepoint handler.
   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
-  kActiveSuspendBarrier = 4  // Register that at least 1 suspend barrier needs to be passed.
+  kEmptyCheckpointRequest = 4,  // Request that the thread do an empty checkpoint and then continue.
+  kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
 };
 
 enum class StackedShadowFrameType {
@@ -171,6 +172,9 @@ class Thread {
   // Process pending thread suspension request and handle if pending.
   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Process a pending empty checkpoint, if any.
+  void CheckEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+
   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                    mirror::Object* thread_peer)
       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
@@ -239,6 +243,8 @@ class Thread {
       REQUIRES(Locks::thread_suspend_count_lock_);
   void RequestSynchronousCheckpoint(Closure* function)
       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::thread_list_lock_);
+  bool RequestEmptyCheckpoint()
+      REQUIRES(Locks::thread_suspend_count_lock_);
 
   void SetFlipFunction(Closure* function);
   Closure* GetFlipFunction();
@@ -1218,6 +1224,7 @@ class Thread {
       REQUIRES(Locks::thread_suspend_count_lock_);
 
   void RunCheckpointFunction();
+  void RunEmptyCheckpoint();
 
   bool PassActiveSuspendBarriers(Thread* self)
       REQUIRES(!Locks::thread_suspend_count_lock_);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index eba6666de..53d2d4aff 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -32,6 +32,7 @@
 #include "base/timing_logger.h"
 #include "debugger.h"
 #include "gc/collector/concurrent_copying.h"
+#include "gc/reference_processor.h"
 #include "jni_internal.h"
 #include "lock_word.h"
 #include "monitor.h"
@@ -68,7 +69,8 @@ ThreadList::ThreadList()
       debug_suspend_all_count_(0),
       unregistering_count_(0),
       suspend_all_historam_("suspend all histogram", 16, 64),
-      long_suspend_(false) {
+      long_suspend_(false),
+      empty_checkpoint_barrier_(new Barrier(0)) {
   CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
 }
 
@@ -373,6 +375,43 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback
   return count;
 }
 
+size_t ThreadList::RunEmptyCheckpoint() {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+  Locks::thread_list_lock_->AssertNotHeld(self);
+  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+
+  size_t count = 0;
+  {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+    for (Thread* thread : list_) {
+      if (thread != self) {
+        while (true) {
+          if (thread->RequestEmptyCheckpoint()) {
+            // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
+            // some time in the near future.
+            ++count;
+            break;
+          }
+          if (thread->GetState() != kRunnable) {
+            // The thread was seen suspended, so we are done: it cannot be in the middle of a
+            // mutator heap access.
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  // Wake up the threads blocking for weak ref access so that they will respond to the empty
+  // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
+  Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
+  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
+
+  return count;
+}
+
 // Request that a checkpoint function be run on all active (non-suspended)
 // threads. Returns the number of successful requests.
 size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index b455e31e4..133d43029 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_THREAD_LIST_H_
 #define ART_RUNTIME_THREAD_LIST_H_
 
+#include "barrier.h"
 #include "base/histogram.h"
 #include "base/mutex.h"
 #include "base/value_object.h"
@@ -100,6 +101,14 @@ class ThreadList {
   size_t RunCheckpoint(Closure* checkpoint_function, Closure* callback = nullptr)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
+  // Run an empty checkpoint on all threads: wait until each thread either passes its next
+  // suspend point or is seen suspended. This ensures that no thread is still in the middle of
+  // an in-flight mutator heap access (e.g. a read barrier). Runnable threads respond by
+  // decrementing the empty checkpoint barrier count. This works even while weak ref access is
+  // disabled. Only one concurrent use is currently supported.
+  size_t RunEmptyCheckpoint()
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+
   size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
@@ -158,6 +167,10 @@ class ThreadList {
   void DumpNativeStacks(std::ostream& os)
       REQUIRES(!Locks::thread_list_lock_);
 
+  Barrier* EmptyCheckpointBarrier() {
+    return empty_checkpoint_barrier_.get();
+  }
+
 private:
   uint32_t AllocThreadId(Thread* self);
   void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);
@@ -203,6 +216,8 @@ class ThreadList {
   // Whether or not the current thread suspension is long.
   bool long_suspend_;
 
+  std::unique_ptr<Barrier> empty_checkpoint_barrier_;
+
   friend class Thread;
 
   DISALLOW_COPY_AND_ASSIGN(ThreadList);
diff --git a/tools/cpp-define-generator/constant_thread.def b/tools/cpp-define-generator/constant_thread.def
index af5ca21f1..1364b558e 100644
--- a/tools/cpp-define-generator/constant_thread.def
+++ b/tools/cpp-define-generator/constant_thread.def
@@ -25,5 +25,7 @@
 DEFINE_THREAD_CONSTANT(SUSPEND_REQUEST,    int32_t, art::kSuspendRequest)
 DEFINE_THREAD_CONSTANT(CHECKPOINT_REQUEST, int32_t, art::kCheckpointRequest)
+DEFINE_THREAD_CONSTANT(EMPTY_CHECKPOINT_REQUEST, int32_t, art::kEmptyCheckpointRequest)
+DEFINE_THREAD_CONSTANT(SUSPEND_OR_CHECKPOINT_REQUEST, int32_t, art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
 
 #undef DEFINE_THREAD_CONSTANT
--
2.11.0
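The asm_support_gen.h and constant_thread.def changes exist so the interpreter fast paths can test all three request bits with a single `ands`/`testl` against one constant instead of OR-ing two constants at every site, which is why the mterp diffs above are so repetitive. That flag arithmetic can be restated as a self-contained C++ fragment; kSuspendOrCheckpointMask is an illustrative name, not one from the tree:

```cpp
#include <cstdint>

// The thread flag bits after this patch (values from runtime/thread.h above).
enum ThreadFlag : uint32_t {
  kSuspendRequest         = 1,  // suspend_count_ > 0; enter the safepoint handler.
  kCheckpointRequest      = 2,  // Run checkpoint work, then continue.
  kEmptyCheckpointRequest = 4,  // New: run an empty checkpoint, then continue.
  kActiveSuspendBarrier   = 8,  // Moved from 4 to 8 to make room for the new bit.
};

// The single mask the mterp return/branch paths now test in one instruction.
constexpr uint32_t kSuspendOrCheckpointMask =
    kSuspendRequest | kCheckpointRequest | kEmptyCheckpointRequest;

// Mirrors the DEFINE_CHECK_EQ guards in asm_support_gen.h: the generated
// assembly constant must stay in sync with the C++ enum.
static_assert(kSuspendOrCheckpointMask == 7,
              "must match THREAD_SUSPEND_OR_CHECKPOINT_REQUEST");
static_assert((kSuspendOrCheckpointMask & kActiveSuspendBarrier) == 0,
              "suspend barriers are handled separately from the fast-path test");
```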