}
void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
- CHECK(kUseReadBarrier);
new_record_condition_.Broadcast(Thread::Current());
}
// Wait for GC's sweeping to complete and allow new records
while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
new_record_condition_.WaitHoldingLocks(self);
}
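// A minimal illustrative sketch (not ART code): every wait-for-weak-ref-access loop touched by
// this change has the same shape as the loop above: re-check the empty checkpoint before each
// block on the condition variable, because the waiting thread stays in a runnable state and must
// answer the checkpoint itself. The names below (WeakAccessGate, run_pending_empty_checkpoint)
// are hypothetical and only the C++ standard library is used.
#include <condition_variable>
#include <functional>
#include <mutex>

class WeakAccessGate {
 public:
  // run_pending_empty_checkpoint stands in for Thread::CheckEmptyCheckpoint().
  void WaitUntilEnabled(const std::function<void()>& run_pending_empty_checkpoint) {
    std::unique_lock<std::mutex> lock(mu_);
    while (!enabled_) {
      run_pending_empty_checkpoint();  // Answer a pending checkpoint request before blocking.
      cond_.wait(lock);                // Analogous to new_record_condition_.WaitHoldingLocks(self).
    }
  }
  void Enable() {                      // Analogous to re-allowing weak ref access and broadcasting.
    std::lock_guard<std::mutex> lock(mu_);
    enabled_ = true;
    cond_.notify_all();
  }
  void BroadcastForCheckpoint() {      // Wake waiters so they run the checkpoint, then block again.
    std::lock_guard<std::mutex> lock(mu_);
    cond_.notify_all();
  }
 private:
  std::mutex mu_;
  std::condition_variable cond_;
  bool enabled_ = false;
};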
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords()
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}
-class EmptyCheckpoint : public Closure {
- public:
- explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
- : concurrent_copying_(concurrent_copying) {
- }
-
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
- // Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
- CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
- << thread->GetState() << " thread " << thread << " self " << self;
- // If thread is a running mutator, then act on behalf of the garbage collector.
- // See the code in ThreadList::RunCheckpoint.
- concurrent_copying_->GetBarrier().Pass(self);
- }
-
- private:
- ConcurrentCopying* const concurrent_copying_;
-};
-
// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
DCHECK(obj != nullptr);
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
- EmptyCheckpoint check_point(this);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
- gc_barrier_->Init(self, 0);
- size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+ Barrier* barrier = thread_list->EmptyCheckpointBarrier();
+ barrier->Init(self, 0);
+ size_t barrier_count = thread_list->RunEmptyCheckpoint();
// If there are no threads to wait for, which implies that all the checkpoint functions have
// finished, then there is no need to release the mutator lock.
if (barrier_count == 0) {
  return;
}
// Otherwise, release the mutator lock and wait for the threads to pass the barrier.
Locks::mutator_lock_->SharedUnlock(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
- gc_barrier_->Increment(self, barrier_count);
+ barrier->Increment(self, barrier_count);
}
Locks::mutator_lock_->SharedLock(self);
}
}
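// A minimal illustrative sketch (not ART code) of the count-down handshake that
// IssueEmptyCheckpoint() performs with the shared barrier: the issuer accounts for the threads
// that accepted the request and blocks until each of them has passed. The names below
// (CountDownBarrier, the worker threads) are hypothetical and use only the C++ standard library.
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class CountDownBarrier {
 public:
  void Add(int delta) {                 // Analogous to the count added by barrier->Increment().
    std::lock_guard<std::mutex> lock(mu_);
    count_ += delta;
  }
  void Pass() {                         // Analogous to Barrier::Pass() in Thread::RunEmptyCheckpoint().
    std::lock_guard<std::mutex> lock(mu_);
    if (--count_ == 0) {
      cv_.notify_all();
    }
  }
  void WaitForZero() {                  // The issuer blocks here until every thread has passed.
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return count_ == 0; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_ = 0;
};

int main() {
  CountDownBarrier barrier;
  constexpr int kAcceptedThreads = 4;   // Stand-in for the count RunEmptyCheckpoint() returns.
  barrier.Add(kAcceptedThreads);
  std::vector<std::thread> workers;
  for (int i = 0; i < kAcceptedThreads; ++i) {
    workers.emplace_back([&barrier] { barrier.Pass(); });
  }
  barrier.WaitForZero();                // Returns only after every worker has called Pass().
  for (std::thread& t : workers) {
    t.join();
  }
  return 0;
}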
gc_mark_stack_->Reset();
} else if (mark_stack_mode == kMarkStackModeShared) {
+ // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
+ // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
+ // disabled at this point.
+ IssueEmptyCheckpoint();
// Process the shared GC mark stack with a lock.
{
MutexLock mu(self, mark_stack_lock_);
}
void Heap::BroadcastForNewAllocationRecords() const {
- CHECK(kUseReadBarrier);
// Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
// be set to false while some threads are waiting for system weak access in
// AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
REQUIRES(!Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords() const
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
}
void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
- CHECK(kUseReadBarrier);
MutexLock mu(self, *Locks::reference_processor_lock_);
condition_.Broadcast(self);
}
}
}
}
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
return reference->GetReferent();
// Wait until we are done processing the reference.
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
// At this point, since the sentinel of the reference is live, it is guaranteed to not be
virtual void Allow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void Disallow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual void Broadcast() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ // See Runtime::BroadcastForNewSystemWeaks for the broadcast_for_checkpoint definition.
+ virtual void Broadcast(bool broadcast_for_checkpoint) = 0;
virtual void Sweep(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
allow_new_system_weak_ = false;
}
- void Broadcast() OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_)
+ void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
REQUIRES(!allow_disallow_lock_) {
- CHECK(kUseReadBarrier);
MutexLock mu(Thread::Current(), allow_disallow_lock_);
new_weak_condition_.Broadcast(Thread::Current());
}
protected:
- void Wait(Thread* self) REQUIRES_SHARED(allow_disallow_lock_) {
+ void Wait(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
// Wait for GC's sweeping to complete and allow new records
while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
new_weak_condition_.WaitHoldingLocks(self);
}
}
disallow_count_++;
}
- void Broadcast() OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_)
+ void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
REQUIRES(!allow_disallow_lock_) {
- SystemWeakHolder::Broadcast();
+ SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
- allow_count_++;
+ if (!broadcast_for_checkpoint) {
+ // Don't count the broadcasts for running checkpoints.
+ allow_count_++;
+ }
}
void Sweep(IsMarkedVisitor* visitor) OVERRIDE
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_REQUEST), (static_cast<int32_t>((art::kSuspendRequest))))
#define THREAD_CHECKPOINT_REQUEST 2
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kCheckpointRequest))))
+#define THREAD_EMPTY_CHECKPOINT_REQUEST 4
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_EMPTY_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kEmptyCheckpointRequest))))
+#define THREAD_SUSPEND_OR_CHECKPOINT_REQUEST 7
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest))))
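// The combined value is 1 | 2 | 4 == 7, so the mterp fast paths below can test for a pending
// suspend, checkpoint, or empty checkpoint request with a single immediate mask instead of
// OR-ing the individual flag values at every site.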
#define JIT_CHECK_OSR (-1)
DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_CHECK_OSR), (static_cast<int16_t>((art::jit::kJitCheckForOSR))))
#define JIT_HOTNESS_DISABLE (-2)
}
void InternTable::BroadcastForNewInterns() {
- CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::intern_table_lock_);
weak_intern_condition_.Broadcast(self);
void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
- void BroadcastForNewInterns() REQUIRES_SHARED(Locks::mutator_lock_);
+ void BroadcastForNewInterns();
// Adds all of the resolved image strings from the image spaces into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
REFRESH_IBASE
add r2, rINST, rINST @ r2<- byte offset
FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
bne .L_suspend_request_pending
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
bl MterpThreadFenceForConstructor
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
GET_VREG r0, r2 @ r0<- vAA
bl MterpThreadFenceForConstructor
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r0, #0
mov r1, #0
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r0, #0
mov r1, #0
bl MterpThreadFenceForConstructor
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
add w2, wINST, wINST // w2<- byte offset
FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
REFRESH_IBASE
- ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .L_suspend_request_pending
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
*/
MterpCheckSuspendAndContinue:
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne check1
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
str x0, [x2]
mov x0, xSELF
- ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.eq check2
bl MterpSuspendCheck // (self)
check2:
bl MterpThreadFenceForConstructor
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .L${opcode}_check
.L${opcode}_return:
lsr w2, wINST, #8 // r2<- AA
bl MterpThreadFenceForConstructor
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .L${opcode}_check
.L${opcode}_return:
mov x0, #0
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .L${opcode}_check
.L${opcode}_return:
mov x0, #0
bl MterpThreadFenceForConstructor
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .L${opcode}_check
.L${opcode}_return:
lsr w2, wINST, #8 // w2<- AA
REFRESH_IBASE()
addu a2, rINST, rINST # a2<- byte offset
FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
bnez ra, .L_suspend_request_pending
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
JAL(MterpThreadFenceForConstructor)
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
JAL(MterpThreadFenceForConstructor)
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
JAL(MterpThreadFenceForConstructor)
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
REFRESH_IBASE
daddu a2, rINST, rINST # a2<- byte offset
FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
bnezc ra, .L_suspend_request_pending
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction
lw ra, THREAD_FLAGS_OFFSET(rSELF)
sd a0, 0(a2)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, check2
jal MterpSuspendCheck # (self)
check2:
jal MterpThreadFenceForConstructor
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
jal MterpThreadFenceForConstructor
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
.extern MterpSuspendCheck
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
jal MterpThreadFenceForConstructor
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
} else if (flags & kSuspendRequest) {
LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
+ } else if (flags & kEmptyCheckpointRequest) {
+ LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
}
}
bl MterpThreadFenceForConstructor
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r0, #0
mov r1, #0
bl MterpThreadFenceForConstructor
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
GET_VREG r0, r2 @ r0<- vAA
bl MterpThreadFenceForConstructor
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
bl MterpThreadFenceForConstructor
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
GET_VREG r0, r2 @ r0<- vAA
/* File: arm/op_return_void_no_barrier.S */
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
mov r0, rSELF
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
blne MterpSuspendCheck @ (self)
mov r0, #0
mov r1, #0
REFRESH_IBASE
add r2, rINST, rINST @ r2<- byte offset
FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
bne .L_suspend_request_pending
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
bl MterpThreadFenceForConstructor
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .Lop_return_void_check
.Lop_return_void_return:
mov x0, #0
bl MterpThreadFenceForConstructor
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .Lop_return_check
.Lop_return_return:
lsr w2, wINST, #8 // r2<- AA
bl MterpThreadFenceForConstructor
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .Lop_return_wide_check
.Lop_return_wide_return:
lsr w2, wINST, #8 // w2<- AA
bl MterpThreadFenceForConstructor
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .Lop_return_object_check
.Lop_return_object_return:
lsr w2, wINST, #8 // r2<- AA
/* File: arm64/op_return_void_no_barrier.S */
ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
mov x0, xSELF
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .Lop_return_void_no_barrier_check
.Lop_return_void_no_barrier_return:
mov x0, #0
add w2, wINST, wINST // w2<- byte offset
FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
REFRESH_IBASE
- ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne .L_suspend_request_pending
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
*/
MterpCheckSuspendAndContinue:
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.ne check1
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
str x0, [x2]
mov x0, xSELF
- ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
b.eq check2
bl MterpSuspendCheck // (self)
check2:
JAL(MterpThreadFenceForConstructor)
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
JAL(MterpThreadFenceForConstructor)
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
JAL(MterpThreadFenceForConstructor)
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
JAL(MterpThreadFenceForConstructor)
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
/* File: mips/op_return_void_no_barrier.S */
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqz ra, 1f
JAL(MterpSuspendCheck) # (self)
1:
REFRESH_IBASE()
addu a2, rINST, rINST # a2<- byte offset
FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
bnez ra, .L_suspend_request_pending
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
jal MterpThreadFenceForConstructor
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
jal MterpThreadFenceForConstructor
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
jal MterpThreadFenceForConstructor
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
jal MterpThreadFenceForConstructor
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
.extern MterpSuspendCheck
lw ra, THREAD_FLAGS_OFFSET(rSELF)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, 1f
jal MterpSuspendCheck # (self)
1:
REFRESH_IBASE
daddu a2, rINST, rINST # a2<- byte offset
FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
bnezc ra, .L_suspend_request_pending
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction
lw ra, THREAD_FLAGS_OFFSET(rSELF)
sd a0, 0(a2)
move a0, rSELF
- and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
beqzc ra, check2
jal MterpSuspendCheck # (self)
check2:
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
.L_op_return_void_no_barrier: /* 0x73 */
/* File: x86/op_return_void_no_barrier.S */
movl rSELF, %eax
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
je .L_add_batch # counted down to zero - report
.L_resume_backward_branch:
movl rSELF, %eax
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
leal (rPC, rINST, 2), rPC
FETCH_INST
jnz .L_suspend_request_pending
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
.L_op_return_void_no_barrier: /* 0x73 */
/* File: x86_64/op_return_void_no_barrier.S */
movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
je .L_add_batch # counted down to zero - report
.L_resume_backward_branch:
movq rSELF, %rax
- testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+ testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
REFRESH_IBASE
leaq (rPC, rINSTq, 2), rPC
FETCH_INST
je .L_add_batch # counted down to zero - report
.L_resume_backward_branch:
movl rSELF, %eax
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
leal (rPC, rINST, 2), rPC
FETCH_INST
jnz .L_suspend_request_pending
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
movl rSELF, %eax
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
call SYMBOL(MterpSuspendCheck)
je .L_add_batch # counted down to zero - report
.L_resume_backward_branch:
movq rSELF, %rax
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
REFRESH_IBASE
leaq (rPC, rINSTq, 2), rPC
FETCH_INST
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
.extern MterpThreadFenceForConstructor
call SYMBOL(MterpThreadFenceForConstructor)
movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
jz 1f
call SYMBOL(MterpSuspendCheck)
1:
}
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
weak_globals_add_condition_.WaitHoldingLocks(self);
}
IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
}
void JavaVMExt::BroadcastForNewWeakGlobals() {
- CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.Broadcast(self);
Locks::jni_weak_globals_lock_->AssertHeld(self);
}
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
weak_globals_add_condition_.WaitHoldingLocks(self);
}
return weak_globals_.Get(ref);
DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
weak_globals_add_condition_.WaitHoldingLocks(self);
}
// When just checking a weak ref has been cleared, avoid triggering the read barrier in decode
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jni_weak_globals_lock_);
void BroadcastForNewWeakGlobals()
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jni_weak_globals_lock_);
jobject AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
}
void MonitorList::BroadcastForNewMonitors() {
- CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
monitor_add_condition_.Broadcast(self);
MutexLock mu(self, monitor_list_lock_);
while (UNLIKELY((!kUseReadBarrier && !allow_new_monitors_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
monitor_add_condition_.WaitHoldingLocks(self);
}
list_.push_front(m);
}
}
-void Runtime::BroadcastForNewSystemWeaks() {
+void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
// This is used for the read barrier case that uses the thread-local
- // Thread::GetWeakRefAccessEnabled() flag.
- CHECK(kUseReadBarrier);
+ // Thread::GetWeakRefAccessEnabled() flag and the checkpoint while weak ref access is disabled
+ // (see ThreadList::RunEmptyCheckpoint).
monitor_list_->BroadcastForNewMonitors();
intern_table_->BroadcastForNewInterns();
java_vm_->BroadcastForNewWeakGlobals();
// All other generic system-weak holders.
for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
- holder->Broadcast();
+ holder->Broadcast(broadcast_for_checkpoint);
}
}
void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
- void BroadcastForNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
+ // broadcast_for_checkpoint is true when we broadcast to make blocking threads respond to
+ // checkpoint requests. It is false when we broadcast to unblock blocking threads after system
+ // weak access is re-enabled.
+ void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
// clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
RunCheckpointFunction();
} else if (ReadFlag(kSuspendRequest)) {
FullSuspendCheck();
+ } else if (ReadFlag(kEmptyCheckpointRequest)) {
+ RunEmptyCheckpoint();
+ } else {
+ break;
+ }
+ }
+}
+
+inline void Thread::CheckEmptyCheckpoint() {
+ DCHECK_EQ(Thread::Current(), this);
+ for (;;) {
+ if (ReadFlag(kEmptyCheckpointRequest)) {
+ RunEmptyCheckpoint();
} else {
break;
}
RunCheckpointFunction();
continue;
}
+ if (UNLIKELY((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest) != 0)) {
+ RunEmptyCheckpoint();
+ continue;
+ }
// Change the state but keep the current flags (kCheckpointRequest is clear).
DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
+ DCHECK_EQ((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest), 0);
new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
new_state_and_flags.as_struct.state = new_state;
inline void Thread::PassActiveSuspendBarriers() {
while (true) {
uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
- if (LIKELY((current_flags & (kCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
+ if (LIKELY((current_flags &
+ (kCheckpointRequest | kEmptyCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
break;
} else if ((current_flags & kActiveSuspendBarrier) != 0) {
PassActiveSuspendBarriers(this);
}
} else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
PassActiveSuspendBarriers(this);
- } else if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
+ } else if ((old_state_and_flags.as_struct.flags &
+ (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
// Impossible
LOG(FATAL) << "Transitioning to runnable with checkpoint flag, "
<< " flags=" << old_state_and_flags.as_struct.flags
} while (!done);
}
+void Thread::RunEmptyCheckpoint() {
+ DCHECK_EQ(Thread::Current(), this);
+ AtomicClearFlag(kEmptyCheckpointRequest);
+ Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
+}
+
bool Thread::RequestCheckpoint(Closure* function) {
union StateAndFlags old_state_and_flags;
old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
return success;
}
+bool Thread::RequestEmptyCheckpoint() {
+ union StateAndFlags old_state_and_flags;
+ old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
+ if (old_state_and_flags.as_struct.state != kRunnable) {
+ // If the thread is not runnable, we don't need to do anything because it cannot be in the
+ // middle of a heap access (e.g. a read barrier).
+ return false;
+ }
+
+ // We must be runnable to request a checkpoint.
+ DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
+ union StateAndFlags new_state_and_flags;
+ new_state_and_flags.as_int = old_state_and_flags.as_int;
+ new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
+ bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+ old_state_and_flags.as_int, new_state_and_flags.as_int);
+ if (success) {
+ TriggerSuspend();
+ }
+ return success;
+}
+
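// A minimal illustrative sketch (not ART code) of the lock-free request in
// RequestEmptyCheckpoint() above: the thread state and the flag bits live in one atomic word, so
// the "is it runnable?" test and the flag set happen as a single compare-exchange. The constants
// and function below are hypothetical; only <atomic> is used.
#include <atomic>
#include <cstdint>

constexpr uint32_t kStateMaskSketch = 0xffff0000u;      // High half holds the thread state.
constexpr uint32_t kRunnableStateSketch = 0x00010000u;
constexpr uint32_t kEmptyCheckpointFlagSketch = 0x4u;   // Low half holds the request flags.

// Returns true if the flag was set while the target was observed runnable; on failure the caller
// retries or gives up, much like the loop in ThreadList::RunEmptyCheckpoint() further below.
bool RequestEmptyCheckpointSketch(std::atomic<uint32_t>& state_and_flags) {
  uint32_t old_word = state_and_flags.load(std::memory_order_relaxed);
  if ((old_word & kStateMaskSketch) != kRunnableStateSketch) {
    return false;  // Not runnable, so it cannot be in the middle of a heap access.
  }
  uint32_t new_word = old_word | kEmptyCheckpointFlagSketch;
  return state_and_flags.compare_exchange_strong(old_word, new_word, std::memory_order_seq_cst);
}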
class BarrierClosure : public Closure {
public:
explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
tlsPtr_.jni_env = nullptr;
}
CHECK_NE(GetState(), kRunnable);
- CHECK_NE(ReadFlag(kCheckpointRequest), true);
+ CHECK(!ReadFlag(kCheckpointRequest));
+ CHECK(!ReadFlag(kEmptyCheckpointRequest));
CHECK(tlsPtr_.checkpoint_function == nullptr);
CHECK_EQ(checkpoint_overflow_.size(), 0u);
CHECK(tlsPtr_.flip_function == nullptr);
kSuspendRequest = 1, // If set implies that suspend_count_ > 0 and the Thread should enter the
// safepoint handler.
kCheckpointRequest = 2, // Request that the thread do some checkpoint work and then continue.
- kActiveSuspendBarrier = 4 // Register that at least 1 suspend barrier needs to be passed.
+ kEmptyCheckpointRequest = 4, // Request that the thread do empty checkpoint and then continue.
+ kActiveSuspendBarrier = 8, // Register that at least 1 suspend barrier needs to be passed.
};
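// Hypothetical compile-time check (not part of the original change) making the layout explicit:
// the flags are one-hot bits, so kEmptyCheckpointRequest takes the value 4 and
// kActiveSuspendBarrier has to move to 8, while THREAD_SUSPEND_OR_CHECKPOINT_REQUEST (7) is
// exactly the OR of the three request bits.
static_assert((kSuspendRequest | kCheckpointRequest | kEmptyCheckpointRequest) == 7,
              "request bits must match THREAD_SUSPEND_OR_CHECKPOINT_REQUEST");
static_assert((kActiveSuspendBarrier &
               (kSuspendRequest | kCheckpointRequest | kEmptyCheckpointRequest)) == 0,
              "kActiveSuspendBarrier must not overlap the request bits");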
enum class StackedShadowFrameType {
// Process pending thread suspension request and handle if pending.
void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Process a pending empty checkpoint if pending.
+ void CheckEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
mirror::Object* thread_peer)
REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
REQUIRES(Locks::thread_suspend_count_lock_);
void RequestSynchronousCheckpoint(Closure* function)
REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::thread_list_lock_);
+ bool RequestEmptyCheckpoint()
+ REQUIRES(Locks::thread_suspend_count_lock_);
void SetFlipFunction(Closure* function);
Closure* GetFlipFunction();
REQUIRES(Locks::thread_suspend_count_lock_);
void RunCheckpointFunction();
+ void RunEmptyCheckpoint();
bool PassActiveSuspendBarriers(Thread* self)
REQUIRES(!Locks::thread_suspend_count_lock_);
#include "base/timing_logger.h"
#include "debugger.h"
#include "gc/collector/concurrent_copying.h"
+#include "gc/reference_processor.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
debug_suspend_all_count_(0),
unregistering_count_(0),
suspend_all_historam_("suspend all histogram", 16, 64),
- long_suspend_(false) {
+ long_suspend_(false),
+ empty_checkpoint_barrier_(new Barrier(0)) {
CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}
return count;
}
+size_t ThreadList::RunEmptyCheckpoint() {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+ Locks::thread_list_lock_->AssertNotHeld(self);
+ Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+
+ size_t count = 0;
+ {
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ for (Thread* thread : list_) {
+ if (thread != self) {
+ while (true) {
+ if (thread->RequestEmptyCheckpoint()) {
+ // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
+ // some time in the near future.
+ ++count;
+ break;
+ }
+ if (thread->GetState() != kRunnable) {
+ // The thread was seen in a non-runnable state, so we are done: it cannot be in the middle
+ // of a mutator heap access.
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Wake up the threads blocking for weak ref access so that they will respond to the empty
+ // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
+ Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
+
+ return count;
+}
+
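// Taken together with ConcurrentCopying::IssueEmptyCheckpoint() earlier in this change, the
// empty-checkpoint round trip is: (1) the issuer resets the shared barrier with Init(self, 0);
// (2) RunEmptyCheckpoint() sets kEmptyCheckpointRequest on every runnable thread and returns how
// many accepted; (3) the broadcasts above wake threads blocked waiting for weak ref access so
// they also answer the request; (4) when the count is non-zero, the issuer releases the mutator
// lock and calls Increment(self, count), which returns once every flagged thread has called
// Pass() from Thread::RunEmptyCheckpoint().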
// Request that a checkpoint function be run on all active (non-suspended)
// threads. Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
#ifndef ART_RUNTIME_THREAD_LIST_H_
#define ART_RUNTIME_THREAD_LIST_H_
+#include "barrier.h"
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/value_object.h"
size_t RunCheckpoint(Closure* checkpoint_function, Closure* callback = nullptr)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ // Run an empty checkpoint on all threads. Wait until each thread has either passed its next
+ // suspend point or been seen suspended, which guarantees that no thread is still in the middle
+ // of an in-flight mutator heap access (e.g. a read barrier). Runnable threads respond by
+ // decrementing the empty checkpoint barrier count. This works even while weak ref access is
+ // disabled. Only one concurrent use is currently supported.
+ size_t RunEmptyCheckpoint()
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+
size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void DumpNativeStacks(std::ostream& os)
REQUIRES(!Locks::thread_list_lock_);
+ Barrier* EmptyCheckpointBarrier() {
+ return empty_checkpoint_barrier_.get();
+ }
+
private:
uint32_t AllocThreadId(Thread* self);
void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);
// Whether or not the current thread suspension is long.
bool long_suspend_;
+ std::unique_ptr<Barrier> empty_checkpoint_barrier_;
+
friend class Thread;
DISALLOW_COPY_AND_ASSIGN(ThreadList);
DEFINE_THREAD_CONSTANT(SUSPEND_REQUEST, int32_t, art::kSuspendRequest)
DEFINE_THREAD_CONSTANT(CHECKPOINT_REQUEST, int32_t, art::kCheckpointRequest)
+DEFINE_THREAD_CONSTANT(EMPTY_CHECKPOINT_REQUEST, int32_t, art::kEmptyCheckpointRequest)
+DEFINE_THREAD_CONSTANT(SUSPEND_OR_CHECKPOINT_REQUEST, int32_t, art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
#undef DEFINE_THREAD_CONSTANT