// NOTE(review): unified-diff markers ("-"/"+") are embedded below — this span is
// an unresolved patch hunk, not compilable C++. Resolve it (keep the "+" line,
// drop the "-" lines) before building.
// Fragment of a GC thread-count helper whose signature is above this view:
// with no GC thread pool, or when pause times matter, only one thread is used.
if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
return 1;
}
// Old form ("-"): if/else selecting the paused vs. concurrent thread count.
- if (paused) {
- return heap_->GetParallelGCThreadCount() + 1;
- } else {
- return heap_->GetConcGCThreadCount() + 1;
- }
// New form ("+"): same selection via a conditional expression; the +1 accounts
// for the calling thread, which also performs GC work.
+ return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}
// NOTE(review): the "body" of ScanGrayObjects below is not its real body — lines
// from a different file (heap constant definitions, including "+" diff-hunk
// lines adding kDefaultEnableParallelGC) have been spliced in here. The original
// method body must be restored before this can compile.
void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
// Primitive arrays larger than this size are put in the large object space.
static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
+ // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
+ static constexpr bool kDefaultEnableParallelGC = false;
// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
// since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
}
// Default to number of processors minus one since the main GC thread also does work.
// NOTE(review): unresolved diff hunk — the enclosing function is outside this
// view. The "-" lines unconditionally defaulted ParallelGCThreads to
// (nprocessors - 1); the "+" lines gate that on gc::Heap::kDefaultEnableParallelGC,
// defaulting to 0 threads when parallel GC is disabled. Keep the "+" lines.
- args.SetIfMissing(M::ParallelGCThreads,
- static_cast<unsigned int>(sysconf(_SC_NPROCESSORS_CONF) - 1u));
+ args.SetIfMissing(M::ParallelGCThreads, gc::Heap::kDefaultEnableParallelGC ?
+ static_cast<unsigned int>(sysconf(_SC_NPROCESSORS_CONF) - 1u) : 0u);
// -Xverbose:
{
// NOTE(review): this run of RUNTIME_OPTIONS_KEY macro lines belongs to an
// options-definition (.def) file, and the "-"/"+" pair below is an unresolved
// diff hunk: keep the "+" line, which changes the ParallelGCThreads default
// from 1u to 0u (consistent with parallel GC being off by default).
RUNTIME_OPTIONS_KEY (MemoryKiB, NonMovingSpaceCapacity, gc::Heap::kDefaultNonMovingSpaceCapacity)
RUNTIME_OPTIONS_KEY (double, HeapTargetUtilization, gc::Heap::kDefaultTargetUtilization)
RUNTIME_OPTIONS_KEY (double, ForegroundHeapGrowthMultiplier, gc::Heap::kDefaultHeapGrowthMultiplier)
-RUNTIME_OPTIONS_KEY (unsigned int, ParallelGCThreads, 1u)
+RUNTIME_OPTIONS_KEY (unsigned int, ParallelGCThreads, 0u)
RUNTIME_OPTIONS_KEY (unsigned int, ConcGCThreads)
RUNTIME_OPTIONS_KEY (Memory<1>, StackSize) // -Xss
RUNTIME_OPTIONS_KEY (unsigned int, MaxSpinsBeforeThinLockInflation,Monitor::kDefaultMaxSpinsBeforeThinLockInflation)