2 * Copyright (C) 2011 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <unwind.h> // For GC verification.
24 #include "art_field-inl.h"
25 #include "base/allocator.h"
26 #include "base/arena_allocator.h"
27 #include "base/dumpable.h"
28 #include "base/histogram-inl.h"
29 #include "base/stl_util.h"
30 #include "base/systrace.h"
31 #include "base/time_utils.h"
32 #include "common_throws.h"
33 #include "cutils/sched_policy.h"
35 #include "dex_file-inl.h"
36 #include "gc/accounting/atomic_stack.h"
37 #include "gc/accounting/card_table-inl.h"
38 #include "gc/accounting/heap_bitmap-inl.h"
39 #include "gc/accounting/mod_union_table-inl.h"
40 #include "gc/accounting/remembered_set.h"
41 #include "gc/accounting/space_bitmap-inl.h"
42 #include "gc/collector/concurrent_copying.h"
43 #include "gc/collector/mark_compact.h"
44 #include "gc/collector/mark_sweep.h"
45 #include "gc/collector/partial_mark_sweep.h"
46 #include "gc/collector/semi_space.h"
47 #include "gc/collector/sticky_mark_sweep.h"
48 #include "gc/reference_processor.h"
49 #include "gc/space/bump_pointer_space.h"
50 #include "gc/space/dlmalloc_space-inl.h"
51 #include "gc/space/image_space.h"
52 #include "gc/space/large_object_space.h"
53 #include "gc/space/region_space.h"
54 #include "gc/space/rosalloc_space-inl.h"
55 #include "gc/space/space-inl.h"
56 #include "gc/space/zygote_space.h"
57 #include "gc/task_processor.h"
58 #include "entrypoints/quick/quick_alloc_entrypoints.h"
61 #include "intern_table.h"
63 #include "jit/jit_code_cache.h"
64 #include "mirror/class-inl.h"
65 #include "mirror/object-inl.h"
66 #include "mirror/object_array-inl.h"
67 #include "mirror/reference-inl.h"
69 #include "reflection.h"
71 #include "ScopedLocalRef.h"
72 #include "scoped_thread_state_change.h"
73 #include "handle_scope-inl.h"
74 #include "thread_list.h"
75 #include "well_known_classes.h"
81 static constexpr size_t kCollectorTransitionStressIterations = 0;
82 static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
83 // Minimum amount of remaining bytes before a concurrent GC is triggered.
84 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
85 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
86 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more often
87 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
88 // threads (lower pauses, use less memory bandwidth).
89 static constexpr double kStickyGcThroughputAdjustment = 1.0;
90 // Whether or not we compact the zygote in PreZygoteFork.
91 static constexpr bool kCompactZygote = kMovingCollector;
92 // How many reserve entries are at the end of the allocation stack; these are only needed if the
93 // allocation stack overflows.
94 static constexpr size_t kAllocationStackReserveSize = 1024;
95 // Default mark stack size in bytes.
96 static const size_t kDefaultMarkStackSize = 64 * KB;
98 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
99 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
100 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
101 static const char* kNonMovingSpaceName = "non moving space";
102 static const char* kZygoteSpaceName = "zygote space";
103 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
104 static constexpr bool kGCALotMode = false;
105 // GC alot mode uses a small allocation stack to stress test a lot of GC.
106 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
107 sizeof(mirror::HeapReference<mirror::Object>);
108 // Verify object mode uses a small allocation stack size since searching the allocation stack is slow.
109 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
110 sizeof(mirror::HeapReference<mirror::Object>);
111 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
112 sizeof(mirror::HeapReference<mirror::Object>);
113 // System.runFinalization can deadlock with native allocations; to deal with this, we have a
114 // timeout on how long we wait for finalizers to run. b/21544853
115 static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
117 // For deterministic compilation, we need the heap to be at a well-known address.
118 static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
119 // Dump the rosalloc stats on SIGQUIT.
120 static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
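// Returns whether GC pause times matter for the current process state (i.e. it is jank perceptible).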
122 static inline bool CareAboutPauseTimes() {
123 return Runtime::Current()->InJankPerceptibleProcessState();
126 Heap::Heap(size_t initial_size,
130 double target_utilization,
131 double foreground_heap_growth_multiplier,
133 size_t non_moving_space_capacity,
134 const std::string& image_file_name,
135 const InstructionSet image_instruction_set,
136 CollectorType foreground_collector_type,
137 CollectorType background_collector_type,
138 space::LargeObjectSpaceType large_object_space_type,
139 size_t large_object_threshold,
140 size_t parallel_gc_threads,
141 size_t conc_gc_threads,
142 bool low_memory_mode,
143 size_t long_pause_log_threshold,
144 size_t long_gc_log_threshold,
145 bool ignore_max_footprint,
147 bool verify_pre_gc_heap,
148 bool verify_pre_sweeping_heap,
149 bool verify_post_gc_heap,
150 bool verify_pre_gc_rosalloc,
151 bool verify_pre_sweeping_rosalloc,
152 bool verify_post_gc_rosalloc,
154 bool use_homogeneous_space_compaction_for_oom,
155 uint64_t min_interval_homogeneous_space_compaction_by_oom)
156 : non_moving_space_(nullptr),
157 rosalloc_space_(nullptr),
158 dlmalloc_space_(nullptr),
159 main_space_(nullptr),
160 collector_type_(kCollectorTypeNone),
161 foreground_collector_type_(foreground_collector_type),
162 background_collector_type_(background_collector_type),
163 desired_collector_type_(foreground_collector_type_),
164 pending_task_lock_(nullptr),
165 parallel_gc_threads_(parallel_gc_threads),
166 conc_gc_threads_(conc_gc_threads),
167 low_memory_mode_(low_memory_mode),
168 long_pause_log_threshold_(long_pause_log_threshold),
169 long_gc_log_threshold_(long_gc_log_threshold),
170 ignore_max_footprint_(ignore_max_footprint),
171 zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
172 zygote_space_(nullptr),
173 large_object_threshold_(large_object_threshold),
174 disable_thread_flip_count_(0),
175 thread_flip_running_(false),
176 collector_type_running_(kCollectorTypeNone),
177 last_gc_type_(collector::kGcTypeNone),
178 next_gc_type_(collector::kGcTypePartial),
180 growth_limit_(growth_limit),
181 max_allowed_footprint_(initial_size),
182 native_footprint_gc_watermark_(initial_size),
183 native_need_to_run_finalization_(false),
184 concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
185 total_bytes_freed_ever_(0),
186 total_objects_freed_ever_(0),
187 num_bytes_allocated_(0),
188 native_bytes_allocated_(0),
189 num_bytes_freed_revoke_(0),
190 verify_missing_card_marks_(false),
191 verify_system_weaks_(false),
192 verify_pre_gc_heap_(verify_pre_gc_heap),
193 verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
194 verify_post_gc_heap_(verify_post_gc_heap),
195 verify_mod_union_table_(false),
196 verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
197 verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
198 verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
199 gc_stress_mode_(gc_stress_mode),
200 /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
201 * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
202 * verification is enabled, we limit the size of allocation stacks to speed up their searching. */
205 max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
206 : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
207 kDefaultAllocationStackSize),
208 current_allocator_(kAllocatorTypeDlMalloc),
209 current_non_moving_allocator_(kAllocatorTypeNonMoving),
210 bump_pointer_space_(nullptr),
211 temp_space_(nullptr),
212 region_space_(nullptr),
215 target_utilization_(target_utilization),
216 foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
218 verify_object_mode_(kVerifyObjectModeDisabled),
219 disable_moving_gc_count_(0),
220 is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
222 main_space_backup_(nullptr),
223 min_interval_homogeneous_space_compaction_by_oom_(
224 min_interval_homogeneous_space_compaction_by_oom),
225 last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
226 pending_collector_transition_(nullptr),
227 pending_heap_trim_(nullptr),
228 use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
229 running_collection_is_blocking_(false),
230 blocking_gc_count_(0U),
231 blocking_gc_time_(0U),
232 last_update_time_gc_count_rate_histograms_( // Round down by the window duration.
233 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
234 gc_count_last_window_(0U),
235 blocking_gc_count_last_window_(0U),
236 gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
237 blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
238 kGcCountRateMaxBucketCount),
239 alloc_tracking_enabled_(false),
240 backtrace_lock_(nullptr),
241 seen_backtrace_count_(0u),
242 unique_backtrace_count_(0u),
243 gc_disabled_for_shutdown_(false) {
244 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
245 LOG(INFO) << "Heap() entering";
247 ScopedTrace trace(__FUNCTION__);
248 Runtime* const runtime = Runtime::Current();
249 // If we aren't the zygote, switch to the default non-zygote allocator. This may update the allocation entrypoints.
251 const bool is_zygote = runtime->IsZygote();
253 // Background compaction is currently not supported for command line runs.
254 if (background_collector_type_ != foreground_collector_type_) {
255 VLOG(heap) << "Disabling background compaction for non zygote";
256 background_collector_type_ = foreground_collector_type_;
259 ChangeCollector(desired_collector_type_);
260 live_bitmap_.reset(new accounting::HeapBitmap(this));
261 mark_bitmap_.reset(new accounting::HeapBitmap(this));
262 // Requested begin for the alloc space, to follow the mapped image and oat files
263 uint8_t* requested_alloc_space_begin = nullptr;
264 if (foreground_collector_type_ == kCollectorTypeCC) {
265 // Need to use a low address so that we can allocate a contiguous
266 // 2 * Xmx space when there's no image (dex2oat for target).
267 CHECK_GE(300 * MB, non_moving_space_capacity);
268 requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
271 // Load image space(s).
272 if (!image_file_name.empty()) {
273 // For code reuse, handle this like a work queue.
274 std::vector<std::string> image_file_names;
275 image_file_names.push_back(image_file_name);
276 // The loaded spaces. Secondary images may fail to load, in which case we need to remove
277 // already added spaces.
278 std::vector<space::Space*> added_image_spaces;
279 uint8_t* const original_requested_alloc_space_begin = requested_alloc_space_begin;
280 for (size_t index = 0; index < image_file_names.size(); ++index) {
281 std::string& image_name = image_file_names[index];
282 std::string error_msg;
283 space::ImageSpace* boot_image_space = space::ImageSpace::CreateBootImage(
285 image_instruction_set,
288 if (boot_image_space != nullptr) {
289 AddSpace(boot_image_space);
290 added_image_spaces.push_back(boot_image_space);
291 // Oat files referenced by image files immediately follow them in memory; ensure the alloc space
292 // isn't going to end up in the middle.
293 uint8_t* oat_file_end_addr = boot_image_space->GetImageHeader().GetOatFileEnd();
294 CHECK_GT(oat_file_end_addr, boot_image_space->End());
295 requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
296 boot_image_spaces_.push_back(boot_image_space);
299 // If this was the first space, check whether there are more images to load.
300 const OatFile* boot_oat_file = boot_image_space->GetOatFile();
301 if (boot_oat_file == nullptr) {
305 const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
306 const char* boot_classpath =
307 boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPath);
308 if (boot_classpath == nullptr) {
312 space::ImageSpace::CreateMultiImageLocations(image_file_name,
317 LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
318 << "Attempting to fall back to imageless running. Error was: " << error_msg
319 << "\nAttempted image: " << image_name;
320 // Remove already loaded spaces.
321 for (space::Space* loaded_space : added_image_spaces) {
322 RemoveSpace(loaded_space);
325 boot_image_spaces_.clear();
326 requested_alloc_space_begin = original_requested_alloc_space_begin;
/*
332 requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
333 +- nonmoving space (non_moving_space_capacity)+-
334 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
335 +-????????????????????????????????????????????+-
336 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
337 +-main alloc space / bump space 1 (capacity_) +-
338 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
339 +-????????????????????????????????????????????+-
340 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
341 +-main alloc space2 / bump space 2 (capacity_)+-
342 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
*/
344 // We don't have hspace compaction enabled with GSS or CC.
345 if (foreground_collector_type_ == kCollectorTypeGSS ||
346 foreground_collector_type_ == kCollectorTypeCC) {
347 use_homogeneous_space_compaction_for_oom_ = false;
349 bool support_homogeneous_space_compaction =
350 background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
351 use_homogeneous_space_compaction_for_oom_;
352 // We may use the same space as the main space for the non moving space if we don't need to compact
353 // from the main space.
354 // This is not the case if we support homogeneous compaction or have a moving background collector type.
356 bool separate_non_moving_space = is_zygote ||
357 support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
358 IsMovingGc(background_collector_type_);
359 if (foreground_collector_type_ == kCollectorTypeGSS) {
360 separate_non_moving_space = false;
362 std::unique_ptr<MemMap> main_mem_map_1;
363 std::unique_ptr<MemMap> main_mem_map_2;
365 // Gross hack to make dex2oat deterministic.
366 if (foreground_collector_type_ == kCollectorTypeMS &&
367 requested_alloc_space_begin == nullptr &&
368 Runtime::Current()->IsAotCompiler()) {
369 // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
371 requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
373 uint8_t* request_begin = requested_alloc_space_begin;
374 if (request_begin != nullptr && separate_non_moving_space) {
375 request_begin += non_moving_space_capacity;
377 std::string error_str;
378 std::unique_ptr<MemMap> non_moving_space_mem_map;
379 if (separate_non_moving_space) {
380 ScopedTrace trace2("Create separate non moving space");
381 // If we are the zygote, the non moving space becomes the zygote space when we run
382 // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
383 // rename the mem map later.
384 const char* space_name = is_zygote ? kZygoteSpaceName: kNonMovingSpaceName;
385 // Reserve the non moving mem map before the other two since it needs to be at a specific address.
387 non_moving_space_mem_map.reset(
388 MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
389 non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
391 CHECK(non_moving_space_mem_map != nullptr) << error_str;
392 // Try to reserve virtual memory at a lower address if we have a separate non moving space.
393 request_begin = reinterpret_cast<uint8_t*>(300 * MB);
395 // Attempt to create 2 mem maps at or after the requested begin.
396 if (foreground_collector_type_ != kCollectorTypeCC) {
397 ScopedTrace trace2("Create main mem map");
398 if (separate_non_moving_space || !is_zygote) {
399 main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
404 // If no separate non-moving space and we are the zygote, the main space must come right
405 // after the image space to avoid a gap. This is required since we want the zygote space to
406 // be adjacent to the image space.
407 main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
408 PROT_READ | PROT_WRITE, true, false,
411 CHECK(main_mem_map_1.get() != nullptr) << error_str;
413 if (support_homogeneous_space_compaction ||
414 background_collector_type_ == kCollectorTypeSS ||
415 foreground_collector_type_ == kCollectorTypeSS) {
416 ScopedTrace trace2("Create main mem map 2");
417 main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
418 capacity_, &error_str));
419 CHECK(main_mem_map_2.get() != nullptr) << error_str;
422 // Create the non moving space first so that bitmaps don't take up the address range.
423 if (separate_non_moving_space) {
424 ScopedTrace trace2("Add non moving space");
425 // Non moving space is always dlmalloc since we currently don't have support for multiple
426 // active rosalloc spaces.
427 const size_t size = non_moving_space_mem_map->Size();
428 non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
429 non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
430 initial_size, size, size, false);
431 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
432 CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
433 << requested_alloc_space_begin;
434 AddSpace(non_moving_space_);
436 // Create other spaces based on whether or not we have a moving GC.
437 if (foreground_collector_type_ == kCollectorTypeCC) {
438 region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
439 AddSpace(region_space_);
440 } else if (IsMovingGc(foreground_collector_type_) &&
441 foreground_collector_type_ != kCollectorTypeGSS) {
442 // Create bump pointer spaces.
443 // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
444 // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
445 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
446 main_mem_map_1.release());
447 CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
448 AddSpace(bump_pointer_space_);
449 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
450 main_mem_map_2.release());
451 CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
452 AddSpace(temp_space_);
453 CHECK(separate_non_moving_space);
455 CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
456 CHECK(main_space_ != nullptr);
457 AddSpace(main_space_);
458 if (!separate_non_moving_space) {
459 non_moving_space_ = main_space_;
460 CHECK(!non_moving_space_->CanMoveObjects());
462 if (foreground_collector_type_ == kCollectorTypeGSS) {
463 CHECK_EQ(foreground_collector_type_, background_collector_type_);
464 // Create bump pointer spaces instead of a backup space.
465 main_mem_map_2.release();
466 bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
467 kGSSBumpPointerSpaceCapacity, nullptr);
468 CHECK(bump_pointer_space_ != nullptr);
469 AddSpace(bump_pointer_space_);
470 temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
471 kGSSBumpPointerSpaceCapacity, nullptr);
472 CHECK(temp_space_ != nullptr);
473 AddSpace(temp_space_);
474 } else if (main_mem_map_2.get() != nullptr) {
475 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
476 main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
477 growth_limit_, capacity_, name, true));
478 CHECK(main_space_backup_.get() != nullptr);
479 // Add the space so it's accounted for in the heap_begin and heap_end.
480 AddSpace(main_space_backup_.get());
483 CHECK(non_moving_space_ != nullptr);
484 CHECK(!non_moving_space_->CanMoveObjects());
485 // Allocate the large object space.
486 if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
487 large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
489 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
490 } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
491 large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
492 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
494 // Disable the large object space by making the cutoff excessively large.
495 large_object_threshold_ = std::numeric_limits<size_t>::max();
496 large_object_space_ = nullptr;
498 if (large_object_space_ != nullptr) {
499 AddSpace(large_object_space_);
501 // Compute heap capacity. Continuous spaces are sorted in order of Begin().
502 CHECK(!continuous_spaces_.empty());
503 // Relies on the spaces being sorted.
504 uint8_t* heap_begin = continuous_spaces_.front()->Begin();
505 uint8_t* heap_end = continuous_spaces_.back()->Limit();
506 size_t heap_capacity = heap_end - heap_begin;
507 // Remove the main backup space since it slows down the GC to have unused extra spaces.
508 // TODO: Avoid needing to do this.
509 if (main_space_backup_.get() != nullptr) {
510 RemoveSpace(main_space_backup_.get());
512 // Allocate the card table.
513 // We currently don't support dynamically resizing the card table.
514 // Since we don't know where in the low_4gb the app image will be located, make the card table
515 // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
516 UNUSED(heap_capacity);
517 // Start at 64 KB, we can be sure there are no spaces mapped this low since the address range is
518 // reserved by the kernel.
519 static constexpr size_t kMinHeapAddress = 4 * KB;
520 card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
521 4 * GB - kMinHeapAddress));
522 CHECK(card_table_.get() != nullptr) << "Failed to create card table";
523 if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
524 rb_table_.reset(new accounting::ReadBarrierTable());
525 DCHECK(rb_table_->IsAllCleared());
527 if (HasBootImageSpace()) {
528 // Don't add the image mod union table if we are running without an image; this can crash if
529 // we use the CardCache implementation.
530 for (space::ImageSpace* image_space : GetBootImageSpaces()) {
531 accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
532 "Image mod-union table", this, image_space);
533 CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
534 AddModUnionTable(mod_union_table);
537 if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
538 accounting::RememberedSet* non_moving_space_rem_set =
539 new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
540 CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
541 AddRememberedSet(non_moving_space_rem_set);
543 // TODO: Count objects in the image space here?
544 num_bytes_allocated_.StoreRelaxed(0);
545 mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
546 kDefaultMarkStackSize));
547 const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
548 allocation_stack_.reset(accounting::ObjectStack::Create(
549 "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
550 live_stack_.reset(accounting::ObjectStack::Create(
551 "live stack", max_allocation_stack_size_, alloc_stack_capacity));
552 // It's still too early to take a lock because there are no threads yet, but we can create locks
553 // now. We don't create them earlier to make it clear that you can't use locks during heap initialization.
555 gc_complete_lock_ = new Mutex("GC complete lock");
556 gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
557 *gc_complete_lock_));
558 thread_flip_lock_ = new Mutex("GC thread flip lock");
559 thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
560 *thread_flip_lock_));
561 task_processor_.reset(new TaskProcessor());
562 reference_processor_.reset(new ReferenceProcessor());
563 pending_task_lock_ = new Mutex("Pending task lock");
564 if (ignore_max_footprint_) {
565 SetIdealFootprint(std::numeric_limits<size_t>::max());
566 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
568 CHECK_NE(max_allowed_footprint_, 0U);
569 // Create our garbage collectors.
570 for (size_t i = 0; i < 2; ++i) {
571 const bool concurrent = i != 0;
572 if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
573 (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
574 garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
575 garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
576 garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
579 if (kMovingCollector) {
580 if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
581 MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
582 use_homogeneous_space_compaction_for_oom_) {
583 // TODO: Clean this up.
584 const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
585 semi_space_collector_ = new collector::SemiSpace(this, generational,
586 generational ? "generational" : "");
587 garbage_collectors_.push_back(semi_space_collector_);
589 if (MayUseCollector(kCollectorTypeCC)) {
590 concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
591 garbage_collectors_.push_back(concurrent_copying_collector_);
593 if (MayUseCollector(kCollectorTypeMC)) {
594 mark_compact_collector_ = new collector::MarkCompact(this);
595 garbage_collectors_.push_back(mark_compact_collector_);
598 if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
599 (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
600 // Check that there's no gap between the image space and the non moving space so that the
601 // immune region won't break (e.g. due to a large object allocated in the gap). This is only
602 // required when we're the zygote or using GSS.
603 // Space with smallest Begin().
604 space::ImageSpace* first_space = nullptr;
605 for (space::ImageSpace* space : boot_image_spaces_) {
606 if (first_space == nullptr || space->Begin() < first_space->Begin()) {
610 bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
612 PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
613 MemMap::DumpMaps(LOG(ERROR), true);
614 LOG(FATAL) << "There's a gap between the image space and the non-moving space";
617 instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
618 if (gc_stress_mode_) {
619 backtrace_lock_ = new Mutex("GC complete lock");
621 if (is_running_on_memory_tool_ || gc_stress_mode_) {
622 instrumentation->InstrumentQuickAllocEntryPoints();
624 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
625 LOG(INFO) << "Heap() exiting";
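// Map an anonymous region, preferring the requested address; fall back to an arbitrary address if that fails.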
629 MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
630 uint8_t* request_begin,
632 std::string* out_error_str) {
634 MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
635 PROT_READ | PROT_WRITE, true, false, out_error_str);
636 if (map != nullptr || request_begin == nullptr) {
639 // Retry a second time with no specified request begin.
640 request_begin = nullptr;
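// A collector type may be used if it matches either the configured foreground or background collector.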
644 bool Heap::MayUseCollector(CollectorType type) const {
645 return foreground_collector_type_ == type || background_collector_type_ == type;
648 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
653 bool can_move_objects) {
654 space::MallocSpace* malloc_space = nullptr;
656 // Create rosalloc space.
657 malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
658 initial_size, growth_limit, capacity,
659 low_memory_mode_, can_move_objects);
661 malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
662 initial_size, growth_limit, capacity,
665 if (collector::SemiSpace::kUseRememberedSet) {
666 accounting::RememberedSet* rem_set =
667 new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
668 CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
669 AddRememberedSet(rem_set);
671 CHECK(malloc_space != nullptr) << "Failed to create " << name;
672 malloc_space->SetFootprintLimit(malloc_space->Capacity());
676 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
678 // Is background compaction enabled?
679 bool can_move_objects = IsMovingGc(background_collector_type_) !=
680 IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
681 // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
682 // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
683 // from the main space to the zygote space. If background compaction is enabled, always pass in
684 // that we can move objects.
685 if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
686 // After the zygote we want this to be false if we don't have background compaction enabled so
687 // that getting primitive array elements is faster.
688 // We never have homogeneous compaction with GSS and don't need a space with movable objects.
689 can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
691 if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
692 RemoveRememberedSet(main_space_);
694 const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
695 main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
697 SetSpaceAsDefault(main_space_);
698 VLOG(heap) << "Created main space " << main_space_;
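// Switch the allocator used for new allocations and update the quick alloc entrypoints to match.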
701 void Heap::ChangeAllocator(AllocatorType allocator) {
702 if (current_allocator_ != allocator) {
703 // These two allocators are only used internally and don't have any entrypoints.
704 CHECK_NE(allocator, kAllocatorTypeLOS);
705 CHECK_NE(allocator, kAllocatorTypeNonMoving);
706 current_allocator_ = allocator;
707 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
708 SetQuickAllocEntryPointsAllocator(current_allocator_);
709 Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
713 void Heap::DisableMovingGc() {
714 if (IsMovingGc(foreground_collector_type_)) {
715 foreground_collector_type_ = kCollectorTypeCMS;
717 if (IsMovingGc(background_collector_type_)) {
718 background_collector_type_ = foreground_collector_type_;
720 TransitionCollector(foreground_collector_type_);
721 Thread* const self = Thread::Current();
722 ScopedThreadStateChange tsc(self, kSuspended);
723 ScopedSuspendAll ssa(__FUNCTION__);
724 // Something may have caused the transition to fail.
725 if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
726 CHECK(main_space_ != nullptr);
727 // The allocation stack may have non movable objects in it. We need to flush it since the GC
728 // can only handle marking allocation stack objects of one non moving space and one main space.
731 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
734 main_space_->DisableMovingObjects();
735 non_moving_space_ = main_space_;
736 CHECK(!non_moving_space_->CanMoveObjects());
740 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
741 if (!IsValidContinuousSpaceObjectAddress(klass)) {
742 return StringPrintf("<non heap address klass %p>", klass);
744 mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
745 if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
746 std::string result("[");
747 result += SafeGetClassDescriptor(component_type);
749 } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
750 return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
751 } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
752 return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
754 mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
755 if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
756 return StringPrintf("<non heap address dex_cache %p>", dex_cache);
758 const DexFile* dex_file = dex_cache->GetDexFile();
759 uint16_t class_def_idx = klass->GetDexClassDefIndex();
760 if (class_def_idx == DexFile::kDexNoIndex16) {
761 return "<class def not found>";
763 const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
764 const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
765 return dex_file->GetTypeDescriptor(type_id);
769 std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
770 if (obj == nullptr) {
773 mirror::Class* klass = obj->GetClass<kVerifyNone>();
774 if (klass == nullptr) {
775 return "(class=null)";
777 std::string result(SafeGetClassDescriptor(klass));
778 if (obj->IsClass()) {
779 result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
784 void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
785 if (obj == nullptr) {
786 stream << "(obj=null)";
789 if (IsAligned<kObjectAlignment>(obj)) {
790 space::Space* space = nullptr;
791 // Don't use find space since it only finds spaces which actually contain objects instead of
792 // spaces which may contain objects (e.g. cleared bump pointer spaces).
793 for (const auto& cur_space : continuous_spaces_) {
794 if (cur_space->HasAddress(obj)) {
799 // Unprotect all the spaces.
800 for (const auto& con_space : continuous_spaces_) {
801 mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
803 stream << "Object " << obj;
804 if (space != nullptr) {
805 stream << " in space " << *space;
807 mirror::Class* klass = obj->GetClass<kVerifyNone>();
808 stream << "\nclass=" << klass;
809 if (klass != nullptr) {
810 stream << " type= " << SafePrettyTypeOf(obj);
812 // Re-protect the address we faulted on.
813 mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
817 bool Heap::IsCompilingBoot() const {
818 if (!Runtime::Current()->IsAotCompiler()) {
821 ScopedObjectAccess soa(Thread::Current());
822 for (const auto& space : continuous_spaces_) {
823 if (space->IsImageSpace() || space->IsZygoteSpace()) {
830 void Heap::IncrementDisableMovingGC(Thread* self) {
831 // Need to do this holding the lock to prevent races where the GC is about to run / running when
832 // we attempt to disable it.
833 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
834 MutexLock mu(self, *gc_complete_lock_);
835 ++disable_moving_gc_count_;
836 if (IsMovingGc(collector_type_running_)) {
837 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
841 void Heap::DecrementDisableMovingGC(Thread* self) {
842 MutexLock mu(self, *gc_complete_lock_);
843 CHECK_GT(disable_moving_gc_count_, 0U);
844 --disable_moving_gc_count_;
847 void Heap::IncrementDisableThreadFlip(Thread* self) {
848 // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
849 CHECK(kUseReadBarrier);
850 bool is_nested = self->GetDisableThreadFlipCount() > 0;
851 self->IncrementDisableThreadFlipCount();
853 // If this is a nested JNI critical section enter, we don't need to wait or increment the global
854 // counter. The global counter is incremented only once for a thread for the outermost enter.
857 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
858 MutexLock mu(self, *thread_flip_lock_);
859 bool has_waited = false;
860 uint64_t wait_start = NanoTime();
861 while (thread_flip_running_) {
863 thread_flip_cond_->Wait(self);
865 ++disable_thread_flip_count_;
867 uint64_t wait_time = NanoTime() - wait_start;
868 total_wait_time_ += wait_time;
869 if (wait_time > long_pause_log_threshold_) {
870 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
875 void Heap::DecrementDisableThreadFlip(Thread* self) {
876 // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
877 // the GC thread that is waiting to do a thread flip.
878 CHECK(kUseReadBarrier);
879 self->DecrementDisableThreadFlipCount();
880 bool is_outermost = self->GetDisableThreadFlipCount() == 0;
882 // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
883 // The global counter is decremented only once for a thread for the outermost exit.
886 MutexLock mu(self, *thread_flip_lock_);
887 CHECK_GT(disable_thread_flip_count_, 0U);
888 --disable_thread_flip_count_;
889 if (disable_thread_flip_count_ == 0) {
890 // Potentially notify the GC thread blocking to begin a thread flip.
891 thread_flip_cond_->Broadcast(self);
895 void Heap::ThreadFlipBegin(Thread* self) {
896 // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
897 // > 0, block. Otherwise, go ahead.
898 CHECK(kUseReadBarrier);
899 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
900 MutexLock mu(self, *thread_flip_lock_);
901 bool has_waited = false;
902 uint64_t wait_start = NanoTime();
903 CHECK(!thread_flip_running_);
904 // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
905 // GC. This is like a writer preference of a reader-writer lock.
906 thread_flip_running_ = true;
907 while (disable_thread_flip_count_ > 0) {
909 thread_flip_cond_->Wait(self);
912 uint64_t wait_time = NanoTime() - wait_start;
913 total_wait_time_ += wait_time;
914 if (wait_time > long_pause_log_threshold_) {
915 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
920 void Heap::ThreadFlipEnd(Thread* self) {
921 // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
922 // waiting to enter a JNI critical section.
923 CHECK(kUseReadBarrier);
924 MutexLock mu(self, *thread_flip_lock_);
925 CHECK(thread_flip_running_);
926 thread_flip_running_ = false;
927 // Potentially notify mutator threads blocking to enter a JNI critical section.
928 thread_flip_cond_->Broadcast(self);
931 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
932 if (old_process_state != new_process_state) {
933 const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
934 for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
935 // Start at index 1 to avoid "is always false" warning.
936 // Have iteration 1 always transition the collector.
937 TransitionCollector((((i & 1) == 1) == jank_perceptible)
938 ? foreground_collector_type_
939 : background_collector_type_);
940 usleep(kCollectorTransitionStressWait);
942 if (jank_perceptible) {
943 // Transition back to foreground right away to prevent jank.
944 RequestCollectorTransition(foreground_collector_type_, 0);
946 // Don't delay for debug builds since we may want to stress test the GC.
947 // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
948 // special handling which does a homogeneous space compaction once but then doesn't transition the collector.
950 RequestCollectorTransition(background_collector_type_,
951 kIsDebugBuild ? 0 : kCollectorTransitionWait);
956 void Heap::CreateThreadPool() {
957 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
958 if (num_threads != 0) {
959 thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
963 // Visit objects when threads aren't suspended. If using a concurrent moving
964 // GC, disable moving GC, suspend threads, and then visit objects.
965 void Heap::VisitObjects(ObjectCallback callback, void* arg) {
966 Thread* self = Thread::Current();
967 Locks::mutator_lock_->AssertSharedHeld(self);
968 DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
969 if (IsGcConcurrentAndMoving()) {
970 // Concurrent moving GC. Just suspending threads isn't sufficient
971 // because a collection isn't one big pause and we could suspend
972 // threads in the middle (between phases) of a concurrent moving
973 // collection where it's not easily known which objects are alive
974 // (both the region space and the non-moving space) or which
975 // copies of objects to visit, and the to-space invariant could be
976 // easily broken. Visit objects while GC isn't running by using
977 // IncrementDisableMovingGC() and threads are suspended.
978 IncrementDisableMovingGC(self);
980 ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
981 ScopedSuspendAll ssa(__FUNCTION__);
982 VisitObjectsInternalRegionSpace(callback, arg);
983 VisitObjectsInternal(callback, arg);
985 DecrementDisableMovingGC(self);
987 // GCs can move objects, so don't allow this.
988 ScopedAssertNoThreadSuspension ants(self, "Visiting objects");
989 DCHECK(region_space_ == nullptr);
990 VisitObjectsInternal(callback, arg);
994 // Visit objects when threads are already suspended.
995 void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
996 Thread* self = Thread::Current();
997 Locks::mutator_lock_->AssertExclusiveHeld(self);
998 VisitObjectsInternalRegionSpace(callback, arg);
999 VisitObjectsInternal(callback, arg);
1002 // Visit objects in the region spaces.
1003 void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
1004 Thread* self = Thread::Current();
1005 Locks::mutator_lock_->AssertExclusiveHeld(self);
1006 if (region_space_ != nullptr) {
1007 DCHECK(IsGcConcurrentAndMoving());
1008 if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
1009 // Exclude the pre-zygote fork time where the semi-space collector
1010 // calls VerifyHeapReferences() as part of the zygote compaction
1011 // which then would call here without the moving GC disabled,
1013 DCHECK(IsMovingGCDisabled(self));
1015 region_space_->Walk(callback, arg);
1019 // Visit objects in the other spaces.
1020 void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
1021 if (bump_pointer_space_ != nullptr) {
1022 // Visit objects in bump pointer space.
1023 bump_pointer_space_->Walk(callback, arg);
1025 // TODO: Switch to standard begin and end to use a range-based loop.
1026 for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
1027 mirror::Object* const obj = it->AsMirrorPtr();
1028 if (obj != nullptr && obj->GetClass() != nullptr) {
1029 // Avoid the race condition caused by the object not yet being written into the allocation
1030 // stack or the class not yet being written in the object. Or, if
1031 // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
1036 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1037 GetLiveBitmap()->Walk(callback, arg);
1041 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
1042 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
1043 space::ContinuousSpace* space2 = non_moving_space_;
1044 // TODO: Generalize this to n bitmaps?
1045 CHECK(space1 != nullptr);
1046 CHECK(space2 != nullptr);
1047 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
1048 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
1052 void Heap::DeleteThreadPool() {
1053 thread_pool_.reset(nullptr);
1056 void Heap::AddSpace(space::Space* space) {
1057 CHECK(space != nullptr);
1058 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1059 if (space->IsContinuousSpace()) {
1060 DCHECK(!space->IsDiscontinuousSpace());
1061 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1062 // Continuous spaces don't necessarily have bitmaps.
1063 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1064 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1065 if (live_bitmap != nullptr) {
1066 CHECK(mark_bitmap != nullptr);
1067 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1068 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
1070 continuous_spaces_.push_back(continuous_space);
1071 // Ensure that spaces remain sorted in increasing order of start address.
1072 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1073 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1074 return a->Begin() < b->Begin();
1077 CHECK(space->IsDiscontinuousSpace());
1078 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1079 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1080 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1081 discontinuous_spaces_.push_back(discontinuous_space);
1083 if (space->IsAllocSpace()) {
1084 alloc_spaces_.push_back(space->AsAllocSpace());
1088 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1089 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1090 if (continuous_space->IsDlMallocSpace()) {
1091 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1092 } else if (continuous_space->IsRosAllocSpace()) {
1093 rosalloc_space_ = continuous_space->AsRosAllocSpace();
1097 void Heap::RemoveSpace(space::Space* space) {
1098 DCHECK(space != nullptr);
1099 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1100 if (space->IsContinuousSpace()) {
1101 DCHECK(!space->IsDiscontinuousSpace());
1102 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1103 // Continuous spaces don't necessarily have bitmaps.
1104 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1105 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1106 if (live_bitmap != nullptr) {
1107 DCHECK(mark_bitmap != nullptr);
1108 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1109 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1111 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1112 DCHECK(it != continuous_spaces_.end());
1113 continuous_spaces_.erase(it);
1115 DCHECK(space->IsDiscontinuousSpace());
1116 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1117 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1118 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1119 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1120 discontinuous_space);
1121 DCHECK(it != discontinuous_spaces_.end());
1122 discontinuous_spaces_.erase(it);
1124 if (space->IsAllocSpace()) {
1125 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1126 DCHECK(it != alloc_spaces_.end());
1127 alloc_spaces_.erase(it);
1131 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1132 // Dump cumulative timings.
1133 os << "Dumping cumulative Gc timings\n";
1134 uint64_t total_duration = 0;
1135 // Dump cumulative loggers for each GC type.
1136 uint64_t total_paused_time = 0;
1137 for (auto& collector : garbage_collectors_) {
1138 total_duration += collector->GetCumulativeTimings().GetTotalNs();
1139 total_paused_time += collector->GetTotalPausedTimeNs();
1140 collector->DumpPerformanceInfo(os);
1142 if (total_duration != 0) {
1143 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
1144 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1145 os << "Mean GC size throughput: "
1146 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
1147 os << "Mean GC object throughput: "
1148 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1150 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1151 os << "Total number of allocations " << total_objects_allocated << "\n";
1152 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1153 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1154 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1155 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1156 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1157 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1158 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1159 if (HasZygoteSpace()) {
1160 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1162 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1163 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1164 os << "Total GC count: " << GetGcCount() << "\n";
1165 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1166 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1167 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1170 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1171 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1172 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1173 gc_count_rate_histogram_.DumpBins(os);
1176 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1177 os << "Histogram of blocking GC count per "
1178 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1179 blocking_gc_count_rate_histogram_.DumpBins(os);
1184 if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1185 rosalloc_space_->DumpStats(os);
1188 BaseMutex::DumpAll(os);
1191 void Heap::ResetGcPerformanceInfo() {
1192 for (auto& collector : garbage_collectors_) {
1193 collector->ResetMeasurements();
1195 total_bytes_freed_ever_ = 0;
1196 total_objects_freed_ever_ = 0;
1197 total_wait_time_ = 0;
1198 blocking_gc_count_ = 0;
1199 blocking_gc_time_ = 0;
1200 gc_count_last_window_ = 0;
1201 blocking_gc_count_last_window_ = 0;
1202 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1203 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1205 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1206 gc_count_rate_histogram_.Reset();
1207 blocking_gc_count_rate_histogram_.Reset();
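// Total number of completed GC iterations, summed over all garbage collectors.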
1211 uint64_t Heap::GetGcCount() const {
1212 uint64_t gc_count = 0U;
1213 for (auto& collector : garbage_collectors_) {
1214 gc_count += collector->GetCumulativeTimings().GetIterations();
1219 uint64_t Heap::GetGcTime() const {
1220 uint64_t gc_time = 0U;
1221 for (auto& collector : garbage_collectors_) {
1222 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1227 uint64_t Heap::GetBlockingGcCount() const {
1228 return blocking_gc_count_;
1231 uint64_t Heap::GetBlockingGcTime() const {
1232 return blocking_gc_time_;
1235 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1236 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1237 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1238 gc_count_rate_histogram_.DumpBins(os);
1242 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1243 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1244 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1245 blocking_gc_count_rate_histogram_.DumpBins(os);
1250 VLOG(heap) << "Starting ~Heap()";
1251 STLDeleteElements(&garbage_collectors_);
1252 // If we don't reset then the mark stack complains in its destructor.
1253 allocation_stack_->Reset();
1254 allocation_records_.reset();
1255 live_stack_->Reset();
1256 STLDeleteValues(&mod_union_tables_);
1257 STLDeleteValues(&remembered_sets_);
1258 STLDeleteElements(&continuous_spaces_);
1259 STLDeleteElements(&discontinuous_spaces_);
1260 delete gc_complete_lock_;
1261 delete thread_flip_lock_;
1262 delete pending_task_lock_;
1263 delete backtrace_lock_;
1264 if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1265 LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1266 << " total=" << seen_backtrace_count_.LoadRelaxed() +
1267 unique_backtrace_count_.LoadRelaxed();
1269 VLOG(heap) << "Finished ~Heap()";
1272 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
1273 bool fail_ok) const {
1274 for (const auto& space : continuous_spaces_) {
1275 if (space->Contains(obj)) {
1280 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1285 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
1286 bool fail_ok) const {
1287 for (const auto& space : discontinuous_spaces_) {
1288 if (space->Contains(obj)) {
1293 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1298 space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
1299 space::Space* result = FindContinuousSpaceFromObject(obj, true);
1300 if (result != nullptr) {
1303 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1306 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1307 // If we're in a stack overflow, do not create a new exception. It would require running the
1308 // constructor, which will of course still be in a stack overflow.
1309 if (self->IsHandlingStackOverflow()) {
1310 self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1314 std::ostringstream oss;
1315 size_t total_bytes_free = GetFreeMemory();
1316 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1317 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
1318 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1319 if (total_bytes_free >= byte_count) {
1320 space::AllocSpace* space = nullptr;
1321 if (allocator_type == kAllocatorTypeNonMoving) {
1322 space = non_moving_space_;
1323 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1324 allocator_type == kAllocatorTypeDlMalloc) {
1325 space = main_space_;
1326 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1327 allocator_type == kAllocatorTypeTLAB) {
1328 space = bump_pointer_space_;
1329 } else if (allocator_type == kAllocatorTypeRegion ||
1330 allocator_type == kAllocatorTypeRegionTLAB) {
1331 space = region_space_;
1333 if (space != nullptr) {
1334 space->LogFragmentationAllocFailure(oss, byte_count);
1337 self->ThrowOutOfMemoryError(oss.str().c_str());
1340 void Heap::DoPendingCollectorTransition() {
1341 CollectorType desired_collector_type = desired_collector_type_;
1342 // Launch homogeneous space compaction if it is desired.
1343 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1344 if (!CareAboutPauseTimes()) {
1345 PerformHomogeneousSpaceCompact();
1347 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1350 TransitionCollector(desired_collector_type);
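// Release unused memory back to the system; monitor deflation is only done when pauses are not perceptible.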
1354 void Heap::Trim(Thread* self) {
1355 Runtime* const runtime = Runtime::Current();
1356 if (!CareAboutPauseTimes()) {
1357 // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care about pauses.
1359 ScopedTrace trace("Deflating monitors");
1360 ScopedSuspendAll ssa(__FUNCTION__);
1361 uint64_t start_time = NanoTime();
1362 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1363 VLOG(heap) << "Deflating " << count << " monitors took "
1364 << PrettyDuration(NanoTime() - start_time);
1366 TrimIndirectReferenceTables(self);
1368 // Trim arenas that may have been used by JIT or verifier.
1369 runtime->GetArenaPool()->TrimMaps();
1372 class TrimIndirectReferenceTableClosure : public Closure {
1374 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1376 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1377 thread->GetJniEnv()->locals.Trim();
1378 // If the thread is a running mutator, act on behalf of the trim thread.
1379 // See the code in ThreadList::RunCheckpoint.
1380 barrier_->Pass(Thread::Current());
1384 Barrier* const barrier_;
1387 void Heap::TrimIndirectReferenceTables(Thread* self) {
1388 ScopedObjectAccess soa(self);
1389 ScopedTrace trace(__PRETTY_FUNCTION__);
1390 JavaVMExt* vm = soa.Vm();
1391 // Trim globals indirect reference table.
1393 // Trim locals indirect reference tables.
1395 TrimIndirectReferenceTableClosure closure(&barrier);
1396 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1397 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1398 if (barrier_count != 0) {
1399 barrier.Increment(self, barrier_count);
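// Each checkpointed thread runs the closure above to trim its own local indirect reference
// table and then passes the barrier; Increment() blocks here until all barrier_count threads
// reported by RunCheckpoint have done so.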
1403 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1404 MutexLock mu(self, *gc_complete_lock_);
1405 // Ensure there is only one GC at a time.
1406 WaitForGcToCompleteLocked(cause, self);
1407 collector_type_running_ = collector_type;
1410 void Heap::TrimSpaces(Thread* self) {
1412 // Need to do this before acquiring the locks since we don't want to get suspended while
1413 // holding any locks.
1414 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1415 // Pretend we are doing a GC to prevent background compaction from deleting the space we are trimming.
1417 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1419 ScopedTrace trace(__PRETTY_FUNCTION__);
1420 const uint64_t start_ns = NanoTime();
1421 // Trim the managed spaces.
1422 uint64_t total_alloc_space_allocated = 0;
1423 uint64_t total_alloc_space_size = 0;
1424 uint64_t managed_reclaimed = 0;
1426 ScopedObjectAccess soa(self);
1427 for (const auto& space : continuous_spaces_) {
1428 if (space->IsMallocSpace()) {
1429 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1430 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1431 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1432 // for a long period of time.
1433 managed_reclaimed += malloc_space->Trim();
1435 total_alloc_space_size += malloc_space->Size();
1439 total_alloc_space_allocated = GetBytesAllocated();
1440 if (large_object_space_ != nullptr) {
1441 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1443 if (bump_pointer_space_ != nullptr) {
1444 total_alloc_space_allocated -= bump_pointer_space_->Size();
1446 if (region_space_ != nullptr) {
1447 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1449 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1450 static_cast<float>(total_alloc_space_size);
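// The utilization reported below excludes large object, bump pointer and region space
// allocations (subtracted above), so it measures how full the malloc-backed spaces that were
// just trimmed are.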
1451 uint64_t gc_heap_end_ns = NanoTime();
1452 // We never move things in the native heap, so we can finish the GC at this point.
1453 FinishGC(self, collector::kGcTypeNone);
1455 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1456 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1457 << static_cast<int>(100 * managed_utilization) << "%.";
1460 bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1461 // Note: we deliberately don't take the lock here, and mustn't test anything that would require taking the lock.
1463 if (obj == nullptr) {
1466 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
1469 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1470 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1473 bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1474 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1477 for (const auto& space : continuous_spaces_) {
1478 if (space->HasAddress(obj)) {
1485 bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
1486 bool search_live_stack, bool sorted) {
1487 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1490 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
1491 mirror::Class* klass = obj->GetClass<kVerifyNone>();
1493 // This case happens for java.lang.Class.
1496 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1497 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
1498 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1499 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1500 return temp_space_->Contains(obj);
1502 if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
1505 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1506 space::DiscontinuousSpace* d_space = nullptr;
1507 if (c_space != nullptr) {
1508 if (c_space->GetLiveBitmap()->Test(obj)) {
1512 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1513 if (d_space != nullptr) {
1514 if (d_space->GetLiveBitmap()->Test(obj)) {
1519 // This covers the allocation/live stack swapping that is done without mutators suspended.
1520 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1522 NanoSleep(MsToNs(10));
1524 if (search_allocation_stack) {
1526 if (allocation_stack_->ContainsSorted(obj)) {
1529 } else if (allocation_stack_->Contains(obj)) {
1534 if (search_live_stack) {
1536 if (live_stack_->ContainsSorted(obj)) {
1539 } else if (live_stack_->Contains(obj)) {
1544 // We need to check the bitmaps again since there is a race where we mark something as live and
1545 // then clear the stack containing it.
1546 if (c_space != nullptr) {
1547 if (c_space->GetLiveBitmap()->Test(obj)) {
1551 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1552 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
1559 std::string Heap::DumpSpaces() const {
1560 std::ostringstream oss;
1565 void Heap::DumpSpaces(std::ostream& stream) const {
1566 for (const auto& space : continuous_spaces_) {
1567 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1568 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1569 stream << space << " " << *space << "\n";
1570 if (live_bitmap != nullptr) {
1571 stream << live_bitmap << " " << *live_bitmap << "\n";
1573 if (mark_bitmap != nullptr) {
1574 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1577 for (const auto& space : discontinuous_spaces_) {
1578 stream << space << " " << *space << "\n";
1582 void Heap::VerifyObjectBody(mirror::Object* obj) {
1583 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1587 // Ignore early dawn of the universe verifications.
1588 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1591 CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
1592 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1593 CHECK(c != nullptr) << "Null class in object " << obj;
1594 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1595 CHECK(VerifyClassClass(c));
1597 if (verify_object_mode_ > kVerifyObjectModeFast) {
1598 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1599 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1603 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1604 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1607 void Heap::VerifyHeap() {
1608 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1609 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1612 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1613 // Use a signed comparison since freed bytes can be negative when background compaction or
1614 // foreground transitions occur. This is caused by moving objects from a bump pointer space to a
1615 // free-list backed space, which typically increases the memory footprint due to padding and binning.
1616 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1617 // Note: This relies on 2s complement for handling negative freed_bytes.
1618 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1619 if (Runtime::Current()->HasStatsEnabled()) {
1620 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1621 thread_stats->freed_objects += freed_objects;
1622 thread_stats->freed_bytes += freed_bytes;
1623 // TODO: Do this concurrently.
1624 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1625 global_stats->freed_objects += freed_objects;
1626 global_stats->freed_bytes += freed_bytes;
1630 void Heap::RecordFreeRevoke() {
1631 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1632 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1633 // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
1634 // all the way to zero, since the remainder will be subtracted at the next GC.
1635 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1636 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1637 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1638 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1639 bytes_freed) << "num_bytes_allocated_ underflow";
1640 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1643 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1644 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1645 return rosalloc_space_;
1647 for (const auto& space : continuous_spaces_) {
1648 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1649 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1650 return space->AsContinuousSpace()->AsRosAllocSpace();
1657 static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
1658 instrumentation::Instrumentation* const instrumentation =
1659 Runtime::Current()->GetInstrumentation();
1660 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
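// The repeated (!instrumented && EntrypointsInstrumented()) checks below appear to abort the
// allocation if the alloc entrypoints became instrumented while this thread was suspended,
// presumably so the request is redone through the instrumented allocation path.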
1663 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1664 AllocatorType allocator,
1667 size_t* bytes_allocated,
1668 size_t* usable_size,
1669 size_t* bytes_tl_bulk_allocated,
1670 mirror::Class** klass) {
1671 bool was_default_allocator = allocator == GetCurrentAllocator();
1672 // Make sure there is no pending exception since we may need to throw an OOME.
1673 self->AssertNoPendingException();
1674 DCHECK(klass != nullptr);
1675 StackHandleScope<1> hs(self);
1676 HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1677 klass = nullptr; // Invalidate for safety.
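// The retry strategy below escalates roughly as follows: wait for any in-progress GC and
// retry; run the next planned GC type and retry; run each remaining GC type in gc_plan_ and
// retry; retry with heap growth allowed; and finally force a GC that clears SoftReferences
// before making one last attempt and throwing OOME.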
1678 // The allocation failed. If the GC is running, block until it completes, and then retry the allocation.
1680 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1681 // If we were the default allocator but the allocator changed while we were suspended,
1682 // abort the allocation.
1683 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1684 (!instrumented && EntrypointsInstrumented())) {
1687 if (last_gc != collector::kGcTypeNone) {
1688 // A GC was in progress and we blocked; retry the allocation now that memory has been freed.
1689 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1690 usable_size, bytes_tl_bulk_allocated);
1691 if (ptr != nullptr) {
1696 collector::GcType tried_type = next_gc_type_;
1698 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1699 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1700 (!instrumented && EntrypointsInstrumented())) {
1704 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1705 usable_size, bytes_tl_bulk_allocated);
1706 if (ptr != nullptr) {
1711 // Loop through our different GC types and try to GC until we get enough free memory.
1712 for (collector::GcType gc_type : gc_plan_) {
1713 if (gc_type == tried_type) {
1716 // Attempt to run the collector; if we succeed, retry the allocation.
1717 const bool plan_gc_ran =
1718 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1719 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1720 (!instrumented && EntrypointsInstrumented())) {
1724 // Did we free sufficient memory for the allocation to succeed?
1725 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1726 usable_size, bytes_tl_bulk_allocated);
1727 if (ptr != nullptr) {
1732 // Allocations have failed after GCs; this is an exceptional state.
1733 // Try harder, growing the heap if necessary.
1734 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1735 usable_size, bytes_tl_bulk_allocated);
1736 if (ptr != nullptr) {
1739 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1740 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1741 // VM spec requires that all SoftReferences have been collected and cleared before throwing an OOME.
1743 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1745 // TODO: Run finalization, but this may cause more allocations to occur.
1746 // We don't need a WaitForGcToComplete here either.
1747 DCHECK(!gc_plan_.empty());
1748 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1749 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1750 (!instrumented && EntrypointsInstrumented())) {
1753 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1754 bytes_tl_bulk_allocated);
1755 if (ptr == nullptr) {
1756 const uint64_t current_time = NanoTime();
1757 switch (allocator) {
1758 case kAllocatorTypeRosAlloc:
1760 case kAllocatorTypeDlMalloc: {
1761 if (use_homogeneous_space_compaction_for_oom_ &&
1762 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1763 min_interval_homogeneous_space_compaction_by_oom_) {
1764 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1765 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1766 // Thread suspension could have occurred.
1767 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1768 (!instrumented && EntrypointsInstrumented())) {
1772 case HomogeneousSpaceCompactResult::kSuccess:
1773 // If the allocation succeeded, we delayed an oom.
1774 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1775 usable_size, bytes_tl_bulk_allocated);
1776 if (ptr != nullptr) {
1777 count_delayed_oom_++;
1780 case HomogeneousSpaceCompactResult::kErrorReject:
1781 // Reject due to disabled moving GC.
1783 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1784 // Throw OOM by default.
1787 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1788 << static_cast<size_t>(result);
1792 // Always print that we ran homogeneous space compaction since this can cause jank.
1793 VLOG(heap) << "Ran heap homogeneous space compaction, "
1794 << " requested defragmentation "
1795 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1796 << " performed defragmentation "
1797 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1798 << " ignored homogeneous space compaction "
1799 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1800 << " delayed count = "
1801 << count_delayed_oom_.LoadSequentiallyConsistent();
1805 case kAllocatorTypeNonMoving: {
1806 // Try to transition the heap if the allocation failure was due to the space being full.
1807 if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1808 // If we aren't out of memory then the OOM was probably from the non-moving space being
1809 // full. Attempt to disable compaction and turn the main space into a non-moving space.
1811 // Thread suspension could have occurred.
1812 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1813 (!instrumented && EntrypointsInstrumented())) {
1816 // If we are still a moving GC then something must have caused the transition to fail.
1817 if (IsMovingGc(collector_type_)) {
1818 MutexLock mu(self, *gc_complete_lock_);
1819 // If we couldn't disable moving GC, just throw OOME and return null.
1820 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1821 << disable_moving_gc_count_;
1823 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1824 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1825 usable_size, bytes_tl_bulk_allocated);
1831 // Do nothing for the other allocators.
1835 // If the allocation hasn't succeeded by this point, throw an OOM error.
1836 if (ptr == nullptr) {
1837 ThrowOutOfMemoryError(self, alloc_size, allocator);
1842 void Heap::SetTargetHeapUtilization(float target) {
1843 DCHECK_GT(target, 0.0f); // asserted in Java code
1844 DCHECK_LT(target, 1.0f);
1845 target_utilization_ = target;
1848 size_t Heap::GetObjectsAllocated() const {
1849 Thread* const self = Thread::Current();
1850 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1851 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1852 ScopedSuspendAll ssa(__FUNCTION__);
1853 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1855 for (space::AllocSpace* space : alloc_spaces_) {
1856 total += space->GetObjectsAllocated();
1861 uint64_t Heap::GetObjectsAllocatedEver() const {
1862 uint64_t total = GetObjectsFreedEver();
1863 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1864 if (Thread::Current() != nullptr) {
1865 total += GetObjectsAllocated();
1870 uint64_t Heap::GetBytesAllocatedEver() const {
1871 return GetBytesFreedEver() + GetBytesAllocated();
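// Lifetime totals are reconstructed here as the amount freed ever plus the amount currently
// allocated, seemingly to avoid maintaining a separate running counter of allocations.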
1874 class InstanceCounter {
1876 InstanceCounter(const std::vector<mirror::Class*>& classes,
1877 bool use_is_assignable_from,
1879 SHARED_REQUIRES(Locks::mutator_lock_)
1880 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
1882 static void Callback(mirror::Object* obj, void* arg)
1883 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1884 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1885 mirror::Class* instance_class = obj->GetClass();
1886 CHECK(instance_class != nullptr);
1887 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1888 mirror::Class* klass = instance_counter->classes_[i];
1889 if (instance_counter->use_is_assignable_from_) {
1890 if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
1891 ++instance_counter->counts_[i];
1893 } else if (instance_class == klass) {
1894 ++instance_counter->counts_[i];
1900 const std::vector<mirror::Class*>& classes_;
1901 bool use_is_assignable_from_;
1902 uint64_t* const counts_;
1903 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1906 void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1908 InstanceCounter counter(classes, use_is_assignable_from, counts);
1909 VisitObjects(InstanceCounter::Callback, &counter);
1912 class InstanceCollector {
1914 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1915 SHARED_REQUIRES(Locks::mutator_lock_)
1916 : class_(c), max_count_(max_count), instances_(instances) {
1918 static void Callback(mirror::Object* obj, void* arg)
1919 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1920 DCHECK(arg != nullptr);
1921 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1922 if (obj->GetClass() == instance_collector->class_) {
1923 if (instance_collector->max_count_ == 0 ||
1924 instance_collector->instances_.size() < instance_collector->max_count_) {
1925 instance_collector->instances_.push_back(obj);
1931 const mirror::Class* const class_;
1932 const uint32_t max_count_;
1933 std::vector<mirror::Object*>& instances_;
1934 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1937 void Heap::GetInstances(mirror::Class* c,
1939 std::vector<mirror::Object*>& instances) {
1940 InstanceCollector collector(c, max_count, instances);
1941 VisitObjects(&InstanceCollector::Callback, &collector);
1944 class ReferringObjectsFinder {
1946 ReferringObjectsFinder(mirror::Object* object,
1948 std::vector<mirror::Object*>& referring_objects)
1949 SHARED_REQUIRES(Locks::mutator_lock_)
1950 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1953 static void Callback(mirror::Object* obj, void* arg)
1954 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1955 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1958 // For bitmap Visit.
1959 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1960 // annotalysis on visitors.
1961 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1962 o->VisitReferences(*this, VoidFunctor());
1965 // For Object::VisitReferences.
1966 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
1967 SHARED_REQUIRES(Locks::mutator_lock_) {
1968 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1969 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1970 referring_objects_.push_back(obj);
1974 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1976 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1979 const mirror::Object* const object_;
1980 const uint32_t max_count_;
1981 std::vector<mirror::Object*>& referring_objects_;
1982 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1985 void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1986 std::vector<mirror::Object*>& referring_objects) {
1987 ReferringObjectsFinder finder(o, max_count, referring_objects);
1988 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
1991 void Heap::CollectGarbage(bool clear_soft_references) {
1992 // Even if we waited for a GC, we still need to do another GC since weaks allocated during the
1993 // last GC will not necessarily have been cleared.
1994 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
1997 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
1998 return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
1999 foreground_collector_type_ == kCollectorTypeCMS;
2002 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2003 Thread* self = Thread::Current();
2004 // Inc requested homogeneous space compaction.
2005 count_requested_homogeneous_space_compaction_++;
2006 // Store performed homogeneous space compaction at a new request arrival.
2007 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2008 Locks::mutator_lock_->AssertNotHeld(self);
2010 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2011 MutexLock mu(self, *gc_complete_lock_);
2012 // Ensure there is only one GC at a time.
2013 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2014 // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC disable count is non-zero.
2016 // If the collector type changed to something which doesn't benefit from homogeneous space compaction, reject the request.
2018 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2019 !main_space_->CanMoveObjects()) {
2020 return kErrorReject;
2022 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2023 return kErrorUnsupported;
2025 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2027 if (Runtime::Current()->IsShuttingDown(self)) {
2028 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2029 // cause objects to get finalized.
2030 FinishGC(self, collector::kGcTypeNone);
2031 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2033 collector::GarbageCollector* collector;
2035 ScopedSuspendAll ssa(__FUNCTION__);
2036 uint64_t start_time = NanoTime();
2037 // Launch compaction.
2038 space::MallocSpace* to_space = main_space_backup_.release();
2039 space::MallocSpace* from_space = main_space_;
2040 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2041 const uint64_t space_size_before_compaction = from_space->Size();
2043 // Make sure that we will have enough room to copy.
2044 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2045 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2046 const uint64_t space_size_after_compaction = to_space->Size();
2047 main_space_ = to_space;
2048 main_space_backup_.reset(from_space);
2049 RemoveSpace(from_space);
2050 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
2051 // Update performed homogeneous space compaction count.
2052 count_performed_homogeneous_space_compaction_++;
2053 // Print the statistics log and resume all threads.
2054 uint64_t duration = NanoTime() - start_time;
2055 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2056 << PrettySize(space_size_before_compaction) << " -> "
2057 << PrettySize(space_size_after_compaction) << " compact-ratio: "
2058 << std::fixed << static_cast<double>(space_size_after_compaction) /
2059 static_cast<double>(space_size_before_compaction);
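// A compact-ratio below 1.0 means the main space footprint shrank as a result of the
// compaction.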
2062 reference_processor_->EnqueueClearedReferences(self);
2063 GrowForUtilization(semi_space_collector_);
2064 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2065 FinishGC(self, collector::kGcTypeFull);
2067 ScopedObjectAccess soa(self);
2068 soa.Vm()->UnloadNativeLibraries();
2070 return HomogeneousSpaceCompactResult::kSuccess;
2073 void Heap::TransitionCollector(CollectorType collector_type) {
2074 if (collector_type == collector_type_) {
2077 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
2078 << " -> " << static_cast<int>(collector_type);
2079 uint64_t start_time = NanoTime();
2080 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2081 Runtime* const runtime = Runtime::Current();
2082 Thread* const self = Thread::Current();
2083 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2084 Locks::mutator_lock_->AssertNotHeld(self);
2085 // Busy wait until we can GC (StartGC can fail if we have a non-zero
2086 // compacting_gc_disable_count_; this should rarely occur).
2089 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2090 MutexLock mu(self, *gc_complete_lock_);
2091 // Ensure there is only one GC at a time.
2092 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
2093 // Currently we only need a heap transition if we switch from a moving collector to a
2094 // non-moving one, or vice versa.
2095 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
2096 // If someone else beat us to it and changed the collector before we could, exit.
2097 // This is safe to do before the suspend all since we set the collector_type_running_ before
2098 // we exit the loop. If another thread attempts to do the heap transition before we exit,
2099 // then it would get blocked on WaitForGcToCompleteLocked.
2100 if (collector_type == collector_type_) {
2103 // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
2104 if (!copying_transition || disable_moving_gc_count_ == 0) {
2105 // TODO: Not hard code in semi-space collector?
2106 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2112 if (runtime->IsShuttingDown(self)) {
2113 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2114 // cause objects to get finalized.
2115 FinishGC(self, collector::kGcTypeNone);
2118 collector::GarbageCollector* collector = nullptr;
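// With all threads suspended, the switch below either carves a bump pointer space out of the
// backup map and evacuates the main space into it (when moving to SS), or rebuilds the main
// malloc space from the temp space map and evacuates the bump pointer space back into it
// (when moving to MS/CMS).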
2120 ScopedSuspendAll ssa(__FUNCTION__);
2121 switch (collector_type) {
2122 case kCollectorTypeSS: {
2123 if (!IsMovingGc(collector_type_)) {
2124 // Create the bump pointer space from the backup space.
2125 CHECK(main_space_backup_ != nullptr);
2126 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2127 // We are transitioning from a non-moving GC to a moving GC; since we copied from the bump
2128 // pointer space in the last transition, it will be protected.
2129 CHECK(mem_map != nullptr);
2130 mem_map->Protect(PROT_READ | PROT_WRITE);
2131 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2133 AddSpace(bump_pointer_space_);
2134 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2135 // Use the now empty main space mem map for the bump pointer temp space.
2136 mem_map.reset(main_space_->ReleaseMemMap());
2137 // Unset the pointers just in case.
2138 if (dlmalloc_space_ == main_space_) {
2139 dlmalloc_space_ = nullptr;
2140 } else if (rosalloc_space_ == main_space_) {
2141 rosalloc_space_ = nullptr;
2143 // Remove the main space so that we don't try to trim it; trimming doesn't work for debug
2144 // builds since RosAlloc attempts to read the magic number from a protected page.
2145 RemoveSpace(main_space_);
2146 RemoveRememberedSet(main_space_);
2147 delete main_space_; // Delete the space since it has been removed.
2148 main_space_ = nullptr;
2149 RemoveRememberedSet(main_space_backup_.get());
2150 main_space_backup_.reset(nullptr); // Deletes the space.
2151 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2153 AddSpace(temp_space_);
2157 case kCollectorTypeMS:
2159 case kCollectorTypeCMS: {
2160 if (IsMovingGc(collector_type_)) {
2161 CHECK(temp_space_ != nullptr);
2162 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2163 RemoveSpace(temp_space_);
2164 temp_space_ = nullptr;
2165 mem_map->Protect(PROT_READ | PROT_WRITE);
2166 CreateMainMallocSpace(mem_map.get(),
2167 kDefaultInitialSize,
2168 std::min(mem_map->Size(), growth_limit_),
2171 // Compact to the main space from the bump pointer space; we don't need to swap semispaces.
2172 AddSpace(main_space_);
2173 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2174 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2175 RemoveSpace(bump_pointer_space_);
2176 bump_pointer_space_ = nullptr;
2177 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2178 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2179 if (kIsDebugBuild && kUseRosAlloc) {
2180 mem_map->Protect(PROT_READ | PROT_WRITE);
2182 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2184 kDefaultInitialSize,
2185 std::min(mem_map->Size(), growth_limit_),
2189 if (kIsDebugBuild && kUseRosAlloc) {
2190 mem_map->Protect(PROT_NONE);
2197 LOG(FATAL) << "Attempted to transition to invalid collector type "
2198 << static_cast<size_t>(collector_type);
2202 ChangeCollector(collector_type);
2204 // Can't call into java code with all threads suspended.
2205 reference_processor_->EnqueueClearedReferences(self);
2206 uint64_t duration = NanoTime() - start_time;
2207 GrowForUtilization(semi_space_collector_);
2208 DCHECK(collector != nullptr);
2209 LogGC(kGcCauseCollectorTransition, collector);
2210 FinishGC(self, collector::kGcTypeFull);
2212 ScopedObjectAccess soa(self);
2213 soa.Vm()->UnloadNativeLibraries();
2215 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2216 int32_t delta_allocated = before_allocated - after_allocated;
2217 std::string saved_str;
2218 if (delta_allocated >= 0) {
2219 saved_str = " saved at least " + PrettySize(delta_allocated);
2221 saved_str = " expanded " + PrettySize(-delta_allocated);
2223 VLOG(heap) << "Collector transition to " << collector_type << " took "
2224 << PrettyDuration(duration) << saved_str;
2227 void Heap::ChangeCollector(CollectorType collector_type) {
2228 // TODO: Only do this with all mutators suspended to avoid races.
2229 if (collector_type != collector_type_) {
2230 if (collector_type == kCollectorTypeMC) {
2231 // Don't allow mark compact unless support is compiled in.
2232 CHECK(kMarkCompactSupport);
2234 collector_type_ = collector_type;
2236 switch (collector_type_) {
2237 case kCollectorTypeCC: {
2238 gc_plan_.push_back(collector::kGcTypeFull);
2240 ChangeAllocator(kAllocatorTypeRegionTLAB);
2242 ChangeAllocator(kAllocatorTypeRegion);
2246 case kCollectorTypeMC: // Fall-through.
2247 case kCollectorTypeSS: // Fall-through.
2248 case kCollectorTypeGSS: {
2249 gc_plan_.push_back(collector::kGcTypeFull);
2251 ChangeAllocator(kAllocatorTypeTLAB);
2253 ChangeAllocator(kAllocatorTypeBumpPointer);
2257 case kCollectorTypeMS: {
2258 gc_plan_.push_back(collector::kGcTypeSticky);
2259 gc_plan_.push_back(collector::kGcTypePartial);
2260 gc_plan_.push_back(collector::kGcTypeFull);
2261 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2264 case kCollectorTypeCMS: {
2265 gc_plan_.push_back(collector::kGcTypeSticky);
2266 gc_plan_.push_back(collector::kGcTypePartial);
2267 gc_plan_.push_back(collector::kGcTypeFull);
2268 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2272 UNIMPLEMENTED(FATAL);
2276 if (IsGcConcurrent()) {
2277 concurrent_start_bytes_ =
2278 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2280 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
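// For non-concurrent collectors, concurrent_start_bytes_ is set to the maximum value so the
// threshold for requesting a concurrent GC is effectively never reached.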
2285 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2286 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
2288 ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2289 : SemiSpace(heap, false, "zygote collector"),
2290 bin_live_bitmap_(nullptr),
2291 bin_mark_bitmap_(nullptr),
2292 is_running_on_memory_tool_(is_running_on_memory_tool) {}
2294 void BuildBins(space::ContinuousSpace* space) {
2295 bin_live_bitmap_ = space->GetLiveBitmap();
2296 bin_mark_bitmap_ = space->GetMarkBitmap();
2298 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
2299 context.collector_ = this;
2300 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2301 // Note: This requires traversing the space in increasing order of object addresses.
2302 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
2303 // Add the last bin, which spans from the end of the last object to the end of the space.
2304 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
2309 uintptr_t prev_; // The end of the previous object.
2310 ZygoteCompactingCollector* collector_;
2312 // Maps from bin sizes to locations.
2313 std::multimap<size_t, uintptr_t> bins_;
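// Keying the multimap by bin size allows a best-fit lookup via lower_bound in
// MarkNonForwardedObject: the smallest gap at least as large as the object is chosen.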
2314 // Live bitmap of the space which contains the bins.
2315 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2316 // Mark bitmap of the space which contains the bins.
2317 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2318 const bool is_running_on_memory_tool_;
2320 static void Callback(mirror::Object* obj, void* arg)
2321 SHARED_REQUIRES(Locks::mutator_lock_) {
2322 DCHECK(arg != nullptr);
2323 BinContext* context = reinterpret_cast<BinContext*>(arg);
2324 ZygoteCompactingCollector* collector = context->collector_;
2325 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2326 size_t bin_size = object_addr - context->prev_;
2327 // Add the bin spanning from the end of the previous object to the start of the current object.
2328 collector->AddBin(bin_size, context->prev_);
2329 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
2332 void AddBin(size_t size, uintptr_t position) {
2333 if (is_running_on_memory_tool_) {
2334 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2337 bins_.insert(std::make_pair(size, position));
2341 virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
2342 // Don't sweep any spaces since we probably blasted the internal accounting of the free list backed spaces.
2347 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
2348 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2349 size_t obj_size = obj->SizeOf();
2350 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2351 mirror::Object* forward_address;
2352 // Find the smallest bin into which we can move obj.
2353 auto it = bins_.lower_bound(alloc_size);
2354 if (it == bins_.end()) {
2355 // No available space in the bins; place it in the target space instead (this grows the zygote space).
2357 size_t bytes_allocated, dummy;
2358 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2359 if (to_space_live_bitmap_ != nullptr) {
2360 to_space_live_bitmap_->Set(forward_address);
2362 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2363 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2366 size_t size = it->first;
2367 uintptr_t pos = it->second;
2368 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2369 forward_address = reinterpret_cast<mirror::Object*>(pos);
2370 // Set the live and mark bits so that sweeping system weaks works properly.
2371 bin_live_bitmap_->Set(forward_address);
2372 bin_mark_bitmap_->Set(forward_address);
2373 DCHECK_GE(size, alloc_size);
2374 // Add a new bin with the remaining space.
2375 AddBin(size - alloc_size, pos + alloc_size);
2377 // Copy the object over to its new location. Don't use alloc_size, to avoid a valgrind error.
2378 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2379 if (kUseBakerOrBrooksReadBarrier) {
2380 obj->AssertReadBarrierPointer();
2381 if (kUseBrooksReadBarrier) {
2382 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
2383 forward_address->SetReadBarrierPointer(forward_address);
2385 forward_address->AssertReadBarrierPointer();
2387 return forward_address;
2391 void Heap::UnBindBitmaps() {
2392 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2393 for (const auto& space : GetContinuousSpaces()) {
2394 if (space->IsContinuousMemMapAllocSpace()) {
2395 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2396 if (alloc_space->HasBoundBitmaps()) {
2397 alloc_space->UnBindBitmaps();
2403 void Heap::PreZygoteFork() {
2404 if (!HasZygoteSpace()) {
2405 // We still want to GC in case there are some unreachable non-moving objects that could cause
2406 // suboptimal bin packing when we compact the zygote space.
2407 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2408 // Trim the pages at the end of the non-moving space. Trim while not holding the zygote lock since
2409 // the trim process may require locking the mutator lock.
2410 non_moving_space_->Trim();
2412 Thread* self = Thread::Current();
2413 MutexLock mu(self, zygote_creation_lock_);
2414 // Try to see if we have any Zygote spaces.
2415 if (HasZygoteSpace()) {
2418 Runtime::Current()->GetInternTable()->AddNewTable();
2419 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2420 VLOG(heap) << "Starting PreZygoteFork";
2421 // The end of the non-moving space may be protected; unprotect it so that we can copy into the zygote bump pointer space created there.
2423 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2424 const bool same_space = non_moving_space_ == main_space_;
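// When kCompactZygote is set, the block below bin-packs live objects from the current
// allocation space (region, bump pointer or main space, depending on the collector) into the
// non-moving space, which is then split into the shared zygote space and a new, smaller
// non-moving space.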
2425 if (kCompactZygote) {
2426 // Temporarily disable rosalloc verification because the zygote
2427 // compaction will mess up the rosalloc internal metadata.
2428 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2429 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2430 zygote_collector.BuildBins(non_moving_space_);
2431 // Create a new bump pointer space which we will compact into.
2432 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2433 non_moving_space_->Limit());
2434 // Compact the bump pointer space to a new zygote bump pointer space.
2435 bool reset_main_space = false;
2436 if (IsMovingGc(collector_type_)) {
2437 if (collector_type_ == kCollectorTypeCC) {
2438 zygote_collector.SetFromSpace(region_space_);
2440 zygote_collector.SetFromSpace(bump_pointer_space_);
2443 CHECK(main_space_ != nullptr);
2444 CHECK_NE(main_space_, non_moving_space_)
2445 << "Does not make sense to compact within the same space";
2446 // Copy from the main space.
2447 zygote_collector.SetFromSpace(main_space_);
2448 reset_main_space = true;
2450 zygote_collector.SetToSpace(&target_space);
2451 zygote_collector.SetSwapSemiSpaces(false);
2452 zygote_collector.Run(kGcCauseCollectorTransition, false);
2453 if (reset_main_space) {
2454 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2455 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2456 MemMap* mem_map = main_space_->ReleaseMemMap();
2457 RemoveSpace(main_space_);
2458 space::Space* old_main_space = main_space_;
2459 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2461 delete old_main_space;
2462 AddSpace(main_space_);
2464 if (collector_type_ == kCollectorTypeCC) {
2465 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2467 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2470 if (temp_space_ != nullptr) {
2471 CHECK(temp_space_->IsEmpty());
2473 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2474 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2475 // Update the end and write out image.
2476 non_moving_space_->SetEnd(target_space.End());
2477 non_moving_space_->SetLimit(target_space.Limit());
2478 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2480 // Change the collector to the post zygote one.
2481 ChangeCollector(foreground_collector_type_);
2482 // Save the old space so that we can remove it after we complete creating the zygote space.
2483 space::MallocSpace* old_alloc_space = non_moving_space_;
2484 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2485 // the remaining available space.
2486 // Remove the old space before creating the zygote space since creating the zygote space sets
2487 // the old alloc space's bitmaps to null.
2488 RemoveSpace(old_alloc_space);
2489 if (collector::SemiSpace::kUseRememberedSet) {
2490 // Sanity bound check.
2491 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2492 // Remove the remembered set for the now zygote space (the old
2493 // non-moving space). Note now that we have compacted objects into
2494 // the zygote space, the data in the remembered set is no longer
2495 // needed. The zygote space will instead have a mod-union table
2496 // from this point on.
2497 RemoveRememberedSet(old_alloc_space);
2499 // Remaining space becomes the new non moving space.
2500 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2501 &non_moving_space_);
2502 CHECK(!non_moving_space_->CanMoveObjects());
2504 main_space_ = non_moving_space_;
2505 SetSpaceAsDefault(main_space_);
2507 delete old_alloc_space;
2508 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2509 AddSpace(zygote_space_);
2510 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2511 AddSpace(non_moving_space_);
2512 // Create the zygote space mod union table.
2513 accounting::ModUnionTable* mod_union_table =
2514 new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
2516 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2517 // Set all the cards in the mod-union table since we don't know which objects contain references
2518 // to large objects.
2519 mod_union_table->SetCards();
2520 AddModUnionTable(mod_union_table);
2521 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2522 if (collector::SemiSpace::kUseRememberedSet) {
2523 // Add a new remembered set for the post-zygote non-moving space.
2524 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2525 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2527 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2528 << "Failed to create post-zygote non-moving space remembered set";
2529 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2533 void Heap::FlushAllocStack() {
2534 MarkAllocStackAsLive(allocation_stack_.get());
2535 allocation_stack_->Reset();
2538 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2539 accounting::ContinuousSpaceBitmap* bitmap2,
2540 accounting::LargeObjectBitmap* large_objects,
2541 accounting::ObjectStack* stack) {
2542 DCHECK(bitmap1 != nullptr);
2543 DCHECK(bitmap2 != nullptr);
2544 const auto* limit = stack->End();
2545 for (auto* it = stack->Begin(); it != limit; ++it) {
2546 const mirror::Object* obj = it->AsMirrorPtr();
2547 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2548 if (bitmap1->HasAddress(obj)) {
2550 } else if (bitmap2->HasAddress(obj)) {
2553 DCHECK(large_objects != nullptr);
2554 large_objects->Set(obj);
2560 void Heap::SwapSemiSpaces() {
2561 CHECK(bump_pointer_space_ != nullptr);
2562 CHECK(temp_space_ != nullptr);
2563 std::swap(bump_pointer_space_, temp_space_);
2566 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2567 space::ContinuousMemMapAllocSpace* source_space,
2569 CHECK(kMovingCollector);
2570 if (target_space != source_space) {
2571 // Don't swap spaces since this isn't a typical semi space collection.
2572 semi_space_collector_->SetSwapSemiSpaces(false);
2573 semi_space_collector_->SetFromSpace(source_space);
2574 semi_space_collector_->SetToSpace(target_space);
2575 semi_space_collector_->Run(gc_cause, false);
2576 return semi_space_collector_;
2578 CHECK(target_space->IsBumpPointerSpace())
2579 << "In-place compaction is only supported for bump pointer spaces";
2580 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2581 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2582 return mark_compact_collector_;
2586 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2588 bool clear_soft_references) {
2589 Thread* self = Thread::Current();
2590 Runtime* runtime = Runtime::Current();
2591 // If the heap can't run the GC, silently fail and return that no GC was run.
2593 case collector::kGcTypePartial: {
2594 if (!HasZygoteSpace()) {
2595 return collector::kGcTypeNone;
2600 // Other GC types don't have any special cases which make them not runnable. The main case here is full GC.
2604 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2605 Locks::mutator_lock_->AssertNotHeld(self);
2606 if (self->IsHandlingStackOverflow()) {
2607 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2608 // space to run the GC.
2609 return collector::kGcTypeNone;
2613 gc_complete_lock_->AssertNotHeld(self);
2614 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2615 MutexLock mu(self, *gc_complete_lock_);
2616 // Ensure there is only one GC at a time.
2617 WaitForGcToCompleteLocked(gc_cause, self);
2618 compacting_gc = IsMovingGc(collector_type_);
2619 // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2620 if (compacting_gc && disable_moving_gc_count_ != 0) {
2621 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2622 return collector::kGcTypeNone;
2624 if (gc_disabled_for_shutdown_) {
2625 return collector::kGcTypeNone;
2627 collector_type_running_ = collector_type_;
2629 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2630 ++runtime->GetStats()->gc_for_alloc_count;
2631 ++self->GetStats()->gc_for_alloc_count;
2633 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2634 // Approximate heap size.
2635 ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
2637 DCHECK_LT(gc_type, collector::kGcTypeMax);
2638 DCHECK_NE(gc_type, collector::kGcTypeNone);
2640 collector::GarbageCollector* collector = nullptr;
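// Collector selection: compacting collections use the collector implied by collector_type_
// (semi-space, concurrent copying or mark-compact) and are treated as full GCs, while the
// malloc-backed allocators look up a collector matching the requested gc_type via
// FindCollectorByGcType (in practice a sticky, partial or full mark-sweep).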
2641 // TODO: Clean this up.
2642 if (compacting_gc) {
2643 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2644 current_allocator_ == kAllocatorTypeTLAB ||
2645 current_allocator_ == kAllocatorTypeRegion ||
2646 current_allocator_ == kAllocatorTypeRegionTLAB);
2647 switch (collector_type_) {
2648 case kCollectorTypeSS:
2650 case kCollectorTypeGSS:
2651 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2652 semi_space_collector_->SetToSpace(temp_space_);
2653 semi_space_collector_->SetSwapSemiSpaces(true);
2654 collector = semi_space_collector_;
2656 case kCollectorTypeCC:
2657 concurrent_copying_collector_->SetRegionSpace(region_space_);
2658 collector = concurrent_copying_collector_;
2660 case kCollectorTypeMC:
2661 mark_compact_collector_->SetSpace(bump_pointer_space_);
2662 collector = mark_compact_collector_;
2665 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2667 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2668 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2669 if (kIsDebugBuild) {
2670 // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2671 temp_space_->GetMemMap()->TryReadable();
2673 CHECK(temp_space_->IsEmpty());
2675 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2676 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2677 current_allocator_ == kAllocatorTypeDlMalloc) {
2678 collector = FindCollectorByGcType(gc_type);
2680 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2682 if (IsGcConcurrent()) {
2683 // Disable concurrent GC check so that we don't have spammy JNI requests.
2684 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2685 // calculated in the same thread so that there aren't any races that can cause it to become
2686 // permanently disabled. b/17942071
2687 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2690 if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) {
2691 // It's time to clear all inline caches, in case some classes can be unloaded.
2692 runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
2695 CHECK(collector != nullptr)
2696 << "Could not find garbage collector with collector_type="
2697 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2698 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2699 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2700 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2702 // Enqueue cleared references.
2703 reference_processor_->EnqueueClearedReferences(self);
2704 // Grow the heap so that we know when to perform the next GC.
2705 GrowForUtilization(collector, bytes_allocated_before_gc);
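// GrowForUtilization presumably resizes the footprint limit based on target_utilization_ and,
// for concurrent collectors, recomputes the concurrent_start_bytes_ threshold that was
// disabled above.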
2706 LogGC(gc_cause, collector);
2707 FinishGC(self, gc_type);
2708 // Inform DDMS that a GC completed.
2710 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2711 // deadlocks in case the JNI_OnUnload function does allocations.
2713 ScopedObjectAccess soa(self);
2714 soa.Vm()->UnloadNativeLibraries();
2719 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2720 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2721 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2722 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2723 // (mutator time blocked >= long_pause_log_threshold_).
2724 bool log_gc = gc_cause == kGcCauseExplicit;
2725 if (!log_gc && CareAboutPauseTimes()) {
2726 // GC for alloc pauses the allocating thread, so consider it as a pause.
2727 log_gc = duration > long_gc_log_threshold_ ||
2728 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2729 for (uint64_t pause : pause_times) {
2730 log_gc = log_gc || pause >= long_pause_log_threshold_;
2734 const size_t percent_free = GetPercentFree();
2735 const size_t current_heap_size = GetBytesAllocated();
2736 const size_t total_memory = GetTotalMemory();
2737 std::ostringstream pause_string;
2738 for (size_t i = 0; i < pause_times.size(); ++i) {
2739 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2740 << ((i != pause_times.size() - 1) ? "," : "");
2742 LOG(INFO) << gc_cause << " " << collector->GetName()
2743 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2744 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2745 << current_gc_iteration_.GetFreedLargeObjects() << "("
2746 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2747 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2748 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2749 << " total " << PrettyDuration((duration / 1000) * 1000);
2750 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2754 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2755 MutexLock mu(self, *gc_complete_lock_);
2756 collector_type_running_ = kCollectorTypeNone;
2757 if (gc_type != collector::kGcTypeNone) {
2758 last_gc_type_ = gc_type;
2761 ++gc_count_last_window_;
2762 if (running_collection_is_blocking_) {
2763 // If the currently running collection was a blocking one,
2764 // increment the counters and reset the flag.
2765 ++blocking_gc_count_;
2766 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2767 ++blocking_gc_count_last_window_;
2769 // Update the gc count rate histograms if due.
2770 UpdateGcCountRateHistograms();
2773 running_collection_is_blocking_ = false;
2774 // Wake anyone who may have been waiting for the GC to complete.
2775 gc_complete_cond_->Broadcast(self);
2778 void Heap::UpdateGcCountRateHistograms() {
2779 // Invariant: if the time since the last update spans more than
2780 // one window, all the GC runs (if > 0) must have happened in the first
2781 // window, because otherwise the update would have already taken place
2782 // at an earlier GC run. So, we report the non-first windows with
2783 // zero counts to the histograms.
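// For example, with window duration W, if the previous update was just over 3*W ago and 4 GCs
// ran since then, the first window is recorded as 3 (excluding the current run), the next two
// windows as 0, and the current run is carried over into the new window below.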
2784 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2785 uint64_t now = NanoTime();
2786 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2787 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2788 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2789 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2790 // Record the first window.
2791 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2792 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2793 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2794 // Record the other windows (with zero counts).
2795 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2796 gc_count_rate_histogram_.AddValue(0);
2797 blocking_gc_count_rate_histogram_.AddValue(0);
2799 // Update the last update time and reset the counters.
2800 last_update_time_gc_count_rate_histograms_ =
2801 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2802 gc_count_last_window_ = 1; // Include the current run.
2803 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2805 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2808 class RootMatchesObjectVisitor : public SingleRootVisitor {
2810 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2812 void VisitRoot(mirror::Object* root, const RootInfo& info)
2813 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2815 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2820 const mirror::Object* const obj_;
2826 void operator()(const mirror::Object* obj) const {
2827 LOG(ERROR) << "Would have rescanned object " << obj;
2831 // Verify a reference from an object.
2832 class VerifyReferenceVisitor : public SingleRootVisitor {
2834 VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2835 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2836 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2838 size_t GetFailureCount() const {
2839 return fail_count_->LoadSequentiallyConsistent();
2842 void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
2843 SHARED_REQUIRES(Locks::mutator_lock_) {
2844 if (verify_referent_) {
2845 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
2849 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
2850 SHARED_REQUIRES(Locks::mutator_lock_) {
2851 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
2854 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2855 return heap_->IsLiveObjectLocked(obj, true, false, true);
2858 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2859 SHARED_REQUIRES(Locks::mutator_lock_) {
2860 if (!root->IsNull()) {
2864 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2865 SHARED_REQUIRES(Locks::mutator_lock_) {
2866 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2867 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2870 virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
2871 SHARED_REQUIRES(Locks::mutator_lock_) {
2872 if (root == nullptr) {
2873 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2874 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2875 LOG(ERROR) << "Root " << root << " is dead with type " << PrettyTypeOf(root)
2876 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2881 // TODO: Fix the no thread safety analysis.
2882 // Returns false on failure.
2883 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2884 NO_THREAD_SAFETY_ANALYSIS {
2885 if (ref == nullptr || IsLive(ref)) {
2886 // Verify that the reference is live.
2889 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2890 // Print the message only on the first failure to prevent spam.
2891 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2893 if (obj != nullptr) {
2894 // Only do this part for non roots.
2895 accounting::CardTable* card_table = heap_->GetCardTable();
2896 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2897 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2898 uint8_t* card_addr = card_table->CardFromAddr(obj);
2899 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2900 << offset << "\n card value = " << static_cast<int>(*card_addr);
2901 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2902 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2904 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2907 // Attempt to find the class inside of the recently freed objects.
2908 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2909 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2910 space::MallocSpace* space = ref_space->AsMallocSpace();
2911 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2912 if (ref_class != nullptr) {
2913 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2914 << PrettyClass(ref_class);
2916 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2920 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2921 ref->GetClass()->IsClass()) {
2922 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2924 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2925 << ") is not a valid heap address";
2928 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2929 void* cover_begin = card_table->AddrFromCard(card_addr);
2930 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2931 accounting::CardTable::kCardSize);
2932 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2933 << "-" << cover_end;
2934 accounting::ContinuousSpaceBitmap* bitmap =
2935 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2937 if (bitmap == nullptr) {
2938 LOG(ERROR) << "Object " << obj << " has no bitmap";
2939 if (!VerifyClassClass(obj->GetClass())) {
2940 LOG(ERROR) << "Object " << obj << " failed class verification!";
2943 // Print out how the object is live.
2944 if (bitmap->Test(obj)) {
2945 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2947 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2948 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2950 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2951 LOG(ERROR) << "Object " << obj << " found in live stack";
2953 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2954 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2956 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2957 LOG(ERROR) << "Ref " << ref << " found in live stack";
2959 // Attempt to see if the card table missed the reference.
2960 ScanVisitor scan_visitor;
2961 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2962 card_table->Scan<false>(bitmap, byte_cover_begin,
2963 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2966 // Search to see if any of the roots reference our object.
2967 RootMatchesObjectVisitor visitor1(obj);
2968 Runtime::Current()->VisitRoots(&visitor1);
2969 // Search to see if any of the roots reference our reference.
2970 RootMatchesObjectVisitor visitor2(ref);
2971 Runtime::Current()->VisitRoots(&visitor2);
2977 Atomic<size_t>* const fail_count_;
2978 const bool verify_referent_;
2981 // Verify all references within an object, for use with HeapBitmap::Visit.
2982 class VerifyObjectVisitor {
2984 VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2985 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2987 void operator()(mirror::Object* obj)
2988 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2989 // Note: we are verifying the references in obj but not obj itself, because obj must already
2990 // be live; otherwise we would not have found it in the live bitmap.
2991 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2992 // The class doesn't count as a reference but we should verify it anyway.
2993 obj->VisitReferences(visitor, visitor);
2996 static void VisitCallback(mirror::Object* obj, void* arg)
2997 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2998 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2999 visitor->operator()(obj);
3002 void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
3003 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3004 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3005 Runtime::Current()->VisitRoots(&visitor);
3008 size_t GetFailureCount() const {
3009 return fail_count_->LoadSequentiallyConsistent();
3014 Atomic<size_t>* const fail_count_;
3015 const bool verify_referent_;
3018 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
3019 // Slow path, the allocation stack push back must have already failed.
3020 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
3022 // TODO: Add handle VerifyObject.
3023 StackHandleScope<1> hs(self);
3024 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3025 // Push our object into the reserve region of the allocation stack. This is only required due
3026 // to heap verification requiring that roots are live (either in the live bitmap or in the
3027 // allocation stack).
3028 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
3029 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3030 } while (!allocation_stack_->AtomicPushBack(*obj));
3033 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
3034 // Slow path, the allocation stack push back must have already failed.
3035 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
3036 StackReference<mirror::Object>* start_address;
3037 StackReference<mirror::Object>* end_address;
3038 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3040 // TODO: Add handle VerifyObject.
3041 StackHandleScope<1> hs(self);
3042 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3043 // Push our object into the reserve region of the allocation stack. This is only required due
3044 // to heap verification requiring that roots are live (either in the live bitmap or in the
3045 // allocation stack).
3046 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
3047 // Push into the reserve allocation stack.
3048 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3050 self->SetThreadLocalAllocationStack(start_address, end_address);
3051 // Retry on the new thread-local allocation stack.
3052 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
3055 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3056 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3057 Thread* self = Thread::Current();
3058 Locks::mutator_lock_->AssertExclusiveHeld(self);
3059 // Let's sort our allocation stacks so that we can efficiently binary search them.
3060 allocation_stack_->Sort();
3061 live_stack_->Sort();
3062 // Since we sorted the allocation stack content, we need to revoke all
3063 // thread-local allocation stacks.
3064 RevokeAllThreadLocalAllocationStacks(self);
3065 Atomic<size_t> fail_count_(0);
3066 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
3067 // Verify objects in the allocation stack since these will be objects which were:
3068 // 1. Allocated prior to the GC (pre GC verification).
3069 // 2. Allocated during the GC (pre sweep GC verification).
3070 // We don't want to verify the objects in the live stack since they themselves may be
3071 // pointing to dead objects if they are not reachable.
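// Verification failures are counted rather than immediately fatal here; when there are failures
// we dump the mod-union tables, remembered sets and spaces below, and the caller decides whether
// to abort.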
3072 VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
3073 // Verify the roots:
3074 visitor.VerifyRoots();
3075 if (visitor.GetFailureCount() > 0) {
3076 // Dump mod-union tables.
3077 for (const auto& table_pair : mod_union_tables_) {
3078 accounting::ModUnionTable* mod_union_table = table_pair.second;
3079 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
3081 // Dump remembered sets.
3082 for (const auto& table_pair : remembered_sets_) {
3083 accounting::RememberedSet* remembered_set = table_pair.second;
3084 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
3086 DumpSpaces(LOG(ERROR));
3088 return visitor.GetFailureCount();
3091 class VerifyReferenceCardVisitor {
3093 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3094 SHARED_REQUIRES(Locks::mutator_lock_,
3095 Locks::heap_bitmap_lock_)
3096 : heap_(heap), failed_(failed) {
3099 // There are no card marks for native roots on a class.
3100 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3102 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3104 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3105 // annotalysis on visitors.
3106 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3107 NO_THREAD_SAFETY_ANALYSIS {
3108 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3109 // Filter out class references since changing an object's class does not mark the card as dirty.
3110 // Also handles large objects, since the only reference they hold is a class reference.
3111 if (ref != nullptr && !ref->IsClass()) {
3112 accounting::CardTable* card_table = heap_->GetCardTable();
3113 // If the object is not dirty and it is referencing something in the live stack other than
3114 // a class, then it must be on a dirty card.
3115 if (!card_table->AddrIsInCardTable(obj)) {
3116 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3118 } else if (!card_table->IsDirty(obj)) {
3119 // TODO: Check mod-union tables.
3120 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3121 // kCardDirty - 1 if it didn't get touched since we aged it.
3122 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3123 if (live_stack->ContainsSorted(ref)) {
3124 if (live_stack->ContainsSorted(obj)) {
3125 LOG(ERROR) << "Object " << obj << " found in live stack";
3127 if (heap_->GetLiveBitmap()->Test(obj)) {
3128 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3130 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
3131 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
3133 // Print which field of the object is dead.
3134 if (!obj->IsObjectArray()) {
3135 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
3136 CHECK(klass != nullptr);
3137 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3138 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3139 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3140 << PrettyField(&field);
3145 mirror::ObjectArray<mirror::Object>* object_array =
3146 obj->AsObjectArray<mirror::Object>();
3147 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3148 if (object_array->Get(i) == ref) {
3149 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3162 bool* const failed_;
3165 class VerifyLiveStackReferences {
3167 explicit VerifyLiveStackReferences(Heap* heap)
3171 void operator()(mirror::Object* obj) const
3172 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3173 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3174 obj->VisitReferences(visitor, VoidFunctor());
3177 bool Failed() const {
3186 bool Heap::VerifyMissingCardMarks() {
3187 Thread* self = Thread::Current();
3188 Locks::mutator_lock_->AssertExclusiveHeld(self);
3189 // We need to sort the live stack since we binary search it.
3190 live_stack_->Sort();
3191 // Since we sorted the allocation stack content, we need to revoke all
3192 // thread-local allocation stacks.
3193 RevokeAllThreadLocalAllocationStacks(self);
3194 VerifyLiveStackReferences visitor(this);
3195 GetLiveBitmap()->Visit(visitor);
3196 // We can verify objects in the live stack since none of these should reference dead objects.
3197 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3198 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3199 visitor(it->AsMirrorPtr());
3202 return !visitor.Failed();
3205 void Heap::SwapStacks() {
3206 if (kUseThreadLocalAllocationStack) {
3207 live_stack_->AssertAllZero();
3209 allocation_stack_.swap(live_stack_);
3212 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3213 // This must be called only during the pause.
3214 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3215 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3216 MutexLock mu2(self, *Locks::thread_list_lock_);
3217 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3218 for (Thread* t : thread_list) {
3219 t->RevokeThreadLocalAllocationStack();
3223 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3224 if (kIsDebugBuild) {
3225 if (rosalloc_space_ != nullptr) {
3226 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3228 if (bump_pointer_space_ != nullptr) {
3229 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3234 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3235 if (kIsDebugBuild) {
3236 if (bump_pointer_space_ != nullptr) {
3237 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3242 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3243 auto it = mod_union_tables_.find(space);
3244 if (it == mod_union_tables_.end()) {
3250 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3251 auto it = remembered_sets_.find(space);
3252 if (it == remembered_sets_.end()) {
3258 void Heap::ProcessCards(TimingLogger* timings,
3260 bool process_alloc_space_cards,
3261 bool clear_alloc_space_cards) {
3262 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3263 // Clear cards and keep track of cards cleared in the mod-union table.
3264 for (const auto& space : continuous_spaces_) {
3265 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3266 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3267 if (table != nullptr) {
3268 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3269 "ImageModUnionClearCards";
3270 TimingLogger::ScopedTiming t2(name, timings);
3271 table->ClearCards();
3272 } else if (use_rem_sets && rem_set != nullptr) {
3273 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3274 << static_cast<int>(collector_type_);
3275 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3276 rem_set->ClearCards();
3277 } else if (process_alloc_space_cards) {
3278 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3279 if (clear_alloc_space_cards) {
3280 uint8_t* end = space->End();
3281 if (space->IsImageSpace()) {
3282 // Image space end is the end of the mirror objects; it is not necessarily page or card
3283 // aligned. Align up so that the check in ClearCardRange does not fail.
3284 end = AlignUp(end, accounting::CardTable::kCardSize);
3286 card_table_->ClearCardRange(space->Begin(), end);
3288 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3289 // cards were dirty before the GC started.
3290 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3291 // -> clean(cleaning thread).
3292 // The race leaves the card either aged or unaged. Since we checkpoint the roots first and
3293 // scan / update the mod-union tables afterwards, we will always scan the card either way.
3294 // If we end up with the non-aged card, we scan it in the pause.
3295 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3302 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3303 virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3306 virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
3310 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3311 Thread* const self = Thread::Current();
3312 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3313 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3314 if (verify_pre_gc_heap_) {
3315 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3316 size_t failures = VerifyHeapReferences();
3318 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3322 // Check that all objects which reference things in the live stack are on dirty cards.
3323 if (verify_missing_card_marks_) {
3324 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3325 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3327 // Sort the live stack so that we can quickly binary search it later.
3328 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3329 << " missing card mark verification failed\n" << DumpSpaces();
3332 if (verify_mod_union_table_) {
3333 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3334 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3335 for (const auto& table_pair : mod_union_tables_) {
3336 accounting::ModUnionTable* mod_union_table = table_pair.second;
3337 IdentityMarkHeapReferenceVisitor visitor;
3338 mod_union_table->UpdateAndMarkReferences(&visitor);
3339 mod_union_table->Verify();
3344 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3345 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3346 collector::GarbageCollector::ScopedPause pause(gc);
3347 PreGcVerificationPaused(gc);
3351 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3352 // TODO: Add a new runtime option for this?
3353 if (verify_pre_gc_rosalloc_) {
3354 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3358 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3359 Thread* const self = Thread::Current();
3360 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3361 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3362 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3363 // reachable objects.
3364 if (verify_pre_sweeping_heap_) {
3365 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3366 CHECK_NE(self->GetState(), kRunnable);
3368 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3369 // Swapping bound bitmaps does nothing.
3372 // Pass in false since concurrent reference processing can mean that the reference referents
3373 // may point to dead objects at the point at which PreSweepingGcVerification is called.
3374 size_t failures = VerifyHeapReferences(false);
3376 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3380 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3384 if (verify_pre_sweeping_rosalloc_) {
3385 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3389 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3390 // Only pause if we have to do some verification.
3391 Thread* const self = Thread::Current();
3392 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3393 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3394 if (verify_system_weaks_) {
3395 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3396 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3397 mark_sweep->VerifySystemWeaks();
3399 if (verify_post_gc_rosalloc_) {
3400 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3402 if (verify_post_gc_heap_) {
3403 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3404 size_t failures = VerifyHeapReferences();
3406 LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3412 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3413 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3414 collector::GarbageCollector::ScopedPause pause(gc);
3415 PostGcVerificationPaused(gc);
3419 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3420 TimingLogger::ScopedTiming t(name, timings);
3421 for (const auto& space : continuous_spaces_) {
3422 if (space->IsRosAllocSpace()) {
3423 VLOG(heap) << name << " : " << space->GetName();
3424 space->AsRosAllocSpace()->Verify();
3429 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3430 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3431 MutexLock mu(self, *gc_complete_lock_);
3432 return WaitForGcToCompleteLocked(cause, self);
3435 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3436 collector::GcType last_gc_type = collector::kGcTypeNone;
3437 uint64_t wait_start = NanoTime();
3438 while (collector_type_running_ != kCollectorTypeNone) {
3439 if (self != task_processor_->GetRunningThread()) {
3440 // The current thread is about to wait for a currently running
3441 // collection to finish. If the waiting thread is not the heap
3442 // task daemon thread, the currently running collection is
3443 // considered a blocking GC.
3444 running_collection_is_blocking_ = true;
3445 VLOG(gc) << "Waiting for a blocking GC " << cause;
3447 ScopedTrace trace("GC: Wait For Completion");
3448 // We must wait, change thread state, then sleep on gc_complete_cond_.
3449 gc_complete_cond_->Wait(self);
3450 last_gc_type = last_gc_type_;
3452 uint64_t wait_time = NanoTime() - wait_start;
3453 total_wait_time_ += wait_time;
3454 if (wait_time > long_pause_log_threshold_) {
3455 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
3456 << " for cause " << cause;
3458 if (self != task_processor_->GetRunningThread()) {
3459 // The current thread is about to run a collection. If the thread
3460 // is not the heap task daemon thread, it's considered a
3461 // blocking GC (i.e., blocking itself).
3462 running_collection_is_blocking_ = true;
3463 VLOG(gc) << "Starting a blocking GC " << cause;
3465 return last_gc_type;
3468 void Heap::DumpForSigQuit(std::ostream& os) {
3469 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3470 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3471 DumpGcPerformanceInfo(os);
3474 size_t Heap::GetPercentFree() {
3475 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
3478 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
3479 if (max_allowed_footprint > GetMaxMemory()) {
3480 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
3481 << PrettySize(GetMaxMemory());
3482 max_allowed_footprint = GetMaxMemory();
3484 max_allowed_footprint_ = max_allowed_footprint;
3487 bool Heap::IsMovableObject(const mirror::Object* obj) const {
3488 if (kMovingCollector) {
3489 space::Space* space = FindContinuousSpaceFromObject(obj, true);
3490 if (space != nullptr) {
3491 // TODO: Check large object?
3492 return space->CanMoveObjects();
3498 void Heap::UpdateMaxNativeFootprint() {
3499 size_t native_size = native_bytes_allocated_.LoadRelaxed();
3500 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
3501 size_t target_size = native_size / GetTargetHeapUtilization();
3502 if (target_size > native_size + max_free_) {
3503 target_size = native_size + max_free_;
3504 } else if (target_size < native_size + min_free_) {
3505 target_size = native_size + min_free_;
3507 native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
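// Illustrative numbers (not tuned values): with 20 MB of native allocations and a target heap
// utilization of 0.5, target_size starts at 40 MB; if max_free_ is 8 MB it is clamped down to
// 28 MB, and the watermark becomes the smaller of that and growth_limit_.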
3510 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3511 for (const auto& collector : garbage_collectors_) {
3512 if (collector->GetCollectorType() == collector_type_ &&
3513 collector->GetGcType() == gc_type) {
3520 double Heap::HeapGrowthMultiplier() const {
3521 // If we don't care about pause times we are in the background, so return 1.0.
3522 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
3525 return foreground_heap_growth_multiplier_;
3528 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3529 uint64_t bytes_allocated_before_gc) {
3530 // We know what our utilization is at this moment.
3531 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3532 const uint64_t bytes_allocated = GetBytesAllocated();
3533 uint64_t target_size;
3534 collector::GcType gc_type = collector_ran->GetGcType();
3535 const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more in the foreground.
3537 const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3538 const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
3539 if (gc_type != collector::kGcTypeSticky) {
3540 // Grow the heap for non sticky GC.
3541 ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
3543 target_size = bytes_allocated + delta * multiplier;
3544 target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3545 target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
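// Illustrative numbers: with 40 MB allocated, a target heap utilization of 0.75 and a growth
// multiplier of 1.0, delta is ~13.3 MB, so the ideal footprint becomes ~53.3 MB, subject to the
// adjusted_min_free / adjusted_max_free clamps above.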
3546 native_need_to_run_finalization_ = true;
3547 next_gc_type_ = collector::kGcTypeSticky;
3549 collector::GcType non_sticky_gc_type =
3550 HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3551 // Find what the next non sticky collector will be.
3552 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3553 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3554 // do another sticky collection next.
3555 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3556 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3557 // if the sticky GC throughput always remained >= the full/partial throughput.
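// For example (illustrative figures): if the sticky GC just achieved an adjusted throughput of
// 150 MB/s while the mean full/partial throughput is 120 MB/s, and we are under the footprint
// limit, we schedule another sticky GC next; otherwise we fall back to the non-sticky type.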
3558 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
3559 non_sticky_collector->GetEstimatedMeanThroughput() &&
3560 non_sticky_collector->NumberOfIterations() > 0 &&
3561 bytes_allocated <= max_allowed_footprint_) {
3562 next_gc_type_ = collector::kGcTypeSticky;
3564 next_gc_type_ = non_sticky_gc_type;
3566 // If we have freed enough memory, shrink the heap back down.
3567 if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3568 target_size = bytes_allocated + adjusted_max_free;
3570 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
3573 if (!ignore_max_footprint_) {
3574 SetIdealFootprint(target_size);
3575 if (IsGcConcurrent()) {
3576 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3577 current_gc_iteration_.GetFreedLargeObjectBytes() +
3578 current_gc_iteration_.GetFreedRevokeBytes();
3579 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3580 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3581 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3582 const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3583 bytes_allocated_before_gc;
3584 // Calculate when to perform the next ConcurrentGC.
3585 // Calculate the estimated GC duration.
3586 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
3587 // Estimate how many remaining bytes we will have when we need to start the next GC.
3588 size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
3589 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3590 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3591 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3592 // This should essentially never happen: the estimated allocation rate implies that we would
3593 // exceed the application's entire footprint before the next GC could complete. Schedule
3594 // another GC nearly straight away.
3595 remaining_bytes = kMinConcurrentRemainingBytes;
3597 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
3598 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
3599 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3600 // allocation rate is very high, remaining_bytes could tell us that we should start a GC straight away.
3602 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3603 static_cast<size_t>(bytes_allocated));
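// Illustrative numbers: with max_allowed_footprint_ at 64 MB and remaining_bytes at 512 KB, the
// allocation path requests a background concurrent GC once num_bytes_allocated_ crosses
// ~63.5 MB, leaving roughly remaining_bytes of headroom for allocations made while the
// concurrent GC runs.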
3608 void Heap::ClampGrowthLimit() {
3609 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3610 ScopedObjectAccess soa(Thread::Current());
3611 WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3612 capacity_ = growth_limit_;
3613 for (const auto& space : continuous_spaces_) {
3614 if (space->IsMallocSpace()) {
3615 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3616 malloc_space->ClampGrowthLimit();
3619 // This space isn't added for performance reasons.
3620 if (main_space_backup_.get() != nullptr) {
3621 main_space_backup_->ClampGrowthLimit();
3625 void Heap::ClearGrowthLimit() {
3626 growth_limit_ = capacity_;
3627 ScopedObjectAccess soa(Thread::Current());
3628 for (const auto& space : continuous_spaces_) {
3629 if (space->IsMallocSpace()) {
3630 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3631 malloc_space->ClearGrowthLimit();
3632 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3635 // This space isn't added for performance reasons.
3636 if (main_space_backup_.get() != nullptr) {
3637 main_space_backup_->ClearGrowthLimit();
3638 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3642 void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
3643 ScopedObjectAccess soa(self);
3644 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3646 args[0].l = arg.get();
3647 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3648 // Restore object in case it gets moved.
3649 *object = soa.Decode<mirror::Object*>(arg.get());
3652 void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
3653 StackHandleScope<1> hs(self);
3654 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3655 RequestConcurrentGC(self, force_full);
3658 class Heap::ConcurrentGCTask : public HeapTask {
3660 ConcurrentGCTask(uint64_t target_time, bool force_full)
3661 : HeapTask(target_time), force_full_(force_full) { }
3662 virtual void Run(Thread* self) OVERRIDE {
3663 gc::Heap* heap = Runtime::Current()->GetHeap();
3664 heap->ConcurrentGC(self, force_full_);
3665 heap->ClearConcurrentGCRequest();
3669 const bool force_full_; // If true, force full (or partial) collection.
3672 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3673 Runtime* runtime = Runtime::Current();
3674 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3675 !self->IsHandlingStackOverflow();
3678 void Heap::ClearConcurrentGCRequest() {
3679 concurrent_gc_pending_.StoreRelaxed(false);
3682 void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
3683 if (CanAddHeapTask(self) &&
3684 concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
3685 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3690 void Heap::ConcurrentGC(Thread* self, bool force_full) {
3691 if (!Runtime::Current()->IsShuttingDown(self)) {
3692 // Wait for any GCs currently running to finish.
3693 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
3694 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
3695 // instead. E.g. can't do partial, so do full instead.
3696 collector::GcType next_gc_type = next_gc_type_;
3697 // If forcing full and next gc type is sticky, override with a non-sticky type.
3698 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3699 next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3701 if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
3702 collector::kGcTypeNone) {
3703 for (collector::GcType gc_type : gc_plan_) {
3704 // Attempt to run the collector, if we succeed, we are done.
3705 if (gc_type > next_gc_type &&
3706 CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
3707 collector::kGcTypeNone) {
3716 class Heap::CollectorTransitionTask : public HeapTask {
3718 explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3720 virtual void Run(Thread* self) OVERRIDE {
3721 gc::Heap* heap = Runtime::Current()->GetHeap();
3722 heap->DoPendingCollectorTransition();
3723 heap->ClearPendingCollectorTransition(self);
3727 void Heap::ClearPendingCollectorTransition(Thread* self) {
3728 MutexLock mu(self, *pending_task_lock_);
3729 pending_collector_transition_ = nullptr;
3732 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3733 Thread* self = Thread::Current();
3734 desired_collector_type_ = desired_collector_type;
3735 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3738 CollectorTransitionTask* added_task = nullptr;
3739 const uint64_t target_time = NanoTime() + delta_time;
3741 MutexLock mu(self, *pending_task_lock_);
3742 // If we have an existing collector transition, update the target time to be the new target.
3743 if (pending_collector_transition_ != nullptr) {
3744 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3747 added_task = new CollectorTransitionTask(target_time);
3748 pending_collector_transition_ = added_task;
3750 task_processor_->AddTask(self, added_task);
3753 class Heap::HeapTrimTask : public HeapTask {
3755 explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3756 virtual void Run(Thread* self) OVERRIDE {
3757 gc::Heap* heap = Runtime::Current()->GetHeap();
3759 heap->ClearPendingTrim(self);
3763 void Heap::ClearPendingTrim(Thread* self) {
3764 MutexLock mu(self, *pending_task_lock_);
3765 pending_heap_trim_ = nullptr;
3768 void Heap::RequestTrim(Thread* self) {
3769 if (!CanAddHeapTask(self)) {
3772 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3773 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3774 // a space it will hold its lock and can become a cause of jank.
3775 // Note, the large object space self-trims, and the Zygote space was trimmed and unchanging since the last GC.
3778 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3779 // because that only marks object heads, so a large array looks like lots of empty space. We
3780 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3781 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3782 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3783 // not how much use we're making of those pages.
3784 HeapTrimTask* added_task = nullptr;
3786 MutexLock mu(self, *pending_task_lock_);
3787 if (pending_heap_trim_ != nullptr) {
3788 // Already have a heap trim request in task processor, ignore this request.
3791 added_task = new HeapTrimTask(kHeapTrimWait);
3792 pending_heap_trim_ = added_task;
3794 task_processor_->AddTask(self, added_task);
3797 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3798 if (rosalloc_space_ != nullptr) {
3799 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3800 if (freed_bytes_revoke > 0U) {
3801 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3802 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3805 if (bump_pointer_space_ != nullptr) {
3806 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3808 if (region_space_ != nullptr) {
3809 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3813 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3814 if (rosalloc_space_ != nullptr) {
3815 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3816 if (freed_bytes_revoke > 0U) {
3817 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3818 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3823 void Heap::RevokeAllThreadLocalBuffers() {
3824 if (rosalloc_space_ != nullptr) {
3825 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3826 if (freed_bytes_revoke > 0U) {
3827 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3828 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3831 if (bump_pointer_space_ != nullptr) {
3832 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3834 if (region_space_ != nullptr) {
3835 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3839 bool Heap::IsGCRequestPending() const {
3840 return concurrent_gc_pending_.LoadRelaxed();
3843 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3844 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3845 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3846 static_cast<jlong>(timeout));
3849 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3850 Thread* self = ThreadForEnv(env);
3851 if (native_need_to_run_finalization_) {
3852 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3853 UpdateMaxNativeFootprint();
3854 native_need_to_run_finalization_ = false;
3856 // Total number of native bytes allocated.
3857 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3858 new_native_bytes_allocated += bytes;
3859 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
3860 collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
3861 collector::kGcTypeFull;
3863 // The second watermark is higher than the gc watermark. If you hit this it means you are
3864 // allocating native objects faster than the GC can keep up with.
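// In outline: crossing native_footprint_gc_watermark_ only schedules (or runs) a background GC,
// while crossing growth_limit_ additionally waits for any running GC, runs finalizers, and, if we
// are still over the limit, performs a GC for the native allocation before refreshing the
// watermark.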
3865 if (new_native_bytes_allocated > growth_limit_) {
3866 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
3867 // Just finished a GC, attempt to run finalizers.
3868 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3869 CHECK(!env->ExceptionCheck());
3870 // Native bytes allocated may be updated by finalization, refresh it.
3871 new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
3873 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
3874 if (new_native_bytes_allocated > growth_limit_) {
3875 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3876 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3877 native_need_to_run_finalization_ = false;
3878 CHECK(!env->ExceptionCheck());
3880 // We have just run finalizers, update the native watermark since it is very likely that
3881 // finalizers released native managed allocations.
3882 UpdateMaxNativeFootprint();
3883 } else if (!IsGCRequestPending()) {
3884 if (IsGcConcurrent()) {
3885 RequestConcurrentGC(self, true); // Request non-sticky type.
3887 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3893 void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3894 size_t expected_size;
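// Decrement native_bytes_allocated_ with a CAS retry loop: reload the expected value and retry if
// another thread changed the counter between the load and the compare-exchange.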
3896 expected_size = native_bytes_allocated_.LoadRelaxed();
3897 if (UNLIKELY(bytes > expected_size)) {
3898 ScopedObjectAccess soa(env);
3899 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3900 StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
3901 "registered as allocated", bytes, expected_size).c_str());
3904 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3905 expected_size - bytes));
3908 size_t Heap::GetTotalMemory() const {
3909 return std::max(max_allowed_footprint_, GetBytesAllocated());
3912 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3913 DCHECK(mod_union_table != nullptr);
3914 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3917 void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3918 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3919 (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
3920 CHECK_GE(byte_count, sizeof(mirror::Object));
3923 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3924 CHECK(remembered_set != nullptr);
3925 space::Space* space = remembered_set->GetSpace();
3926 CHECK(space != nullptr);
3927 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3928 remembered_sets_.Put(space, remembered_set);
3929 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3932 void Heap::RemoveRememberedSet(space::Space* space) {
3933 CHECK(space != nullptr);
3934 auto it = remembered_sets_.find(space);
3935 CHECK(it != remembered_sets_.end());
3937 remembered_sets_.erase(it);
3938 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3941 void Heap::ClearMarkedObjects() {
3942 // Clear all of the spaces' mark bitmaps.
3943 for (const auto& space : GetContinuousSpaces()) {
3944 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3945 if (space->GetLiveBitmap() != mark_bitmap) {
3946 mark_bitmap->Clear();
3949 // Clear the marked objects in the discontinuous space object sets.
3950 for (const auto& space : GetDiscontinuousSpaces()) {
3951 space->GetMarkBitmap()->Clear();
3955 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
3956 allocation_records_.reset(records);
3959 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
3960 if (IsAllocTrackingEnabled()) {
3961 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3962 if (IsAllocTrackingEnabled()) {
3963 GetAllocationRecords()->VisitRoots(visitor);
3968 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
3969 if (IsAllocTrackingEnabled()) {
3970 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3971 if (IsAllocTrackingEnabled()) {
3972 GetAllocationRecords()->SweepAllocationRecords(visitor);
3977 void Heap::AllowNewAllocationRecords() const {
3978 CHECK(!kUseReadBarrier);
3979 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3980 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3981 if (allocation_records != nullptr) {
3982 allocation_records->AllowNewAllocationRecords();
3986 void Heap::DisallowNewAllocationRecords() const {
3987 CHECK(!kUseReadBarrier);
3988 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3989 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
3990 if (allocation_records != nullptr) {
3991 allocation_records->DisallowNewAllocationRecords();
3995 void Heap::BroadcastForNewAllocationRecords() const {
3996 CHECK(kUseReadBarrier);
3997 // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
3998 // be set to false while some threads are waiting for system weak access in
3999 // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4000 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4001 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4002 if (allocation_records != nullptr) {
4003 allocation_records->BroadcastForNewAllocationRecords();
4007 // Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
4008 class StackCrawlState {
4010 StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
4011 : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
4013 size_t GetFrameCount() const {
4014 return frame_count_;
4016 static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
4017 auto* const state = reinterpret_cast<StackCrawlState*>(arg);
4018 const uintptr_t ip = _Unwind_GetIP(context);
4019 // The first stack frame is get_backtrace itself. Skip it.
4020 if (ip != 0 && state->skip_count_ > 0) {
4021 --state->skip_count_;
4022 return _URC_NO_REASON;
4024 // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
4025 state->frames_[state->frame_count_] = ip;
4026 state->frame_count_++;
4027 return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
4031 uintptr_t* const frames_;
4032 size_t frame_count_;
4033 const size_t max_depth_;
4037 static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
4038 StackCrawlState state(frames, max_depth, 0u);
4039 _Unwind_Backtrace(&StackCrawlState::Callback, &state);
4040 return state.GetFrameCount();
4043 void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
4044 auto* const runtime = Runtime::Current();
4045 if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
4046 !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
4047 // Check if we should GC.
4048 bool new_backtrace = false;
4050 static constexpr size_t kMaxFrames = 16u;
4051 uintptr_t backtrace[kMaxFrames];
4052 const size_t frames = get_backtrace(backtrace, kMaxFrames);
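// Mix the return addresses into a single hash below; 2654435761 is the 32-bit golden-ratio
// constant commonly used for multiplicative hashing, and the shift/xor step further scrambles the
// bits.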
4054 for (size_t i = 0; i < frames; ++i) {
4055 hash = hash * 2654435761 + backtrace[i];
4056 hash += (hash >> 13) ^ (hash << 6);
4058 MutexLock mu(self, *backtrace_lock_);
4059 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4060 if (new_backtrace) {
4061 seen_backtraces_.insert(hash);
4064 if (new_backtrace) {
4065 StackHandleScope<1> hs(self);
4066 auto h = hs.NewHandleWrapper(obj);
4067 CollectGarbage(false);
4068 unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4070 seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4075 void Heap::DisableGCForShutdown() {
4076 Thread* const self = Thread::Current();
4077 CHECK(Runtime::Current()->IsShuttingDown(self));
4078 MutexLock mu(self, *gc_complete_lock_);
4079 gc_disabled_for_shutdown_ = true;
4082 bool Heap::ObjectIsInBootImageSpace(mirror::Object* obj) const {
4083 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4084 if (space->HasAddress(obj)) {
4091 bool Heap::IsInBootImageOatFile(const void* p) const {
4092 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4093 if (space->GetOatFile()->Contains(p)) {
4100 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4101 uint32_t* boot_image_end,
4102 uint32_t* boot_oat_begin,
4103 uint32_t* boot_oat_end) {
4104 DCHECK(boot_image_begin != nullptr);
4105 DCHECK(boot_image_end != nullptr);
4106 DCHECK(boot_oat_begin != nullptr);
4107 DCHECK(boot_oat_end != nullptr);
4108 *boot_image_begin = 0u;
4109 *boot_image_end = 0u;
4110 *boot_oat_begin = 0u;
4112 for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4113 const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4114 const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4115 if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4116 *boot_image_begin = image_begin;
4118 *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4119 const OatFile* boot_oat_file = space_->GetOatFile();
4120 const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4121 const uint32_t oat_size = boot_oat_file->Size();
4122 if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4123 *boot_oat_begin = oat_begin;
4125 *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);