1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "heap.h"
18 
19 #include <limits>
20 #include "android-base/thread_annotations.h"
21 #if defined(__BIONIC__) || defined(__GLIBC__)
22 #include <malloc.h>  // For mallinfo()
23 #endif
24 #include <memory>
25 #include <vector>
26 
27 #include "android-base/stringprintf.h"
28 
29 #include "allocation_listener.h"
30 #include "art_field-inl.h"
31 #include "backtrace_helper.h"
32 #include "base/allocator.h"
33 #include "base/arena_allocator.h"
34 #include "base/dumpable.h"
35 #include "base/file_utils.h"
36 #include "base/histogram-inl.h"
37 #include "base/logging.h"  // For VLOG.
38 #include "base/memory_tool.h"
39 #include "base/mutex.h"
40 #include "base/os.h"
41 #include "base/stl_util.h"
42 #include "base/systrace.h"
43 #include "base/time_utils.h"
44 #include "base/utils.h"
45 #include "class_root-inl.h"
46 #include "common_throws.h"
47 #include "debugger.h"
48 #include "dex/dex_file-inl.h"
49 #include "entrypoints/quick/quick_alloc_entrypoints.h"
50 #include "gc/accounting/card_table-inl.h"
51 #include "gc/accounting/heap_bitmap-inl.h"
52 #include "gc/accounting/mod_union_table-inl.h"
53 #include "gc/accounting/read_barrier_table.h"
54 #include "gc/accounting/remembered_set.h"
55 #include "gc/accounting/space_bitmap-inl.h"
56 #include "gc/collector/concurrent_copying.h"
57 #include "gc/collector/mark_sweep.h"
58 #include "gc/collector/partial_mark_sweep.h"
59 #include "gc/collector/semi_space.h"
60 #include "gc/collector/sticky_mark_sweep.h"
61 #include "gc/racing_check.h"
62 #include "gc/reference_processor.h"
63 #include "gc/scoped_gc_critical_section.h"
64 #include "gc/space/bump_pointer_space.h"
65 #include "gc/space/dlmalloc_space-inl.h"
66 #include "gc/space/image_space.h"
67 #include "gc/space/large_object_space.h"
68 #include "gc/space/region_space.h"
69 #include "gc/space/rosalloc_space-inl.h"
70 #include "gc/space/space-inl.h"
71 #include "gc/space/zygote_space.h"
72 #include "gc/task_processor.h"
73 #include "gc/verification.h"
74 #include "gc_pause_listener.h"
75 #include "gc_root.h"
76 #include "handle_scope-inl.h"
77 #include "heap-inl.h"
78 #include "heap-visit-objects-inl.h"
79 #include "image.h"
80 #include "intern_table.h"
81 #include "jit/jit.h"
82 #include "jit/jit_code_cache.h"
83 #include "jni/java_vm_ext.h"
84 #include "mirror/class-inl.h"
85 #include "mirror/executable-inl.h"
86 #include "mirror/field.h"
87 #include "mirror/method_handle_impl.h"
88 #include "mirror/object-inl.h"
89 #include "mirror/object-refvisitor-inl.h"
90 #include "mirror/object_array-inl.h"
91 #include "mirror/reference-inl.h"
92 #include "mirror/var_handle.h"
93 #include "nativehelper/scoped_local_ref.h"
94 #include "obj_ptr-inl.h"
95 #include "reflection.h"
96 #include "runtime.h"
97 #include "scoped_thread_state_change-inl.h"
98 #include "thread_list.h"
99 #include "verify_object-inl.h"
100 #include "well_known_classes.h"
101 
102 namespace art {
103 
104 namespace gc {
105 
106 DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);
107 
108 // Minimum amount of remaining bytes before a concurrent GC is triggered.
109 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
110 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
111 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
112 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
113 // threads (lower pauses, use less memory bandwidth).
114 static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
115   return use_generational_cc ? 0.5 : 1.0;
116 }
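// Illustrative note (not the authoritative call site): this factor is meant to be multiplied into
// the sticky collector's estimated throughput before comparing it with the non-sticky collector's
// mean throughput when deciding whether to schedule another sticky GC, roughly:
//
//   // Sketch only:
//   // if (sticky_throughput * GetStickyGcThroughputAdjustment(use_generational_cc) >=
//   //     non_sticky_mean_throughput) { /* keep doing sticky GCs */ }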
117 // Whether or not we compact the zygote in PreZygoteFork.
118 static constexpr bool kCompactZygote = kMovingCollector;
119 // How many reserve entries are at the end of the allocation stack; these are only needed if the
120 // allocation stack overflows.
121 static constexpr size_t kAllocationStackReserveSize = 1024;
122 // Default mark stack size in bytes.
123 static const size_t kDefaultMarkStackSize = 64 * KB;
124 // Define space name.
125 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
126 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
127 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
128 static const char* kNonMovingSpaceName = "non moving space";
129 static const char* kZygoteSpaceName = "zygote space";
130 static constexpr bool kGCALotMode = false;
131 // GC alot mode uses a small allocation stack to stress test a lot of GC.
132 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
133     sizeof(mirror::HeapReference<mirror::Object>);
134 // Verify-object mode uses a small allocation stack size since searching the allocation stack is slow.
135 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
136     sizeof(mirror::HeapReference<mirror::Object>);
137 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
138     sizeof(mirror::HeapReference<mirror::Object>);
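// For scale (assuming 4-byte compressed mirror::HeapReference entries): the GC-alot stack above
// holds 4 KB / 4 B = 1K entries, the verify-object stack 16 KB / 4 B = 4K entries, and the
// default stack 8 MB / 4 B = roughly 2M entries.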
139 
140 // For deterministic compilation, we need the heap to be at a well-known address.
141 static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
142 // Dump the rosalloc stats on SIGQUIT.
143 static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
144 
145 static const char* kRegionSpaceName = "main space (region space)";
146 
147 // If true, we log all GCs in both the foreground and background. Used for debugging.
148 static constexpr bool kLogAllGCs = false;
149 
150 // Use the max heap size for 2 seconds. This is smaller than the usual 5s window since we don't
151 // want to leave the process allocating with relaxed ergonomics for that long.
152 static constexpr size_t kPostForkMaxHeapDurationMS = 2000;
153 
154 #if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
155 // 300 MB (0x12c00000) - (default non-moving space capacity).
156 uint8_t* const Heap::kPreferredAllocSpaceBegin =
157     reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
158 #else
159 #ifdef __ANDROID__
160 // For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
161 uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
162 #else
163 // For 32-bit host, use 0x40000000 because asan uses most of the space below this.
164 uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
165 #endif
166 #endif
167 
168 static inline bool CareAboutPauseTimes() {
169   return Runtime::Current()->InJankPerceptibleProcessState();
170 }
171 
172 static void VerifyBootImagesContiguity(const std::vector<gc::space::ImageSpace*>& image_spaces) {
173   uint32_t boot_image_size = 0u;
174   for (size_t i = 0u, num_spaces = image_spaces.size(); i != num_spaces; ) {
175     const ImageHeader& image_header = image_spaces[i]->GetImageHeader();
176     uint32_t reservation_size = image_header.GetImageReservationSize();
177     uint32_t image_count = image_header.GetImageSpaceCount();
178 
179     CHECK_NE(image_count, 0u);
180     CHECK_LE(image_count, num_spaces - i);
181     CHECK_NE(reservation_size, 0u);
182     for (size_t j = 1u; j != image_count; ++j) {
183       CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetComponentCount(), 0u);
184       CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetImageReservationSize(), 0u);
185     }
186 
187     // Check the start of the heap.
188     CHECK_EQ(image_spaces[0]->Begin() + boot_image_size, image_spaces[i]->Begin());
189     // Check contiguous layout of images and oat files.
190     const uint8_t* current_heap = image_spaces[i]->Begin();
191     const uint8_t* current_oat = image_spaces[i]->GetImageHeader().GetOatFileBegin();
192     for (size_t j = 0u; j != image_count; ++j) {
193       const ImageHeader& current_header = image_spaces[i + j]->GetImageHeader();
194       CHECK_EQ(current_heap, image_spaces[i + j]->Begin());
195       CHECK_EQ(current_oat, current_header.GetOatFileBegin());
196       current_heap += RoundUp(current_header.GetImageSize(), kPageSize);
197       CHECK_GT(current_header.GetOatFileEnd(), current_header.GetOatFileBegin());
198       current_oat = current_header.GetOatFileEnd();
199     }
200     // Check that oat files start at the end of images.
201     CHECK_EQ(current_heap, image_spaces[i]->GetImageHeader().GetOatFileBegin());
202     // Check that the reservation size equals the size of images and oat files.
203     CHECK_EQ(reservation_size, static_cast<size_t>(current_oat - image_spaces[i]->Begin()));
204 
205     boot_image_size += reservation_size;
206     i += image_count;
207   }
208 }
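// Informal sketch of the layout the checks above enforce for each multi-image chunk:
//
//   chunk begin -> | image 0 | image 1 | ... | oat 0 | oat 1 | ... | <- begin + reservation_size
//
// i.e. the page-aligned image spaces are contiguous, the oat files start exactly where the images
// end, and successive chunks are packed back to back from the start of the boot image heap.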
209 
210 Heap::Heap(size_t initial_size,
211            size_t growth_limit,
212            size_t min_free,
213            size_t max_free,
214            double target_utilization,
215            double foreground_heap_growth_multiplier,
216            size_t stop_for_native_allocs,
217            size_t capacity,
218            size_t non_moving_space_capacity,
219            const std::vector<std::string>& boot_class_path,
220            const std::vector<std::string>& boot_class_path_locations,
221            const std::string& image_file_name,
222            const InstructionSet image_instruction_set,
223            CollectorType foreground_collector_type,
224            CollectorType background_collector_type,
225            space::LargeObjectSpaceType large_object_space_type,
226            size_t large_object_threshold,
227            size_t parallel_gc_threads,
228            size_t conc_gc_threads,
229            bool low_memory_mode,
230            size_t long_pause_log_threshold,
231            size_t long_gc_log_threshold,
232            bool ignore_target_footprint,
233            bool always_log_explicit_gcs,
234            bool use_tlab,
235            bool verify_pre_gc_heap,
236            bool verify_pre_sweeping_heap,
237            bool verify_post_gc_heap,
238            bool verify_pre_gc_rosalloc,
239            bool verify_pre_sweeping_rosalloc,
240            bool verify_post_gc_rosalloc,
241            bool gc_stress_mode,
242            bool measure_gc_performance,
243            bool use_homogeneous_space_compaction_for_oom,
244            bool use_generational_cc,
245            uint64_t min_interval_homogeneous_space_compaction_by_oom,
246            bool dump_region_info_before_gc,
247            bool dump_region_info_after_gc,
248            space::ImageSpaceLoadingOrder image_space_loading_order)
249     : non_moving_space_(nullptr),
250       rosalloc_space_(nullptr),
251       dlmalloc_space_(nullptr),
252       main_space_(nullptr),
253       collector_type_(kCollectorTypeNone),
254       foreground_collector_type_(foreground_collector_type),
255       background_collector_type_(background_collector_type),
256       desired_collector_type_(foreground_collector_type_),
257       pending_task_lock_(nullptr),
258       parallel_gc_threads_(parallel_gc_threads),
259       conc_gc_threads_(conc_gc_threads),
260       low_memory_mode_(low_memory_mode),
261       long_pause_log_threshold_(long_pause_log_threshold),
262       long_gc_log_threshold_(long_gc_log_threshold),
263       process_cpu_start_time_ns_(ProcessCpuNanoTime()),
264       pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
265       post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
266       pre_gc_weighted_allocated_bytes_(0.0),
267       post_gc_weighted_allocated_bytes_(0.0),
268       ignore_target_footprint_(ignore_target_footprint),
269       always_log_explicit_gcs_(always_log_explicit_gcs),
270       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
271       zygote_space_(nullptr),
272       large_object_threshold_(large_object_threshold),
273       disable_thread_flip_count_(0),
274       thread_flip_running_(false),
275       collector_type_running_(kCollectorTypeNone),
276       last_gc_cause_(kGcCauseNone),
277       thread_running_gc_(nullptr),
278       last_gc_type_(collector::kGcTypeNone),
279       next_gc_type_(collector::kGcTypePartial),
280       capacity_(capacity),
281       growth_limit_(growth_limit),
282       target_footprint_(initial_size),
283       // Using kPostMonitorLock since a lock at kDefaultMutexLevel is acquired after
284       // this one.
285       process_state_update_lock_("process state update lock", kPostMonitorLock),
286       min_foreground_target_footprint_(0),
287       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
288       total_bytes_freed_ever_(0),
289       total_objects_freed_ever_(0),
290       num_bytes_allocated_(0),
291       native_bytes_registered_(0),
292       old_native_bytes_allocated_(0),
293       native_objects_notified_(0),
294       num_bytes_freed_revoke_(0),
295       verify_missing_card_marks_(false),
296       verify_system_weaks_(false),
297       verify_pre_gc_heap_(verify_pre_gc_heap),
298       verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
299       verify_post_gc_heap_(verify_post_gc_heap),
300       verify_mod_union_table_(false),
301       verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
302       verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
303       verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
304       gc_stress_mode_(gc_stress_mode),
305       /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
306        * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
307        * verification is enabled, we limit the size of allocation stacks to speed up their
308        * searching.
309        */
310       max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
311           : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
312           kDefaultAllocationStackSize),
313       current_allocator_(kAllocatorTypeDlMalloc),
314       current_non_moving_allocator_(kAllocatorTypeNonMoving),
315       bump_pointer_space_(nullptr),
316       temp_space_(nullptr),
317       region_space_(nullptr),
318       min_free_(min_free),
319       max_free_(max_free),
320       target_utilization_(target_utilization),
321       foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
322       stop_for_native_allocs_(stop_for_native_allocs),
323       total_wait_time_(0),
324       verify_object_mode_(kVerifyObjectModeDisabled),
325       disable_moving_gc_count_(0),
326       semi_space_collector_(nullptr),
327       active_concurrent_copying_collector_(nullptr),
328       young_concurrent_copying_collector_(nullptr),
329       concurrent_copying_collector_(nullptr),
330       is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
331       use_tlab_(use_tlab),
332       main_space_backup_(nullptr),
333       min_interval_homogeneous_space_compaction_by_oom_(
334           min_interval_homogeneous_space_compaction_by_oom),
335       last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
336       pending_collector_transition_(nullptr),
337       pending_heap_trim_(nullptr),
338       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
339       use_generational_cc_(use_generational_cc),
340       running_collection_is_blocking_(false),
341       blocking_gc_count_(0U),
342       blocking_gc_time_(0U),
343       last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
344           (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
345       gc_count_last_window_(0U),
346       blocking_gc_count_last_window_(0U),
347       gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
348       blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
349                                         kGcCountRateMaxBucketCount),
350       alloc_tracking_enabled_(false),
351       alloc_record_depth_(AllocRecordObjectMap::kDefaultAllocStackDepth),
352       backtrace_lock_(nullptr),
353       seen_backtrace_count_(0u),
354       unique_backtrace_count_(0u),
355       gc_disabled_for_shutdown_(false),
356       dump_region_info_before_gc_(dump_region_info_before_gc),
357       dump_region_info_after_gc_(dump_region_info_after_gc),
358       boot_image_spaces_(),
359       boot_images_start_address_(0u),
360       boot_images_size_(0u) {
361   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
362     LOG(INFO) << "Heap() entering";
363   }
364   if (kUseReadBarrier) {
365     CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
366     CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
367   } else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
368     CHECK_EQ(IsMovingGc(foreground_collector_type_), IsMovingGc(background_collector_type_))
369         << "Changing from " << foreground_collector_type_ << " to "
370         << background_collector_type_ << " (or vice versa) is not supported.";
371   }
372   verification_.reset(new Verification(this));
373   CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
374   ScopedTrace trace(__FUNCTION__);
375   Runtime* const runtime = Runtime::Current();
376   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
377   // entrypoints.
378   const bool is_zygote = runtime->IsZygote();
379   if (!is_zygote) {
380     // Background compaction is currently not supported for command line runs.
381     if (background_collector_type_ != foreground_collector_type_) {
382       VLOG(heap) << "Disabling background compaction for non zygote";
383       background_collector_type_ = foreground_collector_type_;
384     }
385   }
386   ChangeCollector(desired_collector_type_);
387   live_bitmap_.reset(new accounting::HeapBitmap(this));
388   mark_bitmap_.reset(new accounting::HeapBitmap(this));
389 
390   // We don't have hspace compaction enabled with CC.
391   if (foreground_collector_type_ == kCollectorTypeCC) {
392     use_homogeneous_space_compaction_for_oom_ = false;
393   }
394   bool support_homogeneous_space_compaction =
395       background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
396       use_homogeneous_space_compaction_for_oom_;
397   // We may use the same space as the main space for the non-moving space if we don't need to
398   // compact from the main space.
399   // This is not the case if we support homogeneous compaction or have a moving background
400   // collector type.
401   bool separate_non_moving_space = is_zygote ||
402       support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
403       IsMovingGc(background_collector_type_);
404 
405   // Requested begin for the alloc space, to follow the mapped image and oat files
406   uint8_t* request_begin = nullptr;
407   // Calculate the extra space required after the boot image, see allocations below.
408   size_t heap_reservation_size = 0u;
409   if (separate_non_moving_space) {
410     heap_reservation_size = non_moving_space_capacity;
411   } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
412     heap_reservation_size = capacity_;
413   }
414   heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
415   // Load image space(s).
416   std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
417   MemMap heap_reservation;
418   if (space::ImageSpace::LoadBootImage(boot_class_path,
419                                        boot_class_path_locations,
420                                        image_file_name,
421                                        image_instruction_set,
422                                        image_space_loading_order,
423                                        runtime->ShouldRelocate(),
424                                        /*executable=*/ !runtime->IsAotCompiler(),
425                                        is_zygote,
426                                        heap_reservation_size,
427                                        &boot_image_spaces,
428                                        &heap_reservation)) {
429     DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
430     DCHECK(!boot_image_spaces.empty());
431     request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
432     DCHECK(!heap_reservation.IsValid() || request_begin == heap_reservation.Begin())
433         << "request_begin=" << static_cast<const void*>(request_begin)
434         << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
435     for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
436       boot_image_spaces_.push_back(space.get());
437       AddSpace(space.release());
438     }
439     boot_images_start_address_ = PointerToLowMemUInt32(boot_image_spaces_.front()->Begin());
440     uint32_t boot_images_end =
441         PointerToLowMemUInt32(boot_image_spaces_.back()->GetImageHeader().GetOatFileEnd());
442     boot_images_size_ = boot_images_end - boot_images_start_address_;
443     if (kIsDebugBuild) {
444       VerifyBootImagesContiguity(boot_image_spaces_);
445     }
446   } else {
447     if (foreground_collector_type_ == kCollectorTypeCC) {
448       // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
449       // when there's no image (dex2oat for target).
450       request_begin = kPreferredAllocSpaceBegin;
451     }
452     // Gross hack to make dex2oat deterministic.
453     if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
454       // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
455       // b/26849108
456       request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
457     }
458   }
459 
460   /*
461   requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
462                                      +-  nonmoving space (non_moving_space_capacity)+-
463                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
464                                      +-????????????????????????????????????????????+-
465                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
466                                      +-main alloc space / bump space 1 (capacity_) +-
467                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
468                                      +-????????????????????????????????????????????+-
469                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
470                                      +-main alloc space2 / bump space 2 (capacity_)+-
471                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
472   */
473 
474   MemMap main_mem_map_1;
475   MemMap main_mem_map_2;
476 
477   std::string error_str;
478   MemMap non_moving_space_mem_map;
479   if (separate_non_moving_space) {
480     ScopedTrace trace2("Create separate non moving space");
481     // If we are the zygote, the non moving space becomes the zygote space when we run
482     // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
483     // rename the mem map later.
484     const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
485     // Reserve the non moving mem map before the other two since it needs to be at a specific
486     // address.
487     DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
488     if (heap_reservation.IsValid()) {
489       non_moving_space_mem_map = heap_reservation.RemapAtEnd(
490           heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
491     } else {
492       non_moving_space_mem_map = MapAnonymousPreferredAddress(
493           space_name, request_begin, non_moving_space_capacity, &error_str);
494     }
495     CHECK(non_moving_space_mem_map.IsValid()) << error_str;
496     DCHECK(!heap_reservation.IsValid());
497     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
498     request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
499   }
500   // Attempt to create 2 mem maps at or after the requested begin.
501   if (foreground_collector_type_ != kCollectorTypeCC) {
502     ScopedTrace trace2("Create main mem map");
503     if (separate_non_moving_space || !is_zygote) {
504       main_mem_map_1 = MapAnonymousPreferredAddress(
505           kMemMapSpaceName[0], request_begin, capacity_, &error_str);
506     } else {
507       // If no separate non-moving space and we are the zygote, the main space must come right after
508       // the image space to avoid a gap. This is required since we want the zygote space to be
509       // adjacent to the image space.
510       DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
511       main_mem_map_1 = MemMap::MapAnonymous(
512           kMemMapSpaceName[0],
513           request_begin,
514           capacity_,
515           PROT_READ | PROT_WRITE,
516           /* low_4gb= */ true,
517           /* reuse= */ false,
518           heap_reservation.IsValid() ? &heap_reservation : nullptr,
519           &error_str);
520     }
521     CHECK(main_mem_map_1.IsValid()) << error_str;
522     DCHECK(!heap_reservation.IsValid());
523   }
524   if (support_homogeneous_space_compaction ||
525       background_collector_type_ == kCollectorTypeSS ||
526       foreground_collector_type_ == kCollectorTypeSS) {
527     ScopedTrace trace2("Create main mem map 2");
528     main_mem_map_2 = MapAnonymousPreferredAddress(
529         kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
530     CHECK(main_mem_map_2.IsValid()) << error_str;
531   }
532 
533   // Create the non moving space first so that bitmaps don't take up the address range.
534   if (separate_non_moving_space) {
535     ScopedTrace trace2("Add non moving space");
536     // Non moving space is always dlmalloc since we currently don't have support for multiple
537     // active rosalloc spaces.
538     const size_t size = non_moving_space_mem_map.Size();
539     const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
540     non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
541                                                                "zygote / non moving space",
542                                                                kDefaultStartingSize,
543                                                                initial_size,
544                                                                size,
545                                                                size,
546                                                                /* can_move_objects= */ false);
547     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
548         << non_moving_space_mem_map_begin;
549     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
550     AddSpace(non_moving_space_);
551   }
552   // Create other spaces based on whether or not we have a moving GC.
553   if (foreground_collector_type_ == kCollectorTypeCC) {
554     CHECK(separate_non_moving_space);
555     // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
556     MemMap region_space_mem_map =
557         space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
558     CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
559     region_space_ = space::RegionSpace::Create(
560         kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
561     AddSpace(region_space_);
562   } else if (IsMovingGc(foreground_collector_type_)) {
563     // Create bump pointer spaces.
564     // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
565     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
566     bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
567                                                                     std::move(main_mem_map_1));
568     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
569     AddSpace(bump_pointer_space_);
570     temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
571                                                             std::move(main_mem_map_2));
572     CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
573     AddSpace(temp_space_);
574     CHECK(separate_non_moving_space);
575   } else {
576     CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
577     CHECK(main_space_ != nullptr);
578     AddSpace(main_space_);
579     if (!separate_non_moving_space) {
580       non_moving_space_ = main_space_;
581       CHECK(!non_moving_space_->CanMoveObjects());
582     }
583     if (main_mem_map_2.IsValid()) {
584       const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
585       main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
586                                                            initial_size,
587                                                            growth_limit_,
588                                                            capacity_,
589                                                            name,
590                                                            /* can_move_objects= */ true));
591       CHECK(main_space_backup_.get() != nullptr);
592       // Add the space so it's accounted for in the heap_begin and heap_end.
593       AddSpace(main_space_backup_.get());
594     }
595   }
596   CHECK(non_moving_space_ != nullptr);
597   CHECK(!non_moving_space_->CanMoveObjects());
598   // Allocate the large object space.
599   if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
600     large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
601     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
602   } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
603     large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
604     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
605   } else {
606     // Disable the large object space by making the cutoff excessively large.
607     large_object_threshold_ = std::numeric_limits<size_t>::max();
608     large_object_space_ = nullptr;
609   }
610   if (large_object_space_ != nullptr) {
611     AddSpace(large_object_space_);
612   }
613   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
614   CHECK(!continuous_spaces_.empty());
615   // Relies on the spaces being sorted.
616   uint8_t* heap_begin = continuous_spaces_.front()->Begin();
617   uint8_t* heap_end = continuous_spaces_.back()->Limit();
618   size_t heap_capacity = heap_end - heap_begin;
619   // Remove the main backup space since it slows down the GC to have unused extra spaces.
620   // TODO: Avoid needing to do this.
621   if (main_space_backup_.get() != nullptr) {
622     RemoveSpace(main_space_backup_.get());
623   }
624   // Allocate the card table.
625   // We currently don't support dynamically resizing the card table.
626   // Since we don't know where in the low_4gb the app image will be located, make the card table
627   // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
628   UNUSED(heap_capacity);
629   // Start at 4 KB; we can be sure there are no spaces mapped this low since the address range is
630   // reserved by the kernel.
631   static constexpr size_t kMinHeapAddress = 4 * KB;
632   card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
633                                                   4 * GB - kMinHeapAddress));
634   CHECK(card_table_.get() != nullptr) << "Failed to create card table";
635   if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
636     rb_table_.reset(new accounting::ReadBarrierTable());
637     DCHECK(rb_table_->IsAllCleared());
638   }
639   if (HasBootImageSpace()) {
640     // Don't add the image mod union table if we are running without an image; this can crash if
641     // we use the CardCache implementation.
642     for (space::ImageSpace* image_space : GetBootImageSpaces()) {
643       accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
644           "Image mod-union table", this, image_space);
645       CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
646       AddModUnionTable(mod_union_table);
647     }
648   }
649   if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
650     accounting::RememberedSet* non_moving_space_rem_set =
651         new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
652     CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
653     AddRememberedSet(non_moving_space_rem_set);
654   }
655   // TODO: Count objects in the image space here?
656   num_bytes_allocated_.store(0, std::memory_order_relaxed);
657   mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
658                                                     kDefaultMarkStackSize));
659   const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
660   allocation_stack_.reset(accounting::ObjectStack::Create(
661       "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
662   live_stack_.reset(accounting::ObjectStack::Create(
663       "live stack", max_allocation_stack_size_, alloc_stack_capacity));
664   // It's still too early to take a lock because there are no threads yet, but we can create locks
665   // now. We don't create it earlier to make it clear that you can't use locks during heap
666   // initialization.
667   gc_complete_lock_ = new Mutex("GC complete lock");
668   gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
669                                                 *gc_complete_lock_));
670 
671   thread_flip_lock_ = new Mutex("GC thread flip lock");
672   thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
673                                                 *thread_flip_lock_));
674   task_processor_.reset(new TaskProcessor());
675   reference_processor_.reset(new ReferenceProcessor());
676   pending_task_lock_ = new Mutex("Pending task lock");
677   if (ignore_target_footprint_) {
678     SetIdealFootprint(std::numeric_limits<size_t>::max());
679     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
680   }
681   CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
682   // Create our garbage collectors.
683   for (size_t i = 0; i < 2; ++i) {
684     const bool concurrent = i != 0;
685     if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
686         (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
687       garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
688       garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
689       garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
690     }
691   }
692   if (kMovingCollector) {
693     if (MayUseCollector(kCollectorTypeSS) ||
694         MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
695         use_homogeneous_space_compaction_for_oom_) {
696       semi_space_collector_ = new collector::SemiSpace(this);
697       garbage_collectors_.push_back(semi_space_collector_);
698     }
699     if (MayUseCollector(kCollectorTypeCC)) {
700       concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
701                                                                        /*young_gen=*/false,
702                                                                        use_generational_cc_,
703                                                                        "",
704                                                                        measure_gc_performance);
705       if (use_generational_cc_) {
706         young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
707             this,
708             /*young_gen=*/true,
709             use_generational_cc_,
710             "young",
711             measure_gc_performance);
712       }
713       active_concurrent_copying_collector_ = concurrent_copying_collector_;
714       DCHECK(region_space_ != nullptr);
715       concurrent_copying_collector_->SetRegionSpace(region_space_);
716       if (use_generational_cc_) {
717         young_concurrent_copying_collector_->SetRegionSpace(region_space_);
718         // At this point, non-moving space should be created.
719         DCHECK(non_moving_space_ != nullptr);
720         concurrent_copying_collector_->CreateInterRegionRefBitmaps();
721       }
722       garbage_collectors_.push_back(concurrent_copying_collector_);
723       if (use_generational_cc_) {
724         garbage_collectors_.push_back(young_concurrent_copying_collector_);
725       }
726     }
727   }
728   if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
729       (is_zygote || separate_non_moving_space)) {
730     // Check that there's no gap between the image space and the non moving space so that the
731     // immune region won't break (eg. due to a large object allocated in the gap). This is only
732     // required when we're the zygote.
733     // Space with smallest Begin().
734     space::ImageSpace* first_space = nullptr;
735     for (space::ImageSpace* space : boot_image_spaces_) {
736       if (first_space == nullptr || space->Begin() < first_space->Begin()) {
737         first_space = space;
738       }
739     }
740     bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
741     if (!no_gap) {
742       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
743       MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
744       LOG(FATAL) << "There's a gap between the image space and the non-moving space";
745     }
746   }
747   instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
748   if (gc_stress_mode_) {
749     backtrace_lock_ = new Mutex("GC complete lock");
750   }
751   if (is_running_on_memory_tool_ || gc_stress_mode_) {
752     instrumentation->InstrumentQuickAllocEntryPoints();
753   }
754   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
755     LOG(INFO) << "Heap() exiting";
756   }
757 }
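// Descriptive summary of the constructor above: validate the collector combination, load the boot
// image spaces, reserve the non-moving / main / backup mem maps, create the concrete spaces
// (region, bump pointer, or malloc spaces plus the large object space), allocate the card table
// and mod-union / remembered-set accounting, create the mark / allocation / live stacks and the
// heap locks, set up the task and reference processors, and finally instantiate the garbage
// collectors that the chosen collector types may use.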
758 
759 MemMap Heap::MapAnonymousPreferredAddress(const char* name,
760                                           uint8_t* request_begin,
761                                           size_t capacity,
762                                           std::string* out_error_str) {
763   while (true) {
764     MemMap map = MemMap::MapAnonymous(name,
765                                       request_begin,
766                                       capacity,
767                                       PROT_READ | PROT_WRITE,
768                                       /*low_4gb=*/ true,
769                                       /*reuse=*/ false,
770                                       /*reservation=*/ nullptr,
771                                       out_error_str);
772     if (map.IsValid() || request_begin == nullptr) {
773       return map;
774     }
775     // Retry a second time with no specified request begin.
776     request_begin = nullptr;
777   }
778 }
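// Minimal usage sketch (names are illustrative): the first attempt honors request_begin; if that
// placement fails, the loop above retries once with request_begin == nullptr so any low-4GB
// address can be chosen.
//
//   // std::string error;
//   // MemMap map = MapAnonymousPreferredAddress("example space", preferred_begin, 64 * MB, &error);
//   // CHECK(map.IsValid()) << error;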
779 
780 bool Heap::MayUseCollector(CollectorType type) const {
781   return foreground_collector_type_ == type || background_collector_type_ == type;
782 }
783 
784 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
785                                                       size_t initial_size,
786                                                       size_t growth_limit,
787                                                       size_t capacity,
788                                                       const char* name,
789                                                       bool can_move_objects) {
790   space::MallocSpace* malloc_space = nullptr;
791   if (kUseRosAlloc) {
792     // Create rosalloc space.
793     malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
794                                                           name,
795                                                           kDefaultStartingSize,
796                                                           initial_size,
797                                                           growth_limit,
798                                                           capacity,
799                                                           low_memory_mode_,
800                                                           can_move_objects);
801   } else {
802     malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
803                                                           name,
804                                                           kDefaultStartingSize,
805                                                           initial_size,
806                                                           growth_limit,
807                                                           capacity,
808                                                           can_move_objects);
809   }
810   if (collector::SemiSpace::kUseRememberedSet) {
811     accounting::RememberedSet* rem_set  =
812         new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
813     CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
814     AddRememberedSet(rem_set);
815   }
816   CHECK(malloc_space != nullptr) << "Failed to create " << name;
817   malloc_space->SetFootprintLimit(malloc_space->Capacity());
818   return malloc_space;
819 }
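// Note: the rosalloc/dlmalloc choice above is fixed at build time by kUseRosAlloc; in either case
// the new space also gets a remembered set when the semi-space collector is configured to use one.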
820 
821 void Heap::CreateMainMallocSpace(MemMap&& mem_map,
822                                  size_t initial_size,
823                                  size_t growth_limit,
824                                  size_t capacity) {
825   // Is background compaction enabled?
826   bool can_move_objects = IsMovingGc(background_collector_type_) !=
827       IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
828   // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
829   // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
830   // from the main space to the zygote space. If background compaction is enabled, always pass in
831   // that we can move objects.
832   if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
833     // After the zygote we want this to be false if we don't have background compaction enabled so
834     // that getting primitive array elements is faster.
835     can_move_objects = !HasZygoteSpace();
836   }
837   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
838     RemoveRememberedSet(main_space_);
839   }
840   const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
841   main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
842                                             initial_size,
843                                             growth_limit,
844                                             capacity, name,
845                                             can_move_objects);
846   SetSpaceAsDefault(main_space_);
847   VLOG(heap) << "Created main space " << main_space_;
848 }
849 
850 void Heap::ChangeAllocator(AllocatorType allocator) {
851   if (current_allocator_ != allocator) {
852     // These two allocators are only used internally and don't have any entrypoints.
853     CHECK_NE(allocator, kAllocatorTypeLOS);
854     CHECK_NE(allocator, kAllocatorTypeNonMoving);
855     current_allocator_ = allocator;
856     MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
857     SetQuickAllocEntryPointsAllocator(current_allocator_);
858     Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
859   }
860 }
861 
862 bool Heap::IsCompilingBoot() const {
863   if (!Runtime::Current()->IsAotCompiler()) {
864     return false;
865   }
866   ScopedObjectAccess soa(Thread::Current());
867   for (const auto& space : continuous_spaces_) {
868     if (space->IsImageSpace() || space->IsZygoteSpace()) {
869       return false;
870     }
871   }
872   return true;
873 }
874 
875 void Heap::IncrementDisableMovingGC(Thread* self) {
876   // Need to do this holding the lock to prevent races where the GC is about to run / running when
877   // we attempt to disable it.
878   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
879   MutexLock mu(self, *gc_complete_lock_);
880   ++disable_moving_gc_count_;
881   if (IsMovingGc(collector_type_running_)) {
882     WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
883   }
884 }
885 
886 void Heap::DecrementDisableMovingGC(Thread* self) {
887   MutexLock mu(self, *gc_complete_lock_);
888   CHECK_GT(disable_moving_gc_count_, 0U);
889   --disable_moving_gc_count_;
890 }
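// The two functions above are used as a matched pair around code that relies on raw object
// addresses staying put. A minimal sketch (actual call sites vary):
//
//   // heap->IncrementDisableMovingGC(self);
//   // ... use addresses that a moving GC must not invalidate ...
//   // heap->DecrementDisableMovingGC(self);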
891 
892 void Heap::IncrementDisableThreadFlip(Thread* self) {
893   // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
894   CHECK(kUseReadBarrier);
895   bool is_nested = self->GetDisableThreadFlipCount() > 0;
896   self->IncrementDisableThreadFlipCount();
897   if (is_nested) {
898     // If this is a nested JNI critical section enter, we don't need to wait or increment the global
899     // counter. The global counter is incremented only once for a thread for the outermost enter.
900     return;
901   }
902   ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
903   MutexLock mu(self, *thread_flip_lock_);
904   thread_flip_cond_->CheckSafeToWait(self);
905   bool has_waited = false;
906   uint64_t wait_start = 0;
907   if (thread_flip_running_) {
908     wait_start = NanoTime();
909     ScopedTrace trace("IncrementDisableThreadFlip");
910     while (thread_flip_running_) {
911       has_waited = true;
912       thread_flip_cond_->Wait(self);
913     }
914   }
915   ++disable_thread_flip_count_;
916   if (has_waited) {
917     uint64_t wait_time = NanoTime() - wait_start;
918     total_wait_time_ += wait_time;
919     if (wait_time > long_pause_log_threshold_) {
920       LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
921     }
922   }
923 }
924 
925 void Heap::DecrementDisableThreadFlip(Thread* self) {
926   // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
927   // the GC waiting before doing a thread flip.
928   CHECK(kUseReadBarrier);
929   self->DecrementDisableThreadFlipCount();
930   bool is_outermost = self->GetDisableThreadFlipCount() == 0;
931   if (!is_outermost) {
932     // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
933     // The global counter is decremented only once for a thread for the outermost exit.
934     return;
935   }
936   MutexLock mu(self, *thread_flip_lock_);
937   CHECK_GT(disable_thread_flip_count_, 0U);
938   --disable_thread_flip_count_;
939   if (disable_thread_flip_count_ == 0) {
940     // Potentially notify the GC thread blocking to begin a thread flip.
941     thread_flip_cond_->Broadcast(self);
942   }
943 }
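// Increment/DecrementDisableThreadFlip above form the mutator side of a counter-based handshake
// with ThreadFlipBegin/ThreadFlipEnd below: JNI critical sections bump a per-thread count, and
// only the outermost enter/exit touches disable_thread_flip_count_ and the condition variable, so
// nested critical sections stay cheap.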
944 
945 void Heap::ThreadFlipBegin(Thread* self) {
946   // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
947   // > 0, block. Otherwise, go ahead.
948   CHECK(kUseReadBarrier);
949   ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
950   MutexLock mu(self, *thread_flip_lock_);
951   thread_flip_cond_->CheckSafeToWait(self);
952   bool has_waited = false;
953   uint64_t wait_start = NanoTime();
954   CHECK(!thread_flip_running_);
955   // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
956   // GC. This is like a writer preference of a reader-writer lock.
957   thread_flip_running_ = true;
958   while (disable_thread_flip_count_ > 0) {
959     has_waited = true;
960     thread_flip_cond_->Wait(self);
961   }
962   if (has_waited) {
963     uint64_t wait_time = NanoTime() - wait_start;
964     total_wait_time_ += wait_time;
965     if (wait_time > long_pause_log_threshold_) {
966       LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
967     }
968   }
969 }
970 
971 void Heap::ThreadFlipEnd(Thread* self) {
972   // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
973   // waiting before doing a JNI critical.
974   CHECK(kUseReadBarrier);
975   MutexLock mu(self, *thread_flip_lock_);
976   CHECK(thread_flip_running_);
977   thread_flip_running_ = false;
978   // Potentially notify mutator threads blocking to enter a JNI critical section.
979   thread_flip_cond_->Broadcast(self);
980 }
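// GC-side pairing, as a sketch (the concurrent copying collector is the expected caller):
//
//   // heap->ThreadFlipBegin(self);   // waits for outstanding JNI critical sections
//   // ... flip thread roots / switch the from- and to-space views ...
//   // heap->ThreadFlipEnd(self);     // lets blocked mutators re-enter JNI criticals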
981 
982 void Heap::GrowHeapOnJankPerceptibleSwitch() {
983   MutexLock mu(Thread::Current(), process_state_update_lock_);
984   size_t orig_target_footprint = target_footprint_.load(std::memory_order_relaxed);
985   if (orig_target_footprint < min_foreground_target_footprint_) {
986     target_footprint_.compare_exchange_strong(orig_target_footprint,
987                                               min_foreground_target_footprint_,
988                                               std::memory_order_relaxed);
989   }
990   min_foreground_target_footprint_ = 0;
991 }
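// Note: the compare-exchange above is not retried on failure; presumably a concurrent update to
// target_footprint_ (for example from a finishing GC) makes re-applying the foreground minimum
// unnecessary.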
992 
993 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
994   if (old_process_state != new_process_state) {
995     const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
996     if (jank_perceptible) {
997       // Transition back to foreground right away to prevent jank.
998       RequestCollectorTransition(foreground_collector_type_, 0);
999       GrowHeapOnJankPerceptibleSwitch();
1000     } else {
1001       // Don't delay for debug builds since we may want to stress test the GC.
1002       // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
1003       // special handling which does a homogeneous space compaction once but then doesn't transition
1004       // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
1005       // transition the collector.
1006       RequestCollectorTransition(background_collector_type_,
1007                                  kStressCollectorTransition
1008                                      ? 0
1009                                      : kCollectorTransitionWait);
1010     }
1011   }
1012 }
1013 
1014 void Heap::CreateThreadPool() {
1015   const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
1016   if (num_threads != 0) {
1017     thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
1018   }
1019 }
1020 
1021 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
1022   space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
1023   space::ContinuousSpace* space2 = non_moving_space_;
1024   // TODO: Generalize this to n bitmaps?
1025   CHECK(space1 != nullptr);
1026   CHECK(space2 != nullptr);
1027   MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
1028                  (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
1029                  stack);
1030 }
1031 
1032 void Heap::DeleteThreadPool() {
1033   thread_pool_.reset(nullptr);
1034 }
1035 
1036 void Heap::AddSpace(space::Space* space) {
1037   CHECK(space != nullptr);
1038   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1039   if (space->IsContinuousSpace()) {
1040     DCHECK(!space->IsDiscontinuousSpace());
1041     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1042     // Continuous spaces don't necessarily have bitmaps.
1043     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1044     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1045     // The region space bitmap is not added since VisitObjects visits the region space objects with
1046     // special handling.
1047     if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1048       CHECK(mark_bitmap != nullptr);
1049       live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1050       mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
1051     }
1052     continuous_spaces_.push_back(continuous_space);
1053     // Ensure that spaces remain sorted in increasing order of start address.
1054     std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1055               [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1056       return a->Begin() < b->Begin();
1057     });
1058   } else {
1059     CHECK(space->IsDiscontinuousSpace());
1060     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1061     live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1062     mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1063     discontinuous_spaces_.push_back(discontinuous_space);
1064   }
1065   if (space->IsAllocSpace()) {
1066     alloc_spaces_.push_back(space->AsAllocSpace());
1067   }
1068 }
1069 
1070 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1071   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1072   if (continuous_space->IsDlMallocSpace()) {
1073     dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1074   } else if (continuous_space->IsRosAllocSpace()) {
1075     rosalloc_space_ = continuous_space->AsRosAllocSpace();
1076   }
1077 }
1078 
1079 void Heap::RemoveSpace(space::Space* space) {
1080   DCHECK(space != nullptr);
1081   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1082   if (space->IsContinuousSpace()) {
1083     DCHECK(!space->IsDiscontinuousSpace());
1084     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1085     // Continuous spaces don't necessarily have bitmaps.
1086     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1087     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1088     if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1089       DCHECK(mark_bitmap != nullptr);
1090       live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1091       mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1092     }
1093     auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1094     DCHECK(it != continuous_spaces_.end());
1095     continuous_spaces_.erase(it);
1096   } else {
1097     DCHECK(space->IsDiscontinuousSpace());
1098     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1099     live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1100     mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1101     auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1102                         discontinuous_space);
1103     DCHECK(it != discontinuous_spaces_.end());
1104     discontinuous_spaces_.erase(it);
1105   }
1106   if (space->IsAllocSpace()) {
1107     auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1108     DCHECK(it != alloc_spaces_.end());
1109     alloc_spaces_.erase(it);
1110   }
1111 }
1112 
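// Returns the currently allocated byte count weighted by the process CPU time elapsed since the
// given timestamp. Accumulated across GCs (see the Pre/Post helpers below), this approximates the
// integral of allocated bytes over process CPU time.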
1113 double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
1114                                                uint64_t current_process_cpu_time) const {
1115   uint64_t bytes_allocated = GetBytesAllocated();
1116   double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
1117   return weight * bytes_allocated;
1118 }
1119 
1120 void Heap::CalculatePreGcWeightedAllocatedBytes() {
1121   uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1122   pre_gc_weighted_allocated_bytes_ +=
1123     CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1124   pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1125 }
1126 
1127 void Heap::CalculatePostGcWeightedAllocatedBytes() {
1128   uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1129   post_gc_weighted_allocated_bytes_ +=
1130     CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1131   post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1132 }
1133 
1134 uint64_t Heap::GetTotalGcCpuTime() {
1135   uint64_t sum = 0;
1136   for (auto* collector : garbage_collectors_) {
1137     sum += collector->GetTotalCpuTime();
1138   }
1139   return sum;
1140 }
1141 
1142 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1143   // Dump cumulative timings.
1144   os << "Dumping cumulative Gc timings\n";
1145   uint64_t total_duration = 0;
1146   // Dump cumulative loggers for each GC type.
1147   uint64_t total_paused_time = 0;
1148   for (auto* collector : garbage_collectors_) {
1149     total_duration += collector->GetCumulativeTimings().GetTotalNs();
1150     total_paused_time += collector->GetTotalPausedTimeNs();
1151     collector->DumpPerformanceInfo(os);
1152   }
1153   if (total_duration != 0) {
1154     const double total_seconds = total_duration / 1.0e9;
1155     const double total_cpu_seconds = GetTotalGcCpuTime() / 1.0e9;
1156     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1157     os << "Mean GC size throughput: "
1158        << PrettySize(GetBytesFreedEver() / total_seconds) << "/s"
1159        << " per cpu-time: "
1160        << PrettySize(GetBytesFreedEver() / total_cpu_seconds) << "/s\n";
1161     os << "Mean GC object throughput: "
1162        << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1163   }
1164   uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1165   os << "Total number of allocations " << total_objects_allocated << "\n";
1166   os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1167   os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1168   os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1169   os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1170   os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1171   os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1172   os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1173   if (HasZygoteSpace()) {
1174     os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1175   }
1176   os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1177   os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1178   os << "Total GC count: " << GetGcCount() << "\n";
1179   os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1180   os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1181   os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1182 
1183   {
1184     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1185     if (gc_count_rate_histogram_.SampleSize() > 0U) {
1186       os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1187       gc_count_rate_histogram_.DumpBins(os);
1188       os << "\n";
1189     }
1190     if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1191       os << "Histogram of blocking GC count per "
1192          << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1193       blocking_gc_count_rate_histogram_.DumpBins(os);
1194       os << "\n";
1195     }
1196   }
1197 
1198   if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1199     rosalloc_space_->DumpStats(os);
1200   }
1201 
1202   os << "Native bytes total: " << GetNativeBytes()
1203      << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
1204 
1205   os << "Total native bytes at last GC: "
1206      << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
1207 
1208   BaseMutex::DumpAll(os);
1209 }
1210 
1211 void Heap::ResetGcPerformanceInfo() {
1212   for (auto* collector : garbage_collectors_) {
1213     collector->ResetMeasurements();
1214   }
1215 
1216   process_cpu_start_time_ns_ = ProcessCpuNanoTime();
1217 
1218   pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1219   pre_gc_weighted_allocated_bytes_ = 0u;
1220 
1221   post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1222   post_gc_weighted_allocated_bytes_ = 0u;
1223 
1224   total_bytes_freed_ever_.store(0);
1225   total_objects_freed_ever_.store(0);
1226   total_wait_time_ = 0;
1227   blocking_gc_count_ = 0;
1228   blocking_gc_time_ = 0;
1229   gc_count_last_window_ = 0;
1230   blocking_gc_count_last_window_ = 0;
1231   last_update_time_gc_count_rate_histograms_ =  // Round down by the window duration.
1232       (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1233   {
1234     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1235     gc_count_rate_histogram_.Reset();
1236     blocking_gc_count_rate_histogram_.Reset();
1237   }
1238 }
1239 
1240 uint64_t Heap::GetGcCount() const {
1241   uint64_t gc_count = 0U;
1242   for (auto* collector : garbage_collectors_) {
1243     gc_count += collector->GetCumulativeTimings().GetIterations();
1244   }
1245   return gc_count;
1246 }
1247 
1248 uint64_t Heap::GetGcTime() const {
1249   uint64_t gc_time = 0U;
1250   for (auto* collector : garbage_collectors_) {
1251     gc_time += collector->GetCumulativeTimings().GetTotalNs();
1252   }
1253   return gc_time;
1254 }
1255 
1256 uint64_t Heap::GetBlockingGcCount() const {
1257   return blocking_gc_count_;
1258 }
1259 
1260 uint64_t Heap::GetBlockingGcTime() const {
1261   return blocking_gc_time_;
1262 }
1263 
1264 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1265   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1266   if (gc_count_rate_histogram_.SampleSize() > 0U) {
1267     gc_count_rate_histogram_.DumpBins(os);
1268   }
1269 }
1270 
1271 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1272   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1273   if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1274     blocking_gc_count_rate_histogram_.DumpBins(os);
1275   }
1276 }
1277 
1278 ALWAYS_INLINE
1279 static inline AllocationListener* GetAndOverwriteAllocationListener(
1280     Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1281   return storage->exchange(new_value);
1282 }
1283 
1284 Heap::~Heap() {
1285   VLOG(heap) << "Starting ~Heap()";
1286   STLDeleteElements(&garbage_collectors_);
1287   // If we don't reset then the mark stack complains in its destructor.
1288   allocation_stack_->Reset();
1289   allocation_records_.reset();
1290   live_stack_->Reset();
1291   STLDeleteValues(&mod_union_tables_);
1292   STLDeleteValues(&remembered_sets_);
1293   STLDeleteElements(&continuous_spaces_);
1294   STLDeleteElements(&discontinuous_spaces_);
1295   delete gc_complete_lock_;
1296   delete thread_flip_lock_;
1297   delete pending_task_lock_;
1298   delete backtrace_lock_;
1299   uint64_t unique_count = unique_backtrace_count_.load();
1300   uint64_t seen_count = seen_backtrace_count_.load();
1301   if (unique_count != 0 || seen_count != 0) {
1302     LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
1303   }
1304   VLOG(heap) << "Finished ~Heap()";
1305 }
1306 
1307 
1308 space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
1309   for (const auto& space : continuous_spaces_) {
1310     if (space->Contains(addr)) {
1311       return space;
1312     }
1313   }
1314   return nullptr;
1315 }
1316 
1317 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1318                                                             bool fail_ok) const {
1319   space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1320   if (space != nullptr) {
1321     return space;
1322   }
1323   if (!fail_ok) {
1324     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1325   }
1326   return nullptr;
1327 }
1328 
1329 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1330                                                                   bool fail_ok) const {
1331   for (const auto& space : discontinuous_spaces_) {
1332     if (space->Contains(obj.Ptr())) {
1333       return space;
1334     }
1335   }
1336   if (!fail_ok) {
1337     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1338   }
1339   return nullptr;
1340 }
1341 
1342 space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
1343   space::Space* result = FindContinuousSpaceFromObject(obj, true);
1344   if (result != nullptr) {
1345     return result;
1346   }
1347   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1348 }
1349 
1350 space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1351   for (const auto& space : continuous_spaces_) {
1352     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1353       return space;
1354     }
1355   }
1356   for (const auto& space : discontinuous_spaces_) {
1357     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1358       return space;
1359     }
1360   }
1361   return nullptr;
1362 }
1363 
1364 std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
1365   space::Space* space = FindSpaceFromAddress(addr);
1366   return (space != nullptr) ? space->GetName() : "no space";
1367 }
1368 
1369 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1370   // If we're in a stack overflow, do not create a new exception. It would require running the
1371   // constructor, which will of course still be in a stack overflow.
1372   if (self->IsHandlingStackOverflow()) {
1373     self->SetException(
1374         Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
1375     return;
1376   }
1377 
1378   std::ostringstream oss;
1379   size_t total_bytes_free = GetFreeMemory();
1380   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1381       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
1382       << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
1383       << ", growth limit "
1384       << growth_limit_;
1385   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1386   if (total_bytes_free >= byte_count) {
1387     space::AllocSpace* space = nullptr;
1388     if (allocator_type == kAllocatorTypeNonMoving) {
1389       space = non_moving_space_;
1390     } else if (allocator_type == kAllocatorTypeRosAlloc ||
1391                allocator_type == kAllocatorTypeDlMalloc) {
1392       space = main_space_;
1393     } else if (allocator_type == kAllocatorTypeBumpPointer ||
1394                allocator_type == kAllocatorTypeTLAB) {
1395       space = bump_pointer_space_;
1396     } else if (allocator_type == kAllocatorTypeRegion ||
1397                allocator_type == kAllocatorTypeRegionTLAB) {
1398       space = region_space_;
1399     }
1400 
1401     // There is no fragmentation info to log for large-object space.
1402     if (allocator_type != kAllocatorTypeLOS) {
1403       CHECK(space != nullptr) << "allocator_type:" << allocator_type
1404                               << " byte_count:" << byte_count
1405                               << " total_bytes_free:" << total_bytes_free;
1406       space->LogFragmentationAllocFailure(oss, byte_count);
1407     }
1408   }
1409   self->ThrowOutOfMemoryError(oss.str().c_str());
1410 }
1411 
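// Carries out the collector transition that was previously requested (see
// RequestCollectorTransition above). Compaction-style transitions are skipped while the process
// is jank perceptible, since they would pause the mutators noticeably.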
1412 void Heap::DoPendingCollectorTransition() {
1413   CollectorType desired_collector_type = desired_collector_type_;
1414   // Launch homogeneous space compaction if it is desired.
1415   if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1416     if (!CareAboutPauseTimes()) {
1417       PerformHomogeneousSpaceCompact();
1418     } else {
1419       VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1420     }
1421   } else if (desired_collector_type == kCollectorTypeCCBackground) {
1422     DCHECK(kUseReadBarrier);
1423     if (!CareAboutPauseTimes()) {
1424       // Invoke CC full compaction.
1425       CollectGarbageInternal(collector::kGcTypeFull,
1426                              kGcCauseCollectorTransition,
1427                              /*clear_soft_references=*/false);
1428     } else {
1429       VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1430     }
1431   } else {
1432     CHECK_EQ(desired_collector_type, collector_type_) << "Unsupported collector transition";
1433   }
1434 }
1435 
1436 void Heap::Trim(Thread* self) {
1437   Runtime* const runtime = Runtime::Current();
1438   if (!CareAboutPauseTimes()) {
1439     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
1440     // about pauses.
1441     ScopedTrace trace("Deflating monitors");
1442     // Avoid race conditions on the lock word for CC.
1443     ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1444     ScopedSuspendAll ssa(__FUNCTION__);
1445     uint64_t start_time = NanoTime();
1446     size_t count = runtime->GetMonitorList()->DeflateMonitors();
1447     VLOG(heap) << "Deflating " << count << " monitors took "
1448         << PrettyDuration(NanoTime() - start_time);
1449   }
1450   TrimIndirectReferenceTables(self);
1451   TrimSpaces(self);
1452   // Trim arenas that may have been used by JIT or verifier.
1453   runtime->GetArenaPool()->TrimMaps();
1454 }
1455 
1456 class TrimIndirectReferenceTableClosure : public Closure {
1457  public:
1458   explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1459   }
1460   void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
1461     thread->GetJniEnv()->TrimLocals();
1462     // If thread is a running mutator, then act on behalf of the trim thread.
1463     // See the code in ThreadList::RunCheckpoint.
1464     barrier_->Pass(Thread::Current());
1465   }
1466 
1467  private:
1468   Barrier* const barrier_;
1469 };
1470 
1471 void Heap::TrimIndirectReferenceTables(Thread* self) {
1472   ScopedObjectAccess soa(self);
1473   ScopedTrace trace(__PRETTY_FUNCTION__);
1474   JavaVMExt* vm = soa.Vm();
1475   // Trim globals indirect reference table.
1476   vm->TrimGlobals();
1477   // Trim locals indirect reference tables.
1478   Barrier barrier(0);
1479   TrimIndirectReferenceTableClosure closure(&barrier);
1480   ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1481   size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1482   if (barrier_count != 0) {
1483     barrier.Increment(self, barrier_count);
1484   }
1485 }
1486 
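// Records that a collection of the given type is starting: waits under gc_complete_lock_ for any
// in-progress GC to finish, then publishes the running collector type, cause, and GC thread.
// Paired with FinishGC(), as in TrimSpaces() below.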
1487 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1488   // Need to do this before acquiring the locks since we don't want to get suspended while
1489   // holding any locks.
1490   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1491   MutexLock mu(self, *gc_complete_lock_);
1492   // Ensure there is only one GC at a time.
1493   WaitForGcToCompleteLocked(cause, self);
1494   collector_type_running_ = collector_type;
1495   last_gc_cause_ = cause;
1496   thread_running_gc_ = self;
1497 }
1498 
1499 void Heap::TrimSpaces(Thread* self) {
1500   // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1501   // trimming.
1502   StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1503   ScopedTrace trace(__PRETTY_FUNCTION__);
1504   const uint64_t start_ns = NanoTime();
1505   // Trim the managed spaces.
1506   uint64_t total_alloc_space_allocated = 0;
1507   uint64_t total_alloc_space_size = 0;
1508   uint64_t managed_reclaimed = 0;
1509   {
1510     ScopedObjectAccess soa(self);
1511     for (const auto& space : continuous_spaces_) {
1512       if (space->IsMallocSpace()) {
1513         gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1514         if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1515           // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1516           // for a long period of time.
1517           managed_reclaimed += malloc_space->Trim();
1518         }
1519         total_alloc_space_size += malloc_space->Size();
1520       }
1521     }
1522   }
1523   total_alloc_space_allocated = GetBytesAllocated();
1524   if (large_object_space_ != nullptr) {
1525     total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1526   }
1527   if (bump_pointer_space_ != nullptr) {
1528     total_alloc_space_allocated -= bump_pointer_space_->Size();
1529   }
1530   if (region_space_ != nullptr) {
1531     total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1532   }
1533   const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1534       static_cast<float>(total_alloc_space_size);
1535   uint64_t gc_heap_end_ns = NanoTime();
1536   // We never move things in the native heap, so we can finish the GC at this point.
1537   FinishGC(self, collector::kGcTypeNone);
1538 
1539   VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1540       << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1541       << static_cast<int>(100 * managed_utilization) << "%.";
1542 }
1543 
1544 bool Heap::IsValidObjectAddress(const void* addr) const {
1545   if (addr == nullptr) {
1546     return true;
1547   }
1548   return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
1549 }
1550 
1551 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1552   return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
1553 }
1554 
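// Best-effort liveness check used by heap verification. Consults the moving spaces directly, the
// live bitmaps of continuous and discontinuous spaces, and optionally the allocation and live
// stacks, retrying a few times because the stacks can be swapped concurrently with this check.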
1555 bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1556                               bool search_allocation_stack,
1557                               bool search_live_stack,
1558                               bool sorted) {
1559   if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
1560     return false;
1561   }
1562   if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
1563     mirror::Class* klass = obj->GetClass<kVerifyNone>();
1564     if (obj == klass) {
1565       // This case happens for java.lang.Class.
1566       return true;
1567     }
1568     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1569   } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
1570     // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1571     // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
1572     return temp_space_->Contains(obj.Ptr());
1573   }
1574   if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
1575     return true;
1576   }
1577   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1578   space::DiscontinuousSpace* d_space = nullptr;
1579   if (c_space != nullptr) {
1580     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1581       return true;
1582     }
1583   } else {
1584     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1585     if (d_space != nullptr) {
1586       if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1587         return true;
1588       }
1589     }
1590   }
1591   // This is covering the allocation/live stack swapping that is done without mutators suspended.
1592   for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1593     if (i > 0) {
1594       NanoSleep(MsToNs(10));
1595     }
1596     if (search_allocation_stack) {
1597       if (sorted) {
1598         if (allocation_stack_->ContainsSorted(obj.Ptr())) {
1599           return true;
1600         }
1601       } else if (allocation_stack_->Contains(obj.Ptr())) {
1602         return true;
1603       }
1604     }
1605 
1606     if (search_live_stack) {
1607       if (sorted) {
1608         if (live_stack_->ContainsSorted(obj.Ptr())) {
1609           return true;
1610         }
1611       } else if (live_stack_->Contains(obj.Ptr())) {
1612         return true;
1613       }
1614     }
1615   }
1616   // We need to check the bitmaps again since there is a race where we mark something as live and
1617   // then clear the stack containing it.
1618   if (c_space != nullptr) {
1619     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1620       return true;
1621     }
1622   } else {
1623     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1624     if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1625       return true;
1626     }
1627   }
1628   return false;
1629 }
1630 
1631 std::string Heap::DumpSpaces() const {
1632   std::ostringstream oss;
1633   DumpSpaces(oss);
1634   return oss.str();
1635 }
1636 
1637 void Heap::DumpSpaces(std::ostream& stream) const {
1638   for (const auto& space : continuous_spaces_) {
1639     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1640     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1641     stream << space << " " << *space << "\n";
1642     if (live_bitmap != nullptr) {
1643       stream << live_bitmap << " " << *live_bitmap << "\n";
1644     }
1645     if (mark_bitmap != nullptr) {
1646       stream << mark_bitmap << " " << *mark_bitmap << "\n";
1647     }
1648   }
1649   for (const auto& space : discontinuous_spaces_) {
1650     stream << space << " " << *space << "\n";
1651   }
1652 }
1653 
1654 void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1655   if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1656     return;
1657   }
1658 
1659   // Ignore early dawn of the universe verifications.
1660   if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) {
1661     return;
1662   }
1663   CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1664   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1665   CHECK(c != nullptr) << "Null class in object " << obj;
1666   CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1667   CHECK(VerifyClassClass(c));
1668 
1669   if (verify_object_mode_ > kVerifyObjectModeFast) {
1670     // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1671     CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1672   }
1673 }
1674 
1675 void Heap::VerifyHeap() {
1676   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1677   auto visitor = [&](mirror::Object* obj) {
1678     VerifyObjectBody(obj);
1679   };
1680   // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1681   // NO_THREAD_SAFETY_ANALYSIS.
1682   auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1683     GetLiveBitmap()->Visit(visitor);
1684   };
1685   no_thread_safety_analysis();
1686 }
1687 
1688 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1689   // Use signed comparison since freed bytes can be negative when background compaction to
1690   // foreground transitions occur. This is typically due to objects moving from a bump pointer
1691   // space to a free list backed space, which may increase memory footprint due to padding and binning.
1692   RACING_DCHECK_LE(freed_bytes,
1693                    static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
1694   // Note: This relies on 2's complement for handling negative freed_bytes.
1695   num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
1696   if (Runtime::Current()->HasStatsEnabled()) {
1697     RuntimeStats* thread_stats = Thread::Current()->GetStats();
1698     thread_stats->freed_objects += freed_objects;
1699     thread_stats->freed_bytes += freed_bytes;
1700     // TODO: Do this concurrently.
1701     RuntimeStats* global_stats = Runtime::Current()->GetStats();
1702     global_stats->freed_objects += freed_objects;
1703     global_stats->freed_bytes += freed_bytes;
1704   }
1705 }
1706 
1707 void Heap::RecordFreeRevoke() {
1708   // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1709   // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1710   // If there's a concurrent revoke, ok to not necessarily reset num_bytes_freed_revoke_
1711   // all the way to zero exactly as the remainder will be subtracted at the next GC.
1712   size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
1713   CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1714            bytes_freed) << "num_bytes_freed_revoke_ underflow";
1715   CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1716            bytes_freed) << "num_bytes_allocated_ underflow";
1717   GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1718 }
1719 
1720 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1721   if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1722     return rosalloc_space_;
1723   }
1724   for (const auto& space : continuous_spaces_) {
1725     if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1726       if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1727         return space->AsContinuousSpace()->AsRosAllocSpace();
1728       }
1729     }
1730   }
1731   return nullptr;
1732 }
1733 
1734 static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1735   instrumentation::Instrumentation* const instrumentation =
1736       Runtime::Current()->GetInstrumentation();
1737   return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1738 }
1739 
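// Slow allocation path taken after TryToAllocate has failed. Waits for any in-progress GC, then
// escalates through increasingly aggressive collections (next_gc_type_, then a full GC that also
// clears SoftReferences, then homogeneous space compaction for the malloc-backed allocators),
// retrying the allocation after each step, and finally throws OutOfMemoryError if nothing helped.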
1740 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1741                                              AllocatorType allocator,
1742                                              bool instrumented,
1743                                              size_t alloc_size,
1744                                              size_t* bytes_allocated,
1745                                              size_t* usable_size,
1746                                              size_t* bytes_tl_bulk_allocated,
1747                                              ObjPtr<mirror::Class>* klass) {
1748   // After a GC (due to allocation failure) at least this fraction of the current max heap
1749   // size should be free. Otherwise throw OOME.
1750   constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
1751   bool was_default_allocator = allocator == GetCurrentAllocator();
1752   // Make sure there is no pending exception since we may need to throw an OOME.
1753   self->AssertNoPendingException();
1754   DCHECK(klass != nullptr);
1755 
1756   StackHandleScope<1> hs(self);
1757   HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));
1758 
1759   auto send_object_pre_alloc =
1760       [&]() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
1761         if (UNLIKELY(instrumented)) {
1762           AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
1763           if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
1764             l->PreObjectAllocated(self, h_klass, &alloc_size);
1765           }
1766         }
1767       };
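// Runs `op` in a scope that temporarily allows thread suspension (the caller is otherwise in an
// uninterruptible region), then re-delivers the PreObjectAllocated event, which may itself
// suspend us and adjust alloc_size, before returning the result of `op`.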
1768 #define PERFORM_SUSPENDING_OPERATION(op)                                          \
1769   [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
1770     ScopedAllowThreadSuspension ats;                                              \
1771     auto res = (op);                                                              \
1772     send_object_pre_alloc();                                                      \
1773     return res;                                                                   \
1774   }()
1775 
1776   // The allocation failed. If the GC is running, block until it completes, and then retry the
1777   // allocation.
1778   collector::GcType last_gc =
1779       PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self));
1780   // If we were the default allocator but the allocator changed while we were suspended,
1781   // abort the allocation.
1782   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1783       (!instrumented && EntrypointsInstrumented())) {
1784     return nullptr;
1785   }
1786   if (last_gc != collector::kGcTypeNone) {
1787     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1788     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1789                                                      usable_size, bytes_tl_bulk_allocated);
1790     if (ptr != nullptr) {
1791       return ptr;
1792     }
1793   }
1794 
1795   auto have_reclaimed_enough = [&]() {
1796     size_t curr_bytes_allocated = GetBytesAllocated();
1797     double curr_free_heap =
1798         static_cast<double>(growth_limit_ - curr_bytes_allocated) / growth_limit_;
1799     return curr_free_heap >= kMinFreeHeapAfterGcForAlloc;
1800   };
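  // Only treat a GC as having helped if it left at least kMinFreeHeapAfterGcForAlloc (1%) of the
  // growth limit free; otherwise retrying the allocation is unlikely to succeed.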
1801   // We perform one GC as per the next_gc_type_ (chosen in GrowForUtilization),
1802   // if it's not already tried. If that doesn't succeed then go for the most
1803   // exhaustive option. Perform a full-heap collection including clearing
1804   // SoftReferences. In case of ConcurrentCopying, it will also ensure that
1805   // all regions are evacuated. If allocation doesn't succeed even after that
1806   // then there is no hope, so we throw OOME.
1807   collector::GcType tried_type = next_gc_type_;
1808   if (last_gc < tried_type) {
1809     const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
1810         CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
1811 
1812     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1813         (!instrumented && EntrypointsInstrumented())) {
1814       return nullptr;
1815     }
1816     if (gc_ran && have_reclaimed_enough()) {
1817       mirror::Object* ptr = TryToAllocate<true, false>(self, allocator,
1818                                                        alloc_size, bytes_allocated,
1819                                                        usable_size, bytes_tl_bulk_allocated);
1820       if (ptr != nullptr) {
1821         return ptr;
1822       }
1823     }
1824   }
1825   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1826   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1827   // VM spec requires that all SoftReferences have been collected and cleared before throwing
1828   // OOME.
1829   VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1830            << " allocation";
1831   // TODO: Run finalization, but this may cause more allocations to occur.
1832   // We don't need a WaitForGcToComplete here either.
1833   DCHECK(!gc_plan_.empty());
1834   PERFORM_SUSPENDING_OPERATION(CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true));
1835   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1836       (!instrumented && EntrypointsInstrumented())) {
1837     return nullptr;
1838   }
1839   mirror::Object* ptr = nullptr;
1840   if (have_reclaimed_enough()) {
1841     ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1842                                     usable_size, bytes_tl_bulk_allocated);
1843   }
1844 
1845   if (ptr == nullptr) {
1846     const uint64_t current_time = NanoTime();
1847     switch (allocator) {
1848       case kAllocatorTypeRosAlloc:
1849         // Fall-through.
1850       case kAllocatorTypeDlMalloc: {
1851         if (use_homogeneous_space_compaction_for_oom_ &&
1852             current_time - last_time_homogeneous_space_compaction_by_oom_ >
1853             min_interval_homogeneous_space_compaction_by_oom_) {
1854           last_time_homogeneous_space_compaction_by_oom_ = current_time;
1855           HomogeneousSpaceCompactResult result =
1856               PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact());
1857           // Thread suspension could have occurred.
1858           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1859               (!instrumented && EntrypointsInstrumented())) {
1860             return nullptr;
1861           }
1862           switch (result) {
1863             case HomogeneousSpaceCompactResult::kSuccess:
1864               // If the allocation succeeded, we delayed an oom.
1865               ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1866                                               usable_size, bytes_tl_bulk_allocated);
1867               if (ptr != nullptr) {
1868                 count_delayed_oom_++;
1869               }
1870               break;
1871             case HomogeneousSpaceCompactResult::kErrorReject:
1872               // Reject due to disabled moving GC.
1873               break;
1874             case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1875               // Throw OOM by default.
1876               break;
1877             default: {
1878               UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1879                   << static_cast<size_t>(result);
1880               UNREACHABLE();
1881             }
1882           }
1883           // Always print that we ran homogeneous space compaction since this can cause jank.
1884           VLOG(heap) << "Ran heap homogeneous space compaction, "
1885                     << " requested defragmentation "
1886                     << count_requested_homogeneous_space_compaction_.load()
1887                     << " performed defragmentation "
1888                     << count_performed_homogeneous_space_compaction_.load()
1889                     << " ignored homogeneous space compaction "
1890                     << count_ignored_homogeneous_space_compaction_.load()
1891                     << " delayed count = "
1892                     << count_delayed_oom_.load();
1893         }
1894         break;
1895       }
1896       default: {
1897         // Do nothing for other allocators.
1898       }
1899     }
1900   }
1901 #undef PERFORM_SUSPENDING_OPERATION
1902   // If the allocation hasn't succeeded by this point, throw an OOM error.
1903   if (ptr == nullptr) {
1904     ScopedAllowThreadSuspension ats;
1905     ThrowOutOfMemoryError(self, alloc_size, allocator);
1906   }
1907   return ptr;
1908 }
1909 
1910 void Heap::SetTargetHeapUtilization(float target) {
1911   DCHECK_GT(target, 0.1f);  // asserted in Java code
1912   DCHECK_LT(target, 1.0f);
1913   target_utilization_ = target;
1914 }
1915 
1916 size_t Heap::GetObjectsAllocated() const {
1917   Thread* const self = Thread::Current();
1918   ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1919   // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
1920   // us to suspend while we are doing SuspendAll. b/35232978
1921   gc::ScopedGCCriticalSection gcs(Thread::Current(),
1922                                   gc::kGcCauseGetObjectsAllocated,
1923                                   gc::kCollectorTypeGetObjectsAllocated);
1924   // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1925   ScopedSuspendAll ssa(__FUNCTION__);
1926   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1927   size_t total = 0;
1928   for (space::AllocSpace* space : alloc_spaces_) {
1929     total += space->GetObjectsAllocated();
1930   }
1931   return total;
1932 }
1933 
1934 uint64_t Heap::GetObjectsAllocatedEver() const {
1935   uint64_t total = GetObjectsFreedEver();
1936   // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1937   if (Thread::Current() != nullptr) {
1938     total += GetObjectsAllocated();
1939   }
1940   return total;
1941 }
1942 
1943 uint64_t Heap::GetBytesAllocatedEver() const {
1944   // Force the returned value to be monotonically increasing, in the sense that if this is called
1945   // at A and B, such that A happens-before B, then the call at B returns a value no smaller than
1946   // that at A. This is not otherwise guaranteed, since num_bytes_allocated_ is decremented first,
1947   // and total_bytes_freed_ever_ is incremented later.
1948   static std::atomic<uint64_t> max_bytes_so_far(0);
1949   uint64_t so_far = max_bytes_so_far.load(std::memory_order_relaxed);
1950   uint64_t current_bytes = GetBytesFreedEver(std::memory_order_acquire);
1951   current_bytes += GetBytesAllocated();
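  // Publish the new maximum via a CAS loop; if another thread has already published a larger
  // value, return that larger value instead so the result never appears to decrease.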
1952   do {
1953     if (current_bytes <= so_far) {
1954       return so_far;
1955     }
1956   } while (!max_bytes_so_far.compare_exchange_weak(so_far /* updated */,
1957                                                    current_bytes, std::memory_order_relaxed));
1958   return current_bytes;
1959 }
1960 
1961 // Check whether the given object is an instance of the given class.
1962 static bool MatchesClass(mirror::Object* obj,
1963                          Handle<mirror::Class> h_class,
1964                          bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
1965   mirror::Class* instance_class = obj->GetClass();
1966   CHECK(instance_class != nullptr);
1967   ObjPtr<mirror::Class> klass = h_class.Get();
1968   if (use_is_assignable_from) {
1969     return klass != nullptr && klass->IsAssignableFrom(instance_class);
1970   }
1971   return instance_class == klass;
1972 }
1973 
1974 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
1975                           bool use_is_assignable_from,
1976                           uint64_t* counts) {
1977   auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1978     for (size_t i = 0; i < classes.size(); ++i) {
1979       if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
1980         ++counts[i];
1981       }
1982     }
1983   };
1984   VisitObjects(instance_counter);
1985 }
1986 
1987 void Heap::GetInstances(VariableSizedHandleScope& scope,
1988                         Handle<mirror::Class> h_class,
1989                         bool use_is_assignable_from,
1990                         int32_t max_count,
1991                         std::vector<Handle<mirror::Object>>& instances) {
1992   DCHECK_GE(max_count, 0);
1993   auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1994     if (MatchesClass(obj, h_class, use_is_assignable_from)) {
1995       if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
1996         instances.push_back(scope.NewHandle(obj));
1997       }
1998     }
1999   };
2000   VisitObjects(instance_collector);
2001 }
2002 
2003 void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
2004                                Handle<mirror::Object> o,
2005                                int32_t max_count,
2006                                std::vector<Handle<mirror::Object>>& referring_objects) {
2007   class ReferringObjectsFinder {
2008    public:
2009     ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
2010                            Handle<mirror::Object> object_in,
2011                            int32_t max_count_in,
2012                            std::vector<Handle<mirror::Object>>& referring_objects_in)
2013         REQUIRES_SHARED(Locks::mutator_lock_)
2014         : scope_(scope_in),
2015           object_(object_in),
2016           max_count_(max_count_in),
2017           referring_objects_(referring_objects_in) {}
2018 
2019     // For Object::VisitReferences.
2020     void operator()(ObjPtr<mirror::Object> obj,
2021                     MemberOffset offset,
2022                     bool is_static ATTRIBUTE_UNUSED) const
2023         REQUIRES_SHARED(Locks::mutator_lock_) {
2024       mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
2025       if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
2026         referring_objects_.push_back(scope_.NewHandle(obj));
2027       }
2028     }
2029 
2030     void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
2031         const {}
2032     void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
2033 
2034    private:
2035     VariableSizedHandleScope& scope_;
2036     Handle<mirror::Object> const object_;
2037     const uint32_t max_count_;
2038     std::vector<Handle<mirror::Object>>& referring_objects_;
2039     DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
2040   };
2041   ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
2042   auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2043     obj->VisitReferences(finder, VoidFunctor());
2044   };
2045   VisitObjects(referring_objects_finder);
2046 }
2047 
2048 void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
2049   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2050   // last GC will not necessarily have been cleared.
2051   CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references);
2052 }
2053 
2054 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2055   return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2056       foreground_collector_type_ == kCollectorTypeCMS;
2057 }
2058 
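// Defragments the main malloc space by copying all live objects into the backup main space with
// the semi-space collector and then swapping the two spaces. Called from collector transitions
// and as a last resort on allocation failure (see AllocateInternalWithGc above).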
2059 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2060   Thread* self = Thread::Current();
2061   // Inc requested homogeneous space compaction.
2062   count_requested_homogeneous_space_compaction_++;
2063   // Store performed homogeneous space compaction at a new request arrival.
2064   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2065   Locks::mutator_lock_->AssertNotHeld(self);
2066   {
2067     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2068     MutexLock mu(self, *gc_complete_lock_);
2069     // Ensure there is only one GC at a time.
2070     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2071     // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable
2072     // count is non zero.
2073     // If the collector type changed to something which doesn't benefit from homogeneous space
2074     // compaction, exit.
2075     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2076         !main_space_->CanMoveObjects()) {
2077       return kErrorReject;
2078     }
2079     if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2080       return kErrorUnsupported;
2081     }
2082     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2083   }
2084   if (Runtime::Current()->IsShuttingDown(self)) {
2085     // Don't allow heap transitions to happen if the runtime is shutting down since these can
2086     // cause objects to get finalized.
2087     FinishGC(self, collector::kGcTypeNone);
2088     return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2089   }
2090   collector::GarbageCollector* collector;
2091   {
2092     ScopedSuspendAll ssa(__FUNCTION__);
2093     uint64_t start_time = NanoTime();
2094     // Launch compaction.
2095     space::MallocSpace* to_space = main_space_backup_.release();
2096     space::MallocSpace* from_space = main_space_;
2097     to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2098     const uint64_t space_size_before_compaction = from_space->Size();
2099     AddSpace(to_space);
2100     // Make sure that we will have enough room to copy.
2101     CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2102     collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2103     const uint64_t space_size_after_compaction = to_space->Size();
2104     main_space_ = to_space;
2105     main_space_backup_.reset(from_space);
2106     RemoveSpace(from_space);
2107     SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
2108     // Update performed homogeneous space compaction count.
2109     count_performed_homogeneous_space_compaction_++;
2110     // Print statistics and resume all threads.
2111     uint64_t duration = NanoTime() - start_time;
2112     VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2113                << PrettySize(space_size_before_compaction) << " -> "
2114                << PrettySize(space_size_after_compaction) << " compact-ratio: "
2115                << std::fixed << static_cast<double>(space_size_after_compaction) /
2116                static_cast<double>(space_size_before_compaction);
2117   }
2118   // Finish GC.
2119   // Get the references we need to enqueue.
2120   SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2121   GrowForUtilization(semi_space_collector_);
2122   LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2123   FinishGC(self, collector::kGcTypeFull);
2124   // Enqueue any references after losing the GC locks.
2125   clear->Run(self);
2126   clear->Finalize();
2127   {
2128     ScopedObjectAccess soa(self);
2129     soa.Vm()->UnloadNativeLibraries();
2130   }
2131   return HomogeneousSpaceCompactResult::kSuccess;
2132 }
2133 
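// Switches the active collector type: rebuilds gc_plan_ (the ordered list of GC types to try,
// from cheapest to full), selects the matching allocator, and recomputes the threshold at which
// concurrent GCs are started.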
2134 void Heap::ChangeCollector(CollectorType collector_type) {
2135   // TODO: Only do this with all mutators suspended to avoid races.
2136   if (collector_type != collector_type_) {
2137     collector_type_ = collector_type;
2138     gc_plan_.clear();
2139     switch (collector_type_) {
2140       case kCollectorTypeCC: {
2141         if (use_generational_cc_) {
2142           gc_plan_.push_back(collector::kGcTypeSticky);
2143         }
2144         gc_plan_.push_back(collector::kGcTypeFull);
2145         if (use_tlab_) {
2146           ChangeAllocator(kAllocatorTypeRegionTLAB);
2147         } else {
2148           ChangeAllocator(kAllocatorTypeRegion);
2149         }
2150         break;
2151       }
2152       case kCollectorTypeSS: {
2153         gc_plan_.push_back(collector::kGcTypeFull);
2154         if (use_tlab_) {
2155           ChangeAllocator(kAllocatorTypeTLAB);
2156         } else {
2157           ChangeAllocator(kAllocatorTypeBumpPointer);
2158         }
2159         break;
2160       }
2161       case kCollectorTypeMS: {
2162         gc_plan_.push_back(collector::kGcTypeSticky);
2163         gc_plan_.push_back(collector::kGcTypePartial);
2164         gc_plan_.push_back(collector::kGcTypeFull);
2165         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2166         break;
2167       }
2168       case kCollectorTypeCMS: {
2169         gc_plan_.push_back(collector::kGcTypeSticky);
2170         gc_plan_.push_back(collector::kGcTypePartial);
2171         gc_plan_.push_back(collector::kGcTypeFull);
2172         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2173         break;
2174       }
2175       default: {
2176         UNIMPLEMENTED(FATAL);
2177         UNREACHABLE();
2178       }
2179     }
2180     if (IsGcConcurrent()) {
2181       concurrent_start_bytes_ =
2182           UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
2183                              kMinConcurrentRemainingBytes);
2184     } else {
2185       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2186     }
2187   }
2188 }
2189 
2190 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2191 class ZygoteCompactingCollector final : public collector::SemiSpace {
2192  public:
2193   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2194       : SemiSpace(heap, "zygote collector"),
2195         bin_live_bitmap_(nullptr),
2196         bin_mark_bitmap_(nullptr),
2197         is_running_on_memory_tool_(is_running_on_memory_tool) {}
2198 
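  // Walks the space's live bitmap in address order and records every gap between consecutive live
  // objects (plus the tail of the space) as a free bin keyed by its size, so that
  // MarkNonForwardedObject below can place forwarded objects into the smallest fitting gap.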
2199   void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
2200     bin_live_bitmap_ = space->GetLiveBitmap();
2201     bin_mark_bitmap_ = space->GetMarkBitmap();
2202     uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
2203     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2204     // Note: This requires traversing the space in increasing order of object addresses.
2205     auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2206       uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2207       size_t bin_size = object_addr - prev;
2208       // Add the bin spanning from the end of the previous object to the start of the current object.
2209       AddBin(bin_size, prev);
2210       prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2211     };
2212     bin_live_bitmap_->Walk(visitor);
2213     // Add the last bin which spans after the last object to the end of the space.
2214     AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
2215   }
2216 
2217  private:
2218   // Maps from bin sizes to locations.
2219   std::multimap<size_t, uintptr_t> bins_;
2220   // Live bitmap of the space which contains the bins.
2221   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2222   // Mark bitmap of the space which contains the bins.
2223   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2224   const bool is_running_on_memory_tool_;
2225 
2226   void AddBin(size_t size, uintptr_t position) {
2227     if (is_running_on_memory_tool_) {
2228       MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2229     }
2230     if (size != 0) {
2231       bins_.insert(std::make_pair(size, position));
2232     }
2233   }
2234 
2235   bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override {
2236     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2237     // allocator.
2238     return false;
2239   }
2240 
2241   mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
2242       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2243     size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
2244     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2245     mirror::Object* forward_address;
2246     // Find the smallest bin into which we can move obj.
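         // lower_bound returns the first bin whose size is >= alloc_size; any leftover space in
         // the chosen bin is re-added as a smaller bin below.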
2247     auto it = bins_.lower_bound(alloc_size);
2248     if (it == bins_.end()) {
2249       // No available space in the bins, place it in the target space instead (grows the zygote
2250       // space).
2251       size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
2252       forward_address = to_space_->Alloc(
2253           self_, alloc_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
2254       if (to_space_live_bitmap_ != nullptr) {
2255         to_space_live_bitmap_->Set(forward_address);
2256       } else {
2257         GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2258         GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2259       }
2260     } else {
2261       size_t size = it->first;
2262       uintptr_t pos = it->second;
2263       bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
2264       forward_address = reinterpret_cast<mirror::Object*>(pos);
2265       // Set the live and mark bits so that sweeping system weaks works properly.
2266       bin_live_bitmap_->Set(forward_address);
2267       bin_mark_bitmap_->Set(forward_address);
2268       DCHECK_GE(size, alloc_size);
2269       // Add a new bin with the remaining space.
2270       AddBin(size - alloc_size, pos + alloc_size);
2271     }
2272     // Copy the object over to its new location.
2273     // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
2274     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2275     if (kUseBakerReadBarrier) {
2276       obj->AssertReadBarrierState();
2277       forward_address->AssertReadBarrierState();
2278     }
2279     return forward_address;
2280   }
2281 };
2282 
2283 void Heap::UnBindBitmaps() {
2284   TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2285   for (const auto& space : GetContinuousSpaces()) {
2286     if (space->IsContinuousMemMapAllocSpace()) {
2287       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2288       if (alloc_space->GetLiveBitmap() != nullptr && alloc_space->HasBoundBitmaps()) {
2289         alloc_space->UnBindBitmaps();
2290       }
2291     }
2292   }
2293 }
2294 
2295 void Heap::IncrementFreedEver() {
2296   // Counters are updated only by us, but may be read concurrently.
2297   // The updates should become visible after the corresponding live object info.
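       // (A reader that loads these counters with acquire semantics and sees the new value will
       // therefore also see the GC iteration data published before this store.)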
2298   total_objects_freed_ever_.store(total_objects_freed_ever_.load(std::memory_order_relaxed)
2299                                   + GetCurrentGcIteration()->GetFreedObjects()
2300                                   + GetCurrentGcIteration()->GetFreedLargeObjects(),
2301                                   std::memory_order_release);
2302   total_bytes_freed_ever_.store(total_bytes_freed_ever_.load(std::memory_order_relaxed)
2303                                 + GetCurrentGcIteration()->GetFreedBytes()
2304                                 + GetCurrentGcIteration()->GetFreedLargeObjectBytes(),
2305                                 std::memory_order_release);
2306 }
2307 
2308 #pragma clang diagnostic push
2309 #if !ART_USE_FUTEXES
2310 // Frame gets too large, perhaps due to Bionic pthread_mutex_lock size. We don't care.
2311 #  pragma clang diagnostic ignored "-Wframe-larger-than="
2312 #endif
2313 // This has a large frame, but shouldn't be run anywhere near the stack limit.
2314 void Heap::PreZygoteFork() {
2315   if (!HasZygoteSpace()) {
2316     // We still want to GC in case there are some unreachable non-moving objects that could cause a
2317     // suboptimal bin packing when we compact the zygote space.
2318     CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2319     // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2320     // the trim process may require locking the mutator lock.
2321     non_moving_space_->Trim();
2322   }
2323   Thread* self = Thread::Current();
2324   MutexLock mu(self, zygote_creation_lock_);
2325   // Try to see if we have any Zygote spaces.
2326   if (HasZygoteSpace()) {
2327     return;
2328   }
2329   Runtime::Current()->GetInternTable()->AddNewTable();
2330   Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2331   VLOG(heap) << "Starting PreZygoteFork";
2332   // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2333   // there.
2334   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2335   const bool same_space = non_moving_space_ == main_space_;
2336   if (kCompactZygote) {
2337     // Temporarily disable rosalloc verification because the zygote
2338     // compaction will mess up the rosalloc internal metadata.
2339     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2340     ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2341     zygote_collector.BuildBins(non_moving_space_);
2342     // Create a new bump pointer space which we will compact into.
2343     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2344                                          non_moving_space_->Limit());
2345     // Compact the bump pointer space to a new zygote bump pointer space.
2346     bool reset_main_space = false;
2347     if (IsMovingGc(collector_type_)) {
2348       if (collector_type_ == kCollectorTypeCC) {
2349         zygote_collector.SetFromSpace(region_space_);
2350       } else {
2351         zygote_collector.SetFromSpace(bump_pointer_space_);
2352       }
2353     } else {
2354       CHECK(main_space_ != nullptr);
2355       CHECK_NE(main_space_, non_moving_space_)
2356           << "Does not make sense to compact within the same space";
2357       // Copy from the main space.
2358       zygote_collector.SetFromSpace(main_space_);
2359       reset_main_space = true;
2360     }
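         // Note: the to-space is the temporary zygote bump pointer space above, so this is a
         // one-shot copy rather than a regular semi-space collection; no from/to swap is needed.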
2361     zygote_collector.SetToSpace(&target_space);
2362     zygote_collector.SetSwapSemiSpaces(false);
2363     zygote_collector.Run(kGcCauseCollectorTransition, false);
2364     if (reset_main_space) {
2365       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2366       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2367       MemMap mem_map = main_space_->ReleaseMemMap();
2368       RemoveSpace(main_space_);
2369       space::Space* old_main_space = main_space_;
2370       CreateMainMallocSpace(std::move(mem_map),
2371                             kDefaultInitialSize,
2372                             std::min(mem_map.Size(), growth_limit_),
2373                             mem_map.Size());
2374       delete old_main_space;
2375       AddSpace(main_space_);
2376     } else {
2377       if (collector_type_ == kCollectorTypeCC) {
2378         region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2379         // Evacuated everything out of the region space, clear the mark bitmap.
2380         region_space_->GetMarkBitmap()->Clear();
2381       } else {
2382         bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2383       }
2384     }
2385     if (temp_space_ != nullptr) {
2386       CHECK(temp_space_->IsEmpty());
2387     }
2388     IncrementFreedEver();
2389     // Update the end and write out image.
2390     non_moving_space_->SetEnd(target_space.End());
2391     non_moving_space_->SetLimit(target_space.Limit());
2392     VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2393   }
2394   // Change the collector to the post zygote one.
2395   ChangeCollector(foreground_collector_type_);
2396   // Save the old space so that we can remove it after we complete creating the zygote space.
2397   space::MallocSpace* old_alloc_space = non_moving_space_;
2398   // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2399   // the remaining available space.
2400   // Remove the old space before creating the zygote space since creating the zygote space sets
2401   // the old alloc space's bitmaps to null.
2402   RemoveSpace(old_alloc_space);
2403   if (collector::SemiSpace::kUseRememberedSet) {
2404     // Consistency bound check.
2405     FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2406     // Remove the remembered set for the now zygote space (the old
2407     // non-moving space). Note now that we have compacted objects into
2408     // the zygote space, the data in the remembered set is no longer
2409     // needed. The zygote space will instead have a mod-union table
2410     // from this point on.
2411     RemoveRememberedSet(old_alloc_space);
2412   }
2413   // Remaining space becomes the new non moving space.
2414   zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2415                                                      &non_moving_space_);
2416   CHECK(!non_moving_space_->CanMoveObjects());
2417   if (same_space) {
2418     main_space_ = non_moving_space_;
2419     SetSpaceAsDefault(main_space_);
2420   }
2421   delete old_alloc_space;
2422   CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2423   AddSpace(zygote_space_);
2424   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2425   AddSpace(non_moving_space_);
2426   constexpr bool set_mark_bit = kUseBakerReadBarrier
2427                                 && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects;
2428   if (set_mark_bit) {
2429     // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2430     // safe since we mark all of the objects that may reference non immune objects as gray.
2431     zygote_space_->SetMarkBitInLiveObjects();
2432   }
2433 
2434   // Create the zygote space mod union table.
2435   accounting::ModUnionTable* mod_union_table =
2436       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
2437   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2438 
2439   if (collector_type_ != kCollectorTypeCC) {
2440     // Set all the cards in the mod-union table since we don't know which objects contain references
2441     // to large objects.
2442     mod_union_table->SetCards();
2443   } else {
2444     // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2445     // may be dirty cards from the zygote compaction or reference processing. These cards are not
2446     // necessary to have marked since the zygote space may not refer to any objects not in the
2447     // zygote or image spaces at this point.
2448     mod_union_table->ProcessCards();
2449     mod_union_table->ClearTable();
2450 
2451     // For CC we never collect zygote large objects. This means we do not need to set the cards for
2452     // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2453     // The existing mod-union tables are only for image spaces and may only reference zygote and
2454     // image objects.
2455     for (auto& pair : mod_union_tables_) {
2456       CHECK(pair.first->IsImageSpace());
2457       CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2458       accounting::ModUnionTable* table = pair.second;
2459       table->ClearTable();
2460     }
2461   }
2462   AddModUnionTable(mod_union_table);
2463   large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit);
2464   if (collector::SemiSpace::kUseRememberedSet) {
2465     // Add a new remembered set for the post-zygote non-moving space.
2466     accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2467         new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2468                                       non_moving_space_);
2469     CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2470         << "Failed to create post-zygote non-moving space remembered set";
2471     AddRememberedSet(post_zygote_non_moving_space_rem_set);
2472   }
2473 }
2474 #pragma clang diagnostic pop
2475 
2476 void Heap::FlushAllocStack() {
2477   MarkAllocStackAsLive(allocation_stack_.get());
2478   allocation_stack_->Reset();
2479 }
2480 
2481 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2482                           accounting::ContinuousSpaceBitmap* bitmap2,
2483                           accounting::LargeObjectBitmap* large_objects,
2484                           accounting::ObjectStack* stack) {
2485   DCHECK(bitmap1 != nullptr);
2486   DCHECK(bitmap2 != nullptr);
2487   const auto* limit = stack->End();
2488   for (auto* it = stack->Begin(); it != limit; ++it) {
2489     const mirror::Object* obj = it->AsMirrorPtr();
2490     if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2491       if (bitmap1->HasAddress(obj)) {
2492         bitmap1->Set(obj);
2493       } else if (bitmap2->HasAddress(obj)) {
2494         bitmap2->Set(obj);
2495       } else {
2496         DCHECK(large_objects != nullptr);
2497         large_objects->Set(obj);
2498       }
2499     }
2500   }
2501 }
2502 
2503 void Heap::SwapSemiSpaces() {
2504   CHECK(bump_pointer_space_ != nullptr);
2505   CHECK(temp_space_ != nullptr);
2506   std::swap(bump_pointer_space_, temp_space_);
2507 }
2508 
2509 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2510                                            space::ContinuousMemMapAllocSpace* source_space,
2511                                            GcCause gc_cause) {
2512   CHECK(kMovingCollector);
2513   if (target_space != source_space) {
2514     // Don't swap spaces since this isn't a typical semi space collection.
2515     semi_space_collector_->SetSwapSemiSpaces(false);
2516     semi_space_collector_->SetFromSpace(source_space);
2517     semi_space_collector_->SetToSpace(target_space);
2518     semi_space_collector_->Run(gc_cause, false);
2519     return semi_space_collector_;
2520   }
2521   LOG(FATAL) << "Unsupported";
2522   UNREACHABLE();
2523 }
2524 
2525 void Heap::TraceHeapSize(size_t heap_size) {
2526   ATraceIntegerValue("Heap size (KB)", heap_size / KB);
2527 }
2528 
2529 #if defined(__GLIBC__)
2530 # define IF_GLIBC(x) x
2531 #else
2532 # define IF_GLIBC(x)
2533 #endif
2534 
2535 size_t Heap::GetNativeBytes() {
2536   size_t malloc_bytes;
2537 #if defined(__BIONIC__) || defined(__GLIBC__)
2538   IF_GLIBC(size_t mmapped_bytes;)
2539   struct mallinfo mi = mallinfo();
2540   // In spite of the documentation, the jemalloc version of this call seems to do what we want,
2541   // and it is thread-safe.
2542   if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
2543     // Shouldn't happen, but glibc declares uordblks as int.
2544     // Avoiding sign extension gets us correct behavior for another 2 GB.
2545     malloc_bytes = (unsigned int)mi.uordblks;
2546     IF_GLIBC(mmapped_bytes = (unsigned int)mi.hblkhd;)
2547   } else {
2548     malloc_bytes = mi.uordblks;
2549     IF_GLIBC(mmapped_bytes = mi.hblkhd;)
2550   }
2551   // From the spec, it appeared mmapped_bytes <= malloc_bytes. Reality was sometimes
2552   // dramatically different. (b/119580449 was an early bug.) If so, we try to fudge it.
2553   // However, malloc implementations seem to interpret hblkhd differently, namely as
2554   // mapped blocks backing the entire heap (e.g. jemalloc) vs. large objects directly
2555   // allocated via mmap (e.g. glibc). Thus we now only do this for glibc, where it
2556   // previously helped, and which appears to use a reading of the spec compatible
2557   // with our adjustment.
2558 #if defined(__GLIBC__)
2559   if (mmapped_bytes > malloc_bytes) {
2560     malloc_bytes = mmapped_bytes;
2561   }
2562 #endif  // GLIBC
2563 #else  // Neither Bionic nor Glibc
2564   // We should hit this case only in contexts in which GC triggering is not critical. Effectively
2565   // disable GC triggering based on malloc().
2566   malloc_bytes = 1000;
2567 #endif
2568   return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
2569   // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
2570   // more expensive, and it would allow us to count memory allocated by means other than malloc.
2571   // However it would change as pages are unmapped and remapped due to memory pressure, among
2572   // other things. It seems risky to trigger GCs as a result of such changes.
2573 }
2574 
2575 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2576                                                GcCause gc_cause,
2577                                                bool clear_soft_references) {
2578   Thread* self = Thread::Current();
2579   Runtime* runtime = Runtime::Current();
2580   // If the heap can't run the GC, silently fail and return that no GC was run.
2581   switch (gc_type) {
2582     case collector::kGcTypePartial: {
2583       if (!HasZygoteSpace()) {
2584         return collector::kGcTypeNone;
2585       }
2586       break;
2587     }
2588     default: {
2589       // Other GC types don't have any special cases that make them not runnable. The main case
2590       // here is full GC.
2591     }
2592   }
2593   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2594   Locks::mutator_lock_->AssertNotHeld(self);
2595   if (self->IsHandlingStackOverflow()) {
2596     // If we are throwing a stack overflow error we probably don't have enough remaining stack
2597     // space to run the GC.
2598     return collector::kGcTypeNone;
2599   }
2600   bool compacting_gc;
2601   {
2602     gc_complete_lock_->AssertNotHeld(self);
2603     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2604     MutexLock mu(self, *gc_complete_lock_);
2605     // Ensure there is only one GC at a time.
2606     WaitForGcToCompleteLocked(gc_cause, self);
2607     compacting_gc = IsMovingGc(collector_type_);
2608     // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2609     if (compacting_gc && disable_moving_gc_count_ != 0) {
2610       LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2611       return collector::kGcTypeNone;
2612     }
2613     if (gc_disabled_for_shutdown_) {
2614       return collector::kGcTypeNone;
2615     }
2616     collector_type_running_ = collector_type_;
2617   }
2618   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2619     ++runtime->GetStats()->gc_for_alloc_count;
2620     ++self->GetStats()->gc_for_alloc_count;
2621   }
2622   const size_t bytes_allocated_before_gc = GetBytesAllocated();
2623 
2624   DCHECK_LT(gc_type, collector::kGcTypeMax);
2625   DCHECK_NE(gc_type, collector::kGcTypeNone);
2626 
2627   collector::GarbageCollector* collector = nullptr;
2628   // TODO: Clean this up.
2629   if (compacting_gc) {
2630     DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2631            current_allocator_ == kAllocatorTypeTLAB ||
2632            current_allocator_ == kAllocatorTypeRegion ||
2633            current_allocator_ == kAllocatorTypeRegionTLAB);
2634     switch (collector_type_) {
2635       case kCollectorTypeSS:
2636         semi_space_collector_->SetFromSpace(bump_pointer_space_);
2637         semi_space_collector_->SetToSpace(temp_space_);
2638         semi_space_collector_->SetSwapSemiSpaces(true);
2639         collector = semi_space_collector_;
2640         break;
2641       case kCollectorTypeCC:
2642         if (use_generational_cc_) {
2643           // TODO: Other threads must do the flip checkpoint before they start poking at
2644           // active_concurrent_copying_collector_. So we should not access it concurrently here.
2645           active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
2646               young_concurrent_copying_collector_ : concurrent_copying_collector_;
2647           DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_);
2648         }
2649         collector = active_concurrent_copying_collector_;
2650         break;
2651       default:
2652         LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2653     }
2654     if (collector != active_concurrent_copying_collector_) {
2655       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2656       if (kIsDebugBuild) {
2657         // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2658         temp_space_->GetMemMap()->TryReadable();
2659       }
2660       CHECK(temp_space_->IsEmpty());
2661     }
2662     gc_type = collector::kGcTypeFull;  // TODO: Not hard code this in.
2663   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2664       current_allocator_ == kAllocatorTypeDlMalloc) {
2665     collector = FindCollectorByGcType(gc_type);
2666   } else {
2667     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2668   }
2669 
2670   CHECK(collector != nullptr)
2671       << "Could not find garbage collector with collector_type="
2672       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2673   collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2674   IncrementFreedEver();
2675   RequestTrim(self);
2676   // Collect cleared references.
2677   SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2678   // Grow the heap so that we know when to perform the next GC.
2679   GrowForUtilization(collector, bytes_allocated_before_gc);
2680   LogGC(gc_cause, collector);
2681   FinishGC(self, gc_type);
2682   // Actually enqueue all cleared references. Do this after the GC has officially finished since
2683   // otherwise we can deadlock.
2684   clear->Run(self);
2685   clear->Finalize();
2686   // Inform DDMS that a GC completed.
2687   Dbg::GcDidFinish();
2688 
2689   old_native_bytes_allocated_.store(GetNativeBytes());
2690 
2691   // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2692   // deadlocks in case the JNI_OnUnload function does allocations.
2693   {
2694     ScopedObjectAccess soa(self);
2695     soa.Vm()->UnloadNativeLibraries();
2696   }
2697   return gc_type;
2698 }
2699 
2700 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2701   const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2702   const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2703   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2704   // (mutator time blocked >= long_pause_log_threshold_).
2705   bool log_gc = kLogAllGCs || (gc_cause == kGcCauseExplicit && always_log_explicit_gcs_);
2706   if (!log_gc && CareAboutPauseTimes()) {
2707     // GC for alloc pauses the allocating thread, so consider it as a pause.
2708     log_gc = duration > long_gc_log_threshold_ ||
2709         (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2710     for (uint64_t pause : pause_times) {
2711       log_gc = log_gc || pause >= long_pause_log_threshold_;
2712     }
2713   }
2714   if (log_gc) {
2715     const size_t percent_free = GetPercentFree();
2716     const size_t current_heap_size = GetBytesAllocated();
2717     const size_t total_memory = GetTotalMemory();
2718     std::ostringstream pause_string;
2719     for (size_t i = 0; i < pause_times.size(); ++i) {
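           // Truncate each pause to microsecond granularity before pretty-printing.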
2720       pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2721                    << ((i != pause_times.size() - 1) ? "," : "");
2722     }
2723     LOG(INFO) << gc_cause << " " << collector->GetName()
2724               << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
2725               << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2726               << current_gc_iteration_.GetFreedLargeObjects() << "("
2727               << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2728               << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2729               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2730               << " total " << PrettyDuration((duration / 1000) * 1000);
2731     VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2732   }
2733 }
2734 
2735 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2736   MutexLock mu(self, *gc_complete_lock_);
2737   collector_type_running_ = kCollectorTypeNone;
2738   if (gc_type != collector::kGcTypeNone) {
2739     last_gc_type_ = gc_type;
2740 
2741     // Update stats.
2742     ++gc_count_last_window_;
2743     if (running_collection_is_blocking_) {
2744       // If the currently running collection was a blocking one,
2745       // increment the counters and reset the flag.
2746       ++blocking_gc_count_;
2747       blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2748       ++blocking_gc_count_last_window_;
2749     }
2750     // Update the gc count rate histograms if due.
2751     UpdateGcCountRateHistograms();
2752   }
2753   // Reset.
2754   running_collection_is_blocking_ = false;
2755   thread_running_gc_ = nullptr;
2756   // Wake anyone who may have been waiting for the GC to complete.
2757   gc_complete_cond_->Broadcast(self);
2758 }
2759 
2760 void Heap::UpdateGcCountRateHistograms() {
2761   // Invariant: if the time since the last update includes more than
2762   // one window, all the GC runs (if > 0) must have happened in the first
2763   // window because otherwise the update must have already taken place
2764   // at an earlier GC run. So, we report the non-first windows with
2765   // zero counts to the histograms.
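       // For example, with a 10s window, if 25s have elapsed and 3 GCs (including the current
       // one) ran since the last update, the first window is recorded with count 2, the second
       // with 0, and the current run is carried over into the new window.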
2766   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2767   uint64_t now = NanoTime();
2768   DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2769   uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2770   uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2771 
2772   // The computed number of windows can be incoherently high if NanoTime() is not monotonic.
2773   // Setting a limit on its maximum value reduces the impact on CPU time in such cases.
2774   if (num_of_windows > kGcCountRateHistogramMaxNumMissedWindows) {
2775     LOG(WARNING) << "Reducing the number of considered missed Gc histogram windows from "
2776                  << num_of_windows << " to " << kGcCountRateHistogramMaxNumMissedWindows;
2777     num_of_windows = kGcCountRateHistogramMaxNumMissedWindows;
2778   }
2779 
2780   if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2781     // Record the first window.
2782     gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
2783     blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2784         blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2785     // Record the other windows (with zero counts).
2786     for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2787       gc_count_rate_histogram_.AddValue(0);
2788       blocking_gc_count_rate_histogram_.AddValue(0);
2789     }
2790     // Update the last update time and reset the counters.
2791     last_update_time_gc_count_rate_histograms_ =
2792         (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2793     gc_count_last_window_ = 1;  // Include the current run.
2794     blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2795   }
2796   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2797 }
2798 
2799 class RootMatchesObjectVisitor : public SingleRootVisitor {
2800  public:
2801   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2802 
2803   void VisitRoot(mirror::Object* root, const RootInfo& info)
2804       override REQUIRES_SHARED(Locks::mutator_lock_) {
2805     if (root == obj_) {
2806       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2807     }
2808   }
2809 
2810  private:
2811   const mirror::Object* const obj_;
2812 };
2813 
2814 
2815 class ScanVisitor {
2816  public:
2817   void operator()(const mirror::Object* obj) const {
2818     LOG(ERROR) << "Would have rescanned object " << obj;
2819   }
2820 };
2821 
2822 // Verify a reference from an object.
2823 class VerifyReferenceVisitor : public SingleRootVisitor {
2824  public:
2825   VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
2826       REQUIRES_SHARED(Locks::mutator_lock_)
2827       : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2828     CHECK_EQ(self_, Thread::Current());
2829   }
2830 
2831   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
2832       REQUIRES_SHARED(Locks::mutator_lock_) {
2833     if (verify_referent_) {
2834       VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
2835     }
2836   }
2837 
2838   void operator()(ObjPtr<mirror::Object> obj,
2839                   MemberOffset offset,
2840                   bool is_static ATTRIBUTE_UNUSED) const
2841       REQUIRES_SHARED(Locks::mutator_lock_) {
2842     VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
2843   }
2844 
2845   bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
2846     return heap_->IsLiveObjectLocked(obj, true, false, true);
2847   }
2848 
2849   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2850       REQUIRES_SHARED(Locks::mutator_lock_) {
2851     if (!root->IsNull()) {
2852       VisitRoot(root);
2853     }
2854   }
2855   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2856       REQUIRES_SHARED(Locks::mutator_lock_) {
2857     const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2858         root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2859   }
2860 
2861   void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
2862       REQUIRES_SHARED(Locks::mutator_lock_) {
2863     if (root == nullptr) {
2864       LOG(ERROR) << "Root is null with info " << root_info.GetType();
2865     } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2866       LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
2867           << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2868     }
2869   }
2870 
2871  private:
2872   // TODO: Fix the no thread safety analysis.
2873   // Returns false on failure.
2874   bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2875       NO_THREAD_SAFETY_ANALYSIS {
2876     if (ref == nullptr || IsLive(ref)) {
2877       // Verify that the reference is live.
2878       return true;
2879     }
2880     CHECK_EQ(self_, Thread::Current());  // fail_count_ is private to the calling thread.
2881     *fail_count_ += 1;
2882     if (*fail_count_ == 1) {
2883       // Only print message for the first failure to prevent spam.
2884       LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2885     }
2886     if (obj != nullptr) {
2887       // Only do this part for non roots.
2888       accounting::CardTable* card_table = heap_->GetCardTable();
2889       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2890       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2891       uint8_t* card_addr = card_table->CardFromAddr(obj);
2892       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2893                  << offset << "\n card value = " << static_cast<int>(*card_addr);
2894       if (heap_->IsValidObjectAddress(obj->GetClass())) {
2895         LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
2896       } else {
2897         LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2898       }
2899 
2900       // Attempt to find the class inside of the recently freed objects.
2901       space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2902       if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2903         space::MallocSpace* space = ref_space->AsMallocSpace();
2904         mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2905         if (ref_class != nullptr) {
2906           LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2907                      << ref_class->PrettyClass();
2908         } else {
2909           LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2910         }
2911       }
2912 
2913       if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2914           ref->GetClass()->IsClass()) {
2915         LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
2916       } else {
2917         LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2918                    << ") is not a valid heap address";
2919       }
2920 
2921       card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2922       void* cover_begin = card_table->AddrFromCard(card_addr);
2923       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2924           accounting::CardTable::kCardSize);
2925       LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2926           << "-" << cover_end;
2927       accounting::ContinuousSpaceBitmap* bitmap =
2928           heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2929 
2930       if (bitmap == nullptr) {
2931         LOG(ERROR) << "Object " << obj << " has no bitmap";
2932         if (!VerifyClassClass(obj->GetClass())) {
2933           LOG(ERROR) << "Object " << obj << " failed class verification!";
2934         }
2935       } else {
2936         // Print out how the object is live.
2937         if (bitmap->Test(obj)) {
2938           LOG(ERROR) << "Object " << obj << " found in live bitmap";
2939         }
2940         if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2941           LOG(ERROR) << "Object " << obj << " found in allocation stack";
2942         }
2943         if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2944           LOG(ERROR) << "Object " << obj << " found in live stack";
2945         }
2946         if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2947           LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2948         }
2949         if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2950           LOG(ERROR) << "Ref " << ref << " found in live stack";
2951         }
2952         // Attempt to see if the card table missed the reference.
2953         ScanVisitor scan_visitor;
2954         uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2955         card_table->Scan<false>(bitmap, byte_cover_begin,
2956                                 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2957       }
2958 
2959       // Search to see if any of the roots reference our object.
2960       RootMatchesObjectVisitor visitor1(obj);
2961       Runtime::Current()->VisitRoots(&visitor1);
2962       // Search to see if any of the roots reference our reference.
2963       RootMatchesObjectVisitor visitor2(ref);
2964       Runtime::Current()->VisitRoots(&visitor2);
2965     }
2966     return false;
2967   }
2968 
2969   Thread* const self_;
2970   Heap* const heap_;
2971   size_t* const fail_count_;
2972   const bool verify_referent_;
2973 };
2974 
2975 // Verify all references within an object, for use with HeapBitmap::Visit.
2976 class VerifyObjectVisitor {
2977  public:
2978   VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
2979       : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2980 
2981   void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2982     // Note: we are verifying the references in obj but not obj itself; obj must already be
2983     // live, or else we could not have found it in the live bitmap.
2984     VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
2985     // The class doesn't count as a reference but we should verify it anyway.
2986     obj->VisitReferences(visitor, visitor);
2987   }
2988 
2989   void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
2990     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2991     VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
2992     Runtime::Current()->VisitRoots(&visitor);
2993   }
2994 
2995   uint32_t GetFailureCount() const REQUIRES(Locks::mutator_lock_) {
2996     CHECK_EQ(self_, Thread::Current());
2997     return *fail_count_;
2998   }
2999 
3000  private:
3001   Thread* const self_;
3002   Heap* const heap_;
3003   size_t* const fail_count_;
3004   const bool verify_referent_;
3005 };
3006 
3007 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
3008   // Slow path, the allocation stack push back must have already failed.
3009   DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
3010   do {
3011     // TODO: Add handle VerifyObject.
3012     StackHandleScope<1> hs(self);
3013     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3014     // Push our object into the reserve region of the allocation stack. This is only required due
3015     // to heap verification requiring that roots are live (either in the live bitmap or in the
3016     // allocation stack).
3017     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3018     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3019   } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
3020 }
3021 
3022 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
3023                                                           ObjPtr<mirror::Object>* obj) {
3024   // Slow path, the allocation stack push back must have already failed.
3025   DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
3026   StackReference<mirror::Object>* start_address;
3027   StackReference<mirror::Object>* end_address;
3028   while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3029                                             &end_address)) {
3030     // TODO: Add handle VerifyObject.
3031     StackHandleScope<1> hs(self);
3032     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3033     // Push our object into the reserve region of the allocation stack. This is only required due
3034     // to heap verification requiring that roots are live (either in the live bitmap or in the
3035     // allocation stack).
3036     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3037     // Push into the reserve allocation stack.
3038     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3039   }
3040   self->SetThreadLocalAllocationStack(start_address, end_address);
3041   // Retry on the new thread-local allocation stack.
3042   CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr()));  // Must succeed.
3043 }
3044 
3045 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3046 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3047   Thread* self = Thread::Current();
3048   Locks::mutator_lock_->AssertExclusiveHeld(self);
3049   // Let's sort our allocation stacks so that we can efficiently binary search them.
3050   allocation_stack_->Sort();
3051   live_stack_->Sort();
3052   // Since we sorted the allocation stack content, need to revoke all
3053   // thread-local allocation stacks.
3054   RevokeAllThreadLocalAllocationStacks(self);
3055   size_t fail_count = 0;
3056   VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents);
3057   // Verify objects in the allocation stack since these will be objects which were:
3058   // 1. Allocated prior to the GC (pre GC verification).
3059   // 2. Allocated during the GC (pre sweep GC verification).
3060   // We don't want to verify the objects in the live stack since they themselves may be
3061   // pointing to dead objects if they are not reachable.
3062   VisitObjectsPaused(visitor);
3063   // Verify the roots:
3064   visitor.VerifyRoots();
3065   if (visitor.GetFailureCount() > 0) {
3066     // Dump mod-union tables.
3067     for (const auto& table_pair : mod_union_tables_) {
3068       accounting::ModUnionTable* mod_union_table = table_pair.second;
3069       mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
3070     }
3071     // Dump remembered sets.
3072     for (const auto& table_pair : remembered_sets_) {
3073       accounting::RememberedSet* remembered_set = table_pair.second;
3074       remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
3075     }
3076     DumpSpaces(LOG_STREAM(ERROR));
3077   }
3078   return visitor.GetFailureCount();
3079 }
3080 
3081 class VerifyReferenceCardVisitor {
3082  public:
3083   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3084       REQUIRES_SHARED(Locks::mutator_lock_,
3085                             Locks::heap_bitmap_lock_)
3086       : heap_(heap), failed_(failed) {
3087   }
3088 
3089   // There are no card marks for native roots on a class.
3090   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3091       const {}
3092   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3093 
3094   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3095   // annotalysis on visitors.
3096   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3097       NO_THREAD_SAFETY_ANALYSIS {
3098     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3099     // Filter out class references since changing an object's class does not mark the card as dirty.
3100     // Also handles large objects, since the only reference they hold is a class reference.
3101     if (ref != nullptr && !ref->IsClass()) {
3102       accounting::CardTable* card_table = heap_->GetCardTable();
3103       // If the object is not dirty and it is referencing something in the live stack other than
3104       // a class, then it must be on a dirty card.
3105       if (!card_table->AddrIsInCardTable(obj)) {
3106         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3107         *failed_ = true;
3108       } else if (!card_table->IsDirty(obj)) {
3109         // TODO: Check mod-union tables.
3110         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3111         // kCardDirty - 1 if it didn't get touched since we aged it.
3112         accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3113         if (live_stack->ContainsSorted(ref)) {
3114           if (live_stack->ContainsSorted(obj)) {
3115             LOG(ERROR) << "Object " << obj << " found in live stack";
3116           }
3117           if (heap_->GetLiveBitmap()->Test(obj)) {
3118             LOG(ERROR) << "Object " << obj << " found in live bitmap";
3119           }
3120           LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3121                     << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3122                     << " in live stack";
3123 
3124           // Print which field of the object is dead.
3125           if (!obj->IsObjectArray()) {
3126             ObjPtr<mirror::Class> klass = is_static ? obj->AsClass() : obj->GetClass();
3127             CHECK(klass != nullptr);
3128             for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3129               if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3130                 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3131                            << field.PrettyField();
3132                 break;
3133               }
3134             }
3135           } else {
3136             ObjPtr<mirror::ObjectArray<mirror::Object>> object_array =
3137                 obj->AsObjectArray<mirror::Object>();
3138             for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3139               if (object_array->Get(i) == ref) {
3140                 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3141               }
3142             }
3143           }
3144 
3145           *failed_ = true;
3146         }
3147       }
3148     }
3149   }
3150 
3151  private:
3152   Heap* const heap_;
3153   bool* const failed_;
3154 };
3155 
3156 class VerifyLiveStackReferences {
3157  public:
3158   explicit VerifyLiveStackReferences(Heap* heap)
3159       : heap_(heap),
3160         failed_(false) {}
3161 
3162   void operator()(mirror::Object* obj) const
3163       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3164     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3165     obj->VisitReferences(visitor, VoidFunctor());
3166   }
3167 
3168   bool Failed() const {
3169     return failed_;
3170   }
3171 
3172  private:
3173   Heap* const heap_;
3174   bool failed_;
3175 };
3176 
3177 bool Heap::VerifyMissingCardMarks() {
3178   Thread* self = Thread::Current();
3179   Locks::mutator_lock_->AssertExclusiveHeld(self);
3180   // We need to sort the live stack since we binary search it.
3181   live_stack_->Sort();
3182   // Since we sorted the allocation stack content, need to revoke all
3183   // thread-local allocation stacks.
3184   RevokeAllThreadLocalAllocationStacks(self);
3185   VerifyLiveStackReferences visitor(this);
3186   GetLiveBitmap()->Visit(visitor);
3187   // We can verify objects in the live stack since none of these should reference dead objects.
3188   for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3189     if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3190       visitor(it->AsMirrorPtr());
3191     }
3192   }
3193   return !visitor.Failed();
3194 }
3195 
3196 void Heap::SwapStacks() {
3197   if (kUseThreadLocalAllocationStack) {
3198     live_stack_->AssertAllZero();
3199   }
3200   allocation_stack_.swap(live_stack_);
3201 }
3202 
3203 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3204   // This must be called only during the pause.
3205   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3206   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3207   MutexLock mu2(self, *Locks::thread_list_lock_);
3208   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3209   for (Thread* t : thread_list) {
3210     t->RevokeThreadLocalAllocationStack();
3211   }
3212 }
3213 
3214 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3215   if (kIsDebugBuild) {
3216     if (rosalloc_space_ != nullptr) {
3217       rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3218     }
3219     if (bump_pointer_space_ != nullptr) {
3220       bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3221     }
3222   }
3223 }
3224 
3225 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3226   if (kIsDebugBuild) {
3227     if (bump_pointer_space_ != nullptr) {
3228       bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3229     }
3230   }
3231 }
3232 
3233 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3234   auto it = mod_union_tables_.find(space);
3235   if (it == mod_union_tables_.end()) {
3236     return nullptr;
3237   }
3238   return it->second;
3239 }
3240 
3241 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3242   auto it = remembered_sets_.find(space);
3243   if (it == remembered_sets_.end()) {
3244     return nullptr;
3245   }
3246   return it->second;
3247 }
3248 
3249 void Heap::ProcessCards(TimingLogger* timings,
3250                         bool use_rem_sets,
3251                         bool process_alloc_space_cards,
3252                         bool clear_alloc_space_cards) {
3253   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3254   // Clear cards and keep track of cards cleared in the mod-union table.
3255   for (const auto& space : continuous_spaces_) {
3256     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3257     accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3258     if (table != nullptr) {
3259       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3260           "ImageModUnionClearCards";
3261       TimingLogger::ScopedTiming t2(name, timings);
3262       table->ProcessCards();
3263     } else if (use_rem_sets && rem_set != nullptr) {
3264       DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_);
3265       TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3266       rem_set->ClearCards();
3267     } else if (process_alloc_space_cards) {
3268       TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3269       if (clear_alloc_space_cards) {
3270         uint8_t* end = space->End();
3271         if (space->IsImageSpace()) {
3272           // Image space end is the end of the mirror objects, it is not necessarily page or card
3273           // aligned. Align up so that the check in ClearCardRange does not fail.
3274           end = AlignUp(end, accounting::CardTable::kCardSize);
3275         }
3276         card_table_->ClearCardRange(space->Begin(), end);
3277       } else {
3278         // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3279         // cards were dirty before the GC started.
3280         // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3281         // -> clean(cleaning thread).
3282         // The races mean we either end up with an aged card or an unaged card. Since we run
3283         // the checkpoint roots first and scan / update mod-union tables afterwards, we will
3284         // always scan either card. If we end up with the non-aged card, we scan it in the pause.
3285         card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3286                                        VoidFunctor());
3287       }
3288     }
3289   }
3290 }
3291 
3292 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3293   mirror::Object* MarkObject(mirror::Object* obj) override {
3294     return obj;
3295   }
3296   void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
3297   }
3298 };
3299 
3300 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3301   Thread* const self = Thread::Current();
3302   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3303   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3304   if (verify_pre_gc_heap_) {
3305     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3306     size_t failures = VerifyHeapReferences();
3307     if (failures > 0) {
3308       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3309           << " failures";
3310     }
3311   }
3312   // Check that all objects which reference things in the live stack are on dirty cards.
3313   if (verify_missing_card_marks_) {
3314     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3315     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3316     SwapStacks();
3317     // Sort the live stack so that we can quickly binary search it later.
3318     CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3319                                     << " missing card mark verification failed\n" << DumpSpaces();
3320     SwapStacks();
3321   }
3322   if (verify_mod_union_table_) {
3323     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3324     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3325     for (const auto& table_pair : mod_union_tables_) {
3326       accounting::ModUnionTable* mod_union_table = table_pair.second;
3327       IdentityMarkHeapReferenceVisitor visitor;
3328       mod_union_table->UpdateAndMarkReferences(&visitor);
3329       mod_union_table->Verify();
3330     }
3331   }
3332 }
3333 
3334 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3335   if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3336     collector::GarbageCollector::ScopedPause pause(gc, false);
3337     PreGcVerificationPaused(gc);
3338   }
3339 }
3340 
3341 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3342   // TODO: Add a new runtime option for this?
3343   if (verify_pre_gc_rosalloc_) {
3344     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3345   }
3346 }
3347 
3348 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3349   Thread* const self = Thread::Current();
3350   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3351   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3352   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3353   // reachable objects.
3354   if (verify_pre_sweeping_heap_) {
3355     TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3356     CHECK_NE(self->GetState(), kRunnable);
3357     {
3358       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3359       // Swapping bound bitmaps does nothing.
3360       gc->SwapBitmaps();
3361     }
3362     // Pass in false since concurrent reference processing can mean that the reference referents
3363     // may point to dead objects at the point which PreSweepingGcVerification is called.
3364     size_t failures = VerifyHeapReferences(false);
3365     if (failures > 0) {
3366       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3367           << " failures";
3368     }
3369     {
3370       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3371       gc->SwapBitmaps();
3372     }
3373   }
3374   if (verify_pre_sweeping_rosalloc_) {
3375     RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3376   }
3377 }
3378 
3379 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3380   // Only pause if we have to do some verification.
3381   Thread* const self = Thread::Current();
3382   TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3383   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3384   if (verify_system_weaks_) {
3385     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3386     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3387     mark_sweep->VerifySystemWeaks();
3388   }
3389   if (verify_post_gc_rosalloc_) {
3390     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3391   }
3392   if (verify_post_gc_heap_) {
3393     TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3394     size_t failures = VerifyHeapReferences();
3395     if (failures > 0) {
3396       LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3397           << " failures";
3398     }
3399   }
3400 }
3401 
3402 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3403   if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3404     collector::GarbageCollector::ScopedPause pause(gc, false);
3405     PostGcVerificationPaused(gc);
3406   }
3407 }
3408 
3409 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3410   TimingLogger::ScopedTiming t(name, timings);
3411   for (const auto& space : continuous_spaces_) {
3412     if (space->IsRosAllocSpace()) {
3413       VLOG(heap) << name << " : " << space->GetName();
3414       space->AsRosAllocSpace()->Verify();
3415     }
3416   }
3417 }
3418 
3419 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3420   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3421   MutexLock mu(self, *gc_complete_lock_);
3422   return WaitForGcToCompleteLocked(cause, self);
3423 }
3424 
3425 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3426   gc_complete_cond_->CheckSafeToWait(self);
3427   collector::GcType last_gc_type = collector::kGcTypeNone;
3428   GcCause last_gc_cause = kGcCauseNone;
3429   uint64_t wait_start = NanoTime();
3430   while (collector_type_running_ != kCollectorTypeNone) {
3431     if (self != task_processor_->GetRunningThread()) {
3432       // The current thread is about to wait for a currently running
3433       // collection to finish. If the waiting thread is not the heap
3434       // task daemon thread, the currently running collection is
3435       // considered as a blocking GC.
3436       running_collection_is_blocking_ = true;
3437       VLOG(gc) << "Waiting for a blocking GC " << cause;
3438     }
3439     SCOPED_TRACE << "GC: Wait For Completion " << cause;
3440     // We must wait: change thread state and then sleep on gc_complete_cond_.
3441     gc_complete_cond_->Wait(self);
3442     last_gc_type = last_gc_type_;
3443     last_gc_cause = last_gc_cause_;
3444   }
3445   uint64_t wait_time = NanoTime() - wait_start;
3446   total_wait_time_ += wait_time;
3447   if (wait_time > long_pause_log_threshold_) {
3448     LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3449               << PrettyDuration(wait_time);
3450   }
3451   if (self != task_processor_->GetRunningThread()) {
3452     // The current thread is about to run a collection. If the thread
3453     // is not the heap task daemon thread, it's considered as a
3454     // blocking GC (i.e., blocking itself).
3455     running_collection_is_blocking_ = true;
3456     // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3457     // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3458     if (cause == kGcCauseForAlloc ||
3459         cause == kGcCauseForNativeAlloc ||
3460         cause == kGcCauseDisableMovingGc) {
3461       VLOG(gc) << "Starting a blocking GC " << cause;
3462     }
3463   }
3464   return last_gc_type;
3465 }
3466 
3467 void Heap::DumpForSigQuit(std::ostream& os) {
3468   os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3469      << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3470   DumpGcPerformanceInfo(os);
3471 }
3472 
3473 size_t Heap::GetPercentFree() {
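  // For illustration only (values assumed): with GetFreeMemory() == 25MiB and target_footprint_
  // currently 100MiB, this returns 25.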
3474   return static_cast<size_t>(100.0f * static_cast<float>(
3475       GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
3476 }
3477 
3478 void Heap::SetIdealFootprint(size_t target_footprint) {
3479   if (target_footprint > GetMaxMemory()) {
3480     VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
3481              << PrettySize(GetMaxMemory());
3482     target_footprint = GetMaxMemory();
3483   }
3484   target_footprint_.store(target_footprint, std::memory_order_relaxed);
3485 }
3486 
3487 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3488   if (kMovingCollector) {
3489     space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3490     if (space != nullptr) {
3491       // TODO: Check large object?
3492       return space->CanMoveObjects();
3493     }
3494   }
3495   return false;
3496 }
3497 
3498 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3499   for (auto* collector : garbage_collectors_) {
3500     if (collector->GetCollectorType() == collector_type_ &&
3501         collector->GetGcType() == gc_type) {
3502       return collector;
3503     }
3504   }
3505   return nullptr;
3506 }
3507 
3508 double Heap::HeapGrowthMultiplier() const {
3509   // If we don't care about pause times we are background, so return 1.0.
3510   if (!CareAboutPauseTimes()) {
3511     return 1.0;
3512   }
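  // Illustrative note (default value assumed, not asserted here): the foreground multiplier is
  // commonly around 2.0, so a jank-perceptible process grows the heap roughly twice as
  // aggressively as a background one, trading footprint for fewer GCs.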
3513   return foreground_heap_growth_multiplier_;
3514 }
3515 
3516 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3517                               size_t bytes_allocated_before_gc) {
3518   // We know what our utilization is at this moment.
3519   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3520   const size_t bytes_allocated = GetBytesAllocated();
3521   // Trace the new heap size after the GC is finished.
3522   TraceHeapSize(bytes_allocated);
3523   uint64_t target_size, grow_bytes;
3524   collector::GcType gc_type = collector_ran->GetGcType();
3525   MutexLock mu(Thread::Current(), process_state_update_lock_);
3526   // Use the multiplier to grow more for foreground.
3527   const double multiplier = HeapGrowthMultiplier();
3528   if (gc_type != collector::kGcTypeSticky) {
3529     // Grow the heap for non sticky GC.
3530     uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
3531     DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
3532         << " target_utilization_=" << target_utilization_;
3533     grow_bytes = std::min(delta, static_cast<uint64_t>(max_free_));
3534     grow_bytes = std::max(grow_bytes, static_cast<uint64_t>(min_free_));
3535     target_size = bytes_allocated + static_cast<uint64_t>(grow_bytes * multiplier);
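    // Worked example with assumed values: bytes_allocated = 60MiB and a target utilization of
    // 0.75 give delta = 60MiB * (1/0.75 - 1) = 20MiB; clamping to [min_free_, max_free_] of, say,
    // [512KiB, 8MiB] yields grow_bytes = 8MiB, and with a foreground multiplier of 2.0 the new
    // target_size is 60MiB + 16MiB = 76MiB.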
3536     next_gc_type_ = collector::kGcTypeSticky;
3537   } else {
3538     collector::GcType non_sticky_gc_type = NonStickyGcType();
3539     // Find what the next non sticky collector will be.
3540     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3541     if (use_generational_cc_) {
3542       if (non_sticky_collector == nullptr) {
3543         non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
3544       }
3545       CHECK(non_sticky_collector != nullptr);
3546     }
3547     double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
3548 
3549     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3550     // do another sticky collection next.
3551     // We also check that the bytes allocated aren't over the target_footprint, or
3552     // concurrent_start_bytes in case of concurrent GCs, in order to prevent a
3553     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3554     // if the sticky GC throughput always remained >= the full/partial throughput.
3555     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3556     if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
3557         non_sticky_collector->GetEstimatedMeanThroughput() &&
3558         non_sticky_collector->NumberOfIterations() > 0 &&
3559         bytes_allocated <= (IsGcConcurrent() ? concurrent_start_bytes_ : target_footprint)) {
3560       next_gc_type_ = collector::kGcTypeSticky;
3561     } else {
3562       next_gc_type_ = non_sticky_gc_type;
3563     }
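    // Reading the check above with assumed numbers: if the sticky GC's adjusted throughput was
    // 150MiB/s while the non-sticky collector historically averages 120MiB/s, and bytes_allocated
    // is still under the concurrent-start/target threshold, we stay sticky; otherwise we fall back
    // to the non-sticky type so unreclaimed garbage cannot accumulate indefinitely.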
3564     // If we have freed enough memory, shrink the heap back down.
3565     const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
3566     if (bytes_allocated + adjusted_max_free < target_footprint) {
3567       target_size = bytes_allocated + adjusted_max_free;
3568       grow_bytes = max_free_;
3569     } else {
3570       target_size = std::max(bytes_allocated, target_footprint);
3571       // The same whether jank perceptible or not; just avoid the adjustment.
3572       grow_bytes = 0;
3573     }
3574   }
3575   CHECK_LE(target_size, std::numeric_limits<size_t>::max());
3576   if (!ignore_target_footprint_) {
3577     SetIdealFootprint(target_size);
3578     // Store target size (computed with foreground heap growth multiplier) for updating
3579     // target_footprint_ when process state switches to foreground.
3580     // Storing 0 ensures that target_footprint_ is not updated on a
3581     // process-state switch.
3582     min_foreground_target_footprint_ =
3583         (multiplier <= 1.0 && grow_bytes > 0)
3584         ? bytes_allocated + static_cast<size_t>(grow_bytes * foreground_heap_growth_multiplier_)
3585         : 0;
3586 
3587     if (IsGcConcurrent()) {
3588       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3589           current_gc_iteration_.GetFreedLargeObjectBytes() +
3590           current_gc_iteration_.GetFreedRevokeBytes();
3591       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3592       // how many bytes were allocated during the GC we need to add freed_bytes back on.
3593       CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3594       const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3595           bytes_allocated_before_gc;
3596       // Calculate when to perform the next ConcurrentGC.
3597       // Estimate how many remaining bytes we will have when we need to start the next GC.
3598       size_t remaining_bytes = bytes_allocated_during_gc;
3599       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3600       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3601       size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3602       if (UNLIKELY(remaining_bytes > target_footprint)) {
3603         // This should never happen: it would mean that, at the estimated allocation rate, we
3604         // will exceed the application's entire footprint. Schedule another GC nearly straight
3605         // away.
3606         remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
3607       }
3608       DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
3609       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3610       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3611       // right away.
3612       concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
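      // Worked example with assumed numbers: if target_footprint is 100MiB and we estimate 8MiB
      // allocated during the GC (after clamping to the kMin/kMaxConcurrentRemainingBytes bounds),
      // the next concurrent GC is requested once allocation reaches about 92MiB, but never below
      // the current bytes_allocated.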
3613     }
3614   }
3615 }
3616 
3617 void Heap::ClampGrowthLimit() {
3618   // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3619   ScopedObjectAccess soa(Thread::Current());
3620   WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3621   capacity_ = growth_limit_;
3622   for (const auto& space : continuous_spaces_) {
3623     if (space->IsMallocSpace()) {
3624       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3625       malloc_space->ClampGrowthLimit();
3626     }
3627   }
3628   if (collector_type_ == kCollectorTypeCC) {
3629     DCHECK(region_space_ != nullptr);
3630     // Twice the capacity as CC needs extra space for evacuating objects.
3631     region_space_->ClampGrowthLimit(2 * capacity_);
3632   }
3633   // The main space backup isn't in continuous_spaces_ for performance reasons; clamp it here.
3634   if (main_space_backup_.get() != nullptr) {
3635     main_space_backup_->ClampGrowthLimit();
3636   }
3637 }
3638 
3639 void Heap::ClearGrowthLimit() {
3640   if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
3641       && growth_limit_ < capacity_) {
3642     target_footprint_.store(capacity_, std::memory_order_relaxed);
3643     concurrent_start_bytes_ =
3644         UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
3645   }
3646   growth_limit_ = capacity_;
3647   ScopedObjectAccess soa(Thread::Current());
3648   for (const auto& space : continuous_spaces_) {
3649     if (space->IsMallocSpace()) {
3650       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3651       malloc_space->ClearGrowthLimit();
3652       malloc_space->SetFootprintLimit(malloc_space->Capacity());
3653     }
3654   }
3655   // The main space backup isn't in continuous_spaces_ for performance reasons; clear it here.
3656   if (main_space_backup_.get() != nullptr) {
3657     main_space_backup_->ClearGrowthLimit();
3658     main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3659   }
3660 }
3661 
3662 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3663   ScopedObjectAccess soa(self);
3664   ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3665   jvalue args[1];
3666   args[0].l = arg.get();
3667   InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3668   // Restore object in case it gets moved.
3669   *object = soa.Decode<mirror::Object>(arg.get());
3670 }
3671 
3672 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3673                                             bool force_full,
3674                                             ObjPtr<mirror::Object>* obj) {
3675   StackHandleScope<1> hs(self);
3676   HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3677   RequestConcurrentGC(self, kGcCauseBackground, force_full);
3678 }
3679 
3680 class Heap::ConcurrentGCTask : public HeapTask {
3681  public:
3682   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
3683       : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
3684   void Run(Thread* self) override {
3685     gc::Heap* heap = Runtime::Current()->GetHeap();
3686     heap->ConcurrentGC(self, cause_, force_full_);
3687     heap->ClearConcurrentGCRequest();
3688   }
3689 
3690  private:
3691   const GcCause cause_;
3692   const bool force_full_;  // If true, force full (or partial) collection.
3693 };
3694 
3695 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3696   Runtime* runtime = Runtime::Current();
3697   return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3698       !self->IsHandlingStackOverflow();
3699 }
3700 
3701 void Heap::ClearConcurrentGCRequest() {
3702   concurrent_gc_pending_.store(false, std::memory_order_relaxed);
3703 }
3704 
3705 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3706   if (CanAddHeapTask(self) &&
3707       concurrent_gc_pending_.CompareAndSetStrongSequentiallyConsistent(false, true)) {
3708     task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
3709                                                         cause,
3710                                                         force_full));
3711   }
3712 }
3713 
3714 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3715   if (!Runtime::Current()->IsShuttingDown(self)) {
3716     // Wait for any GCs currently running to finish.
3717     if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
3718       // If we can't run the GC type we wanted to run, find the next appropriate one and try
3719       // that instead. E.g. can't do partial, so do full instead.
3720       collector::GcType next_gc_type = next_gc_type_;
3721       // If forcing full and next gc type is sticky, override with a non-sticky type.
3722       if (force_full && next_gc_type == collector::kGcTypeSticky) {
3723         next_gc_type = NonStickyGcType();
3724       }
3725       if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
3726         for (collector::GcType gc_type : gc_plan_) {
3727           // Attempt to run the collector, if we succeed, we are done.
3728           if (gc_type > next_gc_type &&
3729               CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
3730             break;
3731           }
3732         }
3733       }
3734     }
3735   }
3736 }
3737 
3738 class Heap::CollectorTransitionTask : public HeapTask {
3739  public:
3740   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3741 
3742   void Run(Thread* self) override {
3743     gc::Heap* heap = Runtime::Current()->GetHeap();
3744     heap->DoPendingCollectorTransition();
3745     heap->ClearPendingCollectorTransition(self);
3746   }
3747 };
3748 
3749 void Heap::ClearPendingCollectorTransition(Thread* self) {
3750   MutexLock mu(self, *pending_task_lock_);
3751   pending_collector_transition_ = nullptr;
3752 }
3753 
3754 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3755   Thread* self = Thread::Current();
3756   desired_collector_type_ = desired_collector_type;
3757   if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3758     return;
3759   }
3760   if (collector_type_ == kCollectorTypeCC) {
3761     // For CC, we invoke a full compaction when going to the background, but the collector type
3762     // doesn't change.
3763     DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3764   }
3765   DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
3766   CollectorTransitionTask* added_task = nullptr;
3767   const uint64_t target_time = NanoTime() + delta_time;
3768   {
3769     MutexLock mu(self, *pending_task_lock_);
3770     // If we have an existing collector transition, update the target time to be the new target.
3771     if (pending_collector_transition_ != nullptr) {
3772       task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3773       return;
3774     }
3775     added_task = new CollectorTransitionTask(target_time);
3776     pending_collector_transition_ = added_task;
3777   }
3778   task_processor_->AddTask(self, added_task);
3779 }
3780 
3781 class Heap::HeapTrimTask : public HeapTask {
3782  public:
3783   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3784   void Run(Thread* self) override {
3785     gc::Heap* heap = Runtime::Current()->GetHeap();
3786     heap->Trim(self);
3787     heap->ClearPendingTrim(self);
3788   }
3789 };
3790 
3791 void Heap::ClearPendingTrim(Thread* self) {
3792   MutexLock mu(self, *pending_task_lock_);
3793   pending_heap_trim_ = nullptr;
3794 }
3795 
3796 void Heap::RequestTrim(Thread* self) {
3797   if (!CanAddHeapTask(self)) {
3798     return;
3799   }
3800   // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3801   // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3802   // a space it will hold its lock and can become a cause of jank.
3803   // Note that the large object space trims itself and the Zygote space was trimmed at fork time
3804   // and has not changed since.
3805 
3806   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3807   // because that only marks object heads, so a large array looks like lots of empty space. We
3808   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3809   // to utilization (which is probably inversely proportional to how much benefit we can expect).
3810   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3811   // not how much use we're making of those pages.
3812   HeapTrimTask* added_task = nullptr;
3813   {
3814     MutexLock mu(self, *pending_task_lock_);
3815     if (pending_heap_trim_ != nullptr) {
3816       // Already have a heap trim request in task processor, ignore this request.
3817       return;
3818     }
3819     added_task = new HeapTrimTask(kHeapTrimWait);
3820     pending_heap_trim_ = added_task;
3821   }
3822   task_processor_->AddTask(self, added_task);
3823 }
3824 
3825 void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
3826   size_t previous_num_bytes_freed_revoke =
3827       num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
3828   // Check that the updated value is no larger than the number of bytes allocated. There is a
3829   // risk of execution being suspended between the increment above and the CHECK below, leading
3830   // to the use of previous_num_bytes_freed_revoke in the comparison.
3831   CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
3832            previous_num_bytes_freed_revoke + freed_bytes_revoke);
3833 }
3834 
3835 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3836   if (rosalloc_space_ != nullptr) {
3837     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3838     if (freed_bytes_revoke > 0U) {
3839       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3840     }
3841   }
3842   if (bump_pointer_space_ != nullptr) {
3843     CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3844   }
3845   if (region_space_ != nullptr) {
3846     CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3847   }
3848 }
3849 
3850 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3851   if (rosalloc_space_ != nullptr) {
3852     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3853     if (freed_bytes_revoke > 0U) {
3854       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3855     }
3856   }
3857 }
3858 
3859 void Heap::RevokeAllThreadLocalBuffers() {
3860   if (rosalloc_space_ != nullptr) {
3861     size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3862     if (freed_bytes_revoke > 0U) {
3863       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
3864     }
3865   }
3866   if (bump_pointer_space_ != nullptr) {
3867     CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3868   }
3869   if (region_space_ != nullptr) {
3870     CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3871   }
3872 }
3873 
3874 bool Heap::IsGCRequestPending() const {
3875   return concurrent_gc_pending_.load(std::memory_order_relaxed);
3876 }
3877 
3878 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3879   env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3880                             WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3881                             static_cast<jlong>(timeout));
3882 }
3883 
3884 // For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
3885 // different fractions of Java allocations.
3886 // For now, we essentially do not count old native allocations at all, so that we can preserve the
3887 // existing behavior of not limiting native heap size. If we seriously considered it, we would
3888 // have to adjust collection thresholds when we encounter large amounts of old native memory,
3889 // and handle native out-of-memory situations.
3890 
3891 static constexpr size_t kOldNativeDiscountFactor = 65536;  // Approximately infinite for now.
3892 static constexpr size_t kNewNativeDiscountFactor = 2;
3893 
3894 // If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
3895 // newly allocated memory exceeds stop_for_native_allocs_, we wait for GC to complete to avoid
3896 // running out of memory.
3897 static constexpr float kStopForNativeFactor = 4.0;
3898 
3899 // Return the ratio of the weighted native + java allocated bytes to its target value.
3900 // A return value > 1.0 means we should collect. Significantly larger values mean we're falling
3901 // behind.
3902 inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent) {
3903   // Collection check for native allocation. Does not enforce Java heap bounds.
3904   // With adj_start_bytes defined below, effectively checks
3905   // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
3906   // where c1 and c2 are 1 divided by the discount factors defined above (so both are less than 1).
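  // Worked example with assumed numbers: 300MiB of new native allocations, 1GiB of old native
  // allocations, 40MiB of Java allocations and a 100MiB java_gc_start_bytes give
  // weighted_native_bytes ~= 300/2 + 1024/65536 ~= 150MiB; the function then returns roughly
  // (40 + 150) / (100 + NativeAllocationGcWatermark() * HeapGrowthMultiplier() / 2), and a
  // result above 1.0 indicates a GC should be triggered.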
3907   size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
3908   if (old_native_bytes > current_native_bytes) {
3909     // Net decrease; skip the check, but update old value.
3910     // It's OK to lose an update if two stores race.
3911     old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
3912     return 0.0;
3913   } else {
3914     size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
3915     size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
3916         + old_native_bytes / kOldNativeDiscountFactor;
3917     size_t add_bytes_allowed = static_cast<size_t>(
3918         NativeAllocationGcWatermark() * HeapGrowthMultiplier());
3919     size_t java_gc_start_bytes = is_gc_concurrent
3920         ? concurrent_start_bytes_
3921         : target_footprint_.load(std::memory_order_relaxed);
3922     size_t adj_start_bytes = UnsignedSum(java_gc_start_bytes,
3923                                          add_bytes_allowed / kNewNativeDiscountFactor);
3924     return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
3925          / static_cast<float>(adj_start_bytes);
3926   }
3927 }
3928 
3929 inline void Heap::CheckGCForNative(Thread* self) {
3930   bool is_gc_concurrent = IsGcConcurrent();
3931   size_t current_native_bytes = GetNativeBytes();
3932   float gc_urgency = NativeMemoryOverTarget(current_native_bytes, is_gc_concurrent);
3933   if (UNLIKELY(gc_urgency >= 1.0)) {
3934     if (is_gc_concurrent) {
3935       RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
3936       if (gc_urgency > kStopForNativeFactor
3937           && current_native_bytes > stop_for_native_allocs_) {
3938         // We're in danger of running out of memory due to rampant native allocation.
3939         if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
3940           LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
3941         }
3942         WaitForGcToComplete(kGcCauseForNativeAlloc, self);
3943       }
3944     } else {
3945       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
3946     }
3947   }
3948 }
3949 
3950 // About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
3951 void Heap::NotifyNativeAllocations(JNIEnv* env) {
3952   native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
3953   CheckGCForNative(ThreadForEnv(env));
3954 }
3955 
3956 // Register a native allocation with an explicit size.
3957 // This should only be done for large allocations of non-malloc memory, which we wouldn't
3958 // otherwise see.
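// Usage note (hedged, not part of this file's contract): this is typically reached from the
// Java-level dalvik.system.VMRuntime.registerNativeAllocation path, e.g. for large native buffers
// such as bitmap pixel storage that the mallinfo-based native accounting would not otherwise see.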
3959 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3960   // Cautiously check for a wrapped negative bytes argument.
3961   DCHECK(sizeof(size_t) < 8 || bytes < (std::numeric_limits<size_t>::max() / 2));
3962   native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
3963   uint32_t objects_notified =
3964       native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
3965   if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
3966       || bytes > kCheckImmediatelyThreshold) {
3967     CheckGCForNative(ThreadForEnv(env));
3968   }
3969 }
3970 
3971 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
3972   size_t allocated;
3973   size_t new_freed_bytes;
3974   do {
3975     allocated = native_bytes_registered_.load(std::memory_order_relaxed);
3976     new_freed_bytes = std::min(allocated, bytes);
3977     // We should not be registering more free than allocated bytes.
3978     // But correctly keep going in non-debug builds.
3979     DCHECK_EQ(new_freed_bytes, bytes);
3980   } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
3981                                                               allocated - new_freed_bytes));
3982 }
3983 
3984 size_t Heap::GetTotalMemory() const {
3985   return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
3986 }
3987 
3988 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3989   DCHECK(mod_union_table != nullptr);
3990   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3991 }
3992 
3993 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
3994   // Compare rounded sizes since the allocation may have been retried after rounding the size.
3995   // See b/37885600
3996   CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3997         (c->IsVariableSize() ||
3998             RoundUp(c->GetObjectSize(), kObjectAlignment) ==
3999                 RoundUp(byte_count, kObjectAlignment)))
4000       << "ClassFlags=" << c->GetClassFlags()
4001       << " IsClassClass=" << c->IsClassClass()
4002       << " byte_count=" << byte_count
4003       << " IsVariableSize=" << c->IsVariableSize()
4004       << " ObjectSize=" << c->GetObjectSize()
4005       << " sizeof(Class)=" << sizeof(mirror::Class)
4006       << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
4007   CHECK_GE(byte_count, sizeof(mirror::Object));
4008 }
4009 
4010 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
4011   CHECK(remembered_set != nullptr);
4012   space::Space* space = remembered_set->GetSpace();
4013   CHECK(space != nullptr);
4014   CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
4015   remembered_sets_.Put(space, remembered_set);
4016   CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
4017 }
4018 
4019 void Heap::RemoveRememberedSet(space::Space* space) {
4020   CHECK(space != nullptr);
4021   auto it = remembered_sets_.find(space);
4022   CHECK(it != remembered_sets_.end());
4023   delete it->second;
4024   remembered_sets_.erase(it);
4025   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4026 }
4027 
4028 void Heap::ClearMarkedObjects() {
4029   // Clear all of the spaces' mark bitmaps.
4030   for (const auto& space : GetContinuousSpaces()) {
4031     if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
4032       space->GetMarkBitmap()->Clear();
4033     }
4034   }
4035   // Clear the marked objects in the discontinuous space object sets.
4036   for (const auto& space : GetDiscontinuousSpaces()) {
4037     space->GetMarkBitmap()->Clear();
4038   }
4039 }
4040 
4041 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4042   allocation_records_.reset(records);
4043 }
4044 
4045 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4046   if (IsAllocTrackingEnabled()) {
4047     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4048     if (IsAllocTrackingEnabled()) {
4049       GetAllocationRecords()->VisitRoots(visitor);
4050     }
4051   }
4052 }
4053 
4054 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
4055   if (IsAllocTrackingEnabled()) {
4056     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4057     if (IsAllocTrackingEnabled()) {
4058       GetAllocationRecords()->SweepAllocationRecords(visitor);
4059     }
4060   }
4061 }
4062 
4063 void Heap::AllowNewAllocationRecords() const {
4064   CHECK(!kUseReadBarrier);
4065   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4066   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4067   if (allocation_records != nullptr) {
4068     allocation_records->AllowNewAllocationRecords();
4069   }
4070 }
4071 
4072 void Heap::DisallowNewAllocationRecords() const {
4073   CHECK(!kUseReadBarrier);
4074   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4075   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4076   if (allocation_records != nullptr) {
4077     allocation_records->DisallowNewAllocationRecords();
4078   }
4079 }
4080 
4081 void Heap::BroadcastForNewAllocationRecords() const {
4082   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4083   // be set to false while some threads are waiting for system weak access in
4084   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4085   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4086   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4087   if (allocation_records != nullptr) {
4088     allocation_records->BroadcastForNewAllocationRecords();
4089   }
4090 }
4091 
4092 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
4093   DCHECK(gc_stress_mode_);
4094   auto* const runtime = Runtime::Current();
4095   if (runtime->GetClassLinker()->IsInitialized() && !runtime->IsActiveTransaction()) {
4096     // Check if we should GC.
4097     bool new_backtrace = false;
4098     {
4099       static constexpr size_t kMaxFrames = 16u;
4100       MutexLock mu(self, *backtrace_lock_);
4101       FixedSizeBacktrace<kMaxFrames> backtrace;
4102       backtrace.Collect(/* skip_count= */ 2);
4103       uint64_t hash = backtrace.Hash();
4104       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4105       if (new_backtrace) {
4106         seen_backtraces_.insert(hash);
4107       }
4108     }
4109     if (new_backtrace) {
4110       StackHandleScope<1> hs(self);
4111       auto h = hs.NewHandleWrapper(obj);
4112       CollectGarbage(/* clear_soft_references= */ false);
4113       unique_backtrace_count_.fetch_add(1);
4114     } else {
4115       seen_backtrace_count_.fetch_add(1);
4116     }
4117   }
4118 }
4119 
4120 void Heap::DisableGCForShutdown() {
4121   Thread* const self = Thread::Current();
4122   CHECK(Runtime::Current()->IsShuttingDown(self));
4123   MutexLock mu(self, *gc_complete_lock_);
4124   gc_disabled_for_shutdown_ = true;
4125 }
4126 
4127 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
4128   DCHECK_EQ(IsBootImageAddress(obj.Ptr()),
4129             any_of(boot_image_spaces_.begin(),
4130                    boot_image_spaces_.end(),
4131                    [obj](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
4132                      return space->HasAddress(obj.Ptr());
4133                    }));
4134   return IsBootImageAddress(obj.Ptr());
4135 }
4136 
4137 bool Heap::IsInBootImageOatFile(const void* p) const {
4138   DCHECK_EQ(IsBootImageAddress(p),
4139             any_of(boot_image_spaces_.begin(),
4140                    boot_image_spaces_.end(),
4141                    [p](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
4142                      return space->GetOatFile()->Contains(p);
4143                    }));
4144   return IsBootImageAddress(p);
4145 }
4146 
4147 void Heap::SetAllocationListener(AllocationListener* l) {
4148   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4149 
4150   if (old == nullptr) {
4151     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4152   }
4153 }
4154 
4155 void Heap::RemoveAllocationListener() {
4156   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4157 
4158   if (old != nullptr) {
4159     Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4160   }
4161 }
4162 
4163 void Heap::SetGcPauseListener(GcPauseListener* l) {
4164   gc_pause_listener_.store(l, std::memory_order_relaxed);
4165 }
4166 
4167 void Heap::RemoveGcPauseListener() {
4168   gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
4169 }
4170 
4171 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4172                                        AllocatorType allocator_type,
4173                                        size_t alloc_size,
4174                                        bool grow,
4175                                        size_t* bytes_allocated,
4176                                        size_t* usable_size,
4177                                        size_t* bytes_tl_bulk_allocated) {
4178   if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4179     DCHECK_GT(alloc_size, self->TlabSize());
4180     // There is enough space if we grow the TLAB. Let's do that. This increases the
4181     // TLAB bytes.
4182     const size_t min_expand_size = alloc_size - self->TlabSize();
4183     const size_t expand_bytes = std::max(
4184         min_expand_size,
4185         std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
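    // Worked example with assumed sizes: alloc_size = 12KiB, TlabSize() (free bytes left) = 4KiB
    // and TlabRemainingCapacity() = 32KiB give min_expand_size = 8KiB; with an assumed
    // kPartialTlabSize of 16KiB, expand_bytes = max(8KiB, min(28KiB, 16KiB)) = 16KiB.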
4186     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4187       return nullptr;
4188     }
4189     *bytes_tl_bulk_allocated = expand_bytes;
4190     self->ExpandTlab(expand_bytes);
4191     DCHECK_LE(alloc_size, self->TlabSize());
4192   } else if (allocator_type == kAllocatorTypeTLAB) {
4193     DCHECK(bump_pointer_space_ != nullptr);
4194     const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
4195     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4196       return nullptr;
4197     }
4198     // Try allocating a new thread local buffer; if the allocation fails, the space must be
4199     // full, so return null.
4200     if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4201       return nullptr;
4202     }
4203     *bytes_tl_bulk_allocated = new_tlab_size;
4204   } else {
4205     DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4206     DCHECK(region_space_ != nullptr);
4207     if (space::RegionSpace::kRegionSize >= alloc_size) {
4208       // Non-large. Check OOME for a tlab.
4209       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4210                                             space::RegionSpace::kRegionSize,
4211                                             grow))) {
4212         const size_t new_tlab_size = kUsePartialTlabs
4213             ? std::max(alloc_size, kPartialTlabSize)
4214             : gc::space::RegionSpace::kRegionSize;
4215         // Try to allocate a tlab.
4216         if (!region_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
4217           // Failed to allocate a tlab. Try non-tlab.
4218           return region_space_->AllocNonvirtual<false>(alloc_size,
4219                                                        bytes_allocated,
4220                                                        usable_size,
4221                                                        bytes_tl_bulk_allocated);
4222         }
4223         // Fall-through to using the TLAB below.
4224       } else {
4225         // Check OOME for a non-tlab allocation.
4226         if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4227           return region_space_->AllocNonvirtual<false>(alloc_size,
4228                                                        bytes_allocated,
4229                                                        usable_size,
4230                                                        bytes_tl_bulk_allocated);
4231         }
4232         // Neither tlab nor non-tlab works. Give up.
4233         return nullptr;
4234       }
4235     } else {
4236       // Large. Check OOME.
4237       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4238         return region_space_->AllocNonvirtual<false>(alloc_size,
4239                                                      bytes_allocated,
4240                                                      usable_size,
4241                                                      bytes_tl_bulk_allocated);
4242       }
4243       return nullptr;
4244     }
4245   }
4246   // Refilled TLAB, return.
4247   mirror::Object* ret = self->AllocTlab(alloc_size);
4248   DCHECK(ret != nullptr);
4249   *bytes_allocated = alloc_size;
4250   *usable_size = alloc_size;
4251   return ret;
4252 }
4253 
4254 const Verification* Heap::GetVerification() const {
4255   return verification_.get();
4256 }
4257 
4258 void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
4259   VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
4260              << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
4261 }
4262 
4263 class Heap::TriggerPostForkCCGcTask : public HeapTask {
4264  public:
4265   explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
4266   void Run(Thread* self) override {
4267     gc::Heap* heap = Runtime::Current()->GetHeap();
4268     // Trigger a GC, if not already done. The first GC after fork, whenever it
4269     // takes place, will adjust the thresholds to normal levels.
4270     if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
4271       heap->RequestConcurrentGC(self, kGcCauseBackground, false);
4272     }
4273   }
4274 };
4275 
4276 void Heap::PostForkChildAction(Thread* self) {
4277   // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
4278   // max values to avoid GC during app launch.
4279   if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
4280     // Set target_footprint_ to the largest allowed value.
4281     SetIdealFootprint(growth_limit_);
4282     // Set concurrent_start_bytes_ to half of the heap size.
4283     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
4284     concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
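    // Illustrative effect (sizes assumed): with a 256MiB growth_limit_, target_footprint_ is
    // raised to 256MiB and concurrent_start_bytes_ to roughly 128MiB, so the freshly forked app
    // can allocate through launch without triggering a concurrent GC until the task below fires.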
4285 
4286     GetTaskProcessor()->AddTask(
4287         self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
4288   }
4289 }
4290 
4291 void Heap::VisitReflectiveTargets(ReflectiveValueVisitor *visit) {
4292   VisitObjectsPaused([&visit](mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
4293     art::ObjPtr<mirror::Class> klass(ref->GetClass());
4294     // All these classes are in the BootstrapClassLoader.
4295     if (!klass->IsBootStrapClassLoaded()) {
4296       return;
4297     }
4298     if (GetClassRoot<mirror::Method>()->IsAssignableFrom(klass) ||
4299         GetClassRoot<mirror::Constructor>()->IsAssignableFrom(klass)) {
4300       down_cast<mirror::Executable*>(ref)->VisitTarget(visit);
4301     } else if (art::GetClassRoot<art::mirror::Field>() == klass) {
4302       down_cast<mirror::Field*>(ref)->VisitTarget(visit);
4303     } else if (art::GetClassRoot<art::mirror::MethodHandle>()->IsAssignableFrom(klass)) {
4304       down_cast<mirror::MethodHandle*>(ref)->VisitTarget(visit);
4305     } else if (art::GetClassRoot<art::mirror::FieldVarHandle>()->IsAssignableFrom(klass)) {
4306       down_cast<mirror::FieldVarHandle*>(ref)->VisitTarget(visit);
4307     } else if (art::GetClassRoot<art::mirror::DexCache>()->IsAssignableFrom(klass)) {
4308       down_cast<mirror::DexCache*>(ref)->VisitReflectiveTargets(visit);
4309     }
4310   });
4311 }
4312 
4313 bool Heap::AddHeapTask(gc::HeapTask* task) {
4314   Thread* const self = Thread::Current();
4315   if (!CanAddHeapTask(self)) {
4316     return false;
4317   }
4318   GetTaskProcessor()->AddTask(self, task);
4319   return true;
4320 }
4321 
4322 }  // namespace gc
4323 }  // namespace art
4324