1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_GC_HEAP_H_
18 #define ART_RUNTIME_GC_HEAP_H_
19 
20 #include <iosfwd>
21 #include <string>
22 #include <unordered_set>
23 #include <vector>
24 
25 #include <android-base/logging.h>
26 
27 #include "allocator_type.h"
28 #include "base/atomic.h"
29 #include "base/macros.h"
30 #include "base/mutex.h"
31 #include "base/runtime_debug.h"
32 #include "base/safe_map.h"
33 #include "base/time_utils.h"
34 #include "gc/collector/gc_type.h"
35 #include "gc/collector/iteration.h"
36 #include "gc/collector_type.h"
37 #include "gc/gc_cause.h"
38 #include "gc/space/image_space_loading_order.h"
39 #include "gc/space/large_object_space.h"
40 #include "handle.h"
41 #include "obj_ptr.h"
42 #include "offsets.h"
43 #include "process_state.h"
44 #include "read_barrier_config.h"
45 #include "runtime_globals.h"
46 #include "verify_object.h"
47 
48 namespace art {
49 
50 class ConditionVariable;
51 enum class InstructionSet;
52 class IsMarkedVisitor;
53 class Mutex;
54 class ReflectiveValueVisitor;
55 class RootVisitor;
56 class StackVisitor;
57 class Thread;
58 class ThreadPool;
59 class TimingLogger;
60 class VariableSizedHandleScope;
61 
62 namespace mirror {
63 class Class;
64 class Object;
65 }  // namespace mirror
66 
67 namespace gc {
68 
69 class AllocationListener;
70 class AllocRecordObjectMap;
71 class GcPauseListener;
72 class HeapTask;
73 class ReferenceProcessor;
74 class TaskProcessor;
75 class Verification;
76 
77 namespace accounting {
78 template <typename T> class AtomicStack;
79 typedef AtomicStack<mirror::Object> ObjectStack;
80 class CardTable;
81 class HeapBitmap;
82 class ModUnionTable;
83 class ReadBarrierTable;
84 class RememberedSet;
85 }  // namespace accounting
86 
87 namespace collector {
88 class ConcurrentCopying;
89 class GarbageCollector;
90 class MarkSweep;
91 class SemiSpace;
92 }  // namespace collector
93 
94 namespace allocator {
95 class RosAlloc;
96 }  // namespace allocator
97 
98 namespace space {
99 class AllocSpace;
100 class BumpPointerSpace;
101 class ContinuousMemMapAllocSpace;
102 class DiscontinuousSpace;
103 class DlMallocSpace;
104 class ImageSpace;
105 class LargeObjectSpace;
106 class MallocSpace;
107 class RegionSpace;
108 class RosAllocSpace;
109 class Space;
110 class ZygoteSpace;
111 }  // namespace space
112 
113 enum HomogeneousSpaceCompactResult {
114   // Success.
115   kSuccess,
116   // Reject due to disabled moving GC.
117   kErrorReject,
118   // Unsupported due to the current configuration.
119   kErrorUnsupported,
120   // System is shutting down.
121   kErrorVMShuttingDown,
122 };
123 
124 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
125 static constexpr bool kUseRosAlloc = true;
126 
127 // If true, use thread-local allocation stack.
128 static constexpr bool kUseThreadLocalAllocationStack = true;
129 
130 class Heap {
131  public:
132   // How much we grow the TLAB if we can do it.
133   static constexpr size_t kPartialTlabSize = 16 * KB;
134   static constexpr bool kUsePartialTlabs = true;
135 
136   static constexpr size_t kDefaultStartingSize = kPageSize;
137   static constexpr size_t kDefaultInitialSize = 2 * MB;
138   static constexpr size_t kDefaultMaximumSize = 256 * MB;
139   static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
140   static constexpr size_t kDefaultMaxFree = 2 * MB;
141   static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
142   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
143   static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
144   static constexpr size_t kDefaultTLABSize = 32 * KB;
145   static constexpr double kDefaultTargetUtilization = 0.75;
146   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
147   // Primitive arrays larger than this size are put in the large object space.
148   static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
149   static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
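  // Illustrative only, assuming the common 4 KB kPageSize: kMinLargeObjectThreshold is then
  // 12 KB, so a primitive array of roughly 3000 int elements or more is allocated in the
  // large object space rather than the main space.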
150   // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
151   static constexpr bool kDefaultEnableParallelGC = false;
152   static uint8_t* const kPreferredAllocSpaceBegin;
153 
154   // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
155   // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
156   static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
157       USE_ART_LOW_4G_ALLOCATOR ?
158           space::LargeObjectSpaceType::kFreeList
159         : space::LargeObjectSpaceType::kMap;
160 
161   // Used so that we don't overflow the allocation time atomic integer.
162   static constexpr size_t kTimeAdjust = 1024;
163 
164   // Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
165   // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
166   // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec
167   // on Android.
168 #ifdef __ANDROID__
169   static constexpr uint32_t kNotifyNativeInterval = 32;
170 #else
171   // Some host mallinfo() implementations are slow. And memory is less scarce.
172   static constexpr uint32_t kNotifyNativeInterval = 384;
173 #endif
174 
175   // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
176   // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
177   // make it safe to allocate that many bytes between checks.
178   static constexpr size_t kCheckImmediatelyThreshold = 300000;
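  // Illustrative arithmetic: with the on-device kNotifyNativeInterval of 32,
  // kCheckImmediatelyThreshold * kNotifyNativeInterval is 300000 * 32 bytes, i.e. roughly
  // 9.6 MB that may be allocated between checks in the worst case.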
179 
180   // How often we allow heap trimming to happen (nanoseconds).
181   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
182   // How long we wait after a transition request to perform a collector transition (nanoseconds).
183   static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
184   // Whether the transition-wait applies or not. Zero wait will stress the
185   // transition code and collector, but increases jank probability.
186   DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);
187 
188   // Create a heap with the requested sizes. The possibly empty
189   // image_file_name specifies the boot image Spaces to load,
190   // based on ImageWriter output.
191   Heap(size_t initial_size,
192        size_t growth_limit,
193        size_t min_free,
194        size_t max_free,
195        double target_utilization,
196        double foreground_heap_growth_multiplier,
197        size_t stop_for_native_allocs,
198        size_t capacity,
199        size_t non_moving_space_capacity,
200        const std::vector<std::string>& boot_class_path,
201        const std::vector<std::string>& boot_class_path_locations,
202        const std::string& image_file_name,
203        InstructionSet image_instruction_set,
204        CollectorType foreground_collector_type,
205        CollectorType background_collector_type,
206        space::LargeObjectSpaceType large_object_space_type,
207        size_t large_object_threshold,
208        size_t parallel_gc_threads,
209        size_t conc_gc_threads,
210        bool low_memory_mode,
211        size_t long_pause_threshold,
212        size_t long_gc_threshold,
213        bool ignore_target_footprint,
214        bool always_log_explicit_gcs,
215        bool use_tlab,
216        bool verify_pre_gc_heap,
217        bool verify_pre_sweeping_heap,
218        bool verify_post_gc_heap,
219        bool verify_pre_gc_rosalloc,
220        bool verify_pre_sweeping_rosalloc,
221        bool verify_post_gc_rosalloc,
222        bool gc_stress_mode,
223        bool measure_gc_performance,
224        bool use_homogeneous_space_compaction,
225        bool use_generational_cc,
226        uint64_t min_interval_homogeneous_space_compaction_by_oom,
227        bool dump_region_info_before_gc,
228        bool dump_region_info_after_gc,
229        space::ImageSpaceLoadingOrder image_space_loading_order);
230 
231   ~Heap();
232 
233   // Allocates and initializes storage for an object instance.
234   template <bool kInstrumented = true, typename PreFenceVisitor>
235   mirror::Object* AllocObject(Thread* self,
236                               ObjPtr<mirror::Class> klass,
237                               size_t num_bytes,
238                               const PreFenceVisitor& pre_fence_visitor)
239       REQUIRES_SHARED(Locks::mutator_lock_)
240       REQUIRES(!*gc_complete_lock_,
241                !*pending_task_lock_,
242                !*backtrace_lock_,
243                !process_state_update_lock_,
244                !Roles::uninterruptible_) {
245     return AllocObjectWithAllocator<kInstrumented>(self,
246                                                    klass,
247                                                    num_bytes,
248                                                    GetCurrentAllocator(),
249                                                    pre_fence_visitor);
250   }
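  // Illustrative only (hypothetical call site, not part of this header): the pre-fence
  // visitor runs on the newly allocated object before it is published to other threads, e.g.
  //
  //   ObjPtr<mirror::Object> obj = heap->AllocObject(self, klass, byte_count,
  //       [](ObjPtr<mirror::Object> o, size_t usable_size) {
  //         // Initialize any fields of `o` that must be visible before publication.
  //       });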
251 
252   template <bool kInstrumented = true, typename PreFenceVisitor>
253   mirror::Object* AllocNonMovableObject(Thread* self,
254                                         ObjPtr<mirror::Class> klass,
255                                         size_t num_bytes,
256                                         const PreFenceVisitor& pre_fence_visitor)
257       REQUIRES_SHARED(Locks::mutator_lock_)
258       REQUIRES(!*gc_complete_lock_,
259                !*pending_task_lock_,
260                !*backtrace_lock_,
261                !process_state_update_lock_,
262                !Roles::uninterruptible_) {
263     return AllocObjectWithAllocator<kInstrumented>(self,
264                                                    klass,
265                                                    num_bytes,
266                                                    GetCurrentNonMovingAllocator(),
267                                                    pre_fence_visitor);
268   }
269 
270   template <bool kInstrumented = true, bool kCheckLargeObject = true, typename PreFenceVisitor>
271   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
272                                                          ObjPtr<mirror::Class> klass,
273                                                          size_t byte_count,
274                                                          AllocatorType allocator,
275                                                          const PreFenceVisitor& pre_fence_visitor)
276       REQUIRES_SHARED(Locks::mutator_lock_)
277       REQUIRES(!*gc_complete_lock_,
278                !*pending_task_lock_,
279                !*backtrace_lock_,
280                !process_state_update_lock_,
281                !Roles::uninterruptible_);
282 
283   AllocatorType GetCurrentAllocator() const {
284     return current_allocator_;
285   }
286 
287   AllocatorType GetCurrentNonMovingAllocator() const {
288     return current_non_moving_allocator_;
289   }
290 
291   // Visit all of the live objects in the heap.
292   template <typename Visitor>
293   ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
294       REQUIRES_SHARED(Locks::mutator_lock_)
295       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
296   template <typename Visitor>
297   ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
298       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
299 
300   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
301       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
302 
303   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
304       REQUIRES_SHARED(Locks::mutator_lock_);
305 
306   // Inform the garbage collector of non-malloc-allocated native memory that might become
307   // reclaimable in the future as a result of Java garbage collection.
308   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
309       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
310   void RegisterNativeFree(JNIEnv* env, size_t bytes);
311 
312   // Notify the garbage collector of malloc allocations that might be reclaimable
313   // as a result of Java garbage collection. Each such call represents approximately
314   // kNotifyNativeInterval such allocations.
315   void NotifyNativeAllocations(JNIEnv* env)
316       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
317 
318   uint32_t GetNotifyNativeInterval() {
319     return kNotifyNativeInterval;
320   }
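  // A minimal sketch (hypothetical native-allocation caller, not part of this class) of how
  // the kNotifyNativeInterval contract described above is typically honoured:
  //
  //   static std::atomic<uint32_t> native_alloc_count{0};
  //   if (native_alloc_count.fetch_add(1u, std::memory_order_relaxed) %
  //           heap->GetNotifyNativeInterval() == 0) {
  //     heap->NotifyNativeAllocations(env);  // May request or trigger a GC.
  //   }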
321 
322   // Change the allocator, updates entrypoints.
323   void ChangeAllocator(AllocatorType allocator)
324       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
325 
326   // Change the collector to be one of the possible options (MS, CMS, SS).
327   void ChangeCollector(CollectorType collector_type)
328       REQUIRES(Locks::mutator_lock_);
329 
330   // The given reference is believed to point to an object in the Java heap; check its soundness.
331   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
332   // proper lock ordering for it.
333   void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;
334 
335   // Consistency check of all live references.
336   void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
337   // Returns how many failures occurred.
338   size_t VerifyHeapReferences(bool verify_referents = true)
339       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
340   bool VerifyMissingCardMarks()
341       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
342 
343   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
344   // and doesn't abort on error, allowing the caller to report more
345   // meaningful diagnostics.
346   bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
347 
348   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
349   // very slow.
350   bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
351       REQUIRES_SHARED(Locks::mutator_lock_);
352 
353   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
354   // Requires the heap lock to be held.
355   bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
356                           bool search_allocation_stack = true,
357                           bool search_live_stack = true,
358                           bool sorted = false)
359       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
360 
361   // Returns true if there is any chance that the object (obj) will move.
362   bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
363 
364   // Temporarily disable moving (compacting) GC; Increment/Decrement calls nest and must be balanced.
365   void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
366   void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
367 
368   // Temporarily disable thread flip for JNI critical calls.
369   void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
370   void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
371   void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
372   void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
373 
374   // Clear all of the mark bits; doesn't clear bitmaps which have the same live bits as mark bits.
375   // Mutator lock is required for GetContinuousSpaces.
376   void ClearMarkedObjects()
377       REQUIRES(Locks::heap_bitmap_lock_)
378       REQUIRES_SHARED(Locks::mutator_lock_);
379 
380   // Initiates an explicit garbage collection.
381   void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
382       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
383 
384   // Does a concurrent GC, should only be called by the GC daemon thread
385   // through runtime.
386   void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
387       REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_,
388                !*pending_task_lock_, !process_state_update_lock_);
389 
390   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
391   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
392   void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
393                       bool use_is_assignable_from,
394                       uint64_t* counts)
395       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
396       REQUIRES_SHARED(Locks::mutator_lock_);
397 
398   // Implements VMDebug.getInstancesOfClasses and JDWP RT_Instances.
399   void GetInstances(VariableSizedHandleScope& scope,
400                     Handle<mirror::Class> c,
401                     bool use_is_assignable_from,
402                     int32_t max_count,
403                     std::vector<Handle<mirror::Object>>& instances)
404       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
405       REQUIRES_SHARED(Locks::mutator_lock_);
406 
407   // Implements JDWP OR_ReferringObjects.
408   void GetReferringObjects(VariableSizedHandleScope& scope,
409                            Handle<mirror::Object> o,
410                            int32_t max_count,
411                            std::vector<Handle<mirror::Object>>& referring_objects)
412       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
413       REQUIRES_SHARED(Locks::mutator_lock_);
414 
415   // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
416   // implement dalvik.system.VMRuntime.clearGrowthLimit.
417   void ClearGrowthLimit();
418 
419   // Make the current growth limit the new maximum capacity and unmap pages at the end of spaces
420   // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
421   void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);
422 
423   // Target ideal heap utilization ratio, implements
424   // dalvik.system.VMRuntime.getTargetHeapUtilization.
425   double GetTargetHeapUtilization() const {
426     return target_utilization_;
427   }
428 
429   // Data structure memory usage tracking.
430   void RegisterGCAllocation(size_t bytes);
431   void RegisterGCDeAllocation(size_t bytes);
432 
433   // Set the heap's private space pointers to be the same as the space based on its type. Public
434   // due to usage by tests.
435   void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
436       REQUIRES(!Locks::heap_bitmap_lock_);
437   void AddSpace(space::Space* space)
438       REQUIRES(!Locks::heap_bitmap_lock_)
439       REQUIRES(Locks::mutator_lock_);
440   void RemoveSpace(space::Space* space)
441     REQUIRES(!Locks::heap_bitmap_lock_)
442     REQUIRES(Locks::mutator_lock_);
443 
444   double GetPreGcWeightedAllocatedBytes() const {
445     return pre_gc_weighted_allocated_bytes_;
446   }
447 
448   double GetPostGcWeightedAllocatedBytes() const {
449     return post_gc_weighted_allocated_bytes_;
450   }
451 
452   void CalculatePreGcWeightedAllocatedBytes();
453   void CalculatePostGcWeightedAllocatedBytes();
454   uint64_t GetTotalGcCpuTime();
455 
456   uint64_t GetProcessCpuStartTime() const {
457     return process_cpu_start_time_ns_;
458   }
459 
460   uint64_t GetPostGCLastProcessCpuTime() const {
461     return post_gc_last_process_cpu_time_ns_;
462   }
463 
464   // Set target ideal heap utilization ratio, implements
465   // dalvik.system.VMRuntime.setTargetHeapUtilization.
466   void SetTargetHeapUtilization(float target);
467 
468   // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
469   // from the system. Doesn't allow the space to exceed its growth limit.
470   void SetIdealFootprint(size_t max_allowed_footprint);
471 
472   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
473   // waited for.
474   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);
475 
476   // Update the heap's process state to a new value, may cause compaction to occur.
477   void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
478       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
479 
480   bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
481     // No lock since vector empty is thread safe.
482     return !continuous_spaces_.empty();
483   }
484 
485   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
486       REQUIRES_SHARED(Locks::mutator_lock_) {
487     return continuous_spaces_;
488   }
489 
490   const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
491     return discontinuous_spaces_;
492   }
493 
494   const collector::Iteration* GetCurrentGcIteration() const {
495     return &current_gc_iteration_;
496   }
497   collector::Iteration* GetCurrentGcIteration() {
498     return &current_gc_iteration_;
499   }
500 
501   // Enable verification of object references when the runtime is sufficiently initialized.
502   void EnableObjectValidation() {
503     verify_object_mode_ = kVerifyObjectSupport;
504     if (verify_object_mode_ > kVerifyObjectModeDisabled) {
505       VerifyHeap();
506     }
507   }
508 
509   // Disable object reference verification for image writing.
510   void DisableObjectValidation() {
511     verify_object_mode_ = kVerifyObjectModeDisabled;
512   }
513 
514   // Other checks may be performed if we know the heap should be in a healthy state.
515   bool IsObjectValidationEnabled() const {
516     return verify_object_mode_ > kVerifyObjectModeDisabled;
517   }
518 
519   // Returns true if low memory mode is enabled.
520   bool IsLowMemoryMode() const {
521     return low_memory_mode_;
522   }
523 
524   // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
525   // Scales heap growth, min free, and max free.
526   double HeapGrowthMultiplier() const;
527 
528   // Freed bytes can be negative in cases where we copy objects from a compacted space to a
529   // free-list backed space.
530   void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
531 
532   // Record the bytes freed by thread-local buffer revoke.
533   void RecordFreeRevoke();
534 
535   accounting::CardTable* GetCardTable() const {
536     return card_table_.get();
537   }
538 
539   accounting::ReadBarrierTable* GetReadBarrierTable() const {
540     return rb_table_.get();
541   }
542 
543   void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
544 
545   // Returns the number of bytes currently allocated.
546   // The result should be treated as an approximation, if it is being concurrently updated.
547   size_t GetBytesAllocated() const {
548     return num_bytes_allocated_.load(std::memory_order_relaxed);
549   }
550 
551   bool GetUseGenerationalCC() const {
552     return use_generational_cc_;
553   }
554 
555   // Returns the number of objects currently allocated.
556   size_t GetObjectsAllocated() const
557       REQUIRES(!Locks::heap_bitmap_lock_);
558 
559   // Returns the total number of objects allocated since the heap was created.
560   uint64_t GetObjectsAllocatedEver() const;
561 
562   // Returns the total number of bytes allocated since the heap was created.
563   uint64_t GetBytesAllocatedEver() const;
564 
565   // Returns the total number of objects freed since the heap was created.
566   // With default memory order, this should be viewed only as a hint.
567   uint64_t GetObjectsFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
568     return total_objects_freed_ever_.load(mo);
569   }
570 
571   // Returns the total number of bytes freed since the heap was created.
572   // With default memory order, this should be viewed only as a hint.
573   uint64_t GetBytesFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
574     return total_bytes_freed_ever_.load(mo);
575   }
576 
577   space::RegionSpace* GetRegionSpace() const {
578     return region_space_;
579   }
580 
581   // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
582   // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
583   // were specified. Android apps start with a growth limit (small heap size) which is
584   // cleared/extended for large apps.
585   size_t GetMaxMemory() const {
586     // There are some race conditions in the allocation code that can cause bytes allocated to
587     // become larger than growth_limit_ in rare cases.
588     return std::max(GetBytesAllocated(), growth_limit_);
589   }
590 
591   // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
592   // consumed by an application.
593   size_t GetTotalMemory() const;
594 
595   // Returns approximately how much free memory we have until the next GC happens.
596   size_t GetFreeMemoryUntilGC() const {
597     return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
598                               GetBytesAllocated());
599   }
600 
601   // Returns approximately how much free memory we have until the next OOME happens.
602   size_t GetFreeMemoryUntilOOME() const {
603     return UnsignedDifference(growth_limit_, GetBytesAllocated());
604   }
605 
606   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
607   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
608   size_t GetFreeMemory() const {
609     return UnsignedDifference(GetTotalMemory(),
610                               num_bytes_allocated_.load(std::memory_order_relaxed));
611   }
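  // Taken together, the accessors above back the java.lang.Runtime memory queries described in
  // their comments:
  //   Runtime.maxMemory()   -> GetMaxMemory()
  //   Runtime.totalMemory() -> GetTotalMemory()
  //   Runtime.freeMemory()  -> GetFreeMemory()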
612 
613   // Get the space that corresponds to an object's address. Current implementation searches all
614   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
615   // TODO: consider using faster data structure like binary tree.
616   space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
617       REQUIRES_SHARED(Locks::mutator_lock_);
618 
619   space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
620       REQUIRES_SHARED(Locks::mutator_lock_);
621 
622   space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
623                                                               bool fail_ok) const
624       REQUIRES_SHARED(Locks::mutator_lock_);
625 
626   space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
627       REQUIRES_SHARED(Locks::mutator_lock_);
628 
629   space::Space* FindSpaceFromAddress(const void* ptr) const
630       REQUIRES_SHARED(Locks::mutator_lock_);
631 
632   std::string DumpSpaceNameFromAddress(const void* addr) const
633       REQUIRES_SHARED(Locks::mutator_lock_);
634 
635   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
636 
637   // Do a pending collector transition.
638   void DoPendingCollectorTransition()
639       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
640 
641   // Deflate monitors, ... and trim the spaces.
642   void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
643 
644   void RevokeThreadLocalBuffers(Thread* thread);
645   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
646   void RevokeAllThreadLocalBuffers();
647   void AssertThreadLocalBuffersAreRevoked(Thread* thread);
648   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
649   void RosAllocVerification(TimingLogger* timings, const char* name)
650       REQUIRES(Locks::mutator_lock_);
651 
652   accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
653     return live_bitmap_.get();
654   }
655 
656   accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
657     return mark_bitmap_.get();
658   }
659 
660   accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
661     return live_stack_.get();
662   }
663 
664   void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
665 
666   // Mark and empty stack.
667   void FlushAllocStack()
668       REQUIRES_SHARED(Locks::mutator_lock_)
669       REQUIRES(Locks::heap_bitmap_lock_);
670 
671   // Revoke all the thread-local allocation stacks.
672   void RevokeAllThreadLocalAllocationStacks(Thread* self)
673       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);
674 
675   // Mark all the objects in the allocation stack in the specified bitmap.
676   // TODO: Refactor?
677   void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
678                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
679                       accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
680                       accounting::ObjectStack* stack)
681       REQUIRES_SHARED(Locks::mutator_lock_)
682       REQUIRES(Locks::heap_bitmap_lock_);
683 
684   // Mark the specified allocation stack as live.
685   void MarkAllocStackAsLive(accounting::ObjectStack* stack)
686       REQUIRES_SHARED(Locks::mutator_lock_)
687       REQUIRES(Locks::heap_bitmap_lock_);
688 
689   // Unbind any bound bitmaps.
690   void UnBindBitmaps()
691       REQUIRES(Locks::heap_bitmap_lock_)
692       REQUIRES_SHARED(Locks::mutator_lock_);
693 
694   // Returns the boot image spaces. There may be multiple boot image spaces.
695   const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
696     return boot_image_spaces_;
697   }
698 
699   bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
700       REQUIRES_SHARED(Locks::mutator_lock_);
701 
702   bool IsInBootImageOatFile(const void* p) const
703       REQUIRES_SHARED(Locks::mutator_lock_);
704 
705   // Get the start address of the boot images if any; otherwise returns 0.
706   uint32_t GetBootImagesStartAddress() const {
707     return boot_images_start_address_;
708   }
709 
710   // Get the size of all boot images, including the heap and oat areas.
711   uint32_t GetBootImagesSize() const {
712     return boot_images_size_;
713   }
714 
715   // Check if a pointer points to a boot image.
716   bool IsBootImageAddress(const void* p) const {
717     return reinterpret_cast<uintptr_t>(p) - boot_images_start_address_ < boot_images_size_;
718   }
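  // Note that the unsigned subtraction above folds the lower- and upper-bound checks into a
  // single comparison: a pointer below boot_images_start_address_ wraps around to a large
  // value and therefore fails the < boot_images_size_ test.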
719 
720   space::DlMallocSpace* GetDlMallocSpace() const {
721     return dlmalloc_space_;
722   }
723 
724   space::RosAllocSpace* GetRosAllocSpace() const {
725     return rosalloc_space_;
726   }
727 
728   // Return the corresponding rosalloc space.
729   space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
730       REQUIRES_SHARED(Locks::mutator_lock_);
731 
732   space::MallocSpace* GetNonMovingSpace() const {
733     return non_moving_space_;
734   }
735 
736   space::LargeObjectSpace* GetLargeObjectsSpace() const {
737     return large_object_space_;
738   }
739 
740   // Returns the free list space that may contain movable objects (the
741   // one that's not the non-moving space), either rosalloc_space_ or
742   // dlmalloc_space_.
743   space::MallocSpace* GetPrimaryFreeListSpace() {
744     if (kUseRosAlloc) {
745       DCHECK(rosalloc_space_ != nullptr);
746       // reinterpret_cast is necessary as the space class hierarchy
747       // isn't known (#included) yet here.
748       return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
749     } else {
750       DCHECK(dlmalloc_space_ != nullptr);
751       return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
752     }
753   }
754 
755   void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
756   std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
757 
758   // GC performance measuring
759   void DumpGcPerformanceInfo(std::ostream& os)
760       REQUIRES(!*gc_complete_lock_);
761   void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
762 
763   // Thread pool.
764   void CreateThreadPool();
765   void DeleteThreadPool();
766   ThreadPool* GetThreadPool() {
767     return thread_pool_.get();
768   }
769   size_t GetParallelGCThreadCount() const {
770     return parallel_gc_threads_;
771   }
772   size_t GetConcGCThreadCount() const {
773     return conc_gc_threads_;
774   }
775   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
776   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
777 
778   accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
779   void AddRememberedSet(accounting::RememberedSet* remembered_set);
780   // Also deletes the remembered set.
781   void RemoveRememberedSet(space::Space* space);
782 
783   bool IsCompilingBoot() const;
784   bool HasBootImageSpace() const {
785     return !boot_image_spaces_.empty();
786   }
787 
788   ReferenceProcessor* GetReferenceProcessor() {
789     return reference_processor_.get();
790   }
791   TaskProcessor* GetTaskProcessor() {
792     return task_processor_.get();
793   }
794 
795   bool HasZygoteSpace() const {
796     return zygote_space_ != nullptr;
797   }
798 
799   // Returns the active concurrent copying collector.
800   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
801     if (use_generational_cc_) {
802       DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) ||
803              (active_concurrent_copying_collector_ == young_concurrent_copying_collector_));
804     } else {
805       DCHECK_EQ(active_concurrent_copying_collector_, concurrent_copying_collector_);
806     }
807     return active_concurrent_copying_collector_;
808   }
809 
810   CollectorType CurrentCollectorType() {
811     return collector_type_;
812   }
813 
814   bool IsGcConcurrentAndMoving() const {
815     if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
816       // Assume no transition when a concurrent moving collector is used.
817       DCHECK_EQ(collector_type_, foreground_collector_type_);
818       return true;
819     }
820     return false;
821   }
822 
823   bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
824     MutexLock mu(self, *gc_complete_lock_);
825     return disable_moving_gc_count_ > 0;
826   }
827 
828   // Request an asynchronous trim.
829   void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
830 
831   // Request asynchronous GC.
832   void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
833       REQUIRES(!*pending_task_lock_);
834 
835   // Whether or not we may use a garbage collector, used so that we only create collectors we need.
836   bool MayUseCollector(CollectorType type) const;
837 
838   // Used by tests to reduce timing-dependent flakiness in OOME behavior.
839   void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
840     min_interval_homogeneous_space_compaction_by_oom_ = interval;
841   }
842 
843   // Helpers for android.os.Debug.getRuntimeStat().
844   uint64_t GetGcCount() const;
845   uint64_t GetGcTime() const;
846   uint64_t GetBlockingGcCount() const;
847   uint64_t GetBlockingGcTime() const;
848   void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
849   void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
850 
851   // Allocation tracking support
852   // Callers to this function use double-checked locking to ensure safety on allocation_records_
853   bool IsAllocTrackingEnabled() const {
854     return alloc_tracking_enabled_.load(std::memory_order_relaxed);
855   }
856 
857   void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
858     alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed);
859   }
860 
861   // Return the current stack depth of allocation records.
862   size_t GetAllocTrackerStackDepth() const {
863     return alloc_record_depth_;
864   }
865 
866   // Set the current stack depth of allocation records.
867   void SetAllocTrackerStackDepth(size_t alloc_record_depth) {
868     alloc_record_depth_ = alloc_record_depth;
869   }
870 
871   AllocRecordObjectMap* GetAllocationRecords() const REQUIRES(Locks::alloc_tracker_lock_) {
872     return allocation_records_.get();
873   }
874 
875   void SetAllocationRecords(AllocRecordObjectMap* records)
876       REQUIRES(Locks::alloc_tracker_lock_);
877 
878   void VisitAllocationRecords(RootVisitor* visitor) const
879       REQUIRES_SHARED(Locks::mutator_lock_)
880       REQUIRES(!Locks::alloc_tracker_lock_);
881 
882   void SweepAllocationRecords(IsMarkedVisitor* visitor) const
883       REQUIRES_SHARED(Locks::mutator_lock_)
884       REQUIRES(!Locks::alloc_tracker_lock_);
885 
886   void DisallowNewAllocationRecords() const
887       REQUIRES_SHARED(Locks::mutator_lock_)
888       REQUIRES(!Locks::alloc_tracker_lock_);
889 
890   void AllowNewAllocationRecords() const
891       REQUIRES_SHARED(Locks::mutator_lock_)
892       REQUIRES(!Locks::alloc_tracker_lock_);
893 
894   void BroadcastForNewAllocationRecords() const
895       REQUIRES(!Locks::alloc_tracker_lock_);
896 
897   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
898 
899   // Create a new alloc space and compact default alloc space to it.
900   HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
901       REQUIRES(!*gc_complete_lock_, !process_state_update_lock_);
902   bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
903 
904   // Install an allocation listener.
905   void SetAllocationListener(AllocationListener* l);
906   // Remove an allocation listener. Note: the listener must not be deleted, as for performance
907   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
908   void RemoveAllocationListener();
909 
910   // Install a gc pause listener.
911   void SetGcPauseListener(GcPauseListener* l);
912   // Get the currently installed gc pause listener, or null.
913   GcPauseListener* GetGcPauseListener() {
914     return gc_pause_listener_.load(std::memory_order_acquire);
915   }
916   // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
917   // reasons, we assume it stays valid when we read it (so that we don't require a lock).
918   void RemoveGcPauseListener();
919 
920   const Verification* GetVerification() const;
921 
922   void PostForkChildAction(Thread* self);
923 
924   void TraceHeapSize(size_t heap_size);
925 
926   bool AddHeapTask(gc::HeapTask* task);
927 
928  private:
929   class ConcurrentGCTask;
930   class CollectorTransitionTask;
931   class HeapTrimTask;
932   class TriggerPostForkCCGcTask;
933 
934   // Compact source space to target space. Returns the collector used.
935   collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
936                                        space::ContinuousMemMapAllocSpace* source_space,
937                                        GcCause gc_cause)
938       REQUIRES(Locks::mutator_lock_);
939 
940   void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
941   void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
942       REQUIRES(!*gc_complete_lock_);
943   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
944 
945   double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
946                                            uint64_t current_process_cpu_time) const;
947 
948   // Create a mem map with a preferred base address.
949   static MemMap MapAnonymousPreferredAddress(const char* name,
950                                              uint8_t* request_begin,
951                                              size_t capacity,
952                                              std::string* out_error_str);
953 
954   bool SupportHSpaceCompaction() const {
955     // Returns true if we can do hspace compaction
956     return main_space_backup_ != nullptr;
957   }
958 
959   // Size_t saturating arithmetic
960   static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
961     return x > y ? x - y : 0;
962   }
963   static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
964     return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
965   }
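  // For example: UnsignedDifference(3, 5) == 0 rather than wrapping to SIZE_MAX - 1, and
  // UnsignedSum(std::numeric_limits<size_t>::max(), 1) saturates to
  // std::numeric_limits<size_t>::max().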
966 
967   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
968     return
969         allocator_type != kAllocatorTypeRegionTLAB &&
970         allocator_type != kAllocatorTypeBumpPointer &&
971         allocator_type != kAllocatorTypeTLAB &&
972         allocator_type != kAllocatorTypeRegion;
973   }
974   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
975     if (kUseReadBarrier) {
976       // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
977       return true;
978     }
979     return
980         allocator_type != kAllocatorTypeTLAB &&
981         allocator_type != kAllocatorTypeBumpPointer;
982   }
983   static bool IsMovingGc(CollectorType collector_type) {
984     return
985         collector_type == kCollectorTypeCC ||
986         collector_type == kCollectorTypeSS ||
987         collector_type == kCollectorTypeCCBackground ||
988         collector_type == kCollectorTypeHomogeneousSpaceCompact;
989   }
990   bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
991       REQUIRES_SHARED(Locks::mutator_lock_);
992 
993   // Checks whether we should garbage collect:
994   ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
995   float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
996   ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
997                                               size_t new_num_bytes_allocated,
998                                               ObjPtr<mirror::Object>* obj)
999       REQUIRES_SHARED(Locks::mutator_lock_)
1000       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
1001   void CheckGCForNative(Thread* self)
1002       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
1003 
1004   accounting::ObjectStack* GetMarkStack() {
1005     return mark_stack_.get();
1006   }
1007 
1008   // We don't force this to be inlined since it is a slow path.
1009   template <bool kInstrumented, typename PreFenceVisitor>
1010   mirror::Object* AllocLargeObject(Thread* self,
1011                                    ObjPtr<mirror::Class>* klass,
1012                                    size_t byte_count,
1013                                    const PreFenceVisitor& pre_fence_visitor)
1014       REQUIRES_SHARED(Locks::mutator_lock_)
1015       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
1016                !*backtrace_lock_, !process_state_update_lock_);
1017 
1018   // Handles Allocate()'s slow allocation path with GC involved after
1019   // an initial allocation attempt failed.
1020   mirror::Object* AllocateInternalWithGc(Thread* self,
1021                                          AllocatorType allocator,
1022                                          bool instrumented,
1023                                          size_t num_bytes,
1024                                          size_t* bytes_allocated,
1025                                          size_t* usable_size,
1026                                          size_t* bytes_tl_bulk_allocated,
1027                                          ObjPtr<mirror::Class>* klass)
1028       REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
1029       REQUIRES(Roles::uninterruptible_)
1030       REQUIRES_SHARED(Locks::mutator_lock_);
1031 
1032   // Allocate into a specific space.
1033   mirror::Object* AllocateInto(Thread* self,
1034                                space::AllocSpace* space,
1035                                ObjPtr<mirror::Class> c,
1036                                size_t bytes)
1037       REQUIRES_SHARED(Locks::mutator_lock_);
1038 
1039   // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
1040   // wrong space.
1041   void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);
1042 
1043   // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
1044   // that the switch statement is constant optimized in the entrypoints.
1045   template <const bool kInstrumented, const bool kGrow>
1046   ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
1047                                               AllocatorType allocator_type,
1048                                               size_t alloc_size,
1049                                               size_t* bytes_allocated,
1050                                               size_t* usable_size,
1051                                               size_t* bytes_tl_bulk_allocated)
1052       REQUIRES_SHARED(Locks::mutator_lock_);
1053 
1054   mirror::Object* AllocWithNewTLAB(Thread* self,
1055                                    AllocatorType allocator_type,
1056                                    size_t alloc_size,
1057                                    bool grow,
1058                                    size_t* bytes_allocated,
1059                                    size_t* usable_size,
1060                                    size_t* bytes_tl_bulk_allocated)
1061       REQUIRES_SHARED(Locks::mutator_lock_);
1062 
1063   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
1064       REQUIRES_SHARED(Locks::mutator_lock_);
1065 
1066   // Are we out of memory, and thus should force a GC or fail?
1067   // For concurrent collectors, out of memory is defined by growth_limit_.
1068   // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
1069   // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
1070   // to accommodate the allocation.
1071   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
1072                                                size_t alloc_size,
1073                                                bool grow);
1074 
1075   // Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
1076   void RunFinalization(JNIEnv* env, uint64_t timeout);
1077 
1078   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
1079   // waited for.
1080   collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
1081       REQUIRES(gc_complete_lock_);
1082 
1083   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
1084       REQUIRES(!*pending_task_lock_);
1085 
1086   void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
1087       REQUIRES_SHARED(Locks::mutator_lock_)
1088       REQUIRES(!*pending_task_lock_);
1089   bool IsGCRequestPending() const;
1090 
1091   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
1092   // which type of Gc was actually run.
1093   collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
1094                                            GcCause gc_cause,
1095                                            bool clear_soft_references)
1096       REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
1097                !*pending_task_lock_, !process_state_update_lock_);
1098 
1099   void PreGcVerification(collector::GarbageCollector* gc)
1100       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
1101   void PreGcVerificationPaused(collector::GarbageCollector* gc)
1102       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
1103   void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
1104       REQUIRES(Locks::mutator_lock_);
1105   void PreSweepingGcVerification(collector::GarbageCollector* gc)
1106       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1107   void PostGcVerification(collector::GarbageCollector* gc)
1108       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
1109   void PostGcVerificationPaused(collector::GarbageCollector* gc)
1110       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
1111 
1112   // Find a collector based on GC type.
1113   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
1114 
1115   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
1116   void CreateMainMallocSpace(MemMap&& mem_map,
1117                              size_t initial_size,
1118                              size_t growth_limit,
1119                              size_t capacity);
1120 
1121   // Create a malloc space based on a mem map. Does not set the space as default.
1122   space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
1123                                                   size_t initial_size,
1124                                                   size_t growth_limit,
1125                                                   size_t capacity,
1126                                                   const char* name,
1127                                                   bool can_move_objects);
1128 
1129   // Given the current contents of the alloc space, increase the allowed heap footprint to match
1130   // the target utilization ratio.  This should only be called immediately after a full garbage
1131   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
1132   // the GC was run.
1133   void GrowForUtilization(collector::GarbageCollector* collector_ran,
1134                           size_t bytes_allocated_before_gc = 0)
1135       REQUIRES(!process_state_update_lock_);
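  // Illustrative numbers only (actual growth also honours the min_free_/max_free_ clamps and
  // the foreground heap growth multiplier): with target_utilization_ = 0.75 and 60 MB live
  // after a full GC, the new footprint target is roughly 60 MB / 0.75 = 80 MB.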
1136 
1137   size_t GetPercentFree();
1138 
1139   // Swap the allocation stack with the live stack.
1140   void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
1141 
1142   // Clear cards and update the mod union table. When process_alloc_space_cards is true,
1143   // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
1144   // not process the alloc space if process_alloc_space_cards is false.
1145   void ProcessCards(TimingLogger* timings,
1146                     bool use_rem_sets,
1147                     bool process_alloc_space_cards,
1148                     bool clear_alloc_space_cards)
1149       REQUIRES_SHARED(Locks::mutator_lock_);
1150 
1151   // Push an object onto the allocation stack.
1152   void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
1153       REQUIRES_SHARED(Locks::mutator_lock_)
1154       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1155   void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
1156       REQUIRES_SHARED(Locks::mutator_lock_)
1157       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1158   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
1159       REQUIRES_SHARED(Locks::mutator_lock_)
1160       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
1161 
1162   void ClearConcurrentGCRequest();
1163   void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
1164   void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
1165 
1166   // Whether the current collector runs concurrently with the mutators. Currently true for the
1167   // concurrent copying and concurrent mark sweep collectors, false for other GC types.
1168   bool IsGcConcurrent() const ALWAYS_INLINE {
1169     return collector_type_ == kCollectorTypeCC ||
1170         collector_type_ == kCollectorTypeCMS ||
1171         collector_type_ == kCollectorTypeCCBackground;
1172   }
1173 
1174   // Trim the managed and native spaces by releasing unused memory back to the OS.
1175   void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
1176 
1177   // Trim unused pages at the end of indirect reference tables.
1178   void TrimIndirectReferenceTables(Thread* self);
1179 
1180   template <typename Visitor>
1181   ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
1182       REQUIRES_SHARED(Locks::mutator_lock_)
1183       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1184   template <typename Visitor>
1185   ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
1186       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1187 
1188   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
1189 
1190   // GC stress mode attempts to do one GC per unique backtrace.
1191   void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
1192       REQUIRES_SHARED(Locks::mutator_lock_)
1193       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
1194                !*backtrace_lock_, !process_state_update_lock_);
1195 
1196   collector::GcType NonStickyGcType() const {
1197     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
1198   }
1199 
1200   // Return the amount of space we allow for native memory when deciding whether to
1201   // collect. We collect when a weighted sum of Java memory plus native memory exceeds
1202   // the similarly weighted sum of the Java heap size target and this value.
1203   ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
1204     // We keep the traditional limit of max_free_ in place for small heaps,
1205     // but allow it to be adjusted upward for large heaps to limit GC overhead.
1206     return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
1207   }
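  // For example (illustrative, using the default max_free_ of 2 MB): a 64 MB target footprint
  // yields a native watermark of 64 MB / 8 + 2 MB = 10 MB.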
1208 
1209   ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
1210 
1211   // On switching the app from background to foreground, grow the heap size
1212   // to incorporate the foreground heap growth multiplier.
1213   void GrowHeapOnJankPerceptibleSwitch() REQUIRES(!process_state_update_lock_);
1214 
1215   // Update *_freed_ever_ counters to reflect current GC values.
1216   void IncrementFreedEver();
1217 
1218   // Keeps a VLOG call out of heap-inl.h, which is transitively included in half the world.
1219   static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
1220 
1221   // Return our best approximation of the number of bytes of native memory that
1222   // are currently in use, and could possibly be reclaimed as an indirect result
1223   // of a garbage collection.
1224   size_t GetNativeBytes();
1225 
1226   // All-known continuous spaces, where objects lie within fixed bounds.
1227   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1228 
1229   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
1230   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
1231 
1232   // All-known alloc spaces, where objects may be or have been allocated.
1233   std::vector<space::AllocSpace*> alloc_spaces_;
1234 
1235   // A space where non-movable objects are allocated; when compaction is enabled it contains
1236   // Classes, ArtMethods, ArtFields, and other non-moving objects.
1237   space::MallocSpace* non_moving_space_;
1238 
1239   // Space which we use for the kAllocatorTypeROSAlloc.
1240   space::RosAllocSpace* rosalloc_space_;
1241 
1242   // Space which we use for the kAllocatorTypeDlMalloc.
1243   space::DlMallocSpace* dlmalloc_space_;
1244 
1245   // The main space is the space which the GC copies to and from on process state updates. This
1246   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
1247   space::MallocSpace* main_space_;
1248 
1249   // The large object space we are currently allocating into.
1250   space::LargeObjectSpace* large_object_space_;
1251 
1252   // The card table, dirtied by the write barrier.
1253   std::unique_ptr<accounting::CardTable> card_table_;
1254 
1255   std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
1256 
1257   // A mod-union table remembers all of the references from its space to other spaces.
1258   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
1259       mod_union_tables_;
1260 
1261   // A remembered set remembers all of the references from its space to the target space.
1262   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
1263       remembered_sets_;
1264 
1265   // The current collector type.
1266   CollectorType collector_type_;
1267   // Which collector we use when the app is in the foreground.
1268   CollectorType foreground_collector_type_;
1269   // Which collector we will use when the app is notified of a transition to background.
1270   CollectorType background_collector_type_;
1271   // Desired collector type; the heap trimming daemon transitions the heap if it != collector_type_.
1272   CollectorType desired_collector_type_;
1273 
1274   // Lock which guards pending tasks.
1275   Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1276 
1277   // How many GC threads we may use for paused parts of garbage collection.
1278   const size_t parallel_gc_threads_;
1279 
1280   // How many GC threads we may use for unpaused parts of garbage collection.
1281   const size_t conc_gc_threads_;
1282 
1283   // Boolean for if we are in low memory mode.
1284   const bool low_memory_mode_;
1285 
1286   // If we get a pause longer than the long pause log threshold, we print out the GC after it
1287   // finishes.
1288   const size_t long_pause_log_threshold_;
1289 
1290   // If we get a GC longer than the long GC log threshold, we print out the GC after it finishes.
1291   const size_t long_gc_log_threshold_;
1292 
1293   // Starting time of the new process; meant to be used for measuring total process CPU time.
1294   uint64_t process_cpu_start_time_ns_;
1295 
1296   // Process CPU time when the last GC started (pre) and finished (post); meant to be used to
1297   // measure the duration between two GCs.
1298   uint64_t pre_gc_last_process_cpu_time_ns_;
1299   uint64_t post_gc_last_process_cpu_time_ns_;
1300 
1301   // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
1302   double pre_gc_weighted_allocated_bytes_;
1303   double post_gc_weighted_allocated_bytes_;
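
  // Illustrative arithmetic for the weighted counters above, based only on the formula in the
  // comment (the actual accumulation happens in heap.cc): if 100 MB have been allocated and
  // 250 ms of process CPU time have elapsed since the pre-GC sample, the pre-GC weighted value
  // grows by
  //   1e8 bytes * 0.25 s = 2.5e7 byte-seconds
  // (2.5e16 byte-nanoseconds if the CPU-time delta stays in ns, as the *_ns_ fields suggest).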
1304 
1305   // If we ignore the target footprint, the heap is allowed to grow until it hits capacity; this
1306   // is useful for benchmarking since it reduces time spent in GC to a low percentage.
1307   const bool ignore_target_footprint_;
1308 
1309   // If we are running tests or certain other configurations, we might not actually
1310   // want logs for explicit GCs since they can get spammy.
1311   const bool always_log_explicit_gcs_;
1312 
1313   // Lock which guards zygote space creation.
1314   Mutex zygote_creation_lock_;
1315 
1316   // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
1317   // zygote space creation.
1318   space::ZygoteSpace* zygote_space_;
1319 
1320   // Minimum allocation size of large object.
1321   size_t large_object_threshold_;
1322 
1323   // Guards access to the state of GC; the associated condition variable is used to signal when
1324   // a GC completes.
1325   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1326   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
1327 
1328   // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
1329   Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1330   std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
1331   // This counter keeps track of how many threads are currently in a JNI critical section. This is
1332   // incremented once per thread even with nested enters.
1333   size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
1334   bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
1335 
1336   // Reference processor.
1337   std::unique_ptr<ReferenceProcessor> reference_processor_;
1338 
1339   // Task processor, proxies heap trim requests to the daemon threads.
1340   std::unique_ptr<TaskProcessor> task_processor_;
1341 
1342   // Collector type of the running GC.
1343   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
1344 
1345   // Cause of the last running GC.
1346   volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
1347 
1348   // The thread currently running the GC.
1349   volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
1350 
1351   // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
1352   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
1353   collector::GcType next_gc_type_;
1354 
1355   // Maximum size that the heap can reach.
1356   size_t capacity_;
1357 
1358   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
1359   // programs it is "cleared", making it the same as capacity.
1360   // Only weakly enforced for simultaneous allocations.
1361   size_t growth_limit_;
1362 
1363   // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
1364   // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
1365   // concurrent GC case.
1366   Atomic<size_t> target_footprint_;
1367 
1368   // Computed with foreground-multiplier in GrowForUtilization() when run in
1369   // jank non-perceptible state. On update to process state from background to
1370   // foreground we set target_footprint_ to this value.
1371   Mutex process_state_update_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1372   size_t min_foreground_target_footprint_ GUARDED_BY(process_state_update_lock_);
1373 
1374   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
1375   // it completes ahead of an allocation failing.
1376   // A multiple of this is also used to determine when to trigger a GC in response to native
1377   // allocation.
1378   size_t concurrent_start_bytes_;
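
  // Hypothetical sketch of how this threshold is consulted on the allocation path (names and
  // placement are assumptions for illustration; the real check lives in heap-inl.h / heap.cc):
  //
  //   size_t new_bytes =
  //       num_bytes_allocated_.fetch_add(alloc_size, std::memory_order_relaxed) + alloc_size;
  //   if (UNLIKELY(new_bytes >= concurrent_start_bytes_)) {
  //     // Request a background GC early enough that it can finish before allocations would fail.
  //     RequestConcurrentGC(self, kGcCauseBackground, /*force_full=*/ false);
  //   }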
1379 
1380   // Since the heap was created, how many bytes have been freed.
1381   std::atomic<uint64_t> total_bytes_freed_ever_;
1382 
1383   // Since the heap was created, how many objects have been freed.
1384   std::atomic<uint64_t> total_objects_freed_ever_;
1385 
1386   // Number of bytes currently allocated and not yet reclaimed. Includes active
1387   // TLABS in their entirety, even if they have not yet been parceled out.
1388   Atomic<size_t> num_bytes_allocated_;
1389 
1390   // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
1391   // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
1392   // not include bytes allocated through the system malloc, since those are implicitly included.
1393   Atomic<size_t> native_bytes_registered_;
1394 
1395   // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
1396   Atomic<size_t> old_native_bytes_allocated_;
1397 
1398   // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
1399   // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
1400   Atomic<uint32_t> native_objects_notified_;
1401 
1402   // Number of bytes freed by thread local buffer revokes. This will
1403   // cancel out the ahead-of-time bulk counting of bytes allocated in
1404   // rosalloc thread-local buffers.  It is temporarily accumulated
1405   // here to be subtracted from num_bytes_allocated_ later at the next
1406   // GC.
1407   Atomic<size_t> num_bytes_freed_revoke_;
1408 
1409   // Info related to the current or previous GC iteration.
1410   collector::Iteration current_gc_iteration_;
1411 
1412   // Heap verification flags.
1413   const bool verify_missing_card_marks_;
1414   const bool verify_system_weaks_;
1415   const bool verify_pre_gc_heap_;
1416   const bool verify_pre_sweeping_heap_;
1417   const bool verify_post_gc_heap_;
1418   const bool verify_mod_union_table_;
1419   bool verify_pre_gc_rosalloc_;
1420   bool verify_pre_sweeping_rosalloc_;
1421   bool verify_post_gc_rosalloc_;
1422   const bool gc_stress_mode_;
1423 
1424   // RAII that temporarily disables the rosalloc verification during
1425   // the zygote fork.
1426   class ScopedDisableRosAllocVerification {
1427    private:
1428     Heap* const heap_;
1429     const bool orig_verify_pre_gc_;
1430     const bool orig_verify_pre_sweeping_;
1431     const bool orig_verify_post_gc_;
1432 
1433    public:
1434     explicit ScopedDisableRosAllocVerification(Heap* heap)
1435         : heap_(heap),
1436           orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
1437           orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
1438           orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
1439       heap_->verify_pre_gc_rosalloc_ = false;
1440       heap_->verify_pre_sweeping_rosalloc_ = false;
1441       heap_->verify_post_gc_rosalloc_ = false;
1442     }
1443     ~ScopedDisableRosAllocVerification() {
1444       heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
1445       heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
1446       heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
1447     }
1448   };
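
  // Minimal usage sketch for the RAII helper above (the real call site is on the zygote fork
  // path in heap.cc, not shown here):
  //
  //   {
  //     ScopedDisableRosAllocVerification disable_verification(this);
  //     // ... create the zygote space; rosalloc verification stays disabled ...
  //   }  // destructor restores the three verify_*_rosalloc_ flags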
1449 
1450   // Parallel GC data structures.
1451   std::unique_ptr<ThreadPool> thread_pool_;
1452 
1453   // A bitmap that is set corresponding to the known live objects since the last GC cycle.
1454   std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1455   // A bitmap that is set corresponding to the marked objects in the current GC cycle.
1456   std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1457 
1458   // Mark stack that we reuse to avoid re-allocating the mark stack.
1459   std::unique_ptr<accounting::ObjectStack> mark_stack_;
1460 
1461   // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
1462   // to use the live bitmap as the old mark bitmap.
1463   const size_t max_allocation_stack_size_;
1464   std::unique_ptr<accounting::ObjectStack> allocation_stack_;
1465 
1466   // Second allocation stack so that we can process allocation with the heap unlocked.
1467   std::unique_ptr<accounting::ObjectStack> live_stack_;
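
  // Rough sketch of how the two stacks cooperate for sticky-bit collections (a simplification,
  // with SwapStacks() assumed as the swapping helper): at the start of a sticky GC the stacks are
  // swapped, so everything allocated since the last GC sits in live_stack_ and can be processed
  // while new allocations keep landing in the now-empty allocation_stack_ without holding the
  // heap lock.
  //
  //   allocation_stack_.swap(live_stack_);  // conceptually what SwapStacks() does
  //   // Mark the objects recorded in live_stack_; older objects reuse the live bitmap as their
  //   // mark bitmap, which is what makes the sticky mark bits trick work.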
1468 
1469   // Allocator type.
1470   AllocatorType current_allocator_;
1471   const AllocatorType current_non_moving_allocator_;
1472 
1473   // Which GCs we run in order when an allocation fails.
1474   std::vector<collector::GcType> gc_plan_;
1475 
1476   // Bump pointer spaces.
1477   space::BumpPointerSpace* bump_pointer_space_;
1478   // Temp space is the space which the semispace collector copies to.
1479   space::BumpPointerSpace* temp_space_;
1480 
1481   // Region space, used by the concurrent collector.
1482   space::RegionSpace* region_space_;
1483 
1484   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
1485   // utilization, regardless of target utilization ratio.
1486   const size_t min_free_;
1487 
1488   // The ideal maximum free size, when we grow the heap for utilization.
1489   const size_t max_free_;
1490 
1491   // Target ideal heap utilization ratio.
1492   double target_utilization_;
1493 
1494   // How much more we grow the heap when we are a foreground app instead of background.
1495   double foreground_heap_growth_multiplier_;
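
  // Worked example of how the tuning knobs above are commonly combined (an approximation of the
  // GrowForUtilization() logic referenced earlier, not its exact code): with target_utilization_
  // of 0.75 and 60 MiB live after a full GC, the ideal footprint is 60 / 0.75 = 80 MiB, i.e.
  // 20 MiB of free headroom; that headroom is then clamped into [min_free_, max_free_], and for
  // a foreground app it is additionally scaled by foreground_heap_growth_multiplier_.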
1496 
1497   // The amount of native memory allocation since the last GC required to cause us to wait for a
1498   // collection as a result of native allocation. Very large values can cause the device to run
1499   // out of memory, due to lack of finalization to reclaim native memory.  Making it too small can
1500   // cause jank in apps like launcher that intentionally allocate large amounts of memory in rapid
1501   // succession. (b/122099093) 1/4 to 1/3 of physical memory seems to be a good number.
1502   const size_t stop_for_native_allocs_;
1503 
1504   // Total time which mutators are paused or waiting for GC to complete.
1505   uint64_t total_wait_time_;
1506 
1507   // The current state of heap verification, may be enabled or disabled.
1508   VerifyObjectMode verify_object_mode_;
1509 
1510   // Compacting GC disable count, prevents compacting GC from running iff > 0.
1511   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
1512 
1513   std::vector<collector::GarbageCollector*> garbage_collectors_;
1514   collector::SemiSpace* semi_space_collector_;
1515   collector::ConcurrentCopying* active_concurrent_copying_collector_;
1516   collector::ConcurrentCopying* young_concurrent_copying_collector_;
1517   collector::ConcurrentCopying* concurrent_copying_collector_;
1518 
1519   const bool is_running_on_memory_tool_;
1520   const bool use_tlab_;
1521 
1522   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
1523   // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1524   std::unique_ptr<space::MallocSpace> main_space_backup_;
1525 
1526   // Minimal interval allowed between two homogeneous space compactions caused by OOM.
1527   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1528 
1529   // Time of the last homogeneous space compaction caused by OOM.
1530   uint64_t last_time_homogeneous_space_compaction_by_oom_;
1531 
1532   // Count of OOMs avoided (delayed) by homogeneous space compaction.
1533   Atomic<size_t> count_delayed_oom_;
1534 
1535   // Count for requested homogeneous space compaction.
1536   Atomic<size_t> count_requested_homogeneous_space_compaction_;
1537 
1538   // Count for ignored homogeneous space compaction.
1539   Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1540 
1541   // Count for performed homogeneous space compaction.
1542   Atomic<size_t> count_performed_homogeneous_space_compaction_;
1543 
1544   // Whether or not a concurrent GC is pending.
1545   Atomic<bool> concurrent_gc_pending_;
1546 
1547   // Active tasks which we can modify (change target time, desired collector type, etc.).
1548   CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
1549   HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);
1550 
1551   // Whether or not we use homogeneous space compaction to avoid OOM errors.
1552   bool use_homogeneous_space_compaction_for_oom_;
1553 
1554   // If true, enable generational collection when using the Concurrent Copying
1555   // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
1556   // for major collections. Set in Heap constructor.
1557   const bool use_generational_cc_;
1558 
1559   // True if the currently running collection has made some thread wait.
1560   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
1561   // The number of blocking GC runs.
1562   uint64_t blocking_gc_count_;
1563   // The total duration of blocking GC runs.
1564   uint64_t blocking_gc_time_;
1565   // The duration of the window for the GC count rate histograms.
1566   static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
1567   // Maximum number of missed histogram windows for which statistics will be collected.
1568   static constexpr uint64_t kGcCountRateHistogramMaxNumMissedWindows = 100;
1569   // The last time when the GC count rate histograms were updated.
1570   // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
1571   uint64_t last_update_time_gc_count_rate_histograms_;
1572   // The running count of GC runs in the last window.
1573   uint64_t gc_count_last_window_;
1574   // The running count of blocking GC runs in the last window.
1575   uint64_t blocking_gc_count_last_window_;
1576   // The maximum number of buckets in the GC count rate histograms.
1577   static constexpr size_t kGcCountRateMaxBucketCount = 200;
1578   // The histogram of the number of GC invocations per window duration.
1579   Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1580   // The histogram of the number of blocking GC invocations per window duration.
1581   Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
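
  // Illustrative window arithmetic for the histogram fields above (an assumed reading of
  // UpdateGcCountRateHistograms(), using only the constants defined here): with a 10 s window, if
  // the histograms were last updated at t = 0 and a GC finishes at t = 35 s, three full windows
  // (0-10, 10-20, 20-30) have elapsed; their counts are recorded (zero entries for empty windows,
  // capped at kGcCountRateHistogramMaxNumMissedWindows), the per-window counters are reset, and
  // the new GC counts toward the 30-40 s window.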
1582 
1583   // Allocation tracking support
1584   Atomic<bool> alloc_tracking_enabled_;
1585   std::unique_ptr<AllocRecordObjectMap> allocation_records_;
1586   size_t alloc_record_depth_;
1587 
1588   // GC stress related data structures.
1589   Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1590   // Debugging variables, seen backtraces vs unique backtraces.
1591   Atomic<uint64_t> seen_backtrace_count_;
1592   Atomic<uint64_t> unique_backtrace_count_;
1593   // Stack trace hashes that we already saw.
1594   std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
1595 
1596   // We disable GC when we are shutting down the runtime in case there are daemon threads still
1597   // allocating.
1598   bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
1599 
1600   // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
1601   // emit region info before and after each GC cycle.
1602   bool dump_region_info_before_gc_;
1603   bool dump_region_info_after_gc_;
1604 
1605   // Boot image spaces.
1606   std::vector<space::ImageSpace*> boot_image_spaces_;
1607 
1608   // Boot image address range. Includes images and oat files.
1609   uint32_t boot_images_start_address_;
1610   uint32_t boot_images_size_;
1611 
1612   // An installed allocation listener.
1613   Atomic<AllocationListener*> alloc_listener_;
1614   // An installed GC Pause listener.
1615   Atomic<GcPauseListener*> gc_pause_listener_;
1616 
1617   std::unique_ptr<Verification> verification_;
1618 
1619   friend class CollectorTransitionTask;
1620   friend class collector::GarbageCollector;
1621   friend class collector::ConcurrentCopying;
1622   friend class collector::MarkSweep;
1623   friend class collector::SemiSpace;
1624   friend class GCCriticalSection;
1625   friend class ReferenceQueue;
1626   friend class ScopedGCCriticalSection;
1627   friend class ScopedInterruptibleGCCriticalSection;
1628   friend class VerifyReferenceCardVisitor;
1629   friend class VerifyReferenceVisitor;
1630   friend class VerifyObjectVisitor;
1631 
1632   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1633 };
1634 
1635 }  // namespace gc
1636 }  // namespace art
1637 
1638 #endif  // ART_RUNTIME_GC_HEAP_H_
1639