/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/arena_containers.h"
#include "base/array_ref.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "compilation_kind.h"
#include "jit_memory_region.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class MarkCodeClosure;

// Type of bitmap used for tracking live functions in the JIT code cache for the purposes
// of garbage collecting code.
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;

// The state of profile-based compilation in the zygote.
// - kInProgress:      JIT compilation is happening
// - kDone:            JIT compilation is finished, and the zygote is preparing to notify
//                     the other processes.
// - kNotifiedOk:      the zygote has notified the other processes, which can start
//                     sharing the boot image method mappings.
// - kNotifiedFailure: the zygote has notified the other processes, but they
//                     cannot share the boot image method mappings due to
//                     unexpected errors.
enum class ZygoteCompilationState : uint8_t {
  kInProgress = 0,
  kDone = 1,
  kNotifiedOk = 2,
  kNotifiedFailure = 3,
};
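
// Illustrative check (a sketch, not part of the original header): because the values
// are ordered, a process reading the shared state can derive the notification status
// directly from the enum ordering, e.g.
//
//   bool notified = state > ZygoteCompilationState::kDone;     // kNotifiedOk or kNotifiedFailure
//   bool can_map  = state == ZygoteCompilationState::kNotifiedOk;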

// Class abstraction over a map of ArtMethod -> compiled code, where the
// ArtMethods are compiled by the zygote, and the map acts as a communication
// channel between the zygote and the other processes.
// For the zygote process, this map is the only place it stores compiled
// code; its JitCodeCache::method_code_map_ is empty.
//
// This map is writable only by the zygote, and readable by all children.
class ZygoteMap {
 public:
  struct Entry {
    ArtMethod* method;
    // Note that we currently only allocate code in the low 4GB, so we could reserve
    // just 4 bytes for the code pointer. For simplicity, and in case we move to
    // 64-bit code addresses, keep it a void* for now.
    const void* code_ptr;
  };

  explicit ZygoteMap(JitMemoryRegion* region)
      : map_(), region_(region), compilation_state_(nullptr) {}

  // Initialize the data structure so it can hold `number_of_methods` mappings.
  // Note that the map is fixed size and never grows.
  void Initialize(uint32_t number_of_methods) REQUIRES(!Locks::jit_lock_);

  // Add the mapping method -> code.
  void Put(const void* code, ArtMethod* method) REQUIRES(Locks::jit_lock_);

  // Return the code pointer for the given method, or null if there is none.
  // If `pc` is not zero, additionally check that `pc` falls into that code's
  // range, and return null otherwise.
  const void* GetCodeFor(ArtMethod* method, uintptr_t pc = 0) const;

  // Return whether the map has associated code for the given method.
  bool ContainsMethod(ArtMethod* method) const {
    return GetCodeFor(method) != nullptr;
  }
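
  // Illustrative use (a sketch, not part of the original header), assuming `map`
  // is a ZygoteMap and `pc` was observed during a stack walk:
  //
  //   const void* code = map.GetCodeFor(method);      // any code for `method`, or null
  //   const void* hit  = map.GetCodeFor(method, pc);  // non-null only if `pc` is inside that code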

  void SetCompilationState(ZygoteCompilationState state) {
    region_->WriteData(compilation_state_, state);
  }

  bool IsCompilationDoneButNotNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ == ZygoteCompilationState::kDone;
  }

  bool IsCompilationNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ > ZygoteCompilationState::kDone;
  }

  bool CanMapBootImageMethods() const {
    return compilation_state_ != nullptr &&
           *compilation_state_ == ZygoteCompilationState::kNotifiedOk;
  }

  ArrayRef<const Entry>::const_iterator cbegin() const {
    return map_.cbegin();
  }
  ArrayRef<const Entry>::iterator begin() {
    return map_.begin();
  }
  ArrayRef<const Entry>::const_iterator cend() const {
    return map_.cend();
  }
  ArrayRef<const Entry>::iterator end() {
    return map_.end();
  }

 private:
  // The map allocated with `region_`.
  ArrayRef<const Entry> map_;

  // The region in which the map is allocated.
  JitMemoryRegion* const region_;

  // The current state of compilation in the zygote. Starts with kInProgress,
  // and should end with kNotifiedOk or kNotifiedFailure.
  const ZygoteCompilationState* compilation_state_;

  DISALLOW_COPY_AND_ASSIGN(ZygoteMap);
};
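
// Minimal usage sketch (illustrative only; everything other than the ZygoteMap API
// shown here is hypothetical). The zygote publishes compiled methods, then records
// the outcome of notifying the other processes:
//
//   ZygoteMap map(region);                 // `region` is the shared JitMemoryRegion
//   map.Initialize(number_of_methods);     // fixed size, never grows
//   {
//     MutexLock mu(Thread::Current(), *Locks::jit_lock_);
//     map.Put(code_ptr, method);           // only the zygote writes
//   }
//   map.SetCompilationState(ZygoteCompilationState::kDone);
//
// A forked child only reads the mapping:
//
//   if (map.CanMapBootImageMethods() && map.ContainsMethod(method)) {
//     const void* entry = map.GetCodeFor(method);
//     // ... install `entry` as the method's entry point.
//   }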

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Use a very low initial capacity for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;
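  // Worked numbers (an editorial note, not part of the original header): with the
  // values above, kReservedCapacity = 4 * kInitialCapacity, i.e. 32 KB on debug
  // builds and 256 KB on release builds; the "256KB" figure in the comment refers
  // to the release configuration.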

  // Create the code cache; on failure, an error message is returned in the out
  // arg `error_msg`.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
  ~JitCodeCache();

  bool NotifyCompilationOf(ArtMethod* method,
                           Thread* self,
                           CompilationKind compilation_kind,
                           bool prejit,
                           JitMemoryRegion* region)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this pc in the private region (i.e. not from zygote).
  bool PrivateRegionContainsPc(const void* pc) const;

  // Returns true if either the method's entrypoint is JIT compiled code or it is the
  // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Allocate a region for both code and data in the JIT code cache.
  // The reserved memory is left completely uninitialized.
  bool Reserve(Thread* self,
               JitMemoryRegion* region,
               size_t code_size,
               size_t stack_map_size,
               size_t number_of_roots,
               ArtMethod* method,
               /*out*/ArrayRef<const uint8_t>* reserved_code,
               /*out*/ArrayRef<const uint8_t>* reserved_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Initialize code and data of previously allocated memory.
  //
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  bool Commit(Thread* self,
              JitMemoryRegion* region,
              ArtMethod* method,
              ArrayRef<const uint8_t> reserved_code,  // Uninitialized destination.
              ArrayRef<const uint8_t> code,           // Compiler output (source).
              ArrayRef<const uint8_t> reserved_data,  // Uninitialized destination.
              const std::vector<Handle<mirror::Object>>& roots,
              ArrayRef<const uint8_t> stack_map,      // Compiler output (source).
              const std::vector<uint8_t>& debug_info,
              bool is_full_debug_info,
              CompilationKind compilation_kind,
              bool has_should_deoptimize_flag,
              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Free the previously allocated memory regions.
  void Free(Thread* self, JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);
  void FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::jit_lock_);
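
  // Illustrative compiler-side flow (a sketch, not the actual JIT compiler code;
  // local names are hypothetical). Memory is first reserved, then either committed
  // with the compiler output or released again on failure:
  //
  //   ArrayRef<const uint8_t> reserved_code;
  //   ArrayRef<const uint8_t> reserved_data;
  //   if (!code_cache->Reserve(self, region, code_size, stack_map_size, number_of_roots,
  //                            method, &reserved_code, &reserved_data)) {
  //     return false;  // Cache is full; a collection may run before retrying.
  //   }
  //   if (!code_cache->Commit(self, region, method, reserved_code, code, reserved_data,
  //                           roots, stack_map, debug_info, is_full_debug_info,
  //                           compilation_kind, has_should_deoptimize_flag,
  //                           cha_single_implementation_list)) {
  //     code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
  //     return false;
  //   }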

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.  'method' may be null
  // when LookupMethodHeader is called from MarkCodeClosure::Run() in debug builds.  Return null
  // if 'pc' is not in the code cache.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
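
  // Illustrative lookup (a sketch; the surrounding code is hypothetical): given a
  // return pc observed during a stack walk, find the JIT method header, if any:
  //
  //   JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
  //   OatQuickMethodHeader* header = nullptr;
  //   if (cache->ContainsPc(reinterpret_cast<const void*>(pc))) {
  //     header = cache->LookupMethodHeader(pc, method);  // null if `pc` has no JIT code
  //   }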

  // Removes the method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // this will collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
  }

  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateAllCompiledCode()
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
  // 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);

  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }
  ZygoteMap* GetZygoteMap() {
    return &zygote_map_;
  }

  // If JIT GC has been disabled (and instrumentation has been enabled), this will return the
  // jit-compiled entrypoint for this method. Otherwise it will return null.
  const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Fetch the code of a method that was JITted, but whose entrypoint the JIT could
  // not update due to the resolution trampoline.
  const void* GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
  // This is used for removing non-debuggable JIT code at the point we realize the runtime
  // is debuggable. Also clear the Precompiled flag from all methods so the non-debuggable code
  // doesn't come back.
  void TransitionToDebuggable() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  JitMemoryRegion* GetCurrentRegion();
  bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }
  bool CanAllocateProfilingInfo() {
    // If we don't have a private region, we cannot allocate a profiling info.
    // A shared region does not, in general, support GC objects, which a
    // profiling info can reference.
    JitMemoryRegion* region = GetCurrentRegion();
    return region->IsValid() && !IsSharedRegion(*region);
  }

  // Return whether the given `ptr` is in the zygote executable memory space.
  bool IsInZygoteExecSpace(const void* ptr) const {
    return shared_region_.IsInExecSpace(ptr);
  }

 private:
  JitCodeCache();

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
  // The non-mutator lock version should be used if possible. This method will release then
  // re-acquire the mutator lock.
  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes the method from the cache. The caller must ensure that all threads
  // are suspended and that the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Call the given callback for every compiled method in the code cache.
  void VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb)
      REQUIRES(Locks::jit_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr)
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);

  // Return whether the code cache's capacity is at its maximum.
  bool IsAtMaxCapacity() const REQUIRES(Locks::jit_lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  bool IsInZygoteDataSpace(const void* ptr) const {
    return shared_region_.IsInDataSpace(ptr);
  }

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record that `method` is being compiled with the given mode.
  void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES(Locks::jit_lock_);

  // Remove `method` from the list of methods being compiled with the given mode.
  void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES(Locks::jit_lock_);

  // Return whether `method` is being compiled with the given mode.
  bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
      REQUIRES(Locks::jit_lock_);

  // Return whether `method` is being compiled in any mode.
  bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);

  class JniStubKey;
  class JniStubData;

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);

  // -------------- JIT memory regions ------------------------------------- //

  // Shared region, inherited from the zygote.
  JitMemoryRegion shared_region_;

  // Process's own region.
  JitMemoryRegion private_region_;

  // -------------- Global JIT maps --------------------------------------- //

  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds compiled code associated to the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds compiled code associated to the ArtMethod. Used when pre-jitting
  // methods whose entrypoints have the resolution stub.
  SafeMap<ArtMethod*, const void*> saved_compiled_methods_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds osr compiled code associated to the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);

  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);

  // Methods we are currently compiling, one set for each kind of compilation.
  std::set<ArtMethod*> current_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);
  std::set<ArtMethod*> current_osr_compilations_ GUARDED_BY(Locks::jit_lock_);
  std::set<ArtMethod*> current_baseline_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Methods that the zygote has compiled and can be shared across processes
  // forked from the zygote.
  ZygoteMap zygote_map_;

  // -------------- JIT GC related data structures ----------------------- //

  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);

  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);

  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(Locks::jit_lock_);

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);

  // ---------------- JIT statistics -------------------------------------- //

  // Number of baseline compilations done throughout the lifetime of the JIT.
  size_t number_of_baseline_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of optimized compilations done throughout the lifetime of the JIT.
  size_t number_of_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);

  // Histograms for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histograms for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histograms for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_