/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include <android-base/logging.h>

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
#include "dex/method_reference.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/allocator/dlmalloc.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "jit/jit_scoped_code_cache_write.h"
#include "linear_alloc.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_callbacks.h"
#include "profile/profile_compilation_info.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;

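// Key identifying a JNI stub in the cache. Stubs are shared between native methods that have
// the same shorty and the same static/fast-native/critical-native/synchronized flags, so a
// single JniStubData entry may serve several ArtMethods.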
class JitCodeCache::JniStubKey {
 public:
  explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
      : shorty_(method->GetShorty()),
        is_static_(method->IsStatic()),
        is_fast_native_(method->IsFastNative()),
        is_critical_native_(method->IsCriticalNative()),
        is_synchronized_(method->IsSynchronized()) {
    DCHECK(!(is_fast_native_ && is_critical_native_));
  }

  bool operator<(const JniStubKey& rhs) const {
    if (is_static_ != rhs.is_static_) {
      return rhs.is_static_;
    }
    if (is_synchronized_ != rhs.is_synchronized_) {
      return rhs.is_synchronized_;
    }
    if (is_fast_native_ != rhs.is_fast_native_) {
      return rhs.is_fast_native_;
    }
    if (is_critical_native_ != rhs.is_critical_native_) {
      return rhs.is_critical_native_;
    }
    return strcmp(shorty_, rhs.shorty_) < 0;
  }

  // Update the shorty to point to another method's shorty. Call this function when removing
  // the method that references the old shorty from JniStubData and not removing the entire
  // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
  void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* shorty = method->GetShorty();
    DCHECK_STREQ(shorty_, shorty);
    shorty_ = shorty;
  }

 private:
  // The shorty points to a DexFile data and may need to change
  // to point to the same shorty in a different DexFile.
  mutable const char* shorty_;

  const bool is_static_;
  const bool is_fast_native_;
  const bool is_critical_native_;
  const bool is_synchronized_;
};

class JitCodeCache::JniStubData {
 public:
  JniStubData() : code_(nullptr), methods_() {}

  void SetCode(const void* code) {
    DCHECK(code != nullptr);
    code_ = code;
  }

  void UpdateEntryPoints(const void* entrypoint) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(IsCompiled());
    DCHECK(entrypoint == OatQuickMethodHeader::FromCodePointer(GetCode())->GetEntryPoint());
    instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
    for (ArtMethod* m : GetMethods()) {
      // Because `m` might be in the process of being deleted:
      // - Call the dedicated method instead of the more generic UpdateMethodsCode
      // - Check the class status without a full read barrier; use ReadBarrier::IsMarked().
      bool can_set_entrypoint = true;
      if (NeedsClinitCheckBeforeCall(m)) {
        // To avoid resurrecting an unreachable object, we must not use a full read
        // barrier but we do not want to miss updating an entrypoint under common
        // circumstances, i.e. during a GC the class becomes visibly initialized,
        // the method becomes hot, we compile the thunk and want to update the
        // entrypoint while the method's declaring class field still points to the
        // from-space class object with the old status. Therefore we read the
        // declaring class without a read barrier and check if it's already marked.
        // If yes, we check the status of the to-space class object as intended.
        // Otherwise, there is no to-space object and the from-space class object
        // contains the most recent value of the status field; even if this races
        // with another thread doing a read barrier and updating the status, that's
        // no different from a race with a thread that just updates the status.
        // Such race can happen only for the zygote method pre-compilation, as we
        // otherwise compile only thunks for methods of visibly initialized classes.
        ObjPtr<mirror::Class> klass = m->GetDeclaringClass<kWithoutReadBarrier>();
        ObjPtr<mirror::Class> marked = ReadBarrier::IsMarked(klass.Ptr());
        ObjPtr<mirror::Class> checked_klass = (marked != nullptr) ? marked : klass;
        can_set_entrypoint = checked_klass->IsVisiblyInitialized();
      }
      if (can_set_entrypoint) {
        instrum->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
      }
    }
  }

  const void* GetCode() const {
    return code_;
  }

  bool IsCompiled() const {
    return GetCode() != nullptr;
  }

  void AddMethod(ArtMethod* method) {
    if (!ContainsElement(methods_, method)) {
      methods_.push_back(method);
    }
  }

  const std::vector<ArtMethod*>& GetMethods() const {
    return methods_;
  }

  void RemoveMethodsIn(const LinearAlloc& alloc) REQUIRES_SHARED(Locks::mutator_lock_) {
    auto kept_end = std::partition(
        methods_.begin(),
        methods_.end(),
        [&alloc](ArtMethod* method) { return !alloc.ContainsUnsafe(method); });
    for (auto it = kept_end; it != methods_.end(); it++) {
      VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
    }
    methods_.erase(kept_end, methods_.end());
  }

  bool RemoveMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
    auto it = std::find(methods_.begin(), methods_.end(), method);
    if (it != methods_.end()) {
      VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
      methods_.erase(it);
      return true;
    } else {
      return false;
    }
  }

  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
    std::replace(methods_.begin(), methods_.end(), old_method, new_method);
  }

 private:
  const void* code_;
  std::vector<ArtMethod*> methods_;
};

JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
                                   bool rwx_memory_allowed,
                                   bool is_zygote,
                                   std::string* error_msg) {
  // Register for membarrier expedited sync core if JIT will be generating code.
  if (!used_only_for_profile_data) {
    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
      // flushed and it's used when adding code to the JIT. The memory used by the new code may
      // have just been released and, in theory, the old code could still be in a pipeline.
      VLOG(jit) << "Kernel does not support membarrier sync-core";
    }
  }

  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
  // Check whether the provided max capacity in options is below 1GB.
  size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  // Ensure we're below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  JitMemoryRegion region;
  if (!region.Initialize(initial_capacity,
                         max_capacity,
                         rwx_memory_allowed,
                         is_zygote,
                         error_msg)) {
    return nullptr;
  }

  std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
  if (is_zygote) {
    // Zygote should never collect code to share the memory with the children.
    jit_code_cache->garbage_collect_code_ = false;
    jit_code_cache->shared_region_ = std::move(region);
  } else {
    jit_code_cache->private_region_ = std::move(region);
  }

  VLOG(jit) << "Created jit code cache: initial capacity="
            << PrettySize(initial_capacity)
            << ", maximum capacity="
            << PrettySize(max_capacity);

  return jit_code_cache.release();
}

JitCodeCache::JitCodeCache()
    : is_weak_access_enabled_(true),
      inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
      zygote_map_(&shared_region_),
      lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
      collection_in_progress_(false),
      last_collection_increased_code_cache_(false),
      garbage_collect_code_(true),
      number_of_baseline_compilations_(0),
      number_of_optimized_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_collections_(0),
      histogram_stack_map_memory_use_("Memory used for stack maps", 16),
      histogram_code_memory_use_("Memory used for compiled code", 16),
      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
}

JitCodeCache::~JitCodeCache() {}

bool JitCodeCache::PrivateRegionContainsPc(const void* ptr) const {
  return private_region_.IsInExecSpace(ptr);
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
}

bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
  ScopedObjectAccess soa(art::Thread::Current());
  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return true;
  } else if (method->GetEntryPointFromQuickCompiledCode() == GetQuickInstrumentationEntryPoint()) {
    return FindCompiledCodeForInstrumentation(method) != nullptr;
  }
  return false;
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() &&
        it->second.IsCompiled() &&
        ContainsElement(it->second.GetMethods(), method)) {
      return true;
    }
  } else {
    for (const auto& it : method_code_map_) {
      if (it.second == method) {
        return true;
      }
    }
    if (zygote_map_.ContainsMethod(method)) {
      return true;
    }
  }
  return false;
}

const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
  DCHECK(method->IsNative());
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  auto it = jni_stubs_map_.find(JniStubKey(method));
  if (it != jni_stubs_map_.end()) {
    JniStubData& data = it->second;
    if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
      return data.GetCode();
    }
  }
  return nullptr;
}

const void* JitCodeCache::FindCompiledCodeForInstrumentation(ArtMethod* method) {
  // If jit-gc is still on, the collector uses the SavedEntryPoint field, so we cannot use it
  // to find the instrumentation entrypoint.
  if (LIKELY(GetGarbageCollectCode())) {
    return nullptr;
  }
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info == nullptr) {
    return nullptr;
  }
  // When GC is disabled for trampoline tracing we will use SavedEntrypoint to hold the actual
  // jit-compiled version of the method. If jit-gc is disabled for other reasons this will just be
  // nullptr.
  return info->GetSavedEntryPoint();
}

const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method) {
  if (method->IsPreCompiled()) {
    const void* code_ptr = nullptr;
    if (method->GetDeclaringClass()->GetClassLoader() == nullptr) {
      code_ptr = zygote_map_.GetCodeFor(method);
    } else {
      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
      auto it = saved_compiled_methods_map_.find(method);
      if (it != saved_compiled_methods_map_.end()) {
        code_ptr = it->second;
      }
    }
    if (code_ptr != nullptr) {
      OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      return method_header->GetEntryPoint();
    }
  }
  return nullptr;
}

bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

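// A JIT code allocation consists of an OatQuickMethodHeader, rounded up to the instruction set
// alignment, immediately followed by the code itself. The helpers below convert between the
// start of the allocation and the code pointer.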
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

static const void* FromAllocationToCode(const uint8_t* alloc) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<const void*>(alloc + RoundUp(sizeof(OatQuickMethodHeader), alignment));
}

static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
  // The length of the table is stored just before the stack map (and therefore at the end of
  // the table itself), in order to be able to fetch it from a `stack_map` pointer.
  return reinterpret_cast<const uint32_t*>(stack_map)[-1];
}

static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
                                bool is_shared_region)
    REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!kIsDebugBuild) {
    return;
  }
  // Put all roots in `roots_data`.
  for (Handle<mirror::Object> object : roots) {
    // Ensure the string is strongly interned. b/32995596
    if (object->IsString()) {
      ObjPtr<mirror::String> str = object->AsString();
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
    }
    // Ensure that we don't put movable objects in the shared region.
    if (is_shared_region) {
      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
    }
  }
}

static const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
  uint32_t roots = GetNumberOfRoots(data);
  if (number_of_roots != nullptr) {
    *number_of_roots = roots;
  }
  return data - ComputeRootTableSize(roots);
}

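// Visit the GC roots stored in the root tables of compiled methods as well as the classes
// recorded in inline caches, updating or clearing entries based on what the visitor reports.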
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  for (const auto& entry : method_code_map_) {
    uint32_t number_of_roots = 0;
    const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
    uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
        ? private_region_.GetWritableDataAddress(root_table)
        : shared_region_.GetWritableDataAddress(root_table);
    GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
    for (uint32_t i = 0; i < number_of_roots; ++i) {
      // This does not need a read barrier because this is called by GC.
      mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
      if (object == nullptr || object == Runtime::GetWeakClassSentinel()) {
        // entry got deleted in a previous sweep.
      } else if (object->IsString<kDefaultVerifyFlags>()) {
        mirror::Object* new_object = visitor->IsMarked(object);
        // We know the string is marked because it's a strongly-interned string that
        // is always alive. The IsMarked implementation of the CMS collector returns
        // null for newly allocated objects, but we know those haven't moved. Therefore,
        // only update the entry if we get a different non-null string.
        // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
        // out of the weak access/creation pause. b/32167580
        if (new_object != nullptr && new_object != object) {
          DCHECK(new_object->IsString());
          roots[i] = GcRoot<mirror::Object>(new_object);
        }
      } else {
        Runtime::ProcessWeakClass(
            reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]),
            visitor,
            Runtime::GetWeakClassSentinel());
      }
    }
  }
  // Walk over inline caches to clear entries containing unloaded classes.
  for (ProfilingInfo* info : profiling_infos_) {
    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      InlineCache* cache = &info->cache_[i];
      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
        Runtime::ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
      }
    }
  }
}

void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
  if (IsInZygoteExecSpace(code_ptr)) {
    // No need to free, this is shared memory.
    return;
  }
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const uint8_t* data = nullptr;
  if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
    data = GetRootTable(code_ptr);
  }  // else this is a JNI stub without any data.

  FreeLocked(&private_region_, reinterpret_cast<uint8_t*>(allocation), data);
}

void JitCodeCache::FreeAllMethodHeaders(
    const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
  // We need to remove entries in method_headers from CHA dependencies
  // first since once we do FreeCode() below, the memory can be reused,
  // so it's possible for the same method_header to start representing
  // different compiled code.
  {
    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
        ->RemoveDependentsWithMethodHeaders(method_headers);
  }

  ScopedCodeCacheWrite scc(private_region_);
  for (const OatQuickMethodHeader* method_header : method_headers) {
    FreeCodeAndData(method_header->GetCode());
  }

  // We have potentially removed a lot of debug info. Do maintenance pass to save space.
  RepackNativeDebugInfoForJit();

  // Check that the set of compiled methods exactly matches native debug information.
  if (kIsDebugBuild) {
    std::map<const void*, ArtMethod*> compiled_methods;
    VisitAllMethods([&](const void* addr, ArtMethod* method) {
      CHECK(addr != nullptr && method != nullptr);
      compiled_methods.emplace(addr, method);
    });
    std::set<const void*> debug_info;
    ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
      addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA));  // Thumb-bit.
      CHECK(debug_info.emplace(addr).second) << "Duplicate debug info: " << addr << " " << name;
      CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
    });
    if (!debug_info.empty()) {  // If debug-info generation is enabled.
      for (auto it : compiled_methods) {
        CHECK_EQ(debug_info.count(it.first), 1u) << "No debug info: " << it.second->PrettyMethod();
      }
    }
  }
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // We use a set to first collect all method_headers whose code needs to be
  // removed. We need to free the underlying code after we remove CHA dependencies
  // for entries in this set. And it's more efficient to iterate through
  // the CHA dependency map just once with an unordered_set.
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  {
    MutexLock mu(self, *Locks::jit_lock_);
    // We do not check if a code cache GC is in progress, as this method comes
    // with the classlinker_classes_lock_ held, and suspending ourselves could
    // lead to a deadlock.
    {
      for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
        it->second.RemoveMethodsIn(alloc);
        if (it->second.GetMethods().empty()) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
          it = jni_stubs_map_.erase(it);
        } else {
          it->first.UpdateShorty(it->second.GetMethods().front());
          ++it;
        }
      }
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        if (alloc.ContainsUnsafe(it->second)) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
          VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
          it = method_code_map_.erase(it);
        } else {
          ++it;
        }
      }
    }
    for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->first)) {
        // Note that the code has already been pushed to method_headers in the loop
        // above and is going to be removed in FreeCode() below.
        it = osr_code_map_.erase(it);
      } else {
        ++it;
      }
    }
    for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
      ProfilingInfo* info = *it;
      if (alloc.ContainsUnsafe(info->GetMethod())) {
        info->GetMethod()->SetProfilingInfo(nullptr);
        private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
        it = profiling_infos_.erase(it);
      } else {
        ++it;
      }
    }
    FreeAllMethodHeaders(method_headers);
  }
}

bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
  return kUseReadBarrier
      ? self->GetWeakRefAccessEnabled()
      : is_weak_access_enabled_.load(std::memory_order_seq_cst);
}

void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
  if (IsWeakAccessEnabled(self)) {
    return;
  }
  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
  MutexLock mu(self, *Locks::jit_lock_);
  while (!IsWeakAccessEnabled(self)) {
    inline_cache_cond_.Wait(self);
  }
}

void JitCodeCache::BroadcastForInlineCacheAccess() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::jit_lock_);
  inline_cache_cond_.Broadcast(self);
}

void JitCodeCache::AllowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
  BroadcastForInlineCacheAccess();
}

void JitCodeCache::DisallowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}

void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
  WaitUntilInlineCacheAccessible(Thread::Current());
  // Note that we don't need to lock `lock_` here, the compiler calling
  // this method has already ensured the inline cache will not be deleted.
  for (size_t in_cache = 0, in_array = 0;
       in_cache < InlineCache::kIndividualCacheSize;
       ++in_cache) {
    mirror::Class* object = ic.classes_[in_cache].Read();
    if (object != nullptr) {
      array->Set(in_array++, object);
    }
  }
}

static void ClearMethodCounter(ArtMethod* method, bool was_warm)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (was_warm) {
    method->SetPreviouslyWarm();
  }
  // We reset the counter to 1 so that the profile knows that the method was executed at least once.
  // This is required for layout purposes.
  // We also need to make sure we'll pass the warmup threshold again, so we set to 0 if
  // the warmup threshold is 1.
  uint16_t jit_warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
  method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}

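// Called with the JIT lock held in a runnable state. Temporarily releases the lock and
// suspends the thread so that an in-progress code cache collection can finish.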
void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
  while (collection_in_progress_) {
    Locks::jit_lock_->Unlock(self);
    {
      ScopedThreadSuspension sts(self, kSuspended);
      MutexLock mu(self, *Locks::jit_lock_);
      WaitForPotentialCollectionToComplete(self);
    }
    Locks::jit_lock_->Lock(self);
  }
}

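// Copy the compiled code, roots and stack maps reserved earlier into the given region, register
// CHA dependencies, and publish the new entry point for the method. Returns false if committing
// the code or data fails, or if single-implementation assumptions have become invalid.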
bool JitCodeCache::Commit(Thread* self,
                          JitMemoryRegion* region,
                          ArtMethod* method,
                          ArrayRef<const uint8_t> reserved_code,
                          ArrayRef<const uint8_t> code,
                          ArrayRef<const uint8_t> reserved_data,
                          const std::vector<Handle<mirror::Object>>& roots,
                          ArrayRef<const uint8_t> stack_map,
                          const std::vector<uint8_t>& debug_info,
                          bool is_full_debug_info,
                          CompilationKind compilation_kind,
                          bool has_should_deoptimize_flag,
                          const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  DCHECK(!method->IsNative() || (compilation_kind != CompilationKind::kOsr));

  if (!method->IsNative()) {
    // We need to do this before grabbing the lock_ because it needs to be able to see the string
    // InternTable. Native methods do not have roots.
    DCheckRootsAreValid(roots, IsSharedRegion(*region));
  }

  const uint8_t* roots_data = reserved_data.data();
  size_t root_table_size = ComputeRootTableSize(roots.size());
  const uint8_t* stack_map_data = roots_data + root_table_size;

  MutexLock mu(self, *Locks::jit_lock_);
  // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
  // finish.
  WaitForPotentialCollectionToCompleteRunnable(self);
  const uint8_t* code_ptr = region->CommitCode(
      reserved_code, code, stack_map_data, has_should_deoptimize_flag);
  if (code_ptr == nullptr) {
    return false;
  }
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);

  // Commit roots and stack maps before updating the entry point.
  if (!region->CommitData(reserved_data, roots, stack_map)) {
    return false;
  }

  switch (compilation_kind) {
    case CompilationKind::kOsr:
      number_of_osr_compilations_++;
      break;
    case CompilationKind::kBaseline:
      number_of_baseline_compilations_++;
      break;
    case CompilationKind::kOptimized:
      number_of_optimized_compilations_++;
      break;
  }

  // We need to update the debug info before the entry point gets set.
  // At the same time we want to do it under the JIT lock so that debug info and JIT maps are
  // in sync.
  if (!debug_info.empty()) {
    // NB: Don't allow packing of full info since it would remove non-backtrace data.
    AddNativeDebugInfoForJit(code_ptr, debug_info, /*allow_packing=*/ !is_full_debug_info);
  }

  // We need to update the entry point in the runnable state for the instrumentation.
  {
    // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
    // compiled code is considered invalidated by some class linking, but below we still make the
    // compiled code valid for the method.  Need cha_lock_ for checking all single-implementation
    // flags and register dependencies.
    MutexLock cha_mu(self, *Locks::cha_lock_);
    bool single_impl_still_valid = true;
    for (ArtMethod* single_impl : cha_single_implementation_list) {
      if (!single_impl->HasSingleImplementation()) {
        // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
        // Hopefully the class hierarchy will be more stable when compilation is retried.
        single_impl_still_valid = false;
        ClearMethodCounter(method, /*was_warm=*/ false);
        break;
      }
    }

    // Discard the code if any single-implementation assumptions are now invalid.
    if (UNLIKELY(!single_impl_still_valid)) {
      VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
      return false;
    }
    DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
        << "Should not be using cha on debuggable apps/runs!";

    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    for (ArtMethod* single_impl : cha_single_implementation_list) {
      class_linker->GetClassHierarchyAnalysis()->AddDependency(single_impl, method, method_header);
    }

    if (UNLIKELY(method->IsNative())) {
      auto it = jni_stubs_map_.find(JniStubKey(method));
      DCHECK(it != jni_stubs_map_.end())
          << "Entry inserted in NotifyCompilationOf() should be alive.";
      JniStubData* data = &it->second;
      DCHECK(ContainsElement(data->GetMethods(), method))
          << "Entry inserted in NotifyCompilationOf() should contain this method.";
      data->SetCode(code_ptr);
      data->UpdateEntryPoints(method_header->GetEntryPoint());
    } else {
      if (method->IsPreCompiled() && IsSharedRegion(*region)) {
        zygote_map_.Put(code_ptr, method);
      } else {
        method_code_map_.Put(code_ptr, method);
      }
      if (compilation_kind == CompilationKind::kOsr) {
        osr_code_map_.Put(method, code_ptr);
      } else if (NeedsClinitCheckBeforeCall(method) &&
                 !method->GetDeclaringClass()->IsVisiblyInitialized()) {
        // This situation currently only occurs in the jit-zygote mode.
        DCHECK(!garbage_collect_code_);
        DCHECK(method->IsPreCompiled());
        // The shared region can easily be queried. For the private region, we
        // use a side map.
        if (!IsSharedRegion(*region)) {
          saved_compiled_methods_map_.Put(method, code_ptr);
        }
      } else {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
            method, method_header->GetEntryPoint());
      }
    }
    if (collection_in_progress_) {
      // We need to update the live bitmap if there is a GC to ensure it sees this new
      // code.
      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
    }
    VLOG(jit)
        << "JIT added (kind=" << compilation_kind << ") "
        << ArtMethod::PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                         method_header->GetCodeSize());
  }

  return true;
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  return CodeCacheSizeLocked();
}

bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
  // This function is used only for testing and only with non-native methods.
  CHECK(!method->IsNative());

  MutexLock mu(Thread::Current(), *Locks::jit_lock_);

  bool osr = osr_code_map_.find(method) != osr_code_map_.end();
  bool in_cache = RemoveMethodLocked(method, release_memory);

  if (!in_cache) {
    return false;
  }

  method->SetCounter(0);
  Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
      method, GetQuickToInterpreterBridge());
  VLOG(jit)
      << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
      << ArtMethod::PrettyMethod(method) << "@" << method
      << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
      << " dcache_size=" << PrettySize(DataCacheSizeLocked());
  return true;
}

bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
  if (LIKELY(!method->IsNative())) {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info != nullptr) {
      RemoveElement(profiling_infos_, info);
    }
    method->SetProfilingInfo(nullptr);
  }

  bool in_cache = false;
  ScopedCodeCacheWrite ccw(private_region_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
      in_cache = true;
      if (it->second.GetMethods().empty()) {
        if (release_memory) {
          FreeCodeAndData(it->second.GetCode());
        }
        jni_stubs_map_.erase(it);
      } else {
        it->first.UpdateShorty(it->second.GetMethods().front());
      }
    }
  } else {
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (it->second == method) {
        in_cache = true;
        if (release_memory) {
          FreeCodeAndData(it->first);
        }
        VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }

    auto osr_it = osr_code_map_.find(method);
    if (osr_it != osr_code_map_.end()) {
      osr_code_map_.erase(osr_it);
    }
  }

  return in_cache;
}

// This notifies the code cache that the given method has been redefined and that it should remove
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be in any thread's call
// stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  RemoveMethodLocked(method, /* release_memory= */ true);
}

// This invalidates old_method. Once this function returns one can no longer use old_method to
// execute code unless it is fixed up. This fixup will happen later in the process of installing a
// class redefinition.
// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
// shouldn't be used since it is no longer logically in the jit code cache.
// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  if (old_method->IsNative()) {
    // Update methods in jni_stubs_map_.
    for (auto& entry : jni_stubs_map_) {
      JniStubData& data = entry.second;
      data.MoveObsoleteMethod(old_method, new_method);
    }
    return;
  }
  // Update ProfilingInfo to the new one and remove it from the old_method.
  if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
    DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
    ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
    old_method->SetProfilingInfo(nullptr);
    // Since the JIT should be paused and all threads suspended by the time this is called these
    // checks should always pass.
    DCHECK(!info->IsInUseByCompiler());
    new_method->SetProfilingInfo(info);
    // Get rid of the old saved entrypoint if it is there.
    info->SetSavedEntryPoint(nullptr);
    info->method_ = new_method;
  }
  // Update method_code_map_ to point to the new method.
  for (auto& it : method_code_map_) {
    if (it.second == old_method) {
      it.second = new_method;
    }
  }
  // Update osr_code_map_ to point to the new method.
  auto code_map = osr_code_map_.find(old_method);
  if (code_map != osr_code_map_.end()) {
    osr_code_map_.Put(new_method, code_map->second);
    osr_code_map_.erase(old_method);
  }
}

void JitCodeCache::TransitionToDebuggable() {
  // Check that none of our methods have an entrypoint in the zygote exec
  // space (this should be taken care of by
  // ClassLinker::UpdateEntryPointsClassVisitor).
  {
    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
    if (kIsDebugBuild) {
      for (const auto& it : method_code_map_) {
        ArtMethod* method = it.second;
        DCHECK(!method->IsPreCompiled());
        DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
      }
    }
    // Not strictly necessary, but this map is useless now.
    saved_compiled_methods_map_.clear();
  }
  if (kIsDebugBuild) {
    for (const auto& entry : zygote_map_) {
      ArtMethod* method = entry.method;
      if (method != nullptr) {
        DCHECK(!method->IsPreCompiled());
        DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
      }
    }
  }
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  return GetCurrentRegion()->GetUsedMemoryForCode();
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  return GetCurrentRegion()->GetUsedMemoryForData();
}

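// Reserve space for the code and the data (root table and stack maps) of a compiled method.
// If the allocation fails and the cache is not yet at its maximum capacity, a collection is
// run and the allocation is retried.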
bool JitCodeCache::Reserve(Thread* self,
                           JitMemoryRegion* region,
                           size_t code_size,
                           size_t stack_map_size,
                           size_t number_of_roots,
                           ArtMethod* method,
                           /*out*/ArrayRef<const uint8_t>* reserved_code,
                           /*out*/ArrayRef<const uint8_t>* reserved_data) {
  code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
  size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));

  const uint8_t* code;
  const uint8_t* data;
  while (true) {
    bool at_max_capacity = false;
    {
      ScopedThreadSuspension sts(self, kSuspended);
      MutexLock mu(self, *Locks::jit_lock_);
      WaitForPotentialCollectionToComplete(self);
      ScopedCodeCacheWrite ccw(*region);
      code = region->AllocateCode(code_size);
      data = region->AllocateData(data_size);
      at_max_capacity = IsAtMaxCapacity();
    }
    if (code != nullptr && data != nullptr) {
      break;
    }
    Free(self, region, code, data);
    if (at_max_capacity) {
      VLOG(jit) << "JIT failed to allocate code of size "
                << PrettySize(code_size)
                << ", and data of size "
                << PrettySize(data_size);
      return false;
    }
    // Run a code cache collection and try again.
    GarbageCollectCache(self);
  }

  *reserved_code = ArrayRef<const uint8_t>(code, code_size);
  *reserved_data = ArrayRef<const uint8_t>(data, data_size);

  MutexLock mu(self, *Locks::jit_lock_);
  histogram_code_memory_use_.AddValue(code_size);
  if (code_size > kCodeSizeLogThreshold) {
    LOG(INFO) << "JIT allocated "
              << PrettySize(code_size)
              << " for compiled code of "
              << ArtMethod::PrettyMethod(method);
  }
  histogram_stack_map_memory_use_.AddValue(data_size);
  if (data_size > kStackMapSizeLogThreshold) {
    LOG(INFO) << "JIT allocated "
              << PrettySize(data_size)
              << " for stack maps of "
              << ArtMethod::PrettyMethod(method);
  }
  return true;
}

void JitCodeCache::Free(Thread* self,
                        JitMemoryRegion* region,
                        const uint8_t* code,
                        const uint8_t* data) {
  MutexLock mu(self, *Locks::jit_lock_);
  ScopedCodeCacheWrite ccw(*region);
  FreeLocked(region, code, data);
}

void JitCodeCache::FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data) {
  if (code != nullptr) {
    RemoveNativeDebugInfoForJit(reinterpret_cast<const void*>(FromAllocationToCode(code)));
    region->FreeCode(code);
  }
  if (data != nullptr) {
    region->FreeData(data);
  }
}

class MarkCodeClosure final : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
      : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {}

  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
    ScopedTrace trace(__PRETTY_FUNCTION__);
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) {
          const OatQuickMethodHeader* method_header =
              stack_visitor->GetCurrentOatQuickMethodHeader();
          if (method_header == nullptr) {
            return true;
          }
          const void* code = method_header->GetCode();
          if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
            // Use the atomic set version, as multiple threads are executing this code.
            bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
          }
          return true;
        },
        thread,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kSkipInlinedFrames);

    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We check this below.
      for (const auto& it : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame, it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(it.second.return_pc_, /* method= */ nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(bitmap_->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
  Barrier* const barrier_;
};

void JitCodeCache::NotifyCollectionDone(Thread* self) {
  collection_in_progress_ = false;
  lock_cond_.Broadcast(self);
}

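// Run MarkCodeClosure as a checkpoint on all threads so that any JIT code currently on a
// thread's stack gets marked in the live bitmap before unmarked code is collected.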
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
  Barrier barrier(0);
  size_t threads_running_checkpoint = 0;
  MarkCodeClosure closure(this, GetLiveBitmap(), &barrier);
  threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
  // Now that we have run our checkpoint, move to a suspended state and wait
  // for other threads to run the checkpoint.
  ScopedThreadSuspension sts(self, kSuspended);
  if (threads_running_checkpoint != 0) {
    barrier.Increment(self, threads_running_checkpoint);
  }
}

bool JitCodeCache::IsAtMaxCapacity() const {
  return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
}

bool JitCodeCache::ShouldDoFullCollection() {
  if (IsAtMaxCapacity()) {
    // Always do a full collection when the code cache is full.
    return true;
  } else if (private_region_.GetCurrentCapacity() < kReservedCapacity) {
    // Always do partial collection when the code cache size is below the reserved
    // capacity.
    return false;
  } else if (last_collection_increased_code_cache_) {
    // This time do a full collection.
    return true;
  } else {
    // This time do a partial collection.
    return false;
  }
}

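// Perform a code cache collection: wait for (or start) a collection, choose between a full and
// a partial collection, run it, and grow the cache after partial collections.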
GarbageCollectCache(Thread * self)1134 void JitCodeCache::GarbageCollectCache(Thread* self) {
1135   ScopedTrace trace(__FUNCTION__);
1136   // Wait for an existing collection, or let everyone know we are starting one.
1137   {
1138     ScopedThreadSuspension sts(self, kSuspended);
1139     MutexLock mu(self, *Locks::jit_lock_);
1140     if (!garbage_collect_code_) {
1141       private_region_.IncreaseCodeCacheCapacity();
1142       return;
1143     } else if (WaitForPotentialCollectionToComplete(self)) {
1144       return;
1145     } else {
1146       number_of_collections_++;
1147       live_bitmap_.reset(CodeCacheBitmap::Create(
1148           "code-cache-bitmap",
1149           reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
1150           reinterpret_cast<uintptr_t>(
1151               private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
1152       collection_in_progress_ = true;
1153     }
1154   }
1155 
1156   TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
1157   {
1158     TimingLogger::ScopedTiming st("Code cache collection", &logger);
1159 
1160     bool do_full_collection = false;
1161     {
1162       MutexLock mu(self, *Locks::jit_lock_);
1163       do_full_collection = ShouldDoFullCollection();
1164     }
1165 
1166     VLOG(jit) << "Do "
1167               << (do_full_collection ? "full" : "partial")
1168               << " code cache collection, code="
1169               << PrettySize(CodeCacheSize())
1170               << ", data=" << PrettySize(DataCacheSize());
1171 
1172     DoCollection(self, /* collect_profiling_info= */ do_full_collection);
1173 
1174     VLOG(jit) << "After code cache collection, code="
1175               << PrettySize(CodeCacheSize())
1176               << ", data=" << PrettySize(DataCacheSize());
1177 
1178     {
1179       MutexLock mu(self, *Locks::jit_lock_);
1180 
1181       // Increase the code cache only when we do partial collections.
1182       // TODO: base this strategy on how full the code cache is?
1183       if (do_full_collection) {
1184         last_collection_increased_code_cache_ = false;
1185       } else {
1186         last_collection_increased_code_cache_ = true;
1187         private_region_.IncreaseCodeCacheCapacity();
1188       }
1189 
1190       bool next_collection_will_be_full = ShouldDoFullCollection();
1191 
1192       // Start polling the liveness of compiled code to prepare for the next full collection.
1193       if (next_collection_will_be_full) {
1194         if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
1195           for (ProfilingInfo* info : profiling_infos_) {
1196             info->SetBaselineHotnessCount(0);
1197           }
1198         } else {
1199           // Save the entry point of methods we have compiled, and update the entry
1200           // point of those methods to the interpreter. If the method is invoked, the
1201           // interpreter will update its entry point to the compiled code and call it.
1202           for (ProfilingInfo* info : profiling_infos_) {
1203             const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
1204             if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
1205               info->SetSavedEntryPoint(entry_point);
1206               // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
1207               // class of the method. We may be concurrently running a GC which makes accessing
1208               // the class unsafe. We know it is OK to bypass the instrumentation as we've just
1209               // checked that the current entry point is JIT compiled code.
1210               info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
1211             }
1212           }
1213         }
1214 
1215         // Change entry points of native methods back to the GenericJNI entrypoint.
1216         for (const auto& entry : jni_stubs_map_) {
1217           const JniStubData& data = entry.second;
1218           if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
1219             continue;
1220           }
1221           // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
1222           uint16_t new_counter = Runtime::Current()->GetJit()->HotMethodThreshold() - 1u;
1223           const OatQuickMethodHeader* method_header =
1224               OatQuickMethodHeader::FromCodePointer(data.GetCode());
1225           for (ArtMethod* method : data.GetMethods()) {
1226             if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
1227               // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
1228               method->SetCounter(new_counter);
1229               method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
1230             }
1231           }
1232         }
1233       }
1234       live_bitmap_.reset(nullptr);
1235       NotifyCollectionDone(self);
1236     }
1237   }
1238   Runtime::Current()->GetJit()->AddTimingLogger(logger);
1239 }
1240 
RemoveUnmarkedCode(Thread * self)1241 void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
1242   ScopedTrace trace(__FUNCTION__);
1243   std::unordered_set<OatQuickMethodHeader*> method_headers;
1244   {
1245     MutexLock mu(self, *Locks::jit_lock_);
1246     // Iterate over all compiled code and remove entries that are not marked.
1247     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
1248       JniStubData* data = &it->second;
1249       if (IsInZygoteExecSpace(data->GetCode()) ||
1250           !data->IsCompiled() ||
1251           GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
1252         ++it;
1253       } else {
1254         method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
1255         for (ArtMethod* method : data->GetMethods()) {
1256           VLOG(jit) << "JIT removed (JNI) " << method->PrettyMethod() << ": " << data->GetCode();
1257         }
1258         it = jni_stubs_map_.erase(it);
1259       }
1260     }
1261     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
1262       const void* code_ptr = it->first;
1263       uintptr_t allocation = FromCodeToAllocation(code_ptr);
1264       if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
1265         ++it;
1266       } else {
1267         OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1268         method_headers.insert(header);
1269         VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
1270         it = method_code_map_.erase(it);
1271       }
1272     }
1273     FreeAllMethodHeaders(method_headers);
1274   }
1275 }
1276 
1277 bool JitCodeCache::GetGarbageCollectCode() {
1278   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1279   return garbage_collect_code_;
1280 }
1281 
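// Enable or disable code cache collection at runtime. When collection is being
// disabled, wait for any in-flight collection to complete and clear the saved entry
// points in the profiling infos so they cannot dangle.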
1282 void JitCodeCache::SetGarbageCollectCode(bool value) {
1283   Thread* self = Thread::Current();
1284   MutexLock mu(self, *Locks::jit_lock_);
1285   if (garbage_collect_code_ != value) {
1286     if (garbage_collect_code_) {
1287       // When dynamically disabling garbage collection, we need
1288       // to make sure that a potential current collection is finished, and also
1289       // clear the saved entry point in profiling infos to avoid dangling pointers.
1290       WaitForPotentialCollectionToComplete(self);
1291       for (ProfilingInfo* info : profiling_infos_) {
1292         info->SetSavedEntryPoint(nullptr);
1293       }
1294     }
1295     // Update the flag while holding the lock to ensure no thread will try to GC.
1296     garbage_collect_code_ = value;
1297   }
1298 }
1299 
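// Bookkeeping for in-flight compilations. Each compilation kind (OSR, baseline,
// optimized) is tracked in its own set; callers in this file take the JIT lock
// around these helpers.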
1300 void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
1301   DCHECK(IsMethodBeingCompiled(method, kind));
1302   switch (kind) {
1303     case CompilationKind::kOsr:
1304       current_osr_compilations_.erase(method);
1305       break;
1306     case CompilationKind::kBaseline:
1307       current_baseline_compilations_.erase(method);
1308       break;
1309     case CompilationKind::kOptimized:
1310       current_optimized_compilations_.erase(method);
1311       break;
1312   }
1313 }
1314 
1315 void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
1316   DCHECK(!IsMethodBeingCompiled(method, kind));
1317   switch (kind) {
1318     case CompilationKind::kOsr:
1319       current_osr_compilations_.insert(method);
1320       break;
1321     case CompilationKind::kBaseline:
1322       current_baseline_compilations_.insert(method);
1323       break;
1324     case CompilationKind::kOptimized:
1325       current_optimized_compilations_.insert(method);
1326       break;
1327   }
1328 }
1329 
1330 bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
1331   switch (kind) {
1332     case CompilationKind::kOsr:
1333       return ContainsElement(current_osr_compilations_, method);
1334     case CompilationKind::kBaseline:
1335       return ContainsElement(current_baseline_compilations_, method);
1336     case CompilationKind::kOptimized:
1337       return ContainsElement(current_optimized_compilations_, method);
1338   }
1339 }
1340 
1341 bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method) {
1342   return ContainsElement(current_optimized_compilations_, method) ||
1343       ContainsElement(current_osr_compilations_, method) ||
1344       ContainsElement(current_baseline_compilations_, method);
1345 }
1346 
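// Perform one collection cycle: under the JIT lock, downgrade entry points that should
// not be kept (cold baseline code, or saved/stale entry points when profiling info is
// collected), mark the code that is still the entry point of an ArtMethod, then run a
// checkpoint so every thread marks the code on its own stack, and finally remove
// whatever remained unmarked. When `collect_profiling_info` is true, profiling infos of
// methods that are neither compiled nor being compiled are freed as well.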
1347 void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
1348   ScopedTrace trace(__FUNCTION__);
1349   {
1350     MutexLock mu(self, *Locks::jit_lock_);
1351 
1352     if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
1353       // Move the methods that have baseline entrypoints and whose baseline hotness
1354       // count is zero back to the interpreter.
1355       // Note that these methods may still be on a thread stack, or may be concurrently
1356       // revived in the meantime. That's OK, as the thread executing the code will mark it.
1357       for (ProfilingInfo* info : profiling_infos_) {
1358         if (info->GetBaselineHotnessCount() == 0) {
1359           const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
1360           if (ContainsPc(entry_point)) {
1361             OatQuickMethodHeader* method_header =
1362                 OatQuickMethodHeader::FromEntryPoint(entry_point);
1363             if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
1364               info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
1365             }
1366           }
1367         }
1368       }
1369       // TODO: collect profiling info
1370       // TODO: collect optimized code?
1371     } else {
1372       if (collect_profiling_info) {
1373         // Clear the profiling info of methods that do not have compiled code as entrypoint.
1374         // Also remove the saved entry point from the ProfilingInfo objects.
1375         for (ProfilingInfo* info : profiling_infos_) {
1376           const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
1377           if (!ContainsPc(ptr) &&
1378               !IsMethodBeingCompiled(info->GetMethod()) &&
1379               !info->IsInUseByCompiler() &&
1380               !IsInZygoteDataSpace(info)) {
1381             info->GetMethod()->SetProfilingInfo(nullptr);
1382           }
1383 
1384           if (info->GetSavedEntryPoint() != nullptr) {
1385             info->SetSavedEntryPoint(nullptr);
1386             // We are going to move this method back to interpreter. Clear the counter now to
1387             // give it a chance to be hot again.
1388             ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
1389           }
1390         }
1391       } else if (kIsDebugBuild) {
1392         // Check that the profiling infos do not have a dangling entry point.
1393         for (ProfilingInfo* info : profiling_infos_) {
1394           DCHECK(!Runtime::Current()->IsZygote());
1395           const void* entry_point = info->GetSavedEntryPoint();
1396           DCHECK(entry_point == nullptr || IsInZygoteExecSpace(entry_point));
1397         }
1398       }
1399     }
1400 
1401     // Mark compiled code that is the entrypoint of an ArtMethod. Compiled code that is
1402     // not an entry point is either:
1403     // - OSR compiled code, which will be removed if not on a thread call stack.
1404     // - discarded compiled code, which will be removed if not on a thread call stack.
1405     for (const auto& entry : jni_stubs_map_) {
1406       const JniStubData& data = entry.second;
1407       const void* code_ptr = data.GetCode();
1408       if (IsInZygoteExecSpace(code_ptr)) {
1409         continue;
1410       }
1411       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1412       for (ArtMethod* method : data.GetMethods()) {
1413         if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
1414           GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
1415           break;
1416         }
1417       }
1418     }
1419     for (const auto& it : method_code_map_) {
1420       ArtMethod* method = it.second;
1421       const void* code_ptr = it.first;
1422       if (IsInZygoteExecSpace(code_ptr)) {
1423         continue;
1424       }
1425       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1426       if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
1427         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
1428       }
1429     }
1430 
1431     // Empty the OSR method map, as OSR compiled code will be deleted (except code that
1432     // is on a thread stack).
1433     osr_code_map_.clear();
1434   }
1435 
1436   // Run a checkpoint on all threads to mark the JIT compiled code they are running.
1437   MarkCompiledCodeOnThreadStacks(self);
1438 
1439   // At this point, mutator threads are still running, and entrypoints of methods can
1440   // change. We do know they cannot change to a code cache entry that is not marked,
1441   // therefore we can safely remove those entries.
1442   RemoveUnmarkedCode(self);
1443 
1444   if (collect_profiling_info) {
1445     MutexLock mu(self, *Locks::jit_lock_);
1446     // Free all profiling infos of methods not compiled nor being compiled.
1447     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
1448       [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
1449         const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
1450         // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
1451         // that the compiled code would not get revived. As mutator threads run concurrently,
1452         // they may have revived the compiled code, and now we are in the situation where
1453         // a method has compiled code but no ProfilingInfo.
1454         // We make sure compiled methods have a ProfilingInfo object. It is needed for
1455         // code cache collection.
1456         if (ContainsPc(ptr) &&
1457             info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
1458           info->GetMethod()->SetProfilingInfo(info);
1459         } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
1460           // No need for this ProfilingInfo object anymore.
1461           private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
1462           return true;
1463         }
1464         return false;
1465       });
1466     profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
1467   }
1468 }
1469 
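// Map a pc inside JIT-generated code back to its OatQuickMethodHeader. On ARM the pc is
// first decremented to undo the Thumb-2 offset of one. Native methods are looked up in
// the JNI stub map; other code is looked up through the zygote map (for the shared
// region) or through the method_code_map_ entry with the highest code address not above
// the pc.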
1470 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
1471   static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
1472   if (kRuntimeISA == InstructionSet::kArm) {
1473     // On Thumb-2, the pc is offset by one.
1474     --pc;
1475   }
1476   if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
1477     return nullptr;
1478   }
1479 
1480   if (!kIsDebugBuild) {
1481     // Called with null `method` only from MarkCodeClosure::Run() in debug build.
1482     CHECK(method != nullptr);
1483   }
1484 
1485   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1486   OatQuickMethodHeader* method_header = nullptr;
1487   ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
1488   if (method != nullptr && UNLIKELY(method->IsNative())) {
1489     auto it = jni_stubs_map_.find(JniStubKey(method));
1490     if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) {
1491       return nullptr;
1492     }
1493     const void* code_ptr = it->second.GetCode();
1494     method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1495     if (!method_header->Contains(pc)) {
1496       return nullptr;
1497     }
1498   } else {
1499     if (shared_region_.IsInExecSpace(reinterpret_cast<const void*>(pc))) {
1500       const void* code_ptr = zygote_map_.GetCodeFor(method, pc);
1501       if (code_ptr != nullptr) {
1502         return OatQuickMethodHeader::FromCodePointer(code_ptr);
1503       }
1504     }
1505     auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
1506     if (it != method_code_map_.begin()) {
1507       --it;
1508       const void* code_ptr = it->first;
1509       if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
1510         method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1511         found_method = it->second;
1512       }
1513     }
1514     if (method_header == nullptr && method == nullptr) {
1515       // Scan all compiled JNI stubs as well. This slow search is only used for checks
1516       // in debug builds; in release builds the `method` is not null.
1517       for (auto&& entry : jni_stubs_map_) {
1518         const JniStubData& data = entry.second;
1519         if (data.IsCompiled() &&
1520             OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
1521           method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
1522         }
1523       }
1524     }
1525     if (method_header == nullptr) {
1526       return nullptr;
1527     }
1528   }
1529 
1530   if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
1531     DCHECK_EQ(found_method, method)
1532         << ArtMethod::PrettyMethod(method) << " "
1533         << ArtMethod::PrettyMethod(found_method) << " "
1534         << std::hex << pc;
1535   }
1536   return method_header;
1537 }
1538 
1539 OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
1540   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1541   auto it = osr_code_map_.find(method);
1542   if (it == osr_code_map_.end()) {
1543     return nullptr;
1544   }
1545   return OatQuickMethodHeader::FromCodePointer(it->second);
1546 }
1547 
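// Allocate a ProfilingInfo for `method`, or return the existing one. Without
// `retry_allocation` the JIT lock is only try-locked so the caller does not contend
// with the JIT; with `retry_allocation` a failed allocation triggers a code cache
// collection followed by a second attempt.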
1548 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
1549                                               ArtMethod* method,
1550                                               const std::vector<uint32_t>& entries,
1551                                               bool retry_allocation)
1552     // No thread safety analysis as we are using TryLock/Unlock explicitly.
1553     NO_THREAD_SAFETY_ANALYSIS {
1554   DCHECK(CanAllocateProfilingInfo());
1555   ProfilingInfo* info = nullptr;
1556   if (!retry_allocation) {
1557     // If we are allocating for the interpreter, just try to lock, to avoid
1558     // lock contention with the JIT.
1559     if (Locks::jit_lock_->ExclusiveTryLock(self)) {
1560       info = AddProfilingInfoInternal(self, method, entries);
1561       Locks::jit_lock_->ExclusiveUnlock(self);
1562     }
1563   } else {
1564     {
1565       MutexLock mu(self, *Locks::jit_lock_);
1566       info = AddProfilingInfoInternal(self, method, entries);
1567     }
1568 
1569     if (info == nullptr) {
1570       GarbageCollectCache(self);
1571       MutexLock mu(self, *Locks::jit_lock_);
1572       info = AddProfilingInfoInternal(self, method, entries);
1573     }
1574   }
1575   return info;
1576 }
1577 
1578 ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
1579                                                       ArtMethod* method,
1580                                                       const std::vector<uint32_t>& entries) {
1581   size_t profile_info_size = RoundUp(
1582       sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
1583       sizeof(void*));
1584 
1585   // Check whether some other thread has concurrently created it.
1586   ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
1587   if (info != nullptr) {
1588     return info;
1589   }
1590 
1591   const uint8_t* data = private_region_.AllocateData(profile_info_size);
1592   if (data == nullptr) {
1593     return nullptr;
1594   }
1595   uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
1596   info = new (writable_data) ProfilingInfo(method, entries);
1597 
1598   // Make sure other threads see the data in the profiling info object before the
1599   // store in the ArtMethod's ProfilingInfo pointer.
1600   std::atomic_thread_fence(std::memory_order_release);
1601 
1602   method->SetProfilingInfo(info);
1603   profiling_infos_.push_back(info);
1604   histogram_profiling_info_memory_use_.AddValue(profile_info_size);
1605   return info;
1606 }
1607 
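// Memory callback for the mspaces backing the JIT regions: forward the request to
// whichever region owns the given mspace.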
1608 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
1609   return shared_region_.OwnsSpace(mspace)
1610       ? shared_region_.MoreCore(mspace, increment)
1611       : private_region_.MoreCore(mspace, increment);
1612 }
1613 
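// Collect profile data for the given dex locations into `methods`. Inline caches are
// only recorded for methods that reached the compile threshold; receiver classes from a
// different class loader than the caller, or classes whose type index cannot be found,
// cause the cache to be flagged as having missing types.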
1614 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
1615                                       std::vector<ProfileMethodInfo>& methods) {
1616   Thread* self = Thread::Current();
1617   WaitUntilInlineCacheAccessible(self);
1618   MutexLock mu(self, *Locks::jit_lock_);
1619   ScopedTrace trace(__FUNCTION__);
1620   uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
1621   for (const ProfilingInfo* info : profiling_infos_) {
1622     ArtMethod* method = info->GetMethod();
1623     const DexFile* dex_file = method->GetDexFile();
1624     const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
1625     if (!ContainsElement(dex_base_locations, base_location)) {
1626       // Skip dex files which are not profiled.
1627       continue;
1628     }
1629     std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
1630 
1631     // If the method didn't reach the compilation threshold don't save the inline caches.
1632     // They might be incomplete and cause unnecessary deoptimizations.
1633     // If the inline cache is empty the compiler will generate a regular invoke virtual/interface.
1634     if (method->GetCounter() < jit_compile_threshold) {
1635       methods.emplace_back(/*ProfileMethodInfo*/
1636           MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1637       continue;
1638     }
1639 
1640     for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
1641       std::vector<TypeReference> profile_classes;
1642       const InlineCache& cache = info->cache_[i];
1643       ArtMethod* caller = info->GetMethod();
1644       bool is_missing_types = false;
1645       for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
1646         mirror::Class* cls = cache.classes_[k].Read();
1647         if (cls == nullptr) {
1648           break;
1649         }
1650 
1651         // Check if the receiver is in the boot class path or if it's in the
1652         // same class loader as the caller. If not, skip it, as there is not
1653         // much we can do during AOT.
1654         if (!cls->IsBootStrapClassLoaded() &&
1655             caller->GetClassLoader() != cls->GetClassLoader()) {
1656           is_missing_types = true;
1657           continue;
1658         }
1659 
1660         const DexFile* class_dex_file = nullptr;
1661         dex::TypeIndex type_index;
1662 
1663         if (cls->GetDexCache() == nullptr) {
1664           DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
1665           // Make a best effort to find the type index in the method's dex file.
1666           // We could search all open dex files, but that might be expensive
1667           // and is probably not worth it.
1668           class_dex_file = dex_file;
1669           type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
1670         } else {
1671           class_dex_file = &(cls->GetDexFile());
1672           type_index = cls->GetDexTypeIndex();
1673         }
1674         if (!type_index.IsValid()) {
1675           // Could be a proxy class or an array for which we couldn't find the type index.
1676           is_missing_types = true;
1677           continue;
1678         }
1679         if (ContainsElement(dex_base_locations,
1680                             DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
1681           // Only consider classes from the same apk (including multidex).
1682           profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
1683               class_dex_file, type_index);
1684         } else {
1685           is_missing_types = true;
1686         }
1687       }
1688       if (!profile_classes.empty()) {
1689         inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
1690             cache.dex_pc_, is_missing_types, profile_classes);
1691       }
1692     }
1693     methods.emplace_back(/*ProfileMethodInfo*/
1694         MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
1695   }
1696 }
1697 
1698 bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
1699   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1700   return osr_code_map_.find(method) != osr_code_map_.end();
1701 }
1702 
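// Decide whether `method` should be compiled with the given kind. Returns false when
// equivalent code is already present, when the declaring class still needs an
// initialization check (unless pre-jitting), when an OSR entry already exists, when a
// required ProfilingInfo could not be allocated, or when the same (method, kind)
// compilation is already in flight. For native methods, this also creates or updates
// the JniStubData entry that tracks the stub.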
1703 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
1704                                        Thread* self,
1705                                        CompilationKind compilation_kind,
1706                                        bool prejit,
1707                                        JitMemoryRegion* region) {
1708   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
1709   if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
1710     OatQuickMethodHeader* method_header =
1711         OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
1712     bool is_baseline = (compilation_kind == CompilationKind::kBaseline);
1713     if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == is_baseline) {
1714       VLOG(jit) << "Not compiling "
1715                 << method->PrettyMethod()
1716                 << " because it has already been compiled"
1717                 << " kind=" << compilation_kind;
1718       return false;
1719     }
1720   }
1721 
1722   if (NeedsClinitCheckBeforeCall(method) && !prejit) {
1723     // We do not need a synchronization barrier for checking the visibly initialized status
1724     // or checking the initialized status just for requesting visible initialization.
1725     ClassStatus status = method->GetDeclaringClass()
1726         ->GetStatus<kDefaultVerifyFlags, /*kWithSynchronizationBarrier=*/ false>();
1727     if (status != ClassStatus::kVisiblyInitialized) {
1728       // Unless we're pre-jitting, we currently don't save the JIT compiled code if we cannot
1729       // update the entrypoint due to needing an initialization check.
1730       if (status == ClassStatus::kInitialized) {
1731         // Request visible initialization but do not block to allow compiling other methods.
1732         // Hopefully, this will complete by the time the method becomes hot again.
1733         Runtime::Current()->GetClassLinker()->MakeInitializedClassesVisiblyInitialized(
1734             self, /*wait=*/ false);
1735       }
1736       VLOG(jit) << "Not compiling "
1737                 << method->PrettyMethod()
1738                 << " because it has the resolution stub";
1739       // Give it a new chance to be hot.
1740       ClearMethodCounter(method, /*was_warm=*/ false);
1741       return false;
1742     }
1743   }
1744 
1745   if (compilation_kind == CompilationKind::kOsr) {
1746     MutexLock mu(self, *Locks::jit_lock_);
1747     if (osr_code_map_.find(method) != osr_code_map_.end()) {
1748       return false;
1749     }
1750   }
1751 
1752   if (UNLIKELY(method->IsNative())) {
1753     MutexLock mu(self, *Locks::jit_lock_);
1754     JniStubKey key(method);
1755     auto it = jni_stubs_map_.find(key);
1756     bool new_compilation = false;
1757     if (it == jni_stubs_map_.end()) {
1758       // Create a new entry to mark the stub as being compiled.
1759       it = jni_stubs_map_.Put(key, JniStubData{});
1760       new_compilation = true;
1761     }
1762     JniStubData* data = &it->second;
1763     data->AddMethod(method);
1764     if (data->IsCompiled()) {
1765       OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
1766       const void* entrypoint = method_header->GetEntryPoint();
1767       // Also update the entrypoints of the other methods held by the JniStubData.
1768       // We could simply update the entrypoint of `method` but if the last JIT GC has
1769       // changed these entrypoints to GenericJNI in preparation for a full GC, we may
1770       // as well change them back as this stub shall not be collected anyway and this
1771       // can avoid a few expensive GenericJNI calls.
1772       data->UpdateEntryPoints(entrypoint);
1773       if (collection_in_progress_) {
1774         if (!IsInZygoteExecSpace(data->GetCode())) {
1775           GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
1776         }
1777       }
1778     }
1779     return new_compilation;
1780   } else {
1781     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
1782     if (CanAllocateProfilingInfo() &&
1783         (compilation_kind == CompilationKind::kBaseline) &&
1784         (info == nullptr)) {
1785       // We can retry allocation here as we're the JIT thread.
1786       if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) {
1787         info = method->GetProfilingInfo(kRuntimePointerSize);
1788       }
1789     }
1790     if (info == nullptr) {
1791       // When prejitting, we don't allocate a profiling info.
1792       if (!prejit && !IsSharedRegion(*region)) {
1793         VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
1794         // Because the counter is not atomic, there are some rare cases where we may not hit the
1795         // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
1796         ClearMethodCounter(method, /*was_warm=*/ false);
1797         return false;
1798       }
1799     }
1800     MutexLock mu(self, *Locks::jit_lock_);
1801     if (IsMethodBeingCompiled(method, compilation_kind)) {
1802       return false;
1803     }
1804     AddMethodBeingCompiled(method, compilation_kind);
1805     return true;
1806   }
1807 }
1808 
1809 ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
1810   MutexLock mu(self, *Locks::jit_lock_);
1811   ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
1812   if (info != nullptr) {
1813     if (!info->IncrementInlineUse()) {
1814       // Overflow of inlining uses, just bail.
1815       return nullptr;
1816     }
1817   }
1818   return info;
1819 }
1820 
1821 void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
1822   MutexLock mu(self, *Locks::jit_lock_);
1823   ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
1824   DCHECK(info != nullptr);
1825   info->DecrementInlineUse();
1826 }
1827 
1828 void JitCodeCache::DoneCompiling(ArtMethod* method,
1829                                  Thread* self,
1830                                  CompilationKind compilation_kind) {
1831   DCHECK_EQ(Thread::Current(), self);
1832   MutexLock mu(self, *Locks::jit_lock_);
1833   if (UNLIKELY(method->IsNative())) {
1834     auto it = jni_stubs_map_.find(JniStubKey(method));
1835     DCHECK(it != jni_stubs_map_.end());
1836     JniStubData* data = &it->second;
1837     DCHECK(ContainsElement(data->GetMethods(), method));
1838     if (UNLIKELY(!data->IsCompiled())) {
1839       // Failed to compile; the JNI compiler never fails, but the cache may be full.
1840       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
1841     }  // else Commit() updated entrypoints of all methods in the JniStubData.
1842   } else {
1843     RemoveMethodBeingCompiled(method, compilation_kind);
1844   }
1845 }
1846 
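// Point every method that has a ProfilingInfo back to the interpreter (or to the
// obsolete-method entrypoints), clear saved entry points and hotness counters, and drop
// all OSR entries. The compiled code itself is not freed here.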
1847 void JitCodeCache::InvalidateAllCompiledCode() {
1848   art::MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1849   size_t cnt = profiling_infos_.size();
1850   size_t osr_size = osr_code_map_.size();
1851   for (ProfilingInfo* pi : profiling_infos_) {
1852     // NB Due to OSR we might run this on some methods multiple times but this should be fine.
1853     ArtMethod* meth = pi->GetMethod();
1854     pi->SetSavedEntryPoint(nullptr);
1855     // We had a ProfilingInfo so we must be warm.
1856     ClearMethodCounter(meth, /*was_warm=*/true);
1857     ClassLinker* linker = Runtime::Current()->GetClassLinker();
1858     if (meth->IsObsolete()) {
1859       linker->SetEntryPointsForObsoleteMethod(meth);
1860     } else {
1861       linker->SetEntryPointsToInterpreter(meth);
1862     }
1863   }
1864   osr_code_map_.clear();
1865   VLOG(jit) << "Invalidated the compiled code of " << (cnt - osr_size) << " methods and "
1866             << osr_size << " OSRs.";
1867 }
1868 
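// Invalidate one particular compiled version of `method`. If `header` corresponds to
// the current entrypoint (or the entrypoint saved by instrumentation), the method is
// redirected to the interpreter and its counter cleared; if it corresponds to an OSR
// version, that entry is simply removed from the OSR map. Any pre-compiled flag is
// cleared so the method can be recompiled.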
1869 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
1870                                              const OatQuickMethodHeader* header) {
1871   DCHECK(!method->IsNative());
1872   ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
1873   const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
1874   if ((profiling_info != nullptr) &&
1875       (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
1876     // When instrumentation is set, the actual entrypoint is the one in the profiling info.
1877     method_entrypoint = profiling_info->GetSavedEntryPoint();
1878     // Prevent future uses of the compiled code.
1879     profiling_info->SetSavedEntryPoint(nullptr);
1880   }
1881 
1882   // Clear the method counter if we are running jitted code since we might want to jit this again in
1883   // the future.
1884   if (method_entrypoint == header->GetEntryPoint()) {
1885     // The entrypoint is the one to invalidate, so we just update it to the interpreter entry point
1886     // and clear the counter to get the method Jitted again.
1887     Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
1888         method, GetQuickToInterpreterBridge());
1889     ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
1890   } else {
1891     MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1892     auto it = osr_code_map_.find(method);
1893     if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
1894       // Remove the OSR method, to avoid using it again.
1895       osr_code_map_.erase(it);
1896     }
1897   }
1898 
1899   // In case the method was pre-compiled, clear that information so we
1900   // can recompile it ourselves.
1901   if (method->IsPreCompiled()) {
1902     method->ClearPreCompiled();
1903   }
1904 }
1905 
1906 void JitCodeCache::Dump(std::ostream& os) {
1907   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
1908   os << "Current JIT code cache size (used / resident): "
1909      << GetCurrentRegion()->GetUsedMemoryForCode() / KB << "KB / "
1910      << GetCurrentRegion()->GetResidentMemoryForCode() / KB << "KB\n"
1911      << "Current JIT data cache size (used / resident): "
1912      << GetCurrentRegion()->GetUsedMemoryForData() / KB << "KB / "
1913      << GetCurrentRegion()->GetResidentMemoryForData() / KB << "KB\n";
1914   if (!Runtime::Current()->IsZygote()) {
1915     os << "Zygote JIT code cache size (at point of fork): "
1916        << shared_region_.GetUsedMemoryForCode() / KB << "KB / "
1917        << shared_region_.GetResidentMemoryForCode() / KB << "KB\n"
1918        << "Zygote JIT data cache size (at point of fork): "
1919        << shared_region_.GetUsedMemoryForData() / KB << "KB / "
1920        << shared_region_.GetResidentMemoryForData() / KB << "KB\n";
1921   }
1922   os << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
1923      << "Current JIT capacity: " << PrettySize(GetCurrentRegion()->GetCurrentCapacity()) << "\n"
1924      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
1925      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
1926      << "Total number of JIT baseline compilations: " << number_of_baseline_compilations_ << "\n"
1927      << "Total number of JIT optimized compilations: " << number_of_optimized_compilations_ << "\n"
1928      << "Total number of JIT compilations for on stack replacement: "
1929         << number_of_osr_compilations_ << "\n"
1930      << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
1931   histogram_stack_map_memory_use_.PrintMemoryUse(os);
1932   histogram_code_memory_use_.PrintMemoryUse(os);
1933   histogram_profiling_info_memory_use_.PrintMemoryUse(os);
1934 }
1935 
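// Child-side work right after a zygote fork: drop JIT tasks inherited from the zygote,
// reset writable mappings of the shared region, reset per-process statistics, and,
// unless this is a child zygote or safe mode is on, create this process's private
// region.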
1936 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
1937   Thread* self = Thread::Current();
1938 
1939   // Remove potential tasks that have been inherited from the zygote.
1940   // We do this now and not in Jit::PostForkChildAction, as system server calls
1941   // JitCodeCache::PostForkChildAction first, and then does some code loading
1942   // that may result in new JIT tasks that we want to keep.
1943   ThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
1944   if (pool != nullptr) {
1945     pool->RemoveAllTasks(self);
1946   }
1947 
1948   MutexLock mu(self, *Locks::jit_lock_);
1949 
1950   // Reset potential writable MemMaps inherited from the zygote. We never want
1951   // to write to them.
1952   shared_region_.ResetWritableMappings();
1953 
1954   if (is_zygote || Runtime::Current()->IsSafeMode()) {
1955     // Don't create a private region for a child zygote. Regions are usually mapped shared
1956     // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
1957     return;
1958   }
1959 
1960   // Reset all statistics to be specific to this process.
1961   number_of_baseline_compilations_ = 0;
1962   number_of_optimized_compilations_ = 0;
1963   number_of_osr_compilations_ = 0;
1964   number_of_collections_ = 0;
1965   histogram_stack_map_memory_use_.Reset();
1966   histogram_code_memory_use_.Reset();
1967   histogram_profiling_info_memory_use_.Reset();
1968 
1969   size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
1970   size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
1971   std::string error_msg;
1972   if (!private_region_.Initialize(initial_capacity,
1973                                   max_capacity,
1974                                   /* rwx_memory_allowed= */ !is_system_server,
1975                                   is_zygote,
1976                                   &error_msg)) {
1977     LOG(WARNING) << "Could not create private region after zygote fork: " << error_msg;
1978   }
1979 }
1980 
1981 JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
1982   return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
1983 }
1984 
1985 void JitCodeCache::VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb) {
1986   for (const auto& it : jni_stubs_map_) {
1987     const JniStubData& data = it.second;
1988     if (data.IsCompiled()) {
1989       for (ArtMethod* method : data.GetMethods()) {
1990         cb(data.GetCode(), method);
1991       }
1992     }
1993   }
1994   for (auto it : method_code_map_) {  // Includes OSR methods.
1995     cb(it.first, it.second);
1996   }
1997   for (auto it : saved_compiled_methods_map_) {
1998     cb(it.second, it.first);
1999   }
2000   for (auto it : zygote_map_) {
2001     if (it.code_ptr != nullptr && it.method != nullptr) {
2002       cb(it.code_ptr, it.method);
2003     }
2004   }
2005 }
2006 
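// The zygote map is a fixed-size, power-of-two, open-addressing hash table placed in
// the shared data region, so child processes can locate zygote-compiled code without
// locking. A trailing ZygoteCompilationState word records the zygote's compilation
// state and is initialized to kInProgress here.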
2007 void ZygoteMap::Initialize(uint32_t number_of_methods) {
2008   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
2009   // Allocate for 40-80% occupancy. This offers reasonable lookup times and guarantees
2010   // that the probing loops terminate, since a null entry always remains.
2011   size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
2012   const uint8_t* memory = region_->AllocateData(
2013       capacity * sizeof(Entry) + sizeof(ZygoteCompilationState));
2014   if (memory == nullptr) {
2015     LOG(WARNING) << "Could not allocate data for the zygote map";
2016     return;
2017   }
2018   const Entry* data = reinterpret_cast<const Entry*>(memory);
2019   region_->FillData(data, capacity, Entry { nullptr, nullptr });
2020   map_ = ArrayRef(data, capacity);
2021   compilation_state_ = reinterpret_cast<const ZygoteCompilationState*>(
2022       memory + capacity * sizeof(Entry));
2023   region_->WriteData(compilation_state_, ZygoteCompilationState::kInProgress);
2024 }
2025 
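// Lookup uses linear probing over the power-of-two table, conceptually:
//   index = hash(method) & (size - 1), then (index + 1) & (size - 1), ...
// stopping at the requested method or at a null entry; a null entry always exists
// because the table is sized larger than the number of methods it holds.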
2026 const void* ZygoteMap::GetCodeFor(ArtMethod* method, uintptr_t pc) const {
2027   if (map_.empty()) {
2028     return nullptr;
2029   }
2030 
2031   if (method == nullptr) {
2032     // Do a linear search. This should only be used in debug builds.
2033     CHECK(kIsDebugBuild);
2034     for (const Entry& entry : map_) {
2035       const void* code_ptr = entry.code_ptr;
2036       if (code_ptr != nullptr) {
2037         OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
2038         if (method_header->Contains(pc)) {
2039           return code_ptr;
2040         }
2041       }
2042     }
2043     return nullptr;
2044   }
2045 
2046   std::hash<ArtMethod*> hf;
2047   size_t index = hf(method) & (map_.size() - 1u);
2048   size_t original_index = index;
2049   // Loop over the array: we know this loop terminates as we will either
2050   // encounter the given method, or a null entry. Both terminate the loop.
2051   // Note that the zygote may concurrently write new entries to the map. That's OK as the
2052   // map is never resized.
2053   while (true) {
2054     const Entry& entry = map_[index];
2055     if (entry.method == nullptr) {
2056       // Not compiled yet.
2057       return nullptr;
2058     }
2059     if (entry.method == method) {
2060       if (entry.code_ptr == nullptr) {
2061         // This is a race with the zygote which wrote the method, but hasn't written the
2062         // code. Just bail and wait for the next time we need the method.
2063         return nullptr;
2064       }
2065       if (pc != 0 && !OatQuickMethodHeader::FromCodePointer(entry.code_ptr)->Contains(pc)) {
2066         return nullptr;
2067       }
2068       return entry.code_ptr;
2069     }
2070     index = (index + 1) & (map_.size() - 1);
2071     DCHECK_NE(original_index, index);
2072   }
2073 }
2074 
2075 void ZygoteMap::Put(const void* code, ArtMethod* method) {
2076   if (map_.empty()) {
2077     return;
2078   }
2079   CHECK(Runtime::Current()->IsZygote());
2080   std::hash<ArtMethod*> hf;
2081   size_t index = hf(method) & (map_.size() - 1);
2082   size_t original_index = index;
2083   // Because the size of the map is bigger than the number of methods that will
2084   // be added, we are guaranteed to find a free slot in the array, and therefore
2085   // this loop is guaranteed to terminate.
2086   while (true) {
2087     const Entry* entry = &map_[index];
2088     if (entry->method == nullptr) {
2089       // Note that readers can read this memory concurrently, but that's OK as
2090       // we are writing pointers.
2091       region_->WriteData(entry, Entry { method, code });
2092       break;
2093     }
2094     index = (index + 1) & (map_.size() - 1);
2095     DCHECK_NE(original_index, index);
2096   }
2097   DCHECK_EQ(GetCodeFor(method), code);
2098 }
2099 
2100 }  // namespace jit
2101 }  // namespace art
2102