/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <memory>
#include <vector>

#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/string_view_cpp20.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc/space/image_space_loading_order.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "jni/jni_id_manager.h"
#include "jni_id_type.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "reflective_value_visitor.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitCodeCache;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType: uint32_t;
class ClassLinker;
class CompilerCallbacks;
class Dex2oatImageTest;
class DexFile;
enum class InstructionSet;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileAssistantTest;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class ThreadPool;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
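
  // A minimal creation sketch (not part of this header; assumes suitable options and that no
  // runtime exists yet in this process):
  //
  //   RuntimeOptions options;
  //   options.emplace_back("-Xcheck:jni", nullptr);
  //   if (Runtime::Create(options, /*ignore_unrecognized=*/false)) {
  //     Runtime::Current()->Start();
  //   }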

  bool EnsurePluginLoaded(const char* plugin_name, std::string* error_msg);
  bool EnsurePerfettoPlugin(std::string* error_msg);

  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsPrimaryZygote() const {
    return is_primary_zygote_;
  }

  bool IsSystemServer() const {
    return is_system_server_;
  }

  void SetAsSystemServer() {
    is_system_server_ = true;
    is_zygote_ = false;
    is_primary_zygote_ = false;
  }

  void SetAsZygoteChild(bool is_system_server, bool is_zygote) {
    // System server should have been set earlier in SetAsSystemServer.
    CHECK_EQ(is_system_server_, is_system_server);
    is_zygote_ = is_zygote;
    is_primary_zygote_ = false;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);
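
  // Usage sketch (hypothetical thread name; a null thread_group typically falls back to the
  // main thread group):
  //
  //   Runtime::Current()->AttachCurrentThread("my-native-thread", /*as_daemon=*/true,
  //                                           /*thread_group=*/nullptr, /*create_peer=*/true);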

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::vector<std::string>& GetBootClassPath() const {
    return boot_class_path_;
  }

  const std::vector<std::string>& GetBootClassPathLocations() const {
    DCHECK(boot_class_path_locations_.empty() ||
           boot_class_path_locations_.size() == boot_class_path_.size());
    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  jni::JniIdManager* GetJniIdManager() const {
    return jni_id_manager_.get();
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  unsigned int GetFinalizerTimeoutMs() const {
    return finalizer_timeout_ms_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingException()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests. It is false when we broadcast to unblock blocked threads after system
  // weak access is re-enabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
  // system weak is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Walk all reflective objects and visit their targets as well as any method/fields held by the
  // runtime threads that are marked as being reflective.
  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);
  // Helper for visiting reflective targets with lambdas for both field and method reflective
  // targets.
  template <typename FieldVis, typename MethodVis>
  void VisitReflectiveTargets(FieldVis&& fv, MethodVis&& mv) REQUIRES(Locks::mutator_lock_) {
    FunctionReflectiveValueVisitor frvv(fv, mv);
    VisitReflectiveTargets(&frvv);
  }
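
  // Sketch of the lambda-based overload above, assuming each visitor receives the current
  // pointer plus a source-info argument and returns the (possibly updated) pointer:
  //
  //   Runtime::Current()->VisitReflectiveTargets(
  //       [](ArtField* field, const auto& info) { return field; },
  //       [](ArtMethod* method, const auto& info) { return method; });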

  // Returns a special method that calls into a trampoline for runtime method resolution
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables() REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  uint64_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  jit::JitCodeCache* GetJitCodeCache() const {
    return jit_code_cache_.get();
  }

  // Returns true if JIT compilations are enabled. GetJit() will not be null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void PostZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env,
      bool is_system_server,
      bool is_child_zygote,
      NativeBridgeAction action,
      const char* isa,
      bool profile_system_server = false);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exiting transaction mode are always done together; it's convenient
  // to do them in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);
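
  // Transaction usage sketch (AOT compilation only; `klass` is a hypothetical transaction root):
  //
  //   runtime->EnterTransactionMode(/*strict=*/true, klass);
  //   ... run class initialization under the transaction ...
  //   if (runtime->IsTransactionAborted()) {
  //     runtime->RollbackAndExitTransactionMode();
  //   } else {
  //     runtime->ExitTransactionMode();
  //   }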

  void SetFaultMessage(const std::string& message);

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    core_platform_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetCorePlatformApiEnforcementPolicy() const {
    return core_platform_api_policy_;
  }

  void SetTestApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    test_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetTestApiEnforcementPolicy() const {
    return test_api_policy_;
  }

  void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
    hidden_api_exemptions_ = exemptions;
  }

  const std::vector<std::string>& GetHiddenApiExemptions() {
    return hidden_api_exemptions_;
  }

  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void SetHiddenApiEventLogSampleRate(uint32_t rate) {
    hidden_api_access_event_log_rate_ = rate;
  }

  uint32_t GetHiddenApiEventLogSampleRate() const {
    return hidden_api_access_event_log_rate_;
  }

  const std::string& GetProcessPackageName() const {
    return process_package_name_;
  }

  void SetProcessPackageName(const char* package_name) {
    if (package_name == nullptr) {
      process_package_name_.clear();
    } else {
      process_package_name_ = package_name;
    }
  }

  const std::string& GetProcessDataDirectory() const {
    return process_data_directory_;
  }

  void SetProcessDataDirectory(const char* data_dir) {
    if (data_dir == nullptr) {
      process_data_directory_.clear();
    } else {
      process_data_directory_ = data_dir;
    }
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(uint32_t version) {
    target_sdk_version_ = version;
  }

  uint32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  void SetDisabledCompatChanges(const std::set<uint64_t>& disabled_changes) {
    disabled_compat_changes_ = disabled_changes;
  }

  std::set<uint64_t> GetDisabledCompatChanges() const {
    return disabled_compat_changes_;
  }

  bool isChangeEnabled(uint64_t change_id) const {
    // TODO(145743810): add an up call to java to log to statsd
    return disabled_compat_changes_.count(change_id) == 0;
  }
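
  // Usage sketch (kMyChangeId is a hypothetical compat-change id, not defined here):
  //
  //   if (Runtime::Current()->isChangeEnabled(kMyChangeId)) {
  //     // New behaviour.
  //   }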

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  void CreateJitCodeCache(bool rwx_memory_allowed);

  // Create the JIT and instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetProfileableFromShell(bool value) {
    is_profileable_from_shell_ = value;
  }

  bool IsProfileableFromShell() const {
    return is_profileable_from_shell_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage() REQUIRES(Locks::mutator_lock_);

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  void SetSignalHookDebuggable(bool value);

  bool AreNonStandardExitsEnabled() const {
    return non_standard_exits_enabled_;
  }

  void SetNonStandardExitsEnabled() {
    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
  }

  // Change state and re-check which interpreter should be used.
  //
  // This must be called whenever there is an event that forces
  // us to use a different interpreter (e.g. debugger is attached).
  //
  // Changing the state using the lambda gives us some multithreading safety.
  // It ensures that two calls do not interfere with each other and
  // it makes it possible to DCHECK that the thread-local flag is correct.
  template<typename Action>
  static void DoAndMaybeSwitchInterpreter(Action lambda);

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(ObjPtr<mirror::Object> sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
  // For testing purpose only.
  // TODO: Remove this when this is no longer needed (b/116087961).
  GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);


  // Use a sentinel for marking entries in a table that have been cleared.
  // This helps diagnose cases where code tries to wrongly access such
  // entries.
  static mirror::Class* GetWeakClassSentinel() {
    return reinterpret_cast<mirror::Class*>(0xebadbeef);
  }

  // Helper for the GC to process a weak class in a table.
  static void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
                               IsMarkedVisitor* visitor,
                               mirror::Class* update)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc, or a low-4GB version if we are a 64-bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  bool IsSafeMode() const {
    return safe_mode_;
  }

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns whether the code can be deoptimized asynchronously. Code may be compiled with some
  // optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  bool GetDumpGCPerformanceOnShutdown() const {
    return dump_gc_performance_on_shutdown_;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  JniIdType GetJniIdType() const {
    return jni_ids_indirection_;
  }

  bool CanSetJniIdType() const {
    return GetJniIdType() == JniIdType::kSwapablePointer;
  }

  // Changes the JniIdType to the given type. Only allowed if CanSetJniIdType(). All threads must be
  // suspended to call this function.
  void SetJniIdType(JniIdType t);

  uint32_t GetVerifierLoggingThresholdMs() const {
    return verifier_logging_threshold_ms_;
  }

  // Atomically delete the thread pool if the reference count is 0.
  bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Wait for all the thread workers to be attached.
  void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // Scoped usage of the runtime thread pool. Prevents the pool from being
  // deleted. Note that the thread pool is only for startup and gets deleted after.
  class ScopedThreadPoolUsage {
   public:
    ScopedThreadPoolUsage();
    ~ScopedThreadPoolUsage();

    // Return the thread pool.
    ThreadPool* GetThreadPool() const {
      return thread_pool_;
    }

   private:
    ThreadPool* const thread_pool_;
  };
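
  // Usage sketch: keep the startup thread pool alive while dispatching work to it. The pool may
  // already have been deleted, so the getter can return null:
  //
  //   Runtime::ScopedThreadPoolUsage stpu;
  //   ThreadPool* pool = stpu.GetThreadPool();
  //   if (pool != nullptr) {
  //     ... add tasks and wait ...
  //   }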

  bool LoadAppImageStartupCache() const {
    return load_app_image_startup_cache_;
  }

  void SetLoadAppImageStartupCacheEnabled(bool enabled) {
    load_app_image_startup_cache_ = enabled;
  }

  // Reset the startup completed status so that we can call NotifyStartupCompleted again. Should
  // only be used for testing.
  void ResetStartupCompleted();

  // Notify the runtime that application startup is considered completed. Only has effect for the
  // first call.
  void NotifyStartupCompleted();

  // Return true if startup is already completed.
  bool GetStartupCompleted() const;

  gc::space::ImageSpaceLoadingOrder GetImageSpaceLoadingOrder() const {
    return image_space_loading_order_;
  }

  bool IsVerifierMissingKThrowFatal() const {
    return verifier_missing_kthrow_fatal_;
  }

  bool IsPerfettoHprofEnabled() const {
    return perfetto_hprof_enabled_;
  }

  // Return true if we should load oat files as executable or not.
  bool GetOatFilesExecutable() const;

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized, they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Note: To be lock-free, GetFaultMessage temporarily replaces the fault message with null.
  //       As such, there is a window where a call will return an empty string. In general,
  //       only aborting code should retrieve this data (via GetFaultMessageForAbortLogging
  //       friend).
  std::string GetFaultMessage();

  ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
  void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  // Pre-allocated exceptions (see Runtime::Init).
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_exception_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_oome_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_handling_stack_overflow_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker to differentiate between unfilled IMT slots and conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool is_primary_zygote_;
  bool is_system_server_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::vector<std::string> boot_class_path_;
  std::vector<std::string> boot_class_path_locations_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  // Finalizers running for longer than this many milliseconds abort the runtime.
  unsigned int finalizer_timeout_ms_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  std::unique_ptr<jni::JniIdManager> jni_id_manager_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Runtime thread pool. The pool is only for startup and gets deleted after.
  std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
  size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);

  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
  // lock-free, so needs to be atomic.
  std::atomic<std::string*> fault_message_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Tells us whether the runtime has finished starting. If this flag is set then the daemon
  // threads and the system class loader have been created. This flag is needed to know whether
  // it's safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transactions used for pre-initializing classes at compilation time.
  // To support nested transactions, maintain a list containing all transactions. Transactions are
  // handled under a stack discipline. Because the GC needs to walk all transactions, we choose a
  // list as the underlying data structure instead of a stack.
  std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  uint32_t target_sdk_version_;

  // A set of disabled compat changes for the running app, all other changes are enabled.
  std::set<uint64_t> disabled_compat_changes_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. The way it works is:
  // if standard dlopen fails to load the native library associated with a native activity, it
  // calls into the native bridge to load it and then gets the trampoline for the entry point of
  // the native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under a native debugger.
  bool is_native_debuggable_;

  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
  // MterpShouldSwitchInterpreters function.
  bool async_exceptions_thrown_;

  // Whether anything is going to be using the shadow-frame APIs to force a function to return
  // early. Doing this requires that (1) we be debuggable and (2) that mterp is exited.
  bool non_standard_exits_enabled_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  bool is_profileable_from_shell_ = false;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool madvise_random_access_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether access checks on hidden API should be performed.
  hiddenapi::EnforcementPolicy hidden_api_policy_;

  // Whether access checks on core platform API should be performed.
  hiddenapi::EnforcementPolicy core_platform_api_policy_;

  // Whether access checks on test API should be performed.
  hiddenapi::EnforcementPolicy test_api_policy_;

  // List of signature prefixes of methods that have been removed from the blacklist, and treated
  // as if whitelisted.
  std::vector<std::string> hidden_api_exemptions_;

  // Do not warn about the same hidden API access violation twice.
  // This is only used for testing.
  bool dedupe_hidden_api_warnings_;

  // How often to log hidden API access to the event log. An integer between 0
  // (never) and 0x10000 (always).
  uint32_t hidden_api_access_event_log_rate_;

  // The package of the app running in this process.
  std::string process_package_name_;

  // The data directory of the app running in this process.
  std::string process_data_directory_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // The string containing requested jdwp options
  std::string jdwp_options_;

  // The jdwp provider we were configured with.
  JdwpProvider jdwp_provider_;

  // True if jmethodID and jfieldID are opaque indices. When false (the default) these are simply
  // pointers. This is set by -Xopaque-jni-ids:{true,false}.
  JniIdType jni_ids_indirection_;

  // Set to false in cases where we want to directly control when jni-id
  // indirection is changed. This is intended only for testing JNI id swapping.
  bool automatically_set_jni_ids_indirection_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  MemMap protected_fault_page_;

  uint32_t verifier_logging_threshold_ms_;

  bool load_app_image_startup_cache_ = false;

  // Set if startup has completed. This can happen at most once.
  std::atomic<bool> startup_completed_ = false;

  gc::space::ImageSpaceLoadingOrder image_space_loading_order_ =
      gc::space::ImageSpaceLoadingOrder::kSystemFirst;

  bool verifier_missing_kthrow_fatal_;
  bool perfetto_hprof_enabled_;

  // Note: See comments on GetFaultMessage.
  friend std::string GetFaultMessageForAbortLogging();
  friend class Dex2oatImageTest;
  friend class ScopedThreadPoolUsage;
  friend class OatFileAssistantTest;
  class NotifyStartupCompletedTask;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_