/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "dex/dex_file.h"
#include "elf/elf_debug_reader.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_memory_region.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <type_traits>
#include <unordered_set>
#include <vector>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are three ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, using the entry seqlocks.
//   * The seqlock is a monotonically increasing counter, which
//     is even if the entry is valid and odd if it is invalid.
//     It is set to an even value after all other fields are set,
//     and it is set to an odd value before the entry is deleted.
//   * This makes it possible to safely read the symfile data:
//     * The reader should read the value of the seqlock both
//       before and after reading the symfile. If the seqlock
//       values match and are even, the copy is consistent.
//   * Entries are recycled, but never freed, which guarantees
//     that the seqlock is not overwritten by a random value.
//   * The linked-list is one level higher. The next-pointer
//     must always point to an entry with even seqlock, which
//     ensures that entries of a crashed process can be read.
//     This means the entry must be added after it is created
//     and it must be removed before it is invalidated (odd).
//   * When iterating over the linked list the reader can use
//     the timestamps to ensure that the current and next entry
//     were not deleted, using the following steps:
//       1) Read the next pointer and the next entry's seqlock.
//       2) Read the symfile and re-read the next pointer.
//       3) Re-read both the current and next seqlock.
//       4) Go to step 1 using the new entry and seqlock.
//
// 3) Asynchronously, using the global seqlock (see the reader sketch below).
//   * The seqlock is a monotonically increasing counter which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list. If the seqlock values match and are even,
//     the copy is consistent. Otherwise, the reader should try again.
//   * Note that using the data directly while it is being modified
//     might crash the tool. Therefore, the only safe way is to make
//     a copy and use the copy only after the seqlock has been checked.
//   * Note that the process might even free and munmap the data while
//     it is being copied, therefore the reader should either handle
//     SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later. All timestamps must be unique.
//   * For full conformance with the C++ memory model, all seqlock-
//     protected accesses should be atomic. We currently do this in the
//     more critical cases. The rest will have to be fixed before
//     attempting to run TSAN on this code.
//
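// Illustrative reader-side sketch of method 3 (not part of ART; the helper
// name is hypothetical). An out-of-process tool would fetch the bytes with
// process_vm_readv rather than plain loads, but the check/copy/re-check
// pattern is the same:
//
//   // Returns true and fills 'out' with a consistent snapshot of the
//   // descriptor, or false if a modification raced with the copy.
//   bool TryCopyDescriptor(const JITDescriptorPublic& desc,
//                          JITDescriptorPublic& out) {
//     uint32_t seq1 = desc.seqlock_.load(std::memory_order_acquire);
//     if ((seq1 & 1) != 0) {
//       return false;  // Odd: the linked list is being modified right now.
//     }
//     memcpy(&out, &desc, sizeof(out));  // Copy raw bytes (OS call if remote).
//     std::atomic_thread_fence(std::memory_order_acquire);
//     uint32_t seq2 = desc.seqlock_.load(std::memory_order_relaxed);
//     return seq1 == seq2;  // Even and unchanged => the copy is consistent.
//   }
//
// The caller retries on failure. Note that pointers in the snapshot (head_,
// next_) still refer to the target's memory, so each entry they lead to must
// be copied and validated the same way before its contents are trusted.
//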

namespace art {

static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);

// Most loads and stores need no synchronization since all memory is protected by the global locks.
// Some writes are synchronized so libunwindstack can read the memory safely from another process.
constexpr std::memory_order kNonRacingRelaxed = std::memory_order_relaxed;

// Size of JIT code range covered by each packed JITCodeEntry.
constexpr uint32_t kJitRepackGroupSize = 64 * KB;

// Automatically call the repack method every 'n' new entries.
constexpr uint32_t kJitRepackFrequency = 64;

// Public binary interface between ART and native tools (gdb, libunwind, etc.).
// The fields below need to be exported and have special names as per the GDB API.
extern "C" {
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  // Public/stable binary interface.
  struct JITCodeEntryPublic {
    std::atomic<const JITCodeEntry*> next_;  // Atomic to guarantee consistency after crash.
    const JITCodeEntry* prev_ = nullptr;     // For linked list deletion. Unused in readers.
    const uint8_t* symfile_addr_ = nullptr;  // Address of the in-memory ELF file.
    uint64_t symfile_size_ = 0;              // NB: The offset is 12 on x86 but 16 on ARM32.

    // Android-specific fields:
    uint64_t timestamp_;                     // CLOCK_MONOTONIC time of entry registration.
    std::atomic_uint32_t seqlock_{1};        // Synchronization. Even value if entry is valid.
  };

  // Implementation-specific fields (which can be used only in this file).
  struct JITCodeEntry : public JITCodeEntryPublic {
    // Unpacked entries: Code address of the symbol in the ELF file.
    // Packed entries: The start address of the covered memory range.
    const void* addr_ = nullptr;
    // Allow merging of ELF files to save space.
    // Packing drops advanced DWARF data, so it is not always desirable.
    bool allow_packing_ = false;
    // Whether this entry has been LZMA compressed.
    // Compression is expensive, so we don't always do it.
    bool is_compressed_ = false;
  };

  // Public/stable binary interface.
  struct JITDescriptorPublic {
    uint32_t version_ = 1;                          // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;           // One of the JITAction enum values.
    const JITCodeEntry* relevant_entry_ = nullptr;  // The entry affected by the action.
    std::atomic<const JITCodeEntry*> head_{nullptr};  // Head of the linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '2'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptorPublic);
    uint32_t sizeof_entry = sizeof(JITCodeEntryPublic);
    std::atomic_uint32_t seqlock_{0};  // Incremented before and after any modification.
    uint64_t timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
  };

  // Implementation-specific fields (which can be used only in this file).
  struct JITDescriptor : public JITDescriptorPublic {
    const JITCodeEntry* tail_ = nullptr;          // Tail of the linked list of all live entries.
    const JITCodeEntry* free_entries_ = nullptr;  // List of deleted entries ready for reuse.

    // Used for memory sharing with zygote. See NativeDebugInfoPreFork().
    const JITCodeEntry* zygote_head_entry_ = nullptr;
    JITCodeEntry application_tail_entry_{};
  };

  // Public interface: Can be used by readers to check that the structs have the expected size.
  uint32_t g_art_sizeof_jit_code_entry = sizeof(JITCodeEntryPublic);
  uint32_t g_art_sizeof_jit_descriptor = sizeof(JITDescriptorPublic);
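
  // For example, a hypothetical reader that compiles against copies of the
  // public structs above could verify its layout assumptions before parsing
  // any entries (illustrative only):
  //
  //   CHECK_EQ(g_art_sizeof_jit_code_entry, sizeof(JITCodeEntryPublic));
  //   CHECK_EQ(g_art_sizeof_jit_descriptor, sizeof(JITDescriptorPublic));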

  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(std::atomic_uint32_t::is_always_lock_free, "Expected to be lock free");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");
  static_assert(std::atomic<void*>::is_always_lock_free, "Expected to be lock free");

  // GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;
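
  // For example, a hypothetical in-process tool could install its own hook
  // (illustrative only; the hook runs while ART holds its internal locks,
  // so it must not call back into the runtime):
  //
  //   static void MyJitHook() { /* e.g. record __jit_debug_descriptor state */ }
  //   ...
  //   __jit_debug_register_code_ptr = MyJitHook;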

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
}  // extern "C"

// The fields below are internal, but we keep them here anyway for consistency.
// Their state is related to the static state above and it must be kept in sync.

// Used only in debug builds to check that we are not adding duplicate entries.
static std::unordered_set<const void*> g_dcheck_all_jit_functions GUARDED_BY(g_jit_debug_lock);

// Methods that have been marked for deletion on the next repack pass.
static std::vector<const void*> g_removed_jit_functions GUARDED_BY(g_jit_debug_lock);

// Number of small (single symbol) ELF files. Used to trigger repacking.
static uint32_t g_jit_num_unpacked_entries = 0;

struct DexNativeInfo {
  static constexpr bool kCopySymfileData = false;  // Just reference DEX files.
  static JITDescriptor& Descriptor() { return __dex_debug_descriptor; }
  static void NotifyNativeDebugger() { __dex_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return malloc(size); }
  static void Free(const void* ptr) { free(const_cast<void*>(ptr)); }
  template<class T> static T* Writable(const T* v) { return const_cast<T*>(v); }
};

struct JitNativeInfo {
  static constexpr bool kCopySymfileData = true;  // Copy debug info to JIT memory.
  static JITDescriptor& Descriptor() { return __jit_debug_descriptor; }
  static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
  static void Free(void* ptr) = delete;

  template<class T> static T* Writable(const T* v) {
    // Special case: This entry is in static memory and not allocated in JIT memory.
    if (v == reinterpret_cast<const void*>(&Descriptor().application_tail_entry_)) {
      return const_cast<T*>(v);
    }
    return const_cast<T*>(Memory()->GetWritableDataAddress(v));
  }

  static jit::JitMemoryRegion* Memory() ASSERT_CAPABILITY(Locks::jit_lock_) {
    Locks::jit_lock_->AssertHeld(Thread::Current());
    jit::JitCodeCache* jit_code_cache = Runtime::Current()->GetJitCodeCache();
    CHECK(jit_code_cache != nullptr);
    jit::JitMemoryRegion* memory = jit_code_cache->GetCurrentRegion();
    CHECK(memory->IsValid());
    return memory;
  }
};

ArrayRef<const uint8_t> GetJITCodeEntrySymFile(const JITCodeEntry* entry) {
  return ArrayRef<const uint8_t>(entry->symfile_addr_, entry->symfile_size_);
}

// Ensure the timestamp is monotonically increasing even in the presence of a
// low-granularity system timer. This ensures that each entry has a unique timestamp.
static uint64_t GetNextTimestamp(JITDescriptor& descriptor) {
  return std::max(descriptor.timestamp_ + 1, NanoTime());
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void Seqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Already locked";
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void Sequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
}

// Insert 'entry' in the linked list before 'next' and mark it as valid (append if 'next' is null).
// This method must be called under a global lock (g_jit_debug_lock or g_dex_debug_lock).
template<class NativeInfo>
static void InsertNewEntry(const JITCodeEntry* entry, const JITCodeEntry* next) {
  CHECK_EQ(entry->seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Expected invalid entry";
  JITDescriptor& descriptor = NativeInfo::Descriptor();
  const JITCodeEntry* prev = (next != nullptr ? next->prev_ : descriptor.tail_);
  JITCodeEntry* writable = NativeInfo::Writable(entry);
  writable->next_ = next;
  writable->prev_ = prev;
  writable->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as valid.
  // Backward pointers should not be used by readers, so they are non-atomic.
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = entry;
  } else {
    descriptor.tail_ = entry;
  }
  // Forward pointers must be atomic and they must point to a valid entry at all times.
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(entry, std::memory_order_release);
  } else {
    descriptor.head_.store(entry, std::memory_order_release);
  }
}

// This must be called with the appropriate lock taken (g_{jit,dex}_debug_lock).
template<class NativeInfo>
static const JITCodeEntry* CreateJITCodeEntryInternal(
    ArrayRef<const uint8_t> symfile = ArrayRef<const uint8_t>(),
    const void* addr = nullptr,
    bool allow_packing = false,
    bool is_compressed = false) {
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Allocate a JITCodeEntry if needed.
  if (descriptor.free_entries_ == nullptr) {
    const void* memory = NativeInfo::Alloc(sizeof(JITCodeEntry));
    if (memory == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    new (NativeInfo::Writable(memory)) JITCodeEntry();
    descriptor.free_entries_ = reinterpret_cast<const JITCodeEntry*>(memory);
  }

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  if (NativeInfo::kCopySymfileData && !symfile.empty()) {
    const uint8_t* copy = reinterpret_cast<const uint8_t*>(NativeInfo::Alloc(symfile.size()));
    if (copy == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    memcpy(NativeInfo::Writable(copy), symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  uint64_t timestamp = GetNextTimestamp(descriptor);

  // We must insert entries at a specific place. See NativeDebugInfoPreFork().
  const JITCodeEntry* next = descriptor.head_.load(kNonRacingRelaxed);  // Insert at the head.
  if (descriptor.zygote_head_entry_ != nullptr && Runtime::Current()->IsZygote()) {
    next = nullptr;  // Insert zygote entries at the tail.
  }

  // Pop an entry from the free list.
  const JITCodeEntry* entry = descriptor.free_entries_;
  descriptor.free_entries_ = descriptor.free_entries_->next_.load(kNonRacingRelaxed);

  // Create the entry and set all its fields.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  writable_entry->symfile_addr_ = symfile.data();
  writable_entry->symfile_size_ = symfile.size();
  writable_entry->addr_ = addr;
  writable_entry->allow_packing_ = allow_packing;
  writable_entry->is_compressed_ = is_compressed;
  writable_entry->timestamp_ = timestamp;

  // Add the entry to the main linked list.
  Seqlock(descriptor);
  InsertNewEntry<NativeInfo>(entry, next);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.timestamp_ = timestamp;
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  return entry;
}

template<class NativeInfo>
static void DeleteJITCodeEntryInternal(const JITCodeEntry* entry) {
  CHECK(entry != nullptr);
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Remove the entry from the main linked list.
  Seqlock(descriptor);
  const JITCodeEntry* next = entry->next_.load(kNonRacingRelaxed);
  const JITCodeEntry* prev = entry->prev_;
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = prev;
  } else {
    descriptor.tail_ = prev;
  }
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.timestamp_ = GetNextTimestamp(descriptor);
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  // Delete the entry.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  CHECK_EQ(writable_entry->seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Expected valid entry";
  // Release: Ensures that readers see "next_" pointing to a valid entry at all times.
  writable_entry->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as invalid.
  // Release: Ensures that the entry is seen as invalid before its data is freed.
  std::atomic_thread_fence(std::memory_order_release);
  const uint8_t* symfile = entry->symfile_addr_;
  writable_entry->symfile_addr_ = nullptr;
  if (NativeInfo::kCopySymfileData && symfile != nullptr) {
    NativeInfo::Free(symfile);
  }

  // Push the entry to the free list.
  writable_entry->next_.store(descriptor.free_entries_, kNonRacingRelaxed);
  writable_entry->prev_ = nullptr;
  descriptor.free_entries_ = entry;
}

void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
  CreateJITCodeEntryInternal<DexNativeInfo>(symfile);
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  // On the other hand, a single dex file might also be used with different class loaders.
  for (const JITCodeEntry* entry = __dex_debug_descriptor.head_; entry != nullptr; ) {
    const JITCodeEntry* next = entry->next_;  // Save the next pointer before we free the memory.
    if (entry->symfile_addr_ == dexfile->Begin()) {
      DeleteJITCodeEntryInternal<DexNativeInfo>(entry);
    }
    entry = next;
  }
}

// Splits the linked list into two parts:
// The first part (including the static head pointer) is owned by the application.
// The second part is owned by zygote and might be concurrently modified by it.
//
// We add two empty entries at the boundary which are never removed (app_tail, zygote_head).
// These entries are needed to preserve the next/prev pointers in the linked list,
// since zygote cannot modify the application's data and vice versa.
//
//  <------- owned by the application memory --------> <--- owned by zygote memory --->
//         |----------------------|------------------|-------------|-----------------|
// head -> | application_entries* | application_tail | zygote_head | zygote_entries* |
//         |+---------------------|------------------|-------------|----------------+|
//          |                                                                        |
//          \-(new application entries)                    (new zygote entries)-/
//
// Zygote entries are inserted at the end, which means that repacked zygote entries
// will still be seen by a single forward iteration of the linked list (avoiding races).
//
// Application entries are inserted at the start, which introduces a repacking race,
// but that is OK, since it is easy to read the new entries from the head in a further pass.
// The benefit is that this makes it fast to read only the new entries.
//
void NativeDebugInfoPreFork() {
  CHECK(Runtime::Current()->IsZygote());
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  if (descriptor.zygote_head_entry_ != nullptr) {
    return;  // Already done - we need to do this only on the first fork.
  }

  // Create the zygote-owned head entry (with no ELF file).
  // The data will be allocated from the current JIT memory (owned by zygote).
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);  // Needed to alloc entry.
  const JITCodeEntry* zygote_head =
      reinterpret_cast<const JITCodeEntry*>(JitNativeInfo::Alloc(sizeof(JITCodeEntry)));
  CHECK(zygote_head != nullptr);
  new (JitNativeInfo::Writable(zygote_head)) JITCodeEntry();  // Initialize.
  InsertNewEntry<JitNativeInfo>(zygote_head, descriptor.head_);
  descriptor.zygote_head_entry_ = zygote_head;

  // Create the child-owned tail entry (with no ELF file).
  // The data is statically allocated since it must be owned by the forked process.
  InsertNewEntry<JitNativeInfo>(&descriptor.application_tail_entry_, descriptor.head_);
}

void NativeDebugInfoPostFork() {
  CHECK(!Runtime::Current()->IsZygote());
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  descriptor.free_entries_ = nullptr;  // Don't reuse zygote's entries.
}

// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry for each
// group. The start address of a method's code determines which group it belongs to. The end is
// irrelevant. New mini debug infos will be merged if possible, and entries for GCed functions
// will be removed.
static void RepackEntries(bool compress_entries, ArrayRef<const void*> removed)
    REQUIRES(g_jit_debug_lock) {
  DCHECK(std::is_sorted(removed.begin(), removed.end()));
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return;
  }
  JITDescriptor& descriptor = __jit_debug_descriptor;
  bool is_zygote = Runtime::Current()->IsZygote();

  // Collect entries that we want to pack.
  std::vector<const JITCodeEntry*> entries;
  entries.reserve(2 * kJitRepackFrequency);
  for (const JITCodeEntry* it = descriptor.head_; it != nullptr; it = it->next_) {
    if (it == descriptor.zygote_head_entry_ && !is_zygote) {
      break;  // Memory owned by the zygote process (read-only for an app).
    }
    if (it->allow_packing_) {
      if (!compress_entries && it->is_compressed_ && removed.empty()) {
        continue;  // If we are not compressing, also avoid decompressing.
      }
      entries.push_back(it);
    }
  }
  auto cmp = [](const JITCodeEntry* l, const JITCodeEntry* r) { return l->addr_ < r->addr_; };
  std::sort(entries.begin(), entries.end(), cmp);  // Sort by address.

  // Process the entries in groups (each spanning a memory range of size kJitRepackGroupSize).
  for (auto group_it = entries.begin(); group_it != entries.end();) {
    const void* group_ptr = AlignDown((*group_it)->addr_, kJitRepackGroupSize);
    const void* group_end = reinterpret_cast<const uint8_t*>(group_ptr) + kJitRepackGroupSize;

    // Find all entries in this group (each entry is an in-memory ELF file).
    auto begin = group_it;
    auto end = std::find_if(begin, entries.end(), [=](auto* e) { return e->addr_ >= group_end; });
    CHECK(end > begin);
    ArrayRef<const JITCodeEntry*> elfs(&*begin, end - begin);

    // Find all symbols that have been removed in this memory range.
    auto removed_begin = std::lower_bound(removed.begin(), removed.end(), group_ptr);
    auto removed_end = std::lower_bound(removed.begin(), removed.end(), group_end);
    CHECK(removed_end >= removed_begin);
    ArrayRef<const void*> removed_subset(&*removed_begin, removed_end - removed_begin);

    // Optimization: Don't compress the last group since it will likely change again soon.
    bool compress = compress_entries && end != entries.end();

    // Bail out early if there is nothing to do for this group.
    if (elfs.size() == 1 && removed_subset.empty() && (*begin)->is_compressed_ == compress) {
      group_it = end;  // Go to the next group.
      continue;
    }

    // Create a new single JITCodeEntry that covers this memory range.
    uint64_t start_time = MicroTime();
    size_t live_symbols;
    std::vector<uint8_t> packed = jit->GetJitCompiler()->PackElfFileForJIT(
        elfs, removed_subset, compress, &live_symbols);
    VLOG(jit)
        << "JIT mini-debug-info repacked"
        << " for " << group_ptr
        << " in " << MicroTime() - start_time << "us"
        << " elfs=" << elfs.size()
        << " dead=" << removed_subset.size()
        << " live=" << live_symbols
        << " size=" << packed.size() << (compress ? "(lzma)" : "");

    // Replace the old entries with the new one (with their lifetime temporally overlapping).
    CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(packed),
                                              /*addr=*/ group_ptr,
                                              /*allow_packing=*/ true,
                                              /*is_compressed=*/ compress);
    for (auto it : elfs) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    group_it = end;  // Go to the next group.
  }
  g_jit_num_unpacked_entries = 0;
}

void RepackNativeDebugInfoForJitLocked() REQUIRES(g_jit_debug_lock);

void AddNativeDebugInfoForJit(const void* code_ptr,
                              const std::vector<uint8_t>& symfile,
                              bool allow_packing) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  DCHECK_NE(symfile.size(), 0u);
  if (kIsDebugBuild && code_ptr != nullptr) {
    DCHECK(g_dcheck_all_jit_functions.insert(code_ptr).second) << code_ptr << " already added";
  }

  // Remove all methods which have been marked for removal. The JIT GC should
  // force a repack, so this should happen only rarely for various corner cases.
  // Must be done before the addition in case the added code_ptr is in the removed set.
  if (!g_removed_jit_functions.empty()) {
    RepackNativeDebugInfoForJitLocked();
  }

  CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(symfile),
                                            /*addr=*/ code_ptr,
                                            /*allow_packing=*/ allow_packing,
                                            /*is_compressed=*/ false);

  VLOG(jit)
      << "JIT mini-debug-info added"
      << " for " << code_ptr
      << " size=" << PrettySize(symfile.size());

  // Automatically repack entries on a regular basis to save space.
  // Pack (but don't compress) recent entries - this is cheap and reduces memory use by ~4x.
  // We delay compression until after GC since it is more expensive (and saves a further ~4x).
  // Always compress in the zygote, since it does not GC and we want the high-water mark low.
  if (++g_jit_num_unpacked_entries >= kJitRepackFrequency) {
    bool is_zygote = Runtime::Current()->IsZygote();
    RepackEntries(/*compress_entries=*/ is_zygote, /*removed=*/ ArrayRef<const void*>());
  }
}

void RemoveNativeDebugInfoForJit(const void* code_ptr) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  g_dcheck_all_jit_functions.erase(code_ptr);

  // Method removal is very expensive since we need to decompress and read ELF files.
  // Collect the methods to be removed and do the removal in bulk later.
  g_removed_jit_functions.push_back(code_ptr);

  VLOG(jit) << "JIT mini-debug-info removed for " << code_ptr;
}

void RepackNativeDebugInfoForJitLocked() {
  // Remove entries which are inside packed and compressed ELF files.
  std::vector<const void*>& removed = g_removed_jit_functions;
  std::sort(removed.begin(), removed.end());
  RepackEntries(/*compress_entries=*/ true, ArrayRef<const void*>(removed));

  // Remove entries which are not allowed to be packed (each containing a single method).
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr;) {
    const JITCodeEntry* next = it->next_;
    if (!it->allow_packing_ && std::binary_search(removed.begin(), removed.end(), it->addr_)) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    it = next;
  }

  removed.clear();
  removed.shrink_to_fit();
}

void RepackNativeDebugInfoForJit() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  RepackNativeDebugInfoForJitLocked();
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  size_t size = 0;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    size += sizeof(JITCodeEntry) + it->symfile_size_;
  }
  return size;
}

Mutex* GetNativeDebugInfoLock() {
  return &g_jit_debug_lock;
}

void ForEachNativeDebugSymbol(std::function<void(const void*, size_t, const char*)> cb) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    ArrayRef<const uint8_t> buffer(it->symfile_addr_, it->symfile_size_);
    if (!buffer.empty()) {
      ElfDebugReader<ElfRuntimeTypes> reader(buffer);
      reader.VisitFunctionSymbols([&](ElfRuntimeTypes::Sym sym, const char* name) {
        cb(reinterpret_cast<const void*>(sym.st_value), sym.st_size, name);
      });
    }
  }
}

}  // namespace art