1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <errno.h>
18 #include <fcntl.h>
19 #include <string.h>
20 #include <sys/mman.h>
21 #include <sys/ptrace.h>
22 #include <sys/stat.h>
23 #include <sys/types.h>
24 #include <sys/uio.h>
25 #include <unistd.h>
26
27 #include <algorithm>
28 #include <memory>
29
30 #include <android-base/unique_fd.h>
31
32 #include <unwindstack/Memory.h>
33
34 #include "Check.h"
35 #include "MemoryBuffer.h"
36 #include "MemoryCache.h"
37 #include "MemoryFileAtOffset.h"
38 #include "MemoryLocal.h"
39 #include "MemoryOffline.h"
40 #include "MemoryOfflineBuffer.h"
41 #include "MemoryRange.h"
42 #include "MemoryRemote.h"
43
44 namespace unwindstack {
45
// Reads |len| bytes from |remote_src| in process |pid| into |dst| using
// process_vm_readv. Returns the number of bytes actually read (which may be
// less than |len| if part of the remote range is unreadable). On failure,
// errno reflects the last error.
static size_t ProcessVmRead(pid_t pid, uint64_t remote_src, void* dst, size_t len) {
  // Split up the remote read across page boundaries.
  // From the manpage:
  //   A partial read/write may result if one of the remote_iov elements points to an invalid
  //   memory region in the remote process.
  //
  //   Partial transfers apply at the granularity of iovec elements. These system calls won't
  //   perform a partial transfer that splits a single iovec element.
  constexpr size_t kMaxIovecs = 64;
  struct iovec src_iovs[kMaxIovecs];

  uint64_t cur = remote_src;
  size_t total_read = 0;
  while (len > 0) {
    struct iovec dst_iov = {
        .iov_base = &reinterpret_cast<uint8_t*>(dst)[total_read], .iov_len = len,
    };

    size_t iovecs_used = 0;
    size_t batch_len = 0;  // Total bytes requested by this batch of iovecs.
    while (len > 0) {
      if (iovecs_used == kMaxIovecs) {
        break;
      }

      // struct iovec uses void* for iov_base.
      if (cur >= UINTPTR_MAX) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_base = reinterpret_cast<void*>(cur);

      // Never let a single iovec span a page boundary, so that an unreadable
      // page only loses that page's bytes rather than the whole element.
      uintptr_t misalignment = cur & (getpagesize() - 1);
      size_t iov_len = getpagesize() - misalignment;
      iov_len = std::min(iov_len, len);

      len -= iov_len;
      if (__builtin_add_overflow(cur, iov_len, &cur)) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_len = iov_len;
      batch_len += iov_len;
      ++iovecs_used;
    }

    ssize_t rc = process_vm_readv(pid, &dst_iov, 1, src_iovs, iovecs_used, 0);
    if (rc == -1) {
      return total_read;
    }
    total_read += rc;
    // A partial transfer means an unreadable region was hit. Stop here:
    // continuing with the next batch would copy bytes from later remote
    // addresses into dst immediately after the short read, silently
    // misaligning everything that follows.
    if (static_cast<size_t>(rc) < batch_len) {
      return total_read;
    }
  }
  return total_read;
}
101
// Reads one word at |addr| in process |pid| via PTRACE_PEEKTEXT into |*value|.
// Returns false if the ptrace call failed.
static bool PtraceReadLong(pid_t pid, uint64_t addr, long* value) {
  // ptrace() reports failure by returning -1 and setting errno, but -1 is
  // also a legitimate word value. Clear errno first so the two cases can be
  // told apart afterwards.
  errno = 0;
  long word = ptrace(PTRACE_PEEKTEXT, pid, reinterpret_cast<void*>(addr), nullptr);
  *value = word;
  return !(word == -1 && errno != 0);
}
112
PtraceRead(pid_t pid,uint64_t addr,void * dst,size_t bytes)113 static size_t PtraceRead(pid_t pid, uint64_t addr, void* dst, size_t bytes) {
114 // Make sure that there is no overflow.
115 uint64_t max_size;
116 if (__builtin_add_overflow(addr, bytes, &max_size)) {
117 return 0;
118 }
119
120 size_t bytes_read = 0;
121 long data;
122 size_t align_bytes = addr & (sizeof(long) - 1);
123 if (align_bytes != 0) {
124 if (!PtraceReadLong(pid, addr & ~(sizeof(long) - 1), &data)) {
125 return 0;
126 }
127 size_t copy_bytes = std::min(sizeof(long) - align_bytes, bytes);
128 memcpy(dst, reinterpret_cast<uint8_t*>(&data) + align_bytes, copy_bytes);
129 addr += copy_bytes;
130 dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + copy_bytes);
131 bytes -= copy_bytes;
132 bytes_read += copy_bytes;
133 }
134
135 for (size_t i = 0; i < bytes / sizeof(long); i++) {
136 if (!PtraceReadLong(pid, addr, &data)) {
137 return bytes_read;
138 }
139 memcpy(dst, &data, sizeof(long));
140 dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + sizeof(long));
141 addr += sizeof(long);
142 bytes_read += sizeof(long);
143 }
144
145 size_t left_over = bytes & (sizeof(long) - 1);
146 if (left_over) {
147 if (!PtraceReadLong(pid, addr, &data)) {
148 return bytes_read;
149 }
150 memcpy(dst, &data, left_over);
151 bytes_read += left_over;
152 }
153 return bytes_read;
154 }
155
ReadFully(uint64_t addr,void * dst,size_t size)156 bool Memory::ReadFully(uint64_t addr, void* dst, size_t size) {
157 size_t rc = Read(addr, dst, size);
158 return rc == size;
159 }
160
ReadString(uint64_t addr,std::string * dst,size_t max_read)161 bool Memory::ReadString(uint64_t addr, std::string* dst, size_t max_read) {
162 char buffer[256]; // Large enough for 99% of symbol names.
163 size_t size = 0; // Number of bytes which were read into the buffer.
164 for (size_t offset = 0; offset < max_read; offset += size) {
165 // Look for null-terminator first, so we can allocate string of exact size.
166 // If we know the end of valid memory range, do the reads in larger blocks.
167 size_t read = std::min(sizeof(buffer), max_read - offset);
168 size = Read(addr + offset, buffer, read);
169 if (size == 0) {
170 return false; // We have not found end of string yet and we can not read more data.
171 }
172 size_t length = strnlen(buffer, size); // Index of the null-terminator.
173 if (length < size) {
174 // We found the null-terminator. Allocate the string and set its content.
175 if (offset == 0) {
176 // We did just single read, so the buffer already contains the whole string.
177 dst->assign(buffer, length);
178 return true;
179 } else {
180 // The buffer contains only the last block. Read the whole string again.
181 dst->assign(offset + length, '\0');
182 return ReadFully(addr, dst->data(), dst->size());
183 }
184 }
185 }
186 return false;
187 }
188
CreateFileMemory(const std::string & path,uint64_t offset)189 std::unique_ptr<Memory> Memory::CreateFileMemory(const std::string& path, uint64_t offset) {
190 auto memory = std::make_unique<MemoryFileAtOffset>();
191
192 if (memory->Init(path, offset)) {
193 return memory;
194 }
195
196 return nullptr;
197 }
198
CreateProcessMemory(pid_t pid)199 std::shared_ptr<Memory> Memory::CreateProcessMemory(pid_t pid) {
200 if (pid == getpid()) {
201 return std::shared_ptr<Memory>(new MemoryLocal());
202 }
203 return std::shared_ptr<Memory>(new MemoryRemote(pid));
204 }
205
CreateProcessMemoryCached(pid_t pid)206 std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) {
207 if (pid == getpid()) {
208 return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal()));
209 }
210 return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid)));
211 }
212
CreateOfflineMemory(const uint8_t * data,uint64_t start,uint64_t end)213 std::shared_ptr<Memory> Memory::CreateOfflineMemory(const uint8_t* data, uint64_t start,
214 uint64_t end) {
215 return std::shared_ptr<Memory>(new MemoryOfflineBuffer(data, start, end));
216 }
217
Read(uint64_t addr,void * dst,size_t size)218 size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) {
219 if (addr >= size_) {
220 return 0;
221 }
222
223 size_t bytes_left = size_ - static_cast<size_t>(addr);
224 const unsigned char* actual_base = static_cast<const unsigned char*>(raw_) + addr;
225 size_t actual_len = std::min(bytes_left, size);
226
227 memcpy(dst, actual_base, actual_len);
228 return actual_len;
229 }
230
GetPtr(size_t offset)231 uint8_t* MemoryBuffer::GetPtr(size_t offset) {
232 if (offset < size_) {
233 return &raw_[offset];
234 }
235 return nullptr;
236 }
237
// Releases any outstanding file mapping.
MemoryFileAtOffset::~MemoryFileAtOffset() {
  Clear();
}
241
Clear()242 void MemoryFileAtOffset::Clear() {
243 if (data_) {
244 munmap(&data_[-offset_], size_ + offset_);
245 data_ = nullptr;
246 }
247 }
248
// Maps |file| into memory for reading, starting at byte |offset| and covering
// up to |size| bytes (the default for |size| is declared in the header —
// presumably "rest of file"; confirm there). Returns false if the file cannot
// be opened, stat'd, or mapped, or if |offset| is at/past the end of the file.
bool MemoryFileAtOffset::Init(const std::string& file, uint64_t offset, uint64_t size) {
  // Clear out any previous data if it exists.
  Clear();

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
  if (fd == -1) {
    return false;
  }
  struct stat buf;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  if (offset >= static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  // mmap requires a page-aligned file offset. Map from the aligned offset and
  // remember the misalignment (offset_) so data_ can be biased to point at
  // the byte the caller actually asked for.
  offset_ = offset & (getpagesize() - 1);
  uint64_t aligned_offset = offset & ~(getpagesize() - 1);
  if (aligned_offset > static_cast<uint64_t>(buf.st_size) ||
      offset > static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  size_ = buf.st_size - aligned_offset;
  uint64_t max_size;
  // Shrink the mapping to the requested size, but only when |size| + the
  // misalignment does not overflow and is smaller than what the file holds.
  if (!__builtin_add_overflow(size, offset_, &max_size) && max_size < size_) {
    // Truncate the mapped size.
    size_ = max_size;
  }
  void* map = mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
  if (map == MAP_FAILED) {
    return false;
  }

  // data_ points at the requested offset inside the mapping; size_ becomes
  // the number of readable bytes from data_ onward. Clear() undoes this bias
  // when unmapping.
  data_ = &reinterpret_cast<uint8_t*>(map)[offset_];
  size_ -= offset_;

  return true;
}
288
Read(uint64_t addr,void * dst,size_t size)289 size_t MemoryFileAtOffset::Read(uint64_t addr, void* dst, size_t size) {
290 if (addr >= size_) {
291 return 0;
292 }
293
294 size_t bytes_left = size_ - static_cast<size_t>(addr);
295 const unsigned char* actual_base = static_cast<const unsigned char*>(data_) + addr;
296 size_t actual_len = std::min(bytes_left, size);
297
298 memcpy(dst, actual_base, actual_len);
299 return actual_len;
300 }
301
// Reads |size| bytes at |addr| from the remote process. The first successful
// mechanism (process_vm_readv or ptrace) is cached in the atomic
// read_redirect_func_ so later reads skip the probing below.
size_t MemoryRemote::Read(uint64_t addr, void* dst, size_t size) {
#if !defined(__LP64__)
  // Cannot read an address greater than 32 bits in a 32 bit context.
  if (addr > UINT32_MAX) {
    return 0;
  }
#endif

  // read_redirect_func_ stores the chosen reader as a uintptr_t; a nonzero
  // value means a mechanism has already proven itself.
  size_t (*read_func)(pid_t, uint64_t, void*, size_t) =
      reinterpret_cast<size_t (*)(pid_t, uint64_t, void*, size_t)>(read_redirect_func_.load());
  if (read_func != nullptr) {
    return read_func(pid_, addr, dst, size);
  } else {
    // Prefer process_vm_read, try it first. If it doesn't work, use the
    // ptrace function. If at least one of them returns at least some data,
    // set that as the permanent function to use.
    // This assumes that if process_vm_read works once, it will continue
    // to work.
    size_t bytes = ProcessVmRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(ProcessVmRead);
      return bytes;
    }
    bytes = PtraceRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(PtraceRead);
    }
    return bytes;
  }
}
332
Read(uint64_t addr,void * dst,size_t size)333 size_t MemoryLocal::Read(uint64_t addr, void* dst, size_t size) {
334 return ProcessVmRead(getpid(), addr, dst, size);
335 }
336
337 #if !defined(ANDROID_EXPERIMENTAL_MTE)
// Stub used when MTE tag support is compiled out (see the surrounding
// ANDROID_EXPERIMENTAL_MTE guard): no tag is ever available.
long MemoryRemote::ReadTag(uint64_t) {
  return -1;
}
341
// Stub used when MTE tag support is compiled out (see the surrounding
// ANDROID_EXPERIMENTAL_MTE guard): no tag is ever available.
long MemoryLocal::ReadTag(uint64_t) {
  return -1;
}
345 #endif
346
// Exposes addresses [offset, offset + length) of this object, backed by
// [begin, begin + length) of |memory| (see MemoryRange::Read for the
// translation).
MemoryRange::MemoryRange(const std::shared_ptr<Memory>& memory, uint64_t begin, uint64_t length,
                         uint64_t offset)
    : memory_(memory), begin_(begin), length_(length), offset_(offset) {}
350
Read(uint64_t addr,void * dst,size_t size)351 size_t MemoryRange::Read(uint64_t addr, void* dst, size_t size) {
352 if (addr < offset_) {
353 return 0;
354 }
355
356 uint64_t read_offset = addr - offset_;
357 if (read_offset >= length_) {
358 return 0;
359 }
360
361 uint64_t read_length = std::min(static_cast<uint64_t>(size), length_ - read_offset);
362 uint64_t read_addr;
363 if (__builtin_add_overflow(read_offset, begin_, &read_addr)) {
364 return 0;
365 }
366
367 return memory_->Read(read_addr, dst, read_length);
368 }
369
// Registers |memory|, keyed by its exclusive end address so that
// upper_bound(addr) in Read() finds the candidate range containing addr.
// NOTE(review): if a range with the same end address was already inserted,
// emplace() is a no-op and |memory| appears to leak — confirm whether callers
// guarantee unique end addresses.
void MemoryRanges::Insert(MemoryRange* memory) {
  maps_.emplace(memory->offset() + memory->length(), memory);
}
373
Read(uint64_t addr,void * dst,size_t size)374 size_t MemoryRanges::Read(uint64_t addr, void* dst, size_t size) {
375 auto entry = maps_.upper_bound(addr);
376 if (entry != maps_.end()) {
377 return entry->second->Read(addr, dst, size);
378 }
379 return 0;
380 }
381
Init(const std::string & file,uint64_t offset)382 bool MemoryOffline::Init(const std::string& file, uint64_t offset) {
383 auto memory_file = std::make_shared<MemoryFileAtOffset>();
384 if (!memory_file->Init(file, offset)) {
385 return false;
386 }
387
388 // The first uint64_t value is the start of memory.
389 uint64_t start;
390 if (!memory_file->ReadFully(0, &start, sizeof(start))) {
391 return false;
392 }
393
394 uint64_t size = memory_file->Size();
395 if (__builtin_sub_overflow(size, sizeof(start), &size)) {
396 return false;
397 }
398
399 memory_ = std::make_unique<MemoryRange>(memory_file, sizeof(start), size, start);
400 return true;
401 }
402
Read(uint64_t addr,void * dst,size_t size)403 size_t MemoryOffline::Read(uint64_t addr, void* dst, size_t size) {
404 if (!memory_) {
405 return 0;
406 }
407
408 return memory_->Read(addr, dst, size);
409 }
410
// Wraps a caller-owned buffer serving addresses [start, end). |data| is
// referenced, not copied, and must remain valid for this object's lifetime.
MemoryOfflineBuffer::MemoryOfflineBuffer(const uint8_t* data, uint64_t start, uint64_t end)
    : data_(data), start_(start), end_(end) {}
413
// Repoints this object at a new caller-owned buffer covering [start, end).
void MemoryOfflineBuffer::Reset(const uint8_t* data, uint64_t start, uint64_t end) {
  data_ = data;
  start_ = start;
  end_ = end;
}
419
Read(uint64_t addr,void * dst,size_t size)420 size_t MemoryOfflineBuffer::Read(uint64_t addr, void* dst, size_t size) {
421 if (addr < start_ || addr >= end_) {
422 return 0;
423 }
424
425 size_t read_length = std::min(size, static_cast<size_t>(end_ - addr));
426 memcpy(dst, &data_[addr - start_], read_length);
427 return read_length;
428 }
429
// This object owns the MemoryOffline parts stored in memories_; free them.
MemoryOfflineParts::~MemoryOfflineParts() {
  for (auto memory : memories_) {
    delete memory;
  }
}
435
Read(uint64_t addr,void * dst,size_t size)436 size_t MemoryOfflineParts::Read(uint64_t addr, void* dst, size_t size) {
437 if (memories_.empty()) {
438 return 0;
439 }
440
441 // Do a read on each memory object, no support for reading across the
442 // different memory objects.
443 for (MemoryOffline* memory : memories_) {
444 size_t bytes = memory->Read(addr, dst, size);
445 if (bytes != 0) {
446 return bytes;
447 }
448 }
449 return 0;
450 }
451
// Reads through a cache of kCacheSize-byte lines (addresses are bucketed by
// kCacheBits) in front of impl_. Large reads bypass the cache. A small read
// touches at most two adjacent cache lines.
// NOTE(review): cache_ is looked up and mutated without any visible
// synchronization — confirm callers use this object from a single thread.
size_t MemoryCache::Read(uint64_t addr, void* dst, size_t size) {
  // Only bother caching and looking at the cache if this is a small read for now.
  if (size > 64) {
    return impl_->Read(addr, dst, size);
  }

  uint64_t addr_page = addr >> kCacheBits;
  auto entry = cache_.find(addr_page);
  uint8_t* cache_dst;
  if (entry != cache_.end()) {
    cache_dst = entry->second;
  } else {
    // Miss: operator[] creates the cache line, then it is filled from the
    // underlying memory. On failure the entry is erased so a later read
    // retries instead of serving uninitialized data.
    cache_dst = cache_[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache_.erase(addr_page);
      return impl_->Read(addr, dst, size);
    }
  }
  // Bytes available in the current cache line starting at addr.
  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
  if (size <= max_read) {
    memcpy(dst, &cache_dst[addr & kCacheMask], size);
    return size;
  }

  // The read crossed into another cached entry, since a read can only cross
  // into one extra cached page, duplicate the code rather than looping.
  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
  addr_page++;

  entry = cache_.find(addr_page);
  if (entry != cache_.end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = cache_[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache_.erase(addr_page);
      // Direct-read the tail; the head bytes already copied from the first
      // cache line still count toward the result.
      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
    }
  }
  memcpy(dst, cache_dst, size - max_read);
  return size;
}
497
498 } // namespace unwindstack
499