/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <unistd.h>

#if defined(__BIONIC__)
#include <bionic/mte.h>
#endif

#include <algorithm>
#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"

namespace android {
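
// Strip the tag bits (the top byte, used by TBI and MTE on aarch64) from a
// pointer value so that tagged and untagged pointers to the same allocation
// compare equal.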
static inline uintptr_t UntagAddress(uintptr_t addr) {
#if defined(__aarch64__)
  constexpr uintptr_t mask = (static_cast<uintptr_t>(1) << 56) - 1;
  addr = addr & mask;
#endif
  return addr;
}

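// Record an allocation [begin, end). Zero-length allocations are bumped to
// one byte so that every allocation occupies a unique address. Returns false
// if the range duplicates or overlaps an existing allocation.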
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  begin = UntagAddress(begin);
  end = UntagAddress(end);
  Range range{begin, end};
  if (valid_mappings_range_.end != 0 &&
      (begin < valid_mappings_range_.begin || end > valid_mappings_range_.end)) {
    MEM_LOG_ALWAYS_FATAL("allocation %p-%p is outside mapping range %p-%p",
                         reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end),
                         reinterpret_cast<void*>(valid_mappings_range_.begin),
                         reinterpret_cast<void*>(valid_mappings_range_.end));
  }
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
    if (overlap != range) {
      MEM_ALOGE("range %p-%p overlaps with existing range %p-%p", reinterpret_cast<void*>(begin),
                reinterpret_cast<void*>(end), reinterpret_cast<void*>(overlap.begin),
                reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}

// Sanitizers and MTE may consider certain memory inaccessible through certain pointers.
// With MTE we set PSTATE.TCO during the access to suppress tag checks.
static uintptr_t ReadWordAtAddressUnsafe(uintptr_t word_ptr)
    __attribute__((no_sanitize("address", "hwaddress"))) {
#if defined(__BIONIC__)
  ScopedDisableMTE x;
#endif
  return *reinterpret_cast<uintptr_t*>(word_ptr);
}

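// Check whether the word at word_ptr, interpreted as a pointer, points into
// a known allocation; if so, return that allocation's range and info.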
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page. If so, it will be
  // caught and handled by mmapping a zero page over the faulting page.
  uintptr_t value = ReadWordAtAddressUnsafe(word_ptr);
  value = UntagAddress(value);
  walking_ptr_ = 0;
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}

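// Mark every allocation transitively reachable from root, using an explicit
// work list rather than recursion to keep stack usage bounded.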
void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    walking_range_ = range;
    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
    walking_range_ = Range{0, 0};
  }
}

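// Extend the range of mapped memory that allocations are allowed to lie in.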
void HeapWalker::Mapping(uintptr_t begin, uintptr_t end) {
  valid_mappings_range_.begin = std::min(valid_mappings_range_.begin, begin);
  valid_mappings_range_.end = std::max(valid_mappings_range_.end, end);
}

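// Register a memory range, or a list of individual values, to be scanned as
// roots of the reachability walk.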
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}

void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}

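// Mark all allocations reachable from the registered roots and root values.
// Allocations left unmarked afterwards are considered leaked.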
bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);

  RecurseRoot(vals);

  if (segv_page_count_ > 0) {
    MEM_ALOGE("%zu pages skipped due to segfaults", segv_page_count_);
  }

  return true;
}

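// Copy up to limit unreferenced allocations into leaked, and report the
// total number of leaks and leaked bytes through the out parameters.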
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
                        size_t* leak_bytes_out) {
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}

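// Replace the page containing addr with a fresh zero-filled read-only page
// so that the faulting read can be retried.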
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));

  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }

  return true;
}

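// SIGSEGV handler installed while walking memory. If the fault was at the
// address currently being read, log it once, count the skipped page, and map
// a zero page over the faulting page so the walk can continue; otherwise
// reset the handler so the fault is handled normally.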
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
                                void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    handler.reset();
    return;
  }
  if (!segv_logged_) {
    MEM_ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
    if (walking_range_.begin != 0U) {
      MEM_ALOGW("while walking range %p-%p", reinterpret_cast<void*>(walking_range_.begin),
                reinterpret_cast<void*>(walking_range_.end));
    }
    segv_logged_ = true;
  }
  segv_page_count_++;
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}

Allocator<ScopedSignalHandler::SignalFnMap>::unique_ptr ScopedSignalHandler::handler_map_;

}  // namespace android