/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_STATIC)
#error This file should not be compiled for static targets.
#endif

#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <platform/bionic/malloc.h>
#include <private/bionic_config.h>
#include <private/bionic_malloc_dispatch.h>
#include <sys/system_properties.h>

#include "gwp_asan_wrappers.h"
#include "malloc_common.h"
#include "malloc_common_dynamic.h"
#include "malloc_heapprofd.h"
#include "malloc_limit.h"

// Installing heapprofd hooks is a multi-step process, as outlined below.
//
// The incremental hooking and a dedicated task thread are used since we cannot
// do heavy work within a signal handler, or when blocking a malloc invocation.
//
// +--->+-------------+------------------+
// | +->+kInitialState+----------------+ |  malloc functions are not intercepted in any way.
// | |  +-------+-----+                | |
// | |          | HandleHeapprofd      | |
// | |          v Signal()             | |
// | |  +-------+----------------+     | |  currently installing the ephemeral hooks.
// | |  |kInstallingEphemeralHook|<--+ | |
// | |  +-------+----------------+   | | |
// | |          |                    | | |
// | |          v                    | | |
// | |  +-------+---------------+    | | |  ephemeral hooks are installed. on the first call to
// | |  |kEphemeralHookInstalled|    | | |  malloc these hooks spawn a thread that installs the
// | |  +-------+---------------+    A B C  heapprofd hooks.
// | |          | MallocInit         | | |
// | |          v HeapprofdHook ()   | | |
// | |  +-------+--------------+     | | |  first call to malloc happened. the hooks are reset to
// | +--|kRemovingEphemeralHook|     | | |  kInitialState.
// |    +----------------------+     | | |
// |                                 | | |
// |                                 | | |
// |    +---------------+            | | |  currently installing the heapprofd hook
// |    |kInstallingHook|<-----------|-+ |
// |    +-------+-------+            |   |
// |            |                    |   |
// |            v                    |   |
// |    +-------+------+             |   |  heapprofd hooks are installed. these forward calls to
// |    |kHookInstalled|-------------+   |  malloc / free / etc. to heapprofd_client.so.
// |    +-------+------+                 |
// |            | DispatchReset()        |
// |            v                        |
// |    +-------+---------+              |  currently resetting the hooks to default.
// |----+kUninstallingHook|              |
//      +-----------------+              |
//                                       |
//                                       |
//      +------------------+             |  malloc debug / malloc hooks are active. these take
//      |kIncompatibleHooks+<------------+  precedence over heapprofd, so heapprofd will not get
//      +------------------+                enabled. this is a terminal state.
//
//
// A) HandleHeapprofdSignal()
// B) HeapprofdInstallHooksAtInit() / InitHeapprofd()
// C) HeapprofdRememberHookConflict()
enum MallocHeapprofdState : uint8_t {
  kInitialState,
  kInstallingEphemeralHook,
  kEphemeralHookInstalled,
  kRemovingEphemeralHook,
  kInstallingHook,
  kHookInstalled,
  kUninstallingHook,
  kIncompatibleHooks
};

enum ModifyGlobalsMode {
  kWithLock,    // all calls to MaybeModifyGlobals with kWithLock will serialise. they can fail
                // due to a concurrent call with kWithoutLock.
  kWithoutLock  // calls to MaybeModifyGlobals with kWithoutLock do not serialise. they can fail
                // due to concurrent calls with kWithoutLock or kWithLock.
};

// Provide mutual exclusion so no two threads try to modify the globals at the same time.
template <typename Fn>
bool MaybeModifyGlobals(ModifyGlobalsMode mode, Fn f) {
  bool success = false;
  if (mode == kWithLock) {
    pthread_mutex_lock(&gGlobalsMutateLock);
  }
  // As we have grabbed the mutex, the following condition should always hold, except
  // if we are currently running HandleHeapprofdSignal.
  if (!atomic_exchange(&gGlobalsMutating, true)) {
    f();
    success = true;
    atomic_store(&gGlobalsMutating, false);
  } else {
    error_log("%s: heapprofd client: concurrent modification.", getprogname());
  }
  if (mode == kWithLock) {
    pthread_mutex_unlock(&gGlobalsMutateLock);
  }
  return success;
}

extern "C" void* MallocInitHeapprofdHook(size_t);

static constexpr char kHeapprofdSharedLib[] = "heapprofd_client.so";
static constexpr char kHeapprofdPrefix[] = "heapprofd";
static constexpr char kHeapprofdPropertyEnable[] = "heapprofd.enable";

constexpr char kHeapprofdProgramPropertyPrefix[] = "heapprofd.enable.";
constexpr size_t kHeapprofdProgramPropertyPrefixSize = sizeof(kHeapprofdProgramPropertyPrefix) - 1;
constexpr size_t kMaxCmdlineSize = 512;

// The handle returned by dlopen when previously loading the heapprofd
// hooks. nullptr if the shared library has not already been loaded.
static _Atomic (void*) gHeapprofdHandle = nullptr;
static _Atomic MallocHeapprofdState gHeapprofdState = kInitialState;

static bool GetHeapprofdProgramProperty(char* data, size_t size) {
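  // Build the per-program property name "heapprofd.enable.<basename of the
  // first cmdline argument>" into `data`. Returns false (and logs) on error.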
  if (size < kHeapprofdProgramPropertyPrefixSize) {
    error_log("%s: Overflow constructing heapprofd property", getprogname());
    return false;
  }
  memcpy(data, kHeapprofdProgramPropertyPrefix, kHeapprofdProgramPropertyPrefixSize);

  int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
  if (fd == -1) {
    error_log("%s: Failed to open /proc/self/cmdline", getprogname());
    return false;
  }
  char cmdline[kMaxCmdlineSize];
  ssize_t rd = read(fd, cmdline, sizeof(cmdline) - 1);
  close(fd);
  if (rd == -1) {
    error_log("%s: Failed to read /proc/self/cmdline", getprogname());
    return false;
  }
  cmdline[rd] = '\0';
  char* first_arg = static_cast<char*>(memchr(cmdline, '\0', rd));
  if (first_arg == nullptr) {
    error_log("%s: Overflow reading cmdline", getprogname());
    return false;
  }
  // For consistency with what we do with Java app cmdlines, trim everything
  // after the @ sign of the first arg.
  char* first_at = static_cast<char*>(memchr(cmdline, '@', rd));
  if (first_at != nullptr && first_at < first_arg) {
    *first_at = '\0';
    first_arg = first_at;
  }

  char* start = static_cast<char*>(memrchr(cmdline, '/', first_arg - cmdline));
  if (start == first_arg) {
    // The first argument ended in a slash.
    error_log("%s: cmdline ends in /", getprogname());
    return false;
  } else if (start == nullptr) {
    start = cmdline;
  } else {
    // Skip the /.
    start++;
  }

  size_t name_size = static_cast<size_t>(first_arg - start);
  if (name_size >= size - kHeapprofdProgramPropertyPrefixSize) {
    error_log("%s: overflow constructing heapprofd property.", getprogname());
    return false;
  }
  // + 1 to also copy the trailing null byte.
  memcpy(data + kHeapprofdProgramPropertyPrefixSize, start, name_size + 1);
  return true;
}

// Runtime triggering entry-point. Two possible call sites:
// * when receiving a profiling signal with a si_value indicating heapprofd.
// * when a Zygote child is marking itself as profileable, and there's a
//   matching profiling request for this process (in which case the heapprofd
//   client is loaded synchronously).
// In both cases, the caller is responsible for verifying that the process is
// considered profileable.

// Previously installed default dispatch table, if it exists. This is used to
// load heapprofd properly when GWP-ASan was already installed. If GWP-ASan was
// already installed, heapprofd will take over the dispatch table, but will use
// GWP-ASan as the backing dispatch. Writes to this variable are protected by
// MaybeModifyGlobals. Reads are not protected, which is why the variable has
// to be atomic. We cannot fail the call in MallocInitHeapprofdHook.
static _Atomic (const MallocDispatch*) gPreviousDefaultDispatchTable = nullptr;
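// The dispatch table installed while the ephemeral hooks are active: a copy of
// the previous (or native) dispatch with malloc redirected to
// MallocInitHeapprofdHook.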
static MallocDispatch gEphemeralDispatch;

void HandleHeapprofdSignal() {
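  // Path A in the state diagram above: install the ephemeral hooks so that the
  // next call to malloc() bootstraps the heapprofd client.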
  if (atomic_load(&gHeapprofdState) == kIncompatibleHooks) {
    error_log("%s: not enabling heapprofd, malloc_debug/malloc_hooks are enabled.", getprogname());
    return;
  }

  // We cannot grab the mutex here, as this is used in a signal handler.
  MaybeModifyGlobals(kWithoutLock, [] {
    MallocHeapprofdState expected = kInitialState;
    // If hooks are already installed, we still want to install ephemeral hooks to retrigger
    // heapprofd client initialization.
    MallocHeapprofdState expected2 = kHookInstalled;
    if (atomic_compare_exchange_strong(&gHeapprofdState, &expected,
                                       kInstallingEphemeralHook) ||
        atomic_compare_exchange_strong(&gHeapprofdState, &expected2,
                                       kInstallingEphemeralHook)) {
      const MallocDispatch* default_dispatch = GetDefaultDispatchTable();

      // Below, we initialize heapprofd lazily by redirecting libc's malloc() to
      // call MallocInitHeapprofdHook, which spawns off a thread and initializes
      // heapprofd. During the short period between now and when heapprofd is
      // initialized, allocations may need to be serviced. There are three
      // possible configurations:

      if (default_dispatch == nullptr) {
        // 1. No malloc hooking has been done (heapprofd, GWP-ASan, etc.). In
        // this case, everything but malloc() should come from the system
        // allocator.
        atomic_store(&gPreviousDefaultDispatchTable, nullptr);
        gEphemeralDispatch = *NativeAllocatorDispatch();
      } else if (DispatchIsGwpAsan(default_dispatch)) {
        // 2. GWP-ASan was installed. We should use GWP-ASan for everything but
        // malloc() in the interim period before heapprofd is properly
        // installed. After heapprofd is finished installing, we will use
        // GWP-ASan as heapprofd's backing allocator to allow heapprofd and
        // GWP-ASan to coexist.
        atomic_store(&gPreviousDefaultDispatchTable, default_dispatch);
        gEphemeralDispatch = *default_dispatch;
      } else {
        // 3. It may be possible at this point in time that heapprofd is
        // *already* the default dispatch, and as such we don't want to use
        // heapprofd as the backing store for itself (otherwise infinite
        // recursion occurs). We will use the system allocator functions. Note:
        // We've checked that no other malloc interceptors are being used by
        // checking `gHeapprofdState` for kIncompatibleHooks above, so we don't
        // need to worry about that case here.
        atomic_store(&gPreviousDefaultDispatchTable, nullptr);
        gEphemeralDispatch = *NativeAllocatorDispatch();
      }

      // Now, replace the malloc function so that the next call to malloc() will
      // initialize heapprofd.
      gEphemeralDispatch.malloc = MallocInitHeapprofdHook;

      // And finally, install these new malloc-family interceptors.
      __libc_globals.mutate([](libc_globals* globals) {
        atomic_store(&globals->default_dispatch_table, &gEphemeralDispatch);
        if (!MallocLimitInstalled()) {
          atomic_store(&globals->current_dispatch_table, &gEphemeralDispatch);
        }
      });
      atomic_store(&gHeapprofdState, kEphemeralHookInstalled);
    } else {
      error_log("%s: heapprofd: failed to transition kInitialState -> kInstallingEphemeralHook. "
                "current state (possible race): %d", getprogname(), expected2);
    }
  });
  // Otherwise, we're racing against malloc_limit's enable logic (at most once
  // per process, and a niche feature). This is highly unlikely, so simply give
  // up if it does happen.
}

bool HeapprofdShouldLoad() {
  // First check for heapprofd.enable. If it is set to "all", enable
  // heapprofd for all processes. Otherwise, check heapprofd.enable.${prog},
  // if it is set and not 0, enable heap profiling for this process.
  char property_value[PROP_VALUE_MAX];
  if (__system_property_get(kHeapprofdPropertyEnable, property_value) == 0) {
    return false;
  }
  if (strcmp(property_value, "all") == 0) {
    return true;
  }

  char program_property[kHeapprofdProgramPropertyPrefixSize + kMaxCmdlineSize];
  if (!GetHeapprofdProgramProperty(program_property,
                                   sizeof(program_property))) {
    return false;
  }
  if (__system_property_get(program_property, property_value) == 0) {
    return false;
  }
  return property_value[0] != '\0';
}

void HeapprofdRememberHookConflict() {
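  // Path C in the state diagram: malloc debug / malloc hooks take precedence
  // over heapprofd, so remember the conflict and never enable heapprofd. This
  // is a terminal state.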
  atomic_store(&gHeapprofdState, kIncompatibleHooks);
}

static void CommonInstallHooks(libc_globals* globals) {
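  // Load (or re-initialize) heapprofd_client.so and install its hooks. The
  // caller must hold exclusive access to the globals via MaybeModifyGlobals.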
  void* impl_handle = atomic_load(&gHeapprofdHandle);
  bool reusing_handle = impl_handle != nullptr;
  if (!reusing_handle) {
    impl_handle = LoadSharedLibrary(kHeapprofdSharedLib, kHeapprofdPrefix,
                                    &globals->malloc_dispatch_table);
    if (impl_handle == nullptr) {
      return;
    }
  } else if (!InitSharedLibrary(impl_handle, kHeapprofdSharedLib, kHeapprofdPrefix,
                                &globals->malloc_dispatch_table)) {
    return;
  }

  // Before we set the new default_dispatch_table in FinishInstallHooks, save
  // the previous dispatch table. If DispatchReset() gets called later, we want
  // to be able to restore the dispatch. We're still under
  // MaybeModifyGlobals locks at this point.
  atomic_store(&gPreviousDefaultDispatchTable, GetDefaultDispatchTable());

  if (FinishInstallHooks(globals, nullptr, kHeapprofdPrefix)) {
    atomic_store(&gHeapprofdHandle, impl_handle);
  } else if (!reusing_handle) {
    dlclose(impl_handle);
  }
}

void HeapprofdInstallHooksAtInit(libc_globals* globals) {
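  // Path B in the state diagram, taken at process init: install the heapprofd
  // hooks directly, without going through the ephemeral hook.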
  MaybeModifyGlobals(kWithoutLock, [globals] {
    MallocHeapprofdState expected = kInitialState;
    if (atomic_compare_exchange_strong(&gHeapprofdState, &expected, kInstallingHook)) {
      CommonInstallHooks(globals);
      atomic_store(&gHeapprofdState, kHookInstalled);
    } else {
      error_log("%s: heapprofd: failed to transition kInitialState -> kInstallingHook. "
                "current state (possible race): %d", getprogname(), expected);
    }
  });
}

static void* InitHeapprofd(void*) {
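  // Runs on the dedicated "heapprofdinit" thread spawned by
  // MallocInitHeapprofdHook; installs the real heapprofd hooks (path B).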
  MaybeModifyGlobals(kWithLock, [] {
    MallocHeapprofdState expected = kInitialState;
    if (atomic_compare_exchange_strong(&gHeapprofdState, &expected, kInstallingHook)) {
      __libc_globals.mutate([](libc_globals* globals) {
        CommonInstallHooks(globals);
      });
      atomic_store(&gHeapprofdState, kHookInstalled);
    } else {
      error_log("%s: heapprofd: failed to transition kInitialState -> kInstallingHook. "
                "current state (possible race): %d", getprogname(), expected);
    }
  });
  return nullptr;
}

extern "C" void* MallocInitHeapprofdHook(size_t bytes) {
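  // The ephemeral malloc() hook: the first allocation after the ephemeral
  // hooks were installed lands here. Restore the previous dispatch, spawn the
  // thread that installs the real heapprofd hooks, then service this
  // allocation ourselves.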
  MaybeModifyGlobals(kWithLock, [] {
    MallocHeapprofdState expected = kEphemeralHookInstalled;
    if (atomic_compare_exchange_strong(&gHeapprofdState, &expected, kRemovingEphemeralHook)) {
      __libc_globals.mutate([](libc_globals* globals) {
        const MallocDispatch* previous_dispatch = atomic_load(&gPreviousDefaultDispatchTable);
        atomic_store(&globals->default_dispatch_table, previous_dispatch);
        if (!MallocLimitInstalled()) {
          atomic_store(&globals->current_dispatch_table, previous_dispatch);
        }
      });
      atomic_store(&gHeapprofdState, kInitialState);

      pthread_t thread_id;
      if (pthread_create(&thread_id, nullptr, InitHeapprofd, nullptr) != 0) {
        error_log("%s: heapprofd: failed to pthread_create.", getprogname());
      } else if (pthread_setname_np(thread_id, "heapprofdinit") != 0) {
        error_log("%s: heapprofd: failed to pthread_setname_np", getprogname());
      } else if (pthread_detach(thread_id) != 0) {
        error_log("%s: heapprofd: failed to pthread_detach", getprogname());
      }
    } else {
      warning_log("%s: heapprofd: could not transition kEphemeralHookInstalled -> "
                  "kRemovingEphemeralHook. current state (possible race): %d. this can be benign "
                  "if two threads try this transition at the same time", getprogname(),
                  expected);
    }
  });
  // If we had a previous dispatch table, use that to service the allocation,
  // otherwise fall back to the native allocator.
  // This could be modified by a concurrent HandleHeapprofdSignal, but that is
  // benign as we will dispatch to the ephemeral handler, which will then dispatch
  // to the underlying one.
  const MallocDispatch* previous_dispatch = atomic_load(&gPreviousDefaultDispatchTable);
  if (previous_dispatch) {
    return previous_dispatch->malloc(bytes);
  }
  return NativeAllocatorDispatch()->malloc(bytes);
}

bool HeapprofdInitZygoteChildProfiling() {
  // Conditionally start "from startup" profiling.
  if (HeapprofdShouldLoad()) {
    // Directly call the signal handler codepath (properly protects against
    // concurrent invocations).
    HandleHeapprofdSignal();
  }
  return true;
}

static bool DispatchReset() {
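  // The DispatchReset() edge in the state diagram: uninstall the heapprofd
  // hooks and restore the previously saved dispatch table. Reached via
  // android_mallopt(M_RESET_HOOKS).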
  if (atomic_load(&gHeapprofdState) == kInitialState) {
    return true;
  }

  bool success = false;
  MaybeModifyGlobals(kWithLock, [&success] {
    MallocHeapprofdState expected = kHookInstalled;

    if (atomic_compare_exchange_strong(&gHeapprofdState, &expected, kUninstallingHook)) {
      __libc_globals.mutate([](libc_globals* globals) {
        const MallocDispatch* previous_dispatch = atomic_load(&gPreviousDefaultDispatchTable);
        atomic_store(&globals->default_dispatch_table, previous_dispatch);
        if (!MallocLimitInstalled()) {
          atomic_store(&globals->current_dispatch_table, previous_dispatch);
        }
      });
      atomic_store(&gHeapprofdState, kInitialState);
      success = true;
    } else {
      error_log("%s: heapprofd: failed to transition kHookInstalled -> kUninstallingHook. "
                "current state (possible race): %d", getprogname(),
                expected);
    }
  });
  if (!success) {
    errno = EAGAIN;
  }
  return success;
}

bool HeapprofdMallopt(int opcode, void* arg, size_t arg_size) {
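  // heapprofd's android_mallopt() handler: only M_RESET_HOOKS is currently
  // supported.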
  if (opcode == M_RESET_HOOKS) {
    if (arg != nullptr || arg_size != 0) {
      errno = EINVAL;
      return false;
    }
    return DispatchReset();
  }
  errno = ENOTSUP;
  return false;
}