1 /*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <gtest/gtest.h>
18
19 #include <elf.h>
20 #include <limits.h>
21 #include <malloc.h>
22 #include <pthread.h>
23 #include <signal.h>
24 #include <stdint.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <sys/auxv.h>
29 #include <sys/prctl.h>
30 #include <sys/types.h>
31 #include <sys/wait.h>
32 #include <unistd.h>
33
34 #include <atomic>
35 #include <thread>
36
37 #include <tinyxml2.h>
38
39 #include <android-base/file.h>
40
41 #include "utils.h"
42
43 #if defined(__BIONIC__)
44
45 #include "SignalUtils.h"
46
47 #include "platform/bionic/malloc.h"
48 #include "platform/bionic/mte_kernel.h"
49 #include "platform/bionic/reserved_signals.h"
50 #include "private/bionic_config.h"
51
52 #define HAVE_REALLOCARRAY 1
53
54 #else
55
56 #define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
57
58 #endif
59
TEST(malloc, malloc_std) {
  // Allocate a small block and verify the usable size covers the request.
  void* p = malloc(100);
  ASSERT_NE(nullptr, p);
  ASSERT_LE(100U, malloc_usable_size(p));
  free(p);
}
67
TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  // An impossibly large request must fail with ENOMEM rather than abort.
  errno = 0;
  void* p = malloc(SIZE_MAX);
  ASSERT_EQ(nullptr, p);
  ASSERT_EQ(ENOMEM, errno);
}
74
TEST(malloc, calloc_std) {
  // calloc must return zero-filled memory of at least the requested size.
  constexpr size_t kAllocLen = 100;
  char* mem = static_cast<char*>(calloc(1, kAllocLen));
  ASSERT_NE(nullptr, mem);
  ASSERT_LE(kAllocLen, malloc_usable_size(mem));
  for (size_t idx = 0; idx < kAllocLen; idx++) {
    ASSERT_EQ(0, mem[idx]);
  }
  free(mem);
}
86
TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  // nmemb of -1 converts to SIZE_MAX; the request must fail with ENOMEM.
  errno = 0;
  void* p = calloc(-1, 100);
  ASSERT_EQ(nullptr, p);
  ASSERT_EQ(ENOMEM, errno);
}
93
TEST(malloc, calloc_overflow) {
  SKIP_WITH_HWASAN;
  // Every nmemb/size pair here is either unsatisfiably large or overflows
  // nmemb*size; calloc must fail with ENOMEM in all cases.
  static constexpr struct {
    size_t nmemb;
    size_t size;
  } kCases[] = {
      {1, SIZE_MAX},
      {SIZE_MAX, SIZE_MAX},
      {2, SIZE_MAX},
      {SIZE_MAX, 2},
  };
  for (const auto& c : kCases) {
    errno = 0;
    ASSERT_EQ(nullptr, calloc(c.nmemb, c.size));
    ASSERT_EQ(ENOMEM, errno);
  }
}
109
TEST(malloc, memalign_multiple) {
  SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
  // Bionic's memalign accepts any alignment value; the result must be
  // aligned to the largest power of two not greater than the request.
  for (size_t i = 0; i <= 12; i++) {
    const size_t pow2 = 1U << i;
    for (size_t alignment = pow2; alignment < (1U << (i + 1)); alignment++) {
      char* ptr = static_cast<char*>(memalign(alignment, 100));
      ASSERT_NE(nullptr, ptr) << "Failed at alignment " << alignment;
      ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
      ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % pow2)
          << "Failed at alignment " << alignment;
      free(ptr);
    }
  }
}
124
TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  // A SIZE_MAX request can never succeed regardless of alignment.
  void* p = memalign(4096, SIZE_MAX);
  ASSERT_EQ(nullptr, p);
}
129
TEST(malloc, memalign_non_power2) {
  SKIP_WITH_HWASAN;
  // Unlike posix_memalign, memalign must succeed for non-power-of-two
  // (and even zero) alignment values.
  for (size_t align = 0; align <= 256; align++) {
    void* ptr = memalign(align, 1024);
    ASSERT_NE(nullptr, ptr) << "Failed at align " << align;
    free(ptr);
  }
}
139
TEST(malloc, memalign_realloc) {
  // Memalign and then realloc the pointer a couple of times.
  // For every power-of-two alignment, verifies that realloc of a
  // memalign'd block preserves the surviving data on grow and shrink.
  for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    char *ptr = (char*)memalign(alignment, 100);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(100U, malloc_usable_size(ptr));
    ASSERT_EQ(0U, (intptr_t)ptr % alignment);
    memset(ptr, 0x23, 100);

    // Grow: the original 100 bytes must survive any move.
    ptr = (char*)realloc(ptr, 200);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(200U, malloc_usable_size(ptr));
    ASSERT_TRUE(ptr != nullptr);
    for (size_t i = 0; i < 100; i++) {
      ASSERT_EQ(0x23, ptr[i]);
    }
    memset(ptr, 0x45, 200);

    // Grow again with a fresh fill pattern.
    ptr = (char*)realloc(ptr, 300);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(300U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 200; i++) {
      ASSERT_EQ(0x45, ptr[i]);
    }
    memset(ptr, 0x67, 300);

    // Shrink: all 250 remaining bytes must keep the last pattern.
    ptr = (char*)realloc(ptr, 250);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(250U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 250; i++) {
      ASSERT_EQ(0x67, ptr[i]);
    }
    free(ptr);
  }
}
175
TEST(malloc, malloc_realloc_larger) {
  // Growing a malloc'd block must preserve its original contents.
  char* ptr = static_cast<char*>(malloc(100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  memset(ptr, 67, 100);

  ptr = static_cast<char*>(realloc(ptr, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}
191
TEST(malloc, malloc_realloc_smaller) {
  // Shrinking a malloc'd block must preserve the remaining prefix.
  char* ptr = static_cast<char*>(malloc(200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 67, 200);

  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}
207
TEST(malloc, malloc_multiple_realloc) {
  // Multiple reallocs, malloc is used for the original allocation.
  // Each shrink/grow must preserve the surviving prefix of the data.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 0x23, 200);

  // Shrink to 100: the first 100 bytes must be intact.
  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  // Shrink again to 50.
  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  // Grow to 150: only the first 50 bytes are still defined.
  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  memset(ptr, 0x23, 150);

  // Grow well past the original allocation size.
  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  free(ptr);
}
245
TEST(malloc, calloc_realloc_larger) {
  // Growing a calloc'd block must preserve the zeroed prefix.
  char* ptr = static_cast<char*>(calloc(1, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));

  ptr = static_cast<char*>(realloc(ptr, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
260
TEST(malloc, calloc_realloc_smaller) {
  // Shrinking a calloc'd block must preserve the zeroed prefix.
  char* ptr = static_cast<char*>(calloc(1, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
275
TEST(malloc, calloc_multiple_realloc) {
  // Multiple reallocs, calloc is used for the original allocation.
  // The zeroed prefix must survive every shrink and grow.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  // Shrink to 100: still all zero.
  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  // Shrink again to 50.
  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  // Grow to 150: only the first 50 bytes are still defined.
  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  memset(ptr, 0, 150);

  // Grow well past the original allocation size.
  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
312
TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  // A SIZE_MAX realloc must fail with ENOMEM both for a null pointer
  // (the malloc-equivalent path) and for an existing allocation.
  errno = 0;
  ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);

  void* existing = malloc(100);
  ASSERT_NE(nullptr, existing);
  errno = 0;
  ASSERT_EQ(nullptr, realloc(existing, SIZE_MAX));
  ASSERT_EQ(ENOMEM, errno);
  free(existing);
}
325
326 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
327 extern "C" void* pvalloc(size_t);
328 extern "C" void* valloc(size_t);
329 #endif
330
TEST(malloc, pvalloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // pvalloc returns page-aligned memory, rounding the request up to a
  // whole page (so usable size is at least one page here).
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = pvalloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  ASSERT_LE(pagesize, malloc_usable_size(ptr));
  free(ptr);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
343
TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // Rounding SIZE_MAX up to a page boundary overflows; pvalloc must fail.
  ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
351
TEST(malloc, valloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // valloc returns page-aligned memory of the requested size.
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = valloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  free(ptr);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
363
TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  // A SIZE_MAX request can never be satisfied.
  ASSERT_EQ(nullptr, valloc(SIZE_MAX));
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
371
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Write malloc_info() output to a temp file so it can be read back and
  // parsed as XML.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the fd.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  // Root is <malloc version="...">; the version attribute selects the
  // per-allocator schema that is validated below.
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // jemalloc: one <heap nr=...> per arena, each with summary totals and
    // a list of <bin> children.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin ->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
438
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  // Capture malloc_info() XML, bracketed by mallinfo() snapshots so the
  // XML totals can be compared against the allocator's own byte count.
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the fd.
  ASSERT_TRUE(fp != nullptr);
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    // Sum the per-arena allocation counters from the XML.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    // scudo reports size/count pairs; accumulate size*count.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
506
TEST(malloc, calloc_usable_size) {
  // Dirty freed allocations with a non-zero pattern, then verify that calloc
  // zeroes the entire usable size (not just the requested size) when it hands
  // a previously used region back out.
  for (size_t size = 1; size <= 2048; size++) {
    void* pointer = malloc(size);
    ASSERT_TRUE(pointer != nullptr);
    memset(pointer, 0xeb, malloc_usable_size(pointer));
    free(pointer);

    // We should get a previous pointer that has been set to non-zero.
    // If calloc does not zero out all of the data, this will fail.
    uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
    // Fix: the original asserted the stale 'pointer' instead of 'zero_mem',
    // so a calloc failure would have crashed below instead of failing here.
    ASSERT_TRUE(zero_mem != nullptr);
    size_t usable_size = malloc_usable_size(zero_mem);
    for (size_t i = 0; i < usable_size; i++) {
      ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
    }
    free(zero_mem);
  }
}
525
TEST(malloc, malloc_0) {
  // malloc(0) returns a unique, freeable non-null pointer on bionic.
  void* zero_alloc = malloc(0);
  ASSERT_NE(nullptr, zero_alloc);
  free(zero_alloc);
}
531
TEST(malloc, calloc_0_0) {
  // calloc(0, 0) returns a freeable non-null pointer.
  void* zero_alloc = calloc(0, 0);
  ASSERT_NE(nullptr, zero_alloc);
  free(zero_alloc);
}
537
TEST(malloc, calloc_0_1) {
  // A zero nmemb still yields a freeable non-null pointer.
  void* zero_alloc = calloc(0, 1);
  ASSERT_NE(nullptr, zero_alloc);
  free(zero_alloc);
}
543
TEST(malloc, calloc_1_0) {
  // A zero size still yields a freeable non-null pointer.
  void* zero_alloc = calloc(1, 0);
  ASSERT_NE(nullptr, zero_alloc);
  free(zero_alloc);
}
549
TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is equivalent to malloc(size), including the
  // non-null result for a zero-byte request.
  void* zero_alloc = realloc(nullptr, 0);
  ASSERT_NE(nullptr, zero_alloc);
  free(zero_alloc);
}
556
TEST(malloc, realloc_0) {
  // realloc(p, 0) acts as free(p) and returns nullptr.
  void* original = malloc(1024);
  ASSERT_NE(nullptr, original);
  void* shrunk = realloc(original, 0);
  ASSERT_EQ(nullptr, shrunk);
}
564
565 constexpr size_t MAX_LOOPS = 200;
566
567 // Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  // Check that malloc'd storage is sufficiently aligned for uint32_t,
  // uint64_t and long double. The 1-byte "filler" allocation made before
  // each checked allocation tries to push bin-based allocators into their
  // worst-case alignment slots.
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  // Release the checked allocations only after all loops so they stay live
  // and keep influencing allocator placement.
  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}
634
TEST(malloc, mallopt_smoke) {
  // An unknown option must fail, and mallopt never sets errno.
  errno = 0;
  const int unknown_option = -1000;
  ASSERT_EQ(0, mallopt(unknown_option, 1));
  ASSERT_EQ(0, errno);
}
641
TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // Toggle the decay timer on and off twice; every call must succeed.
  errno = 0;
  for (int value : {1, 0, 1, 0}) {
    ASSERT_EQ(1, mallopt(M_DECAY_TIME, value));
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
654
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // M_PURGE asks the allocator to release cached memory; it must succeed.
  errno = 0;
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
664
665 #if defined(__BIONIC__)
// Writes malloc_info() XML to a temporary file, parses it, and reports via
// *allocator_scudo whether the current native allocator identifies itself
// as scudo (root <malloc version="scudo-1">). Uses gtest ASSERTs, so it
// must be called from a test body.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();  // fp now owns the fd.
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
687 #endif
688
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // These mallopt options are only recognized by the scudo allocator, so
  // skip unless malloc_info reports scudo.
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
704
TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  // reallocarray must detect the nmemb*size overflow itself and fail with
  // ENOMEM instead of returning an undersized allocation.
  size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  size_t b = 2;

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
  ASSERT_EQ(ENOMEM, errno);

  // The argument order must not matter.
  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
  ASSERT_EQ(ENOMEM, errno);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
722
TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  // reallocarray(nullptr, nmemb, size) behaves like malloc(nmemb * size).
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
  // Fix: the original test leaked this allocation.
  free(p);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
732
TEST(malloc, mallinfo) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  // For each size, verify that mallinfo().uordblks (total allocated bytes)
  // eventually grows by at least the usable size of a new allocation.
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  constexpr static size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    // Free everything allocated this round (unused slots are nullptr,
    // which free() ignores).
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
777
778 template <typename Type>
VerifyAlignment(Type * floating)779 void __attribute__((optnone)) VerifyAlignment(Type* floating) {
780 size_t expected_alignment = alignof(Type);
781 if (expected_alignment != 0) {
782 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
783 << "Expected alignment " << expected_alignment << " ptr value " << floating;
784 }
785 }
786
// Verifies alignment of Type obtained three ways: operator new, malloc,
// and std::vector storage. Returns early on the first fatal failure so
// callers can wrap it in ASSERT_NO_FATAL_FAILURE.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
829
830 #if defined(__ANDROID__)
AndroidVerifyAlignment(size_t alloc_size,size_t aligned_bytes)831 static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
832 void* ptrs[100];
833 uintptr_t mask = aligned_bytes - 1;
834 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
835 ptrs[i] = malloc(alloc_size);
836 ASSERT_TRUE(ptrs[i] != nullptr);
837 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
838 << "Expected at least " << aligned_bytes << " byte alignment: size "
839 << alloc_size << " actual ptr " << ptrs[i];
840 }
841 }
842 #endif
843
TEST(malloc, align_check) {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  // First verify natural alignment for every fundamental type via new,
  // malloc, and vector storage.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // - Allocations of a size that rounds up to a multiple of 16 bytes
  //   must have at least 16 byte alignment.
  // - Allocations of a size that rounds up to a multiple of 8 bytes and
  //   not 16 bytes, are only required to have at least 8 byte alignment.
  // This is regardless of whether it is in a 32 bit or 64 bit environment.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  for (size_t i = 1; i <= 128; i++) {
    // Round the size up to a multiple of 8 to decide which rule applies.
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
890
891 // Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc, DISABLED_alloc_after_fork) {
  // Stress test: fork repeatedly while other threads are allocating, then
  // allocate in the (single-threaded) child. Verifies the allocator's
  // internal locks are in a usable state after fork.
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr = malloc(size);
          if (ptr == nullptr) {
            return;
          }
          // Make sure this value is not optimized away.
          asm volatile("" : : "r,m"(ptr) : "memory");
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Fork repeatedly on this thread while the allocator threads run, and
  // allocate in each child.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr = malloc(size);
        ASSERT_TRUE(ptr != nullptr);
        // Make sure this value is not optimized away.
        asm volatile("" : : "r,m"(ptr) : "memory");
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}
947
TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  // An unknown android_mallopt opcode must fail with ENOTSUP.
  const int unrecognized_option = -1;
  errno = 0;
  EXPECT_FALSE(android_mallopt(unrecognized_option, nullptr, 0));
  EXPECT_EQ(ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
958
// Returns true if the currently running executable is dynamically linked
// (ELF type ET_DYN), determined by reading our own ELF header. Any error
// (open or short read) is treated as "dynamic", the common case.
bool IsDynamic() {
#if defined(__LP64__)
  Elf64_Ehdr ehdr;
#else
  Elf32_Ehdr ehdr;
#endif
  std::string path(android::base::GetExecutablePath());

  int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
  if (fd == -1) {
    // Assume dynamic on error.
    return true;
  }
  bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
  close(fd);
  // Assume dynamic in error cases.
  return !read_completed || ehdr.e_type == ET_DYN;
}
977
// M_INIT_ZYGOTE_CHILD_PROFILING succeeds only for dynamic executables;
// static executables report ENOTSUP. Extra arguments are always rejected.
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  const bool is_dynamic = IsDynamic();

  // A well-formed call (no arguments).
  errno = 0;
  if (is_dynamic) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(0, errno);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_EQ(ENOTSUP, errno);
  }

  // The option takes no arguments, so passing any must fail.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  EXPECT_EQ(is_dynamic ? EINVAL : ENOTSUP, errno);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1004
#if defined(__BIONIC__)
// Installs a 128MB allocation limit, then uses the supplied allocation
// callback to check that a request below the limit succeeds and a
// request at the limit fails. Exits 0 on success, 1 on failure, so
// callers drive it via EXPECT_EXIT in a child process.
// Assumes that no more than 108MB of memory is allocated before this.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  // Short-circuit keeps the original order: the over-limit request is
  // only attempted if the under-limit request succeeded.
  const bool ok = func(20 * 1024 * 1024) && !func(128 * 1024 * 1024);
  exit(ok ? 0 : 1);
}
#endif
1018
// Verifies that every allocation entry point (calloc in both argument
// orders, malloc, memalign, posix_memalign, aligned_alloc, realloc,
// and on 32-bit also pvalloc/valloc) honors the limit installed with
// M_SET_ALLOCATION_LIMIT_BYTES. Each check runs in a forked child via
// EXPECT_EXIT because a limit cannot be removed once set.
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  // posix_memalign reports success via its return value, not the pointer.
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  // realloc of an existing 1MB block must also be subject to the limit.
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc/valloc only exist on 32-bit bionic.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1053
// The allocation limit may only be installed once per process; any
// later attempt to change it must be rejected.
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // First call wins.
  size_t first_limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &first_limit, sizeof(first_limit)));
  // Second call must fail.
  size_t second_limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &second_limit, sizeof(second_limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1065
1066 #if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

// Allocates kAllocationSize chunks until malloc fails (i.e. the
// allocation limit installed by the caller is reached), frees every
// chunk that was allocated, and returns how many allocations
// succeeded. Returns 0 if the limit was never reached within the
// maximum number of attempts.
static size_t GetMaxAllocations() {
  static constexpr size_t kMaxPointers = 20;
  size_t max_pointers = 0;
  void* ptrs[kMaxPointers];
  size_t allocated = 0;
  for (; allocated < kMaxPointers; allocated++) {
    ptrs[allocated] = malloc(kAllocationSize);
    if (ptrs[allocated] == nullptr) {
      // The failing index is the number of successful allocations.
      max_pointers = allocated;
      break;
    }
  }
  // Free everything that was allocated. The previous code only freed
  // max_pointers entries, leaking all kMaxPointers allocations when
  // the limit was never reached (max_pointers stayed 0).
  for (size_t i = 0; i < allocated; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}
1084
VerifyMaxPointers(size_t max_pointers)1085 static void VerifyMaxPointers(size_t max_pointers) {
1086 // Now verify that we can allocate the same number as before.
1087 void* ptrs[20];
1088 for (size_t i = 0; i < max_pointers; i++) {
1089 ptrs[i] = malloc(kAllocationSize);
1090 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1091 }
1092
1093 // Make sure the next allocation still fails.
1094 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1095 for (size_t i = 0; i < max_pointers; i++) {
1096 free(ptrs[i]);
1097 }
1098 }
1099 #endif
1100
// Growing an allocation with realloc must succeed while the total stays
// under the limit and fail once the requested size would exceed it.
TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Grow in steps that all remain below the limit.
  for (size_t mb : {20, 40, 60, 80}) {
    memory = realloc(memory, mb * 1024 * 1024);
    ASSERT_TRUE(memory != nullptr);
  }

  // A request past the limit must fail.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1130
// Shrinking an allocation with realloc must always succeed under the
// limit and must correctly release the accounted bytes.
TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Shrink in steps; each must succeed.
  for (size_t mb : {60, 40, 20, 10}) {
    memory = realloc(memory, mb * 1024 * 1024);
    ASSERT_TRUE(memory != nullptr);
  }
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1158
// realloc(ptr, 0) acts as free on bionic; verify it returns nullptr and
// that the freed bytes are credited back against the limit.
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  // Establish how many kAllocationSize chunks fit under the limit.
  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Zero-size realloc frees the block and returns nullptr.
  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  // The same number of allocations must fit again.
  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1178
1179 #if defined(__BIONIC__)
SetAllocationLimit(void * data)1180 static void* SetAllocationLimit(void* data) {
1181 std::atomic_bool* go = reinterpret_cast<std::atomic_bool*>(data);
1182 while (!go->load()) {
1183 }
1184 size_t limit = 500 * 1024 * 1024;
1185 if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
1186 return reinterpret_cast<void*>(-1);
1187 }
1188 return nullptr;
1189 }
1190
// Starts four threads that all race to install an allocation limit
// while the heapprofd signal handler is being triggered, then checks
// that exactly one of them succeeded. Runs in a forked child (see the
// caller) and exits 0 on success so the parent can check the status.
static void SetAllocationLimitMultipleThreads() {
  // Start gate for the racing threads; they spin until it flips.
  std::atomic_bool go;
  go = false;

  static constexpr size_t kNumThreads = 4;
  pthread_t threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));
  }

  // Let them go all at once.
  go = true;
  // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
  // heapprofd handler.
  union sigval signal_value;
  signal_value.sival_int = 0;
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Count the winners: SetAllocationLimit returns non-null on success.
  size_t num_successful = 0;
  for (size_t i = 0; i < kNumThreads; i++) {
    void* result;
    ASSERT_EQ(0, pthread_join(threads[i], &result));
    if (result != nullptr) {
      num_successful++;
    }
  }
  // Exactly one thread may install the limit.
  ASSERT_EQ(1U, num_successful);
  // Exit code 0 signals success to the forking parent.
  exit(0);
}
1220 #endif
1221
// Stress test: repeatedly fork a child in which several threads race to
// install the allocation limit while the profiling signal fires; each
// child must report that exactly one thread won.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid = fork();
    ASSERT_NE(-1, pid);
    if (pid == 0) {
      // Child: runs the race and exits 0 on success.
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1244