1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <err.h>
18 #include <inttypes.h>
19 #include <pthread.h>
20 #include <sched.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <sys/mman.h>
24 #include <sys/user.h>
25 #include <unistd.h>
26 
27 #include <gtest/gtest.h>
28 
29 #include <chrono>
30 #include <thread>
31 #include <vector>
32 
33 #include <android-base/macros.h>
34 #include <android-base/threads.h>
35 
36 #include "utils.h"
37 
38 using namespace std::chrono_literals;
39 
WaitUntilAllThreadsExited(pid_t * tids,size_t tid_count)40 static void WaitUntilAllThreadsExited(pid_t* tids, size_t tid_count) {
41   // Wait until all children have exited.
42   bool alive = true;
43   while (alive) {
44     alive = false;
45     for (size_t i = 0; i < tid_count; ++i) {
46       if (tids[i] != 0) {
47         if (tgkill(getpid(), tids[i], 0) == 0) {
48           alive = true;
49         } else {
50           EXPECT_EQ(errno, ESRCH);
51           tids[i] = 0;  // Skip in next loop.
52         }
53       }
54     }
55     sched_yield();
56   }
57 }
58 
59 class LeakChecker {
60  public:
LeakChecker()61   LeakChecker() {
62     // Avoid resizing and using memory later.
63     // 64Ki is the default limit on VMAs per process.
64     maps_.reserve(64*1024);
65     Reset();
66   }
67 
~LeakChecker()68   ~LeakChecker() {
69     Check();
70   }
71 
Reset()72   void Reset() {
73     previous_size_ = GetMappingSize();
74   }
75 
DumpTo(std::ostream & os) const76   void DumpTo(std::ostream& os) const {
77     os << previous_size_;
78   }
79 
80  private:
81   size_t previous_size_;
82   std::vector<map_record> maps_;
83 
Check()84   void Check() {
85     auto current_size = GetMappingSize();
86     if (current_size > previous_size_) {
87       FAIL() << "increase in process map size: " << previous_size_ << " -> " << current_size;
88     }
89   }
90 
GetMappingSize()91   size_t GetMappingSize() {
92     if (!Maps::parse_maps(&maps_)) {
93       err(1, "failed to parse maps");
94     }
95 
96     size_t result = 0;
97     for (const map_record& map : maps_) {
98       result += map.addr_end - map.addr_start;
99     }
100 
101     return result;
102   }
103 };
104 
operator <<(std::ostream & os,const LeakChecker & lc)105 std::ostream& operator<<(std::ostream& os, const LeakChecker& lc) {
106   lc.DumpTo(os);
107   return os;
108 }
109 
110 // http://b/36045112
TEST(pthread_leak, join) {
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/37920774

  LeakChecker lc;

  // Repeatedly create and join a no-op thread; lc's destructor fails the
  // test if the process's mapped address space grew over the iterations.
  auto no_op = [](void*) -> void* { return nullptr; };
  constexpr int kIterations = 100;
  for (int iteration = 0; iteration < kIterations; ++iteration) {
    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr, no_op, nullptr));
    ASSERT_EQ(0, pthread_join(thread, nullptr));
  }
}
122 
123 // http://b/36045112
TEST(pthread_leak, detach) {
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/37920774

  LeakChecker lc;

  // Ancient devices with only 2 cores need a lower limit.
  // http://b/129924384 and https://issuetracker.google.com/142210680.
  const int thread_count = (sysconf(_SC_NPROCESSORS_CONF) > 2) ? 100 : 50;

  for (size_t pass = 0; pass < 1; ++pass) {
    // Per-thread argument block: the shared barrier plus a slot where the
    // child publishes its kernel tid for the exit wait below.
    struct thread_data { pthread_barrier_t* barrier; pid_t* tid; } threads[thread_count];

    // Barrier counts all children plus this (main) thread, so the wait
    // below releases only once every child is up and has written its tid.
    pthread_barrier_t barrier;
    ASSERT_EQ(pthread_barrier_init(&barrier, nullptr, thread_count + 1), 0);

    // Start child threads.
    pid_t tids[thread_count];
    for (int i = 0; i < thread_count; ++i) {
      threads[i] = {&barrier, &tids[i]};
      // '+' forces conversion of the capture-less lambda to a plain
      // function pointer, as required by pthread_create.
      const auto thread_function = +[](void* ptr) -> void* {
        thread_data* data = static_cast<thread_data*>(ptr);
        *data->tid = gettid();
        pthread_barrier_wait(data->barrier);
        return nullptr;
      };
      pthread_t thread;
      ASSERT_EQ(0, pthread_create(&thread, nullptr, thread_function, &threads[i]));
      // Detached threads can't be joined, so thread exit is detected via
      // WaitUntilAllThreadsExited polling the collected tids instead.
      ASSERT_EQ(0, pthread_detach(thread));
    }

    // Rendezvous with all children, then destroy the barrier only after
    // every waiter has been released.
    pthread_barrier_wait(&barrier);
    ASSERT_EQ(pthread_barrier_destroy(&barrier), 0);

    WaitUntilAllThreadsExited(tids, thread_count);

    // TODO(b/158573595): the test is flaky without the warmup pass.
    if (pass == 0) lc.Reset();
  }
}
163