/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <utils/StrongPointer.h>
#include <utils/RefBase.h>

#include <thread>
#include <atomic>
#include <sched.h>
#include <errno.h>

// Enhanced version of StrongPointer_test, but using RefBase underneath.

using namespace android;

static constexpr int NITERS = 1000000;

static constexpr int INITIAL_STRONG_VALUE = 1 << 28; // Mirroring RefBase definition.
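// RefBase starts the strong count at this sentinel rather than at zero and
// subtracts it when the first strong reference is attached, which is why
// StrongMoves below can observe the sentinel on a freshly constructed object
// (an implementation detail of RefBase.cpp that this test deliberately mirrors).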

class Foo : public RefBase {
public:
    Foo(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~Foo() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
};

// A version of Foo that ensures that all objects are allocated at the same
// address. No more than one can be allocated at a time. Thread-hostile.
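// ReplacedComparison below uses this to destroy an object and then recreate
// one at exactly the same address, to verify that wp<> comparisons do not
// rely on pointer identity alone.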
class FooFixedAlloc : public RefBase {
public:
    static void* operator new(size_t size) {
        if (mAllocCount != 0) {
            abort();
        }
        mAllocCount = 1;
        if (theMemory == nullptr) {
            theMemory = malloc(size);
        }
        return theMemory;
    }

    static void operator delete(void *p) {
        if (mAllocCount != 1 || p != theMemory) {
            abort();
        }
        mAllocCount = 0;
    }

    FooFixedAlloc(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~FooFixedAlloc() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
    static int mAllocCount;
    static void* theMemory;
};

int FooFixedAlloc::mAllocCount(0);
void* FooFixedAlloc::theMemory(nullptr);

TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
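    // Of those two, one is held on behalf of sp1 (every strong reference
    // implies a weak one) and one on behalf of wp1.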
    {
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count isn't increasing; double-check that the moved-from
        // object is properly reset and doesn't delete early.
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    {
        // Now double-check that it deletes on time.
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    ASSERT_TRUE(wp1.promote().get() == nullptr);
}

TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    wp1 = nullptr;
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}

TEST(RefBase, Comparisons) {
    bool isDeleted, isDeleted2, isDeleted3;
    Foo* foo = new Foo(&isDeleted);
    Foo* foo2 = new Foo(&isDeleted2);
    sp<Foo> sp1(foo);
    sp<Foo> sp2(foo2);
    wp<Foo> wp1(sp1);
    wp<Foo> wp2(sp1);
    wp<Foo> wp3(sp2);
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_TRUE(wp1 == sp1);
    ASSERT_TRUE(wp3 == sp2);
    ASSERT_TRUE(wp1 != sp2);
    ASSERT_TRUE(wp1 <= wp2);
    ASSERT_TRUE(wp1 >= wp2);
    ASSERT_FALSE(wp1 != wp2);
    ASSERT_FALSE(wp1 > wp2);
    ASSERT_FALSE(wp1 < wp2);
    ASSERT_FALSE(sp1 == sp2);
    ASSERT_TRUE(sp1 != sp2);
    bool sp1_smaller = sp1 < sp2;
    wp<Foo> wp_smaller = sp1_smaller ? wp1 : wp3;
    wp<Foo> wp_larger = sp1_smaller ? wp3 : wp1;
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    sp2 = nullptr;
    ASSERT_TRUE(isDeleted2);
    ASSERT_FALSE(isDeleted);
    ASSERT_FALSE(wp3 == sp2);
    // Comparison results on weak pointers should not be affected.
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    wp2 = nullptr;
    ASSERT_FALSE(wp1 == wp2);
    ASSERT_TRUE(wp1 != wp2);
    wp1.clear();
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
    wp3.clear();
    ASSERT_TRUE(wp1 == wp3);
    ASSERT_FALSE(wp1 != wp3);
    ASSERT_FALSE(isDeleted);
    sp1.clear();
    ASSERT_TRUE(isDeleted);
    ASSERT_TRUE(sp1 == sp2);
    // Try to check that null pointers are properly initialized.
    {
        // Try once with non-null, to maximize chances of getting junk on the
        // stack.
        sp<Foo> sp3(new Foo(&isDeleted3));
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_FALSE(wp4 == wp5);
        ASSERT_TRUE(wp4 != wp5);
        ASSERT_FALSE(sp3 == wp5);
        ASSERT_FALSE(wp5 == sp3);
        ASSERT_TRUE(sp3 != wp5);
        ASSERT_TRUE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
    {
        sp<Foo> sp3;
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_TRUE(wp4 == wp5);
        ASSERT_FALSE(wp4 != wp5);
        ASSERT_TRUE(sp3 == wp5);
        ASSERT_TRUE(wp5 == sp3);
        ASSERT_FALSE(sp3 != wp5);
        ASSERT_FALSE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
}

// Check whether comparison against a dead wp works, even if the object
// referenced by the new wp happens to be at the same address.
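// (Presumably a wp<> also tracks the object's weakref state, not just the raw
// pointer, so two generations of the same address compare unequal; the test
// relies only on the observable behavior, not on that representation.)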
TEST(RefBase, ReplacedComparison) {
    bool isDeleted, isDeleted2;
    FooFixedAlloc* foo = new FooFixedAlloc(&isDeleted);
    sp<FooFixedAlloc> sp1(foo);
    wp<FooFixedAlloc> wp1(sp1);
    ASSERT_TRUE(wp1 == sp1);
    sp1.clear(); // Deallocates the object.
    ASSERT_TRUE(isDeleted);
    FooFixedAlloc* foo2 = new FooFixedAlloc(&isDeleted2);
    ASSERT_FALSE(isDeleted2);
    ASSERT_EQ(foo, foo2); // Not technically a legal comparison, but ...
    sp<FooFixedAlloc> sp2(foo2);
    wp<FooFixedAlloc> wp2(sp2);
    ASSERT_TRUE(sp2 == wp2);
    ASSERT_FALSE(sp2 != wp2);
    ASSERT_TRUE(sp2 != wp1);
    ASSERT_FALSE(sp2 == wp1);
    ASSERT_FALSE(sp2 == sp1); // sp1 is null.
    ASSERT_FALSE(wp1 == wp2); // wp1 refers to old object.
    ASSERT_TRUE(wp1 != wp2);
    ASSERT_TRUE(wp1 > wp2 || wp1 < wp2);
    ASSERT_TRUE(wp1 >= wp2 || wp1 <= wp2);
    ASSERT_FALSE(wp1 >= wp2 && wp1 <= wp2);
    ASSERT_FALSE(wp1 == nullptr);
    wp1 = sp2;
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
}

// Set up a situation in which we race with visit2AndRemove() to delete
// 2 strong references. The Bar destructor checks that there were no early
// deletions and that prior updates are visible to the destructor.
class Bar : public RefBase {
public:
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }
    bool mVisited1;
    bool mVisited2;
private:
    std::atomic<int>* mDeleteCount;
};

static sp<Bar> buffer;
static std::atomic<bool> bufferFull(false);
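// Handoff protocol: the test thread stores into buffer and then sets
// bufferFull; visit2AndRemove() spins until bufferFull is true, drops the
// reference, and clears the flag. bufferFull (sequentially consistent by
// default) is the only synchronization point, so it also orders the accesses
// to buffer itself.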

// Wait until bufferFull has value val.
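// Busy-waits rather than blocking: the two threads run on disjoint cpu sets
// (see setExclusiveCpus), so spinning never starves the other side, and at
// NITERS iterations it is far cheaper than sleeping and waking.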
static inline void waitFor(bool val) {
    while (bufferFull != val) {}
}

cpu_set_t otherCpus;

// Divide the cpus we're allowed to run on into myCpus and otherCpus.
// Set origCpus to the processors we were originally allowed to run on.
// Return false if origCpus doesn't include at least processors 0 and 1.
static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
                             cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
        return false;
    }
    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
        return false;
    }
    CPU_ZERO(myCpus);
    CPU_ZERO(otherCpus);
    CPU_OR(myCpus, myCpus, origCpus);
    CPU_OR(otherCpus, otherCpus, origCpus);
    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
        // I get the even cores, the other thread gets the odd ones.
        if (i & 1) {
            CPU_CLR(i, myCpus);
        } else {
            CPU_CLR(i, otherCpus);
        }
    }
    return true;
}

static void visit2AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);
        buffer->mVisited2 = true;
        buffer = nullptr;
        bufferFull = false;
    }
}

TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;
            bufferFull = true;
            ASSERT_TRUE(bar->getStrongCount() >= 1);
            // Weak count includes strong count.
            ASSERT_TRUE(bar->getWeakRefs()->getWeakCount() >= 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    } // Otherwise this is slow and probably pointless on a uniprocessor.
}

static wp<Bar> wpBuffer;
static std::atomic<bool> wpBufferFull(false);

// Wait until wpBufferFull has value val.
static inline void wpWaitFor(bool val) {
    while (wpBufferFull != val) {}
}

static void visit3AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // Dereferencing sp1 implicitly checks that the promotion
            // succeeded, i.e. that sp1 != nullptr.
            sp1->mVisited2 = true;
        }
        wpBuffer = nullptr;
        wpBufferFull = false;
    }
}

TEST(RefBase, RacingPromotions) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit3AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            Bar* bar = new Bar(&deleteCount);
            wp<Bar> wp1(bar);
            bar->mVisited1 = true;
            if (i % (NITERS / 10) == 0) {
                // Do this rarely, since it generates a log message.
                wp1 = nullptr; // No longer destroys the object.
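                // Dropping the last wp<> to an object that never had a
                // strong reference used to destroy the object; RefBase now
                // just logs a warning, so bar stays valid and can be
                // re-referenced on the next line.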
                wp1 = bar;
            }
            wpBuffer = wp1;
            ASSERT_EQ(2, bar->getWeakRefs()->getWeakCount());
            wpBufferFull = true;
            // Promotion races with the one in visit3AndRemove.
            // This may or may not succeed, but it shouldn't interfere with
            // the concurrent one.
            sp<Bar> sp1 = wp1.promote();
            wpWaitFor(false); // Waits for the other thread to drop its strong pointer.
            sp1 = nullptr;
            // No strong pointers here.
            sp1 = wp1.promote();
            ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    } // Otherwise this is slow and probably pointless on a uniprocessor.
}
409