/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RefBase"
// #define LOG_NDEBUG 0

#include <memory>

#include <android-base/macros.h>

#include <log/log.h>

#include <utils/RefBase.h>

#include <utils/CallStack.h>

#include <utils/Mutex.h>

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

// Compile with refcounting debugging enabled.
#define DEBUG_REFS 0

// The following three are ignored unless DEBUG_REFS is set.

// whether ref-tracking is enabled by default; if not, trackMe(true, false)
// needs to be called explicitly
#define DEBUG_REFS_ENABLED_BY_DEFAULT 0

// whether call stacks are collected (significantly slows things down)
#define DEBUG_REFS_CALLSTACK_ENABLED 1

// folder where stack traces are saved when DEBUG_REFS is enabled
// this folder needs to exist and be writable
#define DEBUG_REFS_CALLSTACK_PATH "/data/debug"

// log all reference counting operations
#define PRINT_REFS 0

// Continue after logging a stack trace if ~RefBase discovers that reference
// count has never been incremented. Normally we conspicuously crash in that
// case.
#define DEBUG_REFBASE_DESTRUCTION 1
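
// Illustrative note (not from the original source): with DEBUG_REFS enabled
// but DEBUG_REFS_ENABLED_BY_DEFAULT off, tracking for a particular object is
// switched on through its weak-ref structure, e.g.:
//
//     obj->getWeakRefs()->trackMe(true /*enable*/, false /*retain*/);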

// ---------------------------------------------------------------------------

namespace android {

// Observations, invariants, etc:

// By default, objects are destroyed when the last strong reference disappears
// or, if the object never had a strong reference, when the last weak reference
// disappears.
//
// OBJECT_LIFETIME_WEAK changes this behavior to retain the object
// unconditionally until the last reference of either kind disappears. The
// client ensures that the extendObjectLifetime call happens before the dec
// call that would otherwise have deallocated the object, or before an
// attemptIncStrong call that might rely on it. We do not worry about
// concurrent changes to the object lifetime.
//
// AttemptIncStrong will succeed if the object has a strong reference, or if it
// has a weak reference and has never had a strong reference.
// AttemptIncWeak really does succeed only if there is already a WEAK
// reference, and thus may fail when attemptIncStrong would succeed.
//
// mStrong is the strong reference count. mWeak is the weak reference count.
// Between calls, and ignoring memory ordering effects, mWeak includes strong
// references, and is thus >= mStrong.
//
// A weakref_impl holds all the information, including both reference counts,
// required to perform wp<> operations. Thus these can continue to be performed
// after the RefBase object has been destroyed.
//
// A weakref_impl is allocated as the value of mRefs in a RefBase object on
// construction.
// In the OBJECT_LIFETIME_STRONG case, it is normally deallocated in decWeak,
// and hence lives as long as the last weak reference. (It can also be
// deallocated in the RefBase destructor iff the strong reference count was
// never incremented and the weak count is zero, e.g. if the RefBase object is
// explicitly destroyed without decrementing the strong count. This should be
// avoided.) In this case, the RefBase destructor should be invoked from
// decStrong.
// In the OBJECT_LIFETIME_WEAK case, the weakref_impl is always deallocated in
// the RefBase destructor, which is always invoked by decWeak. DecStrong
// explicitly avoids the deletion in this case.
//
// Memory ordering:
// The client must ensure that every inc() call, together with all other
// accesses to the object, happens before the corresponding dec() call.
//
// We try to keep memory ordering constraints on atomics as weak as possible,
// since memory fences or ordered memory accesses are likely to be a major
// performance cost for this code. All accesses to mStrong, mWeak, and mFlags
// explicitly relax memory ordering in some way.
//
// The only operations that are not memory_order_relaxed are reference count
// decrements. All reference count decrements are release operations. In
// addition, the final decrement leading to the deallocation is followed by an
// acquire fence, which we can view informally as also turning it into an
// acquire operation. (See 29.8p4 [atomics.fences] for details. We could
// alternatively use acq_rel operations for all decrements. This is probably
// slower on most current (2016) hardware, especially on ARMv7, but that may
// not be true indefinitely.)
//
// This convention ensures that the second-to-last decrement synchronizes with
// (in the language of 1.10 in the C++ standard) the final decrement of a
// reference count. Since reference counts are only updated using atomic
// read-modify-write operations, this also extends to any earlier decrements.
// (See "release sequence" in 1.10.)
//
// Since all operations on an object happen before the corresponding reference
// count decrement, and all reference count decrements happen before the final
// one, we are guaranteed that all other object accesses happen before the
// object is destroyed.
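//
// As an illustrative sketch only (not part of this file's interface), the two
// lifetime modes look roughly like this from a client's point of view, where
// Foo is a hypothetical RefBase subclass:
//
//     // Default (OBJECT_LIFETIME_STRONG): destroyed with the last strong ref.
//     sp<Foo> s = new Foo();
//     wp<Foo> w = s;                  // weak ref; does not keep Foo alive
//     s.clear();                      // ~Foo() runs here
//     sp<Foo> s2 = w.promote();       // returns nullptr; promotion fails
//
//     // Extended lifetime: Foo calls extendObjectLifetime(OBJECT_LIFETIME_WEAK)
//     // in its constructor, so it lives until the last weak ref disappears,
//     // and the promote() above could still succeed (subject to
//     // onIncStrongAttempted).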


#define INITIAL_STRONG_VALUE (1<<28)

#define MAX_COUNT 0xfffff

// Test whether the argument is a clearly invalid strong reference count.
// Used only for error checking on the value before an atomic decrement.
// Intended to be very cheap.
// Note that we cannot just check for excess decrements by comparing to zero
// since the object would be deallocated before that.
#define BAD_STRONG(c) \
        ((c) == 0 || ((c) & (~(MAX_COUNT | INITIAL_STRONG_VALUE))) != 0)

// Same for weak counts.
#define BAD_WEAK(c) ((c) == 0 || ((c) & (~MAX_COUNT)) != 0)
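
// As a quick illustrative check of these macros (values worked out by hand,
// not part of the original code): BAD_STRONG(0) is true (excess decrement),
// BAD_STRONG(1) and BAD_STRONG(INITIAL_STRONG_VALUE + 1) are false (a live
// count, possibly still carrying the initial bias), while BAD_STRONG(-1) sets
// bits outside MAX_COUNT and INITIAL_STRONG_VALUE and is flagged as invalid.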

// ---------------------------------------------------------------------------

class RefBase::weakref_impl : public RefBase::weakref_type
{
public:
    std::atomic<int32_t> mStrong;
    std::atomic<int32_t> mWeak;
    RefBase* const mBase;
    std::atomic<int32_t> mFlags;

#if !DEBUG_REFS

    explicit weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(0)
    {
    }

    void addStrongRef(const void* /*id*/) { }
    void removeStrongRef(const void* /*id*/) { }
    void renameStrongRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void addWeakRef(const void* /*id*/) { }
    void removeWeakRef(const void* /*id*/) { }
    void renameWeakRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void printRefs() const { }
    void trackMe(bool, bool) { }

#else

    weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(0)
        , mStrongRefs(NULL)
        , mWeakRefs(NULL)
        , mTrackEnabled(!!DEBUG_REFS_ENABLED_BY_DEFAULT)
        , mRetain(false)
    {
    }

    ~weakref_impl()
    {
        bool dumpStack = false;
        if (!mRetain && mStrongRefs != NULL) {
            dumpStack = true;
            ALOGE("Strong references remain:");
            ref_entry* refs = mStrongRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }

        if (!mRetain && mWeakRefs != NULL) {
            dumpStack = true;
            ALOGE("Weak references remain!");
            ref_entry* refs = mWeakRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }
        if (dumpStack) {
            ALOGE("above errors at:");
            CallStack::logStack(LOG_TAG);
        }
    }

    void addStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "addStrongRef: RefBase=%p, id=%p", mBase, id);
        addRef(&mStrongRefs, id, mStrong.load(std::memory_order_relaxed));
    }

    void removeStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "removeStrongRef: RefBase=%p, id=%p", mBase, id);
        if (!mRetain) {
            removeRef(&mStrongRefs, id);
        } else {
            addRef(&mStrongRefs, id, -mStrong.load(std::memory_order_relaxed));
        }
    }

    void renameStrongRefId(const void* old_id, const void* new_id) {
        //ALOGD_IF(mTrackEnabled,
        //        "renameStrongRefId: RefBase=%p, oid=%p, nid=%p",
        //        mBase, old_id, new_id);
        renameRefsId(mStrongRefs, old_id, new_id);
    }

    void addWeakRef(const void* id) {
        addRef(&mWeakRefs, id, mWeak.load(std::memory_order_relaxed));
    }

    void removeWeakRef(const void* id) {
        if (!mRetain) {
            removeRef(&mWeakRefs, id);
        } else {
            addRef(&mWeakRefs, id, -mWeak.load(std::memory_order_relaxed));
        }
    }

    void renameWeakRefId(const void* old_id, const void* new_id) {
        renameRefsId(mWeakRefs, old_id, new_id);
    }

    void trackMe(bool track, bool retain)
    {
        mTrackEnabled = track;
        mRetain = retain;
    }

    void printRefs() const
    {
        String8 text;

        {
            Mutex::Autolock _l(mMutex);
            char buf[128];
            snprintf(buf, sizeof(buf),
                     "Strong references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mStrongRefs);
            snprintf(buf, sizeof(buf),
                     "Weak references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mWeakRefs);
        }

        {
            char name[100];
            snprintf(name, sizeof(name), DEBUG_REFS_CALLSTACK_PATH "/%p.stack",
                     this);
            // Note: the mode must be octal (0644); a decimal 644 would yield
            // nonsensical permissions on the created file.
            int rc = open(name, O_RDWR | O_CREAT | O_APPEND, 0644);
            if (rc >= 0) {
                (void)write(rc, text.string(), text.length());
                close(rc);
                ALOGD("STACK TRACE for %p saved in %s", this, name);
            }
            else ALOGE("FAILED TO PRINT STACK TRACE for %p in %s: %s", this,
                       name, strerror(errno));
        }
    }

private:
    struct ref_entry
    {
        ref_entry* next;
        const void* id;
#if DEBUG_REFS_CALLSTACK_ENABLED
        CallStack::CallStackUPtr stack;
#endif
        int32_t ref;
    };

    void addRef(ref_entry** refs, const void* id, int32_t mRef)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* ref = new ref_entry;
            // Reference count at the time of the snapshot, but before the
            // update. A positive value means we increment, a negative value
            // means we decrement the reference count.
            ref->ref = mRef;
            ref->id = id;
#if DEBUG_REFS_CALLSTACK_ENABLED
            ref->stack = CallStack::getCurrent(2);
#endif
            ref->next = *refs;
            *refs = ref;
        }
    }

    void removeRef(ref_entry** refs, const void* id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* const head = *refs;
            ref_entry* ref = head;
            while (ref != NULL) {
                if (ref->id == id) {
                    *refs = ref->next;
                    delete ref;
                    return;
                }
                refs = &ref->next;
                ref = *refs;
            }

            ALOGE("RefBase: removing id %p on RefBase %p "
                  "(weakref_type %p) that doesn't exist!",
                  id, mBase, this);

            ref = head;
            while (ref) {
                char inc = ref->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, ref->id, ref->ref);
                ref = ref->next;
            }

            CallStack::logStack(LOG_TAG);
        }
    }

    void renameRefsId(ref_entry* r, const void* old_id, const void* new_id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);
            ref_entry* ref = r;
            while (ref != NULL) {
                if (ref->id == old_id) {
                    ref->id = new_id;
                }
                ref = ref->next;
            }
        }
    }

    void printRefsLocked(String8* out, const ref_entry* refs) const
    {
        char buf[128];
        while (refs) {
            char inc = refs->ref >= 0 ? '+' : '-';
            snprintf(buf, sizeof(buf), "\t%c ID %p (ref %d):\n",
                     inc, refs->id, refs->ref);
            out->append(buf);
#if DEBUG_REFS_CALLSTACK_ENABLED
            out->append(CallStack::stackToString("\t\t", refs->stack.get()));
#else
            out->append("\t\t(call stacks disabled)");
#endif
            refs = refs->next;
        }
    }

    mutable Mutex mMutex;
    ref_entry* mStrongRefs;
    ref_entry* mWeakRefs;

    bool mTrackEnabled;
    // If true, collect entries (with stack traces) for both addref and
    // removeref, instead of deleting the matching addref entry on removeref.
    bool mRetain;

#endif
};

// ---------------------------------------------------------------------------

void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE) {
        return;
    }

    int32_t old __unused = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE, std::memory_order_relaxed);
    // A decStrong() must still happen after us.
    ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
    refs->mBase->onFirstRef();
}

void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
    ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ALWAYS_FATAL_IF(BAD_STRONG(c), "decStrong() called on %p too many times",
                        refs);
    if (c == 1) {
        std::atomic_thread_fence(std::memory_order_acquire);
        refs->mBase->onLastStrongRef(id);
        int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
            // The destructor does not delete refs in this case.
        }
    }
    // Note that even with only strong reference operations, the thread
    // deallocating this may not be the same as the thread deallocating refs.
    // That's OK: all accesses to this happen before its deletion here,
    // and all accesses to refs happen before its deletion in the final decWeak.
    // The destructor can safely access mRefs because either it's deleting
    // mRefs itself, or it's running entirely before the final mWeak decrement.
    //
    // Since we're doing atomic loads of `flags`, the static analyzer assumes
    // they can change between `delete this;` and `refs->decWeak(id);`. This is
    // not the case. The analyzer may become more okay with this pattern when
    // https://bugs.llvm.org/show_bug.cgi?id=34365 gets resolved.
    // NOLINTNEXTLINE
    refs->decWeak(id);
}

void RefBase::forceIncStrong(const void* id) const
{
    // Allows initial mStrong of 0 in addition to INITIAL_STRONG_VALUE.
    // TODO: Better document assumptions.
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count underflow",
               refs);
#if PRINT_REFS
    ALOGD("forceIncStrong of %p from %p: cnt=%d\n", this, id, c);
#endif

    switch (c) {
    case INITIAL_STRONG_VALUE:
        refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                                std::memory_order_relaxed);
        FALLTHROUGH_INTENDED;
    case 0:
        refs->mBase->onFirstRef();
    }
}

int32_t RefBase::getStrongCount() const
{
    // Debugging only; No memory ordering guarantees.
    return mRefs->mStrong.load(std::memory_order_relaxed);
}

RefBase* RefBase::weakref_type::refBase() const
{
    return static_cast<const weakref_impl*>(this)->mBase;
}

void RefBase::weakref_type::incWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "incWeak called on %p after last weak ref", this);
}


void RefBase::weakref_type::decWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->removeWeakRef(id);
    const int32_t c = impl->mWeak.fetch_sub(1, std::memory_order_release);
    LOG_ALWAYS_FATAL_IF(BAD_WEAK(c), "decWeak called on %p too many times",
                        this);
    if (c != 1) return;
    atomic_thread_fence(std::memory_order_acquire);

    int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
    if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
        // This is the regular lifetime case. The object is destroyed
        // when the last strong reference goes away. Since weakref_impl
        // outlives the object, it is not destroyed in the dtor, and
        // we'll have to do it here.
        if (impl->mStrong.load(std::memory_order_relaxed)
                == INITIAL_STRONG_VALUE) {
            // Decrementing a weak count to zero when object never had a strong
            // reference. We assume it acquired a weak reference early, e.g.
            // in the constructor, and will eventually be properly destroyed,
            // usually via incrementing and decrementing the strong count.
            // Thus we no longer do anything here. We log this case, since it
            // seems to be extremely rare, and should not normally occur. We
            // used to deallocate mBase here, so this may now indicate a leak.
            ALOGW("RefBase: Object at %p lost last weak reference "
                  "before it had a strong reference", impl->mBase);
        } else {
            // ALOGV("Freeing refs %p of old RefBase %p\n", this, impl->mBase);
            delete impl;
        }
    } else {
        // This is the OBJECT_LIFETIME_WEAK case. The last weak-reference
        // is gone, we can destroy the object.
        impl->mBase->onLastWeakRef(id);
        delete impl->mBase;
    }
}

bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);

    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    int32_t curCount = impl->mStrong.load(std::memory_order_relaxed);

    ALOG_ASSERT(curCount >= 0,
            "attemptIncStrong called on %p after underflow", this);

    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        // we're in the easy/common case of promoting a weak-reference
        // from an existing strong reference.
        if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // the strong count has changed on us, we need to re-assert our
        // situation. curCount was updated by compare_exchange_weak.
    }

    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        // we're now in the harder case of either:
        // - there never was a strong reference on us
        // - or, all strong references have been released
        int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            // this object has a "normal" life-time, i.e.: it gets destroyed
            // when the last strong reference goes away
            if (curCount <= 0) {
                // the last strong-reference got released, the object cannot
                // be revived.
                decWeak(id);
                return false;
            }

            // here, curCount == INITIAL_STRONG_VALUE, which means
            // there never was a strong-reference, so we can try to
            // promote this object; we need to do that atomically.
            while (curCount > 0) {
                if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                        std::memory_order_relaxed)) {
                    break;
                }
                // the strong count has changed on us, we need to re-assert our
                // situation (e.g.: another thread has inc/decStrong'ed us)
                // curCount has been updated.
            }

            if (curCount <= 0) {
                // promote() failed, some other thread destroyed us in the
                // meantime (i.e.: strong count reached zero).
                decWeak(id);
                return false;
            }
        } else {
            // this object has an "extended" life-time, i.e.: it can be
            // revived from a weak-reference only.
            // Ask the object's implementation if it agrees to be revived
            if (!impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id)) {
                // it didn't so give-up.
                decWeak(id);
                return false;
            }
            // grab a strong-reference, which is always safe due to the
            // extended life-time.
            curCount = impl->mStrong.fetch_add(1, std::memory_order_relaxed);
            // If the strong reference count has already been incremented by
            // someone else, the implementor of onIncStrongAttempted() is holding
            // an unneeded reference. So call onLastStrongRef() here to remove it.
            // (No, this is not pretty.) Note that we MUST NOT do this if we
            // are in fact acquiring the first reference.
            if (curCount != 0 && curCount != INITIAL_STRONG_VALUE) {
                impl->mBase->onLastStrongRef(id);
            }
        }
    }

    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    // curCount is the value of mStrong before we incremented it.
    // Now we need to fix-up the count if it was INITIAL_STRONG_VALUE.
    // This must be done safely, i.e.: handle the case where several threads
    // were here in attemptIncStrong().
    // curCount > INITIAL_STRONG_VALUE is OK, and can happen if we're doing
    // this in the middle of another incStrong. The subtraction is handled
    // by the thread that started with INITIAL_STRONG_VALUE.
    if (curCount == INITIAL_STRONG_VALUE) {
        impl->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                std::memory_order_relaxed);
    }

    return true;
}
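
// Illustrative note (not part of the original file): attemptIncStrong is the
// primitive behind wp<>::promote(). A rough client-side sketch, where Foo is a
// hypothetical RefBase subclass:
//
//     wp<Foo> weak = strong;            // strong is an sp<Foo>
//     sp<Foo> again = weak.promote();   // internally calls attemptIncStrong
//     if (again == nullptr) {
//         // the object was already destroyed (or refused revival in the
//         // OBJECT_LIFETIME_WEAK case); fall back gracefully here
//     }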

bool RefBase::weakref_type::attemptIncWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mWeak.load(std::memory_order_relaxed);
    ALOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow",
               this);
    while (curCount > 0) {
        if (impl->mWeak.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // curCount has been updated.
    }

    if (curCount > 0) {
        impl->addWeakRef(id);
    }

    return curCount > 0;
}

int32_t RefBase::weakref_type::getWeakCount() const
{
    // Debug only!
    return static_cast<const weakref_impl*>(this)->mWeak
            .load(std::memory_order_relaxed);
}

void RefBase::weakref_type::printRefs() const
{
    static_cast<const weakref_impl*>(this)->printRefs();
}

void RefBase::weakref_type::trackMe(bool enable, bool retain)
{
    static_cast<weakref_impl*>(this)->trackMe(enable, retain);
}

RefBase::weakref_type* RefBase::createWeak(const void* id) const
{
    mRefs->incWeak(id);
    return mRefs;
}

RefBase::weakref_type* RefBase::getWeakRefs() const
{
    return mRefs;
}

RefBase::RefBase()
    : mRefs(new weakref_impl(this))
{
}

RefBase::~RefBase()
{
    int32_t flags = mRefs->mFlags.load(std::memory_order_relaxed);
    // If the life-time of this object was extended to WEAK, the weakref_impl
    // does not out-live the object and we can free it now.
    if ((flags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_WEAK) {
        // It's possible that the weak count is not 0 if the object
        // re-acquired a weak reference in its destructor
        if (mRefs->mWeak.load(std::memory_order_relaxed) == 0) {
            delete mRefs;
        }
    } else if (mRefs->mStrong.load(std::memory_order_relaxed) == INITIAL_STRONG_VALUE) {
        // We never acquired a strong reference on this object.
#if DEBUG_REFBASE_DESTRUCTION
        // Treating this as fatal is prone to causing boot loops. For debugging, it's
        // better to treat as non-fatal.
        ALOGD("RefBase: Explicit destruction, weak count = %d (in %p)", mRefs->mWeak.load(), this);
        CallStack::logStack(LOG_TAG);
#else
        LOG_ALWAYS_FATAL("RefBase: Explicit destruction, weak count = %d", mRefs->mWeak.load());
#endif
    }
    // For debugging purposes, clear mRefs. Ineffective against outstanding wp's.
    const_cast<weakref_impl*&>(mRefs) = nullptr;
}

void RefBase::extendObjectLifetime(int32_t mode)
{
    // Must be happens-before ordered with respect to construction or any
    // operation that could destroy the object.
    mRefs->mFlags.fetch_or(mode, std::memory_order_relaxed);
}

void RefBase::onFirstRef()
{
}

void RefBase::onLastStrongRef(const void* /*id*/)
{
}

bool RefBase::onIncStrongAttempted(uint32_t flags, const void* /*id*/)
{
    return (flags&FIRST_INC_STRONG) ? true : false;
}

void RefBase::onLastWeakRef(const void* /*id*/)
{
}
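
// Illustrative sketch only (hypothetical subclass, not part of this file):
// a class that opts into the extended lifetime typically calls
// extendObjectLifetime() in its constructor and may veto revival by
// overriding onIncStrongAttempted():
//
//     class CachedBuffer : public RefBase {        // hypothetical example
//     public:
//         CachedBuffer() { extendObjectLifetime(OBJECT_LIFETIME_WEAK); }
//     protected:
//         bool onIncStrongAttempted(uint32_t flags, const void* /*id*/) override {
//             // only allow promotion on the very first strong reference
//             return (flags & FIRST_INC_STRONG) != 0;
//         }
//     };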

// ---------------------------------------------------------------------------

#if DEBUG_REFS
void RefBase::renameRefs(size_t n, const ReferenceRenamer& renamer) {
    for (size_t i=0 ; i<n ; i++) {
        renamer(i);
    }
}
#else
void RefBase::renameRefs(size_t /*n*/, const ReferenceRenamer& /*renamer*/) { }
#endif

void RefBase::renameRefId(weakref_type* ref,
        const void* old_id, const void* new_id) {
    weakref_impl* const impl = static_cast<weakref_impl*>(ref);
    impl->renameStrongRefId(old_id, new_id);
    impl->renameWeakRefId(old_id, new_id);
}

void RefBase::renameRefId(RefBase* ref,
        const void* old_id, const void* new_id) {
    ref->mRefs->renameStrongRefId(old_id, new_id);
    ref->mRefs->renameWeakRefId(old_id, new_id);
}

}; // namespace android
