/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
#define ART_RUNTIME_MIRROR_REFERENCE_H_

#include "base/enums.h"
#include "base/locks.h"
#include "base/macros.h"
#include "obj_ptr.h"
#include "object.h"
#include "read_barrier_option.h"

namespace art {

namespace gc {

class ReferenceProcessor;
class ReferenceQueue;

}  // namespace gc

struct ReferenceOffsets;
struct FinalizerReferenceOffsets;

namespace mirror {

// C++ mirror of java.lang.ref.Reference
class MANAGED Reference : public Object {
 public:
  // Size of java.lang.ref.Reference.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Size of an instance of java.lang.ref.Reference.
  static constexpr uint32_t InstanceSize() {
    return sizeof(Reference);
  }

  static MemberOffset PendingNextOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, pending_next_);
  }
  static MemberOffset QueueOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_);
  }
  static MemberOffset QueueNextOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, queue_next_);
  }
  static MemberOffset ReferentOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
  }
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  Object* GetReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
        ReferentOffset());
  }
  template<bool kTransactionActive>
  void SetReferent(ObjPtr<Object> referent) REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive>
  void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
  }

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  Reference* GetPendingNext() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
  }

  void SetPendingNext(ObjPtr<Reference> pending_next) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if the reference's pendingNext is null, indicating it is
  // okay to process this reference.
  //
  // If pendingNext is not null, then one of the following cases holds:
  // 1. The reference has already been enqueued to a java ReferenceQueue. In
  //    this case the referent should never again be considered for reference
  //    processing.
  // 2. The reference is currently part of a list of references that may
  //    shortly be enqueued on a java ReferenceQueue. In this case the
  //    reference should not be processed again until it has been removed from
  //    that list, which only happens if it is determined not to be ready for
  //    enqueuing on a java ReferenceQueue.
  bool IsUnprocessed() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetPendingNext<kWithoutReadBarrier>() == nullptr;
  }
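
  // Illustrative sketch only (hypothetical caller, not code from this file): per
  // the comment above, reference-processing code is expected to claim a reference
  // by checking IsUnprocessed() before linking it into a pending list, e.g.:
  //
  //   if (ref->IsUnprocessed()) {
  //     ref->SetPendingNext(list_head);  // "list_head" is a placeholder name.
  //   }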

 private:
  // Note: this avoids a read barrier; it should only be used by the GC.
  HeapReference<Object>* GetReferentReferenceAddr() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
  }

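  // These fields are assumed to mirror the pendingNext, queue, queueNext, and
  // referent fields of java.lang.ref.Reference on the Java (libcore) side.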
  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
  HeapReference<Reference> pending_next_;
  HeapReference<Object> queue_;
  HeapReference<Reference> queue_next_;
  HeapReference<Object> referent_;  // Note: declared volatile on the Java side.

  friend struct art::ReferenceOffsets;  // for verifying offset information
  friend class gc::ReferenceProcessor;
  friend class gc::ReferenceQueue;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Reference);
};

// C++ mirror of java.lang.ref.FinalizerReference
class MANAGED FinalizerReference : public Reference {
 public:
  static MemberOffset ZombieOffset() {
    return OFFSET_OF_OBJECT_MEMBER(FinalizerReference, zombie_);
  }

  template<bool kTransactionActive>
  void SetZombie(ObjPtr<Object> zombie) REQUIRES_SHARED(Locks::mutator_lock_);

  Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldObjectVolatile<Object>(ZombieOffset());
  }

 private:
  HeapReference<FinalizerReference> next_;
  HeapReference<FinalizerReference> prev_;
  HeapReference<Object> zombie_;
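  // These fields are assumed to mirror the next, prev, and zombie fields of
  // java.lang.ref.FinalizerReference in libcore; zombie_ is what keeps an
  // otherwise-unreachable referent alive until its finalizer has run.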

  friend struct art::FinalizerReferenceOffsets;  // for verifying offset information
  DISALLOW_IMPLICIT_CONSTRUCTORS(FinalizerReference);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_REFERENCE_H_