/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_TRANSACTION_H_
#define ART_RUNTIME_TRANSACTION_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "base/value_object.h"
#include "dex/dex_file_types.h"
#include "dex/primitive.h"
#include "gc_root.h"
#include "offsets.h"

#include <list>
#include <map>

namespace art {
namespace gc {
class Heap;
}  // namespace gc
namespace mirror {
class Array;
class Class;
class DexCache;
class Object;
class String;
}  // namespace mirror
class InternTable;
template<class MirrorType> class ObjPtr;

46 class Transaction final {
47  public:
48   static constexpr const char* kAbortExceptionDescriptor = "dalvik.system.TransactionAbortError";
49   static constexpr const char* kAbortExceptionSignature = "Ldalvik/system/TransactionAbortError;";
50 
51   Transaction(bool strict, mirror::Class* root);
52   ~Transaction();
53 
54   void Abort(const std::string& abort_message)
55       REQUIRES(!log_lock_)
56       REQUIRES_SHARED(Locks::mutator_lock_);
57   void ThrowAbortError(Thread* self, const std::string* abort_message)
58       REQUIRES(!log_lock_)
59       REQUIRES_SHARED(Locks::mutator_lock_);
60   bool IsAborted() REQUIRES(!log_lock_);
61 
62   // If the transaction is rollbacking. Transactions will set this flag when they start rollbacking,
63   // because the nested transaction should be disabled when rollbacking to restore the memory.
64   bool IsRollingBack();
65 
66   // If the transaction is in strict mode, then all access of static fields will be constrained,
67   // one class's clinit will not be allowed to read or modify another class's static fields, unless
68   // the transaction is aborted.
IsStrict()69   bool IsStrict() {
70     return strict_;
71   }
72 
73   // Record object field changes.
74   void RecordWriteFieldBoolean(mirror::Object* obj,
75                                MemberOffset field_offset,
76                                uint8_t value,
77                                bool is_volatile)
78       REQUIRES(!log_lock_);
79   void RecordWriteFieldByte(mirror::Object* obj,
80                             MemberOffset field_offset,
81                             int8_t value,
82                             bool is_volatile)
83       REQUIRES(!log_lock_);
84   void RecordWriteFieldChar(mirror::Object* obj,
85                             MemberOffset field_offset,
86                             uint16_t value,
87                             bool is_volatile)
88       REQUIRES(!log_lock_);
89   void RecordWriteFieldShort(mirror::Object* obj,
90                              MemberOffset field_offset,
91                              int16_t value,
92                              bool is_volatile)
93       REQUIRES(!log_lock_);
94   void RecordWriteField32(mirror::Object* obj,
95                           MemberOffset field_offset,
96                           uint32_t value,
97                           bool is_volatile)
98       REQUIRES(!log_lock_);
99   void RecordWriteField64(mirror::Object* obj,
100                           MemberOffset field_offset,
101                           uint64_t value,
102                           bool is_volatile)
103       REQUIRES(!log_lock_);
104   void RecordWriteFieldReference(mirror::Object* obj,
105                                  MemberOffset field_offset,
106                                  mirror::Object* value,
107                                  bool is_volatile)
108       REQUIRES(!log_lock_);
109 
110   // Record array change.
111   void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
112       REQUIRES(!log_lock_)
113       REQUIRES_SHARED(Locks::mutator_lock_);
114 
115   // Record intern string table changes.
116   void RecordStrongStringInsertion(ObjPtr<mirror::String> s)
117       REQUIRES(Locks::intern_table_lock_)
118       REQUIRES(!log_lock_);
119   void RecordWeakStringInsertion(ObjPtr<mirror::String> s)
120       REQUIRES(Locks::intern_table_lock_)
121       REQUIRES(!log_lock_);
122   void RecordStrongStringRemoval(ObjPtr<mirror::String> s)
123       REQUIRES(Locks::intern_table_lock_)
124       REQUIRES(!log_lock_);
125   void RecordWeakStringRemoval(ObjPtr<mirror::String> s)
126       REQUIRES(Locks::intern_table_lock_)
127       REQUIRES(!log_lock_);
128 
129   // Record resolve string.
130   void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx)
131       REQUIRES_SHARED(Locks::mutator_lock_)
132       REQUIRES(!log_lock_);
133 
134   // Abort transaction by undoing all recorded changes.
135   void Rollback()
136       REQUIRES_SHARED(Locks::mutator_lock_)
137       REQUIRES(!log_lock_);
138 
139   void VisitRoots(RootVisitor* visitor)
140       REQUIRES(!log_lock_)
141       REQUIRES_SHARED(Locks::mutator_lock_);
142 
143   bool ReadConstraint(Thread* self, ObjPtr<mirror::Object> obj)
144       REQUIRES(!log_lock_)
145       REQUIRES_SHARED(Locks::mutator_lock_);
146 
147   bool WriteConstraint(Thread* self, ObjPtr<mirror::Object> obj)
148       REQUIRES(!log_lock_)
149       REQUIRES_SHARED(Locks::mutator_lock_);
150 
151   bool WriteValueConstraint(Thread* self, ObjPtr<mirror::Object> value)
152       REQUIRES(!log_lock_)
153       REQUIRES_SHARED(Locks::mutator_lock_);
154 
155  private:
156   class ObjectLog : public ValueObject {
157    public:
158     void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
159     void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
160     void LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile);
161     void LogShortValue(MemberOffset offset, int16_t value, bool is_volatile);
162     void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile);
163     void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
164     void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
165 
166     void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
167     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
168 
Size()169     size_t Size() const {
170       return field_values_.size();
171     }
172 
173     ObjectLog() = default;
174     ObjectLog(ObjectLog&& log) = default;
175 
176    private:
177     enum FieldValueKind {
178       kBoolean,
179       kByte,
180       kChar,
181       kShort,
182       k32Bits,
183       k64Bits,
184       kReference
185     };
186     struct FieldValue : public ValueObject {
187       // TODO use JValue instead ?
188       uint64_t value;
189       FieldValueKind kind;
190       bool is_volatile;
191 
FieldValueFieldValue192       FieldValue() : value(0), kind(FieldValueKind::kBoolean), is_volatile(false) {}
193       FieldValue(FieldValue&& log) = default;
194 
195      private:
196       DISALLOW_COPY_AND_ASSIGN(FieldValue);
197     };
198 
199     void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
200     void UndoFieldWrite(mirror::Object* obj,
201                         MemberOffset field_offset,
202                         const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);
203 
204     // Maps field's offset to its value.
205     std::map<uint32_t, FieldValue> field_values_;
206 
207     DISALLOW_COPY_AND_ASSIGN(ObjectLog);
208   };
209 
210   class ArrayLog : public ValueObject {
211    public:
212     void LogValue(size_t index, uint64_t value);
213 
214     void Undo(mirror::Array* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
215 
Size()216     size_t Size() const {
217       return array_values_.size();
218     }
219 
220     ArrayLog() = default;
221     ArrayLog(ArrayLog&& log) = default;
222 
223    private:
224     void UndoArrayWrite(mirror::Array* array,
225                         Primitive::Type array_type,
226                         size_t index,
227                         uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);
228 
229     // Maps index to value.
230     // TODO use JValue instead ?
231     std::map<size_t, uint64_t> array_values_;
232 
233     DISALLOW_COPY_AND_ASSIGN(ArrayLog);
234   };
235 
236   class InternStringLog : public ValueObject {
237    public:
238     enum StringKind {
239       kStrongString,
240       kWeakString
241     };
242     enum StringOp {
243       kInsert,
244       kRemove
245     };
246     InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);
247 
248     void Undo(InternTable* intern_table) const
249         REQUIRES_SHARED(Locks::mutator_lock_)
250         REQUIRES(Locks::intern_table_lock_);
251     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
252 
253     InternStringLog() = default;
254     InternStringLog(InternStringLog&& log) = default;
255 
256    private:
257     mutable GcRoot<mirror::String> str_;
258     const StringKind string_kind_;
259     const StringOp string_op_;
260 
261     DISALLOW_COPY_AND_ASSIGN(InternStringLog);
262   };
263 
264   class ResolveStringLog : public ValueObject {
265    public:
266     ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);
267 
268     void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);
269 
270     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
271 
272    private:
273     GcRoot<mirror::DexCache> dex_cache_;
274     const dex::StringIndex string_idx_;
275 
276     DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
277   };
278 
279   void LogInternedString(InternStringLog&& log)
280       REQUIRES(Locks::intern_table_lock_)
281       REQUIRES(!log_lock_);
282 
283   void UndoObjectModifications()
284       REQUIRES(log_lock_)
285       REQUIRES_SHARED(Locks::mutator_lock_);
286   void UndoArrayModifications()
287       REQUIRES(log_lock_)
288       REQUIRES_SHARED(Locks::mutator_lock_);
289   void UndoInternStringTableModifications()
290       REQUIRES(Locks::intern_table_lock_)
291       REQUIRES(log_lock_)
292       REQUIRES_SHARED(Locks::mutator_lock_);
293   void UndoResolveStringModifications()
294       REQUIRES(log_lock_)
295       REQUIRES_SHARED(Locks::mutator_lock_);
296 
297   void VisitObjectLogs(RootVisitor* visitor)
298       REQUIRES(log_lock_)
299       REQUIRES_SHARED(Locks::mutator_lock_);
300   void VisitArrayLogs(RootVisitor* visitor)
301       REQUIRES(log_lock_)
302       REQUIRES_SHARED(Locks::mutator_lock_);
303   void VisitInternStringLogs(RootVisitor* visitor)
304       REQUIRES(log_lock_)
305       REQUIRES_SHARED(Locks::mutator_lock_);
306   void VisitResolveStringLogs(RootVisitor* visitor)
307       REQUIRES(log_lock_)
308       REQUIRES_SHARED(Locks::mutator_lock_);
309 
310   const std::string& GetAbortMessage() REQUIRES(!log_lock_);
311 
312   Mutex log_lock_ ACQUIRED_AFTER(Locks::intern_table_lock_);
313   std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_);
314   std::map<mirror::Array*, ArrayLog> array_logs_  GUARDED_BY(log_lock_);
315   std::list<InternStringLog> intern_string_logs_ GUARDED_BY(log_lock_);
316   std::list<ResolveStringLog> resolve_string_logs_ GUARDED_BY(log_lock_);
317   bool aborted_ GUARDED_BY(log_lock_);
318   bool rolling_back_;  // Single thread, no race.
319   gc::Heap* const heap_;
320   const bool strict_;
321   std::string abort_message_ GUARDED_BY(log_lock_);
322   mirror::Class* root_ GUARDED_BY(log_lock_);
323   const char* assert_no_new_records_reason_ GUARDED_BY(log_lock_);
324 
325   friend class ScopedAssertNoNewTransactionRecords;
326 
327   DISALLOW_COPY_AND_ASSIGN(Transaction);
328 };
330 class ScopedAssertNoNewTransactionRecords {
331  public:
ScopedAssertNoNewTransactionRecords(const char * reason)332   explicit ScopedAssertNoNewTransactionRecords(const char* reason)
333     : transaction_(kIsDebugBuild ? InstallAssertion(reason) : nullptr) {}
334 
~ScopedAssertNoNewTransactionRecords()335   ~ScopedAssertNoNewTransactionRecords() {
336     if (kIsDebugBuild && transaction_ != nullptr) {
337       RemoveAssertion(transaction_);
338     }
339   }
340 
341  private:
342   static Transaction* InstallAssertion(const char* reason);
343   static void RemoveAssertion(Transaction* transaction);
344 
345   Transaction* transaction_;
346 };

}  // namespace art

#endif  // ART_RUNTIME_TRANSACTION_H_