/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_id_manager.h"

#include <algorithm>
#include <cstdint>
#include <type_traits>

#include "android-base/macros.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/globals.h"
#include "base/locks.h"
#include "base/mutex.h"
#include "class_root-inl.h"
#include "gc/allocation_listener.h"
#include "gc/heap.h"
#include "jni/jni_internal.h"
#include "jni_id_type.h"
#include "mirror/array-inl.h"
#include "mirror/array.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/class_ext-inl.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
#include "reflective_handle_scope-inl.h"
#include "reflective_handle_scope.h"
#include "reflective_value_visitor.h"
#include "thread-inl.h"
#include "thread.h"

namespace art {
namespace jni {

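// When true, every jmethodID/jfieldID handed out by EncodeMethodId/EncodeFieldId is logged.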
constexpr bool kTraceIds = false;

// TODO This whole thing could be done lock- and wait-free (since we never remove anything from
// the ids list). It's not clear this would be worthwhile though.

namespace {

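// Index-based ids have their low bit set (see the comment above GetNextId): index 0 maps to id 1,
// index 1 to id 3, and so on. The helpers below convert between the two forms.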
static constexpr size_t IdToIndex(uintptr_t id) {
  return id >> 1;
}

static constexpr uintptr_t IndexToId(size_t index) {
  return (index << 1) + 1;
}

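// Returns the PointerArray in the declaring class' ClassExt that stores jni ids of this kind, or
// null if it has not been allocated yet (or the method is obsolete).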
template <typename ArtType>
ObjPtr<mirror::PointerArray> GetIds(ObjPtr<mirror::Class> k, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Object> ret;
  if constexpr (std::is_same_v<ArtType, ArtField>) {
    ret = t->IsStatic() ? k->GetStaticFieldIds() : k->GetInstanceFieldIds();
  } else {
    ret = t->IsObsolete() ? nullptr : k->GetMethodIds();
  }
  DCHECK(ret.IsNull() || ret->IsArrayInstance()) << "Should have bailed out early!";
  if (kIsDebugBuild && !ret.IsNull()) {
    if (kRuntimePointerSize == PointerSize::k32) {
      CHECK(ret->IsIntArray());
    } else {
      CHECK(ret->IsLongArray());
    }
  }
  return down_cast<mirror::PointerArray*>(ret.Ptr());
}

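// Returns true if a raw pointer should be handed out for this member instead of an index-based
// id, i.e. when the declaring class has no ClassExt yet or the relevant slot holds the pointer-id
// marker object (set up in Init) rather than an actual ids array.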
template <typename ArtType>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_);

template <>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtMethod* t ATTRIBUTE_UNUSED) {
  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
  if (ext.IsNull()) {
    return true;
  }
  ObjPtr<mirror::Object> arr = ext->GetJMethodIDs();
  return arr.IsNull() || !arr->IsArrayInstance();
}

template<>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtField* t) {
  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
  if (ext.IsNull()) {
    return true;
  }
  ObjPtr<mirror::Object> arr = t->IsStatic() ? ext->GetStaticJFieldIDs()
                                             : ext->GetInstanceJFieldIDs();
  return arr.IsNull() || !arr->IsArrayInstance();
}


// Forces the appropriate id array to be present if possible. Returns true if allocation was
// attempted but failed.
template <typename ArtType>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_);

template <>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtField* field) {
  ScopedExceptionStorage ses(self);
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_k(hs.NewHandle(k));
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    return false;
  } else {
    // NB This modifies the class to allocate the ClassExt and the ids array.
    field->IsStatic() ? mirror::Class::EnsureStaticFieldIds(h_k)
                      : mirror::Class::EnsureInstanceFieldIds(h_k);
  }
  if (self->IsExceptionPending()) {
    self->AssertPendingOOMException();
    ses.SuppressOldException("Failed to allocate maps for jfieldIDs. ");
    return true;
  }
  return false;
}

template <>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtMethod* method) {
  if (method->IsObsolete()) {
    if (kTraceIds) {
      LOG(INFO) << "jmethodID for Obsolete method " << method->PrettyMethod() << " requested!";
    }
    // No ids array for obsolete methods. Just do a linear scan.
    return false;
  }
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_k(hs.NewHandle(k));
  if (Locks::mutator_lock_->IsExclusiveHeld(self) || !Locks::mutator_lock_->IsSharedHeld(self)) {
    return false;
  } else {
    // NB This modifies the class to allocate the ClassExt and the ids array.
    mirror::Class::EnsureMethodIds(h_k);
  }
  if (self->IsExceptionPending()) {
    self->AssertPendingOOMException();
    return true;
  }
  return false;
}

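// Returns the index into the ids array that corresponds to the given field or method. Obsolete
// methods have no slot and return static_cast<size_t>(-1).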
template <typename ArtType>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtType* t, PointerSize pointer_size)
    REQUIRES_SHARED(Locks::mutator_lock_);
template <>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtField* f, PointerSize ptr_size ATTRIBUTE_UNUSED) {
  return f->IsStatic() ? k->GetStaticFieldIdOffset(f) : k->GetInstanceFieldIdOffset(f);
}
template <>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtMethod* method, PointerSize pointer_size) {
  return method->IsObsolete() ? -1 : k->GetMethodIdOffset(method, pointer_size);
}

// Calls the relevant PrettyMethod/PrettyField on the input.
template <typename ArtType>
std::string PrettyGeneric(ArtType t) REQUIRES_SHARED(Locks::mutator_lock_);
template <>
std::string PrettyGeneric(ArtMethod* f) {
  return f->PrettyMethod();
}
template <>
std::string PrettyGeneric(ReflectiveHandle<ArtMethod> f) {
  return f->PrettyMethod();
}
template <>
std::string PrettyGeneric(ArtField* f) {
  return f->PrettyField();
}
template <>
std::string PrettyGeneric(ReflectiveHandle<ArtField> f) {
  return f->PrettyField();
}

// Checks if the field or method is obsolete.
template <typename ArtType>
bool IsObsolete(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
template <>
bool IsObsolete(ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
  return false;
}
template <>
bool IsObsolete(ReflectiveHandle<ArtMethod> t) {
  return t->IsObsolete();
}

// Get the canonical (non-copied) version of the field or method. Only relevant for methods.
template <typename ArtType>
ArtType* Canonicalize(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
template <>
ArtField* Canonicalize(ReflectiveHandle<ArtField> t) {
  return t.Get();
}
template <>
ArtMethod* Canonicalize(ReflectiveHandle<ArtMethod> t) {
  if (UNLIKELY(t->IsCopied())) {
    return t->GetCanonicalMethod();
  }
  return t.Get();
}

};  // namespace

// We increment the id by 2 each time to allow us to use the LSB as a flag that the ID is an index
// and not a pointer. This gives us 2**31 unique methods that can be addressed on 32-bit art, which
// should be more than enough.
template <>
uintptr_t JniIdManager::GetNextId<ArtField>(JniIdType type) {
  DCHECK_EQ(type, JniIdType::kIndices);
  uintptr_t res = next_field_id_;
  next_field_id_ += 2;
  CHECK_GT(next_field_id_, res) << "jfieldID Overflow";
  return res;
}

template <>
uintptr_t JniIdManager::GetNextId<ArtMethod>(JniIdType type) {
  DCHECK_EQ(type, JniIdType::kIndices);
  uintptr_t res = next_method_id_;
  next_method_id_ += 2;
  CHECK_GT(next_method_id_, res) << "jmethodID Overflow";
  return res;
}
template <>
std::vector<ArtField*>& JniIdManager::GetGenericMap<ArtField>() {
  return field_id_map_;
}

template <>
std::vector<ArtMethod*>& JniIdManager::GetGenericMap<ArtMethod>() {
  return method_id_map_;
}
template <>
size_t JniIdManager::GetLinearSearchStartId<ArtField>(
    ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
  return deferred_allocation_field_id_start_;
}

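// Obsolete methods never have an ids array, so a lookup for one must scan the whole map (starting
// at the first id). Everything else only needs to scan ids handed out since deferral began.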
template <>
size_t JniIdManager::GetLinearSearchStartId<ArtMethod>(ReflectiveHandle<ArtMethod> m) {
  if (m->IsObsolete()) {
    return 1;
  } else {
    return deferred_allocation_method_id_start_;
  }
}

// TODO need to fix races in here with visitors
template <typename ArtType>
uintptr_t JniIdManager::EncodeGenericId(ReflectiveHandle<ArtType> t) {
  static_assert(std::is_same_v<ArtType, ArtField> || std::is_same_v<ArtType, ArtMethod>,
                "Expected ArtField or ArtMethod");
  Runtime* runtime = Runtime::Current();
  JniIdType id_type = runtime->GetJniIdType();
  if (id_type == JniIdType::kPointer || t == nullptr) {
    return reinterpret_cast<uintptr_t>(t.Get());
  }
  Thread* self = Thread::Current();
  ScopedExceptionStorage ses(self);
  DCHECK(!t->GetDeclaringClass().IsNull()) << "Null declaring class " << PrettyGeneric(t);
  size_t off = GetIdOffset(t->GetDeclaringClass(), Canonicalize(t), kRuntimePointerSize);
  // Here is the earliest point we can suspend.
  bool allocation_failure = EnsureIdsArray(self, t->GetDeclaringClass(), t.Get());
  if (allocation_failure) {
    self->AssertPendingOOMException();
    ses.SuppressOldException("OOM exception while trying to allocate JNI ids.");
    return 0u;
  } else if (ShouldReturnPointer(t->GetDeclaringClass(), t.Get())) {
    return reinterpret_cast<uintptr_t>(t.Get());
  }
  ObjPtr<mirror::Class> klass = t->GetDeclaringClass();
  ObjPtr<mirror::PointerArray> ids(GetIds(klass, t.Get()));
  uintptr_t cur_id = 0;
  if (!ids.IsNull()) {
    DCHECK_GT(ids->GetLength(), static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
    DCHECK_LE(0, static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
  }
  if (cur_id != 0) {
    return cur_id;
  }
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  ScopedAssertNoThreadSuspension sants("EncodeJniId critical section.");
  // Check the ids array for a racing id.
  constexpr std::pair<size_t, size_t> counts {
    std::is_same_v<ArtType, ArtField> ? 1 : 0,
    std::is_same_v<ArtType, ArtField> ? 0 : 1,
  };
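  // counts.first / counts.second give the number of ArtField / ArtMethod slots needed in the
  // reflective handle scope below; exactly one of them is non-zero.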
  StackReflectiveHandleScope<counts.first, counts.second> hs(self);
  t = hs.NewHandle(Canonicalize(t));
  if (!ids.IsNull()) {
    // It's possible we were suspended and the class was structurally redefined during
    // EnsureIdsArray. We need to get the information again.
    ids = GetIds(klass, t.Get());
    off = GetIdOffset(klass, Canonicalize(t), kRuntimePointerSize);
    CHECK(!ids.IsNull());
    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
    if (cur_id != 0) {
      // We were racing some other thread and lost.
      return cur_id;
    }
  } else {
    // Either we are not allowed to allocate anything here or there is no ids array (we might be
    // an obsolete method).
    DCHECK(IsObsolete(t) || deferred_allocation_refcount_ > 0u)
        << "deferred_allocation_refcount_: " << deferred_allocation_refcount_
        << " t: " << PrettyGeneric(t);
    // Check to see if we raced and lost to another thread.
    const std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
    bool found = false;
    // Simple count-while: count elements until we find `t`.
    size_t search_start_index = IdToIndex(GetLinearSearchStartId(t));
    size_t index = std::count_if(vec.cbegin() + search_start_index,
                                 vec.cend(),
                                 [&found, &self, t](const ArtType* candidate) {
                                   Locks::mutator_lock_->AssertSharedHeld(self);
                                   found = found || candidate == t.Get();
                                   return !found;
                                 }) +
                   search_start_index;
    if (found) {
      // We were either racing some other thread and lost or this thread was asked to encode the
      // same member multiple times while holding the mutator lock.
      DCHECK_EQ(vec[index], t.Get())
          << "Expected: " << PrettyGeneric(vec[index]) << " got " << PrettyGeneric(t)
          << " at index " << index << " (id: " << IndexToId(index) << ").";
      return IndexToId(index);
    }
  }
  cur_id = GetNextId<ArtType>(id_type);
  DCHECK_EQ(cur_id % 2, 1u);
  size_t cur_index = IdToIndex(cur_id);
  std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
  vec.reserve(cur_index + 1);
  vec.resize(std::max(vec.size(), cur_index + 1), nullptr);
  vec[cur_index] = t.Get();
  if (ids.IsNull()) {
    if (kIsDebugBuild && !IsObsolete(t)) {
      CHECK_NE(deferred_allocation_refcount_, 0u)
          << "Failed to allocate ids array despite not being forbidden from doing so!";
      Locks::mutator_lock_->AssertExclusiveHeld(self);
    }
  } else {
    ids->SetElementPtrSize(off, reinterpret_cast<void*>(cur_id), kRuntimePointerSize);
  }
  return cur_id;
}

jfieldID JniIdManager::EncodeFieldId(ArtField* field) {
  StackArtFieldHandleScope<1> rhs(Thread::Current());
  return EncodeFieldId(rhs.NewHandle(field));
}

jfieldID JniIdManager::EncodeFieldId(ReflectiveHandle<ArtField> field) {
  auto* res = reinterpret_cast<jfieldID>(EncodeGenericId(field));
  if (kTraceIds && field != nullptr) {
    LOG(INFO) << "Returning " << res << " for field " << field->PrettyField();
  }
  return res;
}

jmethodID JniIdManager::EncodeMethodId(ArtMethod* method) {
  StackArtMethodHandleScope<1> rhs(Thread::Current());
  return EncodeMethodId(rhs.NewHandle(method));
}

jmethodID JniIdManager::EncodeMethodId(ReflectiveHandle<ArtMethod> method) {
  auto* res = reinterpret_cast<jmethodID>(EncodeGenericId(method));
  if (kTraceIds && method != nullptr) {
    LOG(INFO) << "Returning " << res << " for method " << method->PrettyMethod();
  }
  return res;
}

void JniIdManager::VisitRoots(RootVisitor* visitor) {
  pointer_marker_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
}

void JniIdManager::Init(Thread* self) {
  // None of this is needed when AOT compiling, since JNI ids are never created during
  // compilation. This also means we don't need to do anything complicated with the image-writer.
  if (!Runtime::Current()->IsAotCompiler()) {
    // Allocate the marker object.
    StackHandleScope<3> hs(self);
    Handle<mirror::Object> marker_obj(
        hs.NewHandle(GetClassRoot<mirror::Object>()->AllocObject(self)));
    CHECK(!marker_obj.IsNull());
    pointer_marker_ = GcRoot<mirror::Object>(marker_obj.Get());
    // Manually mark class-ext as having all pointer-ids to avoid any annoying loops.
    Handle<mirror::Class> class_ext_class(hs.NewHandle(GetClassRoot<mirror::ClassExt>()));
    mirror::Class::EnsureExtDataPresent(class_ext_class, self);
    Handle<mirror::ClassExt> class_ext_ext(hs.NewHandle(class_ext_class->GetExtData()));
    class_ext_ext->SetIdsArraysForClassExtExtData(marker_obj.Get());
  }
}

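// Visits every field and method we have handed an index-based id out for, allowing the caller
// (for example the structural class-redefinition code) to replace the underlying pointers. Also
// keeps the per-class ClassExt id arrays consistent with any replacement.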
void JniIdManager::VisitReflectiveTargets(ReflectiveValueVisitor* rvv) {
  art::WriterMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
  for (auto it = field_id_map_.begin(); it != field_id_map_.end(); ++it) {
    ArtField* old_field = *it;
    uintptr_t id = IndexToId(std::distance(field_id_map_.begin(), it));
    ArtField* new_field =
        rvv->VisitField(old_field, JniIdReflectiveSourceInfo(reinterpret_cast<jfieldID>(id)));
    if (old_field != new_field) {
      *it = new_field;
      ObjPtr<mirror::Class> old_class(old_field->GetDeclaringClass());
      ObjPtr<mirror::Class> new_class(new_field->GetDeclaringClass());
      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
      if (!old_ext_data.IsNull()) {
        CHECK(!old_ext_data->HasInstanceFieldPointerIdMarker() &&
              !old_ext_data->HasStaticFieldPointerIdMarker())
            << old_class->PrettyClass();
        // Clear the old field mapping.
        if (old_field->IsStatic()) {
          size_t old_off = ArraySlice<ArtField>(old_class->GetSFieldsPtr()).OffsetOf(old_field);
          ObjPtr<mirror::PointerArray> old_statics(old_ext_data->GetStaticJFieldIDsPointerArray());
          if (!old_statics.IsNull()) {
            old_statics->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
          }
        } else {
          size_t old_off = ArraySlice<ArtField>(old_class->GetIFieldsPtr()).OffsetOf(old_field);
          ObjPtr<mirror::PointerArray> old_instances(
              old_ext_data->GetInstanceJFieldIDsPointerArray());
          if (!old_instances.IsNull()) {
            old_instances->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
          }
        }
      }
      if (!new_ext_data.IsNull()) {
        CHECK(!new_ext_data->HasInstanceFieldPointerIdMarker() &&
              !new_ext_data->HasStaticFieldPointerIdMarker())
            << new_class->PrettyClass();
        // Set the new field mapping.
        if (new_field->IsStatic()) {
          size_t new_off = ArraySlice<ArtField>(new_class->GetSFieldsPtr()).OffsetOf(new_field);
          ObjPtr<mirror::PointerArray> new_statics(new_ext_data->GetStaticJFieldIDsPointerArray());
          if (!new_statics.IsNull()) {
            new_statics->SetElementPtrSize(new_off, id, kRuntimePointerSize);
          }
        } else {
          size_t new_off = ArraySlice<ArtField>(new_class->GetIFieldsPtr()).OffsetOf(new_field);
          ObjPtr<mirror::PointerArray> new_instances(
              new_ext_data->GetInstanceJFieldIDsPointerArray());
          if (!new_instances.IsNull()) {
            new_instances->SetElementPtrSize(new_off, id, kRuntimePointerSize);
          }
        }
      }
    }
  }
  for (auto it = method_id_map_.begin(); it != method_id_map_.end(); ++it) {
    ArtMethod* old_method = *it;
    uintptr_t id = IndexToId(std::distance(method_id_map_.begin(), it));
    ArtMethod* new_method =
        rvv->VisitMethod(old_method, JniIdReflectiveSourceInfo(reinterpret_cast<jmethodID>(id)));
    if (old_method != new_method) {
      *it = new_method;
      ObjPtr<mirror::Class> old_class(old_method->GetDeclaringClass());
      ObjPtr<mirror::Class> new_class(new_method->GetDeclaringClass());
      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
      if (!old_ext_data.IsNull()) {
        CHECK(!old_ext_data->HasMethodPointerIdMarker()) << old_class->PrettyClass();
        // Clear the old method mapping.
        size_t old_off = ArraySlice<ArtMethod>(old_class->GetMethodsPtr()).OffsetOf(old_method);
        ObjPtr<mirror::PointerArray> old_methods(old_ext_data->GetJMethodIDsPointerArray());
        if (!old_methods.IsNull()) {
          old_methods->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
        }
      }
      if (!new_ext_data.IsNull()) {
        CHECK(!new_ext_data->HasMethodPointerIdMarker()) << new_class->PrettyClass();
        // Set the new method mapping.
        size_t new_off = ArraySlice<ArtMethod>(new_class->GetMethodsPtr()).OffsetOf(new_method);
        ObjPtr<mirror::PointerArray> new_methods(new_ext_data->GetJMethodIDsPointerArray());
        if (!new_methods.IsNull()) {
          new_methods->SetElementPtrSize(new_off, id, kRuntimePointerSize);
        }
      }
    }
  }
}

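// Decodes an id back into the runtime object. Odd values are indices into the id maps (guarded by
// jni_id_lock_); even values are raw ArtField*/ArtMethod* pointers.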
template <typename ArtType> ArtType* JniIdManager::DecodeGenericId(uintptr_t t) {
  if (Runtime::Current()->GetJniIdType() == JniIdType::kIndices && (t % 2) == 1) {
    ReaderMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
    size_t index = IdToIndex(t);
    DCHECK_GT(GetGenericMap<ArtType>().size(), index);
    return GetGenericMap<ArtType>().at(index);
  } else {
    DCHECK_EQ((t % 2), 0u) << "id: " << t;
    return reinterpret_cast<ArtType*>(t);
  }
}

ArtMethod* JniIdManager::DecodeMethodId(jmethodID method) {
  return DecodeGenericId<ArtMethod>(reinterpret_cast<uintptr_t>(method));
}

ArtField* JniIdManager::DecodeFieldId(jfieldID field) {
  return DecodeGenericId<ArtField>(reinterpret_cast<uintptr_t>(field));
}

ObjPtr<mirror::Object> JniIdManager::GetPointerMarker() {
  return pointer_marker_.Read();
}

// This whole defer system is an annoying requirement to allow us to generate IDs during
// heap-walks, such as those required by instrumentation tooling.
//
// The defer system works with the normal id-assignment routine to ensure that all the class-ext
// data structures are eventually created and filled in. It works as follows: the id-assignment
// function checks whether the thread holds the mutator-lock exclusively. If it does not, it tries
// to allocate the class-ext data structures normally and fails if it is unable to do so. If the
// mutator-lock is held exclusively, no allocation attempt is made and the thread CHECKs that
// allocations are being deferred (or that the method is obsolete, in which case there is no
// class-ext to store the method->id map in).
//
// Once the thread is done holding the exclusive mutator-lock it goes back and fills in the
// class-ext data of all the methods that were added. We do this without the exclusive
// mutator-lock, on a copy of the maps, before we decrement the deferred refcount. This ensures
// that any other threads running at the same time know they need to perform a linear scan of the
// id-map. Since we no longer hold the mutator-lock, other threads can allocate the class-ext
// data, meaning our copy is fine. The only way additional methods could end up on the id-maps
// after our copy without having class-ext data is if another thread picked up the exclusive
// mutator-lock and added another defer, in which case that thread would fix up the remaining ids.
// In this way we maintain eventual consistency between the class-ext method/field->id maps and
// the JniIdManager id->method/field maps.
//
// TODO It is possible for another thread to gain the mutator-lock and allocate new ids without
// calling StartDefer. This is basically a race that we should try to catch, but doing so is
// rather difficult, and since this defer system is only used in very rare circumstances it is
// unlikely to be worth the trouble.
void JniIdManager::StartDefer() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  if (deferred_allocation_refcount_++ == 0) {
    deferred_allocation_field_id_start_ = next_field_id_;
    deferred_allocation_method_id_start_ = next_method_id_;
  }
}

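// A reflective handle scope holding copies of the id maps so that the GC and redefinition code
// can update the copied ArtMethod*/ArtField* pointers while EndDefer walks them without holding
// jni_id_lock_.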
class JniIdDeferStackReflectiveScope : public BaseReflectiveHandleScope {
 public:
  JniIdDeferStackReflectiveScope() REQUIRES_SHARED(art::Locks::mutator_lock_)
      : BaseReflectiveHandleScope(), methods_(), fields_() {
    PushScope(Thread::Current());
  }

  void Initialize(const std::vector<ArtMethod*>& methods, const std::vector<ArtField*>& fields)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Roles::uninterruptible_) {
    methods_ = methods;
    fields_ = fields;
  }

  ~JniIdDeferStackReflectiveScope() REQUIRES_SHARED(Locks::mutator_lock_) {
    PopScope();
  }

  void VisitTargets(ReflectiveValueVisitor* visitor) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (auto it = methods_.begin(); it != methods_.end(); ++it) {
      if (*it == nullptr) {
        continue;
      }
      *it = visitor->VisitMethod(*it, ReflectiveHandleScopeSourceInfo(this));
    }
    for (auto it = fields_.begin(); it != fields_.end(); ++it) {
      if (*it == nullptr) {
        continue;
      }
      *it = visitor->VisitField(*it, ReflectiveHandleScopeSourceInfo(this));
    }
  }

  ArtField** GetFieldPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &fields_[idx];
  }

  ArtMethod** GetMethodPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &methods_[idx];
  }

  size_t NumFields() const {
    return fields_.size();
  }
  size_t NumMethods() const {
    return methods_.size();
  }

 private:
  std::vector<ArtMethod*> methods_;
  std::vector<ArtField*> fields_;
};

void JniIdManager::EndDefer() {
  // Fix up the method->id and field->id maps.
  Thread* self = Thread::Current();
  auto set_id = [&](auto** t, uintptr_t id) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (t == nullptr) {
      return;
    }
    bool alloc_failure = EnsureIdsArray(self, (*t)->GetDeclaringClass(), *t);
    ObjPtr<mirror::Class> klass((*t)->GetDeclaringClass());
    size_t off = GetIdOffset(klass, (*t), kRuntimePointerSize);
    ObjPtr<mirror::PointerArray> ids = GetIds(klass, (*t));
    CHECK(!alloc_failure) << "Could not allocate jni ids array!";
    if (ids.IsNull()) {
      return;
    }
    if (kIsDebugBuild) {
      uintptr_t old_id = ids->GetElementPtrSize<uintptr_t, kRuntimePointerSize>(off);
      if (old_id != 0) {
        DCHECK_EQ(old_id, id);
      }
    }
    ids->SetElementPtrSize(off, reinterpret_cast<void*>(id), kRuntimePointerSize);
  };
  // To ensure eventual consistency this relies on the method_id_map_ and field_id_map_ being the
  // ultimate source of truth and on no id ever being reused. It also relies on all threads
  // calling StartDefer if they are going to allocate jni ids while suspended. If a thread does so
  // without a scope we could miss ids.
  // TODO We should use roles or something to verify that this requirement is not broken.
  //
  // If another thread comes along and adds more methods to the list after copying, then either
  // (1) the id-maps are already present for the method and everything is fine, (2) the thread is
  // not suspended and so can create the ext-data and id lists, or (3) the thread also suspended
  // everything and incremented the deferred_allocation_refcount_, so it will fix up the new ids
  // when it finishes.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::mutator_lock_->AssertSharedHeld(self);
  JniIdDeferStackReflectiveScope jidsrs;
  uintptr_t method_start_id;
  uintptr_t field_start_id;
  {
    ReaderMutexLock mu(self, *Locks::jni_id_lock_);
    ScopedAssertNoThreadSuspension sants(__FUNCTION__);
    jidsrs.Initialize(method_id_map_, field_id_map_);
    method_start_id = deferred_allocation_method_id_start_;
    field_start_id = deferred_allocation_field_id_start_;
  }

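  // In debug builds walk every id rather than just those allocated during the defer window so
  // that the DCHECK in set_id can verify already-published ids are still consistent.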
  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(method_start_id); index < jidsrs.NumMethods();
       ++index) {
    set_id(jidsrs.GetMethodPtr(index), IndexToId(index));
  }
  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(field_start_id); index < jidsrs.NumFields();
       ++index) {
    set_id(jidsrs.GetFieldPtr(index), IndexToId(index));
  }
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  DCHECK_GE(deferred_allocation_refcount_, 1u);
  if (--deferred_allocation_refcount_ == 0) {
    deferred_allocation_field_id_start_ = 0;
    deferred_allocation_method_id_start_ = 0;
  }
}

ScopedEnableSuspendAllJniIdQueries::ScopedEnableSuspendAllJniIdQueries()
    : manager_(Runtime::Current()->GetJniIdManager()) {
  manager_->StartDefer();
}

ScopedEnableSuspendAllJniIdQueries::~ScopedEnableSuspendAllJniIdQueries() {
  manager_->EndDefer();
}

};  // namespace jni
};  // namespace art