Background
While browsing the Android 11 source code recently, I noticed that the ART runtime header art_method.h is full of annotations like REQUIRES_SHARED(Locks::mutator_lock_), as shown below:
```cpp
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ART_METHOD_H_
#define ART_RUNTIME_ART_METHOD_H_

#include <cstddef>
#include <limits>

#include <android-base/logging.h>
#include <jni.h>

#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/enums.h"
#include "base/macros.h"
#include "base/runtime_debug.h"
#include "dex/dex_file_structs.h"
#include "dex/modifiers.h"
#include "dex/primitive.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class CodeItemDataAccessor;
class CodeItemDebugInfoAccessor;
class CodeItemInstructionAccessor;
class DexFile;
template<class T> class Handle;
class ImtConflictTable;
enum InvokeType : uint32_t;
union JValue;
class OatQuickMethodHeader;
class ProfilingInfo;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class Signature;

namespace mirror {
class Array;
class Class;
class ClassLoader;
class DexCache;
class IfTable;
class Object;
template <typename MirrorType> class ObjectArray;
class PointerArray;
class String;

template <typename T> struct NativeDexCachePair;
using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
using MethodDexCacheType = std::atomic<MethodDexCachePair>;
}  // namespace mirror

class ArtMethod final {
 public:
  // Should the class state be checked on sensitive operations?
  DECLARE_RUNTIME_DEBUG_FLAG(kCheckDeclaringClassState);

  // The runtime dex_method_index is kDexNoIndex. To lower dependencies, we use this
  // constexpr, and ensure that the value is correct in art_method.cc.
  static constexpr uint32_t kRuntimeMethodDexMethodIndex = 0xFFFFFFFF;

  ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
      method_index_(0), hotness_count_(0) { }

  ArtMethod(ArtMethod* src, PointerSize image_pointer_size) {
    CopyFrom(src, image_pointer_size);
  }

  static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
                                        jobject jlr_method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE ObjPtr<mirror::Class> GetDeclaringClassUnchecked()
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::CompressedReference<mirror::Object>* GetDeclaringClassAddressWithoutBarrier() {
    return declaring_class_.AddressWithoutBarrier();
  }

  void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CASDeclaringClass(ObjPtr<mirror::Class> expected_class, ObjPtr<mirror::Class> desired_class)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr MemberOffset DeclaringClassOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
  }

  uint32_t GetAccessFlags() const {
    return access_flags_.load(std::memory_order_relaxed);
  }

  // This version should only be called when it's certain there is no
  // concurrency so there is no need to guarantee atomicity. For example,
  // before the method is linked.
  void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_) {
    access_flags_.store(new_access_flags, std::memory_order_relaxed);
  }

  static constexpr MemberOffset AccessFlagsOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, access_flags_));
  }

  // Approximate what kind of method call would be used for this method.
  InvokeType GetInvokeType() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if the method is declared public.
  bool IsPublic() const {
    return (GetAccessFlags() & kAccPublic) != 0;
  }

  // Returns true if the method is declared private.
  bool IsPrivate() const {
    return (GetAccessFlags() & kAccPrivate) != 0;
  }

  // Returns true if the method is declared static.
  bool IsStatic() const {
    return (GetAccessFlags() & kAccStatic) != 0;
  }

  // Returns true if the method is a constructor according to access flags.
  bool IsConstructor() const {
    return (GetAccessFlags() & kAccConstructor) != 0;
  }

  // Returns true if the method is a class initializer according to access flags.
  bool IsClassInitializer() const {
    return IsConstructor() && IsStatic();
  }

  // Returns true if the method is static, private, or a constructor.
  bool IsDirect() const {
    return IsDirect(GetAccessFlags());
  }

  static bool IsDirect(uint32_t access_flags) {
    constexpr uint32_t direct = kAccStatic | kAccPrivate | kAccConstructor;
    return (access_flags & direct) != 0;
  }

  // Returns true if the method is declared synchronized.
  bool IsSynchronized() const {
    constexpr uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized;
    return (GetAccessFlags() & synchonized) != 0;
  }

  bool IsFinal() const {
    return (GetAccessFlags() & kAccFinal) != 0;
  }

  bool IsIntrinsic() const {
    return (GetAccessFlags() & kAccIntrinsic) != 0;
  }

  ALWAYS_INLINE void SetIntrinsic(uint32_t intrinsic) REQUIRES_SHARED(Locks::mutator_lock_);

  uint32_t GetIntrinsic() const {
    static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
    static_assert(IsPowerOfTwo((kAccIntrinsicBits >> kAccFlagsShift) + 1),
                  "kAccIntrinsicBits are not continuous");
    static_assert((kAccIntrinsic & kAccIntrinsicBits) == 0,
                  "kAccIntrinsic overlaps kAccIntrinsicBits");
    DCHECK(IsIntrinsic());
    return (GetAccessFlags() & kAccIntrinsicBits) >> kAccFlagsShift;
  }

  void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsCopied() const {
    static_assert((kAccCopied & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
                  "kAccCopied conflicts with intrinsic modifier");
    const bool copied = (GetAccessFlags() & kAccCopied) != 0;
    // (IsMiranda() || IsDefaultConflicting()) implies copied
    DCHECK(!(IsMiranda() || IsDefaultConflicting()) || copied)
        << "Miranda or default-conflict methods must always be copied.";
    return copied;
  }

  bool IsMiranda() const {
    // The kAccMiranda flag value is used with a different meaning for native methods and methods
    // marked kAccCompileDontBother, so we need to check these flags as well.
    return (GetAccessFlags() & (kAccNative | kAccMiranda | kAccCompileDontBother)) == kAccMiranda;
  }

  // Returns true if invoking this method will not throw an AbstractMethodError or
  // IncompatibleClassChangeError.
  bool IsInvokable() const {
    return !IsAbstract() && !IsDefaultConflicting();
  }

  bool IsPreCompiled() const {
    if (IsIntrinsic()) {
      // kAccCompileDontBother overlaps with kAccIntrinsicBits.
      return false;
    }
    uint32_t expected = (kAccPreCompiled | kAccCompileDontBother);
    return (GetAccessFlags() & expected) == expected;
  }

  void SetPreCompiled() REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(IsInvokable());
    DCHECK(IsCompilable());
    AddAccessFlags(kAccPreCompiled | kAccCompileDontBother);
  }

  void ClearPreCompiled() REQUIRES_SHARED(Locks::mutator_lock_) {
    ClearAccessFlags(kAccPreCompiled | kAccCompileDontBother);
  }

  bool IsCompilable() const {
    if (IsIntrinsic()) {
      // kAccCompileDontBother overlaps with kAccIntrinsicBits.
      return true;
    }
    if (IsPreCompiled()) {
      return true;
    }
    return (GetAccessFlags() & kAccCompileDontBother) == 0;
  }

  void ClearDontCompile() REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsMiranda());
    ClearAccessFlags(kAccCompileDontBother);
  }

  void SetDontCompile() REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsMiranda());
    AddAccessFlags(kAccCompileDontBother);
  }

  // A default conflict method is a special sentinel method that stands for a conflict between
  // multiple default methods. It cannot be invoked, throwing an IncompatibleClassChangeError
  // if one attempts to do so.
  bool IsDefaultConflicting() const {
    if (IsIntrinsic()) {
      return false;
    }
    return (GetAccessFlags() & kAccDefaultConflict) != 0u;
  }

  // This is set by the class linker.
  bool IsDefault() const {
    static_assert((kAccDefault & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
                  "kAccDefault conflicts with intrinsic modifier");
    return (GetAccessFlags() & kAccDefault) != 0;
  }

  bool IsObsolete() const {
    return (GetAccessFlags() & kAccObsoleteMethod) != 0;
  }

  void SetIsObsolete() REQUIRES_SHARED(Locks::mutator_lock_) {
    AddAccessFlags(kAccObsoleteMethod);
  }

  bool IsNative() const {
    return (GetAccessFlags() & kAccNative) != 0;
  }

  // Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative.
  bool IsFastNative() const {
    // The presence of the annotation is checked by ClassLinker and recorded in access flags.
    // The kAccFastNative flag value is used with a different meaning for non-native methods,
    // so we need to check the kAccNative flag as well.
    constexpr uint32_t mask = kAccFastNative | kAccNative;
    return (GetAccessFlags() & mask) == mask;
  }

  // Checks to see if the method was annotated with @dalvik.annotation.optimization.CriticalNative.
  bool IsCriticalNative() const {
    // The presence of the annotation is checked by ClassLinker and recorded in access flags.
    // The kAccCriticalNative flag value is used with a different meaning for non-native methods,
    // so we need to check the kAccNative flag as well.
    constexpr uint32_t mask = kAccCriticalNative | kAccNative;
    return (GetAccessFlags() & mask) == mask;
  }

  bool IsAbstract() const {
    return (GetAccessFlags() & kAccAbstract) != 0;
  }

  bool IsSynthetic() const {
    return (GetAccessFlags() & kAccSynthetic) != 0;
  }

  bool IsVarargs() const {
    return (GetAccessFlags() & kAccVarargs) != 0;
  }

  bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_);

  bool UseFastInterpreterToInterpreterInvoke() const {
    // The bit is applicable only if the method is not intrinsic.
    constexpr uint32_t mask = kAccFastInterpreterToInterpreterInvoke | kAccIntrinsic;
    return (GetAccessFlags() & mask) == kAccFastInterpreterToInterpreterInvoke;
  }

  void SetFastInterpreterToInterpreterInvokeFlag() REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsIntrinsic());
    AddAccessFlags(kAccFastInterpreterToInterpreterInvoke);
  }

  void ClearFastInterpreterToInterpreterInvokeFlag() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!IsIntrinsic()) {
      ClearAccessFlags(kAccFastInterpreterToInterpreterInvoke);
    }
  }

  bool SkipAccessChecks() const {
    // The kAccSkipAccessChecks flag value is used with a different meaning for native methods,
    // so we need to check the kAccNative flag as well.
    return (GetAccessFlags() & (kAccSkipAccessChecks | kAccNative)) == kAccSkipAccessChecks;
  }

  void SetSkipAccessChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
    // SkipAccessChecks() is applicable only to non-native methods.
    DCHECK(!IsNative());
    AddAccessFlags(kAccSkipAccessChecks);
  }

  void ClearSkipAccessChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
    // SkipAccessChecks() is applicable only to non-native methods.
    DCHECK(!IsNative());
    ClearAccessFlags(kAccSkipAccessChecks);
  }

  bool PreviouslyWarm() const {
    if (IsIntrinsic()) {
      // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
      return true;
    }
    return (GetAccessFlags() & kAccPreviouslyWarm) != 0;
  }

  void SetPreviouslyWarm() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (IsIntrinsic()) {
      // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
      return;
    }
    AddAccessFlags(kAccPreviouslyWarm);
  }

  // Should this method be run in the interpreter and count locks (e.g., failed structured-
  // locking verification)?
  bool MustCountLocks() const {
    if (IsIntrinsic()) {
      return false;
    }
    return (GetAccessFlags() & kAccMustCountLocks) != 0;
  }

  void ClearMustCountLocks() REQUIRES_SHARED(Locks::mutator_lock_) {
    ClearAccessFlags(kAccMustCountLocks);
  }

  void SetMustCountLocks() REQUIRES_SHARED(Locks::mutator_lock_) {
    AddAccessFlags(kAccMustCountLocks);
    ClearAccessFlags(kAccSkipAccessChecks);
  }

  // Returns true if this method could be overridden by a default method.
  bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckIncompatibleClassChange(InvokeType type) REQUIRES_SHARED(Locks::mutator_lock_);

  // Throws the error that would result from trying to invoke this method (i.e.
  // IncompatibleClassChangeError or AbstractMethodError). Only call if !IsInvokable();
  void ThrowInvocationTimeError() REQUIRES_SHARED(Locks::mutator_lock_);

  uint16_t GetMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);

  // Doesn't do erroneous / unresolved class checks.
  uint16_t GetMethodIndexDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetVtableIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetMethodIndex();
  }

  void SetMethodIndex(uint16_t new_method_index) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Not called within a transaction.
    method_index_ = new_method_index;
  }

  static constexpr MemberOffset DexMethodIndexOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, dex_method_index_));
  }

  static constexpr MemberOffset MethodIndexOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
  }

  static constexpr MemberOffset ImtIndexOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, imt_index_));
  }

  uint32_t GetCodeItemOffset() const {
    return dex_code_item_offset_;
  }

  void SetCodeItemOffset(uint32_t new_code_off) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Not called within a transaction.
    dex_code_item_offset_ = new_code_off;
  }

  // Number of 32bit registers that would be required to hold all the arguments
  static size_t NumArgRegisters(const char* shorty);

  ALWAYS_INLINE uint32_t GetDexMethodIndex() const {
    return dex_method_index_;
  }

  void SetDexMethodIndex(uint32_t new_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Not called within a transaction.
    dex_method_index_ = new_idx;
  }

  // Lookup the Class from the type index into this method's dex cache.
  ObjPtr<mirror::Class> LookupResolvedClassFromTypeIndex(dex::TypeIndex type_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Resolve the Class from the type index into this method's dex cache.
  ObjPtr<mirror::Class> ResolveClassFromTypeIndex(dex::TypeIndex type_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if this method has the same name and signature of the other method.
  bool HasSameNameAndSignature(ArtMethod* other) REQUIRES_SHARED(Locks::mutator_lock_);

  // Find the method that this method overrides.
  ArtMethod* FindOverriddenMethod(PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Find the method index for this method within other_dexfile. If this method isn't present then
  // return dex::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
  // name and signature in the other_dexfile, such as the method index used to resolve this method
  // in the other_dexfile.
  uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
                                            uint32_t name_and_signature_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
              const char* shorty) REQUIRES_SHARED(Locks::mutator_lock_);

  const void* GetEntryPointFromQuickCompiledCode() const {
    return GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
  }
  ALWAYS_INLINE
  const void* GetEntryPointFromQuickCompiledCodePtrSize(PointerSize pointer_size) const {
    return GetNativePointer<const void*>(
        EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
  }

  void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
                                              kRuntimePointerSize);
  }
  ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
      const void* entry_point_from_quick_compiled_code, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
                     entry_point_from_quick_compiled_code,
                     pointer_size);
    // We might want to invoke compiled code, so don't use the fast path.
    ClearFastInterpreterToInterpreterInvokeFlag();
  }

  // Registers the native method and returns the new entry point. NB The returned entry point
  // might be different from the native_method argument if some MethodCallback modifies it.
  const void* RegisterNative(const void* native_method)
      REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;

  void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
        PtrSizedFields, data_) / sizeof(void*) * static_cast<size_t>(pointer_size));
  }

  static constexpr MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
    return DataOffset(pointer_size);
  }

  static constexpr MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
        PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
            * static_cast<size_t>(pointer_size));
  }

  ImtConflictTable* GetImtConflictTable(PointerSize pointer_size) const {
    DCHECK(IsRuntimeMethod());
    return reinterpret_cast<ImtConflictTable*>(GetDataPtrSize(pointer_size));
  }

  ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(IsRuntimeMethod());
    SetDataPtrSize(table, pointer_size);
  }

  ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(IsNative() || IsProxyMethod() || !IsInvokable())) {
      return nullptr;
    }
    return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
  }

  ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetDataPtrSize(info, kRuntimePointerSize);
  }

  ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetDataPtrSize(info, pointer_size);
  }

  static MemberOffset ProfilingInfoOffset() {
    DCHECK(IsImagePointerSize(kRuntimePointerSize));
    return DataOffset(kRuntimePointerSize);
  }

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE bool HasSingleImplementation() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetHasSingleImplementation(bool single_impl)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsIntrinsic()) << "conflict with intrinsic bits";
    if (single_impl) {
      AddAccessFlags(kAccSingleImplementation);
    } else {
      ClearAccessFlags(kAccSingleImplementation);
    }
  }

  ALWAYS_INLINE bool HasSingleImplementationFlag() const {
    return (GetAccessFlags() & kAccSingleImplementation) != 0;
  }

  // Takes a method and returns a 'canonical' one if the method is default (and therefore
  // potentially copied from some other class). For example, this ensures that the debugger
  // does not get confused as to which method we are in.
  ArtMethod* GetCanonicalMethod(PointerSize pointer_size = kRuntimePointerSize)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetSingleImplementation(PointerSize pointer_size);

  ALWAYS_INLINE void SetSingleImplementation(ArtMethod* method, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsNative());
    // Non-abstract method's single implementation is just itself.
    DCHECK(IsAbstract());
    SetDataPtrSize(method, pointer_size);
  }

  void* GetEntryPointFromJni() const {
    DCHECK(IsNative());
    return GetEntryPointFromJniPtrSize(kRuntimePointerSize);
  }

  ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(PointerSize pointer_size) const {
    return GetDataPtrSize(pointer_size);
  }

  void SetEntryPointFromJni(const void* entrypoint) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(IsNative());
    SetEntryPointFromJniPtrSize(entrypoint, kRuntimePointerSize);
  }

  ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetDataPtrSize(entrypoint, pointer_size);
  }

  ALWAYS_INLINE void* GetDataPtrSize(PointerSize pointer_size) const {
    DCHECK(IsImagePointerSize(pointer_size));
    return GetNativePointer<void*>(DataOffset(pointer_size), pointer_size);
  }

  ALWAYS_INLINE void SetDataPtrSize(const void* data, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(IsImagePointerSize(pointer_size));
    SetNativePointer(DataOffset(pointer_size), data, pointer_size);
  }

  // Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
  // conventions for a method of managed code. Returns false for Proxy methods.
  ALWAYS_INLINE bool IsRuntimeMethod() const {
    return dex_method_index_ == kRuntimeMethodDexMethodIndex;
  }

  // Is this a hand crafted method used for something like describing callee saves?
  bool IsCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsImtUnimplementedMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Find the catch block for the given exception type and dex_pc. When a catch block is found,
  // indicates whether the found catch block is responsible for clearing the exception or whether
  // a move-exception instruction is present.
  uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc,
                          bool* has_no_move_exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
  void VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS;

  const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);

  const char* GetDeclaringClassDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE const char* GetShorty() REQUIRES_SHARED(Locks::mutator_lock_);

  const char* GetShorty(uint32_t* out_length) REQUIRES_SHARED(Locks::mutator_lock_);

  const Signature GetSignature() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE const char* GetName() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE std::string_view GetNameView() REQUIRES_SHARED(Locks::mutator_lock_);

  ObjPtr<mirror::String> ResolveNameString() REQUIRES_SHARED(Locks::mutator_lock_);

  const dex::CodeItem* GetCodeItem() REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsResolvedTypeIdx(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetLineNumFromDexPC(uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_);

  const dex::ProtoId& GetPrototype() REQUIRES_SHARED(Locks::mutator_lock_);

  const dex::TypeList* GetParameterTypeList() REQUIRES_SHARED(Locks::mutator_lock_);

  const char* GetDeclaringClassSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);

  uint16_t GetClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_);

  const dex::ClassDef& GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE size_t GetNumberOfParameters() REQUIRES_SHARED(Locks::mutator_lock_);

  const char* GetReturnTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE Primitive::Type GetReturnTypePrimitive() REQUIRES_SHARED(Locks::mutator_lock_);

  const char* GetTypeDescriptorFromTypeIdx(dex::TypeIndex type_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lookup return type.
  ObjPtr<mirror::Class> LookupResolvedReturnType() REQUIRES_SHARED(Locks::mutator_lock_);
  // Resolve return type. May cause thread suspension due to GetClassFromTypeIdx
  // calling ResolveType this caused a large number of bugs at call sites.
  ObjPtr<mirror::Class> ResolveReturnType() REQUIRES_SHARED(Locks::mutator_lock_);

  ObjPtr<mirror::ClassLoader> GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::DexCache> GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
  ObjPtr<mirror::DexCache> GetObsoleteDexCache() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetInterfaceMethodForProxyUnchecked(PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetNonObsoleteMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // May cause thread suspension due to class resolution.
  bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Size of an instance of this native class.
  static size_t Size(PointerSize pointer_size) {
    return PtrSizedFieldsOffset(pointer_size) +
        (sizeof(PtrSizedFields) / sizeof(void*)) * static_cast<size_t>(pointer_size);
  }

  // Alignment of an instance of this native class.
  static size_t Alignment(PointerSize pointer_size) {
    // The ArtMethod alignment is the same as image pointer size. This differs from
    // alignof(ArtMethod) if cross-compiling with pointer_size != sizeof(void*).
    return static_cast<size_t>(pointer_size);
  }

  void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetCounter(uint16_t hotness_count) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE uint16_t GetCounter() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE static constexpr uint16_t MaxCounter() {
    return std::numeric_limits<decltype(hotness_count_)>::max();
  }

  ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);

  void CalculateAndSetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr MemberOffset HotnessCountOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
  }

  ArrayRef<const uint8_t> GetQuickenedInfo() REQUIRES_SHARED(Locks::mutator_lock_);

  uint16_t GetIndexFromQuickening(uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the method header for the compiled code containing 'pc'. Note that runtime
  // methods will return null for this method, as they are not oat based.
  const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get compiled code for the method, return null if no code exists.
  const void* GetOatMethodQuickCode(PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether the method has any compiled code, JIT or AOT.
  bool HasAnyCompiledCode() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
  // "a.b.C.m(II)V" (depending on the value of 'with_signature').
  static std::string PrettyMethod(ArtMethod* m, bool with_signature = true)
      REQUIRES_SHARED(Locks::mutator_lock_);
  std::string PrettyMethod(bool with_signature = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the JNI native function name for the non-overloaded method 'm'.
  std::string JniShortName() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the JNI native function name for the overloaded method 'm'.
  std::string JniLongName() REQUIRES_SHARED(Locks::mutator_lock_);

  // Update entry points by passing them through the visitor.
  template <typename Visitor>
  ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit the individual members of an ArtMethod. Used by imgdiag.
  // As imgdiag does not support mixing instruction sets or pointer sizes (e.g., using imgdiag32
  // to inspect 64-bit images, etc.), we can go beneath the accessors directly to the class
  // members.
  template <typename VisitorFunc>
  void VisitMembers(VisitorFunc& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(IsImagePointerSize(kRuntimePointerSize));
    visitor(this, &declaring_class_, "declaring_class_");
    visitor(this, &access_flags_, "access_flags_");
    visitor(this, &dex_code_item_offset_, "dex_code_item_offset_");
    visitor(this, &dex_method_index_, "dex_method_index_");
    visitor(this, &method_index_, "method_index_");
    visitor(this, &hotness_count_, "hotness_count_");
    visitor(this, &ptr_sized_fields_.data_, "ptr_sized_fields_.data_");
    visitor(this,
            &ptr_sized_fields_.entry_point_from_quick_compiled_code_,
            "ptr_sized_fields_.entry_point_from_quick_compiled_code_");
  }

  // Returns the dex instructions of the code item for the art method. Returns an empty array for
  // the null code item case.
  ALWAYS_INLINE CodeItemInstructionAccessor DexInstructions()
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the dex code item data section of the DexFile for the art method.
  ALWAYS_INLINE CodeItemDataAccessor DexInstructionData() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the dex code item debug info section of the DexFile for the art method.
  ALWAYS_INLINE CodeItemDebugInfoAccessor DexInstructionDebugInfo()
      REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Class>& DeclaringClassRoot() {
    return declaring_class_;
  }

 protected:
  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".

  // The class we are a part of.
  GcRoot<mirror::Class> declaring_class_;

  // Access flags; low 16 bits are defined by spec.
  // Getting and setting this flag needs to be atomic when concurrency is
  // possible, e.g. after this method's class is linked. Such as when setting
  // verifier flags and single-implementation flag.
  std::atomic<std::uint32_t> access_flags_;

  /* Dex file fields. The defining dex file is available via declaring_class_->dex_cache_ */

  // Offset to the CodeItem.
  uint32_t dex_code_item_offset_;

  // Index into method_ids of the dex file associated with this method.
  uint32_t dex_method_index_;

  /* End of dex file fields. */

  // Entry within a dispatch table for this method. For static/direct methods the index is into
  // the declaringClass.directMethods, for virtual methods the vtable and for interface methods
  // the ifTable.
  uint16_t method_index_;

  union {
    // Non-abstract methods: The hotness we measure for this method. Not atomic,
    // as we allow missing increments: if the method is hot, we will see it eventually.
    uint16_t hotness_count_;
    // Abstract methods: IMT index (bitwise negated) or zero if it was not cached.
    // The negation is needed to distinguish zero index and missing cached entry.
    uint16_t imt_index_;
  };

  // Fake padding field gets inserted here.

  // Must be the last fields in the method.
  struct PtrSizedFields {
    // Depending on the method type, the data is
    //   - native method: pointer to the JNI function registered to this method
    //                    or a function to resolve the JNI function,
    //   - conflict method: ImtConflictTable,
    //   - abstract/interface method: the single-implementation if any,
    //   - proxy method: the original interface method or constructor,
    //   - other methods: the profiling data.
    void* data_;

    // Method dispatch from quick compiled code invokes this pointer which may cause bridging
    // into the interpreter.
    void* entry_point_from_quick_compiled_code_;
  } ptr_sized_fields_;

 private:
  uint16_t FindObsoleteDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr size_t PtrSizedFieldsOffset(PointerSize pointer_size) {
    // Round up to pointer size for padding field. Tested in art_method.cc.
    return RoundUp(offsetof(ArtMethod, hotness_count_) + sizeof(hotness_count_),
                   static_cast<size_t>(pointer_size));
  }

  // Compare given pointer size to the image pointer size.
  static bool IsImagePointerSize(PointerSize pointer_size);

  dex::TypeIndex GetReturnTypeIndex() REQUIRES_SHARED(Locks::mutator_lock_);

  template<typename T>
  ALWAYS_INLINE T GetNativePointer(MemberOffset offset, PointerSize pointer_size) const {
    static_assert(std::is_pointer<T>::value, "T must be a pointer type");
    const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
    if (pointer_size == PointerSize::k32) {
      return reinterpret_cast<T>(*reinterpret_cast<const uint32_t*>(addr));
    } else {
      auto v = *reinterpret_cast<const uint64_t*>(addr);
      return reinterpret_cast<T>(dchecked_integral_cast<uintptr_t>(v));
    }
  }

  template<typename T>
  ALWAYS_INLINE void SetNativePointer(MemberOffset offset, T new_value, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    static_assert(std::is_pointer<T>::value, "T must be a pointer type");
    const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
    if (pointer_size == PointerSize::k32) {
      uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
      *reinterpret_cast<uint32_t*>(addr) = dchecked_integral_cast<uint32_t>(ptr);
    } else {
      *reinterpret_cast<uint64_t*>(addr) = reinterpret_cast<uintptr_t>(new_value);
    }
  }

  static inline bool IsValidIntrinsicUpdate(uint32_t modifier) {
    return (((modifier & kAccIntrinsic) == kAccIntrinsic) &&
            (((modifier & ~(kAccIntrinsic | kAccIntrinsicBits)) == 0)));
  }

  static inline bool OverlapsIntrinsicBits(uint32_t modifier) {
    return (modifier & kAccIntrinsicBits) != 0;
  }

  // This setter guarantees atomicity.
  void AddAccessFlags(uint32_t flag) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
    // None of the readers rely ordering.
    access_flags_.fetch_or(flag, std::memory_order_relaxed);
  }

  // This setter guarantees atomicity.
  void ClearAccessFlags(uint32_t flag) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
    access_flags_.fetch_and(~flag, std::memory_order_relaxed);
  }

  // Used by GetName and GetNameView to share common code.
  const char* GetRuntimeMethodName() REQUIRES_SHARED(Locks::mutator_lock_);

  DISALLOW_COPY_AND_ASSIGN(ArtMethod);  // Need to use CopyFrom to deal with 32 vs 64 bits.
};

class MethodCallback {
 public:
  virtual ~MethodCallback() {}

  virtual void RegisterNativeMethod(ArtMethod* method,
                                    const void* original_implementation,
                                    /*out*/void** new_implementation)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};

}  // namespace art

#endif  // ART_RUNTIME_ART_METHOD_H_
```
Searching the codebase for REQUIRES_SHARED turned up no definition at first, which left me puzzled. After some research, I found the relevant documentation.
REQUIRES_SHARED is a macro used by Clang's static thread safety analysis; its definition lives in mutex.h. Its purpose is to tell Clang, during static analysis, to verify that the annotated function is only ever called while the proper lock is held. The argument that follows, Locks::mutator_lock_, names the lock object that must be held; that variable is defined in locks.cc.
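ART's own definition follows the same pattern as the annotation headers described later in this article: when compiling with Clang, the macro expands to a thread-safety `__attribute__`, and otherwise to nothing. The following is a minimal sketch of that pattern for illustration only; the exact file layout and spelling in the AOSP tree may differ:

```cpp
// Sketch: how a REQUIRES_SHARED-style macro is typically defined.
// (Illustrative only; not a verbatim quote of the ART headers.)
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op for non-Clang compilers
#endif

#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

// With this, a declaration such as
//   ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
// tells Clang that callers must hold the mutator lock, at least in shared
// (read) mode, whenever they call GetDeclaringClass().
```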
Introduction
Clang's thread safety analysis is a C++ language extension that warns about potential race conditions in code. The analysis is completely static (performed at compile time), so it has no run-time overhead. It is still under active development, but it is mature enough to be deployed in production. It was developed by Google, with assistance from CERT (the United States Computer Emergency Readiness Team) / SEI (the Software Engineering Institute), and is used extensively in Google's internal code base.
Thread safety analysis works much like a type system for multithreaded programs. In a multithreaded environment, in addition to declaring the type of a piece of data (e.g., int, float), a programmer can declare how access to that data is controlled. For example, if the variable foo is guarded by the mutex mu, the analysis will warn whenever a piece of code reads or writes foo without first locking mu. Similarly, if a piece of code should only be accessed by the GUI thread but is touched by other threads, a warning is issued.
Getting Started
#include "mutex.h" class BankAccount { private: Mutex mu; int balance GUARDED_BY(mu); void depositImpl(int amount) { balance += amount; // WARNING! Cannot write balance without locking mu. } void withdrawImpl(int amount) REQUIRES(mu) { balance -= amount; // OK. Caller must have locked mu. } public: void withdraw(int amount) { mu.Lock(); withdrawImpl(amount); // OK. We've locked mu. } // WARNING! Failed to unlock mu. void transferFrom(BankAccount& b, int amount) { mu.Lock(); b.withdrawImpl(amount); // WARNING! Calling withdrawImpl() requires locking b.mu. depositImpl(amount); // OK. depositImpl() has no requirements. mu.Unlock(); } }; |
This code illustrates the basic concepts behind the analysis. The GUARDED_BY attribute declares that a thread must lock mu before it can read or write to balance, thereby ensuring that the increment and decrement operations are atomic. Similarly, REQUIRES declares that the calling thread must lock mu before calling withdrawImpl. Because the caller is assumed to have locked mu, it is safe to modify balance inside the method body.

The depositImpl() method carries no REQUIRES annotation, so the analysis issues a warning inside it. Thread safety analysis is not inter-procedural, so a callee's locking requirements must be declared explicitly on the callee itself. There is also a warning in transferFrom(), because although the method locks this->mu, it never locks b.mu; the analysis understands that these are two separate locks in two different objects.

Finally, there is a warning in withdraw(), because it fails to unlock mu. Every lock must have a matching unlock, and the analysis detects this kind of pairing violation. A function is allowed to acquire a lock without releasing it (or vice versa), but that must be annotated explicitly (using ACQUIRE/RELEASE). A fixed version of the class is sketched below.
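For contrast, here is one way the offending methods could be repaired so that the analysis is satisfied. This is a sketch only; note that locking two account objects without a fixed ordering (see ACQUIRED_BEFORE below) can still deadlock at run time:

```cpp
void depositImpl(int amount) REQUIRES(mu) {  // Annotation added.
  balance += amount;         // OK. Caller must now hold mu.
}

void withdraw(int amount) {
  mu.Lock();
  withdrawImpl(amount);
  mu.Unlock();               // Every Lock() now has a matching Unlock().
}

void transferFrom(BankAccount& b, int amount) {
  mu.Lock();
  b.mu.Lock();               // b.balance is guarded by b.mu, not this->mu.
  b.withdrawImpl(amount);
  b.mu.Unlock();
  depositImpl(amount);       // OK. mu is held, satisfying REQUIRES(mu).
  mu.Unlock();
}
```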
Running the Analysis

To run the analysis, simply pass the -Wthread-safety flag to the compiler, e.g.:
```
clang -c -Wthread-safety example.cpp
```
Note that this example assumes the existence of a correctly annotated file mutex.h, which declares which methods perform the locking and unlocking; the full file is reproduced at the end of this article.
Basic Concepts: Capabilities

Thread safety analysis provides a way of protecting resources with capabilities. A resource can be a data member, or a function/method that provides access to some underlying resource. The analysis ensures that a thread cannot access a resource (that is, call the function, or read/write the data) unless it holds the capability for it. Capabilities are associated with named C++ objects that declare specific methods for acquiring and releasing them, and the name of the object identifies the capability. The most common example is a mutex: if mu is a mutex, then calling mu.Lock() gives the calling thread the capability to access the data guarded by mu, and calling mu.Unlock() releases that capability.

A thread may hold a capability either exclusively or shared. An exclusive capability can be held by only one thread at a time, while a shared capability can be held by many threads simultaneously. This mechanism enables a multiple-reader, single-writer pattern: write operations require the exclusive capability, while read operations require only the shared one.

At any given moment during execution, each thread holds its own set of capabilities (the set of mutexes it has locked). These act like keys or tokens that allow the thread to access a resource. Just like physical security keys, a thread cannot copy or destroy a capability; it can only release it to, or acquire it from, another thread. For safety's sake, the annotations are deliberately agnostic about the exact mechanism by which capabilities are acquired and released; the analysis assumes that the underlying implementation (e.g., the mutex implementation) does this correctly.

The set of capabilities a given thread holds at a given point in time is a run-time concept. The job of the static analysis is to approximate that set, which is also called the capability environment: for every point in the program, it computes a conservative approximation of the capability environment that would exist at run time.
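One practical consequence of this conservative approximation: a capability that is held on only some execution paths into a statement is treated as not held at all. A small sketch, reusing the annotated Mutex from the mutex.h listing at the end of this article:

```cpp
Mutex mu;
int a GUARDED_BY(mu);

void foo(bool b) {
  if (b) mu.Lock();
  a = 0;               // Warning! mu is not held on every path reaching here.
  if (b) mu.Unlock();
}
```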
Reference Guide

Thread safety analysis uses attributes to declare threading constraints. Attributes must be attached to named declarations such as classes, methods, and data members. Users are strongly advised to define macros for the various attributes; example definitions can be found in the mutex.h file at the end of this article. The descriptions that follow assume those macros are in use.

For historical reasons, earlier versions of the analysis used macro names centered around locks. Those macros have since been renamed to fit the more general capability model. The previous names are still in use and are pointed out where applicable below.
GUARDED_BY(c) and PT_GUARDED_BY(c)

GUARDED_BY is an attribute on data members declaring that the member is protected by the given capability. Read operations on the data require shared access, while write operations require exclusive access.

PT_GUARDED_BY is similar, but is intended for pointers and smart pointers. There is no constraint on the data member (the pointer) itself; the attribute protects the data that it points to.
```cpp
Mutex mu;
int *p1             GUARDED_BY(mu);
int *p2             PT_GUARDED_BY(mu);
unique_ptr<int> p3  PT_GUARDED_BY(mu);

void test() {
  p1 = 0;             // Warning!

  *p2 = 42;           // Warning!
  p2 = new int;       // OK.

  *p3 = 42;           // Warning!
  p3.reset(new int);  // OK.
}
```
REQUIRES(...), REQUIRES_SHARED(...)

Previously: EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED

REQUIRES is an attribute on functions or methods declaring that the calling thread must have exclusive access to the given capabilities. More than one capability may be specified. The capabilities must be held on entry to the function and must still be held on exit.

REQUIRES_SHARED is identical, but requires only shared access.
```cpp
Mutex mu1, mu2;
int a GUARDED_BY(mu1);
int b GUARDED_BY(mu2);

void foo() REQUIRES(mu1, mu2) {
  a = 0;
  b = 0;
}

void test() {
  mu1.Lock();
  foo();         // Warning! Requires mu2.
  mu1.Unlock();
}
```
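A corresponding sketch for the shared variant, again assuming the annotated Mutex from the mutex.h listing at the end of this article: a reader lock satisfies REQUIRES_SHARED, but not REQUIRES.

```cpp
Mutex mu;
int a GUARDED_BY(mu);

int get() REQUIRES_SHARED(mu) {
  return a;          // OK. Reading a only needs shared access.
}

void set(int v) REQUIRES(mu) {
  a = v;             // Writing a needs exclusive access.
}

void test() {
  mu.ReaderLock();
  get();             // OK. A shared acquisition satisfies REQUIRES_SHARED.
  set(1);            // Warning! set() requires mu exclusively.
  mu.ReaderUnlock();
}
```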
ACQUIRE(...), ACQUIRE_SHARED(...), RELEASE(...), RELEASE_SHARED(...)

Previously: EXCLUSIVE_LOCK_FUNCTION, SHARED_LOCK_FUNCTION, UNLOCK_FUNCTION

ACQUIRE is an attribute on functions or methods declaring that the function acquires a capability but does not release it. The caller must not hold the given capability on entry, and will hold it on exit. ACQUIRE_SHARED is the shared-access variant.

RELEASE and RELEASE_SHARED declare that the function releases the given capability. The caller must hold the capability on entry, and will no longer hold it on exit. Whether the capability is shared or exclusive does not matter here.
```cpp
Mutex mu;
MyClass myObject GUARDED_BY(mu);

void lockAndInit() ACQUIRE(mu) {
  mu.Lock();
  myObject.init();
}

void cleanupAndUnlock() RELEASE(mu) {
  myObject.cleanup();
}                          // Warning! Need to unlock mu.

void test() {
  lockAndInit();
  myObject.doSomething();
  cleanupAndUnlock();
  myObject.doSomething();  // Warning, mu is not locked.
}
```
If no argument is passed to ACQUIRE or RELEASE, the argument defaults to this, and the analysis will not check the body of the annotated function. This pattern is intended for classes that hide locking details behind an abstract interface (translator's note: to avoid exposing the lock to the outside world, it is kept as private data of the class; a no-argument ACQUIRE/RELEASE on a public method then amounts to acquiring/releasing the object itself, and hence its private lock). For example:
```cpp
template <class T>
class CAPABILITY("mutex") Container {
private:
  Mutex mu;
  T* data;

public:
  // Hide mu from public interface.
  void Lock()   ACQUIRE() { mu.Lock(); }
  void Unlock() RELEASE() { mu.Unlock(); }

  T& getElem(int i) { return data[i]; }
};

void test() {
  Container<int> c;
  c.Lock();
  int i = c.getElem(0);
  c.Unlock();
}
```
EXCLUDES(...)
Previously: LOCKS_EXCLUDED

EXCLUDES is an attribute on functions or methods declaring that the caller must not hold the given capabilities. The annotation is used to prevent deadlock: many mutex implementations are not re-entrant, so a deadlock occurs if a function acquires the same mutex a second time.
```cpp
Mutex mu;
int a GUARDED_BY(mu);

void clear() EXCLUDES(mu) {
  mu.Lock();
  a = 0;
  mu.Unlock();
}

void reset() {
  mu.Lock();
  clear();     // Warning! Caller cannot hold 'mu'.
  mu.Unlock();
}
```
Unlike REQUIRES, EXCLUDES is optional. The analysis will not issue a warning if the attribute is missing, which can lead to false negatives in some cases (a function that should acquire and release a lock internally fails to do so, no warning is issued, and the error surfaces only at run time). This issue is discussed further in the section on negative capabilities in the Clang documentation.
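With negative capabilities, "mu is not held" becomes an explicit, verifiable precondition instead of an optional hint. The following is a sketch based on the Clang documentation; checking it requires the additional flag -Wthread-safety-negative:

```cpp
Mutex mu;
int a GUARDED_BY(mu);

void clear() REQUIRES(!mu) {  // Caller must NOT hold mu; this is enforced.
  mu.Lock();
  a = 0;
  mu.Unlock();
}

void reset() {
  mu.Lock();
  clear();                    // Warning! clear() requires that mu is not held.
  mu.Unlock();
}
```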
NO_THREAD_SAFETY_ANALYSIS
NO_THREAD_SAFETY_ANALYSIS is an attribute on functions or methods that turns off thread safety checking for the annotated function. It provides an escape hatch for two kinds of functions: (1) code that is deliberately thread-unsafe, and (2) code that is thread-safe, but too complicated for the analysis to understand. Reasons for (2) are described in the "Known Limitations" section of the Clang documentation.
```cpp
class Counter {
  Mutex mu;
  int a GUARDED_BY(mu);

  void unsafeIncrement() NO_THREAD_SAFETY_ANALYSIS { a++; }
};
```
Unlike the other attributes, NO_THREAD_SAFETY_ANALYSIS is not part of the function's interface. It should therefore be placed on the function definition in the source file (.cc or .cpp) rather than on the declaration in the header (.h).
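Concretely, the annotation belongs on the definition rather than the declaration — a minimal sketch:

```cpp
// counter.h -- the declaration carries no annotation.
class Counter {
  Mutex mu;
  int a GUARDED_BY(mu);
  void unsafeIncrement();
};

// counter.cpp -- the definition opts out of the analysis.
void Counter::unsafeIncrement() NO_THREAD_SAFETY_ANALYSIS {
  a++;  // Not checked: the analysis is disabled for this function.
}
```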
RETURN_CAPABILITY(c)
Previously: LOCK_RETURNED

RETURN_CAPABILITY is an attribute on functions or methods declaring that the function returns a reference to the given capability. It is typically used to annotate getter methods that return mutexes.
```cpp
class MyClass {
private:
  Mutex mu;
  int a GUARDED_BY(mu);

public:
  Mutex* getMu() RETURN_CAPABILITY(mu) { return &mu; }

  // analysis knows that getMu() == mu
  void clear() REQUIRES(getMu()) { a = 0; }
};
```
ACQUIRED_BEFORE(...), ACQUIRED_AFTER(...)

ACQUIRED_BEFORE and ACQUIRED_AFTER are attributes on member declarations, specifically on declarations of mutexes or other capabilities. They impose an acquisition order among the mutexes, with the goal of preventing deadlock.
```cpp
Mutex m1;
Mutex m2 ACQUIRED_AFTER(m1);

// Alternative declaration
// Mutex m2;
// Mutex m1 ACQUIRED_BEFORE(m2);

void foo() {
  m2.Lock();
  m1.Lock();    // Warning! m2 must be acquired after m1.
  m1.Unlock();
  m2.Unlock();
}
```
CAPABILITY(<string>)
Previously: LOCKABLE

CAPABILITY is an attribute on classes declaring that objects of the class can be used as capabilities. The string argument specifies the kind of capability in error messages, e.g. "mutex". See the Container example given earlier, or the Mutex class in the mutex.h file below.
SCOPED_CAPABILITY
Previously: SCOPED_LOCKABLE

SCOPED_CAPABILITY is an attribute on classes that implement RAII-style locking, in which the capability is acquired in the constructor and released in the destructor. Such classes require special handling because the constructor and destructor refer to the capability by different names; see the MutexLocker class in mutex.h below.
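In use, the analysis tracks the capability through the RAII object's scope, so no explicit Lock()/Unlock() pair appears at the call site:

```cpp
Mutex mu;
int a GUARDED_BY(mu);

void foo() {
  MutexLocker lock(&mu);  // mu is acquired by the constructor.
  a = 0;                  // OK. mu is held for the rest of this scope.
}                         // mu is released by the destructor; no warning.
```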
TRY_ACQUIRE(<bool>, ...), TRY_ACQUIRE_SHARED(<bool>, ...)

Previously: EXCLUSIVE_TRYLOCK_FUNCTION, SHARED_TRYLOCK_FUNCTION

These are attributes on functions or methods that attempt to acquire the given capability and return a boolean value indicating success or failure. The first argument must be true or false, specifying which return value indicates success; the remaining arguments are interpreted in the same way as ACQUIRE. See mutex.h for example annotations.
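A usage sketch with the TryLock() method from the mutex.h listing below: because TryLock() is annotated TRY_ACQUIRE(true), the analysis knows mu is held only on the branch where the call returned true.

```cpp
Mutex mu;
int a GUARDED_BY(mu);

void foo() {
  if (mu.TryLock()) {
    a = 1;         // OK. TryLock() returned true, so mu is held here.
    mu.Unlock();
  } else {
    // a = 2;      // Would warn: mu is not held on the failure branch.
  }
}
```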
ASSERT_CAPABILITY(...) and ASSERT_SHARED_CAPABILITY(...)
Previously: ASSERT_EXCLUSIVE_LOCK, ASSERT_SHARED_LOCK

These are attributes on functions or methods that assert the calling thread already holds the given capability, for example by performing a run-time check and aborting if it is not held. Their presence causes the analysis to assume the capability is held after any call to the annotated function. See the AssertHeld() and AssertReaderHeld() methods of the Mutex class in mutex.h.
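A usage sketch with the AssertHeld() method from the mutex.h listing below; after the call, the analysis assumes the capability is held:

```cpp
Mutex mu;
int a GUARDED_BY(mu);

void foo() {
  mu.AssertHeld();  // Aborts at run time if mu is not actually held.
  a = 0;            // OK. The analysis assumes mu is held from here on.
}
```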
GUARDED_VAR and PT_GUARDED_VAR

These are deprecated, argument-free variants of GUARDED_BY and PT_GUARDED_BY; they are kept only for backward compatibility (see the lock-style attribute block at the end of mutex.h).

mutex.h

Thread safety analysis can be used with any threading library, provided the threading API is wrapped in classes and methods carrying the appropriate annotations. The mutex.h file below defines such an annotated interface; the methods can be implemented on top of any underlying mutex implementation.
```cpp
#ifndef THREAD_SAFETY_ANALYSIS_MUTEX_H
#define THREAD_SAFETY_ANALYSIS_MUTEX_H

// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(__clang__) && (!defined(SWIG))
#define THREAD_ANNOTATION_ATTRIBUTE__(x)   __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)   // no-op
#endif

#define CAPABILITY(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(capability(x))

#define SCOPED_CAPABILITY \
  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)

#define GUARDED_BY(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))

#define PT_GUARDED_BY(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))

#define ACQUIRED_BEFORE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))

#define ACQUIRED_AFTER(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))

#define REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

#define ACQUIRE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))

#define ACQUIRE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))

#define RELEASE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))

#define RELEASE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))

#define TRY_ACQUIRE(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))

#define TRY_ACQUIRE_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))

#define EXCLUDES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))

#define ASSERT_CAPABILITY(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))

#define ASSERT_SHARED_CAPABILITY(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))

#define RETURN_CAPABILITY(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))

#define NO_THREAD_SAFETY_ANALYSIS \
  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)

// Defines an annotated interface for mutexes.
// These methods can be implemented to use any internal mutex implementation.
class CAPABILITY("mutex") Mutex {
public:
  // Acquire/lock this mutex exclusively. Only one thread can have exclusive
  // access at any one time. Write operations to guarded data require an
  // exclusive lock.
  void Lock() ACQUIRE();

  // Acquire/lock this mutex for read operations, which require only a shared
  // lock. This assumes a multiple-reader, single writer semantics. Multiple
  // threads may acquire the mutex simultaneously as readers, but a writer
  // must wait for all of them to release the mutex before it can acquire it
  // exclusively.
  void ReaderLock() ACQUIRE_SHARED();

  // Release/unlock an exclusive mutex.
  void Unlock() RELEASE();

  // Release/unlock a shared mutex.
  void ReaderUnlock() RELEASE_SHARED();

  // Try to acquire the mutex. Returns true on success, and false on failure.
  bool TryLock() TRY_ACQUIRE(true);

  // Try to acquire the mutex for read operations.
  bool ReaderTryLock() TRY_ACQUIRE_SHARED(true);

  // Assert that this mutex is currently held by the calling thread.
  void AssertHeld() ASSERT_CAPABILITY(this);

  // Assert that is mutex is currently held for read operations.
  void AssertReaderHeld() ASSERT_SHARED_CAPABILITY(this);

  // For negative capabilities.
  const Mutex& operator!() const { return *this; }
};

// MutexLocker is an RAII class that acquires a mutex in its constructor, and
// releases it in its destructor.
class SCOPED_CAPABILITY MutexLocker {
private:
  Mutex* mut;

public:
  MutexLocker(Mutex *mu) ACQUIRE(mu) : mut(mu) {
    mu->Lock();
  }
  ~MutexLocker() RELEASE() {
    mut->Unlock();
  }
};

#ifdef USE_LOCK_STYLE_THREAD_SAFETY_ATTRIBUTES
// The original version of thread safety analysis used the following attribute
// definitions. These use a lock-based terminology. They are still in use
// by existing thread safety code, and will continue to be supported.

// Deprecated.
#define PT_GUARDED_VAR \
  THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_var)

// Deprecated.
#define GUARDED_VAR \
  THREAD_ANNOTATION_ATTRIBUTE__(guarded_var)

// Replaced by REQUIRES
#define EXCLUSIVE_LOCKS_REQUIRED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))

// Replaced by REQUIRES_SHARED
#define SHARED_LOCKS_REQUIRED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))

// Replaced by CAPABILITY
#define LOCKABLE \
  THREAD_ANNOTATION_ATTRIBUTE__(lockable)

// Replaced by SCOPED_CAPABILITY
#define SCOPED_LOCKABLE \
  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)

// Replaced by ACQUIRE
#define EXCLUSIVE_LOCK_FUNCTION(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))

// Replaced by ACQUIRE_SHARED
#define SHARED_LOCK_FUNCTION(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))

// Replaced by RELEASE and RELEASE_SHARED
#define UNLOCK_FUNCTION(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))

// Replaced by TRY_ACQUIRE
#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))

// Replaced by TRY_ACQUIRE_SHARED
#define SHARED_TRYLOCK_FUNCTION(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))

// Replaced by ASSERT_CAPABILITY
#define ASSERT_EXCLUSIVE_LOCK(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))

// Replaced by ASSERT_SHARED_CAPABILITY
#define ASSERT_SHARED_LOCK(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))

// Replaced by EXCLUDE_CAPABILITY.
#define LOCKS_EXCLUDED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))

// Replaced by RETURN_CAPABILITY
#define LOCK_RETURNED(x) \
  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))

#endif  // USE_LOCK_STYLE_THREAD_SAFETY_ATTRIBUTES

#endif  // THREAD_SAFETY_ANALYSIS_MUTEX_H
```