/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#pragma once

#include <stdlib.h>
#include <stdint.h>
#include <sys/reboot.h>
#include <unistd.h>

#include <async_safe/log.h>
#include <private/bionic_globals.h>

// We choose a static pointer tag here for performance reasons. Dynamic tagging
// doesn't improve our detection, and simply hurts performance. This tag is
// deliberately chosen to always point to inaccessible memory on a standard
// 64-bit userspace process, and to be easily identifiable by developers. This
// tag is also deliberately different from the standard pattern-init tag (0xAA),
// so as to be distinguishable from an uninitialized-pointer access. The first
// and second nibbles are also deliberately designed to be the bitwise
// complement of each other (0b1011 vs. 0b0100) in order to reduce incidental
// matches. We also ensure that the top bit is set, as this catches incorrect
// code that assumes that a "negative" pointer indicates error. Users must not
// rely on the implementation-defined value of this pointer tag, as it may
// change.
static constexpr uintptr_t POINTER_TAG = 0xB4;
static constexpr unsigned UNTAG_SHIFT = 40;
static constexpr unsigned CHECK_SHIFT = 48;
static constexpr unsigned TAG_SHIFT = 56;
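
// Illustrative sketch (an assumption for exposition, not part of the tagging
// contract): with POINTER_TAG == 0xB4 and TAG_SHIFT == 56, a heap address such
// as 0x0000007f12345678 would be handed out as 0xb400007f12345678, which is
// why tagged heap addresses in crash dumps start with 0xb4. The checks below
// merely restate the properties described in the comment above.
static_assert(((POINTER_TAG >> 4) ^ (POINTER_TAG & 0xF)) == 0xF,
              "tag nibbles are expected to be bitwise complements");
static_assert((POINTER_TAG & 0x80) != 0,
              "the top bit of the tag is expected to be set");
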
#if defined(__aarch64__)
static constexpr uintptr_t ADDRESS_MASK = (static_cast<uintptr_t>(1) << TAG_SHIFT) - 1;
static constexpr uintptr_t TAG_MASK = static_cast<uintptr_t>(0xFF) << TAG_SHIFT;

static inline uintptr_t FixedPointerTag() {
  return __libc_globals->heap_pointer_tag & TAG_MASK;
}

static inline uintptr_t PointerCheckMask() {
  return (__libc_globals->heap_pointer_tag << (TAG_SHIFT - CHECK_SHIFT)) & TAG_MASK;
}

static inline uintptr_t PointerUntagMask() {
  return ~(__libc_globals->heap_pointer_tag << (TAG_SHIFT - UNTAG_SHIFT));
}
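
// Sketch of the layout assumed for __libc_globals->heap_pointer_tag, inferred
// from the shifts above (its actual initialization lives elsewhere in libc):
//   bits 56-63: the fixed tag applied to outgoing pointers (FixedPointerTag()),
//   bits 48-55: a byte that, shifted into the top byte, gives the mask of tag
//               bits verified on free (PointerCheckMask()),
//   bits 40-47: a byte that, shifted into the top byte and complemented, gives
//               the mask of bits kept when untagging (PointerUntagMask()).
// If the whole value is zero, tagging is effectively off: the fixed tag and
// check mask are zero, so every pointer passes the check, and the untag mask
// is all-ones, so pointers are returned unmodified.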
#endif // defined(__aarch64__)

// Return a forcibly-tagged pointer.
static inline void* TagPointer(void* ptr) {
#if defined(__aarch64__)
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) | FixedPointerTag());
#else
  async_safe_fatal("Attempting to tag a pointer (%p) on non-aarch64.", ptr);
#endif
}

#if defined(__aarch64__)
// Return a forcibly-untagged pointer. The pointer tag is not checked for
// validity.
static inline void* UntagPointer(const volatile void* ptr) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) & ADDRESS_MASK);
}

// Untag the pointer, and check the pointer tag iff the kernel supports tagged pointers and the
// pointer tag isn't being used by HWASAN or MTE. If the tag is incorrect, trap.
static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
  if (__predict_false(ptr == nullptr)) {
    return nullptr;
  }

  uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);

  // Applications may disable pointer tagging, which will be propagated to
  // libc in the zygote. This means that there may already be tagged heap
  // allocations that would fail if checked against the zeroed heap tag. The
  // check below allows us to turn *off* pointer tagging (by setting
  // PointerCheckMask() and FixedPointerTag() to zero) and still allow tagged
  // heap allocations to be freed.
  if ((ptr_int & PointerCheckMask()) != FixedPointerTag()) {
    // TODO(b/145604058) - Upstream tagged pointers documentation and provide
    // a link to it in the abort message here.
    async_safe_fatal("Pointer tag for %p was truncated.", ptr);
  }
  return reinterpret_cast<void*>(ptr_int & PointerUntagMask());
}
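
// Hypothetical caller sketch (the names below are illustrative, not part of
// libc): an allocator wrapper would untag and check on the deallocation path,
// e.g.
//
//   void wrapped_free(void* ptr) {
//     void* untagged = MaybeUntagAndCheckPointer(ptr);  // traps on a bad tag
//     real_free(untagged);                              // hypothetical backend
//   }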
108 
// Return a tagged pointer iff the kernel supports tagged pointers, and `ptr` is
// non-null.
static inline void* MaybeTagPointer(void* ptr) {
  if (__predict_true(ptr != nullptr)) {
    return TagPointer(ptr);
  }
  return ptr;
}
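
// Hypothetical caller sketch for the allocation path (again illustrative):
//
//   void* wrapped_malloc(size_t size) {
//     return MaybeTagPointer(real_malloc(size));  // tags non-null results
//   }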

#else  // defined(__aarch64__)
static inline void* UntagPointer(const volatile void* ptr) {
  return const_cast<void*>(ptr);
}

static inline void* MaybeTagPointer(void* ptr) {
  return ptr;
}

static inline void* MaybeUntagAndCheckPointer(const volatile void* ptr) {
  return const_cast<void*>(ptr);
}

#endif  // defined(__aarch64__)