/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "base/arena_allocator.h"
#include "base/malloc_arena_pool.h"
#include "jni/jni_env_ext.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "utils/arm/assembler_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "utils/arm64/assembler_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "utils/x86/assembler_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "utils/x86_64/assembler_x86_64.h"
#endif

#define __ assembler.

namespace art {
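// A trampoline is a tiny stub that forwards control to an entry point stored
// in the Thread object. The calling conventions below differ only in how the
// Thread* is located:
//   kInterpreterAbi: Thread* is passed as the first argument.
//   kJniAbi:         the first argument is a JNIEnv*, from which the Thread*
//                    is loaded via JNIEnvExt::SelfOffset().
//   kQuickAbi:       Thread* lives in the dedicated thread register (TR).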

#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {

#ifdef ___
#error "ARM Assembler macro already defined."
#else
#define ___ assembler.GetVIXLAssembler()->
#endif

static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
  using vixl::aarch32::MemOperand;
  using vixl::aarch32::pc;
  using vixl::aarch32::r0;
  ArmVIXLAssembler assembler(allocator);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
      ___ Ldr(pc, MemOperand(r0, offset.Int32Value()));
      break;
    case kJniAbi: {  // Load via Thread* held in JNIEnv* in first argument (R0).
      vixl::aarch32::UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      const vixl::aarch32::Register temp_reg = temps.Acquire();

      // VIXL will use the destination as a scratch register if
      // the offset is not encodable as an immediate operand.
      ___ Ldr(temp_reg, MemOperand(r0, JNIEnvExt::SelfOffset(4).Int32Value()));
      ___ Ldr(pc, MemOperand(temp_reg, offset.Int32Value()));
      break;
    }
    case kQuickAbi:  // TR holds Thread*.
      ___ Ldr(pc, MemOperand(tr, offset.Int32Value()));
  }

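  // Finalize the generated code and copy it out of the assembler's internal
  // buffer into a heap-allocated byte vector.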
  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}

#undef ___

}  // namespace arm
#endif  // ART_ENABLE_CODEGEN_arm

#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
  Arm64Assembler assembler(allocator);

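  // IP0 (X16) and IP1 (X17) are the AArch64 intra-procedure-call scratch
  // registers, so the stub is free to clobber them while branching.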
  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(X0), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP1));

      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                    Arm64ManagedRegister::FromXRegister(X0),
                    Offset(JNIEnvExt::SelfOffset(8).Int32Value()));

      __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
    case kQuickAbi:  // TR holds Thread*.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace arm64
#endif  // ART_ENABLE_CODEGEN_arm64

#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
                                                                    ThreadOffset32 offset) {
  X86Assembler assembler(allocator);

  // All x86 trampolines call via the Thread* held in fs.
  __ fs()->jmp(Address::Absolute(offset));
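  // The jump above never falls through; int3 traps if control somehow
  // reaches past it.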
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86
#endif  // ART_ENABLE_CODEGEN_x86

#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
                                                                    ThreadOffset64 offset) {
  x86_64::X86_64Assembler assembler(allocator);

  // All x86-64 trampolines call via the Thread* held in gs.
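  // The second argument ('true') requests a plain absolute address rather
  // than a RIP-relative one, which is what the gs segment override needs.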
  __ gs()->jmp(x86_64::Address::Absolute(offset, true));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86_64
#endif  // ART_ENABLE_CODEGEN_x86_64

std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset64 offset) {
  MallocArenaPool pool;
  ArenaAllocator allocator(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64:
      return arm64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64:
      return x86_64::CreateTrampoline(&allocator, offset);
#endif
    default:
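      // Suppress unused-parameter warnings when no 64-bit backend is
      // compiled in.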
      UNUSED(abi);
      UNUSED(offset);
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}
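
// Illustrative call site (a sketch, not the actual caller): the compiler
// requests a 64-bit quick-ABI trampoline by passing a Thread offset, e.g.
// one obtained from the QUICK_ENTRYPOINT_OFFSET helper:
//
//   std::unique_ptr<const std::vector<uint8_t>> stub = CreateTrampoline64(
//       InstructionSet::kArm64,
//       kQuickAbi,
//       QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pQuickGenericJniTrampoline));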

std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset32 offset) {
  MallocArenaPool pool;
  ArenaAllocator allocator(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
      return arm::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86:
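      // The x86 stub is identical for every ABI (it always jumps through fs),
      // so abi is deliberately unused here.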
      UNUSED(abi);
      return x86::CreateTrampoline(&allocator, offset);
#endif
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

}  // namespace art