/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86.h"

#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"
#include "utils/assembler.h"

namespace art {
namespace x86 {

static Register GetScratchRegister() {
  // ECX is an argument register on entry and gets spilled in BuildFrame().
  // After that, we can use it as a scratch register.
  return ECX;
}

// Slowpath entered when Thread::Current()->_exception is non-null.
class X86ExceptionSlowPath final : public SlowPath {
 public:
  explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  void Emit(Assembler* sp_asm) override;
 private:
  const size_t stack_adjust_;
};

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86Core(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 4;

static constexpr size_t kNativeStackAlignment = 16;
static_assert(kNativeStackAlignment == kStackAlignment);

#define __ asm_.

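// Builds the method frame for a JNI stub: pushes callee-save GPRs (tracked in
// CFI), reserves the remaining space with a single ESP adjustment, and finally
// pushes the ArtMethod* so that it sits at the top of the stack. A frame of
// exactly one word means a @CriticalNative tail call, where only the return
// address occupies the frame.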
void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
                                      ManagedRegister method_reg,
                                      ArrayRef<const ManagedRegister> spill_regs) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(4);  // Return address on stack.
  if (frame_size == kFramePointerSize) {
    // For @CriticalNative tail call.
    CHECK(method_reg.IsNoRegister());
    CHECK(spill_regs.empty());
  } else if (method_reg.IsNoRegister()) {
    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  } else {
    CHECK_ALIGNED(frame_size, kStackAlignment);
  }
  int gpr_count = 0;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ pushl(spill);
    gpr_count++;
    cfi().AdjustCFAOffset(kFramePointerSize);
    cfi().RelOffset(DWARFReg(spill), 0);
  }

  // Return address then method on stack.
  int32_t adjust = frame_size - gpr_count * kFramePointerSize -
      kFramePointerSize /*return address*/ -
      (method_reg.IsRegister() ? kFramePointerSize /*method*/ : 0u);
  if (adjust != 0) {
    __ addl(ESP, Immediate(-adjust));
    cfi().AdjustCFAOffset(adjust);
  }
  if (method_reg.IsRegister()) {
    __ pushl(method_reg.AsX86().AsCpuRegister());
    cfi().AdjustCFAOffset(kFramePointerSize);
  }
  DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);
}

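// Tears the frame down in reverse order: drop the ArtMethod* slot and padding
// with one ESP adjustment, pop the callee-save GPRs, and return. CFI state is
// remembered and restored around the epilogue so unwind info stays correct for
// any code emitted after this exit block.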
void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                       ArrayRef<const ManagedRegister> spill_regs,
                                       bool may_suspend ATTRIBUTE_UNUSED) {
  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  cfi().RememberState();
  // -kFramePointerSize for ArtMethod*.
  int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
  if (adjust != 0) {
    __ addl(ESP, Immediate(adjust));
    cfi().AdjustCFAOffset(-adjust);
  }
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ popl(spill);
    cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
    cfi().Restore(DWARFReg(spill));
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

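// The stack grows downwards, so growing the frame means subtracting from ESP.
// Both helpers encode the adjustment as an addl with a signed immediate and
// keep the CFA offset in sync for unwinding.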
void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    __ addl(ESP, Immediate(-adjust));
    cfi().AdjustCFAOffset(adjust);
  }
}

static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    assembler->addl(ESP, Immediate(adjust));
    assembler->cfi().AdjustCFAOffset(-adjust);
  }
}

void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(&asm_, adjust);
}

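// Stores a managed register to a frame slot, dispatching on the register kind:
// 32-bit GPR, GPR pair (64-bit values), X87 stack register, or XMM register.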
void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  X86ManagedRegister src = msrc.AsX86();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(Address(ESP, offs), src.AsCpuRegister());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(Address(ESP, offs), src.AsRegisterPairLow());
    __ movl(Address(ESP, FrameOffset(offs.Int32Value() + 4)), src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(ESP, offs));
    } else {
      __ fstpl(Address(ESP, offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(ESP, offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(ESP, offs), src.AsXmmRegister());
    }
  }
}

void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  X86ManagedRegister src = msrc.AsX86();
  CHECK(src.IsCpuRegister());
  __ movl(Address(ESP, dest), src.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86ManagedRegister src = msrc.AsX86();
  CHECK(src.IsCpuRegister());
  __ movl(Address(ESP, dest), src.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) {
  __ movl(Address(ESP, dest), Immediate(imm));
}

void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs) {
  Register scratch = GetScratchRegister();
  __ leal(scratch, Address(ESP, fr_offs));
  __ fs()->movl(Address::Absolute(thr_offs), scratch);
}

void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
  __ fs()->movl(Address::Absolute(thr_offs), ESP);
}

void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
                                         ManagedRegister /*src*/,
                                         FrameOffset /*in_off*/) {
  UNIMPLEMENTED(FATAL);  // This case only currently exists for ARM.
}

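// Mirror image of Store(): loads a frame slot into a managed register,
// dispatching on the destination register kind.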
void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(dest.AsCpuRegister(), Address(ESP, src));
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(dest.AsRegisterPairLow(), Address(ESP, src));
    __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value() + 4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(ESP, src));
    } else {
      __ fldl(Address(ESP, src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(ESP, src));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(ESP, src));
    }
  }
}

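// Loads a value from the current Thread. On x86, the FS segment base is set up
// to point at the Thread, so thread-local fields are read with fs-prefixed
// absolute addressing.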
void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 1u) {
      __ fs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src));
    } else {
      CHECK_EQ(4u, size);
      __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
    __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value() + 4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ fs()->flds(Address::Absolute(src));
    } else {
      __ fs()->fldl(Address::Absolute(src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
    } else {
      __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
    }
  }
}

void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(ESP, src));
}

void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest,
                                   ManagedRegister base,
                                   MemberOffset offs,
                                   bool unpoison_reference) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
  }
}

void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
                                      ManagedRegister base,
                                      Offset offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
}

void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
}

void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

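// Moves call arguments into place for the native call. The x86 native ABI
// passes all arguments on the stack, so the only register destination allowed
// is the single "hidden argument" checked for below.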
void X86JNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
                                         ArrayRef<ArgumentLocation> srcs) {
  DCHECK_EQ(dests.size(), srcs.size());
  bool found_hidden_arg = false;
  for (size_t i = 0, arg_count = srcs.size(); i != arg_count; ++i) {
    const ArgumentLocation& src = srcs[i];
    const ArgumentLocation& dest = dests[i];
    DCHECK_EQ(src.GetSize(), dest.GetSize());
    if (UNLIKELY(dest.IsRegister())) {
      // Native ABI has only stack arguments but we may pass one "hidden arg" in register.
      CHECK(!found_hidden_arg);
      found_hidden_arg = true;
      CHECK(src.IsRegister());
      Move(dest.GetRegister(), src.GetRegister(), dest.GetSize());
    } else {
      if (src.IsRegister()) {
        Store(dest.GetFrameOffset(), src.GetRegister(), dest.GetSize());
      } else {
        Copy(dest.GetFrameOffset(), src.GetFrameOffset(), dest.GetSize());
      }
    }
  }
}

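// Register-to-register move. There is no direct instruction for moving an X87
// stack register into an XMM register, so that case spills the value to a
// temporary 16-byte stack slot and reloads it.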
void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  DCHECK(!mdest.Equals(X86ManagedRegister::FromCpuRegister(GetScratchRegister())));
  X86ManagedRegister dest = mdest.AsX86();
  X86ManagedRegister src = msrc.AsX86();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register.
      IncreaseFrameSize(16);
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(ESP, 0));
        __ movss(dest.AsXmmRegister(), Address(ESP, 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(ESP, 0));
        __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
      }
      DecreaseFrameSize(16);
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) {
  Register scratch = GetScratchRegister();
  __ movl(scratch, Address(ESP, src));
  __ movl(Address(ESP, dest), scratch);
}

void X86JNIMacroAssembler::CopyRef(FrameOffset dest,
                                   ManagedRegister base,
                                   MemberOffset offs,
                                   bool unpoison_reference) {
  Register scratch = GetScratchRegister();
  __ movl(scratch, Address(base.AsX86().AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(scratch);
  }
  __ movl(Address(ESP, dest), scratch);
}

void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset32 thr_offs) {
  Register scratch = GetScratchRegister();
  __ fs()->movl(scratch, Address::Absolute(thr_offs));
  __ movl(Address(ESP, fr_offs), scratch);
}

void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
                                              FrameOffset fr_offs,
                                              ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 4);
  __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}

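// Frame-to-frame copy. x86 has no memory-to-memory mov, so the value bounces
// through the scratch register; 8-byte copies are done as two 4-byte moves.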
void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
  DCHECK(size == 4 || size == 8) << size;
  Register scratch = GetScratchRegister();
  __ movl(scratch, Address(ESP, src));
  __ movl(Address(ESP, dest), scratch);
  if (size == 8) {
    __ movl(scratch, Address(ESP, FrameOffset(src.Int32Value() + 4)));
    __ movl(Address(ESP, FrameOffset(dest.Int32Value() + 4)), scratch);
  }
}

void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                ManagedRegister /*src_base*/,
                                Offset /*src_offset*/,
                                ManagedRegister /*scratch*/,
                                size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

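// The overloads below that require no scratch register copy memory to memory
// with a pushl/popl pair, since both instructions accept memory operands.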
void X86JNIMacroAssembler::Copy(ManagedRegister dest_base,
                                Offset dest_offset,
                                FrameOffset src,
                                ManagedRegister scratch,
                                size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  __ pushl(Address(ESP, src));
  __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
}

void X86JNIMacroAssembler::Copy(FrameOffset dest,
                                FrameOffset src_base,
                                Offset src_offset,
                                ManagedRegister mscratch,
                                size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  __ movl(scratch, Address(ESP, src_base));
  __ movl(scratch, Address(scratch, src_offset));
  __ movl(Address(ESP, dest), scratch);
}

void X86JNIMacroAssembler::Copy(ManagedRegister dest,
                                Offset dest_offset,
                                ManagedRegister src,
                                Offset src_offset,
                                ManagedRegister scratch,
                                size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
  __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
}

void X86JNIMacroAssembler::Copy(FrameOffset dest,
                                Offset dest_offset,
                                FrameOffset src,
                                Offset src_offset,
                                ManagedRegister mscratch,
                                size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  __ movl(scratch, Address(ESP, src));
  __ pushl(Address(scratch, src_offset));
  __ popl(Address(scratch, dest_offset));
}

void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
  __ mfence();
}

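// Creates a handle scope entry for a reference argument. When null is allowed,
// a null input produces a null output; otherwise the output is the address of
// the handle scope slot holding the reference.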
void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                                  FrameOffset handle_scope_offset,
                                                  ManagedRegister min_reg,
                                                  bool null_allowed) {
  X86ManagedRegister out_reg = mout_reg.AsX86();
  X86ManagedRegister in_reg = min_reg.AsX86();
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
  }
}

void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                  FrameOffset handle_scope_offset,
                                                  bool null_allowed) {
  Register scratch = GetScratchRegister();
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch, Address(ESP, handle_scope_offset));
    __ testl(scratch, scratch);
    __ j(kZero, &null_arg);
    __ leal(scratch, Address(ESP, handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(scratch, Address(ESP, handle_scope_offset));
  }
  __ movl(Address(ESP, out_off), scratch);
}

// Given a handle scope entry, load the associated reference.
void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                        ManagedRegister min_reg) {
  X86ManagedRegister out_reg = mout_reg.AsX86();
  X86ManagedRegister in_reg = min_reg.AsX86();
  CHECK(out_reg.IsCpuRegister());
  CHECK(in_reg.IsCpuRegister());
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
  }
  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
  __ j(kZero, &null_arg);
  __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
  __ Bind(&null_arg);
}

void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void X86JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset) {
  X86ManagedRegister base = mbase.AsX86();
  CHECK(base.IsCpuRegister());
  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
}

void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset) {
  X86ManagedRegister base = mbase.AsX86();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call.
}

void X86JNIMacroAssembler::Call(FrameOffset base, Offset offset) {
  Register scratch = GetScratchRegister();
  __ movl(scratch, Address(ESP, base));
  __ call(Address(scratch, offset));
}

void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset) {
  __ fs()->call(Address::Absolute(offset));
}

void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister dest) {
  __ fs()->movl(dest.AsX86().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
}

void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset) {
  Register scratch = GetScratchRegister();
  __ fs()->movl(scratch, Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
  __ movl(Address(ESP, offset), scratch);
}

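// Emits a pending-exception check: compares the thread-local exception field
// against null and, if set, jumps to a slow path that delivers the exception
// (see X86ExceptionSlowPath::Emit below).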
void X86JNIMacroAssembler::ExceptionPoll(size_t stack_adjust) {
  X86ExceptionSlowPath* slow = new (__ GetAllocator()) X86ExceptionSlowPath(stack_adjust);
  __ GetBuffer()->EnqueueSlowPath(slow);
  __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
  __ j(kNotEqual, slow->Entry());
}

std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new X86JNIMacroLabel());
}

void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86JNIMacroLabel::Cast(label)->AsX86());
}

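// Branches on the thread-local is_gc_marking flag, which JNI stubs typically
// use to select a read barrier path while the concurrent GC is marking.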
void X86JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) {
  CHECK(label != nullptr);

  art::x86::Condition x86_cond;
  switch (cond) {
    case JNIMacroUnaryCondition::kZero:
      x86_cond = art::x86::kZero;
      break;
    case JNIMacroUnaryCondition::kNotZero:
      x86_cond = art::x86::kNotZero;
      break;
    default:
      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(cond);
      UNREACHABLE();
  }

  // CMP self->tls32_.is_gc_marking, 0
  // Jcc <Offset>
  DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
  __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>()), Immediate(0));
  __ j(x86_cond, X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86JNIMacroLabel::Cast(label)->AsX86());
}

#undef __

void X86ExceptionSlowPath::Emit(Assembler* sasm) {
  X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Note: the return value is dead.
  if (stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
  }
  // Pass the exception as an argument in EAX.
  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
  // This call should never return.
  __ int3();
#undef __
}

}  // namespace x86
}  // namespace art