/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdio>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "class_linker-inl.h"
#include "class_root-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "imt_conflict_table.h"
#include "jni/jni_internal.h"
#include "linear_alloc.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change-inl.h"

namespace art {

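// Tests the quick runtime entrypoint stubs by invoking them directly through small hand-written
// assembly trampolines (one per supported architecture) rather than through compiler-emitted
// calls.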
class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() override {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods.
      ScopedObjectAccess soa(Thread::Current());
      runtime_->SetInstructionSet(kRuntimeISA);
      for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
        CalleeSaveType type = CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(RuntimeOptions* options) override {
    // Use a smaller heap.
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
    options->push_back(std::make_pair("-Xint", nullptr));
  }

  // Helper function needed since TEST_F makes a new class.
  Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
    return &self->tlsPtr_;
  }

 public:
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             ArtMethod* referrer) {
    return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
                                      Thread* self, ArtMethod* referrer, size_t hidden) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
    size_t fpr_result = 0;
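    // Each architecture block below implements the same contract: spill enough state to be safe,
    // place arg0-arg2 in the registers the quick stubs expect their arguments in, push |referrer|
    // where a caller ArtMethod* would normally live, load |hidden| into the hidden-argument
    // register (used, e.g., by the IMT conflict trampoline), call |code|, and return whatever the
    // stub leaves in the return register.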
#if defined(__i386__)
    // TODO: Set the thread?
#define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
#define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
    __asm__ __volatile__(
        "movd %[hidden], %%xmm7\n\t"  // This is a memory op, so do this early. If it is off of
                                      // esp, then we won't be able to access it after spilling.

        // Spill 6 registers.
        PUSH(%%ebx)
        PUSH(%%ecx)
        PUSH(%%edx)
        PUSH(%%esi)
        PUSH(%%edi)
        PUSH(%%ebp)

        // Store the inputs to the stack, but keep the referrer up top, less work.
        PUSH(%[referrer])           // Align stack.
        PUSH(%[referrer])           // Store referrer.

        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[code])
        // Now read them back into the required registers.
        POP(%%edi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%eax)
        // Call is prepared now.

        "call *%%edi\n\t"           // Call the stub.
        "addl $8, %%esp\n\t"        // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -8\n\t"

        // Restore 6 registers.
        POP(%%ebp)
        POP(%%edi)
        POP(%%esi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%ebx)

        : "=a" (result)
          // Use the result from eax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "m"(hidden)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx.
        : "memory", "xmm7");  // clobber.
#undef PUSH
#undef POP
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B.
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"    // Push referrer + 4B padding; sp stays 8B aligned.
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"      // Restore r9 from the earlier push.

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #24\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "str %[hidden], [sp, #20]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "ldr r12, [sp, #20]\n\t"
        "add sp, sp, #24\n\t"

        "blx r3\n\t"                // Call the stub.
        "add sp, sp, #12\n\t"       // Pop referrer, padding, and the saved r9 slot.
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"      // Restore state.
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"     // Save the result.
        : [result] "=r" (result)
          // Use the result from r0.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "r0", "memory");  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        // Spill x0-x7 which we say we don't clobber. May contain args.
        "sub sp, sp, #80\n\t"
        ".cfi_adjust_cfa_offset 80\n\t"
        "stp x0, x1, [sp]\n\t"
        "stp x2, x3, [sp, #16]\n\t"
        "stp x4, x5, [sp, #32]\n\t"
        "stp x6, x7, [sp, #48]\n\t"
        // To be extra defensive, store x20, x21. We do this because some of the stubs might make a
        // transition into the runtime via the blr instruction below and *not* save x20.
        "stp x20, x21, [sp, #64]\n\t"

        "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned.
        ".cfi_adjust_cfa_offset 16\n\t"
        "str %[referrer], [sp]\n\t"    // referrer

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset 48\n\t"
        // All things are "r" constraints, so direct str/stp should work.
        "stp %[arg0], %[arg1], [sp]\n\t"
        "stp %[arg2], %[code], [sp, #16]\n\t"
        "stp %[self], %[hidden], [sp, #32]\n\t"

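        // d8-d15 are callee-saved in AAPCS64. Fill them with recognizable garbage here and
        // re-check them after the call: if the stub (or a runtime transition it makes) fails to
        // preserve them, the check below routes through label 1 and sets the fpr_result flag.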
        // Now we definitely have x0-x3 free, use it to garble d8 - d15.
        "movk x0, #0xfad0\n\t"
        "movk x0, #0xebad, lsl #16\n\t"
        "movk x0, #0xfad0, lsl #32\n\t"
        "movk x0, #0xebad, lsl #48\n\t"
        "fmov d8, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d9, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d10, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d11, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d12, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d13, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d14, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d15, x0\n\t"

        // Load call params into the right registers.
        "ldp x0, x1, [sp]\n\t"
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x19, x17, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset -48\n\t"

        "blr x3\n\t"              // Call the stub.
        "mov x8, x0\n\t"          // Store result.
        "add sp, sp, #16\n\t"     // Drop the quick "frame".
        ".cfi_adjust_cfa_offset -16\n\t"

        // Test d8 - d15. We can use x1 and x2.
        "movk x1, #0xfad0\n\t"
        "movk x1, #0xebad, lsl #16\n\t"
        "movk x1, #0xfad0, lsl #32\n\t"
        "movk x1, #0xebad, lsl #48\n\t"
        "fmov x2, d8\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d9\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d10\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d11\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d12\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d13\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d14\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d15\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"

        "mov x9, #0\n\t"              // Use x9 as flag, in clobber list.

        // Finish up.
        "2:\n\t"
        "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result.
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x4, x5, [sp, #32]\n\t"
        "ldp x6, x7, [sp, #48]\n\t"
        "ldp x20, x21, [sp, #64]\n\t"
        "add sp, sp, #80\n\t"         // Free stack space, now sp as on entry.
        ".cfi_adjust_cfa_offset -80\n\t"

        "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result.
        "mov %[result], x8\n\t"       // Store the call result.

        "b 3f\n\t"                    // Goto end.

        // Failed fpr verification.
        "1:\n\t"
        "mov x9, #1\n\t"
        "b 2b\n\t"                    // Goto finish-up.

        // End.
        "3:\n\t"
        : [result] "=r" (result)
          // Use the result from x0.
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
          // X18 is a reserved register, cannot be clobbered.
          // Leave one register unclobbered, which is needed for compiling with
          // -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
          // which means we should unclobber one of the callee-saved registers that are unused.
          // Here we use x20.
          // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19",
          "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
          "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
          "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
          "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
          "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
          "memory");
#elif defined(__x86_64__) && !defined(__APPLE__)
#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
    // Note: Uses the native convention. We do a callee-save regimen by manually spilling and
    //       restoring almost all registers.
    // TODO: Set the thread?
    __asm__ __volatile__(
        // Spill almost everything (except rax, rsp). 14 registers.
        PUSH(%%rbx)
        PUSH(%%rcx)
        PUSH(%%rdx)
        PUSH(%%rsi)
        PUSH(%%rdi)
        PUSH(%%rbp)
        PUSH(%%r8)
        PUSH(%%r9)
        PUSH(%%r10)
        PUSH(%%r11)
        PUSH(%%r12)
        PUSH(%%r13)
        PUSH(%%r14)
        PUSH(%%r15)

        PUSH(%[referrer])              // Push referrer & 16B alignment padding.
        PUSH(%[referrer])

        // Now juggle the input registers.
        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[hidden])
        PUSH(%[code])
        POP(%%r8)
        POP(%%rax)
        POP(%%rdx)
        POP(%%rsi)
        POP(%%rdi)

        "call *%%r8\n\t"                  // Call the stub.
        "addq $16, %%rsp\n\t"             // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -16\n\t"

        POP(%%r15)
        POP(%%r14)
        POP(%%r13)
        POP(%%r12)
        POP(%%r11)
        POP(%%r10)
        POP(%%r9)
        POP(%%r8)
        POP(%%rbp)
        POP(%%rdi)
        POP(%%rsi)
        POP(%%rdx)
        POP(%%rcx)
        POP(%%rbx)

        : "=a" (result)
        // Use the result from rax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into some other
        // register. We can't use "b" (rbx), as ASAN uses this for the frame pointer.
        : "memory");  // We spill and restore (almost) all registers, so only mention memory here.
#undef PUSH
#undef POP
#else
    UNUSED(arg0, arg1, arg2, code, referrer, hidden);
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);

    fp_result = fpr_result;
    EXPECT_EQ(0U, fp_result);

    return result;
  }

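  // Resolves a quick entrypoint to the code address currently installed for this thread:
  // GetThreadOffset maps the enum to the byte offset of the matching function pointer in the
  // Thread object's entrypoint table, and the pointer stored there is returned.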
  static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
    int32_t offset = GetThreadOffset<kRuntimePointerSize>(entrypoint).Int32Value();
    return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
  }

 protected:
  size_t fp_result;
};

TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);

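  // Only the ten words starting at index 4 were copied. Note that trg[0] still compares equal to
  // orig[0] because both arrays hold 0 there; indices [1, 4) and [14, 20) must remain different.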
  EXPECT_EQ(orig[0], trg[0]);

  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);

  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero.

  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

    // Check we're at lock count i.
    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }

  // Force a fat lock by running identity hashcode to fill up the lock word.
  Handle<mirror::String> obj2(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  obj2->IdentityHashCode();

  Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj2->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
  EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));

  // Test done.
#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}


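// Deterministic pseudo-random number generator for the stress test below: a Lehmer-style
// multiplicative congruential step (the MINSTD multiplier 48271 modulo 2^31 - 1) with an extra
// +13, so failures reproduce across runs.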
class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};


// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
  const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init
  static constexpr size_t kNumberOfLocks = 10;  // Number of objects; one lock per object.
  StackHandleScope<kNumberOfLocks + 1> hs(self);
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.
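  // Invariant maintained below: counts[i] tracks the expected recursion depth of lock i, and
  // fat[i] records whether it has been inflated. For a held thin lock, ThinLockCount() reports
  // depth - 1, which is why the state check compares against counts[index] - 1.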

  RandGen r(0x1234);

  constexpr size_t kIterations = 10000;  // Number of iterations.
  constexpr size_t kMoveToFat = 1000;    // Chance of 1:kMoveToFat per step to make a lock fat.

  size_t counts[kNumberOfLocks];
  bool fat[kNumberOfLocks];  // Whether a lock should be thin or fat.
  Handle<mirror::String> objects[kNumberOfLocks];

  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    fat[i] = false;
    objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    // Make lock fat?
    if (!fat[index] && (r.next() % kMoveToFat == 0)) {
      fat[index] = true;
      objects[index]->IdentityHashCode();

      LockWord lock_iter = objects[index]->GetLockWord(false);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (counts[index] == 0) {
        EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
      } else {
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
      }
    } else {
      bool take_lock;  // Whether to lock or unlock in this step.
      if (counts[index] == 0) {
        take_lock = true;
      } else if (counts[index] == kThinLockLoops) {
        take_lock = false;
      } else {
        // Randomly.
        take_lock = r.next() % 2 == 0;
      }

      if (take_lock) {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
                      self);
        counts[index]++;
      } else {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
                      art_quick_unlock_object, self);
        counts[index]--;
      }

      EXPECT_FALSE(self->IsExceptionPending());

      // Check the new state.
      LockWord lock_iter = objects[index]->GetLockWord(true);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (fat[index]) {
        // Abuse MonitorInfo.
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
        MonitorInfo info(objects[index].Get());
        EXPECT_EQ(counts[index], info.entry_count_) << index;
      } else {
        if (counts[index] > 0) {
          EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
          EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
        } else {
          EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
        }
      }
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go in reverse order to correctly handle Handles.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
                    self);
      count--;
    }

    LockWord lock_after4 = objects[index]->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
                || LockWord::LockState::kFatLocked == new_state4);
  }

  // Test done.
#else
  UNUSED(test);
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

TEST_F(StubTest, UnlockObject) {
  // This will lead to monitor error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  TestUnlockObject(this);
}

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
extern "C" void art_quick_check_instance_of(void);
#endif

TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  const uintptr_t art_quick_check_instance_of =
      StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);

  // Find some classes.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  VariableSizedHandleScope hs(soa.Self());
  Handle<mirror::Class> klass_obj(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
  Handle<mirror::Class> klass_str(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
  Handle<mirror::Class> klass_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
  Handle<mirror::Class> klass_cloneable(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
  Handle<mirror::Class> klass_array_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
  Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
  Handle<mirror::String> string(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
  Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test object instance of java.lang.Object.
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.Object.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected false: Test object instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // Expected true: Test ArrayList instance of java.util.List.
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test ArrayList instance of java.lang.Cloneable.
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected false: Test string instance of java.util.ArrayList.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_array_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // Expected false: Test string instance of java.lang.Cloneable.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

TEST_F(StubTest, AllocObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  // TODO: Check the "Unresolved" allocation stubs.

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // Array helps to fill memory faster.
    Handle<mirror::Class> ca(
        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

    // Use arbitrary large amount for now.
    static const size_t kMaxHandles = 1000000;
    std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));

    std::vector<Handle<mirror::Object>> handles;
    // Start allocating with 128K.
    size_t length = 128 * KB / 4;
    while (length > 10) {
      Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
      if (self->IsExceptionPending() || h == nullptr) {
        self->ClearException();

        // Try a smaller length.
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        handles.push_back(h);
      }
    }
    LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";

    // Allocate simple objects till it fails.
    while (!self->IsExceptionPending()) {
      Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && h != nullptr) {
        handles.push_back(h);
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);
    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

TEST_F(StubTest, AllocObjectArray) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs.

  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  {
    // We can use null in the second argument as we do not need a method here (it is not used in
    // the resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);
    EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    ObjPtr<mirror::Object> obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_OBJ_PTR_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
    ObjPtr<mirror::Array> array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
                            GB,  // that should fail...
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}


TEST_F(StubTest, StringCompareTo) {
  TEST_DISABLED_FOR_STRING_COMPRESSION();
  // There is no StringCompareTo runtime entrypoint for __arm__ or __aarch64__.
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs.

  Thread* self = Thread::Current();

  const uintptr_t art_quick_string_compareto =
      StubTest::GetEntrypoint(self, kQuickStringCompareTo);

  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  const char* c[] = { "", "", "a", "aa", "ab",
      "aacaacaacaacaacaac",  // This one's under the default limit to go to __memcmp16.
      "aacaacaacaacaacaacaacaacaacaacaacaac",     // This one's over.
      "aacaacaacaacaacaacaacaacaacaacaacaaca" };  // As is this one. We need a separate one to
                                                  // defeat object-equal optimizations.
  static constexpr size_t kStringCount = arraysize(c);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
  }

  // TODO: wide characters.

  // Matrix of expectations. The first component is the first parameter. Note we only check the
  // sign, not the value. As we are testing random offsets, we need to compute this and need to
  // rely on String::CompareTo being correct.
  int32_t expected[kStringCount][kStringCount];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      expected[x][y] = s[x]->CompareTo(s[y].Get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      // Test string_compareto x y.
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
                              reinterpret_cast<size_t>(s[y].Get()), 0U,
                              art_quick_string_compareto, self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer.
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

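// The GetSet* helpers below all follow the same pattern: write each test value through the quick
// Set* stub (addressed by dex field index, resolved relative to |referrer|), read it back through
// the matching Get* stub or directly via ArtField, and check that the value survives the round
// trip at the field's width.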
static void GetSetBooleanStatic(ArtField* f, Thread* self,
                                ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  constexpr size_t num_values = 5;
  uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
                                           self,
                                           referrer);
    // Booleans are currently stored as uint8_t, so be zealous about asserting that writes and
    // reads round-trip exactly.
    EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetByteStatic),
                                           self,
                                           referrer);
    EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}


static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
                                  ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint8_t values[] = { 0, true, 2, 128, 0xFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    uint8_t res = f->GetBoolean(obj->Get());
    EXPECT_EQ(values[i], res) << "Iteration " << i;

    f->SetBoolean<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA
      << std::endl;
#endif
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    int8_t res = f->GetByte(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetByte<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetByteInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetCharStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

static void GetSetShortStatic(ArtField* f, Thread* self,
                              ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetShortStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    uint16_t res = f->GetChar(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetChar<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetCharInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
                                Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    int16_t res = f->GetShort(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetShort<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetShortInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA <<
      std::endl;
#endif
}

static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet32Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet32Static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


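// Like GetSet32Static, but for an instance field: arg1 carries the target object and arg2 the
// value. Also bumps the value via ArtField::SetInt to check that the Get32Instance stub
// observes the update.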
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet32Instance),
                              self,
                              referrer);

    int32_t res = f->GetInt(obj->Get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetInt<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet32Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))

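// Helper: stores `val` into the static object field with index `f_idx` through the SetObjStatic
// stub and reads it back through GetObjStatic. A handle keeps `val` alive across the calls.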
static void set_and_check_static(uint32_t f_idx,
                                 ObjPtr<mirror::Object> val,
                                 Thread* self,
                                 ArtMethod* referrer,
                                 StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  StackHandleScope<1u> hs(self);
  Handle<mirror::Object> h_val = hs.NewHandle(val);
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(h_val.Get()),
                            0U,
                            StubTest::GetEntrypoint(self, kQuickSetObjStatic),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U,
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjStatic),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(h_val.Get())) << "Value " << h_val.Get();
}
#endif

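// Exercises the static object-field stubs with null, then a live reference, then null again,
// so both the null and non-null store paths are covered.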
static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
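// Helper: stores `val` into field `f` of `trg` through the SetObjInstance stub, reads it back
// through GetObjInstance, and cross-checks with ArtField::GetObj.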
static void set_and_check_instance(ArtField* f,
                                   ObjPtr<mirror::Object> trg,
                                   ObjPtr<mirror::Object> val,
                                   Thread* self,
                                   ArtMethod* referrer,
                                   StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  StackHandleScope<2u> hs(self);
  Handle<mirror::Object> h_trg = hs.NewHandle(trg);
  Handle<mirror::Object> h_val = hs.NewHandle(val);
  test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(h_trg.Get()),
                            reinterpret_cast<size_t>(h_val.Get()),
                            StubTest::GetEntrypoint(self, kQuickSetObjInstance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(h_trg.Get()),
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjInstance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(h_val.Get())) << "Value " << h_val.Get();

  EXPECT_OBJ_PTR_EQ(h_val.Get(), f->GetObj(h_trg.Get()));
}
#endif

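// Instance-field counterpart of GetSetObjStatic: null, then a live string, then null again.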
static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->Get(), str, self, referrer, test);

  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


// TODO: Complete these tests for 32b architectures

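// Only run on 64-bit ISAs for now: Invoke3WithReferrer passes size_t arguments, which cannot
// carry a full 64-bit value on 32-bit targets (see the TODO above).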
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    // 64 bit FieldSet stores the set value in the second register.
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              values[i],
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet64Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet64Static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


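// Instance counterpart of GetSet64Static; additionally bumps the value via ArtField::SetLong
// and checks that the Get64Instance stub observes the update.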
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet64Instance),
                              self,
                              referrer);

    int64_t res = f->GetLong(obj->Get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetLong<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet64Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

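// Driver: allocates an AllFields instance, then runs the matching GetSet* helper over every
// static and instance field whose primitive type equals test_type.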
static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // garbage is created during ClassLinker::Init

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != nullptr);
  jobject o = env->AllocObject(jc);
  CHECK(o != nullptr);

  ScopedObjectAccess soa(self);
  StackHandleScope<3> hs(self);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(o)));
  Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
  // Need a method as a referrer
  ArtMethod* m = c->GetDirectMethod(0, kRuntimePointerSize);

  // Play with it...

  // Static fields.
  for (ArtField& f : c->GetSFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjStatic(&f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // Instance fields.
  for (ArtField& f : c->GetIFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjInstance(&obj, &f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // TODO: Deallocate things.
}

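// Each Fields* test transitions the thread to runnable, loads the AllFields dex file, starts
// the runtime, and then exercises the field stubs for one or two primitive widths.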
TEST_F(StubTest, Fields8) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimBoolean);
  TestFields(self, this, Primitive::Type::kPrimByte);
}

TEST_F(StubTest, Fields16) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimChar);
  TestFields(self, this, Primitive::Type::kPrimShort);
}

TEST_F(StubTest, Fields32) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

// Disabled, see b/27991555.
// FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken.
// The bridge calls through to GetCalleeSaveMethodCaller(), which looks up the pre-header
// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
// the bridge, then uses that to check for inlined frames, crashing in the process.
TEST_F(StubTest, DISABLED_IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  ScopedObjectAccess soa(self);
  StackHandleScope<7> hs(self);

  JNIEnv* env = Thread::Current()->GetJniEnv();

  // ArrayList

  // Load ArrayList and used methods (JNI).
  jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
  ASSERT_NE(nullptr, arraylist_jclass);
  jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, arraylist_constructor);
  jmethodID contains_jmethod = env->GetMethodID(
      arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, contains_jmethod);
  jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, add_jmethod);

  // Get representation.
  ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod);

  // Patch up ArrayList.contains.
  if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
    contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
        StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
  }

  // List

  // Load List and used methods (JNI).
  jclass list_jclass = env->FindClass("java/util/List");
  ASSERT_NE(nullptr, list_jclass);
  jmethodID inf_contains_jmethod = env->GetMethodID(
      list_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, inf_contains_jmethod);

  // Get mirror representation.
  ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod);

  // Object

  jclass obj_jclass = env->FindClass("java/lang/Object");
  ASSERT_NE(nullptr, obj_jclass);
  jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, obj_constructor);

  // Create instances.

  jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
  ASSERT_NE(nullptr, jarray_list);
  Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object>(jarray_list)));

  jobject jobj = env->NewObject(obj_jclass, obj_constructor);
  ASSERT_NE(nullptr, jobj);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(jobj)));

  // Invocation tests.

  // 1. imt_conflict

  // Contains.

  // We construct the ImtConflictTable ourselves, as we cannot go into the runtime stub
  // that will create it: the runtime stub expects to be called by compiled code.
  LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
  ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
  ImtConflictTable* empty_conflict_table =
      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc);
  void* data = linear_alloc->Alloc(
      self,
      ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
  ImtConflictTable* new_table = new (data) ImtConflictTable(
      empty_conflict_table, inf_contains, contains_amethod, kRuntimePointerSize);
  conflict_method->SetImtConflictTable(new_table, kRuntimePointerSize);

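  // Invoke the conflict trampoline as compiled code would: arg0 carries the conflict method,
  // arg1 the receiver, arg2 the call argument, and the hidden argument passes the interface
  // method's dex method index for the table lookup.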
  size_t result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);

  // Add object.

  env->CallBooleanMethod(jarray_list, add_jmethod, jobj);

  ASSERT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());

  // Contains.

  result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  // 2. regular interface trampoline

  result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
                               reinterpret_cast<size_t>(array_list.Get()),
                               reinterpret_cast<size_t>(obj.Get()),
                               StubTest::GetEntrypoint(self,
                                   kQuickInvokeInterfaceTrampolineWithAccessCheck),
                               self, contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  result = Invoke3WithReferrer(
      static_cast<size_t>(inf_contains->GetDexMethodIndex()),
      reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
      StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
      contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
#else
  LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

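// Exercises the quick IndexOf stub over every (string, char, start) combination and compares
// the stub's result against String::FastIndexOf.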
TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__)
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for the expected results.
  // TODO: Shared backing arrays.
  const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
  static constexpr size_t kStringCount = arraysize(c_str);
  const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
  static constexpr size_t kCharCount = arraysize(c_char);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
  }

  // Matrix of expectations. First component is the first parameter. As we test many start
  // offsets, we compute the expected results here and rely on String::FastIndexOf being correct.
  static constexpr size_t kMaxLen = 9;
  DCHECK_LE(strlen(c_str[kStringCount-1]), kMaxLen) << "Please fix the indexof test.";

  // Last dimension: start, offset by 1.
  int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
      }
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        int32_t start = static_cast<int32_t>(z) - 1;

        // Test indexof with string x, char y, and the given start offset.
        size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
                                StubTest::GetEntrypoint(self, kQuickIndexOf), self);

        EXPECT_FALSE(self->IsExceptionPending());

        // The result is a 32b signed integer; reinterpret the raw size_t return value.
        union {
          size_t r;
          int32_t i;
        } conv;
        conv.r = result;

        EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / " <<
            c_char[y] << " @ " << start;
      }
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


// TODO: Exercise the ReadBarrierMarkRegX entry points.

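// Calls the slow-path read barrier stub on an object's klass field and checks that it returns
// the same class reference that a normal read yields.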
TEST_F(StubTest, ReadBarrier) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);

  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Build an object instance
  Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

  size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
                          mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_OBJ_PTR_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_slow" << std::endl;
#endif
}

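// Same idea for the GC-root slow path: the stub must resolve a GcRoot<mirror::Class> to the
// same class that a normal root read yields.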
TEST_F(StubTest, ReadBarrierForRoot) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierForRootSlow =
      StubTest::GetEntrypoint(self, kQuickReadBarrierForRootSlow);

  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<1> hs(soa.Self());

  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  EXPECT_FALSE(self->IsExceptionPending());

  GcRoot<mirror::Class> root(GetClassRoot<mirror::String>());
  size_t result = Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_OBJ_PTR_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_for_root_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_for_root_slow" << std::endl;
#endif
}

}  // namespace art