/art/runtime/interpreter/mterp/arm64ng/ |
D | control_flow.S |
    168 mov sp, ip
    169 .cfi_def_cfa sp, CALLEE_SAVES_SIZE
|
D | array.S | 151 ldr x1, [sp]
|
D | other.S | 60 ldr x1, [sp]
|
/art/runtime/interpreter/mterp/arm64/ |
D | main.S |
    317 stp \reg1, \reg2, [sp, #(\offset)]
    326 ldp \reg1, \reg2, [sp, #(\offset)]
    335 stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
    345 ldp \reg1, \reg2, [sp], #(\frame_adjustment)
    409 add fp, sp, #64
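
The four macro bodies above are the standard A64 spill idioms: a register-pair store/load at a fixed sp offset, plus the pre-index store and post-index load that adjust sp as part of the access. A minimal C++ model of the two writeback forms, sketched over a byte array (illustrative names, not ART code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // stp r0, r1, [sp, #-frame]!  -- decrement sp first, then store the pair.
    void StpPreIndex(uint8_t*& sp, size_t frame, uint64_t r0, uint64_t r1) {
      sp -= frame;
      std::memcpy(sp, &r0, sizeof(r0));
      std::memcpy(sp + sizeof(r0), &r1, sizeof(r1));
    }

    // ldp r0, r1, [sp], #frame  -- load the pair, then bump sp back up.
    void LdpPostIndex(uint8_t*& sp, size_t frame, uint64_t& r0, uint64_t& r1) {
      std::memcpy(&r0, sp, sizeof(r0));
      std::memcpy(&r1, sp + sizeof(r0), sizeof(r1));
      sp += frame;
    }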
|
/art/test/1945-proxy-method-arguments/ |
D | get_args.cc | 84 ArtMethod** sp)
|
/art/runtime/ |
D | stack.h |
    286 ArtMethod** sp = GetCurrentQuickFrame(); in GetCurrentHandleScope() local
    288 return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size); in GetCurrentHandleScope()
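
Lines 286 and 288 locate a frame's HandleScope purely by pointer arithmetic: the scope sits immediately above the ArtMethod* slot at the base of the quick frame. A standalone sketch of that computation (the two struct types are empty stand-ins for the real ART classes):

    #include <cstddef>
    #include <cstdint>

    struct ArtMethod {};    // stand-in
    struct HandleScope {};  // stand-in

    // The HandleScope starts pointer_size bytes past the ArtMethod* slot,
    // matching the cast-and-add in GetCurrentHandleScope().
    HandleScope* HandleScopeFromQuickFrame(ArtMethod** sp, size_t pointer_size) {
      return reinterpret_cast<HandleScope*>(
          reinterpret_cast<uintptr_t>(sp) + pointer_size);
    }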
|
D | stack.cc |
    143 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    346 const uint8_t* sp = reinterpret_cast<const uint8_t*>(cur_quick_frame_); in GetVRegFromOptimizedCode() local
    347 *val = *reinterpret_cast<const uint32_t*>(sp + location.GetStackOffsetInBytes()); in GetVRegFromOptimizedCode()
    563 uintptr_t sp = reinterpret_cast<uintptr_t>(GetCurrentQuickFrame()); in GetReturnPcAddr() local
    564 DCHECK_NE(sp, 0u); in GetReturnPcAddr()
    565 return sp + GetCurrentQuickFrameInfo().GetReturnPcOffset(); in GetReturnPcAddr()
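
Both stack.cc call sites reduce to the same pattern: treat the current quick frame as a byte base and add an offset taken from the frame's metadata, either to read a spilled vreg (lines 346-347) or to form the address of the saved return PC (lines 563-565). A reduced sketch with the base and offsets passed in explicitly (the real code pulls them from cur_quick_frame_ and the frame info):

    #include <cstddef>
    #include <cstdint>

    // Read a 32-bit dex register spilled at a known byte offset in the frame.
    uint32_t ReadVRegSlot(const uint8_t* frame_base, size_t stack_offset_bytes) {
      return *reinterpret_cast<const uint32_t*>(frame_base + stack_offset_bytes);
    }

    // Address of the saved return PC: frame base plus the frame-info offset.
    uintptr_t ReturnPcAddr(uintptr_t frame_base, size_t return_pc_offset) {
      return frame_base + return_pc_offset;
    }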
|
D | cha.cc |
    231 uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame()); in SetShouldDeoptimizeFlag() local
    237 uint8_t* should_deoptimize_addr = sp + offset; in SetShouldDeoptimizeFlag()
|
D | runtime_common.cc | 276 DumpRegister64(os, "sp", context.sp); in Dump()
|
D | thread.cc |
    1898 sched_param sp; in DumpState() local
    1905 int sched_getparam_result = sched_getparam(tid, &sp); in DumpState()
    1908 sp.sched_priority = -1; in DumpState()
    1911 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp), in DumpState()
    1914 os << " sched=" << policy << "/" << sp.sched_priority in DumpState()
    3673 extern std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
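
Note that in DumpState() the name sp is not the stack pointer at all but a POSIX sched_param: the code queries the thread's scheduling policy and priority, falling back to -1 when the query fails. A self-contained sketch of the same query:

    #include <sched.h>
    #include <sys/types.h>

    // Returns the thread's scheduling priority, or -1 if sched_getparam()
    // fails (e.g. the tid no longer exists), as the DumpState() snippet does.
    int ThreadPriority(pid_t tid) {
      sched_param sp;
      if (sched_getparam(tid, &sp) != 0) {
        sp.sched_priority = -1;
      }
      return sp.sched_priority;
    }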
|
/art/compiler/optimizing/ |
D | code_generator_arm_vixl.cc |
    179 __ Vstr(vixl32::SRegister(first), MemOperand(sp, stack_offset)); in SaveContiguousSRegisterList()
    183 __ Vstr(vixl32::SRegister(first++), MemOperand(sp, stack_offset)); in SaveContiguousSRegisterList()
    199 __ Vstr(d_reg, MemOperand(sp, stack_offset)); in SaveContiguousSRegisterList()
    202 vixl32::Register base = sp; in SaveContiguousSRegisterList()
    205 __ Add(base, sp, Operand::From(stack_offset)); in SaveContiguousSRegisterList()
    213 __ Vstr(vixl32::SRegister(last + 1), MemOperand(sp, stack_offset)); in SaveContiguousSRegisterList()
    228 __ Vldr(vixl32::SRegister(first), MemOperand(sp, stack_offset)); in RestoreContiguousSRegisterList()
    232 __ Vldr(vixl32::SRegister(first++), MemOperand(sp, stack_offset)); in RestoreContiguousSRegisterList()
    247 __ Vldr(d_reg, MemOperand(sp, stack_offset)); in RestoreContiguousSRegisterList()
    250 vixl32::Register base = sp; in RestoreContiguousSRegisterList()
    [all …]
|
D | common_arm64.h | 177 return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex()); in StackOperandFrom()
|
D | code_generator_vector_arm64_sve.cc |
    1551 __ Ldr(temp, MemOperand(sp, source.GetStackIndex())); in MoveToSIMDStackSlot()
    1552 __ Str(temp, MemOperand(sp, destination.GetStackIndex())); in MoveToSIMDStackSlot()
    1553 __ Ldr(temp, MemOperand(sp, source.GetStackIndex() + kArm64WordSize)); in MoveToSIMDStackSlot()
    1554 __ Str(temp, MemOperand(sp, destination.GetStackIndex() + kArm64WordSize)); in MoveToSIMDStackSlot()
|
D | code_generator_vector_arm64_neon.cc |
    1551 __ Ldr(temp, MemOperand(sp, source.GetStackIndex())); in MoveToSIMDStackSlot()
    1552 __ Str(temp, MemOperand(sp, destination.GetStackIndex())); in MoveToSIMDStackSlot()
    1553 __ Ldr(temp, MemOperand(sp, source.GetStackIndex() + kArm64WordSize)); in MoveToSIMDStackSlot()
    1554 __ Str(temp, MemOperand(sp, destination.GetStackIndex() + kArm64WordSize)); in MoveToSIMDStackSlot()
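
The SVE and NEON generators share this slot-to-slot move: a 128-bit SIMD stack slot is copied through a 64-bit temporary in two load/store pairs spaced kArm64WordSize (8 bytes) apart. The same dance at the C++ level (a sketch over a raw stack buffer, not ART code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr size_t kArm64WordSize = 8;

    // Copy a 16-byte SIMD stack slot one 64-bit word at a time, matching
    // the Ldr/Str pairs in MoveToSIMDStackSlot().
    void MoveSimdSlot(uint8_t* stack, size_t src_index, size_t dst_index) {
      uint64_t temp;
      std::memcpy(&temp, stack + src_index, kArm64WordSize);
      std::memcpy(stack + dst_index, &temp, kArm64WordSize);
      std::memcpy(&temp, stack + src_index + kArm64WordSize, kArm64WordSize);
      std::memcpy(stack + dst_index + kArm64WordSize, &temp, kArm64WordSize);
    }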
|
D | code_generator_arm64.cc |
    1134 __ Ldr(method, MemOperand(sp, 0)); in MaybeIncrementHotness()
    1163 __ Stp(kArtMethodRegister, lr, MemOperand(sp, 0)); in MaybeIncrementHotness()
    1165 __ Str(kArtMethodRegister, MemOperand(sp, 0)); in MaybeIncrementHotness()
    1178 __ Ldr(lr, MemOperand(sp, 8)); in MaybeIncrementHotness()
    1196 __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kArm64))); in GenerateFrameEntry()
    1231 __ Stp(kArtMethodRegister, lowest_spill, MemOperand(sp, -frame_size, PreIndex)); in GenerateFrameEntry()
    1233 __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex)); in GenerateFrameEntry()
    1248 __ Str(wzr, MemOperand(sp, GetStackOffsetOfShouldDeoptimizeFlag())); in GenerateFrameEntry()
    1277 __ Ldp(xzr, lowest_spill, MemOperand(sp, frame_size, PostIndex)); in GenerateFrameExit()
    1387 __ Str(reg, MemOperand(sp, stack_index)); in SaveCoreRegister()
    [all …]
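
Line 1196 computes sp minus the reserved stack-overflow region during frame entry; presumably that address is then probed with a load so a guard-page fault fires before the new frame is committed. A hedged C++ model of such a probe (illustrative only; the real check is emitted as A64 code):

    #include <cstddef>
    #include <cstdint>

    // Touch (sp - reserved): if the stack cannot grow by 'reserved' bytes,
    // the read lands on the guard page and faults, which the runtime can
    // turn into a StackOverflowError. 'reserved' stands in for
    // GetStackOverflowReservedBytes(InstructionSet::kArm64).
    void ProbeStack(const volatile uint8_t* sp, size_t reserved) {
      (void)sp[-static_cast<std::ptrdiff_t>(reserved)];
    }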
|
/art/runtime/interpreter/mterp/arm/ |
D | main.S |
    385 stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
    737 ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
    748 ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
|
/art/compiler/utils/arm64/ |
D | assembler_arm64.h | 143 return vixl::aarch64::sp; in reg_x()
|
D | jni_macro_assembler_arm64.cc | 457 ___ Str(scratch, MEM_OP(sp, fr_offs.Int32Value())); in CopyRawPtrFromThread()
|
D | managed_register_arm64_test.cc | 632 EXPECT_TRUE(vixl::aarch64::sp.Is(Arm64Assembler::reg_x(SP))); in TEST()
|
/art/runtime/arch/arm64/ |
D | context_arm64.cc | 29 #define __hwasan_handle_longjmp(sp) argument
|
/art/runtime/entrypoints/ |
D | entrypoint_utils.h | 197 ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
|
/art/test/510-checker-try-catch/smali/ |
D | Runtime.smali |
    393 ## CHECK-NEXT: ParallelMove moves:[{{.*->}}{{\d+}}(sp)]
    395 ## CHECK-NEXT: ParallelMove moves:[{{.*->}}{{\d+}}(sp)]
    443 ## CHECK-NEXT: ParallelMove moves:[{{.*->}}2x{{\d+}}(sp)]
    445 ## CHECK-NEXT: ParallelMove moves:[{{.*->}}2x{{\d+}}(sp)]
|
/art/libnativeloader/ |
D | README.md | 32 with the [VNDK-SP](https://source.android.com/devices/architecture/vndk#sp-hal)
|