/art/compiler/utils/ |
D | assembler_thumb_test.cc |
      74  #define __ assembler.   macro
      77  __ FinalizeCode();   in EmitAndCheck()
      78  size_t cs = __ CodeSize();   in EmitAndCheck()
      81  __ FinalizeInstructions(code);   in EmitAndCheck()
      86  #undef __
      88  #define __ assembler.   macro
     122  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs);   in TEST_F()
     129  __ Store(mr_conv->CurrentParamStackOffset(), mr_conv->CurrentParamRegister(), size);   in TEST_F()
     132  __ IncreaseFrameSize(32);   in TEST_F()
     135  __ IncreaseFrameSize(4096);   in TEST_F()
     [all …]
|
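Throughout these hits, `__` is ART's conventional one-character shorthand for whatever object emits instructions: it is defined near the top of a file and #undef'd when done, so code-generation routines read almost like an assembly listing. Below is a minimal, self-contained sketch of the idiom; the `Assembler` class is a made-up stand-in, not ART's real API.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Made-up stand-in for an assembler: it only records bytes.
    class Assembler {
     public:
      void push(int reg)      { Emit(0x50 + reg); }
      void ret()              { Emit(0xc3); }
      size_t CodeSize() const { return buffer_.size(); }
     private:
      void Emit(uint8_t b)    { buffer_.push_back(b); }
      std::vector<uint8_t> buffer_;
    };

    #define __ assembler.   // "__ push(5)" below expands to "assembler.push(5)"

    void EmitStub(Assembler& assembler) {
      __ push(5);
      __ push(3);
      __ ret();
      std::printf("emitted %zu bytes\n", __ CodeSize());
    }

    #undef __               // keep the shorthand from leaking past this file

    int main() {
      Assembler assembler;
      EmitStub(assembler);
      return 0;
    }

The trailing `.` (or `->` in the pointer-based variants further down) is part of the macro body, which is what lets call sites be written as `__ push(5);`.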
/art/compiler/utils/x86/ |
D | jni_macro_assembler_x86.cc |
      51  #define __ asm_.   macro
      70  __ pushl(spill);   in BuildFrame()
      81  __ addl(ESP, Immediate(-adjust));   in BuildFrame()
      85  __ pushl(method_reg.AsX86().AsCpuRegister());   in BuildFrame()
      99  __ addl(ESP, Immediate(adjust));   in RemoveFrame()
     104  __ popl(spill);   in RemoveFrame()
     108  __ ret();   in RemoveFrame()
     117  __ addl(ESP, Immediate(-adjust));   in IncreaseFrameSize()
     140  __ movl(Address(ESP, offs), src.AsCpuRegister());   in Store()
     143  __ movl(Address(ESP, offs), src.AsRegisterPairLow());   in Store()
     [all …]
|
/art/compiler/utils/x86_64/ |
D | jni_macro_assembler_x86_64.cc |
      43  #define __ asm_.   macro
      60  __ pushq(spill.AsCpuRegister());   in BuildFrame()
      71  __ subq(CpuRegister(RSP), Immediate(rest_of_frame));   in BuildFrame()
      81  __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());   in BuildFrame()
      90  __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());   in BuildFrame()
     107  __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));   in RemoveFrame()
     117  __ addq(CpuRegister(RSP), Immediate(offset));   in RemoveFrame()
     123  __ popq(spill.AsCpuRegister());   in RemoveFrame()
     128  __ ret();   in RemoveFrame()
     137  __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));   in IncreaseFrameSize()
     [all …]
|
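In the JNI macro assemblers the shorthand resolves to a member variable (`#define __ asm_.`), so frame setup and teardown read as straight-line instruction sequences. A rough sketch of that shape, using invented mini classes rather than ART's real X86 assembler API:

    #include <cstdint>
    #include <vector>

    // Hypothetical minimal assembler; only what the sketch needs, not ART's API.
    class MiniX86Assembler {
     public:
      void pushl(int reg)        { Record(0x50 + reg); }
      void popl(int reg)         { Record(0x58 + reg); }
      void addl_esp(int32_t imm) { Record(0x83); Record(imm); }
      void ret()                 { Record(0xc3); }
     private:
      void Record(int32_t v)     { out_.push_back(v); }
      std::vector<int32_t> out_;
    };

    // As in jni_macro_assembler_x86.cc, '__' names the assembler member so the
    // frame code below reads as a plain instruction sequence.
    #define __ asm_.

    class MiniJniMacroAssembler {
     public:
      void BuildFrame(const std::vector<int>& spills, int32_t adjust) {
        for (int spill : spills) {
          __ pushl(spill);          // spill callee-saves
        }
        __ addl_esp(-adjust);       // reserve the rest of the frame
      }

      void RemoveFrame(const std::vector<int>& spills, int32_t adjust) {
        __ addl_esp(adjust);        // release the frame
        for (auto it = spills.rbegin(); it != spills.rend(); ++it) {
          __ popl(*it);             // restore callee-saves in reverse order
        }
        __ ret();
      }

     private:
      MiniX86Assembler asm_;
    };

    #undef __

    int main() {
      MiniJniMacroAssembler jni_asm;
      jni_asm.BuildFrame({5, 3}, 32);
      jni_asm.RemoveFrame({5, 3}, 32);
      return 0;
    }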
/art/compiler/optimizing/ |
D | intrinsics_x86_64.cc |
      67  #define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->  // NOLINT   macro
      94  __ Bind(GetEntryLabel());   in EmitNativeCode()
      96  __ Bind(&loop);   in EmitNativeCode()
      97  __ movl(CpuRegister(TMP), Address(src_curr_addr, 0));   in EmitNativeCode()
      98  __ MaybeUnpoisonHeapReference(CpuRegister(TMP));   in EmitNativeCode()
     107  __ MaybePoisonHeapReference(CpuRegister(TMP));   in EmitNativeCode()
     108  __ movl(Address(dst_curr_addr, 0), CpuRegister(TMP));   in EmitNativeCode()
     109  __ addl(src_curr_addr, Immediate(element_size));   in EmitNativeCode()
     110  __ addl(dst_curr_addr, Immediate(element_size));   in EmitNativeCode()
     111  __ cmpl(src_curr_addr, src_stop_addr);   in EmitNativeCode()
     [all …]
|
D | intrinsics_x86.cc |
      69  #define __ down_cast<X86Assembler*>(codegen->GetAssembler())->  // NOLINT   macro
     103  __ Bind(GetEntryLabel());   in EmitNativeCode()
     115  __ xorl(temp1, temp1);   in EmitNativeCode()
     117  __ Bind(&loop);   in EmitNativeCode()
     122  __ movl(temp2, Address(src, temp1, ScaleFactor::TIMES_4, adjusted_offset));   in EmitNativeCode()
     124  __ leal(temp2, Address(src_pos.AsRegister<Register>(), temp1, ScaleFactor::TIMES_1, 0));   in EmitNativeCode()
     125  __ movl(temp2, Address(src, temp2, ScaleFactor::TIMES_4, offset));   in EmitNativeCode()
     127  __ MaybeUnpoisonHeapReference(temp2);   in EmitNativeCode()
     140  __ MaybePoisonHeapReference(temp2);   in EmitNativeCode()
     145  __ movl(Address(dest, temp1, ScaleFactor::TIMES_4, adjusted_offset), temp2);   in EmitNativeCode()
     [all …]
|
D | intrinsics_arm_vixl.cc |
      39  #define __ assembler->GetVIXLAssembler()->   macro
      89  __ Add(base, array, element_size * constant + data_offset);   in GenSystemArrayCopyBaseAddress()
      91  __ Add(base, array, Operand(RegisterFrom(pos), vixl32::LSL, element_size_shift));   in GenSystemArrayCopyBaseAddress()
      92  __ Add(base, base, data_offset);   in GenSystemArrayCopyBaseAddress()
     111  __ Add(end, base, element_size * constant);   in GenSystemArrayCopyEndAddress()
     113  __ Add(end, base, Operand(RegisterFrom(copy_length), vixl32::LSL, element_size_shift));   in GenSystemArrayCopyEndAddress()
     147  __ Bind(GetEntryLabel());   in EmitNativeCode()
     152  __ Bind(&loop);   in EmitNativeCode()
     153  __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));   in EmitNativeCode()
     180  __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));   in EmitNativeCode()
     [all …]
|
D | intrinsics_arm64.cc |
      81  #define __ codegen->GetVIXLAssembler()->   macro
     109  __ Bind(GetEntryLabel());   in EmitNativeCode()
     111  __ Bind(&slow_copy_loop);   in EmitNativeCode()
     112  __ Ldr(tmp_reg, MemOperand(src_curr_addr, element_size, PostIndex));   in EmitNativeCode()
     139  __ Str(tmp_reg, MemOperand(dst_curr_addr, element_size, PostIndex));   in EmitNativeCode()
     140  __ Cmp(src_curr_addr, src_stop_addr);   in EmitNativeCode()
     141  __ B(&slow_copy_loop, ne);   in EmitNativeCode()
     142  __ B(GetExitLabel());   in EmitNativeCode()
     152  #undef __
     163  #define __ masm->   macro
     [all …]
|
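The intrinsics files route the shorthand through the code generator instead: the macro body ends in `->` (e.g. `down_cast<X86_64Assembler*>(codegen->GetAssembler())->` on x86, `codegen->GetVIXLAssembler()->` on ARM), so one definition works inside slow-path classes that only hold a `codegen` pointer, and it is #undef'd and redefined when a different receiver is wanted (the `masm->` redefinition above). A stripped-down sketch of the pointer variant, with invented class names and a plain static_cast standing in for ART's checked down_cast<>:

    #include <cstdio>

    // Invented stand-ins: a base assembler, an x86-like subclass, and a
    // codegen that owns it. ART's real classes are much richer.
    class Assembler {
     public:
      virtual ~Assembler() {}
    };

    class X86LikeAssembler : public Assembler {
     public:
      void movl() { std::puts("movl"); }
      void ret()  { std::puts("ret"); }
    };

    class Codegen {
     public:
      Assembler* GetAssembler() { return &assembler_; }
     private:
      X86LikeAssembler assembler_;
    };

    // Because the macro body ends in "->", "__ movl()" works anywhere a
    // suitable 'codegen' pointer is in scope, e.g. inside slow-path code.
    #define __ static_cast<X86LikeAssembler*>(codegen->GetAssembler())->

    void EmitSlowPath(Codegen* codegen) {
      __ movl();
      __ ret();
    }

    #undef __

    int main() {
      Codegen codegen;
      EmitSlowPath(&codegen);
      return 0;
    }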
D | code_generator_vector_x86.cc |
      26  #define __ down_cast<X86Assembler*>(GetAssembler())->  // NOLINT   macro
      69  cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst);   in VisitVecReplicateScalar()
      78  __ movd(dst, locations->InAt(0).AsRegister<Register>());   in VisitVecReplicateScalar()
      79  __ punpcklbw(dst, dst);   in VisitVecReplicateScalar()
      80  __ punpcklwd(dst, dst);   in VisitVecReplicateScalar()
      81  __ pshufd(dst, dst, Immediate(0));   in VisitVecReplicateScalar()
      86  __ movd(dst, locations->InAt(0).AsRegister<Register>());   in VisitVecReplicateScalar()
      87  __ punpcklwd(dst, dst);   in VisitVecReplicateScalar()
      88  __ pshufd(dst, dst, Immediate(0));   in VisitVecReplicateScalar()
      92  __ movd(dst, locations->InAt(0).AsRegister<Register>());   in VisitVecReplicateScalar()
     [all …]
|
D | code_generator_x86.cc |
      71  #define __ down_cast<X86Assembler*>(codegen->GetAssembler())->  // NOLINT   macro
      80  __ Bind(GetEntryLabel());   in EmitNativeCode()
     106  __ Bind(GetEntryLabel());   in EmitNativeCode()
     125  __ Bind(GetEntryLabel());   in EmitNativeCode()
     127  __ negl(reg_);   in EmitNativeCode()
     129  __ movl(reg_, Immediate(0));   in EmitNativeCode()
     131  __ jmp(GetExitLabel());   in EmitNativeCode()
     149  __ Bind(GetEntryLabel());   in EmitNativeCode()
     173  __ movl(length_loc.AsRegister<Register>(), array_len);   in EmitNativeCode()
     175  __ shrl(length_loc.AsRegister<Register>(), Immediate(1));   in EmitNativeCode()
     [all …]
|
D | code_generator_x86_64.cc |
      70  #define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->  // NOLINT   macro
      79  __ Bind(GetEntryLabel());   in EmitNativeCode()
     105  __ Bind(GetEntryLabel());   in EmitNativeCode()
     124  __ Bind(GetEntryLabel());   in EmitNativeCode()
     127  __ negl(cpu_reg_);   in EmitNativeCode()
     129  __ xorl(cpu_reg_, cpu_reg_);   in EmitNativeCode()
     135  __ negq(cpu_reg_);   in EmitNativeCode()
     137  __ xorl(cpu_reg_, cpu_reg_);   in EmitNativeCode()
     140  __ jmp(GetExitLabel());   in EmitNativeCode()
     160  __ Bind(GetEntryLabel());   in EmitNativeCode()
     [all …]
|
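code_generator_x86.cc and code_generator_x86_64.cc also show the slow-path protocol these macros serve: each slow path binds its entry label at the start of EmitNativeCode(), emits the out-of-line work (negate, zero the register, reload a length, ...), and jumps back to an exit label bound in the main code stream. A schematic sketch of that control flow with invented Label/Assembler types:

    #include <cstdio>
    #include <string>

    // Invented types: just enough to show the entry/exit label protocol.
    struct Label { std::string name; };

    class Assembler {
     public:
      void Bind(Label* label) { std::printf("%s:\n", label->name.c_str()); }
      void jmp(Label* label)  { std::printf("  jmp %s\n", label->name.c_str()); }
      void negl()             { std::printf("  negl\n"); }
    };

    class SlowPath {
     public:
      explicit SlowPath(const char* what)
          : entry_{std::string(what) + "_entry"},
            exit_{std::string(what) + "_exit"} {}
      Label* GetEntryLabel() { return &entry_; }
      Label* GetExitLabel()  { return &exit_; }

      // Mirrors the shape above: bind the entry label, emit the slow work,
      // then jump back to the exit label in the fast path.
      void EmitNativeCode(Assembler* assembler) {
    #define __ assembler->
        __ Bind(GetEntryLabel());
        __ negl();
        __ jmp(GetExitLabel());
    #undef __
      }

     private:
      Label entry_;
      Label exit_;
    };

    int main() {
      Assembler assembler;
      SlowPath div_min_int("div_min_int");
      div_min_int.EmitNativeCode(&assembler);
      // The main code stream binds the exit label right after the fast path.
      assembler.Bind(div_min_int.GetExitLabel());
      return 0;
    }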
D | code_generator_vector_x86_64.cc |
      26  #define __ down_cast<X86_64Assembler*>(GetAssembler())->  // NOLINT   macro
      64  cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst);   in VisitVecReplicateScalar()
      73  __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ false);   in VisitVecReplicateScalar()
      74  __ punpcklbw(dst, dst);   in VisitVecReplicateScalar()
      75  __ punpcklwd(dst, dst);   in VisitVecReplicateScalar()
      76  __ pshufd(dst, dst, Immediate(0));   in VisitVecReplicateScalar()
      81  __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ false);   in VisitVecReplicateScalar()
      82  __ punpcklwd(dst, dst);   in VisitVecReplicateScalar()
      83  __ pshufd(dst, dst, Immediate(0));   in VisitVecReplicateScalar()
      87  __ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ false);   in VisitVecReplicateScalar()
     [all …]
|
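The vector code generators broadcast a scalar into a SIMD register with the movd / punpcklbw / punpcklwd / pshufd sequence seen in VisitVecReplicateScalar(). The same steps can be reproduced with SSE2 intrinsics to see what each one contributes; this is a host-side illustration, not ART code:

    #include <cstdint>
    #include <cstdio>
    #include <emmintrin.h>  // SSE2

    int main() {
      uint8_t value = 0x42;

      // movd: place the scalar in the low 32 bits of an XMM register.
      __m128i v = _mm_cvtsi32_si128(value);
      // punpcklbw dst,dst: duplicate each low byte -> low word holds the byte twice.
      v = _mm_unpacklo_epi8(v, v);
      // punpcklwd dst,dst: duplicate each low word -> low dword holds it four times.
      v = _mm_unpacklo_epi16(v, v);
      // pshufd imm=0: broadcast dword 0 to all four dwords -> 16 copies of the byte.
      v = _mm_shuffle_epi32(v, 0);

      alignas(16) uint8_t lanes[16];
      _mm_store_si128(reinterpret_cast<__m128i*>(lanes), v);
      for (uint8_t lane : lanes) std::printf("%02x ", static_cast<unsigned>(lane));
      std::printf("\n");  // prints "42 " sixteen times
      return 0;
    }

On ARM the equivalent broadcast is a single Dup/Vdup per lane width, as the entries that follow show.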
D | code_generator_vector_arm64_sve.cc |
      41  #define __ GetVIXLAssembler()->   macro
      89  __ Movi(dst.V16B(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
      91  __ Dup(dst.V16B(), InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
      98  __ Movi(dst.V8H(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
     100  __ Dup(dst.V8H(), InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
     106  __ Movi(dst.V4S(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
     108  __ Dup(dst.V4S(), InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
     114  __ Movi(dst.V2D(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
     116  __ Dup(dst.V2D(), XRegisterFrom(src_loc));   in VisitVecReplicateScalar()
     122  __ Fmov(dst.V4S(), src_loc.GetConstant()->AsFloatConstant()->GetValue());   in VisitVecReplicateScalar()
     [all …]
|
D | code_generator_vector_arm64_neon.cc |
      41  #define __ GetVIXLAssembler()->   macro
      89  __ Movi(dst.V16B(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
      91  __ Dup(dst.V16B(), InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
      98  __ Movi(dst.V8H(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
     100  __ Dup(dst.V8H(), InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
     106  __ Movi(dst.V4S(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
     108  __ Dup(dst.V4S(), InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
     114  __ Movi(dst.V2D(), Int64FromLocation(src_loc));   in VisitVecReplicateScalar()
     116  __ Dup(dst.V2D(), XRegisterFrom(src_loc));   in VisitVecReplicateScalar()
     122  __ Fmov(dst.V4S(), src_loc.GetConstant()->AsFloatConstant()->GetValue());   in VisitVecReplicateScalar()
     [all …]
|
D | code_generator_arm_vixl.cc |
      97  #ifdef __
     102  #define __ down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler()->  // NOLINT   macro
     179  __ Vstr(vixl32::SRegister(first), MemOperand(sp, stack_offset));   in SaveContiguousSRegisterList()
     183  __ Vstr(vixl32::SRegister(first++), MemOperand(sp, stack_offset));   in SaveContiguousSRegisterList()
     199  __ Vstr(d_reg, MemOperand(sp, stack_offset));   in SaveContiguousSRegisterList()
     205  __ Add(base, sp, Operand::From(stack_offset));   in SaveContiguousSRegisterList()
     207  __ Vstm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs));   in SaveContiguousSRegisterList()
     213  __ Vstr(vixl32::SRegister(last + 1), MemOperand(sp, stack_offset));   in SaveContiguousSRegisterList()
     228  __ Vldr(vixl32::SRegister(first), MemOperand(sp, stack_offset));   in RestoreContiguousSRegisterList()
     232  __ Vldr(vixl32::SRegister(first++), MemOperand(sp, stack_offset));   in RestoreContiguousSRegisterList()
     [all …]
|
D | code_generator_arm64.cc |
      50  #ifdef __
     163  #define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->  // NOLINT   macro
     197  __ Add(new_base, base, Operand(spill_offset + core_spill_size));   in SaveRestoreLiveRegistersHelper()
     206  __ StoreCPURegList(core_list, MemOperand(base, spill_offset));   in SaveRestoreLiveRegistersHelper()
     207  __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));   in SaveRestoreLiveRegistersHelper()
     209  __ LoadCPURegList(core_list, MemOperand(base, spill_offset));   in SaveRestoreLiveRegistersHelper()
     210  __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));   in SaveRestoreLiveRegistersHelper()
     256  __ Bind(GetEntryLabel());   in EmitNativeCode()
     292  __ Bind(GetEntryLabel());   in EmitNativeCode()
     321  __ Bind(GetEntryLabel());   in EmitNativeCode()
     [all …]
|
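code_generator_arm_vixl.cc and code_generator_arm64.cc additionally guard the definition with an `#ifdef __` check before redefining the name; the guarded branch is elided in the listing, but an #error is the usual response, and the `#undef __` hits elsewhere serve the same hygiene. A minimal sketch of guard, definition, and release, with invented names:

    #include <cstdio>

    class Assembler {
     public:
      void Nop() { std::puts("nop"); }
    };

    // Refuse to define the shorthand if some earlier include already did;
    // the listing elides the guarded branch, but an #error is a typical choice.
    #ifdef __
    #error "__ macro already defined."
    #endif
    #define __ GetVIXLAssembler()->

    class MiniCodeGenerator {
     public:
      Assembler* GetVIXLAssembler() { return &assembler_; }
      void VisitNop() { __ Nop(); }   // expands to GetVIXLAssembler()->Nop()
     private:
      Assembler assembler_;
    };

    #undef __   // release the name at the end of the file, as the entries above do

    int main() {
      MiniCodeGenerator gen;
      gen.VisitNop();
      return 0;
    }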
D | code_generator_vector_arm_vixl.cc |
      34  #define __ GetVIXLAssembler()->   macro
      62  __ Vdup(Untyped8, dst, InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
      67  __ Vdup(Untyped16, dst, InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
      71  __ Vdup(Untyped32, dst, InputRegisterAt(instruction, 0));   in VisitVecReplicateScalar()
      98  __ Vmov(OutputRegister(instruction), DRegisterLane(src, 0));   in VisitVecExtractScalar()
     143  __ Vpadd(DataTypeValue::I32, dst, src, src);   in VisitVecReduce()
     146  __ Vpmin(DataTypeValue::S32, dst, src, src);   in VisitVecReduce()
     149  __ Vpmax(DataTypeValue::S32, dst, src, src);   in VisitVecReduce()
     179  __ Vneg(DataTypeValue::S8, dst, src);   in VisitVecNeg()
     184  __ Vneg(DataTypeValue::S16, dst, src);   in VisitVecNeg()
     [all …]
|
D | optimizing_cfi_test.cc |
     193  #define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \   in TEST_ISA() macro
     196  __ CompareAndBranchIfZero(r0, &target);   in TEST_ISA()
     199  __ Ldr(r0, vixl32::MemOperand(r0));   in TEST_ISA()
     201  __ Bind(&target);   in TEST_ISA()
     202  #undef __   in TEST_ISA()
|
/art/runtime/hprof/ |
D | hprof.cc |
     433  #define __ output_->   macro
     569  __ AddU4(sn);   in WriteClassTable()
     570  __ AddObjectId(c);   in WriteClassTable()
     571  __ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(c));   in WriteClassTable()
     572  __ AddStringId(LookupClassNameId(c));   in WriteClassTable()
     587  __ AddU4(id);   in WriteStringTable()
     588  __ AddUtf8String(string.c_str());   in WriteStringTable()
     664  __ AddU1List(reinterpret_cast<const uint8_t*>(magic), sizeof(magic));   in WriteFixedHeader()
     671  __ AddU4(sizeof(uint32_t));   in WriteFixedHeader()
     679  __ AddU4(static_cast<uint32_t>(nowMs >> 32));   in WriteFixedHeader()
     [all …]
|
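hprof.cc reuses the trick outside code generation: `__` stands for the output object (`#define __ output_->`), so heap-dump records are written as flat sequences of Add* calls. A simplified sketch of that style of record writer; the method names and field widths here are illustrative, not the hprof format specification:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <vector>

    // Illustrative output buffer; ART's hprof output class is more involved.
    class ByteOutput {
     public:
      void AddU1(uint8_t v) { bytes_.push_back(v); }
      void AddU4(uint32_t v) {          // big-endian, as dump formats often are
        for (int shift = 24; shift >= 0; shift -= 8) {
          bytes_.push_back(static_cast<uint8_t>(v >> shift));
        }
      }
      void AddUtf8String(const char* s) {
        bytes_.insert(bytes_.end(), s, s + std::strlen(s));
      }
      size_t Size() const { return bytes_.size(); }
     private:
      std::vector<uint8_t> bytes_;
    };

    #define __ output_->

    class RecordWriter {
     public:
      explicit RecordWriter(ByteOutput* output) : output_(output) {}
      void WriteStringRecord(uint32_t id, const std::string& s) {
        __ AddU4(id);                   // record id, then the payload
        __ AddUtf8String(s.c_str());
      }
     private:
      ByteOutput* output_;
    };

    #undef __

    int main() {
      ByteOutput output;
      RecordWriter writer(&output);
      writer.WriteStringRecord(1, "java.lang.Object");
      std::printf("wrote %zu bytes\n", output.Size());
      return 0;
    }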
/art/compiler/jni/quick/ |
D | jni_compiler.cc |
      49  #define __ jni_asm->   macro
     228  __ BuildFrame(current_frame_size, method_register, callee_save_regs);   in ArtJniCompileMethodInternal()
     239  __ Store(mr_conv->CurrentParamStackOffset(), mr_conv->CurrentParamRegister(), size);   in ArtJniCompileMethodInternal()
     249  __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),   in ArtJniCompileMethodInternal()
     252  __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(),   in ArtJniCompileMethodInternal()
     254  __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset<kPointerSize>(),   in ArtJniCompileMethodInternal()
     266  __ CopyRef(handle_scope_offset,   in ArtJniCompileMethodInternal()
     294  __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());   in ArtJniCompileMethodInternal()
     295  __ StoreRef(handle_scope_offset, in_reg);   in ArtJniCompileMethodInternal()
     298  __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());   in ArtJniCompileMethodInternal()
     [all …]
|
/art/compiler/trampolines/ |
D | trampoline_compiler.cc |
      39  #define __ assembler.   macro
      77  __ FinalizeCode();   in CreateTrampoline()
      78  size_t cs = __ CodeSize();   in CreateTrampoline()
      81  __ FinalizeInstructions(code);   in CreateTrampoline()
      99  __ JumpTo(Arm64ManagedRegister::FromXRegister(X0), Offset(offset.Int32Value()),   in CreateTrampoline()
     104  __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),   in CreateTrampoline()
     108  __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),   in CreateTrampoline()
     113  __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),   in CreateTrampoline()
     119  __ FinalizeCode();   in CreateTrampoline()
     120  size_t cs = __ CodeSize();   in CreateTrampoline()
     [all …]
|
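trampoline_compiler.cc also shows how every one of these emission pipelines ends: finalize the code, ask for its size, allocate a buffer of that size, and relocate the instructions into it. A schematic of that finalize-and-copy flow with a toy assembler; MemoryRegion here is just a pointer/size pair, not ART's class:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Toy pointer/size pair standing in for ART's MemoryRegion.
    struct MemoryRegion {
      uint8_t* pointer;
      size_t size;
    };

    // Toy assembler: FinalizeCode() would normally patch branches and the
    // like; here it only marks the buffer complete.
    class Assembler {
     public:
      void jmp_through(uint32_t offset) {
        buffer_.push_back(0xff);
        buffer_.push_back(static_cast<uint8_t>(offset));
      }
      void FinalizeCode() { finalized_ = true; }
      size_t CodeSize() const { return buffer_.size(); }
      void FinalizeInstructions(const MemoryRegion& region) {
        std::memcpy(region.pointer, buffer_.data(), buffer_.size());
      }
     private:
      std::vector<uint8_t> buffer_;
      bool finalized_ = false;
    };

    #define __ assembler.

    std::vector<uint8_t> CreateTrampoline(uint32_t entry_point_offset) {
      Assembler assembler;
      __ jmp_through(entry_point_offset);   // the trampoline body itself

      // The finalize-and-copy sequence mirrored from the listing above.
      __ FinalizeCode();
      size_t cs = __ CodeSize();
      std::vector<uint8_t> entry_stub(cs);
      MemoryRegion code = {entry_stub.data(), cs};
      __ FinalizeInstructions(code);
      return entry_stub;
    }

    #undef __

    int main() {
      std::vector<uint8_t> stub = CreateTrampoline(16);
      std::printf("trampoline is %zu bytes\n", stub.size());
      return 0;
    }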
/art/runtime/ |
D | runtime_intrinsics.cc |
      95  #define IS_INTRINSIC_INITIALIZED(Name, InvokeType, _, __, ___, ClassName, MethodName, Signature) \   in AreAllIntrinsicsInitialized() argument
     113  #define INITIALIZE_INTRINSIC(Name, InvokeType, _, __, ___, ClassName, MethodName, Signature) \   in InitializeIntrinsics() argument
|
D | method_handles.cc |
      81  #define CASE_PRIMITIVE(primitive, _, java_name, __) \   in GetBoxedPrimitiveClass() argument
|
/art/build/ |
D | Android.cpplint.mk |
      55  art_cpplint_touch := $$(OUT_CPPLINT)/$$(subst /,__,$$(art_cpplint_file))
|
/art/dex2oat/driver/ |
D | compiler_driver.cc |
    1074  #define ADD_INTRINSIC_OWNER_CLASS(_, __, ___, ____, _____, ClassName, ______, _______) \   in AddClassesContainingIntrinsics() argument
|
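The hits in runtime_intrinsics.cc, method_handles.cc, and compiler_driver.cc are a different use of the name altogether: there `_`, `__`, `___` (and so on) are throwaway parameter names for X-macro columns that a particular expansion does not need, which is why the indexer tags them "argument" rather than "macro". (The Android.cpplint.mk hit is different again: `__` is merely the text substituted for `/` when flattening a source path into a touch-file name.) A small sketch of the placeholder-parameter pattern; the intrinsic table below is invented, not ART's:

    #include <cstdio>

    // Invented intrinsic table in the X-macro style: each row carries several
    // columns, and each expansion names only the columns it cares about.
    #define INTRINSICS_LIST(V)                                                  \
      V(MathAbsInt, kStatic, kNoSideEffects, kNoThrow, "java.lang.Math", "abs") \
      V(MathMaxInt, kStatic, kNoSideEffects, kNoThrow, "java.lang.Math", "max")

    // This expansion only needs the class and method names, so the unused
    // columns get throwaway parameter names: _, __, ___ (as in the hits above).
    #define PRINT_INTRINSIC_OWNER(Name, _, __, ___, ClassName, MethodName) \
      std::printf("%s.%s\n", ClassName, MethodName);

    int main() {
      INTRINSICS_LIST(PRINT_INTRINSIC_OWNER)
      return 0;
    }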