
Searched refs:imm (Results 1 – 25 of 25) sorted by relevance

/art/test/442-checker-constant-folding/src/
Main.java
1396 long imm = 33L; in ReturnInt33() local
1397 return (int) imm; in ReturnInt33()
1413 float imm = 1.0e34f; in ReturnIntMax() local
1414 return (int) imm; in ReturnIntMax()
1430 double imm = Double.NaN; in ReturnInt0() local
1431 return (int) imm; in ReturnInt0()
1447 int imm = 33; in ReturnLong33() local
1448 return (long) imm; in ReturnLong33()
1464 float imm = 34.0f; in ReturnLong34() local
1465 return (long) imm; in ReturnLong34()
[all …]
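
The Main.java checker tests above fold (int) and (long) casts of constant values. Below is a minimal C++ sketch of the Java narrowing rule the float cases verify (NaN folds to 0, out-of-range values saturate, in-range values truncate toward zero); FoldFloatToInt is a hypothetical helper, not ART's actual constant-folding code.

#include <cmath>
#include <cstdint>
#include <limits>

// Hypothetical helper mirroring the Java semantics checked by ReturnInt0()
// (NaN -> 0) and ReturnIntMax() (1.0e34f -> Integer.MAX_VALUE).
int32_t FoldFloatToInt(float value) {
  if (std::isnan(value)) {
    return 0;
  }
  if (value >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
    return std::numeric_limits<int32_t>::max();
  }
  if (value <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
    return std::numeric_limits<int32_t>::min();
  }
  return static_cast<int32_t>(value);  // In-range values truncate toward zero.
}
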
/art/compiler/optimizing/
scheduler_arm64.cc
94 int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant()); in VisitDiv() local
95 if (imm == 0) { in VisitDiv()
98 } else if (imm == 1 || imm == -1) { in VisitDiv()
101 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in VisitDiv()
105 DCHECK(imm <= -2 || imm >= 2); in VisitDiv()
162 int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant()); in VisitRem() local
163 if (imm == 0) { in VisitRem()
166 } else if (imm == 1 || imm == -1) { in VisitRem()
169 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in VisitRem()
173 DCHECK(imm <= -2 || imm >= 2); in VisitRem()
scheduler_arm.cc
815 void SchedulingLatencyVisitorARM::HandleDivRemConstantIntegralLatencies(int32_t imm) { in HandleDivRemConstantIntegralLatencies() argument
816 if (imm == 0) { in HandleDivRemConstantIntegralLatencies()
819 } else if (imm == 1 || imm == -1) { in HandleDivRemConstantIntegralLatencies()
821 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in HandleDivRemConstantIntegralLatencies()
836 int32_t imm = Int32ConstantFrom(rhs->AsConstant()); in VisitDiv() local
837 HandleDivRemConstantIntegralLatencies(imm); in VisitDiv()
899 int32_t imm = Int32ConstantFrom(rhs->AsConstant()); in VisitRem() local
900 HandleDivRemConstantIntegralLatencies(imm); in VisitRem()
code_generator_x86_64.cc
3481 Immediate imm(second.GetConstant()->AsIntConstant()->GetValue()); in VisitSub() local
3482 __ subl(first.AsRegister<CpuRegister>(), imm); in VisitSub()
3584 Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); in VisitMul() local
3585 __ imull(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), imm); in VisitMul()
3742 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemOneOrMinusOne() local
3744 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3752 if (imm == -1) { in DivRemOneOrMinusOne()
3764 if (imm == -1) { in DivRemOneOrMinusOne()
3780 int64_t imm = Int64FromConstant(second.GetConstant()); in RemByPowerOfTwo() local
3781 DCHECK(IsPowerOfTwo(AbsOrMin(imm))); in RemByPowerOfTwo()
[all …]
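
DivRemOneOrMinusOne and RemByPowerOfTwo above handle the two easy constant-divisor cases: a divisor of +/-1 needs no division at all, and a power-of-two divisor reduces to masking. A sketch of the remainder arithmetic in plain C++ follows (not the x86-64 sequence the backend emits); RemByPowerOfTwoSketch is a hypothetical name.

#include <cstdint>

// Signed x % (1 << k) without a division, for k in [1, 30]; the bias keeps
// truncated-remainder semantics (result takes the sign of the dividend).
int32_t RemByPowerOfTwoSketch(int32_t x, uint32_t k) {
  int32_t mask = (1 << k) - 1;
  int32_t bias = (x >> 31) & mask;  // mask when x is negative, 0 otherwise
  return ((x + bias) & mask) - bias;
}
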
scheduler_arm.h
126 void HandleDivRemConstantIntegralLatencies(int32_t imm);
code_generator_arm64.cc
3056 int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1)); in FOR_EACH_CONDITION_INSTRUCTION() local
3057 uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm)); in FOR_EACH_CONDITION_INSTRUCTION()
3103 if (imm > 0) { in FOR_EACH_CONDITION_INSTRUCTION()
3158 int64_t imm = Int64FromConstant(second.GetConstant()); in GenerateInt64DivRemWithAnyConstant() local
3162 CalculateMagicAndShiftForDivRem(imm, /* is_long= */ true, &magic, &shift); in GenerateInt64DivRemWithAnyConstant()
3181 if (NeedToAddDividend(magic, imm)) { in GenerateInt64DivRemWithAnyConstant()
3184 } else if (NeedToSubDividend(magic, imm)) { in GenerateInt64DivRemWithAnyConstant()
3195 GenerateResultRemWithAnyConstant(out, dividend, temp, imm, &temps); in GenerateInt64DivRemWithAnyConstant()
3212 int64_t imm = Int64FromConstant(second.GetConstant()); in GenerateInt32DivRemWithAnyConstant() local
3216 CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift); in GenerateInt32DivRemWithAnyConstant()
[all …]
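
GenerateInt64DivRemWithAnyConstant and CalculateMagicAndShiftForDivRem above implement division by an arbitrary constant as a multiply by a precomputed magic number plus a shift. A worked example of the underlying identity, using the well-known 32-bit unsigned magic for dividing by 3 rather than values produced by ART's helper:

#include <cassert>
#include <cstdint>

// For every uint32_t x:  x / 3 == (x * 0xAAAAAAAB) >> 33, where
// 0xAAAAAAAB == ceil(2^33 / 3).  The signed codegen above adds the
// corrections guarded by NeedToAddDividend()/NeedToSubDividend().
uint32_t DivideBy3(uint32_t x) {
  return static_cast<uint32_t>((static_cast<uint64_t>(x) * 0xAAAAAAABull) >> 33);
}

int main() {
  assert(DivideBy3(7) == 2);
  assert(DivideBy3(0xFFFFFFFFu) == 0xFFFFFFFFu / 3);
  return 0;
}
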
code_generator_x86.cc
3415 Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); in VisitMul() local
3416 __ imull(out.AsRegister<Register>(), first.AsRegister<Register>(), imm); in VisitMul()
3657 int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); in DivRemOneOrMinusOne() local
3659 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3665 if (imm == -1) { in DivRemOneOrMinusOne()
3678 int32_t imm = Int64FromConstant(second.GetConstant()); in RemByPowerOfTwo() local
3679 DCHECK(IsPowerOfTwo(AbsOrMin(imm))); in RemByPowerOfTwo()
3680 uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm)); in RemByPowerOfTwo()
3698 int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); in DivByPowerOfTwo() local
3699 DCHECK(IsPowerOfTwo(AbsOrMin(imm))); in DivByPowerOfTwo()
[all …]
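
DivByPowerOfTwo above uses the standard shift-with-bias rewrite for signed division by 2^k; for a negative divisor the generated code additionally negates the quotient. The arithmetic, sketched in plain C++ under a hypothetical name:

#include <cstdint>

// Truncating signed division by 1 << k, for k in [1, 30]: bias negative
// dividends by 2^k - 1 so the arithmetic shift rounds toward zero.
int32_t DivByPowerOfTwoSketch(int32_t x, uint32_t k) {
  int32_t bias = (x >> 31) & ((1 << k) - 1);
  return (x + bias) >> k;
}
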
code_generator_arm_vixl.cc
4209 int32_t imm = Int32ConstantFrom(second); in DivRemOneOrMinusOne() local
4210 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
4215 if (imm == 1) { in DivRemOneOrMinusOne()
4233 int32_t imm = Int32ConstantFrom(second); in DivRemByPowerOfTwo() local
4234 uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm)); in DivRemByPowerOfTwo()
4237 auto generate_div_code = [this, imm, ctz_imm](vixl32::Register out, vixl32::Register in) { in DivRemByPowerOfTwo()
4239 if (imm < 0) { in DivRemByPowerOfTwo()
4315 int32_t imm = Int32ConstantFrom(second); in GenerateDivRemWithAnyConstant() local
4319 CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift); in GenerateDivRemWithAnyConstant()
4353 if (imm > 0 && IsGEZero(instruction->GetLeft())) { in GenerateDivRemWithAnyConstant()
[all …]
/art/compiler/utils/x86/
assembler_x86.h
330 void pushl(const Immediate& imm);
340 void movl(const Address& dst, const Immediate& imm);
359 void rorl(Register reg, const Immediate& imm);
361 void roll(Register reg, const Immediate& imm);
370 void movb(const Address& dst, const Immediate& imm);
378 void movw(const Address& dst, const Immediate& imm);
537 void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
538 void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
619 void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
620 void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
[all …]
assembler_x86.cc
115 void X86Assembler::pushl(const Immediate& imm) { in pushl() argument
117 if (imm.is_int8()) { in pushl()
119 EmitUint8(imm.value() & 0xFF); in pushl()
122 EmitImmediate(imm); in pushl()
140 void X86Assembler::movl(Register dst, const Immediate& imm) { in movl() argument
143 EmitImmediate(imm); in movl()
168 void X86Assembler::movl(const Address& dst, const Immediate& imm) { in movl() argument
172 EmitImmediate(imm); in movl()
325 void X86Assembler::movb(const Address& dst, const Immediate& imm) { in movb() argument
329 CHECK(imm.is_int8()); in movb()
[all …]
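
The pushl(const Immediate&) overload above emits the one-byte immediate form when the value fits in a signed byte. A standalone sketch of that encoding choice (raw x86 byte emission under a hypothetical name, not the X86Assembler API):

#include <cstdint>
#include <vector>

// push imm8 is 6A ib; push imm32 is 68 id (little-endian immediate).
void EmitPushImmediate(std::vector<uint8_t>* out, int32_t imm) {
  if (imm >= -128 && imm <= 127) {
    out->push_back(0x6A);
    out->push_back(static_cast<uint8_t>(imm & 0xFF));
  } else {
    out->push_back(0x68);
    for (int i = 0; i < 4; ++i) {
      out->push_back(static_cast<uint8_t>((imm >> (8 * i)) & 0xFF));
    }
  }
}
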
jni_macro_assembler_x86.h
62 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
jni_macro_assembler_x86.cc
173 void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) { in StoreImmediateToFrame() argument
174 __ movl(Address(ESP, dest), Immediate(imm)); in StoreImmediateToFrame()
/art/compiler/utils/x86_64/
assembler_x86_64.h
374 void pushq(const Immediate& imm);
390 void movq(const Address& dst, const Immediate& imm);
392 void movl(const Address& dst, const Immediate& imm);
404 void movb(const Address& dst, const Immediate& imm);
412 void movw(const Address& dst, const Immediate& imm);
576 void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
577 void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
657 void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
658 void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
659 void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm);
[all …]
assembler_x86_64.cc
114 void X86_64Assembler::pushq(const Immediate& imm) { in pushq() argument
116 CHECK(imm.is_int32()); // pushq only supports 32b immediate. in pushq()
117 if (imm.is_int8()) { in pushq()
119 EmitUint8(imm.value() & 0xFF); in pushq()
122 EmitImmediate(imm); in pushq()
142 void X86_64Assembler::movq(CpuRegister dst, const Immediate& imm) { in movq() argument
144 if (imm.is_int32()) { in movq()
149 EmitInt32(static_cast<int32_t>(imm.value())); in movq()
153 EmitInt64(imm.value()); in movq()
158 void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) { in movl() argument
[all …]
jni_macro_assembler_x86_64.h
63 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
jni_macro_assembler_x86_64.cc
199 void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) { in StoreImmediateToFrame() argument
200 __ movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq? in StoreImmediateToFrame()
assembler_x86_64_test.cc
113 x86_64::Immediate imm(value); in TEST() local
114 EXPECT_FALSE(imm.is_int8()); in TEST()
115 EXPECT_FALSE(imm.is_int16()); in TEST()
116 EXPECT_FALSE(imm.is_int32()); in TEST()
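
The TEST above exercises the Immediate width predicates; the range check behind is_int8()/is_int16()/is_int32() amounts to a fits-after-sign-extension test, roughly as follows (hypothetical free functions, not the actual art::x86_64::Immediate members):

#include <cstdint>

// A value fits in a sign-extended n-bit immediate iff narrowing and
// re-widening it gives the value back.
bool FitsInt8(int64_t value)  { return value == static_cast<int8_t>(value); }
bool FitsInt16(int64_t value) { return value == static_cast<int16_t>(value); }
bool FitsInt32(int64_t value) { return value == static_cast<int32_t>(value); }
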
/art/compiler/utils/arm/
assembler_arm_vixl.h
151 void Vmov(vixl32::DRegister rd, double imm) { in Vmov() argument
152 if (vixl::VFP::IsImmFP64(imm)) { in Vmov()
153 MacroAssembler::Vmov(rd, imm); in Vmov()
155 MacroAssembler::Vldr(rd, imm); in Vmov()
jni_macro_assembler_arm_vixl.h
69 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
jni_macro_assembler_arm_vixl.cc
313 void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) { in StoreImmediateToFrame() argument
316 asm_.LoadImmediate(scratch, imm); in StoreImmediateToFrame()
/art/compiler/utils/
assembler_test.h
196 for (int64_t imm : imms) { variable
197 ImmType new_imm = CreateImmediate(imm);
218 sreg << imm * multiplier + bias;
252 for (int64_t imm : imms) { in RepeatTemplatedRegistersImmBits() local
253 ImmType new_imm = CreateImmediate(imm); in RepeatTemplatedRegistersImmBits()
280 sreg << imm + bias; in RepeatTemplatedRegistersImmBits()
313 for (int64_t imm : imms) { in RepeatTemplatedImmBitsRegisters() local
314 ImmType new_imm = CreateImmediate(imm); in RepeatTemplatedImmBitsRegisters()
335 sreg << imm; in RepeatTemplatedImmBitsRegisters()
363 for (int64_t imm : imms) { in RepeatTemplatedRegisterImmBits() local
[all …]
jni_macro_assembler.h
119 virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) = 0;
/art/dex2oat/linker/arm/
relative_patcher_thumb2.cc
80 uint32_t imm = (diff16 >> 11) & 0x1u; in PatchPcRelativeReference() local
83 insn = (insn & 0xfbf08f00u) | (imm << 26) | (imm4 << 16) | (imm3 << 12) | imm8; in PatchPcRelativeReference()
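
PatchPcRelativeReference() above splits a 16-bit value across the i (bit 26), imm4 (bits 19:16), imm3 (bits 14:12) and imm8 (bits 7:0) fields of a Thumb2 MOVW/MOVT encoding; the snippet shows only the i bit being extracted from diff16. The full packing, sketched under a hypothetical name:

#include <cstdint>

// Patch a 16-bit immediate into a 32-bit Thumb2 MOVW/MOVT instruction word,
// using the same field layout and mask as the patcher above.
uint32_t PatchMovwMovtImmediate(uint32_t insn, uint32_t value16) {
  uint32_t imm4 = (value16 >> 12) & 0xFu;
  uint32_t i    = (value16 >> 11) & 0x1u;
  uint32_t imm3 = (value16 >> 8) & 0x7u;
  uint32_t imm8 = value16 & 0xFFu;
  return (insn & 0xfbf08f00u) | (i << 26) | (imm4 << 16) | (imm3 << 12) | imm8;
}
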
/art/compiler/utils/arm64/
jni_macro_assembler_arm64.h
71 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
jni_macro_assembler_arm64.cc
165 void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm) { in StoreImmediateToFrame() argument
168 ___ Mov(scratch, imm); in StoreImmediateToFrame()