/frameworks/rs/driver/runtime/ll64/
  allocation.ll
      6  declare i8* @rsOffset(%struct.rs_allocation* nocapture readonly %a, i32 %sizeOf, i32 %x, i32 %y, i3…
      7  declare i8* @rsOffsetNs(%struct.rs_allocation* nocapture readonly %a, i32 %x, i32 %y, i32 %z)
     38  define void @rsSetElementAtImpl_char(%struct.rs_allocation* nocapture readonly %a, i8 signext %val,…
     39  %1 = tail call i8* @rsOffset(%struct.rs_allocation* %a, i32 1, i32 %x, i32 %y, i32 %z) #2
     40  store i8 %val, i8* %1, align 1, !tbaa !21
     44  define signext i8 @rsGetElementAtImpl_char(%struct.rs_allocation* nocapture readonly %a, i32 %x, i3…
     45  %1 = tail call i8* @rsOffset(%struct.rs_allocation* %a, i32 1, i32 %x, i32 %y, i32 %z) #2
     46  %2 = load i8, i8* %1, align 1, !tbaa !21
     47  ret i8 %2
     52  %1 = tail call i8* @rsOffset(%struct.rs_allocation* %a, i32 2, i32 %x, i32 %y, i32 %z) #2
       [all …]
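A hedged C rendering of the accessor pattern in these matches (rs_allocation is treated as opaque here; the rsOffset signature follows the declare on line 6):

    #include <stdint.h>

    typedef struct rs_allocation rs_allocation;

    /* Returns the raw byte address of element (x, y, z), per the IR declare. */
    extern uint8_t *rsOffset(rs_allocation *a, uint32_t sizeOf,
                             uint32_t x, uint32_t y, uint32_t z);

    /* rsSetElementAtImpl_char: compute the address once, store one byte. */
    void rsSetElementAtImpl_char(rs_allocation *a, int8_t val,
                                 uint32_t x, uint32_t y, uint32_t z) {
        *rsOffset(a, 1, x, y, z) = (uint8_t)val;
    }

    /* rsGetElementAtImpl_char: same address computation, one-byte load. */
    int8_t rsGetElementAtImpl_char(rs_allocation *a,
                                   uint32_t x, uint32_t y, uint32_t z) {
        return (int8_t)*rsOffset(a, 1, x, y, z);
    }

Wider element types pass their own size: the match at line 52 calls rsOffset with i32 2 for the short variant.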
/frameworks/rs/driver/runtime/ll32/
  allocation.ll
      1  target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v…
      4  declare i8* @rsOffset([1 x i32] %a.coerce, i32 %sizeOf, i32 %x, i32 %y, i32 %z)
      5  declare i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z)
     36  define void @rsSetElementAtImpl_char([1 x i32] %a.coerce, i8 signext %val, i32 %x, i32 %y, i32 %z) …
     37  %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 1, i32 %x, i32 %y, i32 %z) #2
     38  store i8 %val, i8* %1, align 1, !tbaa !21
     42  define signext i8 @rsGetElementAtImpl_char([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
     43  %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 1, i32 %x, i32 %y, i32 %z) #2
     44  %2 = load i8, i8* %1, align 1, !tbaa !21
     45  ret i8 %2
       [all …]
/frameworks/compile/slang/lit-tests/padding/
  more_structs.rscript
      7  // CHECK-LL: %struct.char_struct{{(\.[0-9]+)?}} = type { i16, [6 x i8], i64 }
      8  …K-LL: %struct.five_struct{{(\.[0-9]+)?}} = type { i8, [7 x i8], i64, i16, [6 x i8], i64, half, [6 …
     13  // CHECK-LL: define void @.helper_check_char_struct({ i16, [6 x i8], i64 }* nocapture)
     14  // CHECK-LL: [[C_F1_ADDR:%[0-9]+]] = getelementptr inbounds { i16, [6 x i8], i64 }, { i16, [6 x i8]…
     16  // CHECK-LL: [[C_F2_ADDR:%[0-9]+]] = getelementptr inbounds { i16, [6 x i8], i64 }, { i16, [6 x i8]…
     20  …HECK-LL: define void @.helper_check_five_struct({ i8, [7 x i8], i64, i16, [6 x i8], i64, half, [6 …
     21  …lementptr inbounds { i8, [7 x i8], i64, i16, [6 x i8], i64, half, [6 x i8] }, { i8, [7 x i8], i64,…
     22  // CHECK-LL: [[F_F1_VAL:%[0-9]+]] = load i8, i8* [[F_F1_ADDR]]
     23  …lementptr inbounds { i8, [7 x i8], i64, i16, [6 x i8], i64, half, [6 x i8] }, { i8, [7 x i8], i64,…
     25  …lementptr inbounds { i8, [7 x i8], i64, i16, [6 x i8], i64, half, [6 x i8] }, { i8, [7 x i8], i64,…
       [all …]
  small_struct.rscript
      5  // CHECK-LL: %struct.small_struct{{(\.[0-9]+)?}} = type { i32, [4 x i8], i64 }
      8  // CHECK-LL: define void @.helper_checkStruct({ i32, [4 x i8], i64 }* nocapture)
      9  … [[FIELD_I_ADDR:%[0-9]+]] = getelementptr inbounds { i32, [4 x i8], i64 }, { i32, [4 x i8], i64 }*…
     11  … [[FIELD_L_ADDR:%[0-9]+]] = getelementptr inbounds { i32, [4 x i8], i64 }, { i32, [4 x i8], i64 }*…
  small_struct_2.rscript
      5  // CHECK-LL: %struct.small_struct_2{{(\.[0-9]+)?}} = type { i64, i32, [4 x i8] }
      8  // CHECK-LL: define void @.helper_checkStruct({ i64, i32, [4 x i8] }* nocapture)
      9  …ELD_L_ADDR:%[0-9]+]] = getelementptr inbounds { i64, i32, [4 x i8] }, { i64, i32, [4 x i8] }* %0, …
     11  …ELD_I_ADDR:%[0-9]+]] = getelementptr inbounds { i64, i32, [4 x i8] }, { i64, i32, [4 x i8] }* %0, …
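Both small_struct tests pin down the same rule: slang exports struct padding as explicit [N x i8] fields. A hedged reconstruction of the two structs under test on an LP64 target (the C field names are assumptions taken from the FIELD_I/FIELD_L check variables):

    struct small_struct {      /* exported as { i32, [4 x i8], i64 } */
        int  i;                /* offset 0, 4 bytes                  */
                               /* 4 padding bytes -> [4 x i8]        */
        long l;                /* offset 8, needs 8-byte alignment   */
    };

    struct small_struct_2 {    /* exported as { i64, i32, [4 x i8] } */
        long l;                /* offset 0, 8 bytes                  */
        int  i;                /* offset 8, 4 bytes                  */
                               /* 4 tail padding bytes round sizeof
                                  up to 16, a multiple of alignof(long) */
    };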
/frameworks/compile/libbcc/tests/libbcc/
  test_reduce_general_metadata.ll
     46  …i8*] [i8* bitcast (void (<2 x i32>*)* @fz2Init to i8*), i8* bitcast (void ([256 x i32]*, [256 x i3…
     90  %1 = bitcast %struct.MinAndMax.0* %accum to i8*
     91  …tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.MinAndMax.0* @fMMInit.r to …
     96  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0
    241  define internal void @hsgAccum([256 x i32]* nocapture %h, i8 %in) #0 {
    242  %1 = zext i8 %in to i64
  test_reduce_general_cleanup.ll
     29  @.rs.reduce_fn.aiAccum = global i8* bitcast (void (i32*, i32)* @aiAccum to i8*), align 4
     30  @.rs.reduce_fn.dpAccum = global i8* bitcast (void (float*, float, float)* @dpAccum to i8*), align 4
     31  @.rs.reduce_fn.dpSum = global i8* bitcast (void (float*, float*)* @dpSum to i8*), align 4
     32  @.rs.reduce_fn.fMMInit = global i8* bitcast (void (%struct.MinAndMax*)* @fMMInit to i8*), align 4
     33  @.rs.reduce_fn.fMMAccumulator = global i8* bitcast (void (%struct.MinAndMax*, float, i32)* @fMMAccu…
     34  @.rs.reduce_fn.fMMCombiner = global i8* bitcast (void (%struct.MinAndMax*, %struct.MinAndMax*)* @fM…
     35  ….rs.reduce_fn.fMMOutConverter = global i8* bitcast (void (<2 x i32>*, %struct.MinAndMax*)* @fMMOut…
     36  @.rs.reduce_fn.fzInit = global i8* bitcast (void (i32*)* @fzInit to i8*), align 4
     37  @.rs.reduce_fn.fzAccum = global i8* bitcast (void (i32*, i32, i32)* @fzAccum to i8*), align 4
     38  @.rs.reduce_fn.fzCombine = global i8* bitcast (void (i32*, i32*)* @fzCombine to i8*), align 4
       [all …]
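Each .rs.reduce_fn.* global above exports one reduction stage (init/accum/combine/outconverter) as an untyped pointer so the runtime can resolve it by name. A hedged C sketch of the pattern, using aiAccum: the signature void (i32*, i32) comes from the IR, but the body and the C-legal global name are assumptions:

    /* One accumulator stage; signature matches the IR's void (i32*, i32).
     * Body is a guess from the name ("accumulate int"). */
    static void aiAccum(int *accum, int in) {
        *accum += in;
    }

    /* Mirrors: @.rs.reduce_fn.aiAccum = global i8* bitcast (... @aiAccum to i8*).
     * The dotted IR name is not a valid C identifier, hence the rename. */
    void *rs_reduce_fn_aiAccum = (void *)aiAccum;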
  test_slang_version_info.ll
     54  define <4 x i8> @swizzle(<4 x i8> %in) #0 !dbg !4 {
     55  %1 = alloca <4 x i8>, align 4
     56  %result = alloca <4 x i8>, align 4
     57  store <4 x i8> %in, <4 x i8>* %1, align 4
     58  call void @llvm.dbg.declare(metadata <4 x i8>* %1, metadata !27, metadata !28), !dbg !29
     59  call void @llvm.dbg.declare(metadata <4 x i8>* %result, metadata !30, metadata !28), !dbg !31
     60  %2 = load <4 x i8>, <4 x i8>* %1, align 4, !dbg !31
     61  %3 = shufflevector <4 x i8> %2, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>, !dbg !31
     62  store <4 x i8> %3, <4 x i8>* %result, align 4, !dbg !31
     63  %4 = load <4 x i8>, <4 x i8>* %result, align 4, !dbg !32
       [all …]
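The swizzle body above is a single shufflevector with mask <3, 2, 1, 0>, i.e. a lane reversal. A hedged C equivalent (uchar4 is a stand-in struct for the <4 x i8> vector type; the i8 lanes could equally be signed):

    #include <stdint.h>

    typedef struct { uint8_t x, y, z, w; } uchar4;

    /* Reverse the four lanes, matching shufflevector mask <3, 2, 1, 0>. */
    uchar4 swizzle(uchar4 in) {
        uchar4 result = { in.w, in.z, in.y, in.x };
        return result;
    }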
  getelementptr.ll
     31  ; CHECK: load i8*, i8** %input_buf.gep
     33  ; CHECK: load i8*, i8** %out_buf.gep
     47  ; CHECK: load i8*, i8** %out_buf.gep
     53  ; CHECK: load i8*, i8** %input_buf.gep
     55  ; CHECK: load i8*, i8** %input_buf.gep1
  tbaa-through-alloca.ll
     39  %5 = bitcast %struct.int5.0* %agg.result to i8*
     40  %6 = bitcast %struct.int5.0* %in to i8*
     41  …tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* %6, i64 20, i32 4, i1 false), !tbaa.struct !…
     46  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0
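These matches show how a small aggregate return is lowered: a 20-byte memcpy from %in into the sret slot %agg.result, with !tbaa.struct carrying per-field aliasing information. A hedged source-level shape (the function and field names are assumptions; i64 20 suggests five i32 fields, matching the int5 type name):

    typedef struct { int v[5]; } int5;   /* 5 * 4 = 20 bytes */

    int5 pass_through(int5 in) {
        /* Compiles to llvm.memcpy.p0i8.p0i8.i64(dst, src, i64 20, ...). */
        return in;
    }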
  tbaa.ll
     21  ; CHECK: NoAlias: %0 = load {{.*}}, i8** %out_buf.gep, !tbaa {{.*}} <-> store i32 %call.resul…
     22  ; CHECK: NoAlias: %input_buf = load i8*, i8** %input_buf.gep, !tbaa {{.*}} <-> store i32 %cal…
/frameworks/rs/driver/runtime/arch/
  neon.ll
      1  target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v…
     26  declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     30  declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     34  declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    370  define signext i8 @_Z3maxcc(i8 signext %v1, i8 signext %v2) nounwind readnone {
    371  %1 = icmp sgt i8 %v1, %v2
    372  %2 = select i1 %1, i8 %v1, i8 %v2
    373  ret i8 %2
    376  define <2 x i8> @_Z3maxDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
    377  %1 = sext <2 x i8> %v1 to <2 x i32>
       [all …]
  asimd.ll
     26  declare <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
     30  declare <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
    366  define signext i8 @_Z3maxcc(i8 signext %v1, i8 signext %v2) nounwind readnone {
    367  %1 = icmp sgt i8 %v1, %v2
    368  %2 = select i1 %1, i8 %v1, i8 %v2
    369  ret i8 %2
    372  define <2 x i8> @_Z3maxDv2_cS_(<2 x i8> %v1, <2 x i8> %v2) nounwind readnone {
    373  %1 = sext <2 x i8> %v1 to <2 x i32>
    374  %2 = sext <2 x i8> %v2 to <2 x i32>
    376  %4 = trunc <2 x i32> %3 to <2 x i8>
       [all …]
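Both arch variants define the same portable bodies; _Z3maxcc and _Z3maxDv2_cS_ demangle to max(char, char) and max(char2, char2). A hedged C equivalent of the two matched definitions (char2 is a stand-in struct for <2 x i8>):

    #include <stdint.h>

    /* Scalar form: icmp sgt + select. */
    int8_t max_cc(int8_t v1, int8_t v2) {
        return v1 > v2 ? v1 : v2;
    }

    typedef struct { int8_t x, y; } char2;

    /* Vector form: the IR sign-extends to <2 x i32>, takes the max
     * per lane, then truncates back to <2 x i8>. */
    char2 max_c2(char2 v1, char2 v2) {
        char2 r = { v1.x > v2.x ? v1.x : v2.x,
                    v1.y > v2.y ? v1.y : v2.y };
        return r;
    }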
/frameworks/rs/cpu_ref/
  rsCpuIntrinsics_neon_YuvToRGB.S
     34  vmov.i8 d15, #149
     39  vmov.i8 d14, #50
     40  vmov.i8 d15, #104
     52  vmov.i8 d14, #204
     53  vmov.i8 d15, #254
     96  vmov.i8 q3, #0xff
    137  vmov.i8 q8, #0
    138  vmov.i8 q10, #0
  rsCpuIntrinsics_neon_Blend.S
     61  vmov.i8 q0, #0
     62  vmov.i8 q1, #0
     63  vmov.i8 q2, #0
     64  vmov.i8 q3, #0
    490  vmov.i8 q0, #0
    491  vmov.i8 q1, #0
    492  vmov.i8 q2, #0
    493  vmov.i8 q3, #0
    495  vmov.i8 q8, #0
    496  vmov.i8 q9, #0
       [all …]
/frameworks/rs/script_api/
  rs_convert.spec
     28  t: u8, u16, u32, i8, i16, i32, f32
     29  t: u8, u16, u32, i8, i16, i32, f32
     61  t: u8, u16, u32, i8, i16, i32, f32
     70  t: u8, u16, u32, i8, i16, i32, f32
     81  t: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, f64
     90  t: u8, u16, u32, u64, i8, i16, i32, i64, f32, f64
  rs_allocation_create.spec
    174  t: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, f64
    189  t: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, f64
    203  t: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, f64
    217  t: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, f64
    230  t: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, f64
    243  t: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, f64
/frameworks/ml/nn/tools/test_generator/tests/P_vts_internal/
  add_internal.mod.py
     25  i8 = Input("i8", ("TENSOR_FLOAT32", [2]))  # input 0    variable
     51  model.Operation("ADD", i7, i8, act).To(o1)
     62  i8: [0, 0]
/frameworks/ml/nn/tools/test_generator/tests/P_internal/
  add_internal.mod.py
     25  i8 = Input("i8", ("TENSOR_FLOAT32", [2]))  # input 0    variable
     51  model.Operation("ADD", i7, i8, act).To(o1)
     62  i8: [0, 0]
/frameworks/ml/nn/runtime/test/specs/V1_2/
  transpose_conv2d.mod.py
    264  i8 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")  # input 0    variable
    269  Model().Operation("TRANSPOSE_CONV_2D", i8, w8, b8, s8, 1, 2, 2, 0, layout).To(o8)
    273  i8: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
    280  i8: [1, 2, 3, 4],
    282  }).AddNchw(i8, o8, s8, layout).AddVariations("relaxed", quant8, "float16")
/frameworks/ml/nn/runtime/test/specs/V1_0/
  depthwise_conv.mod.py
      6  i8 = Int32Scalar("b8", 0)    variable
     11  model = model.Operation("DEPTHWISE_CONV_2D", i2, i0, i1, i4, i5, i6, i7, i8).To(i3)
/frameworks/ml/nn/runtime/test/specs/V1_1/
  depthwise_conv_relaxed.mod.py
     22  i8 = Int32Scalar("b8", 0)    variable
     27  model = model.Operation("DEPTHWISE_CONV_2D", i2, i0, i1, i4, i5, i6, i7, i8).To(i3)
/frameworks/ml/nn/runtime/test/specs/V1_3/
  transpose_conv2d_quant8_signed.mod.py
    299  i8 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")  # input 0    variable
    304  Model().Operation("TRANSPOSE_CONV_2D", i8, w8, b8, s8, 1, 2, 2, 0, layout).To(o8)
    308  i8: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
    315  i8: [1, 2, 3, 4],
    317  }).AddNchw(i8, o8, s8, layout).AddVariations(quant8_signed, includeDefault=False)
  transpose_quant8_signed.mod.py
    300  i8 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")  # input 0    variable
    305  Model().Operation("TRANSPOSE_CONV_2D", i8, w8, b8, s8, 1, 2, 2, 0, layout).To(o8)
    309  i8: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
    316  i8: [1, 2, 3, 4],
    318  }).AddNchw(i8, o8, s8, layout).AddVariations(quant8_signed, includeDefault=False)
/frameworks/av/media/mtp/
  MtpProperty.cpp
     76  mDefaultValue.u.i8 = defaultValue;              in MtpProperty()
    253  mMinimumValue.u.i8 = min;                       in setFormRange()
    254  mMaximumValue.u.i8 = max;                       in setFormRange()
    255  mStepSize.u.i8 = step;                          in setFormRange()
    308  mEnumValues[i].u.i8 = value;                    in setFormEnum()
    394  buffer += std::to_string(value.u.i8);           in print()
    444  if (!packet.getInt8(value.u.i8)) return false;  in readValue()
    499  packet.putInt8(value.u.i8);                     in writeValue()
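Every access above goes through a u.i8 union arm selected by the property's MTP datatype. A hedged sketch of the value container these call sites imply (only the i8 arm is confirmed by the matches; the other members and the type name are illustrative):

    #include <stdint.h>

    typedef struct {
        union {
            int8_t   i8;    /* MTP INT8: the arm used in these matches */
            uint8_t  u8;    /* remaining arms are assumptions          */
            int16_t  i16;
            uint16_t u16;
            int32_t  i32;
            uint32_t u32;
        } u;
    } MtpValueSketch;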