/frameworks/av/media/libstagefright/codecs/m4v_h263/enc/src/ |
D | sad.cpp |
  113  Int saddata[16], tmp, tmp2;  /* used when collecting flag (global) is on */  in SAD_MB_HTFM_Collect() local
  128  tmp2 = (cur_word >> 24) & 0xFF;  in SAD_MB_HTFM_Collect()
  129  sad = SUB_SAD(sad, tmp, tmp2);  in SAD_MB_HTFM_Collect()
  131  tmp2 = (cur_word >> 16) & 0xFF;  in SAD_MB_HTFM_Collect()
  132  sad = SUB_SAD(sad, tmp, tmp2);  in SAD_MB_HTFM_Collect()
  134  tmp2 = (cur_word >> 8) & 0xFF;  in SAD_MB_HTFM_Collect()
  135  sad = SUB_SAD(sad, tmp, tmp2);  in SAD_MB_HTFM_Collect()
  138  tmp2 = (cur_word & 0xFF);  in SAD_MB_HTFM_Collect()
  139  sad = SUB_SAD(sad, tmp, tmp2);  in SAD_MB_HTFM_Collect()
  143  tmp2 = (cur_word >> 24) & 0xFF;  in SAD_MB_HTFM_Collect()
  [all …]
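In SAD_MB_HTFM_Collect() the current-frame pixels arrive four at a time in a packed 32-bit word; each byte is peeled off and fed, together with the matching reference pixel, to the SUB_SAD macro that grows the running sum of absolute differences. A minimal C++ sketch of that pattern follows; the body of sub_sad and the byte order of the reference array are assumptions, not the encoder's actual macro.

    #include <cstdint>

    // Hedged sketch: SUB_SAD(sad, tmp, tmp2) is taken to add |tmp - tmp2|
    // to the running SAD (assumed body, not the encoder's actual macro).
    static inline int32_t sub_sad(int32_t sad, int32_t tmp, int32_t tmp2)
    {
        int32_t d = tmp - tmp2;
        return sad + (d >= 0 ? d : -d);
    }

    // Usage mirroring the snippet: four current-frame pixels packed in one
    // 32-bit word are peeled off a byte at a time (reference byte order assumed).
    int32_t sad_4pixels(int32_t sad, uint32_t cur_word, const uint8_t ref[4])
    {
        sad = sub_sad(sad, ref[3], (cur_word >> 24) & 0xFF);
        sad = sub_sad(sad, ref[2], (cur_word >> 16) & 0xFF);
        sad = sub_sad(sad, ref[1], (cur_word >> 8)  & 0xFF);
        sad = sub_sad(sad, ref[0],  cur_word        & 0xFF);
        return sad;
    }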
|
D | sad_halfpel.cpp |
  343  Int difmad, tmp, tmp2;  in SAD_MB_HP_HTFM_Collectxhyh() local
  364  tmp2 = p1[13] + p2[13];  in SAD_MB_HP_HTFM_Collectxhyh()
  365  tmp += tmp2;  in SAD_MB_HP_HTFM_Collectxhyh()
  366  tmp2 = (cur_word >> 24) & 0xFF;  in SAD_MB_HP_HTFM_Collectxhyh()
  368  sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;  in SAD_MB_HP_HTFM_Collectxhyh()
  370  tmp2 = p1[9] + p2[9];  in SAD_MB_HP_HTFM_Collectxhyh()
  371  tmp += tmp2;  in SAD_MB_HP_HTFM_Collectxhyh()
  372  tmp2 = (cur_word >> 16) & 0xFF;  in SAD_MB_HP_HTFM_Collectxhyh()
  374  sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;  in SAD_MB_HP_HTFM_Collectxhyh()
  376  tmp2 = p1[5] + p2[5];  in SAD_MB_HP_HTFM_Collectxhyh()
  [all …]
|
D | sad_halfpel_inline.h |
  34  __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)  in INTERP1_SUB_SAD() argument
  36  tmp = (tmp2 >> 1) - tmp;  in INTERP1_SUB_SAD()
  43  __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)  in INTERP2_SUB_SAD() argument
  45  tmp = (tmp >> 2) - tmp2;  in INTERP2_SUB_SAD()
  54  __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
  58  rsbs tmp, tmp, tmp2, asr #1 ;
  66  __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
  70  rsbs tmp, tmp2, tmp, asr #2 ;
  81  __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
  87  register int32 uu = tmp2;
  [all …]
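The portable C branch of sad_halfpel_inline.h first forms the interpolated reference pixel (a two-tap average shifted right by 1 for INTERP1, a four-tap sum shifted right by 2 for INTERP2), subtracts the current pixel, and then accumulates the magnitude into the SAD; the accumulation step itself is cut off in the listing. A hedged completion of the C fallback, with the absolute-value accumulation inferred rather than quoted:

    #include <cstdint>

    // Assumed completion of the C fallback: the listing shows only the
    // subtraction; the absolute-value accumulation below is inferred.
    static inline int32_t interp1_sub_sad(int32_t sad, int32_t tmp, int32_t tmp2)
    {
        tmp = (tmp2 >> 1) - tmp;               // 2-tap half-pel average minus current pixel
        return (tmp > 0) ? sad + tmp : sad - tmp;
    }

    static inline int32_t interp2_sub_sad(int32_t sad, int32_t tmp, int32_t tmp2)
    {
        tmp = (tmp >> 2) - tmp2;               // 4-tap average minus current pixel
        return (tmp > 0) ? sad + tmp : sad - tmp;
    }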
|
D | me_utils.cpp |
  318  Int tmp, tmp2, mask = 0x00FF00FF;  in ComputeMBSum_C() local
  326  tmp2 = tmp & mask;  in ComputeMBSum_C()
  328  tmp += tmp2;  in ComputeMBSum_C()
  331  tmp2 = tmp & mask;  in ComputeMBSum_C()
  333  tmp += tmp2;  in ComputeMBSum_C()
  336  tmp2 = tmp & mask;  in ComputeMBSum_C()
  338  tmp += tmp2;  in ComputeMBSum_C()
  342  tmp2 = tmp & mask;  in ComputeMBSum_C()
  344  tmp += tmp2;  in ComputeMBSum_C()
  348  tmp2 = tmp & mask;  in ComputeMBSum_C()
  [all …]
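ComputeMBSum_C() sums all pixels of a macroblock with a SIMD-within-a-register trick: masking a packed 32-bit word with 0x00FF00FF splits the four bytes into two 16-bit lanes that can be added without carries leaking between pixels. A hedged sketch of the idea (the exact load and accumulate sequence is abbreviated in the listing):

    #include <cstdint>

    // SWAR byte sum: add the four packed pixels of one 32-bit word into an
    // accumulator. The mask isolates bytes 0 and 2; shifting by 8 isolates
    // bytes 1 and 3; both halves then add per 16-bit lane without carry leakage.
    static inline uint32_t add_packed_pixels(uint32_t acc, uint32_t word)
    {
        const uint32_t mask = 0x00FF00FF;
        uint32_t lo = word & mask;             // bytes 0 and 2, each in a 16-bit lane
        uint32_t hi = (word >> 8) & mask;      // bytes 1 and 3, each in a 16-bit lane
        return acc + lo + hi;                  // per-lane partial sums
    }

    // After the whole 16x16 block, fold the two lanes into one total:
    //   sum = (acc & 0xFFFF) + (acc >> 16);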
|
D | dct.cpp |
  47  Int tmp, tmp2;  in BlockDCT_AANwSub() local
  60  tmp2 = *((Int*) pred); /* prediction 4 pixels */  in BlockDCT_AANwSub()
  61  k0 = tmp2 & 0xFF;  in BlockDCT_AANwSub()
  64  k1 = (tmp2 >> 8) & 0xFF;  in BlockDCT_AANwSub()
  67  k2 = (tmp2 >> 16) & 0xFF;  in BlockDCT_AANwSub()
  70  k3 = (tmp2 >> 24) & 0xFF;  in BlockDCT_AANwSub()
  74  tmp2 = *((Int*)(pred + 4));  in BlockDCT_AANwSub()
  75  k4 = tmp2 & 0xFF;  in BlockDCT_AANwSub()
  78  k5 = (tmp2 >> 8) & 0xFF;  in BlockDCT_AANwSub()
  81  k6 = (tmp2 >> 16) & 0xFF;  in BlockDCT_AANwSub()
  [all …]
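BlockDCT_AANwSub() reads four prediction pixels as one packed Int and unpacks them into k0..k7 so the residual (current minus prediction) can be formed before the AAN forward DCT. A hedged sketch of that unpack-and-subtract step, with hypothetical helper and parameter names (the surrounding DCT butterflies are omitted):

    #include <cstdint>
    #include <cstring>

    // Unpack four packed prediction pixels and form one residual row.
    // 'cur' is the current-frame data, 'pred' the motion-compensated
    // prediction; out[] receives cur - pred as DCT input (names hypothetical).
    void residual4(const uint8_t* cur, const uint8_t* pred, int16_t out[4])
    {
        uint32_t tmp2;
        std::memcpy(&tmp2, pred, sizeof(tmp2));   // prediction, 4 pixels (little-endian assumed)

        out[0] = int16_t(int(cur[0]) - int( tmp2        & 0xFF));
        out[1] = int16_t(int(cur[1]) - int((tmp2 >> 8)  & 0xFF));
        out[2] = int16_t(int(cur[2]) - int((tmp2 >> 16) & 0xFF));
        out[3] = int16_t(int(cur[3]) - int((tmp2 >> 24) & 0xFF));
    }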
|
/frameworks/av/media/libstagefright/codecs/mp3dec/src/ |
D | pvmp3_mdct_18.cpp |
  138  int32 tmp2;  in pvmp3_mdct_18() local
  172  tmp2 = vec[10];  // vec[10]  in pvmp3_mdct_18()
  178  vec[ 1] = vec[ 9] - tmp2;  // vec[9] + vec[10]  in pvmp3_mdct_18()
  179  vec[ 3] = vec[11] - tmp2;  in pvmp3_mdct_18()
  190  tmp2 = vec[0];  in pvmp3_mdct_18()
  201  history[i ] = -(tmp2 + tmp1);  in pvmp3_mdct_18()
  202  tmp2 = tmp1;  in pvmp3_mdct_18()
  211  history[6] = -(tmp2 + tmp1);  in pvmp3_mdct_18()
  222  tmp2 = history[16];  in pvmp3_mdct_18()
  227  vec[16] = fxp_mac32_Q32(tmp2, vec[11] << 1, window[16]);  in pvmp3_mdct_18()
  [all …]
|
D | pvmp3_dct_16.cpp |
  157  int32 tmp2;  in pvmp3_dct_16() local
  200  tmp2 = vec[ 2] + vec[13];  in pvmp3_dct_16()
  204  itmp_e2 = (tmp2 + tmp5);  in pvmp3_dct_16()
  205  tmp5 = fxp_mul32_Q32((tmp2 - tmp5), Qfmt_31(0.89997622313642F));  in pvmp3_dct_16()
  275  tmp2 = fxp_mul32_Q32((tmp1 - tmp0) << 1, Qfmt_31(0.54119610014620F));  in pvmp3_dct_16()
  293  tmp6 = fxp_mul32_Q32((tmp2 - tmp5) << 1, Qfmt_31(0.70710678118655F));  in pvmp3_dct_16()
  294  tmp2 += tmp5 + tmp6;  in pvmp3_dct_16()
  295  tmp0 += tmp2;  in pvmp3_dct_16()
  299  tmp2 += tmp4;  in pvmp3_dct_16()
  300  vec[ 5] = tmp2 + vec[ 5];  in pvmp3_dct_16()
  [all …]
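pvmp3_dct_16() is a fixed-point DCT built from add/subtract butterflies; each rotation multiplies the difference path by a Q31 cosine via fxp_mul32_Q32, and the << 1 in the source compensates for the headroom bit lost when only the top 32 bits of the 64-bit product are kept. A hedged sketch of one such butterfly; the helper body below is an assumed behaviour and ignores any rounding the real routine may apply:

    #include <cstdint>

    // Assumed behaviour of fxp_mul32_Q32: keep the high 32 bits of the
    // 64-bit product, i.e. a Q31 x Q31 fractional multiply.
    static inline int32_t fxp_mul32_q32(int32_t a, int32_t b)
    {
        return int32_t(((int64_t)a * b) >> 32);
    }

    // One butterfly in the style of pvmp3_dct_16: the sum path passes
    // through, the difference path is rotated by a Q31 cosine; the << 1
    // restores the bit lost by taking only the top half of the product.
    static inline void butterfly(int32_t& x, int32_t& y, int32_t cos_q31)
    {
        int32_t sum  = x + y;
        int32_t diff = fxp_mul32_q32((x - y) << 1, cos_q31);
        x = sum;
        y = diff;
    }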
|
D | pvmp3_dct_9.cpp |
  130  int32 tmp2 = vec[6] + vec[2];  in pvmp3_dct_9() local
  135  vec[0] = (tmp0 + tmp2 + tmp3) + (tmp1 + vec[4]);  in pvmp3_dct_9()
  136  vec[6] = ((tmp0 + tmp2 + tmp3) >> 1) - (tmp1 + vec[4]);  in pvmp3_dct_9()
  143  vec[2] = fxp_mac32_Q32(vec[2], tmp2 << 1, cos_5pi_9);  in pvmp3_dct_9()
  144  vec[4] = fxp_mac32_Q32(vec[4], tmp2 << 1, cos_8pi_9);  in pvmp3_dct_9()
  145  vec[8] = fxp_mac32_Q32(vec[8], tmp2 << 1, cos_2pi_9);  in pvmp3_dct_9()
|
D | pvmp3_get_scale_factors.cpp |
  200  int32 tmp2 = tmp1 * tmp4;  in pvmp3_get_scale_factors() local
  201  uint32 tmp3 = getNbits(pMainData, tmp2);  in pvmp3_get_scale_factors()
  203  for (; tmp2 > 0; tmp2 -= tmp1)  in pvmp3_get_scale_factors()
  205  *(ptr++) = (tmp3 << (32 - tmp2)) >> tmp4;  in pvmp3_get_scale_factors()
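pvmp3_get_scale_factors() fetches a whole run of equally sized scale factors with a single getNbits() call and then peels the individual fields out of the 32-bit word by shifting each one up to the most significant bits and back down. A hedged, generic sketch of that unpacking idea; the width and count parameters below are illustrative and do not map one-to-one onto tmp1/tmp2/tmp4 in the source:

    #include <cstdint>

    // Unpack 'count' fields of 'width' bits each from a word holding them
    // right-aligned, most significant field first (width * count <= 32).
    void unpack_fields(uint32_t packed, int width, int count, uint32_t* out)
    {
        int remaining = width * count;                   // bits still to consume
        for (; remaining > 0; remaining -= width)
        {
            // Move the next field to the top of the word, then right-align it.
            *out++ = (packed << (32 - remaining)) >> (32 - width);
        }
    }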
|
D | pvmp3_dct_6.cpp |
  119  Int32 tmp2;  in pvmp3_dct_6() local
  131  tmp2 = vec[3] + vec[2];  in pvmp3_dct_6()
  134  vec[0] = tmp0 + tmp2 ;  in pvmp3_dct_6()
  135  vec[2] = fxp_mul32_Q30(tmp0 - tmp2, cos_pi_6);  in pvmp3_dct_6()
|
D | pvmp3_reorder.cpp |
  156  int32 tmp2 = xr[src_line+(sfb_lines)];  in pvmp3_reorder() local
  160  Scratch_mem[freq+1] = tmp2;  in pvmp3_reorder()
  177  int32 tmp2 = xr[src_line+(sfb_lines)];  in pvmp3_reorder() local
  181  Scratch_mem[freq+1] = tmp2;  in pvmp3_reorder()
|
/frameworks/av/media/libstagefright/codecs/amrwb/src/ |
D | pred_lt4.cpp |
  201  int16 tmp2 = pt_exc[i+1];  in Pred_lt4() local
  206  L_sum2 = fxp_mac_16by16(tmp2, pt_inter4_2[i ], L_sum2);  in Pred_lt4()
  207  L_sum1 = fxp_mac_16by16(tmp2, pt_inter4_2[i+1], L_sum1);  in Pred_lt4()
  213  tmp2 = pt_exc[i+4];  in Pred_lt4()
  219  L_sum4 = fxp_mac_16by16(tmp2, pt_inter4_2[i+1], L_sum4);  in Pred_lt4()
  220  L_sum2 = fxp_mac_16by16(tmp2, pt_inter4_2[i+3], L_sum2);  in Pred_lt4()
  221  L_sum3 = fxp_mac_16by16(tmp2, pt_inter4_2[i+2], L_sum3);  in Pred_lt4()
  224  tmp2 = pt_exc[i+6];  in Pred_lt4()
  228  L_sum4 = fxp_mac_16by16(tmp2, pt_inter4_2[i+3], L_sum4);  in Pred_lt4()
  250  int16 tmp2 = pt_exc[i+1];  in Pred_lt4() local
  [all …]
|
D | oversamp_12k8_to_16k.cpp |
  284  int16 tmp1, tmp2, tmp3, tmp4;  in AmrWbInterpol() local
  289  tmp2 = *(pt_x++);  in AmrWbInterpol()
  293  L_sum = fxp_mac_16by16(tmp2, *(pt_fir++), L_sum);  in AmrWbInterpol()
  297  tmp2 = *(pt_x++);  in AmrWbInterpol()
  301  L_sum = fxp_mac_16by16(tmp2, *(pt_fir++), L_sum);  in AmrWbInterpol()
  305  tmp2 = *(pt_x++);  in AmrWbInterpol()
  309  L_sum = fxp_mac_16by16(tmp2, *(pt_fir++), L_sum);  in AmrWbInterpol()
  313  tmp2 = *(pt_x++);  in AmrWbInterpol()
  317  L_sum = fxp_mac_16by16(tmp2, *(pt_fir++), L_sum);  in AmrWbInterpol()
  321  tmp2 = *(pt_x++);  in AmrWbInterpol()
  [all …]
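AmrWbInterpol() is an unrolled FIR convolution: 16-bit samples and coefficients are multiplied and accumulated into a 32-bit sum through fxp_mac_16by16. A hedged sketch of the underlying dot product; the helper is assumed here to be a plain multiply-accumulate, whereas the codec's basic ops may additionally scale and saturate:

    #include <cstdint>

    // Assumed behaviour of fxp_mac_16by16: acc + a * b with a 32-bit product
    // (the ETSI reference L_mac additionally doubles and saturates).
    static inline int32_t fxp_mac_16by16(int16_t a, int16_t b, int32_t acc)
    {
        return acc + (int32_t)a * (int32_t)b;
    }

    // Rolled-up form of the dot product the unrolled encoder loop computes.
    int32_t fir_dot(const int16_t* x, const int16_t* fir, int taps)
    {
        int32_t L_sum = 0;
        for (int k = 0; k < taps; ++k)
        {
            L_sum = fxp_mac_16by16(x[k], fir[k], L_sum);
        }
        return L_sum;
    }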
|
D | isf_extrapolation.cpp |
  122  int16 coeff, mean, tmp, tmp2, tmp3;  in isf_extrapolation() local
  161  tmp2 = sub_int16(IsfDiff[i], mean);  in isf_extrapolation()
  163  L_tmp = mul_16by16_to_int32(tmp2, tmp3);  in isf_extrapolation()
  171  tmp2 = sub_int16(IsfDiff[i], mean);  in isf_extrapolation()
  173  L_tmp = mul_16by16_to_int32(tmp2, tmp3);  in isf_extrapolation()
  181  tmp2 = sub_int16(IsfDiff[i], mean);  in isf_extrapolation()
  183  L_tmp = mul_16by16_to_int32(tmp2, tmp3);  in isf_extrapolation()
  224  tmp2 = sub_int16(HfIsf[M16k - 2], HfIsf[M - 2]);  in isf_extrapolation()
  226  exp2 = norm_s(tmp2);  in isf_extrapolation()
  230  tmp2 <<= exp2;  in isf_extrapolation()
  [all …]
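isf_extrapolation() accumulates products of mean-removed ISF spacing values (sub_int16 against the mean, then mul_16by16_to_int32) to measure how well a candidate pattern matches before extrapolating the high-band ISFs. A hedged sketch of that correlation accumulation; the hypothetical helpers below stand in for the source's sub_int16 and mul_16by16_to_int32, and the exact operands being correlated are an assumption:

    #include <cstdint>

    // Assumed basic ops: 16-bit subtract and 16x16 -> 32-bit multiply
    // (the real versions saturate; omitted here).
    static inline int16_t sub16(int16_t a, int16_t b) { return (int16_t)(a - b); }
    static inline int32_t mul16x16(int16_t a, int16_t b) { return (int32_t)a * (int32_t)b; }

    // Correlation of mean-removed ISF spacings against a reference pattern
    // (which two sequences the source correlates is not visible in the listing).
    int32_t isf_corr(const int16_t* IsfDiff, const int16_t* RefDiff,
                     int16_t mean, int16_t refMean, int n)
    {
        int32_t corr = 0;
        for (int i = 0; i < n; ++i)
        {
            int16_t tmp2 = sub16(IsfDiff[i], mean);
            int16_t tmp3 = sub16(RefDiff[i], refMean);
            corr += mul16x16(tmp2, tmp3);
        }
        return corr;
    }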
|
D | low_pass_filt_7k.cpp |
  174  int16 tmp2 = x[(i<<2)+j+1];  in low_pass_filt_7k() local
  178  L_tmp2 = fxp_mac_16by16(tmp2, fir_7k[j ], L_tmp2);  in low_pass_filt_7k()
  179  L_tmp1 = fxp_mac_16by16(tmp2, fir_7k[j+1], L_tmp1);  in low_pass_filt_7k()
  185  tmp2 = x[(i<<2)+j+4];  in low_pass_filt_7k()
  191  L_tmp2 = fxp_mac_16by16(tmp2, fir_7k[j+3], L_tmp2);  in low_pass_filt_7k()
  192  L_tmp4 = fxp_mac_16by16(tmp2, fir_7k[j+1], L_tmp4);  in low_pass_filt_7k()
  193  L_tmp3 = fxp_mac_16by16(tmp2, fir_7k[j+2], L_tmp3);  in low_pass_filt_7k()
  196  tmp2 = x[(i<<2)+j+6];  in low_pass_filt_7k()
  200  L_tmp4 = fxp_mac_16by16(tmp2, fir_7k[j+3], L_tmp4);  in low_pass_filt_7k()
|
D | band_pass_6k_7k.cpp |
  194  int16 tmp2 = x[(i<<2)+j+1];  in band_pass_6k_7k() local
  198  L_tmp2 = fxp_mac_16by16(tmp2, fir_6k_7k[j ], L_tmp2);  in band_pass_6k_7k()
  199  L_tmp1 = fxp_mac_16by16(tmp2, fir_6k_7k[j+1], L_tmp1);  in band_pass_6k_7k()
  205  tmp2 = x[(i<<2)+j+4];  in band_pass_6k_7k()
  211  L_tmp2 = fxp_mac_16by16(tmp2, fir_6k_7k[j+3], L_tmp2);  in band_pass_6k_7k()
  212  L_tmp4 = fxp_mac_16by16(tmp2, fir_6k_7k[j+1], L_tmp4);  in band_pass_6k_7k()
  213  L_tmp3 = fxp_mac_16by16(tmp2, fir_6k_7k[j+2], L_tmp3);  in band_pass_6k_7k()
  216  tmp2 = x[(i<<2)+j+6];  in band_pass_6k_7k()
  220  L_tmp4 = fxp_mac_16by16(tmp2, fir_6k_7k[j+3], L_tmp4);  in band_pass_6k_7k()
|
/frameworks/av/media/libstagefright/codecs/amrnb/common/src/ |
D | gmed_n.cpp |
  192  Word16 tmp2[NMAX];  in gmed_n() local
  196  *(tmp2 + i) = *(ind + i);  in gmed_n()
  204  if (*(tmp2 + j) >= max)  in gmed_n()
  206  max = *(tmp2 + j);  in gmed_n()
  210  *(tmp2 + ix) = -32768;  in gmed_n()
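gmed_n() finds the median of a small array by repeated maximum extraction: copy the inputs into tmp2[], find the largest remaining element, overwrite it with -32768, and repeat until the middle element has been reached. A compact sketch of the same scheme; the NMAX bound and the returned quantity are assumptions based on the snippet:

    #include <cstdint>

    // Median by repeated maximum extraction, in the style of gmed_n(): copy
    // the inputs, then on each of (n/2 + 1) passes find the largest remaining
    // value and knock it out with the smallest representable value.
    int16_t gmed_sketch(const int16_t* ind, int n)   // n odd, n <= 9 assumed
    {
        int16_t tmp2[9];
        for (int i = 0; i < n; ++i) tmp2[i] = ind[i];

        int ix = 0;
        for (int pass = 0; pass <= n / 2; ++pass)
        {
            int16_t max = -32768;
            for (int j = 0; j < n; ++j)
            {
                if (tmp2[j] >= max) { max = tmp2[j]; ix = j; }
            }
            tmp2[ix] = -32768;                       // remove the current maximum
        }
        return ind[ix];                              // median: the last maximum found
    }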
|
/frameworks/ml/nn/runtime/test/specs/V1_2/ |
D | concat_zero_sized.mod.py |
  25  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  26  …el().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  32  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  64  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  65  …el().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  71  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
|
D | l2_pool_v1_2.mod.py |
  63  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  64  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  69  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  91  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  92  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  97  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
|
D | resize_bilinear_v1_2.mod.py |
  99  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  100  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  105  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  137  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  138  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  143  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
|
D | max_pool_v1_2.mod.py |
  119  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  120  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  125  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  157  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  158  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  163  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
|
/frameworks/av/media/libstagefright/codecs/amrnb/enc/src/ |
D | calc_en.cpp |
  286  Word16 tmp2; /* temporal storage */  in calc_unfilt_energies() local
  307  tmp2 = exc[i];  in calc_unfilt_energies()
  310  …s2 = amrnb_fxp_mac_16_by_16bb((Word32) tmp2, (Word32) tmp2, s2); /* Compute ltp excitation energ…  in calc_unfilt_energies()
  311  … s3 = amrnb_fxp_mac_16_by_16bb((Word32) tmp2, (Word32) code[i], s3);/* Compute scalar product */  in calc_unfilt_energies()
  314  L_temp = L_mult(tmp2, gain_pit, pOverflow);  in calc_unfilt_energies()
  316  tmp2 = sub(tmp1, pv_round(L_temp, pOverflow), pOverflow);  in calc_unfilt_energies()
  318  s4 = L_mac(s4, tmp2, tmp2, pOverflow);  in calc_unfilt_energies()
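calc_unfilt_energies() walks the subframe once and accumulates three sums: the energy of the LTP excitation (s2), its scalar product with the fixed-codebook vector (s3), and the energy left after subtracting the pitch-gain-scaled excitation (s4). A hedged plain-integer sketch of those accumulations; the real code uses saturating ETSI basic ops (L_mult, pv_round, L_mac), and the operand called tmp1 is not shown in the listing, so the res[] input below is an assumption:

    #include <cstdint>

    // One-pass accumulation of the three sums named in the snippet comments:
    //   s2 = sum exc[i]^2                 (LTP excitation energy)
    //   s3 = sum exc[i] * code[i]         (excitation / codebook scalar product)
    //   s4 = sum (res[i] - gp*exc[i])^2   (energy left after the pitch contribution)
    // gain_pit is treated as a Q14 fraction; saturation and rounding omitted.
    void unfilt_energies(const int16_t* exc, const int16_t* code,
                         const int16_t* res, int16_t gain_pit_q14, int n,
                         int64_t& s2, int64_t& s3, int64_t& s4)
    {
        s2 = s3 = s4 = 0;
        for (int i = 0; i < n; ++i)
        {
            int32_t tmp2 = exc[i];
            s2 += (int64_t)tmp2 * tmp2;
            s3 += (int64_t)tmp2 * code[i];

            int32_t scaled = ((int32_t)gain_pit_q14 * tmp2) >> 14;   // gp * exc[i]
            int32_t diff   = res[i] - scaled;
            s4 += (int64_t)diff * diff;
        }
    }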
|
/frameworks/ml/nn/runtime/test/specs/V1_3/ |
D | resize_quant8_signed.mod.py |
  99  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  100  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  105  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  137  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  138  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  143  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  347  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  348  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  353  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  385  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  [all …]
|
D | transpose_quant8_signed.mod.py |
  222  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  223  …ed").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  228  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  265  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  266  …ed").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  271  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 4, 4, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  387  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  388  …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  394  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
|
D | concat_quant8_signed.mod.py |
  145  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  146  …el().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  152  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
  185  tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out variable
  186  …el().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
  192  model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
|