Lines Matching refs:activation
  55   switch (activation) { \
  75   int32_t activation, float* out, const Shape& shapeOut)>;

in binaryOperationFloat16():
  78   const Shape& shape2, int32_t activation, _Float16* out,
  86   operationFloat32(in1_float32.data(), shape1, in2_float32.data(), shape2, activation,

in addFloat32():
  94   int32_t activation, float* out, const Shape& shapeOut) {
  99   #define ANDROID_NN_BROADCAST_ADD(activation) \
 100   tflite::optimized_ops::BroadcastAdd<tflite::FusedActivationFunctionType::activation>( \
 108   #define ANDROID_NN_ADD(activation) \
 109   tflite::optimized_ops::Add<tflite::FusedActivationFunctionType::activation>( \

in addFloat16():
 121   int32_t activation, _Float16* out, const Shape& shapeOut) {
 123   return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &addFloat32);

in addQuant8():
 128   int32_t activation, T* out, const Shape& shapeOut) {
 159   CalculateActivationRangeInt8(activation, shapeOut, &output_activation_min,
 162   CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,

in executeInt32():
 209   const Shape& bShape, int32_t activation, int32_t* outputData,
 211   NN_RET_CHECK_EQ(activation, ANEURALNETWORKS_FUSED_NONE);

in mulFloat32():
 233   int32_t activation, float* out, const Shape& shapeOut) {
 239   #define ANDROID_NN_BROADCAST_MUL(activation) \
 240   tflite::optimized_ops::BroadcastMul<tflite::FusedActivationFunctionType::activation>( \
 248   CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);

in mulFloat16():
 260   int32_t activation, _Float16* out, const Shape& shapeOut) {
 262   return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &mulFloat32);

in mulQuant8():
 267   int32_t activation, T* out, const Shape& shapeOut) {
 283   CalculateActivationRangeInt8(activation, shapeOut, &output_activation_min,
 286   CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,

in subFloat32():
 314   int32_t activation, float* out, const Shape& shapeOut) {
 322   CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);

in subFloat16():
 331   int32_t activation, _Float16* out, const Shape& shapeOut) {
 333   return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &subFloat32);

in subQuant8():
 338   int32_t activation, T* out, const Shape& shapeOut) {
 371   CalculateActivationRangeInt8(activation, shapeOut, &output_activation_min,
 374   CalculateActivationRangeUint8(activation, shapeOut, &output_activation_min,

in divFloat32():
 410   int32_t activation, float* out, const Shape& shapeOut) {
 413   CalculateActivationRangeFloat(activation, &output_activation_min, &output_activation_max);

in divFloat16():
 431   int32_t activation, _Float16* out, const Shape& shapeOut) {
 433   return binaryOperationFloat16(in1, shape1, in2, shape2, activation, out, shapeOut, &divFloat32);
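
Read together, the hits show one recurring pattern: each binary kernel (add, mul, sub, div, in float32, float16, and quantized variants) takes the fused activation code as an int32_t, turns it into an output clamp range through a CalculateActivationRange* helper, and applies that range to the elementwise result. The _Float16 variants convert their inputs to float, reuse the corresponding float32 kernel via binaryOperationFloat16, and convert back, while executeInt32 rejects anything other than ANEURALNETWORKS_FUSED_NONE. The code below is a minimal standalone sketch of that clamp pattern, not the actual NNAPI/TFLite source: the names addFloat32Sketch and calculateActivationRangeFloat and the literal enum values are assumptions made for illustration (they mirror the ANEURALNETWORKS_FUSED_* codes).

// Illustrative sketch only: reimplements the fused-activation clamp pattern seen in
// the hits above; it is not the NNAPI/TFLite source. The enum values below are an
// assumption mirroring ANEURALNETWORKS_FUSED_NONE/RELU/RELU1/RELU6.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

enum : int32_t { kFusedNone = 0, kFusedRelu = 1, kFusedRelu1 = 2, kFusedRelu6 = 3 };

// Analogous to the CalculateActivationRangeFloat calls in the listing: map the fused
// activation code to the [min, max] interval the output must be clamped to.
void calculateActivationRangeFloat(int32_t activation, float* actMin, float* actMax) {
    switch (activation) {
        case kFusedRelu:  *actMin = 0.0f;  *actMax = std::numeric_limits<float>::max(); break;
        case kFusedRelu1: *actMin = -1.0f; *actMax = 1.0f; break;
        case kFusedRelu6: *actMin = 0.0f;  *actMax = 6.0f; break;
        default:          *actMin = std::numeric_limits<float>::lowest();
                          *actMax = std::numeric_limits<float>::max(); break;
    }
}

// Elementwise add with the fused activation applied as a final clamp. Both inputs are
// assumed to already have the same (broadcast-resolved) element count.
void addFloat32Sketch(const float* in1, const float* in2, std::size_t count,
                      int32_t activation, float* out) {
    float actMin = 0.0f, actMax = 0.0f;
    calculateActivationRangeFloat(activation, &actMin, &actMax);
    for (std::size_t i = 0; i < count; ++i) {
        out[i] = std::min(std::max(in1[i] + in2[i], actMin), actMax);
    }
}

int main() {
    std::vector<float> a = {-2.0f, 3.0f, 10.0f};
    std::vector<float> b = {1.0f, 1.0f, 1.0f};
    std::vector<float> out(a.size());
    addFloat32Sketch(a.data(), b.data(), a.size(), kFusedRelu6, out.data());
    for (float v : out) std::printf("%g ", v);  // RELU6 clamps {-1, 4, 11} to: 0 4 6
    std::printf("\n");
    return 0;
}

Applying the activation as a final clamp over a precomputed range is what lets one kernel body serve every fused-activation mode; the quantized paths above follow the same shape, routing through CalculateActivationRangeUint8/Int8 before writing output.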