/frameworks/ml/nn/common/ |
D | Utils.cpp |
    169  bool isExtensionOperandType(OperandType type) {    in isExtensionOperandType()
    212  OperandType getInputType(uint32_t index) const override;
    217  OperandType getOutputType(uint32_t index) const override;
    259  OperandType OperationValidationContext::getInputType(uint32_t index) const {    in getInputType()
    273  OperandType OperationValidationContext::getOutputType(uint32_t index) const {    in getOutputType()
    287  std::string getOperandTypeName(OperandType type) {    in getOperandTypeName()
    362  uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {    in nonExtensionOperandSizeOfData()
    392  bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,    in nonExtensionOperandSizeOfDataOverflowsUInt32()
    415  bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) {    in tensorHasUnspecifiedDimensions()
    496  if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {    in validateOperandSymmPerChannelQuantParams()
    [all …]
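The Utils.cpp helpers above derive an operand's storage requirements from its type and dimensions. A minimal, self-contained sketch of that idea, assuming an illustrative enum and per-element sizes rather than the framework's own definitions:

    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the framework's OperandType enum.
    enum class OperandTypeSketch { TENSOR_FLOAT32, TENSOR_FLOAT16, TENSOR_INT32, TENSOR_QUANT8_ASYMM };

    // Bytes per element for each (sketch) tensor type.
    uint32_t elementSize(OperandTypeSketch type) {
        switch (type) {
            case OperandTypeSketch::TENSOR_FLOAT32:
            case OperandTypeSketch::TENSOR_INT32:
                return 4;
            case OperandTypeSketch::TENSOR_FLOAT16:
                return 2;
            case OperandTypeSketch::TENSOR_QUANT8_ASYMM:
                return 1;
        }
        return 0;
    }

    // Product of the element size and all dimensions. Returns 0 if any dimension
    // is unspecified (0) or if the product would overflow uint32_t; the real code
    // performs those checks in separate helpers.
    uint32_t sizeOfData(OperandTypeSketch type, const std::vector<uint32_t>& dimensions) {
        uint64_t size = elementSize(type);
        for (uint32_t d : dimensions) {
            if (d == 0) return 0;
            size *= d;
            if (size > UINT32_MAX) return 0;
        }
        return static_cast<uint32_t>(size);
    }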
|
D | ValidateHal.cpp |
    102  case OperandType::FLOAT32:    in validateOperandExtraParams()
    103  case OperandType::INT32:    in validateOperandExtraParams()
    104  case OperandType::UINT32:    in validateOperandExtraParams()
    105  case OperandType::BOOL:    in validateOperandExtraParams()
    106  case OperandType::SUBGRAPH:    in validateOperandExtraParams()
    107  case OperandType::TENSOR_FLOAT32:    in validateOperandExtraParams()
    108  case OperandType::TENSOR_FLOAT16:    in validateOperandExtraParams()
    109  case OperandType::TENSOR_INT32:    in validateOperandExtraParams()
    110  case OperandType::TENSOR_QUANT8_ASYMM:    in validateOperandExtraParams()
    111  case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:    in validateOperandExtraParams()
    [all …]
|
/frameworks/ml/nn/common/operations/ |
D | Comparisons.cpp |
    59   if (aShape.type == OperandType::TENSOR_QUANT8_ASYMM ||    in compute()
    60   aShape.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in compute()
    132  OperandType inputType = context->getInputType(kInputTensor1);    in validate()
    134  inputType == OperandType::TENSOR_BOOL8 || inputType == OperandType::TENSOR_FLOAT16 ||    in validate()
    135  inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32 ||    in validate()
    136  inputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    137  inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)    in validate()
    140  NN_RET_CHECK(validateOutputTypes(context, {OperandType::TENSOR_BOOL8}));    in validate()
    141  if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in validate()
    158  case OperandType::TENSOR_FLOAT16:    in executeLess()
    [all …]
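The Comparisons.cpp excerpt shows the recurring validate() pattern: require that the input tensor type is one of a supported set and that a comparison always produces a boolean tensor (the real code additionally gates the signed quantized type on HAL version). A standalone sketch of that pattern, with an illustrative enum in place of OperandType:

    #include <initializer_list>

    enum class CmpTy { TENSOR_BOOL8, TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_INT32,
                       TENSOR_QUANT8_ASYMM, TENSOR_QUANT8_ASYMM_SIGNED };

    // True if t appears in the allowed list.
    bool isOneOf(CmpTy t, std::initializer_list<CmpTy> allowed) {
        for (CmpTy a : allowed) {
            if (t == a) return true;
        }
        return false;
    }

    // Both inputs must share a supported type; the output must be boolean.
    bool validateComparison(CmpTy input1, CmpTy input2, CmpTy output) {
        if (input1 != input2) return false;
        if (!isOneOf(input1, {CmpTy::TENSOR_BOOL8, CmpTy::TENSOR_FLOAT16, CmpTy::TENSOR_FLOAT32,
                              CmpTy::TENSOR_INT32, CmpTy::TENSOR_QUANT8_ASYMM,
                              CmpTy::TENSOR_QUANT8_ASYMM_SIGNED})) {
            return false;
        }
        return output == CmpTy::TENSOR_BOOL8;
    }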
|
D | UnidirectionalSequenceLSTM.cpp |
    122  const OperandType inputType = context->getInputType(kInputTensor);    in validate()
    123  std::vector<OperandType> inExpectedTypes;    in validate()
    124  std::vector<OperandType> outExpectedTypes;    in validate()
    125  if (inputType == OperandType::TENSOR_FLOAT32) {    in validate()
    126  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,    in validate()
    127  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,    in validate()
    128  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,    in validate()
    129  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,    in validate()
    130  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,    in validate()
    131  OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,    in validate()
    [all …]
|
D | Fill.cpp |
    49   bool getValueType(OperandType outputType, OperandType* valueType) {    in getValueType()
    51   case OperandType::TENSOR_FLOAT16:    in getValueType()
    52   *valueType = OperandType::FLOAT16;    in getValueType()
    54   case OperandType::TENSOR_FLOAT32:    in getValueType()
    55   *valueType = OperandType::FLOAT32;    in getValueType()
    57   case OperandType::TENSOR_INT32:    in getValueType()
    58   *valueType = OperandType::INT32;    in getValueType()
    72   OperandType outputType = context->getOutputType(kOutputTensor);    in validate()
    73   NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||    in validate()
    74   outputType == OperandType::TENSOR_FLOAT32 ||    in validate()
    [all …]
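Fill.cpp pairs each supported output tensor type with the scalar type of the fill value. A small sketch of that mapping, with a hypothetical enum standing in for the framework types:

    // Illustrative scalar and tensor type tags.
    enum class FillTy { FLOAT16, FLOAT32, INT32, TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_INT32 };

    // The fill value's scalar type must match the output tensor's element type.
    bool valueTypeForOutput(FillTy outputType, FillTy* valueType) {
        switch (outputType) {
            case FillTy::TENSOR_FLOAT16: *valueType = FillTy::FLOAT16; return true;
            case FillTy::TENSOR_FLOAT32: *valueType = FillTy::FLOAT32; return true;
            case FillTy::TENSOR_INT32:   *valueType = FillTy::INT32;   return true;
            default:                     return false;  // unsupported output type
        }
    }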
|
D | Dequantize.cpp |
    83   const OperandType inputType = context->getInputType(kInputTensor);    in validate()
    84   const OperandType outputType = context->getOutputType(kOutputTensor);    in validate()
    91   if (inputType == OperandType::TENSOR_QUANT8_ASYMM &&    in validate()
    92   outputType == OperandType::TENSOR_FLOAT32) {    in validate()
    96   NN_RET_CHECK(inputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    97   inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||    in validate()
    98   inputType == OperandType::TENSOR_QUANT8_SYMM ||    in validate()
    99   inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)    in validate()
    101  NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||    in validate()
    102  outputType == OperandType::TENSOR_FLOAT32)    in validate()
    [all …]
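Dequantize.cpp accepts the quantized tensor types listed above and produces a float tensor. For the 8-bit asymmetric case the math is real = scale * (q - zeroPoint); a standalone sketch assuming a per-tensor scale and zero point (the symmetric and per-channel variants differ only in where those parameters come from):

    #include <cstdint>
    #include <vector>

    // Dequantize an unsigned 8-bit asymmetric tensor to float.
    std::vector<float> dequantize(const std::vector<uint8_t>& data, float scale, int32_t zeroPoint) {
        std::vector<float> out;
        out.reserve(data.size());
        for (uint8_t q : data) {
            out.push_back(scale * (static_cast<int32_t>(q) - zeroPoint));
        }
        return out;
    }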
|
D | Reduce.cpp |
    75   OperandType inputType = context->getInputType(kInputTensor);    in validateProdSum()
    76   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||    in validateProdSum()
    77   inputType == OperandType::TENSOR_FLOAT32)    in validateProdSum()
    80   validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));    in validateProdSum()
    92   OperandType inputType = context->getInputType(kInputTensor);    in validateMaxMin()
    93   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||    in validateMaxMin()
    94   inputType == OperandType::TENSOR_FLOAT32 ||    in validateMaxMin()
    95   inputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validateMaxMin()
    96   inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)    in validateMaxMin()
    99   validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL}));    in validateMaxMin()
    [all …]
|
D | Rank.cpp |
    37   hal::OperandType inputType = context->getInputType(kInputTensor);    in validate()
    38   NN_RET_CHECK(inputType == hal::OperandType::TENSOR_FLOAT16 ||    in validate()
    39   inputType == hal::OperandType::TENSOR_FLOAT32 ||    in validate()
    40   inputType == hal::OperandType::TENSOR_INT32 ||    in validate()
    41   inputType == hal::OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    42   inputType == hal::OperandType::TENSOR_QUANT16_SYMM ||    in validate()
    43   inputType == hal::OperandType::TENSOR_BOOL8 ||    in validate()
    44   inputType == hal::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||    in validate()
    45   inputType == hal::OperandType::TENSOR_QUANT16_ASYMM ||    in validate()
    46   inputType == hal::OperandType::TENSOR_QUANT8_SYMM ||    in validate()
    [all …]
|
D | Quantize.cpp |
    73   const OperandType inputType = context->getInputType(kInputTensor);    in validate()
    74   const OperandType outputType = context->getOutputType(kOutputTensor);    in validate()
    76   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||    in validate()
    77   inputType == OperandType::TENSOR_FLOAT32)    in validate()
    79   NN_RET_CHECK(outputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    80   outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)    in validate()
    82   if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in validate()
    100  const OperandType inputType = context->getInputType(kInputTensor);    in execute()
    101  const OperandType outputType = context->getOutputType(kOutputTensor);    in execute()
    102  if (inputType == OperandType::TENSOR_FLOAT32) {    in execute()
    [all …]
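Quantize.cpp goes the other way, mapping a float tensor to an 8-bit asymmetric one. A sketch of the usual mapping q = clamp(round(x / scale) + zeroPoint, 0, 255) for the unsigned case; the helper name is illustrative, and the signed variant would clamp to [-128, 127] instead:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Quantize float values to unsigned 8-bit asymmetric.
    std::vector<uint8_t> quantizeToUint8(const std::vector<float>& data, float scale, int32_t zeroPoint) {
        std::vector<uint8_t> out;
        out.reserve(data.size());
        for (float x : data) {
            int32_t q = static_cast<int32_t>(std::round(x / scale)) + zeroPoint;
            out.push_back(static_cast<uint8_t>(std::clamp<int32_t>(q, 0, 255)));
        }
        return out;
    }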
|
D | RoiPooling.cpp |
    193  std::vector<OperandType> inExpectedTypes;    in validate()
    195  if (inputType == OperandType::TENSOR_FLOAT32) {    in validate()
    196  inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,    in validate()
    197  OperandType::TENSOR_INT32, OperandType::INT32,    in validate()
    198  OperandType::INT32, OperandType::FLOAT32,    in validate()
    199  OperandType::FLOAT32, OperandType::BOOL};    in validate()
    200  } else if (inputType == OperandType::TENSOR_FLOAT16) {    in validate()
    201  inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,    in validate()
    202  OperandType::TENSOR_INT32, OperandType::INT32,    in validate()
    203  OperandType::INT32, OperandType::FLOAT16,    in validate()
    [all …]
|
D | LogSoftmax.cpp |
    77   OperandType inputType = context->getInputType(kInputTensor);    in validate()
    78   std::vector<OperandType> inExpectedTypes;    in validate()
    79   std::vector<OperandType> outExpectedTypes;    in validate()
    80   if (inputType == OperandType::TENSOR_FLOAT32) {    in validate()
    81   inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::FLOAT32, OperandType::INT32};    in validate()
    82   outExpectedTypes = {OperandType::TENSOR_FLOAT32};    in validate()
    83   } else if (inputType == OperandType::TENSOR_FLOAT16) {    in validate()
    84   inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::FLOAT16, OperandType::INT32};    in validate()
    85   outExpectedTypes = {OperandType::TENSOR_FLOAT16};    in validate()
    103  case OperandType::TENSOR_FLOAT16:    in execute()
    [all …]
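The LogSoftmax.cpp excerpt shows the expected inputs (input tensor, beta, axis). A numerically stabilized sketch of log-softmax over a single 1-D slice, using the standard formula beta*x_i - log(sum_j exp(beta*x_j)); the function name and the reduction to one slice are simplifying assumptions:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Log-softmax over one slice, stabilized by subtracting the maximum.
    std::vector<float> logSoftmax1D(const std::vector<float>& x, float beta) {
        float maxVal = x.empty() ? 0.0f : x[0];
        for (float v : x) maxVal = std::max(maxVal, v);

        float sumExp = 0.0f;
        for (float v : x) sumExp += std::exp(beta * (v - maxVal));
        const float logSum = std::log(sumExp);

        std::vector<float> out;
        out.reserve(x.size());
        for (float v : x) out.push_back(beta * (v - maxVal) - logSum);
        return out;
    }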
|
D | FullyConnected.cpp |
    188  if (input.type == OperandType::TENSOR_QUANT8_ASYMM ||    in validateShapes()
    189  input.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in validateShapes()
    190  NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);    in validateShapes()
    227  std::vector<OperandType> inExpectedTypes;    in validate()
    228  std::vector<OperandType> outExpectedTypes;    in validate()
    229  if (inputType == OperandType::TENSOR_FLOAT32) {    in validate()
    232  OperandType::TENSOR_FLOAT32,    in validate()
    233  OperandType::TENSOR_FLOAT32,    in validate()
    234  OperandType::TENSOR_FLOAT32,    in validate()
    235  OperandType::INT32,    in validate()
    [all …]
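FullyConnected.cpp's validateShapes() excerpt encodes the bias rule: float inputs take a bias of the same float type, while quantized inputs take a TENSOR_INT32 bias (per the public NNAPI documentation, that bias also has zeroPoint 0 and scale equal to the input scale times the weight scale). A sketch of that rule with an illustrative enum:

    enum class FcTy { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_INT32,
                      TENSOR_QUANT8_ASYMM, TENSOR_QUANT8_ASYMM_SIGNED };

    // Returns true if the bias type is acceptable for the given input type.
    bool biasTypeMatches(FcTy input, FcTy bias) {
        switch (input) {
            case FcTy::TENSOR_FLOAT32:
            case FcTy::TENSOR_FLOAT16:
                return bias == input;               // float bias matches the input type
            case FcTy::TENSOR_QUANT8_ASYMM:
            case FcTy::TENSOR_QUANT8_ASYMM_SIGNED:
                return bias == FcTy::TENSOR_INT32;  // quantized input needs an int32 bias
            default:
                return false;
        }
    }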
|
D | TopK_V2.cpp |
    82   OperandType inputType = context->getInputType(kInputTensor);    in validate()
    83   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||    in validate()
    84   inputType == OperandType::TENSOR_FLOAT32 ||    in validate()
    85   inputType == OperandType::TENSOR_INT32 ||    in validate()
    86   inputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    87   inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)    in validate()
    89   NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32}));    in validate()
    90   NN_RET_CHECK(validateOutputTypes(context, {inputType, OperandType::TENSOR_INT32}));    in validate()
    92   if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in validate()
    109  outputIndicesShape.type = OperandType::TENSOR_INT32;    in prepare()
    [all …]
|
D | Pooling.cpp |
    114  if (output.type == OperandType::TENSOR_QUANT8_ASYMM) {    in toTfliteParam()
    121  } else if (output.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in toTfliteParam()
    298  std::vector<OperandType> inExpectedTypes;    in validate()
    299  if (inputType == OperandType::TENSOR_FLOAT32) {    in validate()
    302  inputType, OperandType::INT32, OperandType::INT32, OperandType::INT32,    in validate()
    303  OperandType::INT32, OperandType::INT32, OperandType::INT32,    in validate()
    305  } else if (inputType == OperandType::TENSOR_FLOAT16) {    in validate()
    308  OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::INT32,    in validate()
    309  OperandType::INT32, OperandType::INT32, OperandType::INT32,    in validate()
    310  OperandType::INT32,    in validate()
    [all …]
|
D | GenerateProposals.cpp |
    206  std::vector<OperandType> inExpectedTypes;    in validate()
    209  if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {    in validate()
    210  inExpectedTypes = {inputType, inputType, OperandType::TENSOR_INT32, inputType};    in validate()
    211  } else if (inputType == OperandType::TENSOR_QUANT16_ASYMM) {    in validate()
    212  if (deltaInputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    213  deltaInputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in validate()
    214  inExpectedTypes = {OperandType::TENSOR_QUANT16_ASYMM, deltaInputType,    in validate()
    215  OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_ASYMM};    in validate()
    254  if (roiShape.type == OperandType::TENSOR_QUANT16_ASYMM) {    in prepare()
    265  if (roiShape.type == OperandType::TENSOR_QUANT16_ASYMM) {    in prepare()
    [all …]
|
D | StridedSlice.cpp |
    105  OperandType inputType = context->getInputType(kInputTensor);    in validate()
    106  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||    in validate()
    107  inputType == OperandType::TENSOR_FLOAT32 ||    in validate()
    108  inputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    109  inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)    in validate()
    113  if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in validate()
    115  } else if (inputType == OperandType::TENSOR_FLOAT16) {    in validate()
    123  OperandType::TENSOR_INT32,    in validate()
    124  OperandType::TENSOR_INT32,    in validate()
    125  OperandType::TENSOR_INT32,    in validate()
    [all …]
|
D | LocalResponseNormalization.cpp |
    141  const OperandType inputType = context->getInputType(kInputTensor);    in validate()
    142  std::vector<OperandType> inExpectedTypes;    in validate()
    143  std::vector<OperandType> outExpectedTypes;    in validate()
    144  if (inputType == OperandType::TENSOR_FLOAT32) {    in validate()
    147  OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::FLOAT32,    in validate()
    148  OperandType::FLOAT32, OperandType::FLOAT32,    in validate()
    150  outExpectedTypes = {OperandType::TENSOR_FLOAT32};    in validate()
    151  } else if (inputType == OperandType::TENSOR_FLOAT16) {    in validate()
    154  OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::FLOAT16,    in validate()
    155  OperandType::FLOAT16, OperandType::FLOAT16,    in validate()
    [all …]
|
D | Squeeze.cpp |
    44   OperandType inputType = context->getInputType(kInputTensor);    in validate()
    45   NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||    in validate()
    46   inputType == OperandType::TENSOR_FLOAT32 ||    in validate()
    47   inputType == OperandType::TENSOR_QUANT8_ASYMM ||    in validate()
    48   inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)    in validate()
    52   if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {    in validate()
    54   } else if (inputType == OperandType::TENSOR_FLOAT16) {    in validate()
    62   OperandType::TENSOR_INT32,    in validate()
    85   NN_OPS_CHECK(squeezeDimsShape.type == OperandType::TENSOR_INT32);    in prepare()
    131  case OperandType::TENSOR_FLOAT16:    in execute()
    [all …]
|
/frameworks/ml/nn/tools/test_generator/tests/P_vts_variation/ |
D | stdout.txt.expect |
    17   .type = OperandType::TENSOR_FLOAT32,
    26   .type = OperandType::TENSOR_FLOAT32,
    35   .type = OperandType::TENSOR_FLOAT32,
    44   .type = OperandType::INT32,
    53   .type = OperandType::INT32,
    62   .type = OperandType::INT32,
    71   .type = OperandType::INT32,
    80   .type = OperandType::INT32,
    89   .type = OperandType::TENSOR_FLOAT32,
    164  .type = OperandType::TENSOR_FLOAT32,
    [all …]
|
/frameworks/ml/nn/tools/test_generator/tests/P_vts_implicit_variation/ |
D | stdout.txt.expect |
    17   .type = OperandType::TENSOR_FLOAT32,
    26   .type = OperandType::TENSOR_FLOAT32,
    35   .type = OperandType::TENSOR_FLOAT32,
    44   .type = OperandType::INT32,
    53   .type = OperandType::INT32,
    62   .type = OperandType::INT32,
    71   .type = OperandType::INT32,
    80   .type = OperandType::INT32,
    89   .type = OperandType::TENSOR_FLOAT32,
    164  .type = OperandType::TENSOR_FLOAT32,
    [all …]
|
/frameworks/ml/nn/tools/test_generator/tests/P_vts_naming/ |
D | stdout.txt.expect |
    17   .type = OperandType::TENSOR_FLOAT32,
    26   .type = OperandType::TENSOR_FLOAT32,
    35   .type = OperandType::TENSOR_FLOAT32,
    44   .type = OperandType::INT32,
    53   .type = OperandType::INT32,
    62   .type = OperandType::INT32,
    71   .type = OperandType::INT32,
    80   .type = OperandType::INT32,
    89   .type = OperandType::TENSOR_FLOAT32,
    164  .type = OperandType::TENSOR_FLOAT32,
    [all …]
|
/frameworks/ml/nn/tools/test_generator/tests/P_vts_backward_compatibility_float/ |
D | stdout.txt.expect |
    17   .type = OperandType::TENSOR_FLOAT32,
    26   .type = OperandType::TENSOR_FLOAT32,
    35   .type = OperandType::TENSOR_FLOAT32,
    44   .type = OperandType::TENSOR_FLOAT32,
    53   .type = OperandType::TENSOR_FLOAT32,
    62   .type = OperandType::TENSOR_FLOAT32,
    71   .type = OperandType::TENSOR_FLOAT32,
    80   .type = OperandType::TENSOR_FLOAT32,
    89   .type = OperandType::TENSOR_FLOAT32,
    98   .type = OperandType::TENSOR_FLOAT32,
    [all …]
|
/frameworks/ml/nn/tools/test_generator/tests/P_variation/ |
D | stdout.txt.expect |
    17   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    18   OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    19   OperandType type2(Type::TENSOR_FLOAT32, {1});
    20   OperandType type3(Type::INT32, {});
    90   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    91   OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    92   OperandType type2(Type::TENSOR_FLOAT32, {1});
    93   OperandType type3(Type::INT32, {});
    165  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    166  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    [all …]
|
/frameworks/ml/nn/tools/test_generator/tests/P_implicit_variation/ |
D | stdout.txt.expect |
    17   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    18   OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    19   OperandType type2(Type::TENSOR_FLOAT32, {1});
    20   OperandType type3(Type::INT32, {});
    90   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    91   OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    92   OperandType type2(Type::TENSOR_FLOAT32, {1});
    93   OperandType type3(Type::INT32, {});
    165  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    166  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    [all …]
|
/frameworks/ml/nn/tools/test_generator/tests/P_naming/ |
D | stdout.txt.expect |
    17   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    18   OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    19   OperandType type2(Type::TENSOR_FLOAT32, {1});
    20   OperandType type3(Type::INT32, {});
    90   OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    91   OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    92   OperandType type2(Type::TENSOR_FLOAT32, {1});
    93   OperandType type3(Type::INT32, {});
    165  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
    166  OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
    [all …]
|