/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.2/types.h>

#include <functional>
#include <numeric>

namespace android {
namespace hardware {
namespace neuralnetworks {

// Returns the size in bytes of one element of the given operand type.
uint32_t sizeOfData(V1_2::OperandType type) {
    switch (type) {
        case V1_2::OperandType::FLOAT32:
        case V1_2::OperandType::INT32:
        case V1_2::OperandType::UINT32:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_INT32:
            return 4;
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::FLOAT16:
        case V1_2::OperandType::TENSOR_QUANT16_ASYMM:
            return 2;
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_BOOL8:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
            return 1;
        default:
            CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
            return 0;
    }
}

// Returns true if the given operand type is a tensor type, false if it is a scalar type.
static bool isTensor(V1_2::OperandType type) {
    switch (type) {
        case V1_2::OperandType::FLOAT32:
        case V1_2::OperandType::INT32:
        case V1_2::OperandType::UINT32:
        case V1_2::OperandType::FLOAT16:
        case V1_2::OperandType::BOOL:
            return false;
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_INT32:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_QUANT16_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_BOOL8:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
            return true;
        default:
            CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type);
            return false;
    }
}

// Returns the total size in bytes of the operand's data: the element size multiplied by
// every dimension. A tensor operand with no dimensions has unspecified rank, so its size
// is reported as 0.
uint32_t sizeOfData(const V1_2::Operand& operand) {
    const uint32_t dataSize = sizeOfData(operand.type);
    if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0;
    return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize,
                           std::multiplies<>{});
}

}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android