1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
18 #define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
19 
#include <android-base/logging.h>

#include <chrono>
#include <functional>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "ValidateHal.h"
31 
32 namespace android {
33 namespace nn {
34 
// The number of data types (OperandCode) defined in NeuralNetworks.h.
inline constexpr int kNumberOfDataTypes = 16;

// The number of operation types (OperationCode) defined in NeuralNetworks.h.
inline constexpr int kNumberOfOperationTypes = 102;

// The number of execution preferences defined in NeuralNetworks.h.
inline constexpr int kNumberOfPreferences = 3;

// The number of data types (OperandCode) defined in NeuralNetworksOEM.h.
inline constexpr int kNumberOfDataTypesOEM = 2;

// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h.
inline constexpr int kNumberOfOperationTypesOEM = 1;

// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
inline constexpr int kOEMCodeBase = 10000;
52 
/* IMPORTANT: if you change the following list, don't
 * forget to update the corresponding 'tags' table in
 * the initVlogMask() function implemented in Utils.cpp.
 */
enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY };

// True iff verbose logging is enabled for the given tag (bit TAG of vLogMask).
#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0)

// Stream-style verbose logging, gated on VLOG_IS_ON(TAG).  The streamed
// expressions are only evaluated when the tag is enabled.  The if/else shape
// keeps `VLOG(TAG) << ...` behaving as a single statement so it composes
// safely inside un-braced if/else in calling code.
#define VLOG(TAG)                 \
    if (LIKELY(!VLOG_IS_ON(TAG))) \
        ;                         \
    else                          \
        LOG(INFO)

// Bitmask of enabled VLogFlags; populated by initVLogMask() (see Utils.cpp).
extern int vLogMask;
void initVLogMask();
69 
#ifdef NN_DEBUGGABLE
// Expands to its argument in debuggable builds, and to an empty string literal
// otherwise, so the extra detail is only emitted when NN_DEBUGGABLE is defined.
#define SHOW_IF_DEBUG(msg) msg
#else
#define SHOW_IF_DEBUG(msg) ""
#endif

// DEPRECATED(b/118737105). Use CHECK.
#define nnAssert(v) CHECK(v)

// Evaluates `expr` (an int result code) exactly once and returns it from the
// enclosing function if it is anything other than ANEURALNETWORKS_NO_ERROR.
// The enclosing function must return int.
#define NN_RETURN_IF_ERROR(expr)                      \
    do {                                              \
        int _errorCode = (expr);                      \
        if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \
            return _errorCode;                        \
        }                                             \
    } while (0)
86 
// The NN_RET_CHECK family of macros defined below is similar to the CHECK family defined in
// system/core/base/include/android-base/logging.h
//
// The difference is that NN_RET_CHECK macros use LOG(ERROR) instead of LOG(FATAL)
// and return false instead of aborting.

// Logs an error and returns false. Append context using << after. For example:
//
//   NN_RET_CHECK_FAIL() << "Something went wrong";
//
// The containing function must return a bool.
// (FalseyErrorStream converts to bool as `false`, which makes the bare
// `return` expression below well-formed in a bool-returning function.)
#define NN_RET_CHECK_FAIL()                   \
    return ::android::nn::FalseyErrorStream() \
           << "NN_RET_CHECK failed (" << __FILE__ << ":" << __LINE__ << "): "

// Logs an error and returns false if condition is false. Extra logging can be appended using <<
// after. For example:
//
//   NN_RET_CHECK(false) << "Something went wrong";
//
// The containing function must return a bool.
// The `while` never iterates more than once: when the condition fails, the
// body is a `return` statement, so control leaves the function immediately.
#define NN_RET_CHECK(condition) \
    while (UNLIKELY(!(condition))) NN_RET_CHECK_FAIL() << #condition << " "

// Helper for the NN_RET_CHECK_xx(x, y) comparison macros below.  The `for`
// header evaluates each operand exactly once (via MakeEagerEvaluator) and only
// runs the NN_RET_CHECK_FAIL return-statement when the comparison fails.
#define NN_RET_CHECK_OP(LHS, RHS, OP)                                                       \
    for (auto _values = ::android::base::MakeEagerEvaluator(LHS, RHS);                      \
         UNLIKELY(!(_values.lhs.v OP _values.rhs.v));                                       \
         /* empty */)                                                                       \
    NN_RET_CHECK_FAIL()                                                                     \
            << #LHS << " " << #OP << " " << #RHS << " (" << #LHS << " = "                   \
            << ::android::base::LogNullGuard<decltype(_values.lhs.v)>::Guard(_values.lhs.v) \
            << ", " << #RHS << " = "                                                        \
            << ::android::base::LogNullGuard<decltype(_values.rhs.v)>::Guard(_values.rhs.v) \
            << ") "

// Logs an error and returns false if a condition between x and y does not hold. Extra logging can
// be appended using << after. For example:
//
//   NN_RET_CHECK_EQ(a, b) << "Something went wrong";
//
// The values must implement the appropriate comparison operator as well as
// `operator<<(std::ostream&, ...)`.
// The containing function must return a bool.
#define NN_RET_CHECK_EQ(x, y) NN_RET_CHECK_OP(x, y, ==)
#define NN_RET_CHECK_NE(x, y) NN_RET_CHECK_OP(x, y, !=)
#define NN_RET_CHECK_LE(x, y) NN_RET_CHECK_OP(x, y, <=)
#define NN_RET_CHECK_LT(x, y) NN_RET_CHECK_OP(x, y, <)
#define NN_RET_CHECK_GE(x, y) NN_RET_CHECK_OP(x, y, >=)
#define NN_RET_CHECK_GT(x, y) NN_RET_CHECK_OP(x, y, >)
137 
// Type to represent a deadline time point across processes.
using Deadline = std::chrono::steady_clock::time_point;

// Make a Deadline from a duration. If the sum of the current time and the
// duration exceeds the max time, return a time point holding the maximum
// expressible time.
Deadline makeDeadline(uint64_t duration);

// Convenience function. If the duration is provided, this function creates a
// Deadline using makeDeadline. If the duration is not provided, this function
// returns std::nullopt.
std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration);

// Make an optional Deadline from an OptionalTimePoint. If
// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
// time point holding the maximum Deadline. If the OptionalTimePoint is none,
// this function returns std::nullopt.
std::optional<Deadline> makeDeadline(const hal::OptionalTimePoint& timePoint);

// Returns true if the deadline has passed. Returns false if either the deadline
// has not been exceeded or if the deadline is not present.
bool hasDeadlinePassed(const std::optional<Deadline>& deadline);

// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not
// provided, this function returns none for OptionalTimePoint.
hal::OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
164 
165 // Ensure that every user of FalseyErrorStream is linked to the
166 // correct instance, using the correct LOG_TAG
167 namespace {
168 
169 // A wrapper around LOG(ERROR) that can be implicitly converted to bool (always evaluates to false).
170 // Used to implement stream logging in NN_RET_CHECK.
171 class FalseyErrorStream {
172     DISALLOW_COPY_AND_ASSIGN(FalseyErrorStream);
173 
174    public:
FalseyErrorStream()175     FalseyErrorStream() {}
176 
177     template <typename T>
178     FalseyErrorStream& operator<<(const T& value) {
179         mBuffer << value;
180         return *this;
181     }
182 
~FalseyErrorStream()183     ~FalseyErrorStream() { LOG(ERROR) << mBuffer.str(); }
184 
185     operator bool() const { return false; }
186 
187    private:
188     std::ostringstream mBuffer;
189 };
190 
// Maps a HalVersion to the operand-related types belonging to that HAL
// version.  Only specialized for the versions that define their own
// OperandPerformance/OperandType (V1_2 and V1_3).
template <HalVersion version>
struct VersionedType {};

template <>
struct VersionedType<HalVersion::V1_2> {
    using OperandPerformance = hal::V1_2::Capabilities::OperandPerformance;
    using OperandType = hal::V1_2::OperandType;
};

template <>
struct VersionedType<HalVersion::V1_3> {
    using OperandPerformance = hal::V1_3::Capabilities::OperandPerformance;
    using OperandType = hal::V1_3::OperandType;
};

// Convenience aliases for the members of VersionedType<version>.
template <HalVersion version>
using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance;
template <HalVersion version>
using VersionedOperandType = typename VersionedType<version>::OperandType;
210 
211 }  // namespace
212 
// Return a vector with one entry for each non-extension OperandType except
// SUBGRAPH, set to the specified PerformanceInfo value.  The vector will be
// sorted by OperandType.
//
// Control flow (OperandType::SUBGRAPH) operation performance is specified
// separately using Capabilities::ifPerformance and
// Capabilities::whilePerformance.
template <HalVersion version>
hal::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
        hal::PerformanceInfo perf);

// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value.  The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
void update(hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>* operandPerformance,
            hal::V1_2::OperandType type, hal::PerformanceInfo perf);
void update(hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>* operandPerformance,
            hal::V1_3::OperandType type, hal::PerformanceInfo perf);

// Look for a vector entry corresponding to the specified OperandType.  If
// found, return the associated PerformanceInfo.  If not, return a pessimistic
// PerformanceInfo (FLT_MAX).  The vector must be sorted by OperandType.
hal::PerformanceInfo lookup(
        const hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>& operandPerformance,
        hal::V1_2::OperandType type);
hal::PerformanceInfo lookup(
        const hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>& operandPerformance,
        hal::V1_3::OperandType type);

// Returns true if an operand type is an extension type.
bool isExtensionOperandType(hal::OperandType type);

// Returns true if an operation type is an extension type.
bool isExtensionOperationType(hal::OperationType type);

// Returns the amount of space needed to store a value of the specified
// dimensions and type. For a tensor with unspecified rank or at least one
// unspecified dimension, returns zero.
//
// Aborts if the specified type is an extension type.
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
uint32_t nonExtensionOperandSizeOfData(hal::OperandType type,
                                       const std::vector<uint32_t>& dimensions);

// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a tensor with unspecified rank or at least one
// unspecified dimension, returns zero.
//
// Aborts if the specified type is an extension type.
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(const Operand&).
inline uint32_t nonExtensionOperandSizeOfData(const hal::Operand& operand) {
    return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
}

// Returns the amount of space needed to store a value of the specified
// dimensions and element size. For a tensor with unspecified rank or at least
// one unspecified dimension, returns zero.
//
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(const Operand&).
uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions);

// Returns true if the amount of space needed to store a value of the specified
// dimensions and element size overflows the uint32_t type.
//
// Aborts if the specified type is an extension type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
                                                  const std::vector<uint32_t>& dimensions);

// Returns true if the amount of space needed to store a value of the specified
// dimensions and element size overflows the uint32_t type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions);

// Returns true if a non-extension operand type is a scalar type.
//
// Aborts if the specified type is an extension type.
//
// See also TypeManager::isTensorType(OperandType).
bool nonExtensionOperandTypeIsScalar(int type);

// Returns the name of the operation type in ASCII.
std::string getOperationName(hal::OperationType opCode);

// Returns the name of the operand type in ASCII.
std::string getOperandTypeName(hal::OperandType type);

// Whether an operand of tensor type has unspecified dimensions.
//
// Undefined behavior if the operand type is a scalar type.
//
// NOTE(review): the `int type` overload presumably takes a raw OperandCode
// value as used by the NDK-facing ANeuralNetworksOperandType — confirm
// against callers.
bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
bool tensorHasUnspecifiedDimensions(hal::OperandType type, const std::vector<uint32_t>& dimensions);
bool tensorHasUnspecifiedDimensions(const hal::Operand& operand);
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);

// Returns the number of padding bytes needed to align data of the
// specified length.  It aligns object of length:
// 2, 3 on a 2 byte boundary,
// 4+ on a 4 byte boundary.
// We may want to have different alignments for tensors.
// TODO: This is arbitrary, more a proof of concept.  We need
// to determine what this should be.
//
// NOTE(review): `index` appears to be the byte offset at which the object
// would be placed — confirm against the memory-pool callers.
uint32_t alignBytesNeeded(uint32_t index, size_t length);

// Does a detailed LOG(INFO) of the model
void logModelToInfo(const hal::V1_0::Model& model);
void logModelToInfo(const hal::V1_1::Model& model);
void logModelToInfo(const hal::V1_2::Model& model);
void logModelToInfo(const hal::V1_3::Model& model);
330 
// Renders an unsigned integer in decimal.
inline std::string toString(uint32_t value) {
    return std::to_string(value);
}
334 
// Renders a vector as "[a, b, c]" using the element-wise toString overloads.
template <typename Type>
std::string toString(const std::vector<Type>& range) {
    std::string result = "[";
    const char* separator = "";
    for (const auto& element : range) {
        result += separator;
        result += toString(element);
        separator = ", ";
    }
    result += "]";
    return result;
}
343 
// Renders a pair as "(first, second)" using the element-wise toString overloads.
template <typename A, typename B>
std::string toString(const std::pair<A, B>& pair) {
    return "(" + toString(pair.first) + ", " + toString(pair.second) + ")";
}
350 
351 inline std::string toString(HalVersion halVersion) {
352     switch (halVersion) {
353         case HalVersion::UNKNOWN:
354             return "UNKNOWN HAL version";
355         case HalVersion::V1_0:
356             return "HAL version 1.0";
357         case HalVersion::V1_1:
358             return "HAL version 1.1";
359         case HalVersion::V1_2:
360             return "HAL version 1.2";
361         case HalVersion::V1_3:
362             return "HAL version 1.3";
363     }
364 }
365 
366 inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
367     return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
368 }
369 
// Validates the per-channel quantization parameters supplied for a per-channel
// quantized operand.  `tag` is used to identify the caller in error messages.
bool validateOperandSymmPerChannelQuantParams(
        const hal::Operand& halOperand,
        const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);

// Validates an operand type.
//
// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
//
// If allowPartial is true, the dimensions may be underspecified.
int validateOperandType(
        const ANeuralNetworksOperandType& type,
        const hal::Extension::OperandTypeInformation* const extensionOperandTypeInfo,
        const char* tag, bool allowPartial);
// Validates a list of operand indexes (`list`, of length `count`) —
// presumably checking each entry is below operandCount; see Utils.cpp.
// `tag` is used to identify the caller in error messages.
int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag);

// A set of functions to help validate models containing IF or WHILE operations.
struct SubgraphValidationHelper {
    // Checks if a given operand is a SUBGRAPH operand with a valid offset.
    std::function<bool(const hal::Operand&)> isValidSubgraphReference;
    // Gets the input count of a subgraph referenced by a given operand.
    std::function<uint32_t(const hal::Operand&)> getSubgraphInputCount;
    // Gets the output count of a subgraph referenced by a given operand.
    std::function<uint32_t(const hal::Operand&)> getSubgraphOutputCount;
    // Gets the specified input operand of a subgraph referenced by a given operand.
    std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphInputOperand;
    // Gets the specified output operand of a subgraph referenced by a given operand.
    std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphOutputOperand;
    // Whether control flow operations with inner or outer input or output
    // operands of unknown size are allowed.
    bool allowControlFlowOperationWithOperandOfUnknownSize;
};

// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
// The last argument is only used for validating IF and WHILE operations.
int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
                      const uint32_t* inputIndexes, uint32_t outputCount,
                      const uint32_t* outputIndexes, const std::vector<hal::Operand>& operands,
                      HalVersion halVersion, const SubgraphValidationHelper& helper);
410 
// Reassembles a 64-bit size from two 32-bit halves passed as ints:
// `lower` carries bits [0, 32) and `higher` carries bits [32, 64).
inline size_t getSizeFromInts(int lower, int higher) {
    // Use named casts (instead of the original C-style casts) so the
    // intentional signed->unsigned reinterpretation is explicit and greppable.
    const uint64_t lo = static_cast<uint32_t>(lower);
    const uint64_t hi = static_cast<uint32_t>(higher);
    return static_cast<size_t>(lo | (hi << 32));
}
414 
// Convert ANEURALNETWORKS_* result code to ErrorStatus.
// Not guaranteed to be a 1-to-1 mapping.
hal::ErrorStatus convertResultCodeToErrorStatus(int resultCode);

// Convert ErrorStatus to ANEURALNETWORKS_* result code.
// Not guaranteed to be a 1-to-1 mapping.
int convertErrorStatusToResultCode(hal::ErrorStatus status);

// Convert execution results to runtime format. Additionally checks that the
// returned results abide by the HAL specification, and logs an error if the
// result violates the specification.
std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> getExecutionResult(
        hal::ErrorStatus status, std::vector<hal::OutputShape> outputShapes, hal::Timing timing);

// Combine two tensor dimensions, both may have unspecified dimensions or rank.
// Presumably returns std::nullopt when lhs and rhs are incompatible — confirm
// against the implementation in Utils.cpp.
std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
                                                       const std::vector<uint32_t>& rhs);
432 
// Versioning

// Capabilities compliance checks: whether the given Capabilities can be
// expressed in the target HAL version (see Utils.cpp for the exact criteria).
bool compliantWithV1_0(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_3::Capabilities& capabilities);

// Model compliance checks.
// If noncompliantOperations != nullptr, then
//     precondition: noncompliantOperations->empty()
//     postcondition: *noncompliantOperations consists of the indices of the noncompliant
//                    operations; if the compliance check fails for some reason
//                    other than a noncompliant operation,
//                    *noncompliantOperations consists of the indices of all operations
bool compliantWithV1_0(const hal::V1_0::Model& model);
bool compliantWithV1_0(const hal::V1_1::Model& model);
bool compliantWithV1_0(const hal::V1_2::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_0(const hal::V1_3::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_1(const hal::V1_0::Model& model);
bool compliantWithV1_1(const hal::V1_1::Model& model);
bool compliantWithV1_1(const hal::V1_2::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_1(const hal::V1_3::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_2(const hal::V1_0::Model& model);
bool compliantWithV1_2(const hal::V1_1::Model& model);
bool compliantWithV1_2(const hal::V1_2::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_2(const hal::V1_3::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);

// ErrorStatus conversions.
hal::V1_0::ErrorStatus convertToV1_0(hal::V1_0::ErrorStatus status);
hal::V1_0::ErrorStatus convertToV1_0(hal::V1_3::ErrorStatus status);
hal::V1_3::ErrorStatus convertToV1_3(hal::V1_0::ErrorStatus status);
hal::V1_3::ErrorStatus convertToV1_3(hal::V1_3::ErrorStatus status);

// Capabilities conversions.
hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_3::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_0::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_1::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_2::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_3::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_0::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_1::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_2::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_3::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_0::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_1::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_2::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_3::Capabilities& capabilities);

// Model conversions.
hal::V1_0::Model convertToV1_0(const hal::V1_0::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_1::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_2::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_3::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_0::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_1::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_2::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_3::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_0::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_1::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_2::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_3::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_0::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_1::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_2::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_3::Model& model);

// Unchecked OperationType downcasts: the caller is responsible for ensuring
// `type` is representable in the target version.
hal::V1_0::OperationType uncheckedConvertToV1_0(hal::V1_3::OperationType type);
hal::V1_1::OperationType uncheckedConvertToV1_1(hal::V1_3::OperationType type);
hal::V1_2::OperationType uncheckedConvertToV1_2(hal::V1_3::OperationType type);

// Operand conversions (scalar forms).
hal::V1_0::Operand convertToV1_0(const hal::V1_2::Operand& operand);
hal::V1_0::Operand convertToV1_0(const hal::V1_3::Operand& operand);
hal::V1_2::Operand convertToV1_2(const hal::V1_0::Operand& operand);
hal::V1_2::Operand convertToV1_2(const hal::V1_3::Operand& operand);
hal::V1_3::Operand convertToV1_3(const hal::V1_0::Operand& operand);
hal::V1_3::Operand convertToV1_3(const hal::V1_2::Operand& operand);
hal::V1_3::Operand convertToV1_3(const hal::V1_3::Operand& operand);

// Operand conversions (vector forms).
hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_0::Operand>& operands);
hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_2::Operand>& operands);
hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_3::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_0::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_2::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_3::Operand>& operands);
hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_0::Operand>& operands);
hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_2::Operand>& operands);
hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_3::Operand>& operands);

// Request compliance checks and conversions.
bool compliantWithV1_0(const hal::V1_0::Request& request);
bool compliantWithV1_0(const hal::V1_3::Request& request);
bool compliantWithV1_2(const hal::V1_3::Request& request);

hal::V1_0::Request convertToV1_0(const hal::V1_0::Request& request);
hal::V1_0::Request convertToV1_0(const hal::V1_3::Request& request);
// NOTE(review): convertToV1_2 returns hal::V1_0::Request — V1_2 appears to
// reuse the V1_0 Request type (no V1_2::Request is referenced anywhere in
// this header); confirm against the V1_2 HAL definition.
hal::V1_0::Request convertToV1_2(const hal::V1_3::Request& request);
hal::V1_3::Request convertToV1_3(const hal::V1_0::Request& request);
hal::V1_3::Request convertToV1_3(const hal::V1_3::Request& request);

// OperandLifeTime compliance checks and conversions.
bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime);
bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime);
bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime);
bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime);

hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_0::OperandLifeTime lifetime);
hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_3::OperandLifeTime lifetime);
hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_0::OperandLifeTime lifetime);
hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_3::OperandLifeTime lifetime);
557 
558 constexpr hal::Priority convertToHalPriority(int32_t priority) {
559     switch (priority) {
560         case ANEURALNETWORKS_PRIORITY_LOW:
561             return hal::Priority::LOW;
562         case ANEURALNETWORKS_PRIORITY_MEDIUM:
563             return hal::Priority::MEDIUM;
564         case ANEURALNETWORKS_PRIORITY_HIGH:
565             return hal::Priority::HIGH;
566     }
567     LOG(FATAL) << "unrecognized priority: " << priority;
568     return {};
569 }
570 
// The function syncWait() has the same semantics as the system function
// ::sync_wait(), except that the syncWait() return value is semantically
// richer.  The timeout parameter is in msecs.
enum class FenceState {
    ACTIVE,    // fence has not been signaled
    SIGNALED,  // fence has been signaled
    ERROR,     // fence has been placed in the error state
    UNKNOWN,   // either bad argument passed to syncWait(), or internal error
};
// Waits up to `timeout` msecs for the sync fence `fd` and reports its final
// state.  (Presumably a negative timeout waits forever, matching ::sync_wait —
// confirm against the implementation.)
FenceState syncWait(int fd, int timeout);

#ifdef NN_DEBUGGABLE
// Returns the value of the debug property named `str`, or `defaultValue` if it
// is not set (see Utils.cpp for the exact property source).  Only available in
// debuggable builds.
uint32_t getProp(const char* str, uint32_t defaultValue = 0);
#endif  // NN_DEBUGGABLE
585 
586 }  // namespace nn
587 }  // namespace android
588 
589 #endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
590