1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /**
18  * @addtogroup NeuralNetworks
19  * @{
20  */
21 
22 /**
23  * @file NeuralNetworks.h
24  */
25 
26 #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
27 #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
28 
29 /******************************************************************
30  *
31  * IMPORTANT NOTICE:
32  *
33  *   This file is part of Android's set of stable system headers
34  *   exposed by the Android NDK (Native Development Kit).
35  *
36  *   Third-party source AND binary code relies on the definitions
37  *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
38  *
39  *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
40  *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
41  *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
42  *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
43  */
44 
45 #include <android/hardware_buffer.h>
46 #include <stdbool.h>
47 #include <stddef.h>
48 #include <stdint.h>
49 #include <sys/cdefs.h>
50 
51 __BEGIN_DECLS
52 
53 /**
54  * Operand types.
55  *
56  * The type of an operand in a model.
57  *
58  * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors
59  * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent
60  * scalar values and must have no dimensions.
61  *
62  * Although we define many types, most operators accept just a few
63  * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32},
64  * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
65  * and {@link ANEURALNETWORKS_INT32}.
66  *
67  * Available since API level 27.
68  */
69 typedef enum {
70     /** A 32 bit floating point scalar value. */
71     ANEURALNETWORKS_FLOAT32 = 0,
72     /** A signed 32 bit integer scalar value. */
73     ANEURALNETWORKS_INT32 = 1,
74     /** An unsigned 32 bit integer scalar value. */
75     ANEURALNETWORKS_UINT32 = 2,
76     /** A tensor of 32 bit floating point values. */
77     ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
78     /** A tensor of 32 bit integer values. */
79     ANEURALNETWORKS_TENSOR_INT32 = 4,
80     /**
81      * A tensor of 8 bit unsigned integers that represent real numbers.
82      *
83      * Attached to this tensor are two numbers that can be used to convert the
84      * 8 bit integer to the real value and vice versa. These two numbers are:
85      * - scale: a 32 bit floating point value greater than zero.
86      * - zeroPoint: a 32 bit integer, in range [0, 255].
87      *
88      * The formula is:
89      *   real_value = (integer_value - zeroPoint) * scale.
90      */
91     ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
92     /**
93      * An 8 bit boolean scalar value.
94      *
95      * Values of this operand type are either true or false. A zero value
96      * represents false; any other value represents true.
97      *
98      * Available since API level 29.
99      */
100     ANEURALNETWORKS_BOOL = 6,
101     /**
102      * A tensor of 16 bit signed integers that represent real numbers.
103      *
104      * Attached to this tensor is a number representing real value scale that is
105      * used to convert the 16 bit number to a real value in the following way:
106      * realValue = integerValue * scale.
107      *
108      * scale is a 32 bit floating point with value greater than zero.
109      *
110      * Available since API level 29.
111      */
112     ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
113     /**
114      * A tensor of IEEE 754 16 bit floating point values.
115      *
116      * Available since API level 29.
117      */
118     ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
119     /**
120      * A tensor of 8 bit boolean values.
121      *
122      * Values of this operand type are either true or false. A zero value
123      * represents false; any other value represents true.
124      *
125      * Available since API level 29.
126      */
127     ANEURALNETWORKS_TENSOR_BOOL8 = 9,
128     /**
129      * An IEEE 754 16 bit floating point scalar value.
130      *
131      * Available since API level 29.
132      */
133     ANEURALNETWORKS_FLOAT16 = 10,
134     /**
135      * A tensor of 8 bit signed integers that represent real numbers.
136      *
137      * This tensor is associated with additional fields that can
138      * be used to convert the 8 bit signed integer to the real value and vice versa.
139      * These fields are:
140      * - channelDim: a 32 bit unsigned integer indicating channel dimension.
141      * - scales: an array of positive 32 bit floating point values.
142      * The size of the scales array must be equal to dimensions[channelDim].
143      *
144      * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used
145      * to set the parameters for an Operand of this type.
146      *
147      * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
148      *
149      * The formula is:
150      * realValue[..., C, ...] =
151      *     integerValue[..., C, ...] * scales[C]
152      * where C is an index in the Channel dimension.
153      *
154      * Available since API level 29.
155      */
156     ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
157     /**
158      * A tensor of 16 bit unsigned integers that represent real numbers.
159      *
160      * Attached to this tensor are two numbers that can be used to convert the
161      * 16 bit integer to the real value and vice versa. These two numbers are:
162      * - scale: a 32 bit floating point value greater than zero.
163      * - zeroPoint: a 32 bit integer, in range [0, 65535].
164      *
165      * The formula is:
166      * real_value = (integer_value - zeroPoint) * scale.
167      *
168      * Available since API level 29.
169      */
170     ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12,
171     /**
172      * A tensor of 8 bit signed integers that represent real numbers.
173      *
174      * Attached to this tensor is a number representing real value scale that is
175      * used to convert the 8 bit number to a real value in the following way:
176      * realValue = integerValue * scale.
177      *
178      * scale is a 32 bit floating point with value greater than zero.
179      *
180      * Available since API level 29.
181      */
182     ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
183     /**
184      * A tensor of 8 bit signed integers that represent real numbers.
185      *
186      * Attached to this tensor are two numbers that can be used to convert the
187      * 8 bit integer to the real value and vice versa. These two numbers are:
188      * - scale: a 32 bit floating point value greater than zero.
189      * - zeroPoint: a 32 bit integer, in range [-128, 127].
190      *
191      * The formula is:
192      * real_value = (integer_value - zeroPoint) * scale.
193      *
194      * Available since API level 30.
195      */
196     ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
197 
198     /**
199      * A reference to a model.
200      *
201      * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set
202      * the value for an Operand of this type.
203      *
204      * Available since API level 30.
205      */
206     ANEURALNETWORKS_MODEL = 15,
207 } OperandCode;
208 
209 /**
210  * Operation types.
211  *
212  * The type of an operation in a model.
213  *
214  * Available since API level 27.
215  */
216 typedef enum {
217     // Operations below are available since API level 27.
218 
219     /**
220      * Adds two tensors, element-wise.
221      *
222      * Takes two input tensors of identical {@link OperandCode} and compatible
223      * dimensions. The output is the sum of both input tensors, optionally
224      * modified by an activation function.
225      *
226      * Two dimensions are compatible when:
227      *     1. they are equal, or
228      *     2. one of them is 1
229      *
230      * The size of the output is the maximum size along each dimension of the
231      * input operands. It starts with the trailing dimensions, and works its
232      * way forward.
233      *
234      * Example:
235      *
236      *     input1.dimension = {4, 1, 2}
237      *     input2.dimension = {5, 4, 3, 1}
238      *     output.dimension = {5, 4, 3, 2}
239      *
240      * Since API level 29, generic zero-sized input tensor is supported. Zero
241      * dimension is only compatible with 0 or 1. The size of the output
242      * dimension is zero if either of corresponding input dimension is zero.
243      *
244      * Supported tensor {@link OperandCode}:
245      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
246      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
247      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
248      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
249      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)
250      *
251      * Supported tensor rank: up to 4
252      *
253      * Inputs:
254      * * 0: A tensor.
255      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
256      *      as input0.
257      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
258      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
259      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
260      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
261      *      {@link FuseCode} values. Specifies the activation to
262      *      invoke on the result.
263      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
264      *      the {@link FuseCode} must be "NONE".
265      *
266      * Outputs:
267      * * 0: The sum, a tensor of the same {@link OperandCode} as input0.
268      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
269      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
270      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
271      *
272      * Available since API level 27.
273      */
274     ANEURALNETWORKS_ADD = 0,
275 
276     /**
277      * Performs a 2-D average pooling operation.
278      *
279      * The output dimensions are functions of the filter dimensions, stride, and
280      * padding.
281      *
282      * The values in the output tensor are computed as:
283      *
284      *     output[b, i, j, channel] =
285      *         sum_{di, dj}(
286      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
287      *         ) / sum(1)
288      *
289      * Supported tensor {@link OperandCode}:
290      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
291      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
292      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
293      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
294      *
295      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
296      * With the default data layout NHWC, the data is stored in the order of:
297      * [batch, height, width, channels]. Alternatively, the data layout could
298      * be NCHW, the data storage order of: [batch, channels, height, width].
299      * NCHW is supported since API level 29.
300      *
301      * Both explicit padding and implicit padding are supported.
302      *
303      * Inputs (explicit padding):
304      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
305      *      the input.
306      *      Since API level 29, zero batches is supported for this tensor.
307      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
308      *      the left, in the ‘width’ dimension.
309      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
310      *      the right, in the ‘width’ dimension.
311      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
312      *      the top, in the ‘height’ dimension.
313      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
314      *      the bottom, in the ‘height’ dimension.
315      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
316      *      walking through input in the ‘width’ dimension.
317      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
318      *      walking through input in the ‘height’ dimension.
319      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
320      *      width.
321      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
322      *      height.
323      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
324      *      {@link FuseCode} values. Specifies the activation to
325      *      invoke on the result.
326      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
327      *       Set to true to specify NCHW data layout for input0 and output0.
328      *       Available since API level 29.
329      *
330      * Inputs (implicit padding):
331      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
332      *      the input.
333      *      Since API level 29, zero batches is supported for this tensor.
334      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
335      *      padding scheme, has to be one of the
336      *      {@link PaddingCode} values.
337      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
338      *      walking through input in the ‘width’ dimension.
339      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
340      *      walking through input in the ‘height’ dimension.
341      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
342      *      width.
343      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
344      *      height.
345      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
346      *      {@link FuseCode} values. Specifies the activation to
347      *      invoke on the result.
348      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
349      *      Set to true to specify NCHW data layout for input0 and output0.
350      *      Available since API level 29.
351      *
352      * Outputs:
353      * * 0: The output 4-D tensor, of shape
354      *      [batches, out_height, out_width, depth].
355      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
356      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
357      *      the scale and zeroPoint must be the same as input0.
358      *
359      * Available since API level 27.
360      */
361     ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
362 
363     /**
364      * Concatenates the input tensors along the given dimension.
365      *
366      * The input tensors must have identical {@link OperandCode} and the same
367      * dimensions except the dimension along the concatenation axis.
368      *
369      * Supported tensor {@link OperandCode}:
370      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
371      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
372      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
373      *   (full support since API level 29, see the input section)
374      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
375      *
376      * Supported tensor rank: up to 4
377      *
378      * Inputs:
379      * * 0 ~ n-1: The list of n input tensors, of shape
380      *            [D0, D1, ..., Daxis(i), ..., Dm].
381      *            Before API level 29, all input tensors of
382      *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
383      *            must have the same scale and zeroPoint as the output tensor.
384      *            Input tensors of
385      *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
386      *            are allowed to have different scale and zeroPoint.
387      *            Since API level 29, zero-sized tensors are supported.
388      * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
389      *      concatenation axis.
390      *
391      * Outputs:
392      * * 0: The output, a tensor of the same {@link OperandCode} as the input
393      *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
394      *      Since API level 29, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
395      *      the scale and zeroPoint values can be different from
396      *      input tensors. Before API level 29 they have to be the same as for the input tensors.
397      *
398      * Available since API level 27.
399      */
400     ANEURALNETWORKS_CONCATENATION = 2,
401 
402     /**
403      * Performs a 2-D convolution operation.
404      *
405      * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
406      * batch of images, applying the filter to each window of each image of the
407      * appropriate size.
408      *
409      * The output dimensions are functions of the filter dimensions, stride, and
410      * padding.
411      *
412      * The values in the output tensor are computed as:
413      *
414      *     output[b, i, j, channel] =
415      *         sum_{di, dj, k} (
416      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
417      *             filter[channel, di, dj, k]
418      *         ) + bias[channel]
419      *
420      * Supported tensor {@link OperandCode} configurations:
421      * * 32 bit floating point:
422      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
423      *
424      * * Quantized:
425      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
426      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
427      * * * input.scale * filter.scale).
428      *
429      * Available since API level 29:
430      * * 16 bit floating point:
431      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
432      *
433      * * Quantized with symmetric per channel quantization for the filter:
434      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
435      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
436      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
437      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
438      *
439      * Available since API level 30:
440      * * Quantized signed (since API level 30):
441      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
442      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
443      * * * input.scale * filter.scale).
444      *
445      * * Quantized signed with filter symmetric per channel quantization (since API level 30):
446      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
447      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
448      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
449      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
450      *
451      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
452      * With the default data layout NHWC, the data is stored in the order of:
453      * [batch, height, width, channels]. Alternatively, the data layout could
454      * be NCHW, the data storage order of: [batch, channels, height, width].
455      * NCHW is supported since API level 29.
456      *
457      * Both explicit padding and implicit padding are supported.
458      *
459      * Inputs (explicit padding):
460      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
461      *      specifying the input.
462      *      Since API level 29, zero batches is supported for this tensor.
463      * * 1: A 4-D tensor, of shape
464      *      [depth_out, filter_height, filter_width, depth_in], specifying the
465      *      filter.
466      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
467      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
468      *      must be set to 0.
469      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
470      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
471      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
472      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
473      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
474      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
475      *      of 0 and bias_scale == input_scale * filter_scale.
476      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
477      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
478      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
479      *      bias_scale[i] = input_scale * filter_scale[i].
480      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
481      *      the left, in the ‘width’ dimension.
482      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
483      *      the right, in the ‘width’ dimension.
484      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
485      *      the top, in the ‘height’ dimension.
486      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
487      *      the bottom, in the ‘height’ dimension.
488      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
489      *      walking through input in the ‘width’ dimension.
490      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
491      *      walking through input in the ‘height’ dimension.
492      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
493      *      {@link FuseCode} values. Specifies the activation to
494      *      invoke on the result.
495      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
496      *      Set to true to specify NCHW data layout for input0 and output0.
497      *      Available since API level 29.
498      * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
499      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
500      *      cells between each filter element on width dimension. If this input is set,
501      *      input 12 (dilation factor for height) must be specified as well.
502      *      Available since API level 29.
503      * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
504      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
505      *      cells between each filter element on height dimension. If this input is set,
506      *      input 11 (dilation factor for width) must be specified as well.
507      *      Available since API level 29.
508      *
509      * Inputs (implicit padding):
510      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
511      *      specifying the input.
512      *      Since API level 29, zero batches is supported for this tensor.
513      * * 1: A 4-D tensor, of shape
514      *      [depth_out, filter_height, filter_width, depth_in], specifying the
515      *      filter.
516      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
517      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
518      *      must be set to 0.
519      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
520      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
521      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same
522      *      type.
523      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
524      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
525      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
526      *      of 0 and bias_scale == input_scale * filter_scale.
527      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
528      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
529      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
530      *      bias_scale[i] = input_scale * filter_scale[i].
531      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
532      *      padding scheme, has to be one of the
533      *      {@link PaddingCode} values.
534      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
535      *      walking through input in the ‘width’ dimension.
536      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
537      *      walking through input in the ‘height’ dimension.
538      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
539      *      {@link FuseCode} values. Specifies the activation to
540      *      invoke on the result.
541      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
542      *      Set to true to specify NCHW data layout for input0 and output0.
543      *      Available since API level 29.
544      * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
545      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
546      *      cells between each filter element on width dimension. If this input is set,
547      *      input 9 (dilation factor for height) must be specified as well.
548      *      Available since API level 29.
549      * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
550      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
551      *      cells between each filter element on height dimension. If this input is set,
552      *      input 8 (dilation factor for width) must be specified as well.
553      *      Available since API level 29.
554      *
555      * Outputs:
556      * * 0: The output 4-D tensor, of shape
557      *      [batches, out_height, out_width, depth_out].
558      *      Before API level 29, for output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
559      *      the following condition must be satisfied: output_scale > input_scale * filter_scale
560      *
561      * Available since API level 27.
562      */
563     ANEURALNETWORKS_CONV_2D = 3,
564 
565     /**
566      * Performs a depthwise 2-D convolution operation.
567      *
568      * Given an input tensor of shape [batches, height, width, depth_in] and a
569      * filter tensor of shape [1, filter_height, filter_width, depth_out]
570      * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
571      * applies a different filter to each input channel (expanding from 1
572      * channel to channel_multiplier channels for each), then concatenates the
573      * results together.
574      *
575      * The output has depth_out = depth_in * depth_multiplier channels.
576      * The output dimensions are functions of the filter dimensions, stride, and
577      * padding.
578      *
579      * The values in the output tensor are computed as:
580      *
581      *     output[b, i, j, k * channel_multiplier + q] =
582      *         sum_{di, dj} (
583      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
584      *             filter[1, di, dj, k * channel_multiplier + q]
585      *         ) + bias[k * channel_multiplier + q]
586      *
587      * Supported tensor {@link OperandCode} configurations:
588      * * 32 bit floating point:
589      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
590      *
591      * * Quantized:
592      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
593      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
594      * * * input.scale * filter.scale).
595      *
596      * Available since API level 29:
597      * * 16 bit floating point:
598      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
599      *
600      * * Quantized with symmetric per channel quantization for the filter:
601      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
602      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
603      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
604      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
605      *
606      * Available since API level 30:
607      * * Quantized signed (since API level 30):
608      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
609      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
610      * * * input.scale * filter.scale).
611      *
612      * * Quantized signed with filter symmetric per channel quantization (since API level 30):
613      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
614      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
615      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
616      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
617      *
618      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
619      * With the default data layout NHWC, the data is stored in the order of:
620      * [batch, height, width, channels]. Alternatively, the data layout could
621      * be NCHW, the data storage order of: [batch, channels, height, width].
622      * NCHW is supported since API level 29.
623      *
624      * Both explicit padding and implicit padding are supported.
625      *
626      * Inputs (explicit padding):
627      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
628      *      specifying the input.
629      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
630      *      specifying the filter.
631      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
632      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
633      *      must be set to 3.
634      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
635      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
636      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
637      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
638      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
639      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
640      *      of 0 and bias_scale == input_scale * filter_scale.
641      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
642      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
643      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
644      *      bias_scale[i] = input_scale * filter_scale[i].
645      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
646      *      the left, in the ‘width’ dimension.
647      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
648      *      the right, in the ‘width’ dimension.
649      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
650      *      the top, in the ‘height’ dimension.
651      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
652      *      the bottom, in the ‘height’ dimension.
653      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
654      *      walking through input in the ‘width’ dimension.
655      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
656      *      walking through input in the ‘height’ dimension.
657      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
658      *      multiplier.
659      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
660      *       {@link FuseCode} values. Specifies the activation to
661      *       invoke on the result.
662      * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
663      *       Set to true to specify NCHW data layout for input0 and output0.
664      *       Available since API level 29.
665      * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
666      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
667      *      cells between each filter element on width dimension. If this input is set,
668      *      input 13 (dilation factor for height) must be specified as well.
669      *      Available since API level 29.
670      * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
671      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
672      *      cells between each filter element on height dimension. If this input is set,
673      *      input 12 (dilation factor for width) must be specified as well.
674      *      Available since API level 29.
675      *
676      * Inputs (implicit padding):
677      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
678      *      specifying the input.
679      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
680      *      specifying the filter.
681      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
682      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
683      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
684      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
685      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
686      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
687      *      of 0 and bias_scale == input_scale * filter_scale.
688      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
689      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
690      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
691      *      bias_scale[i] = input_scale * filter_scale[i].
692      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
693      *      padding scheme, has to be one of the
694      *      {@link PaddingCode} values.
695      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
696      *      walking through input in the ‘width’ dimension.
697      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
698      *      walking through input in the ‘height’ dimension.
699      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
700      *      multiplier.
701      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
702      *      {@link FuseCode} values. Specifies the activation to
703      *      invoke on the result.
704      * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
705      *      Set to true to specify NCHW data layout for input0 and output0.
706      *      Available since API level 29.
707      * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
708      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
709      *      cells between each filter element on width dimension. If this input is set,
710      *      input 10 (dilation factor for height) must be specified as well.
711      *      Available since API level 29.
712      * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
713      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
714      *      cells between each filter element on height dimension. If this input is set,
715      *      input 9 (dilation factor for width) must be specified as well.
716      *      Available since API level 29.
717      *
718      * Outputs:
719      * * 0: The output 4-D tensor, of shape
720      *      [batches, out_height, out_width, depth_out]. Before API level 29, for
721      *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
722      *      the following condition must be satisfied:
723      *      output_scale > input_scale * filter_scale
724      *
725      * Available since API level 27.
726      */
727     ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
728 
729     /**
730      * Rearranges data from depth into blocks of spatial data.
731      *
732      * More specifically, this op outputs a copy of the input tensor where
733      * values from the depth dimension are moved in spatial blocks to the height
734      * and width dimensions. The value block_size indicates the input block size
735      * and how the data is moved.
736      *
737      * Chunks of data of size block_size * block_size from depth are rearranged
738      * into non-overlapping blocks of size block_size x block_size.
739      *
740      * The width of the output tensor is input_depth * block_size, whereas the
741      * height is input_height * block_size. The depth of the input tensor must
742      * be divisible by block_size * block_size
743      *
744      * Supported tensor {@link OperandCode}:
745      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
746      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
747      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
748      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
749      *
750      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
751      * With the default data layout NHWC, the data is stored in the order of:
752      * [batch, height, width, channels]. Alternatively, the data layout could
753      * be NCHW, the data storage order of: [batch, channels, height, width].
754      * NCHW is supported since API level 29.
755      *
756      * Inputs:
757      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
758      *      specifying the input.
759      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
760      *      block_size must be >=1 and block_size * block_size must be a divisor
761      *      of the input depth.
762      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
763      *      Set to true to specify NCHW data layout for input0 and output0.
764      *      Available since API level 29.
765      *
766      * Outputs:
767      * * 0: The output 4-D tensor, of shape [batch, height*block_size,
768      *      width*block_size, depth/(block_size*block_size)].
769      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
770      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
771      *      the scale and zeroPoint must be the same as input0.
772      *
773      * Available since API level 27.
774      */
775     ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
776 
777     /**
778      * Dequantizes the input tensor.
779      *
780      * The formula is:
781      *
782      *     output = (input - zeroPoint) * scale.
783      *
784      * Supported input tensor {@link OperandCode}:
785      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
786      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since API level 29)
787      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since API level 29)
788      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
789      *
790      * Supported output tensor {@link OperandCode}:
791      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
792      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
793      *
794      * Supported tensor rank: up to 4
795      *
796      * Inputs:
797      * * 0: A tensor.
798      *      Since API level 29, this tensor may be zero-sized.
799      *
800      * Outputs:
801      * * 0: A tensor with the same shape as input0.
802      *
803      * Available since API level 27.
804      */
805     ANEURALNETWORKS_DEQUANTIZE = 6,
806 
807     /**
808      * Looks up sub-tensors in the input tensor.
809      *
810      * This operator takes for input a tensor of values (Values) and
811      * a one-dimensional tensor of selection indices (Lookups).
812      * The output tensor is the concatenation of sub-tensors of Values as
813      * selected by Lookups.
814      *
815      * Think of Values as being sliced along its first dimension:
816      * The entries in Lookups select which slices are concatenated together
817      * to create the output tensor.
818      *
819      * For example, if Values has shape of [40, 200, 300] and
820      * Lookups has shape of [3], all three values found in Lookups are
821      * expected to be between 0 and 39. The resulting tensor must
822      * have shape of [3, 200, 300].
823      *
824      * If a value in Lookups is out of bounds, the operation must fail
825      * and an error must be reported.
826      *
827      * Supported value tensor {@link OperandCode}:
828      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 30)
829      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
830      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 29)
831      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
832      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
833      *
834      * Supported value tensor rank: from 2
835      *
836      * Inputs:
837      * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
838      *      The values are indices into the first dimension of Values.
839      * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
840      *      extracted.
841      *
842      * Output:
843      * * 0: A n-D tensor with the same rank and shape as the Values
844      *      tensor, except for the first dimension which has the same size
845      *      as Lookups' only dimension.
846      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
847      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
848      *      the scale and zeroPoint must be the same as input1.
849      *
850      * Available since API level 27.
851      */
852     ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
853 
854     /**
855      * Computes element-wise floor() on the input tensor.
856      *
857      * Supported tensor {@link OperandCode}:
858      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
859      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
860      *
861      * Supported tensor rank: up to 4
862      *
863      * Inputs:
864      * * 0: A tensor.
865      *
866      * Outputs:
867      * * 0: The output tensor, of the same {@link OperandCode} and dimensions as
868      *      the input tensor.
869      *
870      * Available since API level 27.
871      */
872     ANEURALNETWORKS_FLOOR = 8,
873 
874     /**
875      * Denotes a fully (densely) connected layer, which connects all elements
876      * in the input tensor with each element in the output tensor.
877      *
878      * This layer implements the operation:
879      *
880      *     outputs = activation(inputs * weights’ + bias)
881      *
882      * Supported tensor {@link OperandCode}:
883      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
884      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
885      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
886      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
887      *
888      * Supported tensor rank: up to 4.
889      *
890      * Inputs:
891      * * 0: A tensor of at least rank 2, specifying the input. If rank is
892      *      greater than 2, then it gets flattened to a 2-D Tensor. The
893      *      (flattened) 2-D Tensor is reshaped (if necessary) to
894      *      [batch_size, input_size], where "input_size" corresponds to the
895      *      number of inputs to the layer, matching the second dimension of
896      *      weights, and "batch_size" is calculated by dividing the number of
897      *      elements by "input_size".
898      *      Since API level 29, zero batch_size is supported for this tensor.
899      * * 1: A 2-D tensor, specifying the weights, of shape
900      *      [num_units, input_size], where "num_units" corresponds to the number
901      *      of output nodes.
902      * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
903      *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
904      *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
905      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
906      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
907      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
908      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
909      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
910      *      {@link FuseCode} values. Specifies the activation to
911      *      invoke on the result.
912      *
913      * Outputs:
914      * * 0: The output tensor, of shape [batch_size, num_units]. Before API level 29, for
915      *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
916      *      condition must be satisfied: output_scale > input_scale * filter_scale.
917      *
918      * Available since API level 27.
919      */
920     ANEURALNETWORKS_FULLY_CONNECTED = 9,
921 
922     /**
923      * Looks up sub-tensors in the input tensor using a key-value map.
924      *
925      * This operator takes for input a tensor of values (Values),
926      * a one-dimensional tensor of selection values (Lookups) and
927      * a one-dimensional tensor that maps these values to Values
928      * indexes. The output tensor is the concatenation of sub-tensors of
929      * Values as selected by Lookups via Keys.
930      *
931      * Think of Values as being sliced along its outer-most dimension.
932      * The output is a concatenation of selected slices, with one slice
933      * for each entry of Lookups. The slice selected is the one at the
934      * same index as the Maps entry that matches the value in Lookups.
935      *
936      * For a hit, the corresponding sub-tensor of Values is included
937      * in the Output tensor. For a miss, the corresponding sub-tensor in
938      * Output must have zero values.
939      *
940      * For example, if Values has shape of [40, 200, 300],
941      * Keys should have a shape of [40]. If Lookups tensor has shape
942      * of [3], three slices are being concatenated, so the resulting tensor
943      * must have the shape of [3, 200, 300]. If the first entry in Lookups
944      * has the value 123456, that value must be located in Keys tensor.
945      * If the sixth entry of Keys contains 123456, the sixth slice of Values
946      * must be selected. If no entry in Keys has 123456, a slice of zeroes
947      * must be concatenated.
948      *
949      * Supported value tensor {@link OperandCode}:
950      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
951      * * {@link ANEURALNETWORKS_TENSOR_INT32}
952      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
953      *
954      * Supported value tensor rank: from 2
955      *
956      * Inputs:
957      * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with
958      *      shape [ k ].
959      * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
960      *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
961      *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
962      *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
963      *      ascending order.
964      * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
965      *      must be n.
966      *
967      * Outputs:
968      * * 0: Output. A tensor with shape [ k …].
969      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
970      *      the scale and zeroPoint must be the same as input2.
971      * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
972      *      hits (True) or not (False).
973      *      Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0
974      *      and scale 1.0f.
975      *      A non-zero byte represents True, a hit. A zero indicates otherwise.
976      *
977      * Available since API level 27.
978      */
979     ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
980 
981     /**
982      * Applies L2 normalization along the axis dimension.
983      *
984      * The values in the output tensor are computed as:
985      *
986      *     output[batch, row, col, channel] =
987      *         input[batch, row, col, channel] /
988      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
989      *
990      * By default the axis dimension is the last dimension of the input tensor.
991      *
992      * Supported tensor {@link OperandCode}:
993      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
994      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
995      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
996      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
997      *
998      * Supported tensor rank: up to 4
999      * Tensors with rank less than 4 are only supported since API level 29.
1000      *
1001      * Inputs:
1002      * * 0: An n-D tensor, specifying the tensor to be normalized.
1003      * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1004      *      specifying the dimension normalization would be performed on.
1005      *      Negative index is used to specify axis from the end (e.g. -1 for
1006      *      the last axis). Must be in the range [-n, n).
1007      *      Available since API level 29.
1008      *
1009      * Outputs:
1010      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
1011      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1012      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
1013      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1014      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
1015      *
1016      *      NOTE: Before API level 30, if the elements along an axis are all zeros,
1017      *      the result is undefined. Since API level 30, if the elements along an axis
1018      *      are all zeros, the result is logical zero.
1019      *
1020      * Available since API level 27.
1021      */
1022     ANEURALNETWORKS_L2_NORMALIZATION = 11,
1023 
1024     /**
1025      * Performs an 2-D L2 pooling operation.
1026      *
1027      * The output dimensions are functions of the filter dimensions, stride, and
1028      * padding.
1029      *
1030      * The values in the output tensor are computed as:
1031      *
1032      *     output[b, i, j, c] =
1033      *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
1034      *              sum(1))
1035      *
1036      * Supported tensor {@link OperandCode}:
1037      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1038      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1039      *
1040      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1041      * With the default data layout NHWC, the data is stored in the order of:
1042      * [batch, height, width, channels]. Alternatively, the data layout could
1043      * be NCHW, the data storage order of: [batch, channels, height, width].
1044      * NCHW is supported since API level 29.
1045      *
1046      * Both explicit padding and implicit padding are supported.
1047      *
1048      * Inputs (explicit padding):
1049      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1050      *      the input.
1051      *      Since API level 29, zero batches is supported for this tensor.
1052      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1053      *      the left, in the ‘width’ dimension.
1054      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1055      *      the right, in the ‘width’ dimension.
1056      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1057      *      the top, in the ‘height’ dimension.
1058      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1059      *      the bottom, in the ‘height’ dimension.
1060      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1061      *      walking through input in the ‘width’ dimension.
1062      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1063      *      walking through input in the ‘height’ dimension.
1064      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1065      *      width.
1066      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1067      *      height.
1068      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1069      *      {@link FuseCode} values. Specifies the activation to
1070      *      invoke on the result.
1071      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1072      *       Set to true to specify NCHW data layout for input0 and output0.
1073      *       Available since API level 29.
1074      *
1075      * Inputs (implicit padding):
1076      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1077      *      the input.
1078      *      Since API level 29, zero batches is supported for this tensor.
1079      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1080      *      padding scheme, has to be one of the
1081      *      {@link PaddingCode} values.
1082      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1083      *      walking through input in the ‘width’ dimension.
1084      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1085      *      walking through input in the ‘height’ dimension.
1086      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1087      *      width.
1088      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1089      *      height.
1090      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1091      *      {@link FuseCode} values. Specifies the activation to
1092      *      invoke on the result.
1093      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1094      *      Set to true to specify NCHW data layout for input0 and output0.
1095      *      Available since API level 29.
1096      *
1097      * Outputs:
1098      * * 0: The output 4-D tensor, of shape
1099      *      [batches, out_height, out_width, depth].
1100      *
1101      * Available since API level 27.
1102      */
1103     ANEURALNETWORKS_L2_POOL_2D = 12,
1104 
1105     /**
1106      * Applies Local Response Normalization along the depth dimension.
1107      *
1108      * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
1109      * last dimension), and each vector is normalized independently. Within a
1110      * given vector, each component is divided by the weighted, squared sum of
1111      * inputs within depth_radius.
1112      *
1113      * The output is calculated using this formula:
1114      *
1115      *     sqr_sum[a, b, c, d] = sum(
1116      *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
1117      *     output = input / pow((bias + alpha * sqr_sum), beta)
1118      *
1119      * For input tensor with rank less than 4, independently normalizes each
1120      * 1-D slice along specified dimension.
1121      *
1122      * Supported tensor {@link OperandCode}:
1123      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1124      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1125      *
1126      * Supported tensor rank: up to 4
1127      * Tensors with rank less than 4 are only supported since API level 29.
1128      *
1129      * Inputs:
1130      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1131      *      the input.
1132      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of
1133      *      the normalization window.
1134      * * 2: A scalar, specifying the bias, must not be zero.
1135      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias
1136      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1137      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias
1138      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1139      * * 3: A scalar, specifying the scale factor, alpha.
1140      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
1141      *      alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
1142      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
1143      *      alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
1144      * * 4: A scalar, specifying the exponent, beta.
1145      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
1146      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1147      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
1148      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1149      * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1150      *      specifying the dimension normalization would be performed on.
1151      *      Negative index is used to specify axis from the end (e.g. -1 for
1152      *      the last axis). Must be in the range [-n, n).
1153      *      Available since API level 29.
1154      *
1155      * Outputs:
1156      * * 0: The output tensor of same shape as input0.
1157      *
1158      * Available since API level 27.
1159      */
1160     ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
1161 
1162     /**
1163      * Computes sigmoid activation on the input tensor element-wise.
1164      *
1165      * The output is calculated using this formula:
1166      *
1167      *     output = 1 / (1 + exp(-input))
1168      *
1169      * Supported tensor {@link OperandCode}:
1170      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1171      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1172      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1173      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1174      *
1175      * Supported tensor rank: up to 4.
1176      *
1177      * Inputs:
1178      * * 0: A tensor, specifying the input.
1179      *      Since API level 29, this tensor may be zero-sized.
1180      *
1181      * Outputs:
1182      * * 0: The output tensor of same shape as input0.
1183      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1184      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1185      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1186      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1187      *
1188      * Available since API level 27.
1189      */
1190     ANEURALNETWORKS_LOGISTIC = 14,
1191 
1192     /**
1193      * Projects an input to a bit vector via locality senstive hashing.
1194      *
1195      * Supported input tensor {@link OperandCode}:
1196      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1197      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1198      * * {@link ANEURALNETWORKS_TENSOR_INT32}
1199      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1200      *
1201      * Supported input tensor rank: from 1
1202      *
1203      * Inputs:
1204      * * 0: Hash functions. Dim.size == 2, DataType: Float.
1205      *      Tensor[0].Dim[0]: Number of hash functions.
1206      *      Tensor[0].Dim[1]: Number of projected output bits generated by each
1207      *      hash function.
1208      *      If the projection type is Sparse:
1209      *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
1210      *
1211      * * 1: Input. Dim.size >= 1, no restriction on DataType.
1212      * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
1213      *      If not set, each input element is considered to have the same weight
1214      *      of 1.0.
1215      *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
1216      * * 3: Type:
1217      *        Sparse:
1218      *          Value LSHProjectionType_SPARSE(=3) (since API level 29).
1219      *          Computed bit vector is considered to be sparse.
1220      *          Each output element is an int32 made up of multiple bits
1221      *          computed from hash functions.
1222      *
1223      *          NOTE: To avoid collisions across hash functions, an offset value
1224      *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
1225      *          where k is the index of the hash function.
1226      *
1227      *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
1228      *          Legacy behavior that does not include the offset value.
1229      *
1230      *        Dense:
1231      *          Value LSHProjectionType_DENSE(=2).
1232      *          Computed bit vector is considered to be dense. Each output
1233      *          element represents a bit and can take the value of either
1234      *          0 or 1.
1235      *
1236      * Outputs:
1237      * * 0: If the projection type is Sparse:
1238      *      Output.Dim == { Tensor[0].Dim[0] }
1239      *      A tensor of int32 that represents hash signatures.
1240      *
1241      *      If the projection type is Dense:
1242      *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
1243      *      A flattened tensor that represents projected bit vectors.
1244      *
1245      * Available since API level 27.
1246      * The offset value for sparse projections was added in API level 29.
1247      */
1248     ANEURALNETWORKS_LSH_PROJECTION = 15,
1249 
1250     /**
1251      * Performs a single time step in a Long Short-Term Memory (LSTM) layer
1252      *
1253      * The LSTM operation is described by the following equations.
1254      *
1255      * \f{eqnarray*}{
1256      * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
1257      * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
1258      * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
1259      *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
1260      * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
1261      *      & & \\
1262      *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
1263      *      & if\ there\ is\ a\ projection; \\
1264      * h_t =& & \\
1265      *      & o_t \odot g(C_t) & otherwise. \\
1266      * \f}
1267      * Where:
1268      * * \f$x_t\f$ is the input,
1269      * * \f$i_t\f$ is the input gate,
1270      * * \f$f_t\f$ is the forget gate,
1271      * * \f$C_t\f$ is the cell state,
1272      * * \f$o_t\f$ is the output,
1273      * * \f$h_t\f$ is the output state,
1274      * * \f$\sigma\f$ is the logistic sigmoid function,
1275      * * \f$g\f$ is the cell input and cell output activation function, usually
1276      *   \f$tahn\f$,
1277      * * \f$W_{xi}\f$ is the input-to-input weight matrix,
1278      * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
1279      * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
1280      * * \f$b_i\f$ is the input gate bias,
1281      * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
1282      * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
1283      * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
1284      * * \f$b_f\f$ is the forget gate bias,
1285      * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
1286      * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
1287      * * \f$b_c\f$ is the cell bias,
1288      * * \f$W_{xo}\f$ is the input-to-output weight matrix,
1289      * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
1290      * * \f$W_{co}\f$ is the cell-to-output weight matrix,
1291      * * \f$b_o\f$ is the output gate bias,
1292      * * \f$W_{proj}\f$ is the projection weight matrix,
1293      * * \f$b_{proj}\f$ is the projection bias,
1294      * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
1295      * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
1296      * * \f$\odot\f$ is the
1297      *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
1298      *   Hadamard product</a> that takes two matrices and produces another
1299      *   matrix, each element of which is the product of the corresponding
1300      *   elements of the input matrices.
1301      *
1302      * Since API level 29 LSTM supports layer normalization.
1303      * In case layer normalization is used, the inputs to internal activation
1304      * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1305      * following an approach from section 3.1 from
1306      * https://arxiv.org/pdf/1607.06450.pdf
1307      *
1308      * The operation has the following independently optional inputs:
1309      * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1310      *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1311      *   have values or neither of them have values (i.e., all set to null). If
1312      *   they have values, the peephole optimization is used.
1313      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1314      *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1315      *   or none of them have values. If they have no values, coupling of input
1316      *   and forget gates (CIFG) is used, in which case the input gate
1317      *   (\f$i_t\f$) is calculated using the following equation instead.
1318      *   \f{eqnarray*}{
1319      *   i_t = 1 - f_t
1320      *   \f}
1321      *   In case peephole optimization is used and CIFG is not used
1322      *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
1323      *   cell-to-input weights must have no value.
1324      * * The projection weights (\f$W_{proj}\f$) is required only for the
1325      *   recurrent projection layer, and should otherwise have no value.
1326      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
1327      *   value if the recurrent projection layer exists, and should otherwise
1328      *   have no value.
1329      * * (API level 29 or later) The four layer normalization weights either all have
1330      *   values or none of them have values. Additionally, if CIFG is used,
1331      *   input layer normalization weights tensor is omitted and the other layer
1332      *   normalization weights either all have values or none of them have
1333      *   values. Layer normalization is used when the values of all the layer
1334      *   normalization weights are present.
1335      *
1336      * References:
1337      *
1338      * The default non-peephole non-CIFG implementation is based on:
1339      * http://www.bioinf.jku.at/publications/older/2604.pdf
1340      * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1341      * Computation, 9(8):1735-1780, 1997.
1342      *
1343      * The peephole implementation and projection layer is based on:
1344      * https://research.google.com/pubs/archive/43905.pdf
1345      * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1346      * recurrent neural network architectures for large scale acoustic
1347      * modeling." INTERSPEECH, 2014.
1348      * (However, the concept of peephole optimization was introduced in work
1349      * prior to this paper.)
1350      *
1351      * The coupling of input and forget gate (CIFG) is based on:
1352      * http://arxiv.org/pdf/1503.04069.pdf
1353      * Greff et al. "LSTM: A Search Space Odyssey"
1354      *
1355      * The layer normalization is based on:
1356      * https://arxiv.org/pdf/1607.06450.pdf
1357      * Jimmy Ba et al. "Layer Normalization"
1358      *
1359      * Supported tensor {@link OperandCode}:
1360      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1361      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1362      *
1363      * All input and output tensors must be of the same type.
1364      *
1365      * Inputs:
1366      * * 0: The input (\f$x_t\f$).
1367      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1368      *      corresponds to the batching dimension, and “input_size” is the size
1369      *      of the input.
1370      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
1371      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1372      *      corresponds to the number of cell units.
1373      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
1374      *      A 2-D tensor of shape [num_units, input_size].
1375      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
1376      *      A 2-D tensor of shape [num_units, input_size].
1377      * * 4: The input-to-output weights (\f$W_{xo}\f$).
1378      *      A 2-D tensor of shape [num_units, input_size].
1379      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
1380      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
1381      *      corresponds to either the number of cell units (i.e., “num_units”),
1382      *      or the second dimension of the “projection_weights”, if defined.
1383      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
1384      *      A 2-D tensor of shape [num_units, output_size].
1385      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
1386      *      A 2-D tensor of shape [num_units, output_size].
1387      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
1388      *      A 2-D tensor of shape [num_units, output_size].
1389      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
1390      *      A 1-D tensor of shape [num_units].
1391      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
1392      *      A 1-D tensor of shape [num_units].
1393      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
1394      *      A 1-D tensor of shape [num_units].
1395      * * 12:The input gate bias (\f$b_i\f$). Optional.
1396      *      A 1-D tensor of shape [num_units].
1397      * * 13:The forget gate bias (\f$b_f\f$).
1398      *      A 1-D tensor of shape [num_units].
1399      * * 14:The cell bias (\f$b_c\f$).
1400      *      A 1-D tensor of shape [num_units].
1401      * * 15:The output gate bias (\f$b_o\f$).
1402      *      A 1-D tensor of shape [num_units].
1403      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
1404      *      A 2-D tensor of shape [output_size, num_units].
1405      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
1406      *      A 1-D tensor of shape [output_size].
1407      * * 18:The output state (in) (\f$h_{t-1}\f$).
1408      *      A 2-D tensor of shape [batch_size, output_size].
1409      * * 19:The cell state (in) (\f$C_{t-1}\f$).
1410      *      A 2-D tensor of shape [batch_size, num_units].
1411      * * 20:The activation function (\f$g\f$).
1412      *      A value indicating the activation function:
1413      *      <ul>
1414      *      <li>0: None;
1415      *      <li>1: Relu;
1416      *      <li>3: Relu6;
1417      *      <li>4: Tanh;
1418      *      <li>6: Sigmoid.
1419      *      </ul>
1420      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1421      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1422      *      then clipping is disabled.
1423      *      Until API level 29 this scalar must be of type {@link
1424      *      ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input
1425      *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1426      *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1427      *      otherwise if all the input tensors have the type {@link
1428      *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1429      *      ANEURALNETWORKS_FLOAT16}.
1430      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1431      *      projection layer, such that values are bound within
1432      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1433      *      Until API level 29 this scalar must be of type {@link
1434      *      ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input
1435      *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1436      *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1437      *      otherwise if all the input tensors have the type {@link
1438      *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1439      *      ANEURALNETWORKS_FLOAT16}.
1440      * Since API level 29 there are additional inputs to this op:
1441      * * 23:The input layer normalization weights.
1442      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1443      *      to activation at input gate.
1444      * * 24:The forget layer normalization weights.
1445      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1446      *      to activation at forget gate.
1447      * * 25:The cell layer normalization weights.
1448      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1449      *      to activation at cell gate.
1450      * * 26:The output layer normalization weights.
1451      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1452      *      to activation at output gate.
1453      *
1454      * Outputs:
1455      * * 0: The scratch buffer.
1456      *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1457      *      [batch_size, num_units * 4] without CIFG.
1458      * * 1: The output state (out) (\f$h_t\f$).
1459      *      A 2-D tensor of shape [batch_size, output_size].
1460      * * 2: The cell state (out) (\f$C_t\f$).
1461      *      A 2-D tensor of shape [batch_size, num_units].
1462      * * 3: The output (\f$o_t\f$).
1463      *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1464      *      the same as the current “output state (out)” value.
1465      *
1466      * Available since API level 27.
1467      */
1468     ANEURALNETWORKS_LSTM = 16,
1469 
1470     /**
1471      * Performs an 2-D max pooling operation.
1472      *
1473      * The output dimensions are functions of the filter dimensions, stride, and
1474      * padding.
1475      *
1476      * The values in the output tensor are computed as:
1477      *
1478      *     output[b, i, j, channel] =
1479      *         max_{di, dj} (
1480      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1481      *         )
1482      *
1483      * Supported tensor {@link OperandCode}:
1484      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1485      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1486      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1487      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1488      *
1489      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1490      * With the default data layout NHWC, the data is stored in the order of:
1491      * [batch, height, width, channels]. Alternatively, the data layout could
1492      * be NCHW, the data storage order of: [batch, channels, height, width].
1493      * NCHW is supported since API level 29.
1494      *
1495      * Both explicit padding and implicit padding are supported.
1496      *
1497      * Inputs (explicit padding):
1498      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1499      *      the input.
1500      *      Since API level 29, zero batches is supported for this tensor.
1501      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1502      *      the left, in the ‘width’ dimension.
1503      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1504      *      the right, in the ‘width’ dimension.
1505      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1506      *      the top, in the ‘height’ dimension.
1507      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1508      *      the bottom, in the ‘height’ dimension.
1509      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1510      *      walking through input in the ‘width’ dimension.
1511      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1512      *      walking through input in the ‘height’ dimension.
1513      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1514      *      width.
1515      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1516      *      height.
1517      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1518      *      {@link FuseCode} values. Specifies the activation to
1519      *      invoke on the result.
1520      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1521      *       Set to true to specify NCHW data layout for input0 and output0.
1522      *       Available since API level 29.
1523      *
1524      * Inputs (implicit padding):
1525      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1526      *      the input.
1527      *      Since API level 29, zero batches is supported for this tensor.
1528      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1529      *      padding scheme, has to be one of the
1530      *      {@link PaddingCode} values.
1531      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1532      *      walking through input in the ‘width’ dimension.
1533      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1534      *      walking through input in the ‘height’ dimension.
1535      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1536      *      width.
1537      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1538      *      height.
1539      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1540      *      {@link FuseCode} values. Specifies the activation to
1541      *      invoke on the result.
1542      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1543      *      Set to true to specify NCHW data layout for input0 and output0.
1544      *      Available since API level 29.
1545      *
1546      * Outputs:
1547      * * 0: The output 4-D tensor, of shape
1548      *      [batches, out_height, out_width, depth].
1549      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1550      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1551      *      the scale and zeroPoint must be the same as input0.
1552      *
1553      * Available since API level 27.
1554      */
1555     ANEURALNETWORKS_MAX_POOL_2D = 17,
1556 
1557     /**
1558      * Multiplies two tensors, element-wise.
1559      *
1560      * Takes two input tensors of identical {@link OperandCode} and compatible
1561      * dimensions. The output is the product of both input tensors, optionally
1562      * modified by an activation function.
1563      *
1564      * Two dimensions are compatible when:
1565      *     1. they are equal, or
1566      *     2. one of them is 1
1567      *
1568      * The size of the resulting output is the maximum size along each dimension
1569      * of the input operands. It starts with the trailing dimensions, and works
1570      * its way forward.
1571      *
1572      * Since API level 29, generic zero-sized input tensor is supported. Zero
1573      * dimension is only compatible with 0 or 1. The size of the output
1574      * dimension is zero if either of corresponding input dimension is zero.
1575      *
1576      * Supported tensor {@link OperandCode}:
1577      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1578      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1579      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1580      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1581      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)
1582      *
1583      * Supported tensor rank: up to 4
1584      *
1585      * Inputs:
1586      * * 0: A tensor.
1587      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
1588      *      as input0.
1589      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1590      *      {@link FuseCode} values. Specifies the activation to
1591      *      invoke on the result.
1592      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
1593      *      the {@link FuseCode} must be "NONE".
1594      *
1595      * Outputs:
1596      * * 0: The product, a tensor of the same {@link OperandCode} as input0.
1597      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1598      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1599      *      the following condition must be satisfied:
1600      *      output_scale > input1_scale * input2_scale.
1601      *
1602      * Available since API level 27.
1603      */
1604     ANEURALNETWORKS_MUL = 18,
1605 
1606     /**
1607      * Computes rectified linear activation on the input tensor element-wise.
1608      *
1609      * The output is calculated using this formula:
1610      *
1611      *     output = max(0, input)
1612      *
1613      * Supported tensor {@link OperandCode}:
1614      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1615      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1616      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1617      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1618      *
1619      * Supported tensor rank: up to 4.
1620      *
1621      * Inputs:
1622      * * 0: A tensor, specifying the input.
1623      *      Since API level 29, this tensor may be zero-sized.
1624      *
1625      * Outputs:
1626      * * 0: The output tensor of same shape as input0.
1627      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1628      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1629      *      the scale and zeroPoint must be the same as input0.
1630      *
1631      * Available since API level 27.
1632      */
1633     ANEURALNETWORKS_RELU = 19,
1634 
1635     /**
1636      * Computes rectified linear 1 activation on the input tensor element-wise.
1637      *
1638      * The output is calculated using this formula:
1639      *
1640      *     output = min(1.f, max(-1.f, input))
1641      *
1642      * Supported tensor {@link OperandCode}:
1643      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1644      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1645      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1646      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1647      *
1648      * Supported tensor rank: up to 4.
1649      *
1650      * Inputs:
1651      * * 0: A tensor, specifying the input.
1652      *      Since API level 29, this tensor may be zero-sized.
1653      *
1654      * Outputs:
1655      * * 0: The output tensor of the same shape as input0.
1656      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1657      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1658      *      the scale and zeroPoint must be the same as input0.
1659      *
1660      * Available since API level 27.
1661      */
1662     ANEURALNETWORKS_RELU1 = 20,
1663 
1664     /**
1665      * Computes rectified linear 6 activation on the input tensor element-wise.
1666      *
1667      * The output is calculated using this formula:
1668      *
1669      *     output = min(6, max(0, input))
1670      *
1671      * Supported tensor {@link OperandCode}:
1672      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1673      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1674      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1675      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1676      *
1677      * Supported tensor rank: up to 4.
1678      *
1679      * Inputs:
1680      * * 0: A tensor, specifying the input.
1681      *      Since API level 29, this tensor may be zero-sized.
1682      *
1683      * Outputs:
1684      * * 0: The output tensor of same shape as input0.
1685      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1686      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1687      *      the scale and zeroPoint must be the same as input0.
1688      *
1689      * Available since API level 27.
1690      */
1691     ANEURALNETWORKS_RELU6 = 21,
1692 
1693     /**
1694      * Reshapes a tensor.
1695      *
1696      * Given tensor, this operation returns a tensor that has the same values as
1697      * tensor, but with a newly specified shape.
1698      *
1699      * Supported tensor {@link OperandCode}:
1700      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1701      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1702      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1703      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1704      *
1705      * Supported tensor rank: up to 4.
1706      *
1707      * Inputs:
1708      * * 0: A tensor, specifying the tensor to be reshaped.
1709      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the
1710      *      shape of the output tensor. The number of elements implied by shape
1711      *      must be the same as the number of elements in the input tensor.
1712      *
1713      *      If one component of shape is the special value -1, the size of that
1714      *      dimension is computed so that the total size remains constant. In
1715      *      particular, a shape of [-1] flattens into 1-D. At most one component
1716      *      of shape can be -1.
1717      *
1718      * Outputs:
1719      * * 0: The output tensor, of shape specified by the input shape.
1720      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1721      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1722      *      the scale and zeroPoint must be the same as input0.
1723      *
1724      * Available since API level 27.
1725      */
1726     ANEURALNETWORKS_RESHAPE = 22,
1727 
1728     /**
1729      * Resizes images to given size using the bilinear interpretation.
1730      *
1731      * Resized images must be distorted if their output aspect ratio is not the
1732      * same as input aspect ratio. The corner pixels of output may not be the
1733      * same as corner pixels of input.
1734      *
1735      * Supported tensor {@link OperandCode}:
1736      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1737      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1738      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
1739      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1740      *
1741      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1742      * With the default data layout NHWC, the data is stored in the order of:
1743      * [batch, height, width, channels]. Alternatively, the data layout could
1744      * be NCHW, the data storage order of: [batch, channels, height, width].
1745      * NCHW is supported since API level 29.
1746      *
1747      * Both resizing by shape and resizing by scale are supported.
1748      *
1749      * Inputs (resizing by shape):
1750      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1751      *      the input.
1752      *      Since API level 29, zero batches is supported for this tensor.
1753      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1754      *      width of the output tensor.
1755      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1756      *      height of the output tensor.
1757      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1758      *      Set to true to specify NCHW data layout for input0 and output0.
1759      *      Available since API level 29.
1760      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1761      *      scalar, default to false.  If True, the centers of the 4 corner
1762      *      pixels of the input and output tensors are aligned, preserving the
1763      *      values at the corner pixels.
1764      *      Available since API level 30.
1765      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1766      *      scalar, default to false. If True, the pixel centers are assumed to
1767      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1768      *      TF 2.0. If this parameter is True, then align_corners parameter
1769      *      must be False.
1770      *      Available since API level 30.
1771      *
1772      * Inputs (resizing by scale, since API level 29):
1773      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1774      *      the input. Zero batches is supported for this tensor.
1775      * * 1: A scalar, specifying width_scale, the scaling factor of the width
1776      *      dimension from the input tensor to the output tensor. The output
1777      *      width is calculated as new_width = floor(width * width_scale).
1778      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1779      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1780      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1781      * * 2: A scalar, specifying height_scale, the scaling factor of the height
1782      *      dimension from the input tensor to the output tensor. The output
1783      *      height is calculated as new_height = floor(height * height_scale).
1784      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1785      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1786      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1787      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1788      *      Set to true to specify NCHW data layout for input0 and output0.
1789      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1790      *      scalar, default to false.  If True, the centers of the 4 corner
1791      *      pixels of the input and output tensors are aligned, preserving the
1792      *      values at the corner pixels.
1793      *      Available since API level 30.
1794      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1795      *      scalar, default to false. If True, the pixel centers are assumed to
1796      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1797      *      TF 2.0. If this parameter is True, then align_corners parameter
1798      *      must be False.
1799      *      Available since API level 30.
1800      *
1801      * Outputs:
1802      * * 0: The output 4-D tensor, of shape
1803      *      [batches, new_height, new_width, depth].
1804      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1805      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1806      *      the scale and zeroPoint must be the same as input0.
1807      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
1808      *      the scale and zeroPoint must be the same as input0.
1809      *
1810      * Available since API level 27.
1811      */
1812     ANEURALNETWORKS_RESIZE_BILINEAR = 23,
1813 
1814     /**
1815      * A basic recurrent neural network layer.
1816      *
1817      * This layer implements the operation:
1818      * outputs = state = activation(inputs * input_weights +
1819      *                              state * recurrent_weights + bias)
1820      *
1821      * Where:
1822      * * “input_weights” is a weight matrix that multiplies the inputs;
1823      * * “recurrent_weights” is a weight matrix that multiplies the current
1824      *    “state” which itself is the output from the previous time step
1825      *    computation;
1826      * * “bias” is a bias vector (added to each output vector in the batch);
1827      * * “activation” is the function passed as the “fused_activation_function”
1828      *   argument (if not “NONE”).
1829      *
1830      * Supported tensor {@link OperandCode}:
1831      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1832      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1833      *
1834      * The input tensors must all be the same type.
1835      *
1836      * Inputs:
1837      * * 0: input.
1838      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1839      *      corresponds to the batching dimension, and “input_size” is the size
1840      *      of the input.
1841      * * 1: weights.
1842      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1843      *      corresponds to the number of units.
1844      * * 2: recurrent_weights.
1845      *      A 2-D tensor of shape [num_units, num_units], with columns
1846      *      corresponding to the weights from each unit.
1847      * * 3: bias.
1848      *      A 1-D tensor of shape [num_units].
1849      * * 4: hidden state (in).
1850      *      A 2-D tensor of shape [batch_size, num_units].
1851      * * 5: fused_activation_function.
1852      *      An optional {@link FuseCode} value indicating the
1853      *      activation function. If “NONE” is specified then it results in a
1854      *      linear activation.
1855      *
1856      * Outputs:
1857      * * 0: hidden state (out).
1858      *      A 2-D tensor of shape [batch_size, num_units].
1859      *
1860      * * 1: output.
1861      *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
1862      *      the same as the current state value.
1863      *
1864      * Available since API level 27.
1865      */
1866     ANEURALNETWORKS_RNN = 24,
1867 
1868     /**
1869      * Computes the softmax activation on the input tensor element-wise, per
1870      * batch, by normalizing the input vector so the maximum coefficient is
1871      * zero.
1872      *
1873      * The output is calculated using this formula:
1874      *
1875      *     output[batch, i] =
1876      *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1877      *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1878      *
1879      * For input tensor with rank other than 2, the activation will be applied
1880      * independently on each 1-D slice along specified dimension.
1881      *
1882      * Supported tensor {@link OperandCode}:
1883      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1884      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1885      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1886      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1887      *
1888      * Supported tensor rank: up to 4.
1889      * Tensors with rank other than 2 or 4 are only supported since API level 29.
1890      *
1891      * Inputs:
1892      * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
1893      *      Since API level 29, this tensor may be zero-sized.
1894      * * 1: A scalar, specifying the positive scaling factor for the exponent,
1895      *      beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
1896      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
1897      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
1898      *      must be of {@link ANEURALNETWORKS_FLOAT32}.
1899      *      If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the
1900      *      scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
1901      * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1902      *      specifying the dimension the activation would be performed on.
1903      *      Negative index is used to specify axis from the end (e.g. -1 for
1904      *      the last axis). Must be in the range [-n, n).
1905      *      Available since API level 29.
1906      *
1907      * Outputs:
1908      * * 0: The output tensor of same shape as input0.
1909      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1910      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1911      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1912      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1913      *
1914      * Available since API level 27.
1915      */
1916     ANEURALNETWORKS_SOFTMAX = 25,
1917 
1918     /**
1919      * Rearranges blocks of spatial data, into depth.
1920      *
1921      * More specifically, this op outputs a copy of the input tensor where
1922      * values from the height and width dimensions are moved to the depth
1923      * dimension. The value block_size indicates the input block size and how
1924      * the data is moved.
1925      *
1926      * Chunks of data of size block_size * block_size from depth are rearranged
1927      * into non-overlapping blocks of size block_size x block_size.
1928      *
1929      * The depth of the output tensor is input_depth * block_size * block_size.
1930      * The input tensor's height and width must be divisible by block_size.
1931      *
1932      * Supported tensor {@link OperandCode}:
1933      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
1934      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1935      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1936      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
1937      *
1938      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1939      * With the default data layout NHWC, the data is stored in the order of:
1940      * [batch, height, width, channels]. Alternatively, the data layout could
1941      * be NCHW, the data storage order of: [batch, channels, height, width].
1942      * NCHW is supported since API level 29.
1943      *
1944      * Inputs:
1945      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1946      *      specifying the input.
1947      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
1948      *      block_size must be >=1 and block_size must be a divisor of both the
1949      *      input height and width.
1950      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1951      *      Set to true to specify NCHW data layout for input0 and output0.
1952      *      Available since API level 29.
1953      *
1954      * Outputs:
1955      * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1956      *      width/block_size, depth_in*block_size*block_size].
1957      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1958      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1959      *      the scale and zeroPoint must be the same as input0.
1960      *
1961      * Available since API level 27.
1962      */
1963     ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
1964 
1965     /**
1966      * SVDF op is a kind of stateful layer derived from the notion that a
1967      * densely connected layer that's processing a sequence of input frames can
1968      * be approximated by using a singular value decomposition of each of its
1969      * nodes. The implementation is based on:
1970      *
1971      * https://research.google.com/pubs/archive/43813.pdf
1972      *
1973      * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
1974      * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
1975      * INTERSPEECH, 2015.
1976      *
1977      * It processes the incoming input using a 2-stage filtering mechanism:
1978      * * stage 1 performs filtering on the "features" dimension, whose outputs
1979      *   get pushed into a memory of fixed-size memory_size.
1980      * * stage 2 performs filtering on the "time" dimension of the memory_size
1981      *   memoized outputs of stage 1.
1982      *
1983      * Specifically, for rank 1, this layer implements the operation:
1984      *
1985      *     memory = push(conv1d(inputs, weights_feature, feature_dim,
1986      *                          "ANEURALNETWORKS_PADDING_VALID"));
1987      *     outputs = activation(memory * weights_time + bias);
1988      *
1989      * Where:
1990      * * “weights_feature” is a weights matrix that processes the inputs (by
1991      *   convolving the input with every “feature filter”), and whose outputs
1992      *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
1993      *   entry gets dropped);
1994      * * “weights_time” is a weights matrix that processes the “memory” (by a
1995      *   batched matrix multiplication on the num_units);
1996      * * “bias” is an optional bias vector (added to each output vector in the
1997      *   batch); and
1998      * * “activation” is the function passed as the “fused_activation_function”
1999      *   argument (if not “NONE”).
2000      *
2001      * Each rank adds a dimension to the weights matrices by means of stacking
2002      * the filters.
2003      *
2004      * Supported tensor {@link OperandCode}:
2005      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2006      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2007      *
2008      * All input tensors must be the same type.
2009      *
2010      * Inputs:
2011      * * 0: input.
2012      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
2013      *      corresponds to the batching dimension, and “input_size” is the size
2014      *      of the input.
2015      * * 1: weights_feature.
2016      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
2017      *      corresponds to the number of units.
2018      * * 2: weights_time.
2019      *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
2020      *      corresponds to the fixed-size of the memory.
2021      * * 3: bias.
2022      *      An optional 1-D tensor of shape [num_units].
2023      * * 4: state (in).
2024      *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
2025      * * 5: rank.
2026      *      The rank of the SVD approximation.
2027      * * 6: fused_activation_function.
2028      *      An optional {@link FuseCode} value indicating the
2029      *      activation function. If “NONE” is specified then it results in a
2030      *      linear activation.
2031      *
2032      * Outputs:
2033      * * 0: state (out).
2034      *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2035      *      [batch_size, (memory_size - 1) * num_units * rank].
2036      * * 1: output.
2037      *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2038      *      [batch_size, num_units].
2039      *
2040      * Available since API level 27.
2041      */
2042     ANEURALNETWORKS_SVDF = 27,
2043 
2044     /**
2045      * Computes hyperbolic tangent of input tensor element-wise.
2046      *
2047      * The output is calculated using this formula:
2048      *
2049      *     output = tanh(input)
2050      *
2051      * Supported tensor {@link OperandCode}:
2052      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2053      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2054      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
2055      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2056      *
2057      * Supported tensor rank: up to 4.
2058      *
2059      * Inputs:
2060      * * 0: A tensor, specifying the input.
2061      *      Since API level 29, this tensor may be zero-sized.
2062      *
2063      * Outputs:
2064      * * 0: The output tensor of same shape as input0.
2065      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
2066      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
2067      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
2068      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
2069      *
2070      * Available since API level 27.
2071      */
2072     ANEURALNETWORKS_TANH = 28,
2073 
2074     // Operations below are available since API level 28.
2075 
2076     /**
2077      * BatchToSpace for N-dimensional tensors.
2078      *
2079      * This operation reshapes the batch dimension (dimension 0) into M + 1
2080      * dimensions of shape block_shape + [batch], interleaves these blocks back
2081      * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
2082      * result with the same rank as the input.
2083      *
2084      * This is the reverse of SpaceToBatch.
2085      *
2086      * Supported tensor {@link OperandCode}:
2087      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2088      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2089      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2090      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2091      *
2092      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2093      * With the default data layout NHWC, the data is stored in the order of:
2094      * [batch, height, width, channels]. Alternatively, the data layout could
2095      * be NCHW, the data storage order of: [batch, channels, height, width].
2096      * NCHW is supported since API level 29.
2097      *
2098      * Inputs:
2099      * * 0: An n-D tensor, specifying the tensor to be reshaped
2100      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2101      *      sizes for each spatial dimension of the input tensor. All values
2102      *      must be >= 1.
2103      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2104      *      Set to true to specify NCHW data layout for input0 and output0.
2105      *      Available since API level 29.
2106      *
2107      * Outputs:
2108      * * 0: A tensor of the same {@link OperandCode} as input0.
2109      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2110      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2111      *      the scale and zeroPoint must be the same as input0.
2112      *
2113      * Available since API level 28.
2114      */
2115     ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
2116 
2117     /**
2118      * Element-wise division of two tensors.
2119      *
2120      * Takes two input tensors of identical {@link OperandCode} and compatible
2121      * dimensions. The output is the result of dividing the first input tensor
2122      * by the second, optionally modified by an activation function.
2123      *
2124      * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs
2125      * "floor division" ("//" in Python). For example,
2126      *     5 // 2 = 2
2127      *    -5 // 2 = -3
2128      *
2129      * Two dimensions are compatible when:
2130      *     1. they are equal, or
2131      *     2. one of them is 1
2132      *
2133      * The size of the output is the maximum size along each dimension of the
2134      * input operands. It starts with the trailing dimensions, and works its way
2135      * forward.
2136      *
2137      * Example:
2138      *     input1.dimension =    {4, 1, 2}
2139      *     input2.dimension = {5, 4, 3, 1}
2140      *     output.dimension = {5, 4, 3, 2}
2141      *
2142      * Since API level 29, generic zero-sized input tensor is supported. Zero
2143      * dimension is only compatible with 0 or 1. The size of the output
2144      * dimension is zero if either of corresponding input dimension is zero.
2145      *
2146      * Supported tensor {@link OperandCode}:
2147      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2148      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2149      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)
2150      *
2151      * Supported tensor rank: up to 4
2152      *
2153      * Inputs:
2154      * * 0: An n-D tensor, specifying the first input.
2155      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2156      *      as input0.
2157      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2158      *      {@link FuseCode} values. Specifies the activation to
2159      *      invoke on the result.
2160      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2161      *      the {@link FuseCode} must be "NONE".
2162      *
2163      * Outputs:
2164      * * 0: A tensor of the same {@link OperandCode} as input0.
2165      *
2166      * Available since API level 28.
2167      */
2168     ANEURALNETWORKS_DIV = 30,
2169 
2170     /**
2171      * Computes the mean of elements across dimensions of a tensor.
2172      *
2173      * Reduces the input tensor along the given dimensions to reduce. Unless
2174      * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
2175      * in axis. If keep_dims is true, the reduced dimensions are retained with
2176      * length 1.
2177      *
2178      * Supported tensor {@link OperandCode}:
2179      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2180      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2181      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2182      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2183      *
2184      * Supported tensor rank: up to 4
2185      *
2186      * Inputs:
2187      * * 0: A tensor, specifying the input.
2188      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
2189      *      to reduce. Must be in the range
2190      *      [-rank(input_tensor), rank(input_tensor)).
2191      *
2192      *      NOTE: When the operation was introduced, the documentation
2193      *      incorrectly stated that if dimensions were empty, the operation
2194      *      would reduce across all dimensions. This behavior was never
2195      *      implemented.
2196      *
2197      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,
2198      *      retains reduced dimensions with length 1.
2199      *
2200      * Outputs:
2201      * * 0: A tensor of the same {@link OperandCode} as input0.
2202      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2203      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2204      *      the scale and zeroPoint must be the same as input0.
2205      *      If all dimensions are reduced and keep_dims is false, the output
2206      *      shape is [1].
2207      *
2208      * Available since API level 28.
2209      */
2210     ANEURALNETWORKS_MEAN = 31,
2211 
2212     /**
2213      * Pads a tensor.
2214      *
2215      * This operation pads a tensor according to the specified paddings.
2216      *
2217      * Supported tensor {@link OperandCode}:
2218      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2219      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2220      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2221      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2222      *   (full support since API level 29, see the output section)
2223      *
2224      * Supported tensor rank: up to 4
2225      *
2226      * Inputs:
2227      * * 0: An n-D tensor, specifying the tensor to be padded.
2228      * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2229      *      for each spatial dimension of the input tensor. The shape of the
2230      *      tensor must be {rank(input0), 2}.
2231      *      padding[i, 0] specifies the number of elements to be padded in the
2232      *      front of dimension i.
2233      *      padding[i, 1] specifies the number of elements to be padded after the
2234      *      end of dimension i.
2235      *
2236      * Outputs:
2237      * * 0: A tensor of the same {@link OperandCode} as input0. The
2238      *      output tensor has the same rank as input0, and each
2239      *      dimension of the output tensor has the same size as the
2240      *      corresponding dimension of the input tensor plus the size
2241      *      of the padding:
2242      *          output0.dimension[i] =
2243      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
2244      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2245      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2246      *      the scale and zeroPoint must be the same as input0.
2247      *
2248      *      NOTE: Before API level 29, the pad value for
2249      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2250      *      Since API level 29, the pad value is always the logical zero.
2251      *
2252      * Available since API level 28.
2253      */
2254     ANEURALNETWORKS_PAD = 32,
2255 
2256     /**
2257      * SpaceToBatch for N-Dimensional tensors.
2258      *
2259      * This operation divides "spatial" dimensions [1, ..., M] of the input into
2260      * a grid of blocks of shape block_shape, and interleaves these blocks with
2261      * the "batch" dimension (0) such that in the output, the spatial dimensions
2262      * [1, ..., M] correspond to the position within the grid, and the batch
2263      * dimension combines both the position within a spatial block and the
2264      * original batch position. Prior to division into blocks, the spatial
2265      * dimensions of the input are optionally zero padded according to paddings.
2266      *
2267      * Supported tensor {@link OperandCode}:
2268      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2269      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2270      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2271      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2272      *   (full support since API level 29, see the output section)
2273      *
2274      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2275      * With the default data layout NHWC, the data is stored in the order of:
2276      * [batch, height, width, channels]. Alternatively, the data layout could
2277      * be NCHW, the data storage order of: [batch, channels, height, width].
2278      * NCHW is supported since API level 29.
2279      *
2280      * Inputs:
2281      * * 0: An n-D tensor, specifying the input.
2282      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2283      *      sizes for each spatial dimension of the input tensor. All values
2284      *      must be >= 1.
2285      * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2286      *      for each spatial dimension of the input tensor. All values must be
2287      *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
2288      *      of spatial dimensions.
2289      *      padding[i, 0] specifies the number of element to be padded in the
2290      *      front of dimension i.
2291      *      padding[i, 1] specifies the number of element to be padded after the
2292      *      end of dimension i.
2293      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2294      *      Set to true to specify NCHW data layout for input0 and output0.
2295      *      Available since API level 29.
2296      *
2297      * Outputs:
2298      * * 0: A tensor of the same {@link OperandCode} as input0.
2299      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2300      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2301      *      the scale and zeroPoint must be the same as input0.
2302      *
2303      *      NOTE: Before API level 29, the pad value for
2304      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2305      *      Since API level 29, the pad value is always the logical zero.
2306      *
2307      * Available since API level 28.
2308      */
2309     ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
2310 
2311     /**
2312      * Removes dimensions of size 1 from the shape of a tensor.
2313      *
2314      * Given a tensor input, this operation returns a tensor of the same
2315      * {@link OperandCode} with all dimensions of size 1 removed. If you don't
2316      * want to remove all size 1 dimensions, you can remove specific size 1
2317      * dimensions by specifying the axes (input1).
2318      *
2319      * Supported tensor {@link OperandCode}:
2320      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2321      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2322      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2323      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2324      *
2325      * Supported tensor rank: up to 4
2326      *
2327      * Inputs:
2328      * * 0: An n-D tensor, the tensor to be squeezed.
2329      * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2330      *      dimensions to squeeze. If specified only squeezes the dimensions
2331      *      listed. Otherwise, squeezes all dimensions. The dimension index
2332      *      starts at 0. An error must be reported if squeezing a dimension that
2333      *      is not 1.
2334      *
2335      * Outputs:
2336      * * 0: A tensor of the same {@link OperandCode} as input0. Contains the
2337      *      same data as input, but has one or more dimensions of size 1
2338      *      removed.
2339      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2340      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2341      *      the scale and zeroPoint must be the same as input0.
2342      *      If all input dimensions are equal to 1 and are to be squeezed, the
2343      *      output shape is [1].
2344      *
2345      * Available since API level 28.
2346      */
2347     ANEURALNETWORKS_SQUEEZE = 34,
2348 
2349     /**
2350      * Extracts a strided slice of a tensor.
2351      *
2352      * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2353      * from the given input tensor. Starting at the location specified by begin
2354      * the slice continues by adding stride to the index until all dimensions
2355      * are not less than end. Note that a stride can be negative, which causes a
2356      * reverse slice.
2357      *
2358      * Supported tensor {@link OperandCode}:
2359      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2360      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2361      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2362      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2363      *
2364      * Supported tensor rank: up to 4
2365      *
2366      * Inputs:
2367      * * 0: An n-D tensor, specifying the tensor to be sliced.
2368      * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2369      *      starts of the dimensions of the input tensor to be sliced. The
2370      *      length must be of rank(input0).
2371      * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2372      *      ends of the dimensions of the input tensor to be sliced. The length
2373      *      must be of rank(input0).
2374      * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2375      *      strides of the dimensions of the input tensor to be sliced. The
2376      *      length must be of rank(input0). The entries must be non-zero.
2377      * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit
2378      *      of begin_mask is set, begin[i] is ignored and the fullest possible
2379      *      range in that dimension is used instead.
2380      * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of
2381      *      end_mask is set, end[i] is ignored and the fullest possible range in
2382      *      that dimension is used instead.
2383      * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the
2384      *      ith bit of shrink_axis_mask is set, the ith dimension specification
2385      *      shrinks the dimensionality by 1, taking on the value at index
2386      *      begin[i]. In this case, the ith specification must define a
2387      *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
2388      *
2389      * Outputs:
2390      * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k),
2391      *      where k is the number of bits set in shrink_axis_mask.
2392      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2393      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2394      *      the scale and zeroPoint must be the same as input0.
2395      *      If shrink_axis_mask is true for all input dimensions, the output
2396      *      shape is [1].
2397      *
2398      * Available since API level 28.
2399      */
2400     ANEURALNETWORKS_STRIDED_SLICE = 35,
2401 
2402     /**
2403      * Element-wise subtraction of two tensors.
2404      *
2405      * Takes two input tensors of identical {@link OperandCode} and compatible
2406      * dimensions. The output is the result of subtracting the second input
2407      * tensor from the first one, optionally modified by an activation function.
2408      *
2409      * Two dimensions are compatible when:
2410      *     1. they are equal, or
2411      *     2. one of them is 1
2412      *
2413      * The size of the output is the maximum size along each dimension of the
2414      * input operands. It starts with the trailing dimensions, and works its way
2415      * forward.
2416      *
2417      * Example:
2418      *     input1.dimension =    {4, 1, 2}
2419      *     input2.dimension = {5, 4, 3, 1}
2420      *     output.dimension = {5, 4, 3, 2}
2421      *
2422      * Since API level 29, generic zero-sized input tensor is supported. Zero
2423      * dimension is only compatible with 0 or 1. The size of the output
2424      * dimension is zero if either of corresponding input dimension is zero.
2425      *
2426      * Supported tensor {@link OperandCode}:
2427      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2428      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2429      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
2430      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2431      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)
2432      *
2433      * Supported tensor rank: up to 4
2434      *
2435      * Inputs:
2436      * * 0: An n-D tensor, specifying the first input.
2437      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2438      *      as input0.
2439      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2440      *      {@link FuseCode} values. Specifies the activation to
2441      *      invoke on the result.
2442      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2443      *      the {@link FuseCode} must be "NONE".
2444      *
2445      * Outputs:
2446      * * 0: A tensor of the same {@link OperandCode} as input0.
2447      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2448      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2449      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2450      *
2451      * Available since API level 28.
2452      */
2453     ANEURALNETWORKS_SUB = 36,
2454 
2455     /**
2456      * Transposes the input tensor, permuting the dimensions according to the
2457      * perm tensor.
2458      *
2459      * The returned tensor's dimension i corresponds to the input dimension
2460      * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
2461      * rank of the input tensor. Hence by default, this operation performs a
2462      * regular matrix transpose on 2-D input Tensors.
2463      *
2464      * Supported tensor {@link OperandCode}:
2465      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
2466      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2467      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2468      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2469      *
2470      * Supported tensor rank: up to 4
2471      *
2472      * Inputs:
2473      * * 0: An n-D tensor, specifying the tensor to be transposed.
2474      *      Since API level 29, this tensor may be zero-sized.
2475      * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
2476      *      the permutation of the dimensions of the input tensor.
2477      *
2478      * Outputs:
2479      * * 0: A tensor of the same {@link OperandCode} as input0.
2480      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2481      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2482      *      the scale and zeroPoint must be the same as input0.
2483      *
2484      * Available since API level 28.
2485      */
2486     ANEURALNETWORKS_TRANSPOSE = 37,
2487 
2488     // Operations below are available since API level 29.
2489 
2490     /**
2491      * Computes the absolute value of a tensor, element-wise.
2492      *
2493      * Supported tensor {@link OperandCode}:
2494      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2495      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2496      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30)
2497      *
2498      * Supported tensor rank: from 1.
2499      *
2500      * Inputs:
2501      * * 0: A tensor.
2502      *
2503      * Outputs:
2504      * * 0: The output tensor of same shape as input0.
2505      *
2506      * Available since API level 29.
2507      */
2508     ANEURALNETWORKS_ABS = 38,
2509 
2510     /**
2511      * Returns the index of the largest element along an axis.
2512      *
2513      * Supported tensor {@link OperandCode}:
2514      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2515      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2516      * * {@link ANEURALNETWORKS_TENSOR_INT32}
2517      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2518      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2519      *
2520      * Supported tensor rank: from 1
2521      *
2522      * Inputs:
2523      * * 0: An n-D tensor specifying the input. Must be non-empty.
2524      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2525      *      reduce across. Negative index is used to specify axis from the
2526      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2527      *
2528      * Outputs:
2529      * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2530      *      If input is 1-dimensional, the output shape is [1].
2531      *
2532      * Available since API level 29.
2533      */
2534     // There is no underscore in ARG_MAX to avoid name conflict with
2535     // the macro defined in libc/kernel/uapi/linux/limits.h.
2536     ANEURALNETWORKS_ARGMAX = 39,
2537 
2538     /**
2539      * Returns the index of the smallest element along an axis.
2540      *
2541      * Supported tensor {@link OperandCode}:
2542      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2543      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2544      * * {@link ANEURALNETWORKS_TENSOR_INT32}
2545      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2546      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
2547      *
2548      * Supported tensor rank: from 1
2549      *
2550      * Inputs:
2551      * * 0: An n-D tensor specifying the input. Must be non-empty.
2552      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2553      *      reduce across. Negative index is used to specify axis from the
2554      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2555      *
2556      * Outputs:
2557      * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2558      *      If input is 1-dimensional, the output shape is [1].
2559      *
2560      * Available since API level 29.
2561      */
2562     ANEURALNETWORKS_ARGMIN = 40,  // See ARGMAX for naming discussion.
2563 
2564     /**
2565      * Transform axis-aligned bounding box proposals using bounding box deltas.
2566      *
2567      * Given the positions of bounding box proposals and the corresponding
2568      * bounding box deltas for each class, return the refined bounding box
2569      * regions. The resulting bounding boxes are cliped against the edges of
2570      * the image.
2571      *
2572      * Supported tensor {@link OperandCode}:
2573      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2574      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2575      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
2576      *
2577      * Inputs:
2578      * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
2579      *      bounding box proposals, each line with format [x1, y1, x2, y2].
2580      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
2581      *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
2582      *      is supported for this tensor.
2583      * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
2584      *      bounding box delta for each region of interest and each class. The
2585      *      bounding box deltas are organized in the following order
2586      *      [dx, dy, dw, dh], where dx and dy is the relative correction factor
2587      *      for the center position of the bounding box with respect to the width
2588      *      and height, dw and dh is the log-scale relative correction factor
2589      *      for the width and height. For input0 of type
2590      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be
2591      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
2592      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
2593      *      supported for this tensor.
2594      * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
2595      *      [num_rois], specifying the batch index of each box. Boxes with
2596      *      the same batch index are grouped together. Zero num_rois is
2597      *      supported for this tensor.
2598      * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
2599      *      each image in the batch, each line with format
2600      *      [image_height, image_width].
2601      *
2602      * Outputs:
2603      * * 0: A tensor of the same {@link OperandCode} as input0, with shape
2604      *      [num_rois, num_classes * 4], specifying the coordinates of each
2605      *      output bounding box for each class, with format [x1, y1, x2, y2].
2606      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
2607      *      scale must be 0.125 and the zero point must be 0.
2608      *
2609      * Available since API level 29.
2610      */
2611     ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,
2612 
2613     /**
2614      * A recurrent neural network layer that applies an LSTM cell to a
2615      * sequence of inputs in forward and backward directions.
2616      *
2617      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2618      * one input into the two RNN cells in the following way:
2619      *
2620      *       INPUT  (INPUT_REVERSED)
2621      *         |         |
2622      *    ---------------------
2623      *    | FW_LSTM   BW_LSTM |
2624      *    ---------------------
2625      *         |         |
2626      *      FW_OUT     BW_OUT
2627      *
2628      * An op with cross-linking takes two inputs and feeds them into the RNN
2629      * cells in the following way:
2630      *
2631      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2632      *           |             |
2633      *     INPUT | (INPUT_R'D.)|
2634      *       |   |       |     |
2635      *    -----------------------
2636      *    |  \  /        \    / |
2637      *    | FW_LSTM     BW_LSTM |
2638      *    -----------------------
2639      *         |           |
2640      *      FW_OUT      BW_OUT
2641      *
2642      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2643      * weights are present. While stacking this op on top of itself, this
2644      * allows to connect both forward and backward outputs from previous cell
2645      * to the next cell's input.
2646      *
2647      * Since API level 30 parallel linking mode is supported. The mode is
2648      * enabled if auxiliary input is present but auxiliary weights are omitted.
2649      * In this case, the cell feeds inputs into the RNN in the following way:
2650      *
2651      *       INPUT (AUX_INPUT_REVERSED)
2652      *         |         |
2653      *    ---------------------
2654      *    | FW_LSTM   BW_LSTM |
2655      *    ---------------------
2656      *         |         |
2657      *      FW_OUT     BW_OUT
2658      *
2659      * While stacking this op on top of itself, this allows to connect both
2660      * forward and backward outputs from previous cell to the next cell's
2661      * corresponding inputs.
2662      *
2663      * Supported tensor {@link OperandCode}:
2664      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2665      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2666      *
2667      * Supported tensor rank: 3, either time-major or batch-major.
2668      *
2669      * All input and output tensors must be of the same type.
2670      *
2671      * Inputs:
2672      * * 0: The input.
2673      *      A 3-D tensor of shape:
2674      *        If time-major: [max_time, batch_size, input_size]
2675      *        If batch-major: [batch_size, max_time, input_size]
2676      *      where "max_time" is the number of timesteps (sequence length),
2677      *      "batch_size" corresponds to the batching dimension, and
2678      *      "input_size" is the size of the input.
2679      * * 1: The forward input-to-input weights. Optional.
2680      *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
2681      *      corresponds to the number of forward cell units.
2682      * * 2: The forward input-to-forget weights.
2683      *      A 2-D tensor of shape [fw_num_units, input_size].
2684      * * 3: The forward input-to-cell weights.
2685      *      A 2-D tensor of shape [fw_num_units, input_size].
2686      * * 4: The forward input-to-output weights.
2687      *      A 2-D tensor of shape [fw_num_units, input_size].
2688      * * 5: The forward recurrent-to-input weights. Optional.
2689      *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
2690      *      corresponds to either the number of cell units (i.e., fw_num_units),
2691      *      or the second dimension of the “fw_projection_weights”, if defined.
2692      * * 6: The forward recurrent-to-forget weights.
2693      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2694      * * 7: The forward recurrent-to-cell weights.
2695      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2696      * * 8: The forward recurrent-to-output weights.
2697      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2698      * * 9: The forward cell-to-input weights. Optional.
2699      *      A 1-D tensor of shape [fw_num_units].
2700      * * 10: The forward cell-to-forget weights. Optional.
2701      *       A 1-D tensor of shape [fw_num_units].
2702      * * 11: The forward cell-to-output weights. Optional.
2703      *       A 1-D tensor of shape [fw_num_units].
2704      * * 12: The forward input gate bias. Optional.
2705      *       A 1-D tensor of shape [fw_num_units].
2706      * * 13: The forward forget gate bias.
2707      *       A 1-D tensor of shape [fw_num_units].
2708      * * 14: The forward cell gate bias.
2709      *       A 1-D tensor of shape [fw_num_units].
2710      * * 15: The forward output gate bias.
2711      *       A 1-D tensor of shape [fw_num_units].
2712      * * 16: The forward projection weights. Optional.
2713      *       A 2-D tensor of shape [fw_output_size, fw_num_units].
2714      * * 17: The forward projection bias. Optional.
2715      *       A 1-D tensor of shape [fw_output_size].
2716      * * 18: The backward input-to-input weights. Optional.
2717      *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
2718      *       corresponds to the number of backward cell units.
2719      * * 19: The backward input-to-forget weights.
2720      *       A 2-D tensor of shape [bw_num_units, input_size].
2721      * * 20: The backward input-to-cell weights.
2722      *       A 2-D tensor of shape [bw_num_units, input_size].
2723      * * 21: The backward input-to-output weights.
2724      *       A 2-D tensor of shape [bw_num_units, input_size].
2725      * * 22: The backward recurrent-to-input weights. Optional.
2726      *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
2727      *       corresponds to either the number of cell units (i.e., “bw_num_units”),
2728      *       or the second dimension of the “bw_projection_weights”, if defined.
2729      * * 23: The backward recurrent-to-forget weights.
2730      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2731      * * 24: The backward recurrent-to-cell weights.
2732      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2733      * * 25: The backward recurrent-to-output weights.
2734      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2735      * * 26: The backward cell-to-input weights. Optional.
2736      *       A 1-D tensor of shape [bw_num_units].
2737      * * 27: The backward cell-to-forget weights. Optional.
2738      *       A 1-D tensor of shape [bw_num_units].
2739      * * 28: The backward cell-to-output weights. Optional.
2740      *       A 1-D tensor of shape [bw_num_units].
2741      * * 29: The backward input gate bias. Optional.
2742      *       A 1-D tensor of shape [bw_num_units].
2743      * * 30: The backward forget gate bias.
2744      *       A 1-D tensor of shape [bw_num_units].
2745      * * 31: The backward cell gate bias.
2746      *       A 1-D tensor of shape [bw_num_units].
2747      * * 32: The backward output gate bias.
2748      *       A 1-D tensor of shape [bw_num_units].
2749      * * 33: The backward projection weights. Optional.
2750      *       A 2-D tensor of shape [bw_output_size, bw_num_units].
2751      * * 34: The backward projection bias. Optional.
2752      *       A 1-D tensor of shape [bw_output_size].
2753      * * 35: The forward input activation state.
2754      *       A 2-D tensor of shape [batch_size, bw_output_size].
2755      * * 36: The forward input cell state.
2756      *       A 2-D tensor of shape [batch_size, bw_num_units].
2757      * * 37: The backward input activation state.
2758      *       A 2-D tensor of shape [batch_size, bw_output_size].
2759      * * 38: The backward input cell state.
2760      *       A 2-D tensor of shape [batch_size, bw_num_units].
2761      * * 39: The auxiliary input. Optional.
2762      *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
2763      *       where “batch_size” corresponds to the batching dimension, and
2764      *       “aux_input_size” is the size of the auxiliary input. Optional. See
2765      *       the docs above for the usage modes explanation.
2766      * * 40: The forward auxiliary input-to-input weights.
2767      *       Optional. See the docs above for the usage modes explanation.
2768      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2769      * * 41: The forward auxiliary input-to-forget weights.
2770      *       Optional. See the docs above for the usage modes explanation.
2771      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2772      * * 42: The forward auxiliary input-to-cell weights.
2773      *       Optional. See the docs above for the usage modes explanation.
2774      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2775      * * 43: The forward auxiliary input-to-output weights.
2776      *       Optional. See the docs above for the usage modes explanation.
2777      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2778      * * 44: The backward auxiliary input-to-input weights.
2779      *       Optional. See the docs above for the usage modes explanation.
2780      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2781      * * 45: The backward auxiliary input-to-forget weights.
2782      *       Optional. See the docs above for the usage modes explanation.
2783      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2784      * * 46: The backward auxiliary input-to-cell weights.
2785      *       Optional. See the docs above for the usage modes explanation.
2786      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2787      * * 47: The backward auxiliary input-to-output weights.
2788      *       Optional. See the docs above for the usage modes explanation.
2789      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2790      * * 48: The activation function.
2791      *       A value indicating the activation function:
2792      *       <ul>
2793      *       <li>0: None;
2794      *       <li>1: Relu;
2795      *       <li>3: Relu6;
2796      *       <li>4: Tanh;
2797      *       <li>6: Sigmoid.
2798      *       </ul>
2799      * * 49: The clipping threshold for the cell state, such
2800      *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
2801      *       then clipping is disabled.
2802      *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2803      *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2804      *       otherwise if all the input tensors have the type
2805      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2806      *       of type {@link ANEURALNETWORKS_FLOAT16}.
2807      * * 50: The clipping threshold for the output from the
2808      *       projection layer, such that values are bound within
2809      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2810      *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2811      *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2812      *       otherwise if all the input tensors have the type
2813      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2814      *       of type {@link ANEURALNETWORKS_FLOAT16}.
2815      * * 51: merge_outputs
2816      *       An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
2817      *       from forward and backward cells should be merged.
2818      * * 52: time_major
2819      *       An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
2820      *       of input and output tensors.
2821      * * 53: The forward input layer normalization weights. Optional.
2822      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2823      *       to activation at input gate.
2824      * * 54: The forward forget layer normalization weights. Optional.
2825      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2826      *       to activation at forget gate.
2827      * * 55: The forward cell layer normalization weights. Optional.
2828      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2829      *       to activation at cell gate.
2830      * * 56: The forward output layer normalization weights. Optional.
2831      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2832      *       to activation at output gate.
2833      * * 57: The backward input layer normalization weights. Optional.
2834      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2835      *       to activation at input gate.
2836      * * 58: The backward forget layer normalization weights. Optional.
2837      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2838      *       to activation at forget gate.
2839      * * 59: The backward cell layer normalization weights. Optional.
2840      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2841      *       to activation at cell gate.
2842      * * 60: The backward output layer normalization weights. Optional.
2843      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2844      *       to activation at output gate.
2845      *
2846      * Outputs:
2847      * * 0: The forward output.
2848      *      A 3-D tensor of shape:
2849      *        If time-major and not merge_outputs:
2850      *          [max_time, batch_size, fw_output_size]
2851      *        If time-major and merge_outputs:
2852      *          [max_time, batch_size, fw_output_size + bw_output_size]
2853      *        If batch-major and not merge_outputs:
2854      *          [batch_size, max_time, fw_output_size]
2855      *        If batch-major and merge_outputs:
2856      *          [batch_size, max_time, fw_output_size + bw_output_size]
2857      * * 1: The backward output.  Unused if merge_outputs is true.
2858      *      A 3-D tensor of shape:
2859      *        If time-major: [max_time, batch_size, bw_output_size]
2860      *        If batch-major: [batch_size, max_time, bw_output_size]
2861      * * 2: The forward activation state output.
2862      *      A 2-D tensor of shape [batch_size, fw_output_size] containing an
2863      *      activation state from the last time step in the sequence. This
2864      *      output is optional and can be omitted. If this output is present
2865      *      then outputs 3-5 must be present as well.
2866      *      Available since API level 30.
2867      * * 3: The forward cell state output.
2868      *      A tensor of shape [batch_size, fw_cell_size] containing a cell state
2869      *      from the last time step in the sequence. This output is optional
2870      *      and can be omitted. If this output is present
2871      *      then outputs 2, 4, 5 must be present as well.
2872      *      Available since API level 30.
2873      * * 4: The backward activation state output.
2874      *      A 2-D tensor of shape [batch_size, bw_output_size] containing an
2875      *      activation state from the last time step in the sequence. This
2876      *      output is optional and can be omitted. If this output is present
2877      *      then outputs 2, 3, 5 must be present as well.
2878      *      Available since API level 30.
2879      * * 5: The backward cell state output.
2880      *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
2881      *      from the last time step in the sequence. This output is optional
2882      *      and can be omitted. If this output is present
2883      *      then outputs 2-4 must be present as well.
2884      *      Available since API level 30.
2885      *
2886      * Available since API level 29.
2887      *
2888      * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI
2889      * does not maintain internal states. This operator does not support the usage pattern in which
2890      * multiple cells are chained and state tensors are propagated.
2891      */
2892     ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
2893 
2894     /**
2895      * A recurrent neural network layer that applies a basic RNN cell to a
2896      * sequence of inputs in forward and backward directions.
2897      *
2898      * This Op unrolls the input along the sequence dimension, and implements
2899      * the following operation for each element in the sequence s =
2900      * 1...sequence_length:
2901      *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
2902      *          fw_state * fw_recurrent_weights’ + fw_bias)
2903      *
2904      * And for each element in sequence t = sequence_length : 1
2905      *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
2906      *          bw_state * bw_recurrent_weights’ + bw_bias)
2907      *
2908      * Where:
2909      * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
2910      * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
2911      *    current “state” which itself is the output from the previous time step
2912      *    computation;
2913      * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
2914      *    batch);
2915      * * “activation” is the function passed as the “fused_activation_function”
2916      *   argument (if not “NONE”).
2917      *
2918      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2919      * one input into the two RNN cells in the following way:
2920      *
2921      *       INPUT  (INPUT_REVERSED)
2922      *         |         |
2923      *    ---------------------
2924      *    | FW_RNN     BW_RNN |
2925      *    ---------------------
2926      *         |         |
2927      *      FW_OUT     BW_OUT
2928      *
2929      * An op with cross-linking takes two inputs and feeds them into the RNN
2930      * cells in the following way:
2931      *
2932      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2933      *           |             |
2934      *     INPUT | (INPUT_R'D.)|
2935      *       |   |       |     |
2936      *    -----------------------
2937      *    |  \  /        \    / |
2938      *    | FW_RNN       BW_RNN |
2939      *    -----------------------
2940      *         |           |
2941      *      FW_OUT      BW_OUT
2942      *
2943      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2944      * weights are present. While stacking this op on top of itself, this
2945      * allows to connect both forward and backward outputs from previous cell
2946      * to the next cell's input.
2947      *
2948      * Since API level 30 parallel linking mode is supported. The mode is
2949      * enabled if auxiliary input is present but auxiliary weights are omitted.
2950      * In this case, the cell feeds inputs into the RNN in the following way:
2951      *
2952      *       INPUT (AUX_INPUT_REVERSED)
2953      *         |         |
2954      *    ---------------------
2955      *    | FW_RNN     BW_RNN |
2956      *    ---------------------
2957      *         |         |
2958      *      FW_OUT     BW_OUT
2959      *
2960      * While stacking this op on top of itself, this allows to connect both
2961      * forward and backward outputs from previous cell to the next cell's
2962      * corresponding inputs.
2963      *
2964      * Supported tensor {@link OperandCode}:
2965      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2966      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2967      *
2968      * The input tensors must all be the same type.
2969      *
2970      * Inputs:
2971      * * 0: input.
2972      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2973      *      it is set to true, then the input has a shape [maxTime, batchSize,
2974      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
2975      *      inputSize].
2976      * * 1: fwWeights.
2977      *      A 2-D tensor of shape [fwNumUnits, inputSize].
2978      * * 2: fwRecurrentWeights.
2979      *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
2980      * * 3: fwBias.
2981      *      A 1-D tensor of shape [fwNumUnits].
2982      * * 4: fwHiddenState.
2983      *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
2984      *      state input for the first time step of the computation.
2985      * * 5: bwWeights.
2986      *      A 2-D tensor of shape [bwNumUnits, inputSize].
2987      * * 6: bwRecurrentWeights.
2988      *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
2989      * * 7: bwBias.
2990      *      A 1-D tensor of shape [bwNumUnits].
2991      * * 8: bwHiddenState
2992      *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
2993      *      state input for the first time step of the computation.
2994      * * 9: auxInput.
2995      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2996      *      it is set to true, then the input has a shape [maxTime, batchSize,
2997      *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
2998      *      auxInputSize]. Can be omitted. See the docs above for the usage
2999      *      modes explanation.
3000      * * 10:fwAuxWeights.
3001      *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
3002      *      See the docs above for the usage modes explanation.
3003      * * 11:bwAuxWeights.
3004      *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
3005      *      See the docs above for the usage modes explanation.
3006      * * 12:fusedActivationFunction.
3007      *      A {@link FuseCode} value indicating the activation function. If
3008      *      “NONE” is specified then it results in a linear activation.
3009      * * 13:timeMajor
3010      *      An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
3011      *      of input and output tensors.
3012      * * 14:mergeOutputs
3013      *      An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
3014      *      from forward and backward cells are separate (if set to false) or
3015      *      concatenated (if set to true).
3016      * Outputs:
3017      * * 0: fwOutput.
3018      *      A 3-D tensor. The first two dimensions of the shape are defined by
3019      *      the input 6 (timeMajor) and the third dimension is defined by the
3020      *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
3021      *      two dimensions are [maxTime, batchSize], otherwise they are set to
3022      *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
3023      *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
3024      *      to fwNumUnits.
3025      * * 1: bwOutput.
3026      *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
3027      *      this tensor is not produced. The shape is defined by the input 6
3028      *      (timeMajor). If it is set to true, then the shape is set to
3029      *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
3030      *      [batchSize, maxTime, bwNumUnits].
3031      * * 2: The forward hidden state output.
3032      *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
3033      *      state from the last time step in the sequence. This output is
3034      *      optional and can be omitted. If this output is present then output
3035      *      3 must be present as well.
3036      *      Available since API level 30.
3037      * * 3: The backward hidden state output.
3038      *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
3039      *      state from the last time step in the sequence. This output is
3040      *      optional and can be omitted. If this output is present then output
3041      *      2 must be present as well.
3042      *      Available since API level 30.
3043      *
3044      * Available since API level 29.
3045      *
3046      * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI
3047      * does not maintain internal states. This operator does not support the usage pattern in which
3048      * multiple cells are chained and state tensors are propagated.
3049      */
3050     ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
3051 
3052     /**
3053      * Greedily selects a subset of bounding boxes in descending order of score.
3054      *
3055      * This op applies NMS algorithm to each class. In each loop of execution,
3056      * the box with maximum score gets selected and removed from the pending set.
3057      * The scores of the rest of boxes are lowered according to the
3058      * intersection-over-union (IOU) overlapping with the previously selected
3059      * boxes and a specified NMS kernel method. Any boxes with score less
3060      * than a threshold are removed from the pending set.
3061      *
3062      * Three NMS kernels are supported:
3063      * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
3064      * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
3065      * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
3066      *
3067      * Axis-aligned bounding boxes are represented by its upper-left corner
3068      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3069      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3070      *
3071      * Supported tensor {@link OperandCode}:
3072      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3073      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3074      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3075      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3076      *
3077      * Inputs:
3078      * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
3079      *      of each bounding box proposal. The boxes are grouped by batches in the
3080      *      first dimension. Zero num_rois is supported for this tensor.
3081      * * 1: A 2-D Tensor specifying the bounding boxes of shape
3082      *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
3083      *      The boxes are grouped by batches in the first dimension. The sequential
3084      *      order of the boxes corresponds with input0. For input0 of type
3085      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of
3086      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
3087      *      scale of 0.125.
3088      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3089      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
3090      *      with zeroPoint of -128 and scale of 0.125.
3091      *      Zero num_rois is supported for this tensor.
3092      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3093      *      [num_rois], specifying the batch index of each box. Boxes with
3094      *      the same batch index are grouped together.
3095      * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes
3096      *      with scores lower than the threshold are filtered before sending
3097      *      to the NMS algorithm.
3098      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3099      *      number of selected bounding boxes for each image. Set to a negative
3100      *      value for unlimited number of output bounding boxes.
3101      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS
3102      *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
3103      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3104      *      threshold in hard and linear NMS kernel. This field is ignored if
3105      *      gaussian kernel is selected.
3106      * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in
3107      *      gaussian NMS kernel. This field is ignored if gaussian kernel is
3108      *      not selected.
3109      * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold.
3110      *      Boxes with scores lower than the threshold are dropped during the
3111      *      score updating phase in soft NMS.
3112      *
3113      * Outputs:
3114      * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape
3115      *      [num_output_rois], specifying the score of each output box. The boxes
3116      *      are grouped by batches, but the sequential order in each batch is not
3117      *      guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
3118      *      guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3119      *      or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3120      *      the scale and zero point must be the same as input0.
3121      * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape
3122      *      [num_output_rois, 4], specifying the coordinates of each
3123      *      output bounding box with the same format as input1. The sequential
3124      *      order of the boxes corresponds with output0. For type of
3125      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be
3126      *      0.125 and the zero point must be 0.
3127      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3128      *      [num_output_rois], specifying the class of each output box. The
3129      *      sequential order of the boxes corresponds with output0.
3130      * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3131      *      [num_output_rois], specifying the batch index of each box. Boxes
3132      *      with the same batch index are grouped together.
3133      *
3134      * Available since API level 29.
3135      */
3136     ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
3137 
3138     /**
3139      * Casts a tensor to a type.
3140      *
3141      * This operation ignores the scale and zeroPoint of quanized tensors,
3142      * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input
3143      * as a tensor of uint8 values.
3144      *
3145      * Supported tensor {@link OperandCode}:
3146      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3147      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3148      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3149      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3150      * Since API level 30, casting tensors of the following
3151      * {@link OperandCode} to the same {@link OperandCode} is supported:
3152      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3153      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3154      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
3155      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
3156      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3157      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
3158      *
3159      * Supported tensor rank: from 1
3160      *
3161      * Inputs:
3162      * * 0: A tensor.
3163      *
3164      * Outputs:
3165      * * 0: A tensor with the same shape as input0.
3166      *
3167      * Available since API level 29.
3168      */
3169     ANEURALNETWORKS_CAST = 45,
3170 
3171     /**
3172      * Shuffle the channels of the input tensor.
3173      *
3174      * Given an input tensor and a integer value of num_groups, CHANNEL_SHUFFLE
3175      * divide the channel dimension into num_groups groups, and reorganize the
3176      * channels by grouping channels with the same index in each group.
3177      *
3178      * Along the channel dimension, the output is calculated using this formula:
3179      *
3180      *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
3181      *
3182      * where group_size = num_channels / num_groups
3183      *
3184      * The number of channels must be divisible by num_groups.
3185      *
3186      * Supported tensor {@link OperandCode}:
3187      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3188      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3189      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3190      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3191      *
3192      * Supported tensor rank: up to 4
3193      *
3194      * Inputs:
3195      * * 0: An n-D tensor, specifying the tensor to be shuffled.
3196      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3197      *      groups.
3198      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension
3199      *      channel shuffle would be performed on. Negative index is used to
3200      *      specify axis from the end (e.g. -1 for the last axis). Must be in
3201      *      the range [-n, n).
3202      *
3203      * Outputs:
3204      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3205      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3206      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3207      *      the scale and zeroPoint must be the same as input0.
3208      *
3209      * Available since API level 29.
3210      */
3211     ANEURALNETWORKS_CHANNEL_SHUFFLE = 46,
3212 
3213     /**
3214      * Apply postprocessing steps to bounding box detections.
3215      *
3216      * Bounding box detections are generated by applying transformation on a set
3217      * of predefined anchors with the bounding box deltas from bounding box
3218      * regression. A final step of hard NMS is applied to limit the number of
3219      * returned boxes.
3220      *
3221      * Supported tensor {@link OperandCode}:
3222      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3223      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3224      *
3225      * Inputs:
3226      * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
3227      *      the score of each anchor with each class. Class 0 for each
3228      *      [batches, num_anchors, 0] is background and will be ignored.
3229      * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
3230      *      the first four values in length_box_encoding specifying the bounding
3231      *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
3232      *      where dy and dx is the linear-scale relative correction factor for the
3233      *      center position of the bounding box with respect to the width and height,
3234      *      dh and dw is the log-scale relative correction factor for the width and
3235      *      height. All the entries in length_box_encoding beyond the first four
3236      *      values are ignored in this operation.
3237      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3238      *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
3239      *      ctr_x are the center position of the box, and h and w are the height
3240      *      and the width.
3241      * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3242      *      factor for dy in bounding box deltas.
3243      * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3244      *      factor for dx in bounding box deltas.
3245      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3246      *      factor for dh in bounding box deltas.
3247      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3248      *      factor for dw in bounding box deltas.
3249      * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular
3250      *      multi-class NMS algorithm that do NMS separately for each class,
3251      *      set to false for a faster algorithm that only do one single NMS
3252      *      using the highest class score..
3253      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying
3254      *      the maximum number of boxes for the output. Boxes with the lowest
3255      *      scores are discarded to meet the limit.
3256      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3257      *      set to false, specifying the maximum number of classes per detection.
3258      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3259      *       set to true, specifying the maximum number of detections when
3260      *       applying NMS algorithm for each single class.
3261      * * 11: A scalar, score_threshold. Boxes with scores lower than the
3262      *       threshold are filtered before sending to the NMS algorithm. The
3263      *       scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3264      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3265      *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3266      *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3267      * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
3268      *       must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3269      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3270      *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3271      *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3272      * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include
3273      *       background class in the list of label map for the output, set
3274      *       to false to not include the background. When the background
3275      *       class is included, it has label 0 and the output classes start
3276      *       at 1 in the label map, otherwise, the output classes start at 0.
3277      *
3278      * Outputs:
3279      * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape
3280      *      [batches, max_num_detections], specifying the score of each output
3281      *      detections.
3282      * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
3283      *      coordinates of each output bounding box, with format
3284      *      [y1, x1, y2, x2].
3285      * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3286      *      [batches, max_num_detections], specifying the class label for each
3287      *      output detection.
3288      * * 3: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches],
3289      *      specifying the number of valid output detections for each batch.
3290      *
3291      * Available since API level 29.
3292      */
3293     ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47,
3294 
3295     /**
3296      * For input tensors x and y, computes x == y elementwise.
3297      *
3298      * Supported tensor {@link OperandCode}:
3299      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3300      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3301      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3302      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3303      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3304      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3305      *
3306      * Supported tensor rank: from 1
3307      *
3308      * This operation supports broadcasting.
3309      *
3310      * Inputs:
3311      * * 0: A tensor.
3312      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3313      *      with input0.
3314      *
3315      * Outputs:
3316      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3317      *
3318      * Available since API level 29.
3319      */
3320     ANEURALNETWORKS_EQUAL = 48,
3321 
3322     /**
3323      * Computes exponential of x element-wise.
3324      *
3325      * Supported tensor {@link OperandCode}:
3326      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3327      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3328      *
3329      * Supported tensor rank: from 1.
3330      *
3331      * Inputs:
3332      * * 0: A tensor.
3333      *
3334      * Outputs:
3335      * * 0: The output tensor of same shape as input0.
3336      *
3337      * Available since API level 29.
3338      */
3339     ANEURALNETWORKS_EXP = 49,
3340 
3341     /**
3342      * Inserts a dimension of 1 into a tensor's shape.
3343      *
3344      * Given a tensor input, this operation inserts a dimension of 1 at the
3345      * given dimension index of input's shape. The dimension index starts at
3346      * zero; if you specify a negative dimension index, it is counted backward
3347      * from the end.
3348      *
3349      * Supported tensor {@link OperandCode}:
3350      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3351      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3352      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3353      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3354      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3355      *
3356      * Supported tensor rank: from 1
3357      *
3358      * Inputs:
3359      * * 0: An n-D tensor.
3360      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension
3361      *      index to expand. Must be in the range [-(n + 1), (n + 1)).
3362      *
3363      * Outputs:
3364      * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as
3365      *      input0.
3366      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3367      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3368      *      the scale and zeroPoint must be the same as input0.
3369      *
3370      * Available since API level 29.
3371      */
3372     ANEURALNETWORKS_EXPAND_DIMS = 50,
3373 
3374     /**
3375      * Gathers values along an axis.
3376      *
3377      * Produces an output tensor with shape
3378      *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
3379      * where:
3380      *     # Vector indices (output is rank(input0)).
3381      *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
3382      *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
3383      *
3384      *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
3385      *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
3386      *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
3387      *
3388      * Supported tensor {@link OperandCode}:
3389      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3390      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3391      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3392      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3393      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3394      *
3395      * Supported tensor rank: from 1
3396      *
3397      * Inputs:
3398      * * 0: An n-D tensor from which to gather values.
3399      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis.
3400      *      Negative index is used to specify axis from the end
3401      *      (e.g. -1 for the last axis). Must be in the range [-n, n).
3402      * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices.
3403      *      The values must be in the bounds of the corresponding dimensions
3404      *      of input0.
3405      *
3406      * Outputs:
3407      * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0.
3408      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3409      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3410      *      the scale and zeroPoint must be the same as input0.
3411      *
3412      * Available since API level 29.
3413      */
3414     ANEURALNETWORKS_GATHER = 51,
3415 
3416     /**
3417      * Generate aixs-aligned bounding box proposals.
3418      *
3419      * Bounding box proposals are generated by applying transformation on a set
3420      * of predefined anchors with the bounding box deltas from bounding box
3421      * regression. A final step of hard NMS is applied to limit the number of
3422      * returned boxes.
3423      *
3424      * Axis-aligned bounding boxes are represented by its upper-left corner
3425      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3426      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3427      *
3428      * Supported tensor {@link OperandCode}:
3429      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3430      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3431      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3432      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3433      *
3434      * Inputs:
3435      * * 0: A 4-D Tensor specifying the score of each anchor at each
3436      *      location. With "NHWC" data layout, the tensor shape is
3437      *      [batches, height, width, num_anchors]. With "NCHW" data layout,
3438      *      the tensor shape is [batches, num_anchors, height, width].
3439      * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
3440      *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
3441      *      With "NCHW" data layout, the tensor shape is
3442      *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
3443      *      in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale
3444      *      relative correction factor for the center position of the bounding box
3445      *      with respect to the width and height, dw and dh is the log-scale
3446      *      relative correction factor for the width and height. The last
3447      *      dimensions is the channel dimension.
3448      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3449      *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
3450      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3451      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
3452      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125.
3453      * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
3454      *      each image in the batch, with format [image_height, image_width].
3455      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3456      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this
3457      *      tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with
3458      *      scale of 0.125.
3459      * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3460      *      from the height of original image to the height of feature map.
3461      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3462      *      from the width of original image to the width of feature map.
3463      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3464      *      number of boxes before going into the hard NMS algorithm. Boxes
3465      *      with the lowest scores are discarded to meet the limit. Set to
3466      *      a non-positive value for unlimited number.
3467      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3468      *      number of boxes returning from the hard NMS algorithm. Boxes
3469      *      with the lowest scores are discarded to meet the limit. Set to
3470      *      a non-positive value for unlimited number.
3471      * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3472      *      threshold for hard NMS.
3473      * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with
3474      *      height or width lower than the absolute threshold are filtered out.
3475      * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3476      *       NCHW data layout for input0 and input1. Set to false for NHWC.
3477      *
3478      * Outputs:
3479      * * 0: A tensor of the same {@link OperandCode} as input0, of shape
3480      *      [num_output_rois], specifying the score of each output box.
3481      *      The boxes are grouped by batches, but the sequential order in
3482      *      each batch is not guaranteed. For type of
3483      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3484      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
3485      *      point must be the same as input0.
3486      * * 1: A tensor of the same {@link OperandCode} as input3, of shape
3487      *      [num_output_rois, 4], specifying the coordinates of each output
3488      *      bounding box for each class, with format [x1, y1, x2, y2].
3489      *      The sequential order of the boxes corresponds with output0.
3490      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3491      *      scale must be 0.125 and the zero point must be 0.
3492      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3493      *      [num_output_rois], specifying the batch index of each box. Boxes
3494      *      with the same batch index are grouped together.
3495      *
3496      * Available since API level 29.
3497      */
3498     ANEURALNETWORKS_GENERATE_PROPOSALS = 52,
3499 
3500     /**
3501      * For input tensors x and y, computes x > y elementwise.
3502      *
3503      * Supported tensor {@link OperandCode}:
3504      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3505      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3506      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3507      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3508      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3509      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3510      *
3511      * Supported tensor rank: from 1
3512      *
3513      * This operation supports broadcasting.
3514      *
3515      * Inputs:
3516      * * 0: A tensor.
3517      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3518      *      with input0.
3519      *
3520      * Outputs:
3521      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3522      *
3523      * Available since API level 29.
3524      */
3525     ANEURALNETWORKS_GREATER = 53,
3526     /**
3527      * For input tensors x and y, computes x >= y elementwise.
3528      *
3529      * Supported tensor {@link OperandCode}:
3530      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3531      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3532      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3533      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3534      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3535      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3536      *
3537      * Supported tensor rank: from 1
3538      *
3539      * This operation supports broadcasting.
3540      *
3541      * Inputs:
3542      * * 0: A tensor.
3543      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3544      *      with input0.
3545      *
3546      * Outputs:
3547      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3548      *
3549      * Available since API level 29.
3550      */
3551     ANEURALNETWORKS_GREATER_EQUAL = 54,
3552 
3553     /**
3554      * Performs a grouped 2-D convolution operation.
3555      *
3556      * Given an input tensor of shape [batches, height, width, depth_in] and a
3557      * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
3558      * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
3559      * applies a group of different filters to each input channel group, then
3560      * concatenates the results together.
3561      *
3562      * Specifically, the input channels are divided into num_groups groups, each with
3563      * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
3564      * filters are also divided into num_groups groups, i.e. depth_out is divisible
3565      * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
3566      * input channel group, and the result are concatenated together.
3567      *
3568      * The output dimensions are functions of the filter dimensions, stride, and
3569      * padding.
3570      *
3571      * The values in the output tensor are computed as:
3572      *
3573      *     output[b, i, j, g * channel_multiplier + q] =
3574      *         sum_{di, dj, dk} (
3575      *             input[b, strides[1] * i + di, strides[2] * j + dj,
3576      *                   g * depth_group + dk] *
3577      *             filter[g * channel_multiplier + q, di, dj, dk]
3578      *         ) + bias[channel]
3579      *
3580      * where channel_multiplier = depth_out / num_groups
3581      *
3582      * Supported tensor {@link OperandCode} configurations:
3583      * * 16 bit floating point:
3584      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
3585      *
3586      * * 32 bit floating point:
3587      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
3588      *
3589      * * Quantized:
3590      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
3591      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3592      * * * input.scale * filter.scale).
3593      *
3594      * * Quantized signed (since API level 30):
3595      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
3596      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3597      * * * input.scale * filter.scale).
3598      *
3599      * * Quantized with symmetric per channel quantization for the filter:
3600      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
3601      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3602      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3603      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3604      *
3605      * * Quantized signed with filter symmetric per channel quantization (since API level 30):
3606      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
3607      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3608      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3609      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3610      *
3611      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3612      * With the default data layout NHWC, the data is stored in the order of:
3613      * [batch, height, width, channels]. Alternatively, the data layout could
3614      * be NCHW, the data storage order of: [batch, channels, height, width].
3615      *
3616      * Both explicit padding and implicit padding are supported.
3617      *
3618      * Inputs (explicit padding):
3619      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3620      *      specifying the input, where depth_in = num_groups * depth_group.
3621      * * 1: A 4-D tensor, of shape
3622      *      [depth_out, filter_height, filter_width, depth_group], specifying
3623      *      the filter, where depth_out must be divisible by num_groups.  For
3624      *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3625      *      the channel dimension (channelDim at
3626      *      {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.
3627      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3628      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3629      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3630      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3631      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3632      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3633      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3634      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3635      *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3636      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3637      *      bias_scale[i] = input_scale * filter_scale[i].
3638      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3639      *      the left, in the ‘width’ dimension.
3640      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3641      *      the right, in the ‘width’ dimension.
3642      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3643      *      the top, in the ‘height’ dimension.
3644      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3645      *      the bottom, in the ‘height’ dimension.
3646      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3647      *      walking through input in the ‘width’ dimension.
3648      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3649      *      walking through input in the ‘height’ dimension.
3650      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3651      *      groups.
3652      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3653      *       {@link FuseCode} values. Specifies the activation to
3654      *       invoke on the result.
3655      * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3656      *       NCHW data layout for input0 and output0. Set to false for NHWC.
3657      *
3658      * Inputs (implicit padding):
3659      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3660      *      specifying the input, where depth_in = num_groups * depth_group.
3661      * * 1: A 4-D tensor, of shape
3662      *      [depth_out, filter_height, filter_width, depth_group], specifying
3663      *      the filter, where depth_out must be divisible by num_groups.  For
3664      *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3665      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
3666      *      must be set to 0.
3667      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3668      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3669      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same
3670      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3671      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3672      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3673      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3674      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3675      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3676      *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3677      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3678      *      bias_scale[i] = input_scale * filter_scale[i].
3679      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
3680      *      padding scheme, has to be one of the
3681      *      {@link PaddingCode} values.
3682      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3683      *      walking through input in the ‘width’ dimension.
3684      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3685      *      walking through input in the ‘height’ dimension.
3686      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3687      *      groups.
3688      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3689      *      {@link FuseCode} values. Specifies the activation to
3690      *      invoke on the result.
3691      * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3692      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3693      *
3694      * Outputs:
3695      * * 0: The output 4-D tensor, of shape
3696      *      [batches, out_height, out_width, depth_out].
3697      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3698      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3699      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3700      *
3701      * Available since API level 29.
3702      */
3703     ANEURALNETWORKS_GROUPED_CONV_2D = 55,
3704 
3705     /**
3706      * Localize the maximum keypoints from heatmaps.
3707      *
3708      * This operation approximates the accurate maximum keypoint scores and
3709      * indices after bicubic upscaling by using Taylor expansion up to the
3710      * quadratic term.
3711      *
3712      * The bounding box is represented by its upper-left corner coordinate
3713      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
3714      * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
3715      *
3716      * Supported tensor {@link OperandCode}:
3717      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3718      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3719      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3720      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3721      *
3722      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3723      * With the default data layout NHWC, the data is stored in the order of:
3724      * [batch, height, width, channels]. Alternatively, the data layout could
3725      * be NCHW, the data storage order of: [batch, channels, height, width].
3726      *
3727      * Inputs:
3728      * * 0: A 4-D Tensor of shape
3729      *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
3730      *      specifying the heatmaps, the height and width of heatmaps should
3731      *      be the same, and must be greater than or equal to 2.
3732      * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
3733      *      each with format [x1, y1, x2, y2]. For input0 of type
3734      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should
3735      *      be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint
3736      *      of 0 and scale of 0.125.
3737      *      For input0 of type
3738      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
3739      *      should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with
3740      *      zeroPoint of -128 and scale of 0.125.
3741      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3742      *      NCHW data layout for input0. Set to false for NHWC.
3743      *
3744      * Outputs:
3745      * * 0: A tensor of the same {@link OperandCode} as input0, with shape
3746      *      [num_boxes, num_keypoints], specifying score of the keypoints.
3747      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3748      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3749      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
3750      * * 1: A tensor of the same {@link OperandCode} as input1, with shape
3751      *      [num_boxes, num_keypoints, 2], specifying the location of
3752      *      the keypoints, the second dimension is organized as
3753      *      [keypoint_x, keypoint_y].
3754      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3755      *      scale must be 0.125 and the zero point must be 0.
3756      *
3757      * Available since API level 29.
3758      */
3759     ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56,
3760 
3761     /**
3762      * Applies instance normalization to the input tensor.
3763      *
3764      * The values in the output tensor are computed as:
3765      *
3766      *     output[b, h, w, c] =
3767      *         (input[b, h, w, c] - mean[b, c]) * gamma /
3768      *         sqrt(var[b, c] + epsilon) + beta
3769      *
3770      * Where the mean and variance are computed across the spatial dimensions:
3771      *
3772      *     mean[b, c] =
3773      *         sum_{h, w}(input[b, h, w, c]) / sum(1)
3774      *
3775      *     var[b, c] =
3776      *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
3777      *
3778      * Supported tensor {@link OperandCode}:
3779      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3780      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3781      *
3782      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3783      * With the default data layout NHWC, the data is stored in the order of:
3784      * [batch, height, width, channels]. Alternatively, the data layout could
3785      * be NCHW, the data storage order of: [batch, channels, height, width].
3786      *
3787      * Inputs:
3788      * * 0: An n-D tensor, specifying the tensor to be normalized.
3789      * * 1: A scalar, specifying gamma, the scale applied to the normalized
3790      *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3791      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3792      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3793      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3794      * * 2: A scalar, specifying beta, the offset applied to the normalized
3795      *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3796      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3797      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3798      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3799      * * 3: A scalar, specifying epsilon, the small value added to variance to
3800      *      avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3801      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3802      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3803      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3804      * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3805      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3806      *
3807      * Outputs:
3808      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3809      *
3810      * Available since API level 29.
3811      */
3812     ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57,
3813 
3814     /**
3815      * For input tensors x and y, computes x < y elementwise.
3816      *
3817      * Supported tensor {@link OperandCode}:
3818      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3819      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3820      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3821      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3822      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3823      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3824      *
3825      * Supported tensor rank: from 1
3826      *
3827      * This operation supports broadcasting.
3828      *
3829      * Inputs:
3830      * * 0: A tensor.
3831      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3832      *      with input0.
3833      *
3834      * Outputs:
3835      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3836      *
3837      * Available since API level 29.
3838      */
3839     ANEURALNETWORKS_LESS = 58,
3840 
3841     /**
3842      * For input tensors x and y, computes x <= y elementwise.
3843      *
3844      * Supported tensor {@link OperandCode}:
3845      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3846      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3847      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3848      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3849      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3850      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3851      *
3852      * Supported tensor rank: from 1
3853      *
3854      * This operation supports broadcasting.
3855      *
3856      * Inputs:
3857      * * 0: A tensor.
3858      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3859      *      with input0.
3860      *
3861      * Outputs:
3862      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3863      *
3864      * Available since API level 29.
3865      */
3866     ANEURALNETWORKS_LESS_EQUAL = 59,
3867 
3868     /**
3869      * Computes natural logarithm of x element-wise.
3870      *
3871      * Supported tensor {@link OperandCode}:
3872      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3873      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3874      *
3875      * Supported tensor rank: from 1.
3876      *
3877      * Inputs:
3878      * * 0: A tensor.
3879      *
3880      * Outputs:
3881      * * 0: The output tensor of same shape as input0.
3882      *
3883      * Available since API level 29.
3884      */
3885     ANEURALNETWORKS_LOG = 60,
3886 
3887     /**
3888      * Returns the truth value of x AND y element-wise.
3889      *
3890      * Supported tensor {@link OperandCode}:
3891      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3892      *
3893      * Supported tensor rank: from 1
3894      *
3895      * This operation supports broadcasting.
3896      *
3897      * Inputs:
3898      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3899      * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3900      *      compatible with input0.
3901      *
3902      * Outputs:
3903      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3904      *
3905      * Available since API level 29.
3906      */
3907     ANEURALNETWORKS_LOGICAL_AND = 61,
3908 
3909     /**
3910      * Computes the truth value of NOT x element-wise.
3911      *
3912      * Supported tensor {@link OperandCode}:
3913      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3914      *
3915      * Supported tensor rank: from 1.
3916      *
3917      * Inputs:
3918      * * 0: A tensor.
3919      *
3920      * Outputs:
3921      * * 0: The output tensor of same shape as input0.
3922      *
3923      * Available since API level 29.
3924      */
3925     ANEURALNETWORKS_LOGICAL_NOT = 62,
3926 
3927     /**
3928      * Returns the truth value of x OR y element-wise.
3929      *
3930      * Supported tensor {@link OperandCode}:
3931      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3932      *
3933      * Supported tensor rank: from 1
3934      *
3935      * This operation supports broadcasting.
3936      *
3937      * Inputs:
3938      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3939      * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3940      *      compatible with input0.
3941      *
3942      * Outputs:
3943      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3944      *
3945      * Available since API level 29.
3946      */
3947     ANEURALNETWORKS_LOGICAL_OR = 63,
3948 
3949     /**
3950      * Computes the log softmax activations given logits.
3951      *
3952      * The output is calculated using this formula:
3953      *
3954      *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
3955      *
3956      * Supported tensor {@link OperandCode}:
3957      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3958      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3959      *
3960      * Supported tensor rank: from 1.
3961      *
3962      * Inputs:
3963      * * 0: A tensor specifying the input logits.
3964      * * 1: A scalar, specifying the positive scaling factor for the exponent,
3965      *      beta.
3966      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
3967      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
3968      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
3969      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
3970      * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
3971      *      reduce across. Negative index is used to specify axis from the
3972      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3973      *
3974      * Outputs:
3975      * * 0: The output tensor of the same {@link OperandCode} and shape as
3976      *      input0.
3977      *
3978      * Available since API level 29.
3979      */
3980     ANEURALNETWORKS_LOG_SOFTMAX = 64,
3981 
3982     /**
3983      * Returns the element-wise maximum of two tensors.
3984      *
3985      * Supported tensor {@link OperandCode}:
3986      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3987      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3988      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3989      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3990      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
3991      *
3992      * Supported tensor rank: from 1.
3993      *
3994      * Inputs:
3995      * * 0: A tensor.
3996      * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
3997      *      with input0.
3998      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
3999      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4000      *
4001      * Outputs:
4002      * * 0: A tensor of the same {@link OperandCode} as input0.
4003      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4004      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4005      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4006      *
4007      * Available since API level 29.
4008      */
4009     ANEURALNETWORKS_MAXIMUM = 65,
4010 
4011     /**
4012      * Returns the element-wise minimum of two tensors.
4013      *
4014      * Supported tensor {@link OperandCode}:
4015      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4016      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4017      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4018      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4019      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4020      *
4021      * Supported tensor rank: from 1.
4022      *
4023      * Inputs:
4024      * * 0: A tensor.
4025      * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4026      *      with input0.
4027      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4028      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4029      *
4030      * Outputs:
4031      * * 0: A tensor of the same {@link OperandCode} as input0.
4032      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4033      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4034      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4035      *
4036      * Available since API level 29.
4037      */
4038     ANEURALNETWORKS_MINIMUM = 66,
4039 
4040     /**
4041      * Computes numerical negative value element-wise.
4042      *
4043      * Supported tensor {@link OperandCode}:
4044      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4045      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4046      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4047      *
4048      * Supported tensor rank: from 1.
4049      *
4050      * Inputs:
4051      * * 0: A tensor.
4052      *
4053      * Outputs:
4054      * * 0: The output tensor of same shape as input0.
4055      *
4056      * Available since API level 29.
4057      */
4058     ANEURALNETWORKS_NEG = 67,
4059 
4060     /**
4061      * For input tensors x and y, computes x != y elementwise.
4062      *
4063      * Supported tensor {@link OperandCode}:
4064      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4065      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4066      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4067      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4068      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4069      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4070      *
4071      * Supported tensor rank: from 1
4072      *
4073      * This operation supports broadcasting.
4074      *
4075      * Inputs:
4076      * * 0: A tensor.
4077      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
4078      *      with input0.
4079      *
4080      * Outputs:
4081      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
4082      *
4083      * Available since API level 29.
4084      */
4085     ANEURALNETWORKS_NOT_EQUAL = 68,
4086 
4087     /**
4088      * Pads a tensor with the given constant value according to the specified
4089      * paddings.
4090      *
4091      * Supported tensor {@link OperandCode}:
4092      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4093      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4094      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4095      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4096      *
4097      * Supported tensor rank: up to 4
4098      *
4099      * Inputs:
4100      * * 0: An n-D tensor, specifying the tensor to be padded.
4101      * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
4102      *      for each spatial dimension of the input tensor. The shape of the
4103      *      tensor must be {rank(input0), 2}.
4104      *      padding[i, 0] specifies the number of elements to be padded in the
4105      *      front of dimension i.
4106      *      padding[i, 1] specifies the number of elements to be padded after
4107      *      the end of dimension i.
4108      * * 2: An scalar specifying the value to use for padding input0.
4109      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
4110      *      pad value must be of {@link ANEURALNETWORKS_FLOAT16}.
4111      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
4112      *      pad value must be of {@link ANEURALNETWORKS_FLOAT32}.
4113      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4114      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4115      *      the pad value must be of {@link ANEURALNETWORKS_INT32}. The
4116      *      scale and zeroPoint are assumed to be the same as in input0.
4117      *
4118      * Outputs:
4119      * * 0: A tensor of the same {@link OperandCode} as input0. The
4120      *      output tensor has the same rank as input0, and each
4121      *      dimension of the output tensor has the same size as the
4122      *      corresponding dimension of the input tensor plus the size
4123      *      of the padding:
4124      *          output0.dimension[i] =
4125      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
4126      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4127      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4128      *      the scale and zeroPoint must be the same as input0.
4129      *
4130      * Available since API level 29.
4131      */
4132     ANEURALNETWORKS_PAD_V2 = 69,
4133 
4134     /**
4135      * Computes the power of one value to another.
4136      *
4137      * Given a tensor base and a tensor exponent, this operation computes
4138      * base^exponent elementwise.
4139      *
4140      * This operations supports broadcasting. The size of the output is the
4141      * maximum size along each dimension of the input operands. It starts with
4142      * the trailing dimensions, and works its way forward.
4143      *
4144      * For example:
4145      *     base.dimension     =    {4, 1, 2}
4146      *     exponent.dimension = {5, 4, 3, 1}
4147      *     output.dimension   = {5, 4, 3, 2}
4148      *
4149      * Supported tensor {@link OperandCode}:
4150      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4151      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4152      *
4153      * Supported tensor rank: from 1
4154      *
4155      * Inputs:
4156      * * 0: A tensor specifying the base.
4157      * * 1: A tensor specifying the exponent.
4158      *
4159      * Outputs:
4160      * * 0: An output tensor.
4161      *
4162      * Available since API level 29.
4163      */
4164     ANEURALNETWORKS_POW = 70,
4165 
4166     /**
4167      * Parametric Rectified Linear Unit.
4168      *
4169      * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
4170      * is a learned array with the same {@link OperandCode} and compatible
4171      * dimensions as input x.
4172      *
4173      * Two dimensions are compatible when:
4174      *     1. they are equal, or
4175      *     2. one of them is 1
4176      *
4177      * The size of the output is the maximum size along each dimension of the
4178      * input operands. It starts with the trailing dimensions, and works its way
4179      * forward.
4180      *
4181      * Example:
4182      *     input.dimension  =    {4, 1, 2}
4183      *     alpha.dimension  = {5, 4, 3, 1}
4184      *     output.dimension = {5, 4, 3, 2}
4185      *
4186      * Supported tensor {@link OperandCode}:
4187      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4188      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4189      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4190      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4191      *
4192      * Supported tensor rank: from 1
4193      *
4194      * Inputs:
4195      * * 0: A tensor, specifying the input.
4196      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
4197      *      as input0, specifying the alpha.
4198      *
4199      * Outputs:
4200      * * 0: A tensor of the same {@link OperandCode} as input0.
4201      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4202      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4203      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4204      *
4205      * Available since API level 29.
4206      */
4207     ANEURALNETWORKS_PRELU = 71,
4208 
4209     /**
4210      * Quantizes the input tensor.
4211      *
4212      * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is:
4213      *
4214      *     output = max(0, min(255, round(input / scale) + zeroPoint)
4215      *
4216      * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output
4217      * tensor is:
4218      *
4219      *     output = max(-128, min(127, round(input / scale) + zeroPoint)
4220      *
4221      * Supported input tensor {@link OperandCode}:
4222      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4223      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4224      *
4225      * Supported output tensor {@link OperandCode}:
4226      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4227      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4228      *
4229      * Supported tensor rank: from 1
4230      *
4231      * Inputs:
4232      * * 0: A tensor, may be zero-sized.
4233      *
4234      * Outputs:
4235      * * 0: The output tensor of same shape as input0, but with
4236      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or.
4237      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}.
4238      *
4239      * Available since API level 29.
4240      */
4241     ANEURALNETWORKS_QUANTIZE = 72,
4242 
4243     /**
4244      * A version of quantized LSTM, using 16 bit quantization for internal
4245      * state.
4246      *
4247      * There is no projection layer, so cell state size is equal to the output
4248      * size.
4249      *
4250      * Inputs:
4251      * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4252      *      and shape [numBatches, inputSize] specifying the input to the LSTM
4253      *      cell. Tensor is quantized with a fixed quantization range of
4254      *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
4255      * * 1: The input-to-input weights.
4256      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4257      *      and shape [outputSize, inputSize] specifying input-to-input part of
4258      *      weights for fully-connected layer inside the LSTM cell.
4259      *      Quantization zero point and scale must be the same across all the
4260      *      weights.
4261      * * 2: The input-to-forget weights.
4262      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4263      *      and shape [outputSize, inputSize] specifying input-to-forget part of
4264      *      weights for fully-connected layer inside the LSTM cell.
4265      *      Quantization zero point and scale must be the same across all the
4266      *      weights.
4267      * * 3: The input-to-cell weights.
4268      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4269      *      and shape [outputSize, inputSize] specifying input-to-cell part of
4270      *      weights for fully-connected layer inside the LSTM cell.
4271      *      Quantization zero point and scale must be the same across all the
4272      *      weights.
4273      * * 4: The input-to-output weights.
4274      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4275      *      and shape [outputSize, inputSize] specifying input-to-output part of
4276      *      weights for fully-connected layer inside the LSTM cell.
4277      *      Quantization zero point and scale must be the same across all the
4278      *      weights.
4279      * * 5: The recurrent-to-input weights.
4280      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4281      *      and shape [outputSize, outputSize] specifying recurrent-to-input part
4282      *      of weights for fully-connected layer inside the LSTM cell.
4283      *      Quantization zero point and scale must be the same across all the
4284      *      weights.
4285      * * 6: The recurrent-to-forget weights.
4286      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4287      *      and shape [outputSize, outputSize] specifying recurrent-to-forget
4288      *      part of weights for fully-connected layer inside the LSTM cell.
4289      *      Quantization zero point and scale must be the same across all the
4290      *      weights.
4291      * * 7: The recurrent-to-cell weights.
4292      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4293      *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
4294      *      of weights for fully-connected layer inside the LSTM cell.
4295      *      Quantization zero point and scale must be the same across all the
4296      *      weights.
4297      * * 8: The recurrent-to-output weights.
4298      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4299      *      and shape [outputSize, outputSize] specifying recurrent-to-output
4300      *      part of weights for fully-connected layer inside the LSTM cell.
4301      *      Quantization zero point and scale must be the same across all the
4302      *      weights.
4303      * * 9: The input gate bias.
4304      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4305      *      [outputSize] specifying the bias for the fully-connected layer
4306      *      inside the LSTM cell. Bias is quantized with scale being a product
4307      *      of input and weights scales and zeroPoint equal to 0.
4308      * * 10:The forget gate bias.
4309      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4310      *      [outputSize] specifying the bias for the fully-connected layer
4311      *      inside the LSTM cell. Bias is quantized with scale being a product
4312      *      of input and weights scales and zeroPoint equal to 0.
4313      * * 11:The cell bias.
4314      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4315      *      [outputSize] specifying the bias for the fully-connected layer
4316      *      inside the LSTM cell. Bias is quantized with scale being a product
4317      *      of input and weights scales and zeroPoint equal to 0.
4318      * * 12:The output gate bias.
4319      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4320      *      [outputSize] specifying the bias for the fully-connected layer
4321      *      inside the LSTM cell. Bias is quantized with scale being a product
4322      *      of input and weights scales and zeroPoint equal to 0.
4323      * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4324      *       and shape [numBatches, outputSize] specifying the cell state from the
4325      *       previous time step of the LSTM cell. It is quantized using a
4326      *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
4327      *       32768, zeroPoint = 0).
4328      * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4329      *       and shape [numBathes, outputSize] specifying the output of the LSTM
4330      *       cell from previous time-step. Tensor is quantized with a fixed
4331      *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
4332      *       128).
4333      *
4334      *
4335      * Outputs:
4336      * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4337      *      and shape [numBatches, outputSize] which contains a cell state from
4338      *      the current time step. Tensor is quantized using a quantization
4339      *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
4340      *      0).
4341      * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4342      *      and shape [numBathes, outputSize] which contains the output value.
4343      *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
4344      *      (scale = 1/128, zeroPoint = 128).
4345      */
4346     ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
4347 
4348     /**
4349      * Draws samples from a multinomial distribution.
4350      *
4351      * Supported tensor {@link OperandCode}:
4352      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4353      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4354      *
4355      * Inputs:
4356      * * 0: A 2-D tensor with shape [batches, classes], specifying the
4357      *      unnormalized log-probabilities for all classes.
4358      * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of
4359      *      independent samples to draw for each row slice.
4360      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2],
4361      *      specifying seeds used to initialize the random distribution. If both
4362      *      provided seeds are 0, both will be randomly generated.
4363      * Outputs:
4364      * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
4365      *      [batches, samples], containing the drawn samples.
4366      *
4367      * Available since API level 29.
4368      */
4369     ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74,
4370 
4371     /**
4372      * Reduces a tensor by computing the "logical and" of elements along given
4373      * dimensions.
4374      *
4375      * If keep_dims is true, the reduced dimensions are
4376      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4377      * 1 for each entry in dimensions.
4378      *
4379      * Supported tensor {@link OperandCode}:
4380      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4381      *
4382      * Supported tensor rank: up to 4
4383      *
4384      * Inputs:
4385      * * 0: An n-D tensor.
4386      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4387      *      to reduce. Dimension values must be in the range [-n, n).
4388      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4389      *      retains reduced dimensions with length 1.
4390      *
4391      * Outputs:
4392      * * 0: A tensor of the same {@link OperandCode} as input0.
4393      *      If all dimensions are reduced and keep_dims is false, the output
4394      *      shape is [1].
4395      *
4396      * Available since API level 29.
4397      */
4398     ANEURALNETWORKS_REDUCE_ALL = 75,
4399 
4400     /**
4401      * Reduces a tensor by computing the "logical or" of elements along given
4402      * dimensions.
4403      *
4404      * If keep_dims is true, the reduced dimensions are
4405      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4406      * 1 for each entry in dimensions.
4407      *
4408      * Supported tensor {@link OperandCode}:
4409      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4410      *
4411      * Supported tensor rank: up to 4
4412      *
4413      * Inputs:
4414      * * 0: An n-D tensor.
4415      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4416      *      to reduce. Dimension values must be in the range [-n, n).
4417      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4418      *      retains reduced dimensions with length 1.
4419      *
4420      * Outputs:
4421      * * 0: A tensor of the same {@link OperandCode} as input0.
4422      *      If all dimensions are reduced and keep_dims is false, the output
4423      *      shape is [1].
4424      *
4425      * Available since API level 29.
4426      */
4427     ANEURALNETWORKS_REDUCE_ANY = 76,
4428 
4429     /**
4430      * Reduces a tensor by computing the maximum of elements along given
4431      * dimensions.
4432      *
4433      * If keep_dims is true, the reduced dimensions are
4434      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4435      * 1 for each entry in dimensions.
4436      *
4437      * Supported tensor {@link OperandCode}:
4438      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4439      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4440      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4441      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4442      *
4443      * Supported tensor rank: up to 4
4444      *
4445      * Inputs:
4446      * * 0: An n-D tensor.
4447      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4448      *      to reduce. Dimension values must be in the range [-n, n).
4449      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4450      *      retains reduced dimensions with length 1.
4451      *
4452      * Outputs:
4453      * * 0: A tensor of the same {@link OperandCode} as input0.
4454      *      If all dimensions are reduced and keep_dims is false, the output
4455      *      shape is [1].
4456      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4457      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4458      *      the scale and zeroPoint must be the same as input0.
4459      *
4460      * Available since API level 29.
4461      */
4462     ANEURALNETWORKS_REDUCE_MAX = 77,
4463 
4464     /**
4465      * Reduces a tensor by computing the minimum of elements along given
4466      * dimensions.
4467      *
4468      * If keep_dims is true, the reduced dimensions are
4469      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4470      * 1 for each entry in dimensions.
4471      *
4472      * Supported tensor {@link OperandCode}:
4473      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4474      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4475      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4476      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4477      *
4478      * Supported tensor rank: up to 4
4479      *
4480      * Inputs:
4481      * * 0: An n-D tensor.
4482      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4483      *      to reduce. Dimension values must be in the range [-n, n).
4484      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4485      *      retains reduced dimensions with length 1.
4486      *
4487      * Outputs:
4488      * * 0: A tensor of the same {@link OperandCode} as input0.
4489      *      If all dimensions are reduced and keep_dims is false, the output
4490      *      shape is [1].
4491      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4492      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4493      *      the scale and zeroPoint must be the same as input0.
4494      *
4495      * Available since API level 29.
4496      */
4497     ANEURALNETWORKS_REDUCE_MIN = 78,
4498 
4499     /**
4500      * Reduces a tensor by multiplying elements along given dimensions.
4501      *
4502      * If keep_dims is true, the reduced dimensions are
4503      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4504      * 1 for each entry in dimensions.
4505      *
4506      * Supported tensor {@link OperandCode}:
4507      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4508      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4509      *
4510      * Supported tensor rank: up to 4
4511      *
4512      * Inputs:
4513      * * 0: An n-D tensor.
4514      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4515      *      to reduce. Dimension values must be in the range [-n, n).
4516      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4517      *      retains reduced dimensions with length 1.
4518      *
4519      * Outputs:
4520      * * 0: A tensor of the same {@link OperandCode} as input0.
4521      *      If all dimensions are reduced and keep_dims is false, the output
4522      *      shape is [1].
4523      *
4524      * Available since API level 29.
4525      */
4526     ANEURALNETWORKS_REDUCE_PROD = 79,
4527 
4528     /**
4529      * Reduces a tensor by summing elements along given dimensions.
4530      *
4531      * If keep_dims is true, the reduced dimensions are
4532      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4533      * 1 for each entry in dimensions.
4534      *
4535      * Supported tensor {@link OperandCode}:
4536      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4537      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4538      *
4539      * Supported tensor rank: up to 4
4540      *
4541      * Inputs:
4542      * * 0: An n-D tensor.
4543      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4544      *      to reduce. Dimension values must be in the range [-n, n).
4545      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4546      *      retains reduced dimensions with length 1.
4547      *
4548      * Outputs:
4549      * * 0: A tensor of the same {@link OperandCode} as input0.
4550      *      If all dimensions are reduced and keep_dims is false, the output
4551      *      shape is [1].
4552      *
4553      * Available since API level 29.
4554      */
4555     ANEURALNETWORKS_REDUCE_SUM = 80,
4556 
4557     /**
4558      * Select and scale the feature map of each region of interest to a unified
4559      * output size by average pooling sampling points from bilinear interpolation.
4560      *
4561      * The region of interest is represented by its upper-left corner coordinate
4562      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4563      * A spatial scaling factor is applied to map into feature map coordinate.
4564      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4565      *
4566      * No rounding is applied in this operation. The sampling points are unified
4567      * distributed in the pooling bin and their values are calculated by bilinear
4568      * interpolation.
4569      *
4570      * Supported tensor {@link OperandCode}:
4571      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4572      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4573      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4574      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4575      *
4576      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4577      * With the default data layout NHWC, the data is stored in the order of:
4578      * [batch, height, width, channels]. Alternatively, the data layout could
4579      * be NCHW, the data storage order of: [batch, channels, height, width].
4580      *
4581      * Inputs:
4582      * * 0: A 4-D tensor, specifying the feature map.
4583      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4584      *      the regions of interest, each line with format [x1, y1, x2, y2].
4585      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
4586      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4587      *      with zeroPoint of 0 and scale of 0.125. Zero num_rois is
4588      *      supported for this tensor.
4589      * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4590      *      [num_rois], specifying the batch index of each box. Boxes with
4591      *      the same batch index are grouped together. Zero num_rois is
4592      *      supported for this tensor.
4593      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4594      *      height of the output tensor.
4595      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4596      *      width of the output tensor.
4597      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4598      *      from the height of original image to the height of feature map.
4599      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4600      *      from the width of original image to the width of feature map.
4601      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4602      *      sampling points in height dimension used to compute the output.
4603      *      Set to 0 for adaptive value of ceil(roi_height/out_height).
4604      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4605      *      sampling points in width dimension used to compute the output.
4606      *      Set to 0 for adaptive value of ceil(roi_width/out_width).
4607      * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4608      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4609      *
4610      * Outputs:
4611      * * 0: A tensor of the same {@link OperandCode} as input0. The output
4612      *      shape is [num_rois, out_height, out_width, depth].
4613      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4614      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4615      *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
4616      *
4617      * Available since API level 29.
4618      */
4619     ANEURALNETWORKS_ROI_ALIGN = 81,
4620 
4621     /**
4622      * Select and scale the feature map of each region of interest to a unified
4623      * output size by max-pooling.
4624      *
4625      * The region of interest is represented by its upper-left corner coordinate
4626      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4627      * A spatial scaling factor is applied to map into feature map coordinate.
4628      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4629      *
4630      * Rounding is applied in this operation to ensure integer boundary for
4631      * regions of interest and pooling bins.
4632      *
4633      * Supported tensor {@link OperandCode}:
4634      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4635      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4636      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4637      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4638      *
4639      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4640      * With the default data layout NHWC, the data is stored in the order of:
4641      * [batch, height, width, channels]. Alternatively, the data layout could
4642      * be NCHW, the data storage order of: [batch, channels, height, width].
4643      *
4644      * Inputs:
4645      * * 0: A 4-D tensor, specifying the feature map.
4646      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4647      *      the regions of interest, each line with format [x1, y1, x2, y2].
4648      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4649      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4650      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4651      *      with zeroPoint of 0 and scale of 0.125.
4652      * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4653      *      [num_rois], specifying the batch index of each box. Boxes with
4654      *      the same batch index are grouped together.
4655      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4656      *      height of the output tensor.
4657      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4658      *      width of the output tensor.
4659      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4660      *      from the height of original image to the height of feature map.
4661      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4662      *      from the width of original image to the width of feature map.
4663      * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4664      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4665      *
4666      * Outputs:
4667      * * 0: A tensor of the same {@link OperandCode} as input0. The output
4668      *      shape is [num_rois, out_height, out_width, depth].
4669      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4670      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4671      *      the scale and zeroPoint must be the same as input0.
4672      *
4673      * Available since API level 29.
4674      */
4675     ANEURALNETWORKS_ROI_POOLING = 82,
4676 
4677     /**
4678      * Computes reciprocal of square root of x element-wise.
4679      *
4680      * Supported tensor {@link OperandCode}:
4681      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4682      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4683      *
4684      * Supported tensor rank: from 1.
4685      *
4686      * Inputs:
4687      * * 0: A tensor.
4688      *
4689      * Outputs:
4690      * * 0: The output tensor of same shape as input0.
4691      *
4692      * Available since API level 29.
4693      */
4694     ANEURALNETWORKS_RSQRT = 83,
4695 
4696     /**
4697      * Using a tensor of booleans c and input tensors x and y select values
4698      * elementwise from both input tensors:
4699      *
4700      * O[i] = C[i] ? x[i] : y[i].
4701      *
4702      * Supported tensor {@link OperandCode}:
4703      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4704      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4705      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4706      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4707      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4708      *
4709      * Supported tensor rank: from 1
4710      *
4711      * Inputs:
4712      * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a
4713      *      mask that chooses, based on the value at each element, whether the
4714      *      corresponding element in the output should be taken from input1 (if
4715      *      true) or input2 (if false).
4716      * * 1: An input tensor of the same shape as input0.
4717      * * 2: An input tensor of the same shape and type as input1.
4718      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4719      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4720      *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
4721      *
4722      * Outputs:
4723      * * 0: A tensor of the same type and shape as input1 and input2.
4724      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4725      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4726      *
4727      * Available since API level 29.
4728      */
4729     ANEURALNETWORKS_SELECT = 84,
4730 
4731     /**
4732      * Computes sin of x element-wise.
4733      *
4734      * Supported tensor {@link OperandCode}:
4735      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4736      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4737      *
4738      * Supported tensor rank: from 1.
4739      *
4740      * Inputs:
4741      * * 0: A tensor.
4742      *
4743      * Outputs:
4744      * * 0: The output tensor of same shape as input0.
4745      *
4746      * Available since API level 29.
4747      */
4748     ANEURALNETWORKS_SIN = 85,
4749 
4750     /**
4751      * Extracts a slice of specified size from the input tensor starting at a
4752      * specified location.
4753      *
4754      * The starting location is specified as a 1-D tensor containing offsets
4755      * for each dimension. The size is specified as a 1-D tensor containing
4756      * either size of a slice along corresponding dimension or -1. In the latter
4757      * case, all the remaining elements in dimension are included in the slice.
4758      *
4759      * A sum of begin offset and a size of a slice must not exceed size of a
4760      * corresponding dimension.
4761      *
4762      * Supported tensor {@link OperandCode}:
4763      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4764      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4765      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4766      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4767      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4768      *
4769      * Supported tensor rank: from 1
4770      *
4771      * Inputs:
4772      * * 0: An n-D tensor to take slice from, may be zero-sized.
4773      * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4774      *      the beginning indices of the slice in each dimension.
4775      * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4776      *      the size of the slice in each dimension.
4777      *
4778      * Outputs:
4779      * * 0: An n-D tensor of the same type as the input containing the slice.
4780      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4781      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4782      *      its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
4783      *
4784      * Available since API level 29.
4785      */
4786     ANEURALNETWORKS_SLICE = 86,
4787 
4788     /**
4789      * Splits a tensor along a given axis into num_splits subtensors.
4790      *
4791      * Supported tensor {@link OperandCode}:
4792      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4793      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4794      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4795      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4796      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4797      *
4798      * Supported tensor rank: from 1
4799      *
4800      * Inputs:
4801      * * 0: An n-D tensor to split.
4802      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along
4803      *      which to split.
4804      * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of
4805      *      splits along given axis. Must evenly divide axis size.
4806      *
4807      * Outputs:
4808      * * 0 ~ (num_splits - 1): Resulting subtensors.
4809      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4810      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4811      *      the scale and zeroPoint must be the same as input0.
4812      *
4813      * Available since API level 29.
4814      */
4815     ANEURALNETWORKS_SPLIT = 87,
4816 
4817     /**
4818      * Computes square root of x element-wise.
4819      *
4820      * Supported tensor {@link OperandCode}:
4821      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4822      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4823      *
4824      * Supported tensor rank: from 1.
4825      *
4826      * Inputs:
4827      * * 0: A tensor.
4828      *
4829      * Outputs:
4830      * * 0: The output tensor of same shape as input0.
4831      *
4832      * Available since API level 29.
4833      */
4834     ANEURALNETWORKS_SQRT = 88,
4835 
4836     /**
4837      * Constructs a tensor by tiling a given tensor.
4838      *
4839      * This operation creates a new tensor by replicating `input` `multiples`
4840      * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
4841      * elements, and the values of `input` are replicated `multiples[i]` times
4842      * along the i-th dimension.
4843      * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
4844      *
4845      * Supported tensor {@link OperandCode}:
4846      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4847      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4848      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4849      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4850      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4851      *
4852      * Supported tensor rank: from 1
4853      *
4854      * Inputs:
4855      * * 0: input, an n-D tensor specifying the input.
4856      * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
4857      *      The length of multiples must be n.
4858      *
4859      * Outputs:
4860      * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`.
4861      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4862      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4863      *      the scale and zeroPoint must be the same as input0.
4864      *
4865      * Available since API level 29.
4866      */
4867     ANEURALNETWORKS_TILE = 89,
4868 
4869     /**
4870      * Finds values and indices of the k largest entries for the last dimension.
4871      *
4872      * Resulting values in each dimensions are sorted in descending order. If
4873      * two values are equal, the one with larger index appears first.
4874      *
4875      * Supported tensor {@link OperandCode}:
4876      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4877      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4878      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4879      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4880      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
4881      *
4882      * Supported tensor rank: from 1
4883      *
4884      * Inputs:
4885      * * 0: input, an n-D tensor specifying the input.
4886      * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4887      *      top elements to look for along the last dimension.
4888      *
4889      * Outputs:
4890      * * 0: An n-D tensor of the same type as the input, containing the k
4891      *      largest elements along each last dimensional slice.
4892      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4893      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4894      *      the scale and zeroPoint must be the same as input0.
4895      * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}
4896      *      containing the indices of values within the last dimension of input.
4897      *
4898      * Available since API level 29.
4899      */
4900     ANEURALNETWORKS_TOPK_V2 = 90,
4901 
4902     /**
4903      * Performs the transpose of 2-D convolution operation.
4904      *
4905      * This operation is sometimes called "deconvolution" after Deconvolutional
4906      * Networks, but is actually the transpose (gradient) of
4907      * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution.
4908      *
4909      * The output dimensions are functions of the filter dimensions, stride, and
4910      * padding.
4911      *
4912      * Supported tensor {@link OperandCode} configurations:
4913      * * 16 bit floating point:
4914      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
4915      *
4916      * * 32 bit floating point:
4917      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
4918      *
4919      * * Quantized:
4920      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
4921      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4922      * * * input.scale * filter.scale).
4923      *
4924      * * Quantized with symmetric per channel quantization for the filter:
4925      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
4926      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4927      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4928      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4929      *
4930      * Available since API level 30:
4931      * * Quantized signed (since API level 30):
4932      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
4933      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4934      * * * input.scale * filter.scale).
4935      *
4936      * * Quantized signed with filter symmetric per channel quantization (since API level 30):
4937      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
4938      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4939      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4940      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4941      *
4942      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4943      * With the default data layout NHWC, the data is stored in the order of:
4944      * [batch, height, width, channels]. Alternatively, the data layout could
4945      * be NCHW, the data storage order of: [batch, channels, height, width].
4946      *
4947      * Both explicit padding and implicit padding are supported.
4948      *
4949      * Inputs (explicit padding):
4950      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4951      *      specifying the input.
4952      *      Since API level 29, zero batches is supported for this tensor.
4953      * * 1: A 4-D tensor, of shape
4954      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4955      *      filter. For tensor of type
4956      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4957      *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
4958      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4959      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
4960      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the
4961      *      same type.
4962      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4963      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4964      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
4965      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4966      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
4967      *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
4968      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4969      *      bias_scale[i] = input_scale * filter_scale[i].
4970      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4971      *      the left, in the ‘width’ dimension.
4972      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4973      *      the right, in the ‘width’ dimension.
4974      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4975      *      the top, in the ‘height’ dimension.
4976      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4977      *      the bottom, in the ‘height’ dimension.
4978      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4979      *      walking through input in the ‘width’ dimension.
4980      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4981      *      walking through input in the ‘height’ dimension.
4982      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
4983      *      {@link FuseCode} values. Specifies the activation to
4984      *      invoke on the result.
4985      * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4986      *       NCHW data layout for input0 and output0. Set to false for NHWC.
4987      *
4988      * Inputs (implicit padding):
4989      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4990      *      specifying the input.
4991      *      Since API level 29, zero batches is supported for this tensor.
4992      * * 1: A 4-D tensor, of shape
4993      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4994      *      filter. For tensor of type
4995      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4996      *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
4997      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4998      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
4999      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the
5000      *      same type.
5001      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5002      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
5003      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
5004      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
5005      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
5006      *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
5007      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
5008      *      bias_scale[i] = input_scale * filter_scale[i].
5009      * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output
5010      *      tensor shape.
5011      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
5012      *      padding scheme, has to be one of the
5013      *      {@link PaddingCode} values.
5014      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5015      *      walking through input in the ‘width’ dimension.
5016      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5017      *      walking through input in the ‘height’ dimension.
5018      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
5019      *      {@link FuseCode} values. Specifies the activation to
5020      *      invoke on the result.
5021      * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
5022      *      NCHW data layout for input0 and output0. Set to false for NHWC.
5023      *
5024      * Outputs:
5025      * * 0: The output 4-D tensor, of shape
5026      *      [batches, out_height, out_width, depth_out].
5027      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5028      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5029      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
5030      *
5031      * Available since API level 29.
5032      */
5033     ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91,
5034 
5035     /**
5036      * A recurrent neural network specified by an LSTM cell.
5037      *
5038      * Performs (fully) dynamic unrolling of input.
5039      *
5040      * This Op unrolls the input along the time dimension, and implements the
5041      * following operation for each element in the sequence
5042      * s = 1...sequence_length:
5043      *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
5044      *
5045      * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM},
5046      * the "projection" is an optional projection layer from state and output
5047      * and the “activation” is the function passed as the
5048      * “fused_activation_function” argument (if not “NONE”).
5049      *
5050      * Supported tensor {@link OperandCode}:
5051      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5052      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5053      *
5054      * Supported tensor rank: 3, either time-major or batch-major.
5055      *
5056      * All input and output tensors must be of the same type.
5057      *
5058      * Inputs:
5059      * * 0: The input (\f$x_t\f$).
5060      *      A 3-D tensor of shape:
5061      *        If time-major: [max_time, batch_size, input_size]
5062      *        If batch-major: [batch_size, max_time, input_size]
5063      *      where “max_time” is the number of timesteps (sequence length),
5064      *      “batch_size” corresponds to the batching dimension, and
5065      *      “input_size” is the size of the input.
5066      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
5067      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
5068      *      corresponds to the number of cell units.
5069      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
5070      *      A 2-D tensor of shape [num_units, input_size].
5071      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
5072      *      A 2-D tensor of shape [num_units, input_size].
5073      * * 4: The input-to-output weights (\f$W_{xo}\f$).
5074      *      A 2-D tensor of shape [num_units, input_size].
5075      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
5076      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
5077      *      corresponds to either the number of cell units (i.e., “num_units”),
5078      *      or the second dimension of the “projection_weights”, if defined.
5079      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
5080      *      A 2-D tensor of shape [num_units, output_size].
5081      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
5082      *      A 2-D tensor of shape [num_units, output_size].
5083      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
5084      *      A 2-D tensor of shape [num_units, output_size].
5085      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
5086      *      A 1-D tensor of shape [num_units].
5087      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
5088      *      A 1-D tensor of shape [num_units].
5089      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
5090      *      A 1-D tensor of shape [num_units].
5091      * * 12:The input gate bias (\f$b_i\f$). Optional.
5092      *      A 1-D tensor of shape [num_units].
5093      * * 13:The forget gate bias (\f$b_f\f$).
5094      *      A 1-D tensor of shape [num_units].
5095      * * 14:The cell bias (\f$b_c\f$).
5096      *      A 1-D tensor of shape [num_units].
5097      * * 15:The output gate bias (\f$b_o\f$).
5098      *      A 1-D tensor of shape [num_units].
5099      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
5100      *      A 2-D tensor of shape [output_size, num_units].
5101      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
5102      *      A 1-D tensor of shape [output_size].
5103      * * 18:The output state (in) (\f$h_{t-1}\f$).
5104      *      A 2-D tensor of shape [batch_size, output_size].
5105      * * 19:The cell state (in) (\f$C_{t-1}\f$).
5106      *      A 2-D tensor of shape [batch_size, num_units].
5107      * * 20:The activation function (\f$g\f$).
5108      *      A value indicating the activation function:
5109      *      <ul>
5110      *      <li>0: None;
5111      *      <li>1: Relu;
5112      *      <li>3: Relu6;
5113      *      <li>4: Tanh;
5114      *      <li>6: Sigmoid.
5115      *      </ul>
5116      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
5117      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
5118      *      then clipping is disabled.
5119      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
5120      *      projection layer, such that values are bound within
5121      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
5122      * * 23:Time-major if true, batch-major if false.
5123      * * 24:The input layer normalization weights. Optional.
5124      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5125      *      to activation at input gate.
5126      * * 25:The forget layer normalization weights. Optional.
5127      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5128      *      to activation at forget gate.
5129      * * 26:The cell layer normalization weights. Optional.
5130      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5131      *      to activation at cell gate.
5132      * * 27:The output layer normalization weights. Optional.
5133      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5134      *      to activation at output gate.
5135      *
5136      * Outputs:
5137      * * 0: The output (\f$o_t\f$).
5138      *      A 3-D tensor of shape:
5139      *        If time-major: [max_time, batch_size, output_size]
5140      *        If batch-major: [batch_size, max_time, output_size]
5141      * * 1: A tensor of shape [batch_size, output_size] containing a hidden
5142      *      state from the last time step in the sequence. This output is
5143      *      optional and can be omitted. If this output is present then
5144      *      output #2 must be present as well.
5145      *      Available since API level 30.
5146      * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
5147      *      from the last time step in the sequence. This output is optional
5148      *      and can be omitted.
5149      *      Available since API level 30.
5150      *
5151      * Available since API level 29.
5152      *
5153      * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI
5154      * does not maintain internal states. This operator does not support the usage pattern in which
5155      * multiple cells are chained and state tensors are propagated.
5156      */
5157     ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
5158 
5159     /**
5160      * A recurrent neural network layer that applies a basic RNN cell to a
5161      * sequence of inputs.
5162      *
5163      * This layer unrolls the input along the sequence dimension, and implements
5164      * the following operation
5165      * for each element in the sequence s = 1...sequence_length:
5166      *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
5167      *   recurrent_weights’ + bias)
5168      *
5169      * Where:
5170      * * “input_weights” is a weight matrix that multiplies the inputs;
5171      * * “recurrent_weights” is a weight matrix that multiplies the current
5172      *    “state” which itself is the output from the previous time step
5173      *    computation;
5174      * * “bias” is a bias vector (added to each output vector in the batch);
5175      * * “activation” is the function passed as the “fused_activation_function”
5176      *   argument (if not “NONE”).
5177      *
5178      * Supported tensor {@link OperandCode}:
5179      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5180      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5181      *
5182      * The input tensors must all be the same type.
5183      *
5184      * Inputs:
5185      * * 0: input.
5186      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5187      *      it is set to 1, then the input has a shape [maxTime, batchSize,
5188      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
5189      *      inputSize].
5190      * * 1: weights.
5191      *      A 2-D tensor of shape [numUnits, inputSize].
5192      * * 2: recurrent_weights.
5193      *      A 2-D tensor of shape [numUnits, numUnits].
5194      * * 3: bias.
5195      *      A 1-D tensor of shape [numUnits].
5196      * * 4: hidden state
5197      *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
5198      *      state input for the first time step of the computation.
5199      * * 5: fusedActivationFunction.
5200      *      A {@link FuseCode} value indicating the activation function. If
5201      *      “NONE” is specified then it results in a linear activation.
5202      * * 6: timeMajor
5203      *      An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format
5204      *      of input and output tensors. Must be set to either 0 or 1.
5205      * Outputs:
5206      * * 0: output.
5207      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5208      *      it is set to 1, then the output has a shape [maxTime, batchSize,
5209      *      numUnits], otherwise the output has a shape [batchSize, maxTime,
5210      *      numUnits].
5211      * * 1: A tensor of shape [batchSize, numUnits] containing hidden state
5212      *      from the last time step in the sequence. This output is optional
5213      *      and can be omitted.
5214      *      Available since API level 30.
5215      *
5216      * Available since API level 29.
5217      *
5218      * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI
5219      * does not maintain internal states. This operator does not support the usage pattern in which
5220      * multiple cells are chained and state tensors are propagated.
5221      */
5222     ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
5223 
5224     /**
5225      * Resizes images to given size using the nearest neighbor interpretation.
5226      *
5227      * Resized images must be distorted if their output aspect ratio is not the
5228      * same as input aspect ratio. The corner pixels of output may not be the
5229      * same as corner pixels of input.
5230      *
5231      * Supported tensor {@link OperandCode}:
5232      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5233      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5234      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5235      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
5236      *
5237      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5238      * With the default data layout NHWC, the data is stored in the order of:
5239      * [batch, height, width, channels]. Alternatively, the data layout could
5240      * be NCHW, the data storage order of: [batch, channels, height, width].
5241      *
5242      * Both resizing by shape and resizing by scale are supported.
5243      *
5244      * Inputs (resizing by shape):
5245      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5246      *      the input. Zero batches is supported for this tensor.
5247      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5248      *      width of the output tensor.
5249      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5250      *      height of the output tensor.
5251      * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5252      *      Set to true to specify NCHW data layout for input0 and output0.
5253      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5254      *      scalar, default to false.  If True, the centers of the 4 corner
5255      *      pixels of the input and output tensors are aligned, preserving the
5256      *      values at the corner pixels.
5257      *      Available since API level 30.
5258      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5259      *      scalar, default to false. If True, the pixel centers are assumed to
5260      *      be at (0.5, 0.5). This is the default behavior of image.resize in
5261      *      TF 2.0. If this parameter is True, then align_corners parameter
5262      *      must be False.
5263      *      Available since API level 30.
5264      *
5265      * Inputs (resizing by scale):
5266      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5267      *      the input. Zero batches is supported for this tensor.
5268      * * 1: A scalar, specifying width_scale, the scaling factor of the width
5269      *      dimension from the input tensor to the output tensor. The output
5270      *      width is calculated as new_width = floor(width * width_scale).
5271      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5272      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5273      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5274      * * 2: A scalar, specifying height_scale, the scaling factor of the height
5275      *      dimension from the input tensor to the output tensor. The output
5276      *      height is calculated as new_height = floor(height * height_scale).
5277      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5278      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5279      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5280      * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5281      *      Set to true to specify NCHW data layout for input0 and output0.
5282      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5283      *      scalar, default to false.  If True, the centers of the 4 corner
5284      *      pixels of the input and output tensors are aligned, preserving the
5285      *      values at the corner pixels.
5286      *      Available since API level 30.
5287      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5288      *      scalar, default to false. If True, the pixel centers are assumed to
5289      *      be at (0.5, 0.5). This is the default behavior of image.resize in
5290      *      TF 2.0. If this parameter is True, then align_corners parameter
5291      *      must be False.
5292      *      Available since API level 30.
5293      *
5294      * Outputs:
5295      * * 0: The output 4-D tensor, of shape
5296      *      [batches, new_height, new_width, depth].
5297      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5298      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5299      *      the scale and zeroPoint must be the same as input0.
5300      *
5301      * Available since API level 29.
5302      */
5303     ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
5304 
5305     // Operations below are available since API level 30.
5306 
5307     /**
5308      * Quantized version of {@link ANEURALNETWORKS_LSTM}.
5309      *
5310      * The input and the output use asymmetric quantized types, while the rest
5311      * use symmetric ones.
5312      *
5313      * Inputs:
5314      * * 0: The input to the LSTM cell.
5315      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5316      *      Shape: [batchSize, inputSize]
5317      * * 1: The input-to-input weights. Optional.
5318      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5319      *      Shape: [numUnits, inputSize]
5320      * * 2: The input-to-forget weights.
5321      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5322      *      Shape: [numUnits, inputSize]
5323      * * 3: The input-to-cell weights.
5324      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5325      *      Shape: [numUnits, inputSize]
5326      * * 4: The input-to-output weights.
5327      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5328      *      Shape: [numUnits, inputSize]
5329      * * 5: The recurrent-to-input weights. Optional.
5330      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5331      *      Shape: [numUnits, outputSize]
5332      * * 6: The recurrent-to-forget weights.
5333      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5334      *      Shape: [numUnits, outputSize]
5335      * * 7: The recurrent-to-cell weights.
5336      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5337      *      Shape: [numUnits, outputSize]
5338      * * 8: The recurrent-to-output weights.
5339      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5340      *      Shape: [numUnits, outputSize]
5341      * * 9: The cell-to-input weights (for peephole). Optional.
5342      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5343      *      Shape: [numUnits]
5344      * * 10: The cell-to-forget weights (for peephole). Optional.
5345      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5346      *       Shape: [numUnits]
5347      * * 11: The cell-to-output weights (for peephole). Optional.
5348      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5349      *       Shape: [numUnits]
5350      * * 12: The input gate bias. Quantized with scale being the
5351      *       product of input and weights scales and zeroPoint equal to 0.
5352      *       Optional.
5353      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5354      *       Shape: [numUnits]
5355      * * 13: The forget gate bias. Quantized with scale being the
5356      *       product of input and weights scales and zeroPoint equal to 0.
5357      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5358      *       Shape: [numUnits]
5359      * * 14: The cell bias. Quantized with scale being the
5360      *       product of input and weights scales and zeroPoint equal to 0.
5361      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5362      *       Shape: [numUnits]
5363      * * 15: The output gate bias. Quantized with scale being the
5364      *       product of input and weights scales and zeroPoint equal to 0.
5365      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5366      *       Shape: [numUnits]
5367      * * 16: The projection weights. Optional.
5368      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5369      *       Shape: [outputSize, numUnits]
5370      * * 17: The projection bias. Quantized with scale being the
5371      *       product of input and weights scales and zeroPoint equal to 0.
5372      *       Optional.
5373      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5374      *       Shape: [outputSize]
5375      * * 18: The output from the previous time step.
5376      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5377      *       Shape: [batchSize, outputSize]
5378      * * 19: The cell state from the previous time step.
5379      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5380      *       Shape: [batchSize, numUnits]
5381      * * 20: The input layer normalization weights. Used to rescale
5382      *       normalized inputs to activation at input gate. Optional.
5383      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5384      *       Shape: [numUnits]
5385      * * 21: The forget layer normalization weights. Used to
5386      *       rescale normalized inputs to activation at forget gate. Optional.
5387      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5388      *       Shape: [numUnits]
5389      * * 22: The cell layer normalization weights. Used to rescale
5390      *       normalized inputs to activation at cell gate. Optional.
5391      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5392      *       Shape: [numUnits]
5393      * * 23: The output layer normalization weights. Used to
5394      *       rescale normalized inputs to activation at output gate. Optional.
5395      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5396      *       Shape: [numUnits]
5397      * * 24: The cell clip. If provided the cell state is clipped
5398      *       by this value prior to the cell output activation. Optional.
5399      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5400      * * 25: The projection clip. If provided and projection is enabled,
5401      *       this is used for clipping the projected values. Optional.
5402      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5403      * * 26: The scale of the intermediate result of matmul,
5404      *       i.e. input to layer normalization, at input gate.
5405      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5406      * * 27: The scale of the intermediate result of matmul,
5407      *       i.e. input to layer normalization, at forget gate.
5408      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5409      * * 28: The scale of the intermediate result of matmul,
5410      *       i.e. input to layer normalization, at cell gate.
5411      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5412      * * 29: The scale of the intermediate result of matmul,
5413      *       i.e. input to layer normalization, at output gate.
5414      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5415      * * 30: The zero point of the hidden state, i.e. input to
5416      *       projection.
5417      *       Type: {@link ANEURALNETWORKS_INT32}.
5418      * * 31: The scale of the hidden state, i.e. input to
5419      *       projection.
5420      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5421      *
5422      * Outputs:
5423      * * 0: The output state (out).
5424      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5425      *      Shape: [batchSize, outputSize]
5426      * * 1: The cell state (out).
5427      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5428      *      Shape: [batchSize, numUnits]
5429      * * 2: The output. This is effectively the same as the current
5430      *      "output state (out)" value.
5431      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5432      *      Shape: [batchSize, outputSize]
5433      *
5434      * Available since API level 30.
5435      */
5436     ANEURALNETWORKS_QUANTIZED_LSTM = 95,
5437 
5438     /**
5439      * Executes one of the two referenced models as determined by a boolean
5440      * value.
5441      *
5442      * The inputs and outputs of the two referenced models must agree with the
5443      * signature of this operation. That is, if the operation has (3 + n) inputs
5444      * and m outputs, both models must have n inputs and m outputs with the same
5445      * types, ranks (if specified), dimensions (if specified), scales,
5446      * zeroPoints, and other operand parameters as the corresponding operation
5447      * inputs and outputs.
5448      *
5449      * Inputs:
5450      * * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]
5451      *      that determines which of the two referenced models to execute.
5452      *      The operand must have fully specified dimensions.
5453      * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5454      *      executed if the condition is true.
5455      * * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5456      *      executed if the condition is false.
5457      * * 3 ~ (n + 2): Inputs to be passed to the model selected for execution.
5458      *
5459      * Outputs:
5460      * * 0 ~ (m - 1): Outputs produced by the selected model.
5461      *
5462      * Available since API level 30.
5463      */
5464     ANEURALNETWORKS_IF = 96,
5465 
5466     /**
5467      * Executes the body model until the condition model outputs false.
5468      *
5469      * The inputs to this operation are the condition model, the body model,
5470      * and operand values for the first iteration of the loop. The values are
5471      * implicitly split into three groups of input-output, state-only, and
5472      * input-only values, as described below.
5473      *
5474      * The outputs of this operation are the final values of input-output
5475      * operands.
5476      *
5477      * Both the condition and body model receive (m + k + n) inputs.
5478      * * The first m (m >= 1) inputs are input-output operands. For the first
5479      *   iteration, these are initialized from the corresponding inputs of the
5480      *   WHILE operation. In subsequent iterations, their values come from the
5481      *   corresponding outputs of the body model produced during the previous
5482      *   iteration.
5483      * * The next k (k >= 0) inputs are state-only operands. They are similar to
5484      *   the input-output operands, except that their values are no longer
5485      *   available after the loop terminates.
5486      * * The last n (n >= 0) inputs are input-only operands. Their values come
5487      *   from the corresponding inputs of the WHILE operation.
5488      *
5489      * The body model produces (m + k) outputs.
5490      * * The first m outputs are input-output operands. They become the outputs
5491      *   of the WHILE operation when a termination condition is reached.
5492      * * The last k outputs are state-only operands. Their values are no longer
5493      *   available after the loop terminates.
5494      *
5495      * The numbers m, k, and n are inferred by the runtime as follows:
5496      *     m = (WHILE operation output count)
5497      *     k = (body model output count) - m
5498      *     n = (body model input count) - m - k
5499      *
5500      * The pseudo-code below illustrates the flow of a WHILE operation with
5501      * inputs condition, body, initial_input_output, initial_state, input_only
5502      * (m = 1, k = 1, n = 1):
5503      *
5504      *     input_output = initial_input_output
5505      *     state = initial_state
5506      *     while condition(input_output, state, input_only):
5507      *         input_output, state = body(input_output, state, input_only)
5508      *     return input_output
5509      *
5510      * To prevent infinite loops, there is an implicit execution timeout
5511      * associated with each loop ("loop timeout duration"). See {@link
5512      * ANeuralNetworksExecution_setLoopTimeout}.
5513      *
5514      * Inputs:
5515      * * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition
5516      *      model. The model must have (m + k + n) inputs with
5517      *      the same types, ranks (if specified), dimensions (if specified),
5518      *      scales, zeroPoints, and other operand parameters as the
5519      *      corresponding inputs of the WHILE operation and exactly one output
5520      *      of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1].
5521      *      The output operand must have fully specified dimensions.
5522      * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model.
5523      *      The model must have (m + k + n) inputs and (m + k) outputs with
5524      *      the same types, ranks (if specified), dimensions (if specified),
5525      *      scales, zeroPoints, and other operand parameters as the
5526      *      corresponding inputs and outputs of the WHILE operation.
5527      * * (m inputs): Initial values for input-output operands.
5528      * * (k inputs): Initial values for state-only operands.
5529      * * (n inputs): Values for input-only operands.
5530      *
5531      * Outputs:
5532      * * 0 ~ (m - 1): Outputs produced by the loop.
5533      *
5534      * Available since API level 30.
5535      */
5536     ANEURALNETWORKS_WHILE = 97,
5537 
5538     /**
5539      * Computes exponential linear activation on the input tensor element-wise.
5540      *
5541      * The output is calculated using the following formula:
5542      *
5543      *     ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
5544      *
5545      * Supported tensor {@link OperandCode}:
5546      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5547      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5548      *
5549      * Supported tensor rank: from 1.
5550      *
5551      * Inputs:
5552      * * 0: A tensor, specifying the input. May be zero-sized.
5553      * * 1: A scalar, specifying the alpha parameter.
5554      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5555      *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
5556      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5557      *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
5558      *
5559      * Outputs:
5560      * * 0: The output tensor of same shape and type as input0.
5561      *
5562      * Available since API level 30.
5563      */
5564     ANEURALNETWORKS_ELU = 98,
5565 
5566     /**
5567      * Computes hard-swish activation on the input tensor element-wise.
5568      *
5569      * Hard swish activation is introduced in
5570      * https://arxiv.org/pdf/1905.02244.pdf
5571      *
5572      * The output is calculated using the following formula:
5573      *
5574      *     h-swish(x) = x * max(0, min(6, (x + 3))) / 6
5575 
5576      * Supported tensor {@link OperandCode}:
5577      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5578      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5579      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5580      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5581      *
5582      * Supported tensor rank: from 1.
5583      *
5584      * Inputs:
5585      * * 0: A tensor, specifying the input. May be zero-sized.
5586      *
5587      * Outputs:
5588      * * 0: The output tensor of same shape and type as input0.
5589      *      Scale and zero point of this tensor may be different from the input
5590      *      tensor's parameters.
5591      *
5592      * Available since API level 30.
5593      */
5594     ANEURALNETWORKS_HARD_SWISH = 99,
5595 
5596     /**
5597      * Creates a tensor filled with a scalar value.
5598      *
5599      * Supported output tensor {@link OperandCode}:
5600      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5601      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5602      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5603      *
5604      * Supported tensor rank: from 1.
5605      *
5606      * Inputs:
5607      * * 0: A 1-D tensor, specifying the desired output tensor shape.
5608      * * 1: A scalar, specifying the value to fill the output tensors with.
5609      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5610      *      the scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
5611      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5612      *      the scalar must be of {@link ANEURALNETWORKS_FLOAT32}.
5613      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
5614      *      the scalar must be of {@link ANEURALNETWORKS_INT32}.
5615      *
5616      * Outputs:
5617      * * 0: The output tensor.
5618      *
5619      * Available since API level 30.
5620      */
5621     ANEURALNETWORKS_FILL = 100,
5622 
5623     /**
5624      * Returns the rank of a tensor.
5625      *
5626      * The rank of a tensor is the number of dimensions in it. Also known as
5627      * "order", "degree", "ndims".
5628      *
5629      * Supported tensor {@link OperandCode}:
5630      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5631      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5632      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5633      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5634      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5635      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
5636      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
5637      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
5638      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5639      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5640      *
5641      * Supported tensor rank: from 1.
5642      *
5643      * Inputs:
5644      * * 0: The input tensor.
5645      *
5646      * Outputs:
5647      * * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank
5648      *      of the input tensor.
5649      *
5650      * Available since API level 30.
5651      */
5652     ANEURALNETWORKS_RANK = 101,
5653 } OperationCode;
5654 
5655 /**
5656  * Fused activation function types.
5657  *
5658  *
5659  * Available since API level 27.
5660  */
5661 typedef enum {
5662     /** NO fused activation function. */
5663     ANEURALNETWORKS_FUSED_NONE = 0,
5664     /** Fused ReLU activation function. */
5665     ANEURALNETWORKS_FUSED_RELU = 1,
5666     /** Fused ReLU1 activation function. */
5667     ANEURALNETWORKS_FUSED_RELU1 = 2,
5668     /** Fused ReLU6 activation function. */
5669     ANEURALNETWORKS_FUSED_RELU6 = 3,
5670 } FuseCode;
5671 
5672 /**
5673  * Implicit padding algorithms.
5674  *
5675  *
5676  * Available since API level 27.
5677  */
5678 typedef enum {
5679     /**
5680      * SAME padding.
5681      * Padding on both ends are the "same":
5682      *     padding_to_beginning =  total_padding / 2
5683      *     padding_to_end       = (total_padding + 1)/2.
5684      * i.e., for even number of padding, padding to both ends are exactly
5685      * the same; for odd number of padding, padding to the ending is bigger
5686      * than the padding to the beginning by 1.
5687      *
5688      * total_padding is a function of input, stride, dilation and filter size.
5689      * It could be computed as follows:
5690      *    out_size = (input + stride - 1) / stride
5691      *    effective_filter_size = (filter_size - 1) * dilation + 1
5692      *    needed_input = (out_size - 1) * stride + effective_filter_size
5693      *    total_padding = max(0, needed_input - input_size)
5694      *  The computation is the same for the horizontal and vertical directions.
5695      */
5696     ANEURALNETWORKS_PADDING_SAME = 1,
5697 
5698     /**
5699      * VALID padding.
5700      * No padding. When the input size is not evenly divisible by
5701      * the filter size, the input at the end that could not fill
5702      * the whole filter tile will simply be ignored.
5703      */
5704     ANEURALNETWORKS_PADDING_VALID = 2,
5705 } PaddingCode;
5706 
5707 /**
5708  * Execution preferences.
5709  *
5710  * Available since API level 27.
5711  */
5712 typedef enum {
5713     /**
5714      * Prefer executing in a way that minimizes battery drain.
5715      * This is desirable for compilations that will be executed often.
5716      */
5717     ANEURALNETWORKS_PREFER_LOW_POWER = 0,
5718     /**
5719      * Prefer returning a single answer as fast as possible, even if this causes
5720      * more power consumption.
5721      */
5722     ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
5723     /**
5724      * Prefer maximizing the throughput of successive frames, for example when
5725      * processing successive frames coming from the camera.
5726      */
5727     ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
5728 } PreferenceCode;
5729 
5730 /**
5731  * Device types.
5732  *
5733  * The type of NNAPI device.
5734  */
5735 typedef enum {
5736     /** The device type cannot be provided. */
5737     ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
5738     /** The device does not fall into any category below. */
5739     ANEURALNETWORKS_DEVICE_OTHER = 1,
5740     /** The device runs NNAPI models on single or multi-core CPU. */
5741     ANEURALNETWORKS_DEVICE_CPU = 2,
5742     /** The device can run NNAPI models and also accelerate graphics APIs such
5743      * as OpenGL ES and Vulkan. */
5744     ANEURALNETWORKS_DEVICE_GPU = 3,
5745     /** Dedicated accelerator for Machine Learning workloads. */
5746     ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
5747 } DeviceTypeCode;
5748 
5749 /**
5750  * Result codes.
5751  *
5752  * <p>Any NNAPI function can return any result code, including result codes not
5753  * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
5754  * indicates a failure of some kind.</p>
5755  *
5756  * <p>Additional information about the nature of a failure can be obtained from
5757  * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
5758  * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
5759  *
5760  * Available since API level 27.
5761  */
5762 typedef enum {
5763     /**
5764      * Operation was succesful.
5765      */
5766     ANEURALNETWORKS_NO_ERROR = 0,
5767 
5768     /**
5769      * Failure caused by not enough available memory.
5770      */
5771     ANEURALNETWORKS_OUT_OF_MEMORY = 1,
5772 
5773     ANEURALNETWORKS_INCOMPLETE = 2,
5774 
5775     /**
5776      * Failure caused by unexpected null argument.
5777      */
5778     ANEURALNETWORKS_UNEXPECTED_NULL = 3,
5779 
5780     /**
5781      * Failure caused by invalid function arguments, invalid model definition,
5782      * invalid execution definition or invalid data at execution time.
5783      */
5784     ANEURALNETWORKS_BAD_DATA = 4,
5785 
5786     /**
5787      * Failure caused by failed model execution.
5788      */
5789     ANEURALNETWORKS_OP_FAILED = 5,
5790 
5791     /**
5792      * Failure caused by object being in the wrong state.
5793      */
5794     ANEURALNETWORKS_BAD_STATE = 6,
5795 
5796     /**
5797      * Failure caused by not being able to map a file into memory.
5798      * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer
5799      * not supported by the device.
5800      * Mitigate by reading its content into memory.
5801      */
5802     ANEURALNETWORKS_UNMAPPABLE = 7,
5803 
5804     /**
5805      * Failure caused by insufficient buffer size provided to a model output.
5806      */
5807     ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
5808 
5809     /**
5810      * Failure caused by a device not being available.
5811      */
5812     ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
5813 
5814     /**
5815      * Failure because a deadline could not be met for a task, but future
5816      * deadlines may still be met for the same task after a short delay.
5817      *
5818      * Available since API level 30.
5819      */
5820     ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,
5821 
5822     /**
5823      * Failure because a deadline could not be met for a task, and future
5824      * deadlines will likely also not be met for the same task even after a
5825      * short delay.
5826      *
5827      * Available since API level 30.
5828      */
5829     ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,
5830 
5831     /**
5832      * Failure because of a resource limitation within the driver, but future
5833      * calls for the same task may still succeed after a short delay.
5834      *
5835      * Available since API level 30.
5836      */
5837     ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,
5838 
5839     /**
5840      * Failure because of a resource limitation within the driver, and future
5841      * calls for the same task will likely also fail even after a short
5842      * delay.
5843      *
5844      * Available since API level 30.
5845      */
5846     ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,
5847 
5848     /**
5849      * Failure indicating an object is in a dead state.
5850      *
5851      * Available since API level 30.
5852      */
5853     ANEURALNETWORKS_DEAD_OBJECT = 14,
5854 } ResultCode;
5855 
5856 /**
5857  * For {@link ANeuralNetworksModel_setOperandValue}, values with a
5858  * length smaller or equal to this will be immediately copied into
5859  * the model. The size is in bytes.
5860  *
5861  * Available since API level 27.
5862  */
5863 enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 };
5864 
5865 /**
5866  * For {@link ANeuralNetworksCompilation_setCaching}, specify the size
5867  * of the cache token required from the application. The size is in bytes.
5868  *
5869  * Available since API level 29.
5870  */
5871 enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 };
5872 
5873 /**
5874  * Different duration measurements.
5875  *
5876  * Durations are measured in nanoseconds.
5877  *
5878  * Available since API level 29.
5879  */
5880 typedef enum {
5881     // Execution time on hardware (not driver, which runs on host processor).
5882     ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
5883     // Execution time in driver (including time on hardware).  Excludes overhead
5884     // such as that of the runtime itself and the IPC needed for the runtime to
5885     // communicate with the driver.
5886     ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
5887     // Execution time on hardware, after all dependencies have been signaled.
5888     // If no dependencies specified (for example, if the execution was scheduled other
5889     // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
5890     // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
5891     // Available since API level 30.
5892     ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
5893     // Execution time in driver, after all dependencies have been signaled. Excludes
5894     // overhead such as that of the runtime itself and the IPC needed for the runtime
5895     // to communicate with the driver.
5896     // If no dependencies specified (for example, if the execution was scheduled other
5897     // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
5898     // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
5899     // Available since API level 30.
5900     ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
5901 } DurationCode;
5902 
5903 /**
5904  * Relative execution priority.
5905  *
5906  * Available since API level 30.
5907  */
5908 typedef enum {
5909     ANEURALNETWORKS_PRIORITY_LOW = 90,
5910     ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
5911     ANEURALNETWORKS_PRIORITY_HIGH = 110,
5912     ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
5913 } PriorityCode;
5914 
5915 /**
5916  * ANeuralNetworksMemory is an opaque type that represents memory.
5917  *
5918  * This type is used to represent shared memory, memory mapped files,
5919  * and similar memories.
5920  *
5921  * By using shared memory, a program can efficiently communicate to the
5922  * runtime and drivers the tensors that define a model. See
5923  * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
5924  * should typically create one shared memory object that contains every constant tensor
5925  * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to
5926  * create shared memory from a file handle.
5927  * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to
5928  * create shared memory from an AHardwareBuffer handle.
5929  *
5930  * Memory objects can also be used to specify the input and output arguments of
5931  * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
5932  * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
5933  *
5934  * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory},
5935  * {@link ANeuralNetworksExecution_setInputFromMemory} and
5936  * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared
5937  * memory object must be aligned on a boundary of a byte size that is a multiple
5938  * of the element type byte size, e.g., a tensor with
5939  * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary.
5940  *
5941  * It is the application's responsibility to ensure that there are no uses of
5942  * the memory after calling {@link ANeuralNetworksMemory_free}. This includes
5943  * any model which references this memory because of a call to
5944  * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation
5945  * created using such a model, any execution object or burst object created
5946  * using such a compilation, or any execution which references this memory
5947  * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or
5948  * {@link ANeuralNetworksExecution_setOutputFromMemory}.
5949  *
5950  * Available since API level 27.
5951  *
5952  * Starting at API level 30, the application may request creation of device native memory from
5953  * {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation
5954  * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and
5955  * {@link ANeuralNetworksMemory_createFromDesc}.
5956  */
5957 typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
5958 
5959 /**
5960  * ANeuralNetworksModel is an opaque type that contains a description of the
5961  * mathematical operations that constitute the model.
5962  *
5963  * <p>Build the model by calling<ul>
5964  * <li>{@link ANeuralNetworksModel_create}</li>
5965  * <li>{@link ANeuralNetworksModel_addOperation}</li>
5966  * <li>{@link ANeuralNetworksModel_addOperand}</li>
5967  * </ul>
5968  *
5969  * This forms a graph in which each operation and operand is a node, a
5970  * directed edge from an operand to an operation indicates that the
5971  * operand is an input to the operation, and a directed edge from an
5972  * operation to an operand indicates that the operand is an output
5973  * from the operation. This graph must be acyclic.
5974  *
5975  * A model is completed by calling {@link ANeuralNetworksModel_finish}.
5976  * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
5977  *
5978  * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish}
5979  * has been called on it.</p>
5980  *
5981  * <p>It is the application's responsibility to make sure that only one thread
5982  * modifies a model at a given time. It is however safe for more than one
5983  * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
5984  *
5985  * <p>It is also the application's responsibility to ensure that there are no
5986  * other uses of the model after calling {@link ANeuralNetworksModel_free}.
5987  * This includes any compilation, execution object or burst object created using
5988  * the model.</p>
5989  *
5990  * Available since API level 27.
5991  */
5992 typedef struct ANeuralNetworksModel ANeuralNetworksModel;
5993 
5994 /**
5995  * ANeuralNetworksCompilation is an opaque type that can be used to compile
5996  * a machine learning model.
5997  *
5998  * <p>To use:<ul>
5999  *    <li>Create a new compilation instance by calling the
6000  *        {@link ANeuralNetworksCompilation_create} function or
6001  *        {@link ANeuralNetworksCompilation_createForDevices}.</li>
6002  *    <li>Set any desired properties on the compilation (for example,
6003  *        {@link ANeuralNetworksCompilation_setPreference}).</li>
6004  *    <li>Optionally, set the caching signature and the cache directory on the
6005  *        compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li>
6006  *    <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li>
6007  *    <li>Use the compilation as many times as needed
6008  *        with {@link ANeuralNetworksExecution_create} and
6009  *        {@link ANeuralNetworksBurst_create}.</li>
6010  *    <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
6011  *        once all executions using the compilation have completed.</li></ul></p>
6012  *
6013  * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}.
6014  * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}.
6015  *
6016  * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish}
6017  * has been called on it.</p>
6018  *
6019  * <p>It is the application's responsibility to make sure that only
6020  * one thread modifies a compilation at a given time. It is however
6021  * safe for more than one thread to use the compilation once
6022  * {@link ANeuralNetworksCompilation_finish} has returned.</p>
6023  *
6024  * <p>It is also the application's responsibility to ensure that there are no other
6025  * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
6026  * This includes any execution object or burst object created using the compilation,
6027  * or any memory descriptor with the compilation as part of one of the roles specified by
6028  * {@link ANeuralNetworksMemoryDesc_addInputRole} or
6029  * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p>
6030  *
6031  * Available since API level 27.
6032  */
6033 typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
6034 
6035 /**
6036  * ANeuralNetworksExecution is an opaque type that can be used to apply a machine
6037  * learning model to a set of inputs.
6038  *
6039  * <p>To use:<ul>
6040  *    <li>Create a new execution instance by calling the
6041  *        {@link ANeuralNetworksExecution_create} function.</li>
6042  *    <li>Associate input buffers or memory regions to the model inputs with
6043  *        {@link ANeuralNetworksExecution_setInput} or
6044  *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
6045  *    <li>Associate output buffers or memory regions to the model outputs with
6046  *        {@link ANeuralNetworksExecution_setOutput} or
6047  *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
6048  *    <li>Apply the model with one of the following:</li><ul>
6049  *        <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute}
6050  *            or with {@link ANeuralNetworksExecution_startComputeWithDependencies},
6051  *            waiting for the execution to complete with
6052  *            {@link ANeuralNetworksEvent_wait}.</li>
6053  *        <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li>
6054  *        <li>Synchronously as part of an execution burst with
6055  *            {@link ANeuralNetworksExecution_burstCompute}.</li></ul>
6056  *    <li>Destroy the execution with
6057  *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
6058  *
6059  * <p>An output buffer or memory region must not overlap with any
6060  * other output buffer or memory region, with an input buffer or
6061  * memory region, or with an operand value in a memory object
6062  * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
6063  *
6064  * <p>An execution cannot be modified once
6065  * {@link ANeuralNetworksExecution_burstCompute},
6066  * {@link ANeuralNetworksExecution_compute},
6067  * {@link ANeuralNetworksExecution_startCompute} or
6068  * {@link ANeuralNetworksExecution_startComputeWithDependencies} has been called on it.</p>
6069  *
6070  * <p>An execution can be applied to a model with
6071  * {@link ANeuralNetworksExecution_burstCompute},
6072  * {@link ANeuralNetworksExecution_compute},
6073  * {@link ANeuralNetworksExecution_startCompute} or
6074  * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new
6075  * executions to do new evaluations of the model.</p>
6076  *
6077  * <p>It is the application's responsibility to make sure that only one thread
6078  * modifies an execution at a given time. It is however safe for more than one
6079  * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
6080  *
6081  * <p>It is also the application's responsibility to ensure that the execution
6082  * either has never been scheduled or has completed (i.e., that
6083  * {@link ANeuralNetworksExecution_burstCompute},
6084  * {@link ANeuralNetworksExecution_compute}, or
6085  * {@link ANeuralNetworksEvent_wait} has returned) before calling
6086  * {@link ANeuralNetworksExecution_free}.</p>.
6087  *
6088  * <p>It is also the application's responsibility to ensure that there are no other
6089  * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
6090  *
6091  * <p>Multiple executions can be scheduled and evaluated concurrently, either by
6092  * means of {@link ANeuralNetworksExecution_compute} or
6093  * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) in
6094  * different threads, or by means of
6095  * {@link ANeuralNetworksExecution_startCompute} or
6096  * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous).
6097  * (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
6098  * different burst objects.) The runtime makes no guarantee on the ordering of
6099  * completion of executions. If it's important to the application, the
6100  * application should enforce the ordering by ensuring that one execution
6101  * completes before the next is scheduled (for example, by scheduling all
6102  * executions synchronously within a single thread, or by scheduling all
6103  * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
6104  * calls to {@link ANeuralNetworksExecution_startCompute}); or by using
6105  * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a
6106  * list of events to be signaled before starting the actual evaluation.</p>
6107  *
6108  * Available since API level 27.
6109  */
6110 typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
6111 
6112 #if __ANDROID_API__ >= 29
6113 /**
6114  * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
6115  */
6116 typedef struct ANeuralNetworksSymmPerChannelQuantParams {
6117     /* The index of the channel dimension. */
6118     uint32_t channelDim;
6119     /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */
6120     uint32_t scaleCount;
6121     /** The array of scaling values for each channel. Each value must be greater than zero. */
6122     const float* scales;
6123 } ANeuralNetworksSymmPerChannelQuantParams;
6124 
6125 /**
6126  * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
6127  * of a rapid sequence of executions. It will likely cause overhead if only used
6128  * for a single execution.
6129  *
6130  * ANeuralNetworksBurst serves as a context object for any number of inferences
6131  * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
6132  * object and the {@link ANeuralNetworksExecution} objects used with it must all
6133  * have been created from the same {@link ANeuralNetworksCompilation} object.
6134  *
6135  * This object is also used as a hint to drivers, providing insight to the
6136  * lifetime of a rapid sequence of executions. For example, a driver may choose
6137  * to increase the clock frequency of its accelerator for the lifetime of a
6138  * burst object.
6139  *
6140  * <p>To use:<ul>
6141  *    <li>Create a new burst object by calling the
6142  *        {@link ANeuralNetworksBurst_create} function.</li>
6143  *    <li>For each execution:</li><ul>
6144  *        <li>Create {@link ANeuralNetworksExecution} and configure its
6145  *            properties (see {@link ANeuralNetworksExecution} for details).</li>
6146  *        <li>Apply the model synchronously with
6147  *            {@link ANeuralNetworksExecution_burstCompute}, reusing the same
6148  *            {@link ANeuralNetworksBurst} with the new
6149  *            {@link ANeuralNetworksExecution}.</li>
6150  *        <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
6151  *    <li>Destroy the burst with
6152  *        {@link ANeuralNetworksBurst_free}.</li></ul></p>
6153  *
6154  * Available since API level 29.
6155  */
6156 typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
6157 #endif  //  __ANDROID_API__ >= 29
6158 
6159 /**
6160  * ANeuralNetworksOperandType describes the type of an operand.
6161  *
6162  * This structure is used to describe both scalars and tensors.
6163  *
6164  * A tensor operand type with all dimensions specified is "fully
6165  * specified".  Whenever possible (i.e., whenever the dimensions are
6166  * known at model construction time), a tensor operand type should be
6167  * (but is not required to be) fully specified, in order to enable the
6168  * best possible performance.
6169  *
6170  * If a tensor operand's type is not fully specified, the dimensions
6171  * of the operand are deduced from the operand types and values of the
6172  * operation for which that operand is an output or from the corresponding
6173  * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
6174  * operand type in the case of referenced model input operands.
6175  *
6176  * <p>In the following situations, a tensor operand type must be fully
6177  * specified:<ul>
6178  *     <li>The operand has a constant value, set by
6179  *         {@link ANeuralNetworksModel_setOperandValue} (with a
6180  *         non-nullptr buffer) or
6181  *         {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
6182  *     <li>The operand is a model input (see
6183  *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
6184  *         model within a compilation.  A fully specified tensor operand type
6185  *         must either be provided to {@link ANeuralNetworksModel_addOperand};
6186  *         or it must be provided to the corresponding
6187  *         {@link ANeuralNetworksExecution_setInput}, or
6188  *         {@link ANeuralNetworksExecution_setInputFromMemory}.
6189  *         EXCEPTION: If the input is optional and omitted
6190  *         (by passing nullptr for buffer to
6191  *         {@link ANeuralNetworksExecution_setInput}) then it need
6192  *         not have a fully specified tensor operand type.</li>
6193  *     <li>The operand is a model output (see
6194  *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
6195  *         model within a compilation and is to be used with {@link
6196  *         ANeuralNetworksExecution_startComputeWithDependencies}.
6197  *         A fully specified tensor operand type must either be provided
6198  *         to {@link ANeuralNetworksModel_addOperand}; or it must be
6199  *         provided to the corresponding
6200  *         {@link ANeuralNetworksExecution_setOutput}, or
6201  *         {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul>
6202  *
6203  * A tensor operand type of specified rank but some number of
6204  * unspecified dimensions is represented by setting dimensionCount to
6205  * the rank and each unspecified dimension to 0.
6206  *
6207  * Available since API level 27.
6208  *
6209  * Starting at API level 29, a tensor operand type of unspecified rank is
6210  * represented by setting dimensionCount to 0 and dimensions to NULL (just as if
6211  * it were a scalar operand type).
6212  */
6213 typedef struct ANeuralNetworksOperandType {
6214     /**
6215      * The data type, e.g ANEURALNETWORKS_FLOAT32.
6216      */
6217     int32_t type;
6218 
6219     /**
6220      * The number of dimensions (rank).
6221      *
6222      * Must be 0 for scalars.
6223      */
6224     uint32_t dimensionCount;
6225 
6226     /**
6227      * The dimensions of the tensor.
6228      *
6229      * Must be nullptr for scalars.
6230      */
6231     const uint32_t* dimensions;
6232 
6233     /**
6234      * The quantization scale.
6235      *
6236      * Must be 0 when not applicable to an operand type.
6237      *
6238      * See {@link OperandCode}.
6239      */
6240     float scale;
6241 
6242     /**
6243      * The quantization zero point.
6244      *
6245      * Must be 0 when not applicable to an operand type.
6246      *
6247      * See {@link OperandCode}.
6248      */
6249     int32_t zeroPoint;
6250 } ANeuralNetworksOperandType;
6251 
6252 typedef int32_t ANeuralNetworksOperationType;
6253 
6254 /**
6255  * ANeuralNetworksEvent is an opaque type that represents an event
6256  * that will be signaled once an execution completes.
6257  *
6258  * Available since API level 27.
6259  */
6260 typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
6261 
6262 #if __ANDROID_API__ >= 29
6263 
6264 /**
6265  * ANeuralNetworksDevice is an opaque type that represents a device.
6266  *
6267  * This type is used to query basic properties and supported operations of the corresponding
6268  * device, and control which device(s) a model is to be run on.
6269  *
6270  * Available since API level 29.
6271  */
6272 typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
6273 
6274 #endif  // __ANDROID_API__ >= 29
6275 
6276 #if __ANDROID_API__ >= 30
6277 
6278 /**
6279  * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor.
6280  *
6281  * A memory descriptor describes the properties of a memory object, and is used by
6282  * {@link ANeuralNetworksMemory_createFromDesc}.
6283  *
6284  * To use:
6285  *   - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}.
6286  *   - Specify all of the intended input and output roles by calling
6287  *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
6288  *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
6289  *   - Optionally, specify the memory dimensions by calling
6290  *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
6291  *   - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}.
6292  *   - Use the memory descriptor as many times as needed with
6293  *     {@link ANeuralNetworksMemory_createFromDesc}.
6294  *   - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}.
6295  *
6296  * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}.
6297  * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}.
6298  *
6299  * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish}
6300  * has been called on it.
6301  *
6302  * It is the application's responsibility to make sure that only
6303  * one thread modifies a memory descriptor at a given time. It is however
6304  * safe for more than one thread to use the memory descriptor once
6305  * {@link ANeuralNetworksMemoryDesc_finish} has returned.
6306  *
6307  * It is also the application's responsibility to ensure that there are no other
6308  * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}.
6309  * It is however safe to continue using a {@link ANeuralNetworksMemory} object created
6310  * from the memory descriptor.
6311  *
6312  * Available since API level 30.
6313  */
6314 typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
6315 
6316 /**
6317  * Create a {@link ANeuralNetworksMemoryDesc} with no properties.
6318  *
6319  * This only creates the memory descriptor. Its properties should be set with calls to
6320  * {@link ANeuralNetworksMemoryDesc_addInputRole},
6321  * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and
6322  * {@link ANeuralNetworksMemoryDesc_setDimensions}.
6323  *
6324  * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties have been set.
6325  *
6326  * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor
6327  * is no longer needed.
6328  *
6329  * Available since API level 30.
6330  *
6331  * @param desc The {@link ANeuralNetworksMemoryDesc} to be created.
6332  *             Set to NULL if unsuccessful.
6333  *
6334  * @return ANEURALNETWORKS_NO_ERROR if successful.
6335  */
6336 int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) __INTRODUCED_IN(30);
6337 
6338 /**
6339  * Destroy a memory descriptor.
6340  *
6341  * The memory descriptor need not have been finished by a call to
6342  * {@link ANeuralNetworksMemoryDesc_finish}.
6343  *
6344  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
6345  *
6346  * Available since API level 30.
6347  *
6348  * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and
6349  *             results in no operation.
6350  */
6351 void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30);
6352 
6353 /**
6354  * Specify that a memory object will be playing the role of an input to an execution created from a
6355  * particular compilation.
6356  *
6357  * The compilation and the input index fully specify an input operand. This function
6358  * may be invoked multiple times on the same memory descriptor with different input operands,
6359  * and the same input operand may be specified on multiple memory descriptors. However,
6360  * specifying the same input operand on the same memory descriptor more than once will
6361  * return an error.
6362  *
6363  * The dimensions of the corresponding model operands of all the roles specified by
6364  * {@link ANeuralNetworksMemoryDesc_addInputRole} and
6365  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
6366  * dimensions are incompatible if both ranks are fully specified but have different values, or if
6367  * there is at least one axis that is fully specified in both but has different values.
6368  *
6369  * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
6370  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory descriptor
6371  * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
6372  *
6373  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
6374  * called will return an error.
6375  *
6376  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
6377  *
6378  * Available since API level 30.
6379  *
6380  * @param desc The memory descriptor to be modified.
6381  * @param compilation The compilation object. It must already have been finished by calling
6382  *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
6383  *                    descriptor.
6384  * @param index The index of the input argument we are referencing from the compilation. It is
6385  *              an index into the inputs list passed to
6386  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
6387  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
6388  * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
6389  *                  memory is to be used in the specified role. This is provided as a hint to
6390  *                  optimize the case when different roles prefer different memory locations or data
6391  *                  layouts.
6392  *
6393  * @return ANEURALNETWORKS_NO_ERROR if successful.
6394  */
6395 int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc* desc,
6396                                            const ANeuralNetworksCompilation* compilation,
6397                                            uint32_t index, float frequency) __INTRODUCED_IN(30);
6398 
6399 /**
6400  * Specify that a memory object will be playing the role of an output to an execution created from a
6401  * particular compilation.
6402  *
6403  * The compilation and the output index fully specify an output operand. This function
6404  * may be invoked multiple times on the same memory descriptor with different output operands,
6405  * and the same output operand may be specified on multiple memory descriptors. However,
6406  * specifying the same output operand on the same memory descriptor object more than once will
6407  * return an error.
6408  *
6409  * The dimensions of the corresponding model operands of all the roles specified by
6410  * {@link ANeuralNetworksMemoryDesc_addInputRole} and
6411  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
6412  * dimensions are incompatible if both ranks are fully specified but have different values, or if
6413  * there is at least one axis that is fully specified in both but has different values.
6414  *
6415  * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
6416  * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory descriptor
6417  * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
6418  *
6419  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
6420  * called will return an error.
6421  *
6422  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
6423  *
6424  * Available since API level 30.
6425  *
6426  * @param desc The memory descriptor to be modified.
6427  * @param compilation The compilation object. It must already have been finished by calling
6428  *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
6429  *                    descriptor.
6430  * @param index The index of the output argument we are referencing from the compilation. It is
6431  *              an index into the outputs list passed to
6432  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
6433  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
6434  * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
6435  *                  memory is to be used in the specified role. This is provided as a hint to
6436  *                  optimize the case when multiple roles prefer different memory locations or data
6437  *                  layouts.
6438  *
6439  * @return ANEURALNETWORKS_NO_ERROR if successful.
6440  */
6441 int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc* desc,
6442                                             const ANeuralNetworksCompilation* compilation,
6443                                             uint32_t index, float frequency) __INTRODUCED_IN(30);
6444 
6445 /**
6446  * Set the dimensional information of the memory descriptor.
6447  *
6448  * The specified dimensions must be compatible with the dimensions of the corresponding model
6449  * operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
6450  * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are incompatible if both ranks
6451  * are fully specified but have different values, or if there is at least one axis that is fully
6452  * specified in both but has different values.
6453  *
6454  * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
6455  * called will return an error.
6456  *
6457  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
6458  *
6459  * Available since API level 30.
6460  *
6461  * @param desc The memory descriptor to be modified.
6462  * @param rank The number of dimensions. Must be 0 for scalars.
6463  * @param dimensions An array of dimensions. An entry with the value 0 indicates that the
6464  *                   corresponding axis has an unknown size.
6465  *
6466  * @return ANEURALNETWORKS_NO_ERROR if successful.
6467  */
6468 int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc* desc, uint32_t rank,
6469                                             const uint32_t* dimensions) __INTRODUCED_IN(30);
6470 
6471 /**
6472  * Indicate that we have finished modifying a memory descriptor. Required before calling
6473  * {@link ANeuralNetworksMemory_createFromDesc}.
6474  *
6475  * This function must only be called once for a given memory descriptor.
6476  *
6477  * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
6478  *
6479  * Available since API level 30.
6480  *
6481  * @param desc The memory descriptor to be finished.
6482  *
6483  * @return ANEURALNETWORKS_NO_ERROR if successful.
6484  */
6485 int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30);
6486 
6487 /**
6488  * Creates a memory object from a memory descriptor.
6489  *
6490  * The memory object is created with an uninitialized buffer. A memory object with an uninitialized
6491  * buffer may only be used according to the roles specified by {@link
6492  * ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory in {@link
6493  * ANeuralNetworksMemory_copy}. The buffer of a memory object is initialized after the memory object
6494  * is used as an output in a successful execution, or used as the destination memory in a successful
6495  * {@link ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may be used
6496  * according to all roles specified in {@link ANeuralNetworksMemoryDesc}, or as the source or
6497  * destination memory in {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will
6498  * return to the uninitialized state if the memory object is used as an output in a failed
6499  * execution, or used as the destination memory in a failed {@link ANeuralNetworksMemory_copy}.
6500  *
6501  * The dimensions of the memory descriptor are deduced from the dimensions of the corresponding
6502  * model operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
6503  * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions set by the call to
6504  * {@link ANeuralNetworksMemoryDesc_setDimensions}, if any. The memory descriptor may have
6505  * unspecified dimensions or rank. In such a case, the same memory object may be used with different
6506  * shapes of outputs in different executions. When the memory is used as an input, the input shape
6507  * must be the same as the output shape from the last execution using this memory object as an
6508  * output, or the last {@link ANeuralNetworkMemory_copy} using this memory object as the destination
6509  * memory. Creating a memory object with unspecified dimensions or rank may fail for certain sets of
6510  * roles.
6511  *
6512  * Using the memory in roles or shapes that are not compatible with the rules specified above will
6513  * return an error.
6514  *
6515  * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
6516  * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,
6517  * both offset and length must be set to zero and the entire memory region will be
6518  * associated with the specified input or output operand.
6519  *
6520  * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the memory created from this
6521  * function will return an error.
6522  *
6523  * {@link ANeuralNetworksMemory_free} must be called once the memory is no longer needed.
6524  *
6525  * Attempting to create memory from an unfinished memory descriptor will return an error.
6526  *
6527  * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory}
6528  * object.
6529  *
6530  * Available since API level 30.
6531  *
6532  * @param desc The memory descriptor.
6533  * @param memory The memory object to be created.
6534  *               Set to NULL if unsuccessful.
6535  *
6536  * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if the memory is
6537  *         created with unspecified dimensions or rank and it is not supported for this set of
6538  *         roles.
6539  */
6540 int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc* desc,
6541                                          ANeuralNetworksMemory** memory) __INTRODUCED_IN(30);
6542 
6543 /**
6544  * Copies data from one memory object to another.
6545  *
6546  * If at most one of the src and dst is created from {@link ANeuralNetworksMemory_createFromDesc},
6547  * the src and dst must have the same logical size:
6548  * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd}, or if it is created
6549  *   from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with format of
6550  *   AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory.
6551  * - If the memory is created from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a
6552  *   format other than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there is
6553  *   no padding and the data is tightly packed. This function may fail if the AHardwareBuffer
6554  *   cannot be accessed.
6555  * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc}, the logical size
6556  *   equals the size indicated by the {@link OperandCode} multiplied by the number of elements. This
6557  *   function will fail if the number of elements is unknown.
6558  *
6559  * If both src and dst are created from {@link ANeuralNetworksMemory_createFromDesc}, they must have
6560  * compatible dimensions. Two dimensions are incompatible if both ranks are fully specified but
6561  * have different values, or if there is at least one axis that is fully specified in both but has
6562  * different values. The dst may have unspecified dimensions or rank. In such a case, the dimensions
6563  * of dst will get updated according to the dimensions of the src.
6564  *
6565  * In both cases, if the src is created from {@link ANeuralNetworksMemory_createFromDesc}, it must
6566  * have been used as an output in a successful execution, or used as the destination memory in a
6567  * successful {@link ANeuralNetworksMemory_copy}.
6568  *
6569  * The src and dst may have different data layout, in which case the data copying is performed
6570  * logically with data layout transformation.
6571  *
6572  * Available since API level 30.
6573  *
6574  * @param src The source memory object.
6575  * @param dst The destination memory object.
6576  *
6577  * @return ANEURALNETWORKS_NO_ERROR if successful.
6578  */
6579 int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNetworksMemory* dst)
6580         __INTRODUCED_IN(30);
6581 
6582 #endif  // __ANDROID_API__ >= 30
6583 
6584 #if __ANDROID_API__ >= 29
6585 
6586 /**
6587  * Get the number of available devices.
6588  *
6589  * @param numDevices Used to return the number of devices.
6590  *
6591  * @return ANEURALNETWORKS_NO_ERROR if successful.
6592  *
6593  * Available since API level 29.
6594  */
6595 int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __INTRODUCED_IN(29);
6596 
6597 /**
6598  * Get the representation of the specified device.
6599  *
6600  * @param devIndex The index of the specified device. Must be less than the
6601                    number of available devices.
6602  * @param device The representation of the specified device.
6603  *               The same representation will always be returned for the specified
6604  *               device.
6605  *
6606  * @return ANEURALNETWORKS_NO_ERROR if successful.
6607  *
6608  * Available since API level 29.
6609  */
6610 int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device)
6611         __INTRODUCED_IN(29);
6612 
6613 /**
6614  * Get the name of the specified device.
6615  *
6616  * @param device The representation of the specified device.
6617  * @param name   The returned name of the specified device. The name will be in UTF-8
6618  *               and will be null-terminated. It will be recognizable as a known device name
6619  *               rather than a cryptic string. For devices with feature level reported by
6620  *               {@link ANeuralNetworksDevice_getFeatureLevel} that is 29 and above, the
6621  *               format of the name is {VENDOR}-{DEVICE}. For devices with feature level 28
6622  *               or lower, the format of the name is undefined.
6623  *               The name will remain valid for the duration of the application.
6624  *
6625  * @return ANEURALNETWORKS_NO_ERROR if successful.
6626  *
6627  * Available since API level 29.
6628  */
6629 int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name)
6630         __INTRODUCED_IN(29);
6631 
6632 /**
6633  * Get the type of a given device.
6634  *
6635  * The device type can be used to help application developers to distribute Machine Learning
6636  * workloads and other workloads such as graphical rendering.
6637  * E.g., for an app which renders AR scenes based on real time object detection results,
6638  * the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU
6639  * for graphical rendering.
6640  *
6641  * @param device The representation of the specified device.
6642  * @param type The returned {@link DeviceTypeCode} of the specified device.
6643  *
6644  * @return ANEURALNETWORKS_NO_ERROR if successful.
6645  *
6646  * Available since API level 29.
6647  */
6648 int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type)
6649         __INTRODUCED_IN(29);
6650 
6651 /**
6652  * Get the version of the driver implementation of the specified device.
6653  *
6654  * It’s the responsibility of the driver implementor to insure that this version string
6655  * uniquely distinguishes this implementation from all previous implementations.
6656  *
6657  * This version string must not be confused with the feature level which is solely defined
6658  * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions.
6659  * For example, it is not possible to filter all drivers older than a certain version.
6660  *
6661  * Application developers may use this version string to avoid or prefer specific driver
6662  * implementations. For example, an application may want to do so because:
6663  *     - A specific version of the driver does not provide the required performance,
6664  *       perhaps because of a performance regression.
6665  *     - A specific version of the driver has a bug or returns results that don’t match
6666  *       the minimum precision requirement for the application.
6667  *
6668  * @param device The representation of the specified device.
6669  * @param version The returned version string of the driver for the specified device. The
6670  *                string will be in UTF-8 and will be null-terminated. For devices with feature
6671  *                level 28 or lower, "UNKNOWN" will be returned. The version string will remain
6672  *                valid for the duration of the application.
6673  *
6674  * @return ANEURALNETWORKS_NO_ERROR if successful.
6675  *
6676  * Available since API level 29.
6677  */
6678 int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version)
6679         __INTRODUCED_IN(29);
6680 
6681 /**
6682  * Get the supported NNAPI version of the specified device.
6683  *
6684  * Each device has a supported feature level, which is the most advanced feature this driver
6685  * implements. For example, if the driver implements the features introduced in Android P,
6686  * but does not implement the features introduced after Android P, the value would be 28.
6687  * Developers could decide whether or not the specified device should be used for a Model that
6688  * has certain feature requirements.
6689  *
6690  * @param device The representation of the specified device.
6691  * @param featureLevel The API level of the most advanced feature this driver implements.
6692  *
6693  * @return ANEURALNETWORKS_NO_ERROR if successful.
6694  *
6695  * Available since API level 29.
6696  */
6697 int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device,
6698                                           int64_t* featureLevel) __INTRODUCED_IN(29);
6699 
6700 #if __ANDROID_API__ >= 30
6701 
6702 /**
6703  * Wait until the device is in a live state.
6704  *
6705  * A device may encounter internal errors and temporarily enter a dead state. A
6706  * call that uses a device in such a state will return with the error
6707  * {@link ANEURALNETWORKS_DEAD_OBJECT}. ANeuralNetworksDevice_wait will block until
6708  * the device is in a live state.
6709  *
6710  * @param device The representation of the specified device.
6711  *
6712  * @return ANEURALNETWORKS_NO_ERROR if successful.
6713  *
6714  * Available since API level 30.
6715  */
6716 int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) __INTRODUCED_IN(30);
6717 
6718 #endif  // __ANDROID_API__ >= 30
6719 
6720 /**
6721  * Get the supported operations for a specified set of devices. If multiple devices
6722  * are selected, the supported operation list is a union of supported operations of all
6723  * selected devices.
6724  *
6725  * @param model The model to be queried.
6726  * @param devices The set of devices. Must not contain duplicates.
6727  * @param numDevices The number of devices in the set.
6728  * @param supportedOps The boolean array to be filled. True means supported. The size of the
6729  *                     boolean array must be at least as large as the number of operations
6730  *                     in the model. The order of elements in the supportedOps array matches
6731  *                     the order in which the corresponding operations were added to the model.
6732  *
6733  * @return ANEURALNETWORKS_NO_ERROR if successful.
6734  *
6735  * Available since API level 29.
6736  */
6737 int ANeuralNetworksModel_getSupportedOperationsForDevices(
6738         const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
6739         uint32_t numDevices, bool* supportedOps) __INTRODUCED_IN(29);
6740 
6741 /**
6742  * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set
6743  * of devices. If more than one device is specified, the compilation will
6744  * distribute the workload automatically across the devices. The model must be fully
6745  * supported by the specified set of devices. This means that
6746  * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every
6747  * operation for that model/devices pair.
6748  *
6749  * The user must handle all compilation and execution failures from the
6750  * specified set of devices. This is in contrast to a use of {@link
6751  * ANeuralNetworksCompilation_create}, where the runtime will attempt to recover
6752  * from such failures.
6753  *
6754  * The model passed to this function is termed the "main model" of the
6755  * compilation, to distinguish it from other models referred to by an Operand
6756  * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
6757  *
6758  * @param model The {@link ANeuralNetworksModel} to be compiled.
6759  * @param devices The set of devices. Must not contain duplicates.
6760  * @param numDevices The number of devices in the set.
6761  * @param compilation The newly created object or NULL if unsuccessful.
6762  *
6763  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
6764  *         if the model is invalid.
6765  *
6766  * Available since API level 29.
6767  */
6768 int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model,
6769                                                 const ANeuralNetworksDevice* const* devices,
6770                                                 uint32_t numDevices,
6771                                                 ANeuralNetworksCompilation** compilation)
6772         __INTRODUCED_IN(29);
6773 
6774 /**
6775  * Sets the compilation caching signature and the cache directory.
6776  *
6777  * Provides optional caching information to the runtime for faster repeated
6778  * compilation.
6779  *
6780  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
6781  *
6782  * @param compilation The compilation to be modified.
6783  * @param cacheDir The cache directory for the runtime to store and retrieve caching
6784  *                 data. It is recommended to use the code cache directory provided
6785  *                 by the Android runtime. If not using the code cache directory, the
6786  *                 user should choose a directory local to the application, and is
6787  *                 responsible for managing the cache entries.
6788  * @param token The token provided by the user to specify a model must be of length
6789  *              ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that
6790  *              the token is unique to a model within the application. The NNAPI
6791  *              runtime cannot detect token collisions; a collision will result in a
6792  *              failed execution or in a successful execution that produces incorrect
6793  *              output values.
6794  *
6795  * @return ANEURALNETWORKS_NO_ERROR if successful.
6796  *
6797  * Available since API level 29.
6798  */
6799 int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* compilation,
6800                                           const char* cacheDir, const uint8_t* token)
6801         __INTRODUCED_IN(29);
6802 
6803 /**
6804  * Schedule synchronous evaluation of the execution.
6805  *
6806  * <p>Schedules synchronous evaluation of the execution. Returns once the
6807  * execution has completed and the outputs are ready to be consumed.
6808  * </p>
6809  *
6810  * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
6811  * and the execution is not able to complete before the timeout duration is
6812  * exceeded, then execution may be aborted, in which case
6813  * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. If the device has
6814  * a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel}
6815  * that is lower than 30, then the timeout duration hint will be ignored.
6816  *
6817  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
6818  * the condition model does not output false within the loop timeout duration,
6819  * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
6820  * will be returned.
6821  *
6822  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
6823  *
6824  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
6825  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
6826  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
6827  * asynchronous execution with dependencies.
6828  *
6829  * Available since API level 29.
6830  *
6831  * @param execution The execution to be scheduled and executed.
6832  *
6833  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
6834  *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
6835  *         be properly mapped.
6836  */
6837 int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __INTRODUCED_IN(29);
6838 
6839 /**
6840  * Get the dimensional information of the specified output operand of the model of the
6841  * {@link ANeuralNetworksExecution}.
6842  *
6843  * The execution must have completed.  On asynchronous execution initiated by
6844  * {@link ANeuralNetworksExecution_startCompute} or
6845  * {@link ANeuralNetworksExecution_startComputeWithDependencies},
6846  * {@link ANeuralNetworksEvent_wait} must be called prior to this function.
6847  *
6848  * @param execution The execution to be queried.
6849  * @param index The index of the output argument we are querying. It is
6850  *              an index into the lists passed to
6851  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
6852  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
6853  * @param rank The rank of the output operand.
6854  *
6855  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
6856  *         if the target output is provided an insufficient buffer at execution time,
6857  *         ANEURALNETWORKS_BAD_DATA if the index is invalid.
6858  *
6859  * Available since API level 29.
6860  */
6861 int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution,
6862                                                   int32_t index, uint32_t* rank)
6863         __INTRODUCED_IN(29);
6864 
6865 /**
6866  * Get the dimensional information of the specified output operand of the model of the
6867  * {@link ANeuralNetworksExecution}. The target output operand cannot be a scalar.
6868  *
6869  * The execution must have completed.  On asynchronous execution initiated by
6870  * {@link ANeuralNetworksExecution_startCompute} or
6871  * {@link ANeuralNetworksExecution_startComputeWithDependencies},
6872  * {@link ANeuralNetworksEvent_wait} must be called prior to this function.
6873  *
6874  * @param execution The execution to be queried.
6875  * @param index The index of the output argument we are querying. It is an index into the lists
6876  *              passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
6877  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
6878  * @param dimensions The dimension array to be filled. The size of the array must be exactly as
6879  *                   large as the rank of the output operand to be queried in the model.
6880  *
6881  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
6882  *         if the target output is provided an insufficient buffer at execution time,
6883  *         ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar.
6884  *
6885  * Available since API level 29.
6886  */
6887 int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution,
6888                                                         int32_t index, uint32_t* dimensions)
6889         __INTRODUCED_IN(29);
6890 
6891 /**
6892  * Create a {@link ANeuralNetworksBurst} to apply the given compilation.
6893  * This only creates the burst object. Computation is only performed once
6894  * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid
6895  * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}.
6896  *
6897  * <p>The provided compilation must outlive the burst object.</p>
6898  *
6899  * Available since API level 29.
6900  *
6901  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
6902  * @param burst The newly created object or NULL if unsuccessful.
6903  *
6904  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
6905  *         if the compilation is invalid.
6906  */
6907 int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation,
6908                                 ANeuralNetworksBurst** burst) __INTRODUCED_IN(29);
6909 
6910 /**
6911  * Destroys the burst object.
6912  *
6913  * Available since API level 29.
6914  *
6915  * @param burst The burst object to be destroyed. Passing NULL is acceptable and
6916  *              results in no operation.
6917  */
6918 void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __INTRODUCED_IN(29);
6919 
6920 /**
6921  * Schedule synchronous evaluation of the execution on a burst object.
6922  *
6923  * <p>Schedules synchronous evaluation of the execution. Returns once the
6924  * execution has completed and the outputs are ready to be consumed.</p>
6925  *
6926  * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution,
6927  * and the execution is not able to complete before the timeout duration is
6928  * exceeded, then execution may be aborted, in which case
6929  * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned.
6930  *
6931  * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
6932  * the condition model does not output false within the loop timeout duration,
6933  * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
6934  * will be returned. If the device has a feature level reported by
6935  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the
6936  * timeout duration hint will be ignored.
6937  *
6938  * <p>There must be at most one {@link ANeuralNetworksExecution} processing at
6939  * any given time for any given burst object. Any
6940  * {@link ANeuralNetworksExecution} launched before the previous has finished
6941  * will result in ANEURALNETWORKS_BAD_STATE.</p>
6942  *
6943  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
6944  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
6945  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
6946  * asynchronous execution with dependencies.
6947  *
6948  * Available since API level 29.
6949  *
6950  * @param burst The burst object to execute on.
6951  * @param execution The execution to be scheduled and executed. The execution
6952  *                  must be created from the same {@link
6953  *                  ANeuralNetworksCompilation} as the burst object.
6954  *
6955  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
6956  */
6957 int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution,
6958                                           ANeuralNetworksBurst* burst) __INTRODUCED_IN(29);
6959 
6960 /**
6961  * Creates a shared memory object from an AHardwareBuffer handle.
6962  *
6963  * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB
6964  * format, it can be used the same way as shared memory created from a file handle. See
6965  * {@link ANeuralNetworksMemory} for a description on how to use this shared memory.
6966  *
6967  * If the shared memory is backed by an AHardwareBuffer of a format other than
6968  * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and outputs.
6969  * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
6970  * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both
6971  * offset and length must be set to zero and the entire memory region will be
6972  * associated with the specified input or output operand. There is no guarantee
6973  * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination
6974  * can be used by arbitrary devices. The execution will fail if the selected set of
6975  * devices cannot consume the buffer.
6976  *
6977  * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory
6978  * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is
6979  * disallowed.
6980  *
6981  * The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object.
6982  *
6983  * Available since API level 29.
6984  *
6985  * @param ahwb The AHardwareBuffer handle.
6986  * @param memory The memory object to be created.
6987  *               Set to NULL if unsuccessful.
6988  *
6989  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
6990  *
6991  * @see AHardwareBuffer
6992  */
6993 int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb,
6994                                                     ANeuralNetworksMemory** memory)
6995         __INTRODUCED_IN(29);
6996 
6997 /**
6998 
6999  * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be
7000  * measured. Evaluation of the execution must not have been scheduled.
7001  *
7002  * By default, duration is not measured.
7003  *
7004  * The {@link ANeuralNetworksExecution} must have been created from an
7005  * {@link ANeuralNetworksCompilation} which in turn was created from
7006  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1.
7007  * If the device has a feature level reported by
7008  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 29, then the
7009  * duration will not be measured.
7010  *
7011  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7012  *
7013  * Available since API level 29.
7014  *
7015  * @param execution The execution to be modified.
7016  * @param measure 'true' if duration is to be measured, 'false' if not.
7017  *
7018  * @return ANEURALNETWORKS_NO_ERROR if successful.
7019  */
7020 int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure)
7021         __INTRODUCED_IN(29);
7022 
7023 /**
7024  * Get the time spent in the specified {@link ANeuralNetworksExecution}, in nanoseconds.
7025  *
7026  * The execution must have completed.  On asynchronous execution initiated by
7027  * {@link ANeuralNetworksExecution_startCompute} or
7028  * {@link ANeuralNetworksExecution_startComputeWithDependencies},
7029  * {@link ANeuralNetworksEvent_wait} must be called prior to this function.
7030  *
7031  * @param execution The execution to be queried.
7032  * @param durationCode The measurement to be queried, specified by {@link DurationCode}.
7033  * @param duration The returned duration. If no measurement was requested by
7034  *                 {@link ANeuralNetworksExecution_setMeasureTiming}, if the
7035  *                 device is has a feature level reported by
7036  *                 {@link ANeuralNetworksDevice_getFeatureLevel} that is lower
7037  *                 than 29, or for some other reason the duration is not
7038  *                 available, UINT64_MAX will be returned. A particular device
7039  *                 need not support any given measurement.
7040  *
7041  * @return ANEURALNETWORKS_NO_ERROR if successful.
7042  *
7043  * Available since API level 29.
7044  */
7045 int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution,
7046                                          int32_t durationCode, uint64_t* duration)
7047         __INTRODUCED_IN(29);
7048 
7049 #endif  // __ANDROID_API__ >= 29
7050 
7051 #if __ANDROID_API__ >= 27
7052 
7053 /**
7054  * Creates a shared memory object from a file descriptor.
7055  *
7056  * The shared memory is backed by a file descriptor via mmap.
7057  * See {@link ANeuralNetworksMemory} for a description on how to use
7058  * this shared memory.
7059  *
7060  * Available since API level 27.
7061  *
7062  * @param size The requested size in bytes.
7063  *             Must not be larger than the file size.
7064  * @param prot The desired memory protection for the mapping.
7065  *             It is either PROT_NONE or the bitwise OR of one or
7066  *             more of the following flags: PROT_READ, PROT_WRITE.
7067  * @param fd The requested file descriptor.
7068  *           The file descriptor has to be mmap-able. The file
7069  *           descriptor will be duplicated.
7070  * @param offset The offset to the beginning of the file of the area to map.
7071  *               The offset has to be aligned to a page size.
7072  * @param memory The memory object to be created.
7073  *               Set to NULL if unsuccessful.
7074  *
7075  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
7076  */
7077 int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
7078                                        ANeuralNetworksMemory** memory) __INTRODUCED_IN(27);
7079 
7080 /**
7081  * Delete a memory object.
7082  *
7083  * Destroys the object used by the run time to keep track of the memory.
7084  * This will free the underlying actual memory if no other code has open
7085  * handles to this memory.
7086  *
7087  * Available since API level 27.
7088  *
7089  * @param memory The memory object to be freed. Passing NULL is acceptable and
7090  *               results in no operation.
7091  */
7092 void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27);
7093 
7094 /**
7095  * Create an empty {@link ANeuralNetworksModel}.
7096  *
7097  * <p>This only creates the object. Computation is performed once
7098  * {@link ANeuralNetworksExecution_burstCompute},
7099  * {@link ANeuralNetworksExecution_compute},
7100  * {@link ANeuralNetworksExecution_startCompute} or
7101  * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
7102  *
7103  * The model should be constructed with calls to
7104  * {@link ANeuralNetworksModel_addOperation} and
7105  * {@link ANeuralNetworksModel_addOperand}
7106  *
7107  * <p>{@link ANeuralNetworksModel_finish} should be called once the model
7108  * has been fully constructed.</p>
7109  *
7110  * <p>{@link ANeuralNetworksModel_free} should be called once the model
7111  * is no longer needed.</p>
7112  *
7113  * Available since API level 27.
7114  *
7115  * @param model The {@link ANeuralNetworksModel} to be created.
7116  *              Set to NULL if unsuccessful.
7117  *
7118  * @return ANEURALNETWORKS_NO_ERROR if successful.
7119  */
7120 int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __INTRODUCED_IN(27);
7121 
7122 /**
7123  * Destroy a model.
7124  *
7125  * The model need not have been finished by a call to
7126  * {@link ANeuralNetworksModel_finish}.
7127  *
7128  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7129  *
7130  * Available since API level 27.
7131  *
7132  * @param model The model to be destroyed. Passing NULL is acceptable and
7133  *              results in no operation.
7134  */
7135 void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __INTRODUCED_IN(27);
7136 
7137 /**
7138  * Indicate that we have finished modifying a model. Required before
7139  * calling {@link ANeuralNetworksCompilation_create} and
7140  * {@link ANeuralNetworksCompilation_createForDevices}.
7141  *
7142  * An application must ensure that no other thread uses the model at the same
7143  * time.
7144  *
7145  * This function must only be called once for a given model.
7146  *
7147  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7148  *
7149  * Available since API level 27.
7150  *
7151  * @param model The model to be finished.
7152  *
7153  * @return ANEURALNETWORKS_NO_ERROR if successful.
7154  */
7155 int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __INTRODUCED_IN(27);
7156 
7157 /**
7158  * Add an operand to a model.
7159  *
7160  * The order in which the operands are added is important. The first one added
7161  * to a model will have the index value 0, the second 1, etc. These indexes are
7162  * used as operand identifiers in
7163  * {@link ANeuralNetworksModel_addOperation},
7164  * {@link ANeuralNetworksModel_identifyInputsAndOutputs},
7165  * {@link ANeuralNetworksModel_setOperandValue},
7166  * {@link ANeuralNetworksModel_setOperandValueFromMemory},
7167  * {@link ANeuralNetworksExecution_setInput},
7168  * {@link ANeuralNetworksExecution_setInputFromMemory},
7169  * {@link ANeuralNetworksExecution_setOutput},
7170  * {@link ANeuralNetworksExecution_setOutputFromMemory} and
7171  * {@link ANeuralNetworksExecution_setOperandValue}.
7172  *
7173  * <p>Every operand must be referenced in exactly one of the following
7174  * ways:<ul>
7175  *    <li>It is identified as a model input with
7176  *        {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>
7177  *    <li>It is identified as a constant with
7178  *        {@link ANeuralNetworksModel_setOperandValue} or
7179  *        {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
7180  *    <li>It is identified as an output of exactly one operation with
7181  *        {@link ANeuralNetworksModel_addOperation}.</li></p>
7182  * <p>An operand that is identified as a model input or as a constant
7183  * must not also be identified as a model output with
7184  * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p>
7185  *
7186  * To build a model that can accommodate inputs of various sizes, as
7187  * you may want to do for a CNN, leave unspecified the dimensions that
7188  * will vary at run time.  If you do so, fully specify dimensions
7189  * when calling {@link ANeuralNetworksExecution_setInput} or
7190  * {@link ANeuralNetworksExecution_setInputFromMemory}.
7191  *
7192  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7193  * called will return an error.
7194  *
7195  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7196  *
7197  * Available since API level 27.
7198  *
7199  * @param model The model to be modified.
7200  * @param type The {@link ANeuralNetworksOperandType} that describes the shape
7201  *             of the operand.  Neither the {@link ANeuralNetworksOperandType}
7202  *             nor the dimensions it points to need to outlive the call to
7203  *             {@link ANeuralNetworksModel_addOperand}.
7204  *
7205  * @return ANEURALNETWORKS_NO_ERROR if successful.
7206  */
7207 int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model,
7208                                     const ANeuralNetworksOperandType* type) __INTRODUCED_IN(27);
7209 
7210 /**
7211  * Sets an operand to a constant value.
7212  *
7213  * Values of length smaller or equal to
7214  * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}
7215  * are immediately copied into the model.
7216  *
7217  * For values of length greater than
7218  * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to
7219  * the buffer is stored within the model. The application must not change the
7220  * content of this region until all executions using this model have
7221  * completed. As the data may be copied during processing, modifying the data
7222  * after this call yields undefined results. The provided buffer must outlive
7223  * this model.
7224  *
7225  * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}
7226  * is likely to be more efficient.
7227  *
7228  * To indicate that an optional operand should be considered missing,
7229  * pass nullptr for buffer and 0 for length.
7230  *
7231  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7232  * called will return an error.
7233  *
7234  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7235  *
7236  * Available since API level 27.
7237  *
7238  * @param model The model to be modified.
7239  * @param index The index of the model operand we're setting.
7240  * @param buffer A pointer to the data to use.
7241  * @param length The size in bytes of the data value.
7242  *
7243  * @return ANEURALNETWORKS_NO_ERROR if successful.
7244  */
7245 int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index,
7246                                          const void* buffer, size_t length) __INTRODUCED_IN(27);
7247 
7248 #if __ANDROID_API__ >= 29
7249 
7250 /**
7251  * Sets an operand's per channel quantization parameters.
7252  *
7253  * Sets parameters required by a tensor of type
7254  * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}.
7255  * This function must be called for every tensor of type
7256  * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before
7257  * calling {@link ANeuralNetworksModel_finish}.
7258  *
7259  * Available since API level 29.
7260  *
7261  * @param model The model to be modified.
7262  * @param index The index of the model operand we're setting.
7263  * @param channelQuant The per channel quantization parameters for the operand.
7264  *                    No memory in this struct needs to outlive the call to
7265  *                    this function.
7266  *
7267  * @return ANEURALNETWORKS_NO_ERROR if successful.
7268  */
7269 int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
7270         ANeuralNetworksModel* model, int32_t index,
7271         const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __INTRODUCED_IN(29);
7272 
7273 #endif  // __ANDROID_API__ >= 29
7274 
7275 /**
7276  * Sets an operand to a value stored in a memory object.
7277  *
7278  * The content of the memory is not copied. A reference to that memory is stored
7279  * inside the model. The application must not change the content of the memory
7280  * region until all executions using this model have completed.  As the data may
7281  * be copied during processing, modifying the data after this call yields
7282  * undefined results.
7283  *
7284  * <p>The provided memory must outlive this model.</p>
7285  *
7286  * To indicate that an optional operand should be considered missing,
7287  * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.
7288  *
7289  * It is disallowed to set an operand value with shared memory backed by an AHardwareBuffer
7290  * of a format other than AHARDWAREBUFFER_FORMAT_BLOB.
7291  *
7292  * It is disallowed to set an operand value with memory created from
7293  * {@link ANeuralNetworksMemory_createFromDesc}.
7294  *
7295  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7296  * called will return an error.
7297  *
7298  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7299  * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
7300  * AHardwareBuffer usage.
7301  *
7302  * Available since API level 27.
7303  *
7304  * @param model The model to be modified.
7305  * @param index The index of the model operand we're setting.
7306  * @param buffer A pointer to the data to use.
7307  * @param memory The memory containing the data.
7308  * @param offset This specifies the location of the data within the memory.
7309  *               The offset is in bytes from the start of memory.
7310  * @param length The size in bytes of the data value.
7311  *
7312  * @return ANEURALNETWORKS_NO_ERROR if successful.
7313  */
7314 int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index,
7315                                                    const ANeuralNetworksMemory* memory,
7316                                                    size_t offset, size_t length)
7317         __INTRODUCED_IN(27);
7318 
7319 #if __ANDROID_API__ >= 30
7320 
7321 /**
7322  * Sets an operand to a value that is a reference to another NNAPI model.
7323  *
7324  * The referenced model must already have been finished by a call to
7325  * {@link ANeuralNetworksModel_finish}.
7326  *
7327  * The {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16} setting of
7328  * referenced models is overridden by that setting of the main model of a
7329  * compilation.
7330  *
7331  * The referenced model must outlive the model referring to it.
7332  *
7333  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
7334  * been called will return an error.
7335  *
7336  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7337  *
7338  * Available since API level 30.
7339  *
7340  * @param model The model to be modified.
7341  * @param index The index of the model operand we're setting.
7342  * @param value The model to be referenced.
7343  *
7344  * @return ANEURALNETWORKS_NO_ERROR if successful.
7345  */
7346 int ANeuralNetworksModel_setOperandValueFromModel(ANeuralNetworksModel* model, int32_t index,
7347                                                   const ANeuralNetworksModel* value)
7348         __INTRODUCED_IN(30);
7349 
7350 #endif  // __ANDROID_API__ >= 30
7351 
7352 /**
7353  * Add an operation to a model.
7354  *
7355  * @param model The model to be modified.
7356  * @param type The {@link ANeuralNetworksOperationType} of the operation.
7357  * @param inputCount The number of entries in the inputs array.
7358  * @param inputs An array of indexes identifying each operand.
7359  * @param outputCount The number of entries in the outputs array.
7360  * @param outputs An array of indexes identifying each operand.
7361  *
7362  * The operands specified by inputs and outputs must have been
7363  * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
7364  *
7365  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7366  * called will return an error.
7367  *
7368  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7369  *
7370  * Available since API level 27.
7371  *
7372  * @return ANEURALNETWORKS_NO_ERROR if successful.
7373  */
7374 int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
7375                                       ANeuralNetworksOperationType type, uint32_t inputCount,
7376                                       const uint32_t* inputs, uint32_t outputCount,
7377                                       const uint32_t* outputs) __INTRODUCED_IN(27);
7378 
7379 /**
7380  * Specifies which operands will be the model's inputs and
7381  * outputs. Every model must have at least one input and one output.
7382  *
7383  * An operand cannot be used for both input and output. Doing so will
7384  * return an error.
7385  *
7386  * @param model The model to be modified.
7387  * @param inputCount The number of entries in the inputs array.
7388  * @param inputs An array of indexes identifying the input operands.
7389  * @param outputCount The number of entries in the outputs array.
7390  * @param outputs An array of indexes identifying the output operands.
7391  *
7392  * The operands specified by inputs and outputs must have been
7393  * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
7394  *
7395  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7396  * called will return an error.
7397  *
7398  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7399  *
7400  * Available since API level 27.
7401  *
7402  */
7403 int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount,
7404                                                   const uint32_t* inputs, uint32_t outputCount,
7405                                                   const uint32_t* outputs) __INTRODUCED_IN(27);
7406 
7407 #if __ANDROID_API__ >= 28
7408 
7409 /**
7410  * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
7411  * calculated with range and/or precision as low as that of the IEEE 754 16-bit
7412  * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
7413  * must be calculated using at least the range and precision of the IEEE 754
7414  * 32-bit floating-point format.
7415  *
7416  * The relaxComputationFloat32toFloat16 setting of the main model of
7417  * a compilation overrides the values of the referenced models.
7418  *
7419  * @param model The model to be modified.
7420  * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
7421  *              calculated with range and/or precision as low as that of the
7422  *              IEEE 754 16-bit floating point format. 'false' indicates
7423  *              {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
7424  *              at least the range and precision of the IEEE 754 32-bit floating
7425  *              point format.
7426  *
7427  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
7428  * called will return an error.
7429  *
7430  * Available since API level 28.
7431  *
7432  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
7433  */
7434 int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow)
7435         __INTRODUCED_IN(28);
7436 
7437 #endif  // __ANDROID_API__ >= 28
7438 
7439 /**
7440  * Create a {@link ANeuralNetworksCompilation} to compile the given model.
7441  *
7442  * The model passed to this function is termed the "main model" of the
7443  * compilation, to distinguish it from other models referred to by an Operand
7444  * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
7445  *
7446  * <p>This function only creates the object. Compilation is only performed once
7447  * {@link ANeuralNetworksCompilation_finish} is invoked.</p>
7448  *
7449  * <p>{@link ANeuralNetworksCompilation_finish} should be called once
7450  * all desired properties have been set on the compilation.</p>
7451  *
7452  * <p>{@link ANeuralNetworksModel_free} should be called once the compilation
7453  * is no longer needed.</p>
7454  *
7455  * <p>The provided model must outlive the compilation.</p>
7456  *
7457  * The model must already have been finished by a call to
7458  * {@link ANeuralNetworksModel_finish}.
7459  *
7460  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7461  *
7462  * Available since API level 27.
7463  *
7464  * @param model The {@link ANeuralNetworksModel} to be compiled.
7465  * @param compilation The newly created object or NULL if unsuccessful.
7466  *
7467  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
7468  *         if the model is invalid.
7469  */
7470 int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model,
7471                                       ANeuralNetworksCompilation** compilation) __INTRODUCED_IN(27);
7472 
7473 /**
7474  * Destroy a compilation.
7475  *
7476  * The compilation need not have been finished by a call to
7477  * {@link ANeuralNetworksCompilation_finish}.
7478  *
7479  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7480  *
7481  * Available since API level 27.
7482  *
7483  * @param compilation The compilation to be destroyed. Passing NULL is acceptable and
7484  *                    results in no operation.
7485  */
7486 void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27);
7487 
7488 /**
7489  * Sets the execution preference.
7490  *
7491  * <p>Provides guidance to the runtime when trade-offs are possible. By default the runtime
7492  * uses PREFER_SINGLE_FAST_ANSWER</p>
7493  *
7494  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7495  *
7496  * Available since API level 27.
7497  *
7498  * @param compilation The compilation to be modified.
7499  * @param preference Either {@link PREFER_LOW_POWER},
7500  *                  {@link PREFER_SINGLE_FAST_ANSWER}, or
7501  *                  {@link PREFER_SUSTAINED_SPEED}.
7502  *
7503  * @return ANEURALNETWORKS_NO_ERROR if successful.
7504  */
7505 int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation,
7506                                              int32_t preference) __INTRODUCED_IN(27);
7507 
7508 /**
7509  * Indicate that we have finished modifying a compilation. Required before
7510  * calling {@link ANeuralNetworksBurst_create} or
7511  * {@link ANeuralNetworksExecution_create}.
7512  *
7513  * An application must ensure that no other thread uses the compilation at the
7514  * same time.
7515  *
7516  * This function must only be called once for a given compilation.
7517  *
7518  * If {@link ANeuralNetworksCompilation_setTimeout} was called on this
7519  * compilation, and the compilation is not able to be finished before the
7520  * timeout duration is exceeded, then compilation may be aborted, in which case
7521  * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned.
7522  *
7523  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7524  *
7525  * Available since API level 27.
7526  *
7527  * @param compilation The compilation to be finished.
7528  *
7529  * @return ANEURALNETWORKS_NO_ERROR if successful.
7530  */
7531 int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27);
7532 
7533 #if __ANDROID_API__ >= 30
7534 
7535 /**
7536  * Set the execution priority.
7537  *
7538  * Execution priorities are relative to other executions created by the same
7539  * application (specifically same uid) for the same device. Specifically,
7540  * priorities of executions from one application will not affect executions from
7541  * another application. Similarly, priorities of executions on one device will
7542  * not affect executions on another device.
7543  *
7544  * Higher priority executions may use more compute resources than lower priority
7545  * executions, and may preempt or starve lower priority executions.
7546  *
7547  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7548  *
7549  * Available since API level 30.
7550  *
7551  * @param compilation The compilation to be modified.
7552  * @param priority The relative priority of the execution compared to other
7553  *     executions created by the application. Must be one of
7554  *     ANEURALNETWORKS_PRIORITY_*.
7555  *
7556  * @return ANEURALNETWORKS_NO_ERROR if successful.
7557  */
7558 int ANeuralNetworksCompilation_setPriority(ANeuralNetworksCompilation* compilation, int priority)
7559         __INTRODUCED_IN(30);
7560 
7561 /**
7562  * Set the maximum expected duration for compiling the model.
7563  *
7564  * If the device is not able to complete the compilation within the specified
7565  * duration, the compilation may be aborted. The timeout duration begins at the
7566  * call to {@link ANeuralNetworksCompilation_finish}.
7567  *
7568  * This timeout duration acts as a hint to drivers, and can be used to both free
7569  * up compute resources within the driver and return control back to the
7570  * application quicker than is possible without the hint. It enables drivers
7571  * that are able to estimate how long a compilation will take to abort the
7572  * compilation before it has even started if the driver believes the compilation
7573  * cannot be completed within the timeout duration. Similarly, it enables
7574  * drivers to abort an ongoing compilation if it is taking too long. However,
7575  * this call does not guarantee that the compilation will complete or abort
7576  * within the timeout duration.
7577  *
7578  * By default (i.e., unless ANeuralNetworksCompilation_setTimeout is called),
7579  * the timeout duration for compiling the model is considered infinite.
7580  *
7581  * The {@link ANeuralNetworksCompilation} must have been created with
7582  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
7583  * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
7584  * device has a feature level reported by
7585  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the
7586  * timeout duration hint will be ignored.
7587  *
7588  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
7589  *
7590  * @param compilation The compilation to be modified.
7591  * @param duration The maximum amount of time in nanoseconds that is expected to
7592  *     be spent finishing a compilation. If this duration is exceeded, the
7593  *     compilation may be aborted. If set to 0, the timeout duration is
7594  *     considered infinite.
7595  *
7596  * @return ANEURALNETWORKS_NO_ERROR if successful.
7597  *
7598  * Available since API level 30.
7599  */
7600 int ANeuralNetworksCompilation_setTimeout(ANeuralNetworksCompilation* compilation,
7601                                           uint64_t duration) __INTRODUCED_IN(30);
7602 
7603 #endif  // __ANDROID_API__ >= 30
7604 
7605 /**
7606  * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
7607  * This only creates the object. Computation is only performed once
7608  * {@link ANeuralNetworksExecution_burstCompute},
7609  * {@link ANeuralNetworksExecution_compute},
7610  * {@link ANeuralNetworksExecution_startCompute} or
7611  * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
7612  *
7613  * <p>The provided compilation must outlive the execution.</p>
7614  *
7615  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7616  *
7617  * Available since API level 27.
7618  *
7619  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
7620  * @param execution The newly created object or NULL if unsuccessful.
7621  *
7622  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
7623  *         if the compilation is invalid.
7624  */
7625 int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation,
7626                                     ANeuralNetworksExecution** execution) __INTRODUCED_IN(27);
7627 
7628 /**
7629  * Destroy an execution.
7630  *
7631  * <p>The execution need not have been scheduled by a call to
7632  * {@link ANeuralNetworksExecution_burstCompute},
7633  * {@link ANeuralNetworksExecution_compute},
7634  * {@link ANeuralNetworksExecution_startCompute} or
7635  * {@link ANeuralNetworksExecution_startComputeWithDependencies}; but if it has been scheduled,
7636  * then the application must not call {@link ANeuralNetworksExecution_free}
7637  * until the execution has completed (i.e.,
7638  * {@link ANeuralNetworksExecution_burstCompute},
7639  * {@link ANeuralNetworksExecution_compute}, or
7640  * {@link ANeuralNetworksEvent_wait} has returned).
7641  *
7642  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7643  *
7644  * Available since API level 27.
7645  *
7646  * @param execution The execution to be destroyed. Passing NULL is acceptable and
7647  *                  results in no operation.
7648  */
7649 void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __INTRODUCED_IN(27);
7650 
7651 /**
7652  * Associate a user buffer with an input of the model of the
7653  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
7654  * been scheduled. Once evaluation of the execution has been scheduled, the
7655  * application must not change the content of the buffer until the execution has
7656  * completed. Evaluation of the execution will not change the content of the
7657  * buffer.
7658  *
7659  * <p>The provided buffer must outlive the execution.</p>
7660  *
7661  * If the input is optional, you can indicate that it is omitted by
7662  * passing nullptr for buffer and 0 for length.
7663  *
7664  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7665  *
7666  * Available since API level 27.
7667  *
7668  * @param execution The execution to be modified.
7669  * @param index The index of the input argument we are setting. It is
7670  *              an index into the lists passed to
7671  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
7672  *              the index associated with
7673  *              {@link ANeuralNetworksModel_addOperand}.
7674  * @param type The {@link ANeuralNetworksOperandType} of the
7675  *             operand. Unless the input is omitted, this should be
7676  *             used to specify the dimensions that were left
7677  *             unspecified when the operand was added to the
7678  *             model. All other properties of the type must be the
7679  *             same as specified in the model. If the type is the same
7680  *             as specified when the model was built, NULL can be
7681  *             passed. Neither the {@link ANeuralNetworksOperandType}
7682  *             nor the dimensions it points to need to outlive the call
7683  *             to {@link ANeuralNetworksExecution_setInput}.
7684  * @param buffer The buffer containing the data.
7685  * @param length The length in bytes of the buffer.
7686  *
7687  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
7688  *         name is not recognized or the buffer is too small for the input.
7689  */
7690 int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index,
7691                                       const ANeuralNetworksOperandType* type, const void* buffer,
7692                                       size_t length) __INTRODUCED_IN(27);
7693 
7694 /**
7695  * Associate a region of a memory object with an input of the model of the
7696  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
7697  * been scheduled. Once evaluation of the execution has been scheduled, the
7698  * application must not change the content of the region until the execution has
7699  * completed. Evaluation of the execution will not change the content of the
7700  * region.
7701  *
7702  * <p>The provided memory must outlive the execution.</p>
7703  *
7704  * If the input is optional, you can indicate that it is omitted by
7705  * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for
7706  * buffer and 0 for length.
7707  *
7708  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7709  * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
7710  * AHardwareBuffer usage.
7711  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
7712  * created from memory descriptors.
7713  *
7714  * Available since API level 27.
7715  *
7716  * @param execution The execution to be modified.
7717  * @param index The index of the input argument we are setting. It is
7718  *              an index into the lists passed to
7719  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
7720  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
7721  * @param type The {@link ANeuralNetworksOperandType} of the
7722  *             operand. This should be used to specify the dimensions
7723  *             that were left unspecified when the operand was added
7724  *             to the model. All other properties of the type must be
7725  *             the same as specified in the model. If the type is the
7726  *             same as specified when the model was built, NULL can be
7727  *             passed. Neither the {@link ANeuralNetworksOperandType}
7728  *             nor the dimensions it points to need to outlive the call
7729  *             to {@link ANeuralNetworksExecution_setInputFromMemory}.
7730  * @param memory The memory containing the data.
7731  * @param offset This specifies the location of the data within the memory.
7732  *               The offset is in bytes from the start of memory.
7733  * @param length The size in bytes of the data value.
7734  *
7735  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
7736  *         name is not recognized or the buffer is too small for the input.
7737  */
7738 int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
7739                                                 const ANeuralNetworksOperandType* type,
7740                                                 const ANeuralNetworksMemory* memory, size_t offset,
7741                                                 size_t length) __INTRODUCED_IN(27);
7742 
7743 /**
7744  * Associate a user buffer with an output of the model of the
7745  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
7746  * been scheduled. Once evaluation of the execution has been scheduled, the
7747  * application must not change the content of the buffer until the execution has
7748  * completed.
7749  *
7750  * If the output is optional, you can indicate that it is omitted by
7751  * passing nullptr for buffer and 0 for length.
7752  *
7753  * <p>The provided buffer must outlive the execution.</p>
7754  *
7755  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7756  *
7757  * Available since API level 27.
7758  *
7759  * @param execution The execution to be modified.
7760  * @param index The index of the output argument we are setting. It is
7761  *              an index into the lists passed to
7762  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
7763  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
7764  * @param type The {@link ANeuralNetworksOperandType} of the
7765  *             operand. Unless the output is omitted, this should be
7766  *             used to specify the dimensions that were left
7767  *             unspecified when the operand was added to the
7768  *             model. All other properties of the type must be the
7769  *             same as specified in the model. If the type is the same
7770  *             as specified when the model was built, NULL can be
7771  *             passed. Neither the {@link ANeuralNetworksOperandType}
7772  *             nor the dimensions it points to need to outlive the call
7773  *             to {@link ANeuralNetworksExecution_setOutput}.
7774  *             Since API level 29, the output operand can have unspecified
7775  *             dimensions or rank to be deduced dynamically during the execution.
7776  *             However, the user must provide a large enough buffer. The user
7777  *             can retrieve the output dimensional information after the execution
7778  *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
7779  *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
7780  * @param buffer The buffer where the data is to be written.
7781  * @param length The length in bytes of the buffer.
7782  *
7783  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
7784  *         name is not recognized or the buffer is too small for the output.
7785  */
7786 int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index,
7787                                        const ANeuralNetworksOperandType* type, void* buffer,
7788                                        size_t length) __INTRODUCED_IN(27);
7789 
7790 /**
7791  * Associate a region of a memory object with an output of the model of the
7792  * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
7793  * been scheduled. Once evaluation of the execution has been scheduled, the
7794  * application must not change the content of the region until the execution has
7795  * completed.
7796  *
7797  * If the output is optional, you can indicate that it is omitted by
7798  * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for
7799  * buffer and 0 for length.
7800  *
7801  * <p>The provided memory must outlive the execution.</p>
7802  *
7803  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7804  * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
7805  * AHardwareBuffer usage.
7806  * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
7807  * created from memory descriptors.
7808  *
7809  * Available since API level 27.
7810  *
7811  * @param execution The execution to be modified.
7812  * @param index The index of the output argument we are setting. It is
7813  *              an index into the lists passed to
7814  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
7815  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
7816  * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be
7817  *             used to specify the dimensions that were left
7818  *             unspecified when the operand was added to the
7819  *             model. All other properties of the type must be the
7820  *             same as specified in the model. If the type is the same
7821  *             as specified when the model was built, NULL can be
7822  *             passed. Neither the {@link ANeuralNetworksOperandType}
7823  *             nor the dimensions it points to need to outlive the call
7824  *             to {@link ANeuralNetworksExecution_setOutputFromMemory}.
7825  *             Since API level 29, the output operand can have unspecified
7826  *             dimensions or rank to be deduced dynamically during the execution.
7827  *             However, the user must provide a large enough memory. The user
7828  *             can retrieve the output dimensional information after the execution
7829  *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
7830  *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
7831  * @param memory The memory where the data is to be stored.
7832  * @param offset This specifies the location of the data within the memory.
7833  *               The offset is in bytes from the start of memory.
7834  * @param length The length in bytes of the data value.
7835  *
7836  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
7837  *         name is not recognized or the buffer is too small for the output.
7838  */
7839 int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
7840                                                  const ANeuralNetworksOperandType* type,
7841                                                  const ANeuralNetworksMemory* memory, size_t offset,
7842                                                  size_t length) __INTRODUCED_IN(27);
7843 
7844 /**
7845  * Schedule asynchronous evaluation of the execution.
7846  *
7847  * <p>Schedules asynchronous evaluation of the execution. Once the execution
7848  * has completed and the outputs are ready to be consumed, the returned event
7849  * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that
7850  * event.
7851  * </p>
7852  *
7853  * ANeuralNetworksEvent_wait must be called to recuperate the resources used
7854  * by the execution.
7855  *
7856  * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
7857  * and the execution is not able to complete before the timeout duration is
7858  * exceeded, then execution may be aborted, in which case
7859  * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned through
7860  * {@link ANeuralNetworksExecution_startCompute} or
7861  * {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
7862  * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
7863  * is lower than 30, then the timeout duration hint will be ignored.
7864  *
7865  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
7866  * the condition model does not output false within the loop timeout duration,
7867  * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
7868  * will be returned through {@link ANeuralNetworksEvent_wait} on the event
7869  * object.
7870  *
7871  * If the device can detect before the execution has started that the execution
7872  * will not complete within the timeout duration, the device may choose to skip
7873  * the execution and instead return {@link ANEURALNETWORKS_MISSED_DEADLINE_*}.
7874  *
7875  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7876  *
7877  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
7878  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
7879  * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
7880  * asynchronous execution with dependencies.
7881  *
7882  * Available since API level 27.
7883  *
7884  * @param execution The execution to be scheduled and executed.
7885  * @param event The event that will be signaled on completion. event is set to
7886  *              NULL if there's an error.
7887  *
7888  * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
7889  */
7890 int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution,
7891                                           ANeuralNetworksEvent** event) __INTRODUCED_IN(27);
7892 
7893 #if __ANDROID_API__ >= 30
7894 
7895 /**
7896  * Set the maximum expected duration of the specified execution.
7897  *
7898  * If the device is not able to complete the execution within the specified
7899  * duration, the execution may be aborted. The timeout duration begins at a
7900  * call to one of:
7901  * - {@link ANeuralNetworksExecution_burstCompute}
7902  * - {@link ANeuralNetworksExecution_compute}
7903  * - {@link ANeuralNetworksExecution_startCompute}
7904  * - {@link ANeuralNetworksExecution_startComputeWithDependencies}
7905  *
7906  * This timeout duration acts as a hint to drivers, and can be used to both free
7907  * up compute resources within the driver and return control back to the
7908  * application quicker than is possible without the hint. It enables drivers
7909  * that are able to estimate how long an execution will take to abort the
7910  * execution before it has even started if the driver believes the execution
7911  * cannot be completed within the timeout duration. Similarly, it enables
7912  * drivers to abort an ongoing execution if it is taking too long. However, this
7913  * call does not guarantee that the execution will complete or abort within the
7914  * timeout duration.
7915  *
7916  * By default (i.e., unless ANeuralNetworksExecution_setTimeout is called),
7917  * the timeout duration for execution is considered infinite.
7918  *
7919  * The {@link ANeuralNetworksExecution} must have been created from an
7920  * {@link ANeuralNetworksCompilation} which in turn was created from
7921  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
7922  * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
7923  * device has a feature level reported by
7924  * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the
7925  * timeout duration hint will be ignored.
7926  *
7927  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7928  *
7929  * @param execution The execution to be modified.
7930  * @param duration The maximum amount of time in nanoseconds that is expected to
7931  *     be spent executing a model. If this duration is exceeded, the execution
7932  *     may be aborted. If set to 0, the timeout duration is considered infinite.
7933  *
7934  * @return ANEURALNETWORKS_NO_ERROR if successful.
7935  *
7936  * Available since API level 30.
7937  */
7938 int ANeuralNetworksExecution_setTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
7939         __INTRODUCED_IN(30);
7940 
7941 /**
7942  * Set the maximum duration of WHILE loops in the specified execution.
7943  *
7944  * This is a fuzzy per-loop timeout intended to prevent infinite loops.
7945  *
7946  * If a WHILE loop condition model does not output false within the specified
7947  * duration, the execution will be aborted.
7948  *
7949  * See {@link ANeuralNetworks_getDefaultLoopTimeout} and
7950  * {@link ANeuralNetworks_getMaximumLoopTimeout} for the default
7951  * and maximum timeout values.
7952  *
7953  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
7954  *
7955  * @param execution The execution to be modified.
7956  * @param duration The maximum amount of time in nanoseconds that can be spent
7957  *     executing a WHILE loop. If the specified duration value exceeds the value
7958  *     produced by {@link ANeuralNetworks_getMaximumLoopTimeout}, it will be
7959  *     overridden by that value.
7960  *
7961  * @return ANEURALNETWORKS_NO_ERROR if successful.
7962  *         ANEURALNETWORKS_BAD_STATE if execution has started.
7963  *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
7964  *
7965  * Available since API level 30.
7966  */
7967 int ANeuralNetworksExecution_setLoopTimeout(ANeuralNetworksExecution* execution, uint64_t duration)
7968         __INTRODUCED_IN(30);
7969 
7970 /**
7971  * Get the default timeout value for WHILE loops.
7972  *
7973  * @return The default timeout value in nanoseconds.
7974  *
7975  * Available since API level 30.
7976  */
7977 uint64_t ANeuralNetworks_getDefaultLoopTimeout() __INTRODUCED_IN(30);
7978 
7979 /**
7980  * Get the maximum timeout value for WHILE loops.
7981  *
7982  * @return The maximum timeout value in nanoseconds.
7983  *
7984  * Available since API level 30.
7985  */
7986 uint64_t ANeuralNetworks_getMaximumLoopTimeout() __INTRODUCED_IN(30);
7987 
7988 #endif  // __ANDROID_API__ >= 30
7989 
7990 /**
7991  * Waits until the execution completes.
7992  *
7993  * More than one thread can wait on an event. When the execution completes,
7994  * all threads will be released.
7995  *
7996  * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution
7997  * corresponding to this event, and the execution is not able to complete
7998  * before the duration is exceeded, the execution may be aborted, in which case
7999  * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned here.
8000  *
8001  * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
8002  * the condition model does not output false within the loop timeout duration,
8003  * the execution will be aborted, and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
8004  * will be returned here.
8005  *
8006  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
8007  *
8008  * Available since API level 27.
8009  *
8010  * @param event The event that will be signaled on completion.
8011  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
8012  *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
8013  *         be properly mapped.
8014  */
8015 int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __INTRODUCED_IN(27);
8016 
8017 /**
8018  * Destroys the event.
8019  *
8020  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
8021  *
8022  * Available since API level 27.
8023  *
8024  * @param event The event object to be destroyed. Passing NULL is acceptable and
8025  *              results in no operation.
8026  */
8027 void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27);
8028 
8029 #endif  // __ANDROID_API__ >= 27
8030 
8031 #if __ANDROID_API__ >= 30
8032 /**
8033  * Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor.
8034  *
8035  * The newly created ANeuralNetworksEvent does not take ownership of the provided sync_fence_fd,
8036  * it will instead dup the provided sync_fence_fd and own the duplicate.
8037  *
8038  * @param sync_fence_fd The sync_fence file descriptor.
8039  * @param event The newly created object or NULL if unsuccessful.
8040  *
8041  * @return ANEURALNETWORKS_NO_ERROR if successful.
8042  *
8043  * Available since API level 30.
8044  */
8045 int ANeuralNetworksEvent_createFromSyncFenceFd(int sync_fence_fd, ANeuralNetworksEvent** event)
8046         __INTRODUCED_IN(30);
8047 
8048 /**
8049  * Get sync_fence file descriptor from the event.
8050  *
8051  * If the ANeuralNetworksEvent is not backed by a sync fence, the sync_fence_fd
8052  * will be set to -1, and ANEURALNETWORKS_BAD_DATA will be returned.
8053  *
8054  * See {@link ANeuralNetworksEvent_createFromSyncFenceFd} and
8055  * {@link ANeuralNetworksExecution_startComputeWithDependencies} to see how to create
8056  * an event backed by a sync fence.
8057  *
8058  * The user takes ownership of the returned fd, and must close the returned file descriptor when
8059  * it is no longer needed.
8060  *
8061  * @param event An event that is backed by a sync fence.
8062  * @param sync_fence_fd The sync_fence file descriptor. The file descriptor will
8063  *                      be set to -1 if there is an error.
8064  *
8065  * @return ANEURALNETWORKS_NO_ERROR if successful.
8066  *
8067  * Available since API level 30.
8068  */
8069 int ANeuralNetworksEvent_getSyncFenceFd(const ANeuralNetworksEvent* event, int* sync_fence_fd)
8070         __INTRODUCED_IN(30);
8071 
8072 /**
8073  * Schedule asynchronous evaluation of the execution with dependencies.
8074  *
8075  * The execution will wait for all the depending events to be signaled before
8076  * starting the evaluation. Once the execution has completed and the outputs
8077  * are ready to be consumed, the returned event will be signaled. Depending on which
8078  * devices are handling the execution, the event could be backed by a sync fence.
8079  * Use {@link ANeuralNetworksEvent_wait} to wait for that event.
8080  *
8081  * ANeuralNetworksEvent_wait must be called to recurperate the resources used
8082  * by the execution.
8083  *
8084  * If parts of the execution are scheduled on devices that do not support fenced execution,
8085  * the function call may wait for such parts to finish before returning.
8086  *
8087  * The function will return an error if any of the events in dependencies is already in a bad
8088  * state. After the execution is scheduled, if any of the events in dependencies does not complete
8089  * normally, the execution will fail, and {@link ANeuralNetworksEvent_wait} on the returned
8090  * event will return an error.
8091  *
8092  * The function will return an error if any of the execution outputs has a tensor operand type
8093  * that is not fully specified.
8094  *
8095  * The function can be passed a timeout duration in nanoseconds. This timeout
8096  * duration acts as a hint to drivers in the same way that the timeout durations
8097  * in {@link ANeuralNetworksCompilation_setTimeout} and {@link
8098  * ANeuralNetworksExecution_setTimeout} act as hints to drivers. The duration
8099  * begins when all waitFor sync fences have been signaled, and can be used
8100  * together with {@link ANeuralNetworksExecution_setTimeout} which specifies the
8101  * maximum timeout duration beginning at the call to
8102  * {@link ANeuralNetworksExecution_startComputeWithDependencies}.
8103  * If the duration is non-zero, the {@link ANeuralNetworksExecution} must have been created
8104  * from an {@link ANeuralNetworksCompilation} which in turn was created from
8105  * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
8106  * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either
8107  * the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the
8108  * timeout duration passed to this call is exceeded, the execution may be
8109  * aborted, in which case {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be
8110  * returned through {@link ANeuralNetworksExecution_startComputeWithDependencies}
8111  * or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
8112  * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
8113  * is lower than 30, then the timeout duration hints will be ignored.
8114  *
8115  * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
8116  * the condition model does not output false within the loop timeout duration,
8117  * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*}
8118  * will be returned through {@link ANeuralNetworksEvent_wait} on the event
8119  * object.
8120  *
8121  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
8122  *
8123  * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
8124  * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
8125  * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
8126  *
8127  * @param execution The execution to be scheduled and executed.
8128  * @param dependencies A set of depending events. The actual evaluation will not start
8129  *                     until all the events are signaled.
8130  * @param num_dependencies The number of events in the dependencies set.
8131  * @param duration The maximum amount of time in nanoseconds that is expected to
8132  *                 be spent executing the model after all dependencies are
8133  *                 signaled. If set to 0, the timeout duration is considered
8134  *                 infinite.
8135  * @param event The event that will be signaled on completion. event is set to
8136  *              NULL if there's an error.
8137  *
8138  * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
8139  *
8140  * Available since API level 30.
8141  */
8142 int ANeuralNetworksExecution_startComputeWithDependencies(
8143         ANeuralNetworksExecution* execution, const ANeuralNetworksEvent* const* dependencies,
8144         uint32_t num_dependencies, uint64_t duration, ANeuralNetworksEvent** event)
8145         __INTRODUCED_IN(30);
8146 
8147 #endif  // __ANDROID_API__ >= 30
8148 
8149 __END_DECLS
8150 
8151 #endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
8152 
8153 /** @} */
8154