1 /*
2                                     __
3                                    / _|
4   __ _ _   _ _ __ ___  _ __ __ _  | |_ ___  ___ ___
5  / _` | | | | '__/ _ \| '__/ _` | |  _/ _ \/ __/ __|
6 | (_| | |_| | | | (_) | | | (_| | | || (_) \__ \__ \
7  \__,_|\__,_|_|  \___/|_|  \__,_| |_| \___/|___/___/
8 
9 Copyright (C) 2018 The Android Open Source Project.
10 Copyright (C) 2018-2019 Aurora Free Open Source Software.
11 
This file is part of the Aurora Free Open Source Software. This
organization promotes free and open source software that you can
redistribute and/or modify under the terms of the GNU Lesser General
15 Public License Version 3 as published by the Free Software Foundation or
16 (at your option) any later version approved by the Aurora Free Open Source
17 Software Organization. The license is available in the package root path
18 as 'LICENSE' file. Please review the following information to ensure the
19 GNU Lesser General Public License version 3 requirements will be met:
20 https://www.gnu.org/licenses/lgpl.html .
21 
22 Alternatively, this file may be used under the terms of the GNU General
23 Public License version 3 or later as published by the Free Software
24 Foundation. Please review the following information to ensure the GNU
25 General Public License requirements will be met:
26 https://www.gnu.org/licenses/gpl-3.0.html.
27 
NOTE: All products, services or anything associated with trademarks and
service marks used or referenced in this file are the property of their
respective companies/owners or their subsidiaries. Other names and brands
may be claimed as the property of others.
32 
33 For more info about intellectual property visit: aurorafoss.org or
34 directly send an email to: contact (at) aurorafoss.org .
35 
This file contains bindings for existing code that is part of The Android Open
Source Project implementation. Check it out at android.googlesource.com.
38 */
39 
40 module aurorafw.android.platform.neuralnetworks;
41 
42 /**
43  * @addtogroup NeuralNetworks
44  * @{
45  */
46 
47 /**
48  * @file aurorafw/android/platform/neuralnetworks.d
49  */
50 
51 version (Android):
52 extern (C):
53 @system:
54 nothrow:
55 @nogc:
56 
57 /******************************************************************
58  *
59  * IMPORTANT NOTICE:
60  *
61  *   This file is part of Android's set of stable system headers
62  *   exposed by the Android NDK (Native Development Kit).
63  *
64  *   Third-party source AND binary code relies on the definitions
65  *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
66  *
67  *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
68  *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
69  *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
70  *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
71  */
72 
73 /**
74  * Operand types.
75  *
76  * The type of operands that can be added to a model.
77  *
 * Although we define many types, most operators accept just a few
 * types. The most commonly used are {@link ANEURALNETWORKS_TENSOR_FLOAT32},
80  * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
81  * and {@link ANEURALNETWORKS_INT32}.
82  *
83  * Available since API level 27.
84  */
85 enum OperandCode
86 {
87     /** A 32 bit floating point scalar value. */
88     ANEURALNETWORKS_FLOAT32 = 0,
89     /** A signed 32 bit integer scalar value. */
90     ANEURALNETWORKS_INT32 = 1,
91     /** An unsigned 32 bit integer scalar value. */
92     ANEURALNETWORKS_UINT32 = 2,
93 
94     /** A tensor of 32 bit floating point values. */
95     ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
96     /** A tensor of 32 bit integer values. */
97     ANEURALNETWORKS_TENSOR_INT32 = 4,
98     /**
99      * A tensor of 8 bit integers that represent real numbers.
100      *
101      * Attached to this tensor are two numbers that can be used to convert the
102      * 8 bit integer to the real value and vice versa. These two numbers are:
103      * - scale: a 32 bit floating point value greater than zero.
104      * - zeroPoint: a 32 bit integer, in range [0, 255].
105      *
106      * The formula is:
107      * real_value = (integer_value - zeroPoint) * scale.
108      */
109     ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5
110 }
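
/*
 * A minimal illustrative sketch (not part of the NDK header): converting
 * between the quantized 8 bit representation and real values for
 * ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, following the scale/zeroPoint formula
 * documented above. The helper names are hypothetical.
 *
 *     float dequantize(ubyte q, float scale, int zeroPoint) nothrow @nogc
 *     {
 *         return (cast(int) q - zeroPoint) * scale;
 *     }
 *
 *     ubyte quantize(float realValue, float scale, int zeroPoint) nothrow @nogc
 *     {
 *         import std.algorithm.comparison : clamp;
 *         import std.math : lround;
 *         immutable q = cast(int) lround(realValue / scale) + zeroPoint;
 *         return cast(ubyte) clamp(q, 0, 255);
 *     }
 */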
111 
112 /**
113  * Operation types.
114  *
115  * The type of operations that can be added to a model.
116  *
117  * Available since API level 27.
118  */
119 enum OperationCode
120 {
121     /**
122      * Adds two tensors, element-wise.
123      *
124      * Takes two input tensors of identical {@link OperandCode} and compatible
125      * dimensions. The output is the sum of both input tensors, optionally
126      * modified by an activation function.
127      *
128      * Two dimensions are compatible when:
129      *     1. they are equal, or
130      *     2. one of them is 1
131      *
132      * The size of the output is the maximum size along each dimension of the
133      * input operands. It starts with the trailing dimensions, and works its
134      * way forward.
135      *
136      * Example:
137      *
138      *     input1.dimension = {4, 1, 2}
139      *     input2.dimension = {5, 4, 3, 1}
140      *     output.dimension = {5, 4, 3, 2}
141      *
142      * Supported tensor {@link OperandCode}:
143      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
144      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
145      *
146      * Supported tensor rank: up to 4
147      *
148      * Inputs:
149      * * 0: A tensor.
150      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
151      *      as input0.
152      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
153      *      {@link FuseCode} values. Specifies the activation to
154      *      invoke on the result.
155      *
156      * Outputs:
157      * * 0: The sum, a tensor of the same {@link OperandCode} as input0.
158      *
159      * Available since API level 27.
160      */
161     ANEURALNETWORKS_ADD = 0,
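
    /*
     * A minimal illustrative sketch (not part of the NDK header): computing
     * the broadcast output shape used by ANEURALNETWORKS_ADD, aligning the
     * operands at their trailing dimensions. The helper name is hypothetical.
     *
     *     uint[] broadcastShape(const uint[] a, const uint[] b)
     *     {
     *         import std.algorithm.comparison : max;
     *         auto result = new uint[max(a.length, b.length)];
     *         foreach (i; 0 .. result.length)
     *         {
     *             // A dimension missing on one side behaves as size 1.
     *             immutable da = i < a.length ? a[$ - 1 - i] : 1;
     *             immutable db = i < b.length ? b[$ - 1 - i] : 1;
     *             assert(da == db || da == 1 || db == 1);
     *             result[$ - 1 - i] = max(da, db);
     *         }
     *         return result;
     *     }
     *
     *     // broadcastShape([4, 1, 2], [5, 4, 3, 1]) yields [5, 4, 3, 2].
     */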
162 
163     /**
164      * Performs a 2-D average pooling operation.
165      *
166      * The output dimensions are functions of the filter dimensions, stride, and
167      * padding.
168      *
169      * The values in the output tensor are computed as:
170      *
171      *     output[batch, row, col, channel] =
172      *         sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
173      *
174      * Supported tensor {@link OperandCode}:
175      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
176      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
177      *
178      * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
179      * and Channels) data layout.
180      *
181      * Both explicit padding and implicit padding are supported.
182      *
183      * Inputs (explicit padding):
184      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
185      *      the input.
186      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
187      *      the left, in the ‘width’ dimension.
188      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
189      *      the right, in the ‘width’ dimension.
190      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
191      *      the top, in the ‘height’ dimension.
192      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
193      *      the bottom, in the ‘height’ dimension.
194      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
195      *      walking through input in the ‘width’ dimension.
196      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
197      *      walking through input in the ‘height’ dimension.
198      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
199      *      width.
200      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
201      *      height.
202      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
203      *      {@link FuseCode} values. Specifies the activation to
204      *      invoke on the result.
205      *
206      * Inputs (implicit padding):
207      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
208      *      the input.
209      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
210      *      padding scheme, has to be one of the
211      *      {@link PaddingCode} values.
212      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
213      *      walking through input in the ‘width’ dimension.
214      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
215      *      walking through input in the ‘height’ dimension.
216      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
217      *      width.
218      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
219      *      height.
220      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
221      *      {@link FuseCode} values. Specifies the activation to
222      *      invoke on the result.
223      *
224      * Outputs:
225      * * 0: The output 4-D tensor, of shape
226      *      [batches, out_height, out_width, depth].
227      *
228      * Available since API level 27.
229      */
230     ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
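
    /*
     * A minimal illustrative sketch (not part of the NDK header): the usual
     * relation between the output spatial size and the explicit padding,
     * filter and stride inputs listed above. The helper name is hypothetical
     * and the rounding behaviour is an assumption of this sketch.
     *
     *     uint pooledSize(uint inSize, uint padBefore, uint padAfter,
     *                     uint filter, uint stride) nothrow @nogc
     *     {
     *         return (inSize + padBefore + padAfter - filter) / stride + 1;
     *     }
     *
     *     // out_height = pooledSize(height, padding_top, padding_bottom,
     *     //                         filter_height, stride_height); out_width
     *     // is computed the same way along the 'width' dimension.
     */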
231 
232     /**
233      * Concatenates the input tensors along the given dimension.
234      *
235      * The input tensors must have identical {@link OperandCode} and the same
236      * dimensions except the dimension along the concatenation axis.
237      *
238      * Supported tensor {@link OperandCode}:
239      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
240      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
241      *
242      * Supported tensor rank: up to 4
243      *
244      * Inputs:
245      * * 0 ~ n-1: The list of n input tensors, of shape
246      *            [D0, D1, ..., Daxis(i), ..., Dm]. For inputs of
247      *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, all input tensors
248      *            must have the same scale and zeroPoint.
249      * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
250      *      concatenation axis.
251      *
252      * Outputs:
253      * * 0: The output, a tensor of the same {@link OperandCode} as the input
254      *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
255      *
256      * Available since API level 27.
257      */
258     ANEURALNETWORKS_CONCATENATION = 2,
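
    /*
     * Illustrative example (not part of the NDK header): the output shape of
     * a concatenation is the common input shape with the sizes along the
     * concatenation axis summed.
     *
     *     // inputs: [2, 3, 4] and [2, 5, 4], axis = 1
     *     // output: [2, 3 + 5, 4] == [2, 8, 4]
     */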
259 
260     /**
     * Performs a 2-D convolution operation.
262      *
263      * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
264      * batch of images, applying the filter to each window of each image of the
265      * appropriate size.
266      *
267      * The output dimensions are functions of the filter dimensions, stride, and
268      * padding.
269      *
270      * The values in the output tensor are computed as:
271      *
272      *     output[batch, row, col, channel] =
273      *         sum_{i, j} (
274      *             input[batch, row + i, col + j, k] *
275      *             filter[channel, row + i, col + j, k] +
276      *             bias[channel]
277      *         )
278      *
279      * Supported tensor {@link OperandCode}:
280      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
281      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
282      *
283      * Supported tensor rank: 4, with "NHWC" data layout.
284      *
285      * Both explicit padding and implicit padding are supported.
286      *
287      * Inputs (explicit padding):
288      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
289      *      specifying the input.
290      * * 1: A 4-D tensor, of shape
291      *      [depth_out, filter_height, filter_width, depth_in], specifying the
292      *      filter.
293      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
294      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias
295      *      should also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input
296      *      tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias
297      *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
298      *      0 and bias_scale == input_scale * filter_scale.
299      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
300      *      the left, in the ‘width’ dimension.
301      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
302      *      the right, in the ‘width’ dimension.
303      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
304      *      the top, in the ‘height’ dimension.
305      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
306      *      the bottom, in the ‘height’ dimension.
307      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
308      *      walking through input in the ‘width’ dimension.
309      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
310      *      walking through input in the ‘height’ dimension.
311      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
312      *      {@link FuseCode} values. Specifies the activation to
313      *      invoke on the result.
314      *
315      * Inputs (implicit padding):
316      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
317      *      specifying the input.
318      * * 1: A 4-D tensor, of shape
319      *      [depth_out, filter_height, filter_width, depth_in], specifying the
320      *      filter.
321      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
322      *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
323      *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
324      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
325      *      of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
326      *      bias_scale == input_scale * filter_scale.
327      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
328      *      padding scheme, has to be one of the
329      *      {@link PaddingCode} values.
330      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
331      *      walking through input in the ‘width’ dimension.
332      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
333      *      walking through input in the ‘height’ dimension.
334      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
335      *      {@link FuseCode} values. Specifies the activation to
336      *      invoke on the result.
337      *
338      * Outputs:
339      * * 0: The output 4-D tensor, of shape
340      *      [batches, out_height, out_width, depth_out]. For output tensor of
341      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition
342      *      must be satisfied: output_scale > input_scale * filter_scale.
343      *
344      * Available since API level 27.
345      */
346     ANEURALNETWORKS_CONV_2D = 3,
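
    /*
     * A minimal illustrative sketch (not part of the NDK header): the
     * quantization constraints stated above for
     * ANEURALNETWORKS_TENSOR_QUANT8_ASYMM inputs, expressed as a check. The
     * helper name is hypothetical.
     *
     *     bool quantParamsValid(float inputScale, float filterScale,
     *                           float biasScale, float outputScale) nothrow @nogc
     *     {
     *         // bias_scale == input_scale * filter_scale, and
     *         // output_scale > input_scale * filter_scale.
     *         return biasScale == inputScale * filterScale
     *             && outputScale > inputScale * filterScale;
     *     }
     */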
347 
348     /**
349      * Performs a depthwise 2-D convolution operation.
350      *
351      * Given an input tensor of shape [batches, height, width, depth_in] and a
352      * filter tensor of shape [1, filter_height, filter_width, depth_out]
353      * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
354      * applies a different filter to each input channel (expanding from 1
355      * channel to channel_multiplier channels for each), then concatenates the
356      * results together.
357      *
358      * The output has depth_out = depth_in * depth_multiplier channels.
359      * The output dimensions are functions of the filter dimensions, stride, and
360      * padding.
361      *
362      * The values in the output tensor are computed as:
363      *
364      *     output[b, i, j, k * channel_multiplier + q] =
365      *         sum_{di, dj} (
366      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
367      *             filter[1, di, dj, k * channel_multiplier + q]
368      *         )
369      *
370      * Supported tensor {@link OperandCode}:
371      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
372      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
373      *
374      * Supported tensor rank: 4, with "NHWC" data layout.
375      *
376      * Both explicit padding and implicit padding are supported.
377      *
378      * Inputs (explicit padding):
379      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
380      *      specifying the input.
381      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
382      *      specifying the filter.
383      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
384      *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
385      *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
386      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
387      *      of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
388      *      bias_scale == input_scale * filter_scale.
389      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
390      *      the left, in the ‘width’ dimension.
391      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
392      *      the right, in the ‘width’ dimension.
393      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
394      *      the top, in the ‘height’ dimension.
395      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
396      *      the bottom, in the ‘height’ dimension.
397      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
398      *      walking through input in the ‘width’ dimension.
399      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
400      *      walking through input in the ‘height’ dimension.
401      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
402      *      multiplier.
403      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
404      *       {@link FuseCode} values. Specifies the activation to
405      *       invoke on the result.
406      *
407      * Inputs (implicit padding):
408      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
409      *      specifying the input.
410      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
411      *      specifying the filter.
412      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
413      *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
414      *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
415      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
416      *      of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
417      *      bias_scale == input_scale * filter_scale.
418      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
419      *      padding scheme, has to be one of the
420      *      {@link PaddingCode} values.
421      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
422      *      walking through input in the ‘width’ dimension.
423      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
424      *      walking through input in the ‘height’ dimension.
425      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
426      *      multiplier.
427      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
428      *      {@link FuseCode} values. Specifies the activation to
429      *      invoke on the result.
430      *
431      * Outputs:
432      * * 0: The output 4-D tensor, of shape
433      *      [batches, out_height, out_width, depth_out]. For output tensor of
434      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition
435      *      must be satisfied: output_scale > input_scale * filter_scale.
436      *
437      * Available since API level 27.
438      */
439     ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
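
    /*
     * Illustrative example (not part of the NDK header): the channel
     * relationship for the depthwise convolution described above.
     *
     *     // depth_out = depth_in * depth_multiplier
     *     // e.g. depth_in = 8 with a depthwise multiplier of 3 uses a filter
     *     // of shape [1, filter_height, filter_width, 24] and produces an
     *     // output with 24 channels.
     */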
440 
441     /**
442      * Rearranges data from depth into blocks of spatial data.
443      *
444      * More specifically, this op outputs a copy of the input tensor where
445      * values from the depth dimension are moved in spatial blocks to the height
446      * and width dimensions. The value block_size indicates the input block size
447      * and how the data is moved.
448      *
449      * Chunks of data of size block_size * block_size from depth are rearranged
450      * into non-overlapping blocks of size block_size x block_size.
451      *
     * The width of the output tensor is input_width * block_size, whereas the
     * height is input_height * block_size. The depth of the input tensor must
     * be divisible by block_size * block_size.
455      *
456      * Supported tensor {@link OperandCode}:
457      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
458      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
459      *
460      * Supported tensor rank: 4, with "NHWC" data layout.
461      *
462      * Inputs:
463      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
464      *      specifying the input.
465      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
466      *      block_size must be >=1 and block_size * block_size must be a divisor
467      *      of the input depth.
468      *
469      * Outputs:
470      * * 0: The output 4-D tensor, of shape [batch, height*block_size,
471      *      width*block_size, depth/(block_size*block_size)].
472      *
473      * Available since API level 27.
474      */
475     ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
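
    /*
     * Illustrative example (not part of the NDK header): the shape transform
     * performed by DEPTH_TO_SPACE.
     *
     *     // input:  [batches, height, width, depth], block_size = 2
     *     // output: [batches, height * 2, width * 2, depth / 4]
     *     // e.g. [1, 4, 4, 16] with block_size = 2 becomes [1, 8, 8, 4].
     */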
476 
477     /**
478      * Dequantizes the input tensor.
479      *
480      * The formula is:
481      *
482      *     output = (input - zeroPoint) * scale.
483      *
484      * Supported tensor {@link OperandCode}:
485      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
486      *
487      * Supported tensor rank: up to 4
488      *
489      * Inputs:
490      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}.
491      *
492      * Outputs:
493      * * 0: The output tensor of same shape as input0, but with
494      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
495      *
496      * Available since API level 27.
497      */
498     ANEURALNETWORKS_DEQUANTIZE = 6,
499 
500     /**
501      * Looks up sub-tensors in the input tensor.
502      *
     * This operator takes as input a tensor of values (Values) and
504      * a one-dimensional tensor of selection indices (Lookups).
505      * The output tensor is the concatenation of sub-tensors of Values as
506      * selected by Lookups.
507      *
508      * Think of Values as being sliced along its first dimension:
509      * The entries in Lookups select which slices are concatenated together
510      * to create the output tensor.
511      *
512      * For example, if Values has shape of [40, 200, 300] and
513      * Lookups has shape of [3], all three values found in Lookups are
514      * expected to be between 0 and 39. The resulting tensor must
515      * have shape of [3, 200, 300].
516      *
517      * If a value in Lookups is out of bounds, the operation must fail
518      * and an error must be reported.
519      *
520      * Inputs:
521      * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
522      *      The values are indices into the first dimension of Values.
523      * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
524      *      extracted.
525      *
526      * Output:
     * * 0: An n-D tensor with the same rank and shape as the Values
528      *      tensor, except for the first dimension which has the same size
529      *      as Lookups' only dimension.
530      *
531      * Available since API level 27.
532      */
533     ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
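
    /*
     * A minimal illustrative sketch (not part of the NDK header): the gather
     * semantics of EMBEDDING_LOOKUP along the first dimension of Values,
     * shown here for a 2-D Values tensor. The helper name is hypothetical.
     *
     *     float[][] embeddingLookup(const int[] lookups, const float[][] values)
     *     {
     *         auto output = new float[][](lookups.length);
     *         foreach (i, index; lookups)
     *         {
     *             // An out-of-bounds index must make the operation fail.
     *             assert(index >= 0 && index < values.length);
     *             output[i] = values[index].dup;
     *         }
     *         return output;
     *     }
     */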
534 
535     /**
536      * Computes element-wise floor() on the input tensor.
537      *
538      * Supported tensor {@link OperandCode}:
539      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
540      *
541      * Supported tensor rank: up to 4
542      *
543      * Inputs:
544      * * 0: A tensor.
545      *
546      * Outputs:
547      * * 0: The output tensor, of the same {@link OperandCode} and dimensions as
548      *      the input tensor.
549      *
550      * Available since API level 27.
551      */
552     ANEURALNETWORKS_FLOOR = 8,
553 
554     /**
555      * Denotes a fully (densely) connected layer, which connects all elements
556      * in the input tensor with each element in the output tensor.
557      *
558      * This layer implements the operation:
559      *
560      *     outputs = activation(inputs * weights’ + bias)
561      *
562      * Supported tensor {@link OperandCode}:
563      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
564      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
565      *
566      * Supported tensor rank: up to 4.
567      *
568      * Inputs:
569      * * 0: A tensor of at least rank 2, specifying the input. If rank is
570      *      greater than 2, then it gets flattened to a 2-D Tensor. The
571      *      (flattened) 2-D Tensor is reshaped (if necessary) to
572      *      [batch_size, input_size], where "input_size" corresponds to the
573      *      number of inputs to the layer, matching the second dimension of
574      *      weights, and "batch_size" is calculated by dividing the number of
575      *      elements by "input_size".
576      * * 1: A 2-D tensor, specifying the weights, of shape
577      *      [num_units, input_size], where "num_units" corresponds to the number
578      *      of output nodes.
579      * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
580      *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
581      *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor
582      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
583      *      of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
584      *      bias_scale == input_scale * filter_scale.
585      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
586      *      {@link FuseCode} values. Specifies the activation to
587      *      invoke on the result.
588      *
589      * Outputs:
590      * * 0: The output tensor, of shape [batch_size, num_units]. For output
591      *      tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
592      *      condition must be satisfied:
593      *      output_scale > input_scale * filter_scale.
594      *
595      * Available since API level 27.
596      */
597     ANEURALNETWORKS_FULLY_CONNECTED = 9,
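
    /*
     * A minimal illustrative sketch (not part of the NDK header): the
     * per-batch-row computation outputs = activation(inputs * weights' + bias)
     * with weights stored as [num_units, input_size]. Names are hypothetical.
     *
     *     void fullyConnected(const float[] input,     // [input_size]
     *                         const float[][] weights, // [num_units][input_size]
     *                         const float[] bias,      // [num_units]
     *                         float[] output)          // [num_units]
     *     {
     *         foreach (unit; 0 .. weights.length)
     *         {
     *             float acc = bias[unit];
     *             foreach (i; 0 .. input.length)
     *                 acc += input[i] * weights[unit][i];
     *             // The FuseCode activation would be applied to acc here.
     *             output[unit] = acc;
     *         }
     *     }
     */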
598 
599     /**
600      * Looks up sub-tensors in the input tensor using a key-value map.
601      *
     * This operator takes as input a tensor of values (Values),
603      * a one-dimensional tensor of selection values (Lookups) and
604      * a one-dimensional tensor that maps these values to Values
605      * indexes. The output tensor is the concatenation of sub-tensors of
606      * Values as selected by Lookups via Keys.
607      *
608      * Think of Values as being sliced along its outer-most dimension.
609      * The output is a concatenation of selected slices, with one slice
610      * for each entry of Lookups. The slice selected is the one at the
611      * same index as the Maps entry that matches the value in Lookups.
612      *
613      * For a hit, the corresponding sub-tensor of Values is included
614      * in the Output tensor. For a miss, the corresponding sub-tensor in
615      * Output must have zero values.
616      *
617      * For example, if Values has shape of [40, 200, 300],
618      * Keys should have a shape of [40]. If Lookups tensor has shape
619      * of [3], three slices are being concatenated, so the resulting tensor
620      * must have the shape of [3, 200, 300]. If the first entry in Lookups
621      * has the value 123456, that value must be located in Keys tensor.
622      * If the sixth entry of Keys contains 123456, the sixth slice of Values
623      * must be selected. If no entry in Keys has 123456, a slice of zeroes
624      * must be concatenated.
625      *
626      * Inputs:
627      * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with
628      *      shape [ k ].
629      * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
630      *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
631      *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
632      *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
633      *      ascending order.
634      * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
635      *      must be n.
636      *
637      * Outputs:
638      * * 0: Output. A tensor with shape [ k …].
     * * 1: Hits. A boolean tensor with shape [ k ] indicating whether the
     *      lookup hits (True) or not (False).
641      *      Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0
642      *      and scale 1.0f.
643      *      A non-zero byte represents True, a hit. A zero indicates otherwise.
644      *
645      * Available since API level 27.
646      */
647     ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
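
    /*
     * A minimal illustrative sketch (not part of the NDK header): the
     * key-based lookup performed by HASHTABLE_LOOKUP, including the Hits
     * output, shown for a 2-D Values tensor. Names are hypothetical; a real
     * implementation can binary-search the sorted Keys tensor.
     *
     *     void hashtableLookup(const int[] lookups, const int[] keys,
     *                          const float[][] values,
     *                          float[][] output, ubyte[] hits)
     *     {
     *         import std.algorithm.searching : countUntil;
     *         foreach (i, lookup; lookups)
     *         {
     *             immutable pos = keys.countUntil(lookup);
     *             if (pos >= 0)
     *             {
     *                 output[i] = values[pos].dup;
     *                 hits[i] = 1; // hit
     *             }
     *             else
     *             {
     *                 output[i][] = 0.0f; // miss: zero-filled sub-tensor
     *                 hits[i] = 0;
     *             }
     *         }
     *     }
     */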
648 
649     /**
650      * Applies L2 normalization along the depth dimension.
651      *
652      * The values in the output tensor are computed as:
653      *
654      *     output[batch, row, col, channel] =
655      *         input[batch, row, col, channel] /
656      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
657      *
658      * For input tensor with more dimensions, independently normalizes each 1-D
659      * slice along dimension dim.
660      *
661      * Supported tensor {@link OperandCode}:
662      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
663      *
664      * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples,
665      * Height, Width, and Channels).
666      *
667      * Inputs:
668      * * 0: A 4-D tensor, of shape [batches, height, width, depth].
669      *
670      * Outputs:
671      * * 0: The output 4-D tensor, of the same shape as input
672      *      [batches, height, width, depth].
673      *
674      * Available since API level 27.
675      */
676     ANEURALNETWORKS_L2_NORMALIZATION = 11,
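
    /*
     * A minimal illustrative sketch (not part of the NDK header): normalizing
     * one [batch, row, col, :] vector along the depth dimension as described
     * above. The helper name is hypothetical.
     *
     *     void l2Normalize(float[] depthSlice) nothrow @nogc
     *     {
     *         import std.math : sqrt;
     *         float sumSq = 0.0f;
     *         foreach (v; depthSlice)
     *             sumSq += v * v;
     *         immutable norm = sqrt(sumSq);
     *         foreach (ref v; depthSlice)
     *             v /= norm;
     *     }
     */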
677 
678     /**
     * Performs a 2-D L2 pooling operation.
680      *
681      * The output dimensions are functions of the filter dimensions, stride, and
682      * padding.
683      *
684      * The values in the output tensor are computed as:
685      *
686      *     output[batch, row, col, channel] =
687      *         sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) /
688      *              sum(1))
689      *
690      * Supported tensor {@link OperandCode}:
691      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
692      *
693      * Supported tensor rank: 4, with "NHWC" data layout.
694      *
695      * Both explicit padding and implicit padding are supported.
696      *
697      * Inputs (explicit padding):
698      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
699      *      the input.
700      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
701      *      the left, in the ‘width’ dimension.
702      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
703      *      the right, in the ‘width’ dimension.
704      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
705      *      the top, in the ‘height’ dimension.
706      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
707      *      the bottom, in the ‘height’ dimension.
708      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
709      *      walking through input in the ‘width’ dimension.
710      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
711      *      walking through input in the ‘height’ dimension.
712      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
713      *      width.
714      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
715      *      height.
716      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
717      *      {@link FuseCode} values. Specifies the activation to
718      *      invoke on the result.
719      *
720      * Inputs (implicit padding):
721      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
722      *      the input.
723      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
724      *      padding scheme, has to be one of the
725      *      {@link PaddingCode} values.
726      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
727      *      walking through input in the ‘width’ dimension.
728      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
729      *      walking through input in the ‘height’ dimension.
730      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
731      *      width.
732      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
733      *      height.
734      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
735      *      {@link FuseCode} values. Specifies the activation to
736      *      invoke on the result.
737      *
738      * Outputs:
739      * * 0: The output 4-D tensor, of shape
740      *      [batches, out_height, out_width, depth].
741      *
742      * Available since API level 27.
743      */
744     ANEURALNETWORKS_L2_POOL_2D = 12,
745 
746     /**
747      * Applies Local Response Normalization along the depth dimension.
748      *
749      * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
750      * last dimension), and each vector is normalized independently. Within a
751      * given vector, each component is divided by the weighted, squared sum of
752      * inputs within depth_radius.
753      *
754      * The output is calculated using this formula:
755      *
756      *     sqr_sum[a, b, c, d] = sum(
757      *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
758      *     output = input / pow((bias + alpha * sqr_sum), beta)
759      *
760      * Supported tensor {@link OperandCode}:
761      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
762      *
763      * Supported tensor rank: 4, with "NHWC" data layout.
764      *
765      * Inputs:
766      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
767      *      the input.
768      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of
769      *      the normalization window.
770      * * 2: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the bias, must
771      *      not be zero.
772      * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scale
773      *      factor, alpha.
774      * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the exponent,
775      *      beta.
776      *
777      * Outputs:
778      * * 0: The output tensor of same shape as input0.
779      *
780      * Available since API level 27.
781      */
782     ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
783 
784     /**
785      * Computes sigmoid activation on the input tensor element-wise.
786      *
787      * The output is calculated using this formula:
788      *
789      *     output = 1 / (1 + exp(-input))
790      *
791      * Supported tensor {@link OperandCode}:
792      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
793      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
794      *
795      * Supported tensor rank: up to 4.
796      *
797      * Inputs:
798      * * 0: A tensor, specifying the input.
799      *
800      * Outputs:
801      * * 0: The output tensor of same shape as input0.
802      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
803      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
804      *
805      * Available since API level 27.
806      */
807     ANEURALNETWORKS_LOGISTIC = 14,
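
    /*
     * A minimal illustrative sketch (not part of the NDK header): the
     * element-wise sigmoid, plus a note on the fixed quantization parameters
     * required for the quantized output (scale 1.f / 256, zeroPoint 0). The
     * helper name is hypothetical.
     *
     *     float logistic(float x) nothrow @nogc
     *     {
     *         import std.math : exp;
     *         return 1.0f / (1.0f + exp(-x));
     *     }
     *
     *     // With scale 1.f / 256 and zeroPoint 0, the quantized output maps
     *     // the sigmoid range [0, 1) onto the 8 bit values 0 .. 255.
     */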
808 
809     /**
     * Projects an input to a bit vector via locality-sensitive hashing.
811      *
812      * Inputs:
813      * * 0: Hash functions. Dim.size == 2, DataType: Float.
814      *            Tensor[0].Dim[0]: Number of hash functions.
     *            Tensor[0].Dim[1]: Number of seeds per hash function.
816      *            Tensor[0].Dim[1] <= 32 in sparse case.
817      *
818      * * 1: Input. Dim.size >= 1, no restriction on DataType.
819      * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
820      *     If not set, each input element is considered to have the same weight
821      *     of 1.0.
822      *     Tensor[1].Dim[0] == Tensor[2].Dim[0]
823      * * 3: Type:
824      *        Sparse: Value LSHProjectionType_SPARSE(=1).
825      *          Computed bit vector is considered to be sparse.
826      *          Each output element is an int32 made up of multiple bits
827      *          computed from hash functions.
828      *
829      *        Dense: Value LSHProjectionType_DENSE(=2).
830      *          Computed bit vector is considered to be dense. Each output
831      *          element represents a bit and can take the value of either
832      *          0 or 1.
833      *
834      * Outputs:
835      * * 0: If the projection type is sparse:
836      *        Output.Dim == { Tensor[0].Dim[0] }
837      *        A tensor of int32 that represents hash signatures.
838      *      If the projection type is Dense:
839      *        Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
840      *        A flattened tensor that represents projected bit vectors.
841      *
842      * Available since API level 27.
843      */
844     ANEURALNETWORKS_LSH_PROJECTION = 15,
845 
846     /**
     * Performs a single time step in a Long Short-Term Memory (LSTM) layer.
848      *
849      * The LSTM operation is described by the following equations.
850      *
851      * \f{eqnarray*}{
852      * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
853      * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
854      * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
855      *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
856      * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
857      *      & & \\
858      *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
859      *      & if\ there\ is\ a\ projection; \\
860      * h_t =& & \\
861      *      & o_t \odot g(C_t) & otherwise. \\
862      * \f}
863      * Where:
864      * * \f$x_t\f$ is the input,
865      * * \f$i_t\f$ is the input gate,
866      * * \f$f_t\f$ is the forget gate,
867      * * \f$C_t\f$ is the cell state,
868      * * \f$o_t\f$ is the output,
869      * * \f$h_t\f$ is the output state,
870      * * \f$\sigma\f$ is the logistic sigmoid function,
871      * * \f$g\f$ is the cell input and cell output activation function, usually
     *   \f$tanh\f$,
873      * * \f$W_{xi}\f$ is the input-to-input weight matrix,
874      * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
875      * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
876      * * \f$b_i\f$ is the input gate bias,
877      * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
878      * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
879      * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
880      * * \f$b_f\f$ is the forget gate bias,
881      * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
882      * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
883      * * \f$b_c\f$ is the cell bias,
884      * * \f$W_{xo}\f$ is the input-to-output weight matrix,
885      * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
886      * * \f$W_{co}\f$ is the cell-to-output weight matrix,
887      * * \f$b_o\f$ is the output gate bias,
888      * * \f$W_{proj}\f$ is the projection weight matrix,
889      * * \f$b_{proj}\f$ is the projection bias,
890      * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
891      * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
892      * * \f$\odot\f$ is the
893      *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
894      *   Hadamard product</a> that takes two matrices and produces another
895      *   matrix, each element of which is the product of the corresponding
896      *   elements of the input matrices.
897      *
898      * The operation has the following independently optional inputs:
899      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
900      *   (\f$W_{hi}\f$), cell-to-input (\f$W_{ci}\f$) weights, and input gate
901      *   bias (\f$b_i\f$) either all have values, or none of them have values
902      *   (i.e., all set to null). If they have no values, coupling of input and
903      *   forget gates (CIFG) is used, in which case the input gate (\f$i_t\f$)
904      *   is calculated using the following equation instead.
905      *   \f{eqnarray*}{
906      *   i_t = 1 - f_t
907      *   \f}
908      * * The cell-to-forget weights (\f$W_{cf}\f$) and cell-to-output weights
909      *   (\f$W_{co}\f$) either both have values or neither of them have values.
910      *   If they have values, the peephole optimization is used. Additionally,
911      *   if CIFG is not used, cell-to-input weights (\f$W_{ci}\f$) is also
912      *   required to have values for peephole optimization.
     * * The projection weights (\f$W_{proj}\f$) are required only for the
     *   recurrent projection layer, and should otherwise have no value.
     * * The projection bias (\f$b_{proj}\f$) may (but need not) have a value
     *   if the recurrent projection layer exists, and should otherwise have
     *   no value.
918      *
919      * References:
920      *
921      * The default non-peephole non-CIFG implementation is based on:
922      * http://www.bioinf.jku.at/publications/older/2604.pdf
923      * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
924      * Computation, 9(8):1735-1780, 1997.
925      *
926      * The peephole implementation and projection layer is based on:
927      * https://research.google.com/pubs/archive/43905.pdf
928      * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
929      * recurrent neural network architectures for large scale acoustic
930      * modeling." INTERSPEECH, 2014.
931      * (However, the concept of peephole optimization was introduced in work
932      * prior to this paper.)
933      *
934      * The coupling of input and forget gate (CIFG) is based on:
935      * http://arxiv.org/pdf/1503.04069.pdf
936      * Greff et al. "LSTM: A Search Space Odyssey"
937      *
938      * Supported tensor {@link OperandCode}:
939      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
940      *
941      * Inputs:
942      * * 0: The input (\f$x_t\f$).
943      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
944      *      [batch_size, input_size], where “batch_size” corresponds to the
945      *      batching dimension, and “input_size” is the size of the input.
946      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
947      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
948      *      [num_units, input_size], where “num_units” corresponds to the
949      *      number of cell units.
950      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
951      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
952      *      [num_units, input_size].
953      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
954      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
955      *      [num_units, input_size].
956      * * 4: The input-to-output weights (\f$W_{xo}\f$).
957      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
958      *      [num_units, input_size].
959      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
960      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
961      *      [num_units, output_size], where “output_size” corresponds to either
962      *      the number of cell units (i.e., “num_units”), or the second
963      *      dimension of the “projection_weights”, if defined.
964      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
965      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
966      *      [num_units, output_size].
967      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
968      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
969      *      [num_units, output_size].
970      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
971      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
972      *      [num_units, output_size].
973      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
974      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
975      *      [num_units].
976      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
977      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
978      *      [num_units].
979      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
980      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
981      *      [num_units].
982      * * 12:The input gate bias (\f$b_i\f$). Optional.
983      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
984      *      [num_units].
985      * * 13:The forget gate bias (\f$b_f\f$).
986      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
987      *      [num_units].
988      * * 14:The cell bias (\f$b_c\f$).
989      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
990      *      [num_units].
991      * * 15:The output gate bias (\f$b_o\f$).
992      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
993      *      [num_units].
994      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
995      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
996      *      [output_size, num_units].
997      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
998      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
999      *      [output_size].
1000      * * 18:The output state (in) (\f$h_{t-1}\f$).
1001      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1002      *      [batch_size, output_size].
1003      * * 19:The cell state (in) (\f$C_{t-1}\f$).
1004      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1005      *      [batch_size, num_units].
1006      * * 20:The activation function (\f$g\f$).
1007      *      A value indicating the activation function:
1008      *      <ul>
1009      *      <li>0: None;
1010      *      <li>1: Relu;
1011      *      <li>3: Relu6;
1012      *      <li>4: Tanh;
1013      *      <li>6: Sigmoid.
1014      *      </ul>
1015      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1016      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1017      *      then clipping is disabled.
1018      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1019      *      projection layer, such that values are bound within
1020      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1021      *
1022      * Outputs:
1023      * * 0: The scratch buffer.
1024      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1025      *      [batch_size, num_units * 4] with CIFG, or
1026      *      [batch_size, num_units * 3] without CIFG.
1027      * * 1: The output state (out) (\f$h_t\f$).
1028      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1029      *      [batch_size, output_size].
1030      * * 2: The cell state (out) (\f$C_t\f$).
1031      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1032      *      [batch_size, num_units].
1033      * * 3: The output (\f$o_t\f$).
1034      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1035      *      [batch_size, output_size]. This is effectively the same as the
1036      *      current “output state (out)” value.
1037      *
1038      * Available since API level 27.
1039      */
1040     ANEURALNETWORKS_LSTM = 16,
1041 
1042     /**
     * Performs a 2-D max pooling operation.
1044      *
1045      * The output dimensions are functions of the filter dimensions, stride, and
1046      * padding.
1047      *
1048      * The values in the output tensor are computed as:
1049      *
1050      *     output[batch, row, col, channel] =
1051      *         max_{i, j} (input[batch, row + i, col + j, channel])
1052      *
1053      * Supported tensor {@link OperandCode}:
1054      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1055      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1056      *
1057      * Supported tensor rank: 4, with "NHWC" data layout.
1058      *
1059      * Both explicit padding and implicit padding are supported.
1060      *
1061      * Inputs (explicit padding):
1062      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1063      *      the input.
1064      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1065      *      the left, in the ‘width’ dimension.
1066      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1067      *      the right, in the ‘width’ dimension.
1068      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1069      *      the top, in the ‘height’ dimension.
1070      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1071      *      the bottom, in the ‘height’ dimension.
1072      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1073      *      walking through input in the ‘width’ dimension.
1074      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1075      *      walking through input in the ‘height’ dimension.
1076      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1077      *      width.
1078      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1079      *      height.
1080      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1081      *      {@link FuseCode} values. Specifies the activation to
1082      *      invoke on the result.
1083      *
1084      * Inputs (implicit padding):
1085      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1086      *      the input.
1087      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1088      *      padding scheme, has to be one of the
1089      *      {@link PaddingCode} values.
1090      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1091      *      walking through input in the ‘width’ dimension.
1092      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1093      *      walking through input in the ‘height’ dimension.
1094      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1095      *      width.
1096      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1097      *      height.
1098      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1099      *      {@link FuseCode} values. Specifies the activation to
1100      *      invoke on the result.
1101      *
1102      * Outputs:
1103      * * 0: The output 4-D tensor, of shape
1104      *      [batches, out_height, out_width, depth].
1105      *
1106      * Available since API level 27.
1107      */
1108     ANEURALNETWORKS_MAX_POOL_2D = 17,
1109 
1110     /**
1111      * Multiplies two tensors, element-wise.
1112      *
1113      * Takes two input tensors of identical {@link OperandCode} and compatible
1114      * dimensions. The output is the product of both input tensors, optionally
1115      * modified by an activation function.
1116      *
1117      * Two dimensions are compatible when:
1118      *     1. they are equal, or
1119      *     2. one of them is 1
1120      *
1121      * The size of the resulting output is the maximum size along each dimension
1122      * of the input operands. It starts with the trailing dimensions, and works
1123      * its way forward.
1124      *
1125      * Supported tensor {@link OperandCode}:
1126      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1127      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1128      *
1129      * Supported tensor rank: up to 4
1130      *
1131      * Inputs:
1132      * * 0: A tensor.
1133      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
1134      *      as input0.
1135      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1136      *      {@link FuseCode} values. Specifies the activation to
1137      *      invoke on the result.
1138      *
1139      * Outputs:
1140      * * 0: The product, a tensor of the same {@link OperandCode} as input0.
1141      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1142      *      the following condition must be satisfied:
1143      *      output_scale > input1_scale * input2_scale.
1144      *
1145      * Available since API level 27.
1146      */
1147     ANEURALNETWORKS_MUL = 18,
1148 
1149     /**
1150      * Computes rectified linear activation on the input tensor element-wise.
1151      *
1152      * The output is calculated using this formula:
1153      *
1154      *     output = max(0, input)
1155      *
1156      * Supported tensor {@link OperandCode}:
1157      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1158      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1159      *
1160      * Supported tensor rank: up to 4.
1161      *
1162      * Inputs:
1163      * * 0: A tensor, specifying the input.
1164      *
1165      * Outputs:
1166      * * 0: The output tensor of same shape as input0.
1167      *
1168      * Available since API level 27.
1169      */
1170     ANEURALNETWORKS_RELU = 19,
1171 
1172     /**
1173      * Computes rectified linear 1 activation on the input tensor element-wise.
1174      *
1175      * The output is calculated using this formula:
1176      *
1177      *     output = min(1.f, max(-1.f, input))
1178      *
1179      * Supported tensor {@link OperandCode}:
1180      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1181      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1182      *
1183      * Supported tensor rank: up to 4.
1184      *
1185      * Inputs:
1186      * * 0: A tensor, specifying the input.
1187      *
1188      * Outputs:
1189      * * 0: The output tensor of same shape as input0.
1190      *
1191      * Available since API level 27.
1192      */
1193     ANEURALNETWORKS_RELU1 = 20,
1194 
1195     /**
1196      * Computes rectified linear 6 activation on the input tensor element-wise.
1197      *
1198      * The output is calculated using this formula:
1199      *
1200      *     output = min(6, max(0, input))
1201      *
1202      * Supported tensor {@link OperandCode}:
1203      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1204      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1205      *
1206      * Supported tensor rank: up to 4.
1207      *
1208      * Inputs:
1209      * * 0: A tensor, specifying the input.
1210      *
1211      * Outputs:
1212      * * 0: The output tensor of same shape as input0.
1213      *
1214      * Available since API level 27.
1215      */
1216     ANEURALNETWORKS_RELU6 = 21,
1217 
1218     /**
1219      * Reshapes a tensor.
1220      *
1221      * Given tensor, this operation returns a tensor that has the same values as
1222      * tensor, but with a newly specified shape.
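     *
     * For example, an input tensor of shape [2, 3] (6 elements) could be
     * reshaped to [6], [3, 2] or [1, 6], but not to [4, 2].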
1223      *
1224      * Supported tensor {@link OperandCode}:
1225      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1226      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1227      *
1228      * Supported tensor rank: up to 4.
1229      *
1230      * Inputs:
1231      * * 0: A tensor, specifying the tensor to be reshaped.
1232      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the
1233      *      shape of the output tensor. The number of elements implied by shape
1234      *      must be the same as the number of elements in the input tensor.
1235      *
1236      * Outputs:
1237      * * 0: The output tensor, of shape specified by the input shape.
1238      *
1239      * Available since API level 27.
1240      */
1241     ANEURALNETWORKS_RESHAPE = 22,
1242 
1243     /**
1244      * Resizes images to the given size using bilinear interpolation.
1245      *
1246      * Resized images will be distorted if their output aspect ratio is not the
1247      * same as input aspect ratio. The corner pixels of output may not be the
1248      * same as corner pixels of input.
1249      *
1250      * Supported tensor {@link OperandCode}:
1251      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1252      *
1253      * Supported tensor rank: 4, with "NHWC" data layout.
1254      *
1255      * Inputs:
1256      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1257      *      the input.
1258      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1259      *      height of the output tensor.
1260      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1261      *      width of the output tensor.
1262      *
1263      * Outputs:
1264      * * 0: The output 4-D tensor, of shape
1265      *      [batches, new_height, new_width, depth].
1266      *
1267      * Available since API level 27.
1268      */
1269     ANEURALNETWORKS_RESIZE_BILINEAR = 23,
1270 
1271     /**
1272      * A basic recurrent neural network layer.
1273      *
1274      * This layer implements the operation:
1275      * outputs = state = activation(inputs * input_weights +
1276      *                              state * recurrent_weights + bias)
1277      *
1278      * Where:
1279      * * “input_weights” is a weight matrix that multiplies the inputs;
1280      * * “recurrent_weights” is a weight matrix that multiplies the current
1281      *    “state” which itself is the output from the previous time step
1282      *    computation;
1283      * * “bias” is a bias vector (added to each output vector in the batch);
1284      * * “activation” is the function passed as the “fused_activation_function”
1285      *   argument (if not “NONE”).
1286      *
1287      * Supported tensor {@link OperandCode}:
1288      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1289      *
1290      * Inputs:
1291      * * 0: input.
1292      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} of shape
1293      *      [batch_size, input_size], where “batch_size” corresponds to the
1294      *      batching dimension, and “input_size” is the size of the input.
1295      * * 1: weights.
1296      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1297      *      [num_units, input_size], where “num_units” corresponds to the
1298      *      number of units.
1299      * * 2: recurrent_weights.
1300      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1301      *      [num_units, num_units], with columns corresponding to the weights
1302      *      from each unit.
1303      * * 3: bias.
1304      *      A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1305      *      [num_units].
1306      * * 4: hidden state (in).
1307      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1308      *      [batch_size, num_units].
1309      * * 5: fused_activation_function.
1310      *      An optional {@link FuseCode} value indicating the
1311      *      activation function. If “NONE” is specified then it results in a
1312      *      linear activation.
1313      *
1314      * Outputs:
1315      * * 0: hidden state (out).
1316      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1317      *      [batch_size, num_units].
1318      *
1319      * * 1: output.
1320      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1321      *      [batch_size, num_units]. This is effectively the same as the
1322      *      current state value.
1323      *
1324      * Available since API level 27.
1325      */
1326     ANEURALNETWORKS_RNN = 24,
1327 
1328     /**
1329      * Computes the softmax activation on the input tensor element-wise, per
1330      * batch, by normalizing the input vector so the maximum coefficient is
1331      * zero.
1332      *
1333      * The output is calculated using this formula:
1334      *
1335      *     output[batch, i] =
1336      *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1337      *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1338      *
1339      * Supported tensor {@link OperandCode}:
1340      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1341      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1342      *
1343      * Supported tensor rank: 2 or 4.
1344      *
1345      * Inputs:
1346      * * 0: A 2-D or 4-D tensor, specifying the input.
1347      * * 1: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the positive
1348      *      scaling factor for the exponent, beta.
1349      *
1350      * Outputs:
1351      * * 0: The output tensor of same shape as input0.
1352      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1353      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1354      *
1355      * Available since API level 27.
1356      */
1357     ANEURALNETWORKS_SOFTMAX = 25,
1358 
1359     /**
1360      * Rearranges blocks of spatial data into depth.
1361      *
1362      * More specifically, this op outputs a copy of the input tensor where
1363      * values from the height and width dimensions are moved to the depth
1364      * dimension. The value block_size indicates the input block size and how
1365      * the data is moved.
1366      *
1367      * Non-overlapping blocks of size block_size x block_size in the height and
1368      * width dimensions are rearranged into depth at each output location.
1369      *
1370      * The depth of the output tensor is input_depth * block_size * block_size.
1371      * The input tensor's height and width must be divisible by block_size.
1372      *
1373      * Supported tensor {@link OperandCode}:
1374      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1375      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1376      *
1377      * Supported tensor rank: 4, with "NHWC" data layout.
1378      *
1379      * Inputs:
1380      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1381      *      specifying the input.
1382      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
1383      *      block_size must be >=1 and block_size must be a divisor of both the
1384      *      input height and width.
1385      *
1386      * Outputs:
1387      * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1388      *      width/block_size, depth_in*block_size*block_size].
1389      *
1390      * Available since API level 27.
1391      */
1392     ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
1393 
1394     /**
1395      * SVDF op is a kind of stateful layer derived from the notion that a
1396      * densely connected layer that's processing a sequence of input frames can
1397      * be approximated by using a singular value decomposition of each of its
1398      * nodes. The implementation is based on:
1399      *
1400      * https://research.google.com/pubs/archive/43813.pdf
1401      *
1402      * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
1403      * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
1404      * INTERSPEECH, 2015.
1405      *
1406      * It processes the incoming input using a 2-stage filtering mechanism:
1407      * * stage 1 performs filtering on the "features" dimension, whose outputs
1408      *   get pushed into a memory of fixed-size memory_size.
1409      * * stage 2 performs filtering on the "time" dimension of the memory_size
1410      *   memoized outputs of stage 1.
1411      *
1412      * Specifically, for rank 1, this layer implements the operation:
1413      *
1414      *     memory = push(conv1d(inputs, weights_feature, feature_dim,
1415      *                          "ANEURALNETWORKS_PADDING_VALID"));
1416      *     outputs = activation(memory * weights_time + bias);
1417      *
1418      * Where:
1419      * * “weights_feature” is a weights matrix that processes the inputs (by
1420      *   convolving the input with every “feature filter”), and whose outputs
1421      *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
1422      *   entry gets dropped);
1423      * * “weights_time” is a weights matrix that processes the “memory” (by a
1424      *   batched matrix multiplication on the num_units);
1425      * * “bias” is an optional bias vector (added to each output vector in the
1426      *   batch); and
1427      * * “activation” is the function passed as the “fused_activation_function”
1428      *   argument (if not “NONE”).
1429      *
1430      * Each rank adds a dimension to the weights matrices by means of stacking
1431      * the filters.
1432      *
1433      * Supported tensor {@link OperandCode}:
1434      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1435      *
1436      * Inputs:
1437      * * 0: input.
1438      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1439      *      [batch_size, input_size], where “batch_size” corresponds to the
1440      *      batching dimension, and “input_size” is the size of the input.
1441      * * 1: weights_feature.
1442      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1443      *      [num_units, input_size], where “num_units” corresponds to the
1444      *      number of units.
1445      * * 2: weights_time.
1446      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1447      *      [num_units, memory_size], where “memory_size” corresponds to the
1448      *      fixed-size of the memory.
1449      * * 3: bias.
1450      *      An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
1451      *      of shape [num_units].
1452      * * 4: state (in).
1453      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1454      *      [batch_size, (memory_size - 1) * num_units * rank].
1455      * * 5: rank.
1456      *      The rank of the SVD approximation.
1457      * * 6: fused_activation_function.
1458      *      An optional {@link FuseCode} value indicating the
1459      *      activation function. If “NONE” is specified then it results in a
1460      *      linear activation.
1461      *
1462      * Outputs:
1463      * * 0: state (out).
1464      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1465      *      [batch_size, (memory_size - 1) * num_units * rank].
1466      * * 1: output.
1467      *      A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
1468      *      [batch_size, num_units].
1469      *
1470      * Available since API level 27.
1471      */
1472     ANEURALNETWORKS_SVDF = 27,
1473 
1474     /**
1475      * Computes hyperbolic tangent of input tensor element-wise.
1476      *
1477      * The output is calculated using this formula:
1478      *
1479      *     output = tanh(input)
1480      *
1481      * Supported tensor {@link OperandCode}:
1482      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1483      *
1484      * Supported tensor rank: up to 4.
1485      *
1486      * Inputs:
1487      * * 0: A tensor, specifying the input.
1488      *
1489      * Outputs:
1490      * * 0: The output tensor of same shape as input0.
1491      *
1492      * Available since API level 27.
1493      */
1494     ANEURALNETWORKS_TANH = 28,
1495 
1496     // TODO: make the description easier to understand.
1497     /**
1498      * BatchToSpace for N-dimensional tensors.
1499      *
1500      * This operation reshapes the batch dimension (dimension 0) into M + 1
1501      * dimensions of shape block_shape + [batch], interleaves these blocks back
1502      * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
1503      * result with the same rank as the input.
1504      *
1505      * This is the reverse of SpaceToBatch.
1506      *
1507      * Supported tensor {@link OperandCode}:
1508      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1509      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1510      *
1511      * Supported tensor rank: 4
1512      *
1513      * Inputs:
1514      * * 0: An n-D tensor, specifying the tensor to be reshaped
1515      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
1516      *      sizes for each spatial dimension of the input tensor. All values
1517      *      must be >= 1.
1518      *
1519      * Outputs:
1520      * * 0: A tensor of the same {@link OperandCode} as input0.
1521      *
1522      * Available since API level 28.
1523      */
1524     ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
1525 
1526     /**
1527      * Element-wise division of two tensors.
1528      *
1529      * Takes two input tensors of identical {@link OperandCode} and compatible
1530      * dimensions. The output is the result of dividing the first input tensor
1531      * by the second, optionally modified by an activation function.
1532      *
1533      * Two dimensions are compatible when:
1534      *     1. they are equal, or
1535      *     2. one of them is 1
1536      *
1537      * The size of the output is the maximum size along each dimension of the
1538      * input operands. It starts with the trailing dimensions, and works its way
1539      * forward.
1540      *
1541      * Example:
1542      *     input1.dimension =    {4, 1, 2}
1543      *     input2.dimension = {5, 4, 3, 1}
1544      *     output.dimension = {5, 4, 3, 2}
1545      *
1546      * Supported tensor {@link OperandCode}:
1547      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1548      *
1549      * Supported tensor rank: up to 4
1550      *
1551      * Inputs:
1552      * * 0: An n-D tensor, specifying the first input.
1553      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
1554      *      as input0.
1555      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1556      *      {@link FuseCode} values. Specifies the activation to
1557      *      invoke on the result.
1558      *
1559      * Outputs:
1560      * * 0: A tensor of the same {@link OperandCode} as input0.
1561      *
1562      * Available since API level 28.
1563      */
1564     ANEURALNETWORKS_DIV = 30,
1565 
1566     /**
1567      * Computes the mean of elements across dimensions of a tensor.
1568      *
1569      * Reduces the input tensor along the given dimensions to reduce. Unless
1570      * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
1571      * in axis. If keep_dims is true, the reduced dimensions are retained with
1572      * length 1.
1573      *
1574      * If dimensions to reduce have no entries, all dimensions are reduced, and
1575      * a tensor with a single element is returned.
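     *
     * For example, taking the mean of a [2, 2] tensor {{1, 2}, {3, 4}} along
     * dimension 0 yields {2, 3} when keep_dims is false, or a [1, 2] tensor
     * {{2, 3}} when keep_dims is true.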
1576      *
1577      * Supported tensor {@link OperandCode}:
1578      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1579      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1580      *
1581      * Supported tensor rank: up to 4
1582      *
1583      * Inputs:
1584      * * 0: A tensor, specifying the input.
1585      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
1586      *      to reduce. If the tensor is empty, all dimensions are reduced. Must be
1587      *      in the range [-rank(input_tensor), rank(input_tensor)).
1588      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,
1589      *      retains reduced dimensions with length 1.
1590      *
1591      * Outputs:
1592      * * 0: A tensor of the same {@link OperandCode} as input0.
1593      *
1594      * Available since API level 28.
1595      */
1596     ANEURALNETWORKS_MEAN = 31,
1597 
1598     /**
1599      * Pads a tensor.
1600      *
1601      * This operation pads a tensor according to the specified paddings.
1602      *
1603      * Supported tensor {@link OperandCode}:
1604      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1605      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1606      *
1607      * Supported tensor rank: up to 4
1608      *
1609      * Inputs:
1610      * * 0: An n-D tensor, specifying the tensor to be padded.
1611      * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
1612      *      for each spatial dimension of the input tensor. The shape of the
1613      *      tensor must be {rank(input0), 2}.
1614      *      padding[i, 0] specifies the number of elements to be padded in the
1615      *      front of dimension i.
1616      *      padding[i, 1] specifies the number of elements to be padded after the
1617      *      end of dimension i.
1618      *
1619      * Outputs:
1620      * * 0: A tensor of the same {@link OperandCode} as input0. The
1621      *      output tensor has the same rank as input0, and each
1622      *      dimension of the output tensor has the same size as the
1623      *      corresponding dimension of the input tensor plus the size
1624      *      of the padding:
1625      *          output0.dimension[i] =
1626      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
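     *
     *      For example, padding a [2, 3] tensor with padding = {{1, 1}, {0, 2}}
     *      (one row before and one after, plus two columns after) produces a
     *      [4, 5] output.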
1627      *
1628      * Available since API level 28.
1629      */
1630     ANEURALNETWORKS_PAD = 32,
1631 
1632     // TODO: make the description easier to understand.
1633     /**
1634      * SpaceToBatch for N-Dimensional tensors.
1635      *
1636      * This operation divides "spatial" dimensions [1, ..., M] of the input into
1637      * a grid of blocks of shape block_shape, and interleaves these blocks with
1638      * the "batch" dimension (0) such that in the output, the spatial dimensions
1639      * [1, ..., M] correspond to the position within the grid, and the batch
1640      * dimension combines both the position within a spatial block and the
1641      * original batch position. Prior to division into blocks, the spatial
1642      * dimensions of the input are optionally zero padded according to paddings.
1643      *
1644      * Supported tensor {@link OperandCode}:
1645      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1646      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1647      *
1648      * Supported tensor rank: 4
1649      *
1650      * Inputs:
1651      * * 0: An n-D tensor, specifying the input.
1652      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
1653      *      sizes for each spatial dimension of the input tensor. All values
1654      *      must be >= 1.
1655      * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
1656      *      for each spatial dimension of the input tensor. All values must be
1657      *      >= 0. The shape of the tensor must be {rank(input0), 2}.
1658      *      padding[i, 0] specifies the number of elements to be padded in the
1659      *      front of dimension i.
1660      *      padding[i, 1] specifies the number of elements to be padded after the
1661      *      end of dimension i.
1662      *
1663      * Outputs:
1664      * * 0: A tensor of the same {@link OperandCode} as input0.
1665      *
1666      * Available since API level 28.
1667      */
1668     ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
1669 
1670     /**
1671      * Removes dimensions of size 1 from the shape of a tensor.
1672      *
1673      * Given a tensor input, this operation returns a tensor of the same
1674      * {@link OperandCode} with all dimensions of size 1 removed. If you don't
1675      * want to remove all size 1 dimensions, you can remove specific size 1
1676      * dimensions by specifying the axes (input1).
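     *
     * For example, squeezing a tensor of shape [1, 2, 1, 3] with no axes given
     * yields shape [2, 3]; squeezing only axis 0 yields shape [2, 1, 3].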
1677      *
1678      * Supported tensor {@link OperandCode}:
1679      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1680      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1681      *
1682      * Supported tensor rank: up to 4
1683      *
1684      * Inputs:
1685      * * 0: An n-D tensor, the tensor to be squeezed.
1686      * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
1687      *      dimensions to squeeze. If specified only squeezes the dimensions
1688      *      listed. Otherwise, squeezes all dimensions. The dimension index
1689      *      starts at 0. An error must be reported if squeezing a dimension that
1690      *      is not 1.
1691      *
1692      * Outputs:
1693      * * 0: A tensor of the same {@link OperandCode} as input0. Contains the
1694      *      same data as input, but has one or more dimensions of size 1
1695      *      removed.
1696      *
1697      * Available since API level 28.
1698      */
1699     ANEURALNETWORKS_SQUEEZE = 34,
1700 
1701     /**
1702      * Extracts a strided slice of a tensor.
1703      *
1704      * Roughly speaking, this op extracts a slice of size (end - begin) / stride
1705      * from the given input tensor. Starting at the location specified by begin,
1706      * the slice continues by adding stride to the index until the index in every
1707      * dimension is no longer less than end. Note that a stride can be negative,
1708      * which causes a reverse slice.
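     *
     * For example, slicing a 1-D tensor {1, 2, 3, 4, 5, 6} with begin = {1},
     * end = {5}, strides = {2} and all masks set to 0 selects indices 1 and 3
     * and produces {2, 4}.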
1709      *
1710      * Supported tensor {@link OperandCode}:
1711      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1712      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1713      *
1714      * Supported tensor rank: up to 4
1715      *
1716      * Inputs:
1717      * * 0: An n-D tensor, specifying the tensor to be sliced.
1718      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of
1719      *      the dimensions of the input tensor to be sliced. The length must be
1720      *      of rank(input0).
1721      * * 2: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of
1722      *      the dimensions of the input tensor to be sliced. The length must be
1723      *      of rank(input0).
1724      * * 3: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of
1725      *      the dimensions of the input tensor to be sliced. The length must be
1726      *      of rank(input0).
1727      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit
1728      *      of begin_mask is set, begin[i] is ignored and the fullest possible
1729      *      range in that dimension is used instead.
1730      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of
1731      *      end_mask is set, end[i] is ignored and the fullest possible range in
1732      *      that dimension is used instead.
1733      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32
1734      *      mask. If the ith bit of shrink_axis_mask is set, it implies that the
1735      *      ith specification shrinks the dimensionality by 1. A slice of size 1
1736      *      starting from begin[i] in the dimension must be preserved.
1737      *
1738      * Outputs:
1739      * * 0: A tensor of the same {@link OperandCode} as input0.
1740      *
1741      * Available since API level 28.
1742      */
1743     ANEURALNETWORKS_STRIDED_SLICE = 35,
1744 
1745     /**
1746      * Element-wise subtraction of two tensors.
1747      *
1748      * Takes two input tensors of identical {@link OperandCode} and compatible
1749      * dimensions. The output is the result of subtracting the second input
1750      * tensor from the first one, optionally modified by an activation function.
1751      *
1752      * Two dimensions are compatible when:
1753      *     1. they are equal, or
1754      *     2. one of them is 1
1755      *
1756      * The size of the output is the maximum size along each dimension of the
1757      * input operands. It starts with the trailing dimensions, and works its way
1758      * forward.
1759      *
1760      * Example:
1761      *     input1.dimension =    {4, 1, 2}
1762      *     input2.dimension = {5, 4, 3, 1}
1763      *     output.dimension = {5, 4, 3, 2}
1764      *
1765      * Supported tensor {@link OperandCode}:
1766      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1767      *
1768      * Supported tensor rank: up to 4
1769      *
1770      * Inputs:
1771      * * 0: An n-D tensor, specifying the first input.
1772      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
1773      *      as input0.
1774      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1775      *      {@link FuseCode} values. Specifies the activation to
1776      *      invoke on the result.
1777      *
1778      * Outputs:
1779      * * 0: A tensor of the same {@link OperandCode} as input0.
1780      *
1781      * Available since API level 28.
1782      */
1783     ANEURALNETWORKS_SUB = 36,
1784 
1785     /**
1786      * Transposes the input tensor, permuting the dimensions according to the
1787      * perm tensor.
1788      *
1789      * The returned tensor's dimension i corresponds to the input dimension
1790      * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
1791      * rank of the input tensor. Hence by default, this operation performs a
1792      * regular matrix transpose on 2-D input Tensors.
1793      *
1794      * Supported tensor {@link OperandCode}:
1795      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1796      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1797      *
1798      * Supported tensor rank: up to 4
1799      *
1800      * Inputs:
1801      * * 0: An n-D tensor, specifying the tensor to be transposed.
1802      * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
1803      *      the permutation of the dimensions of the input tensor.
1804      *
1805      * Outputs:
1806      * * 0: A tensor of the same {@link OperandCode} as input0.
1807      *
1808      * Available since API level 28.
1809      */
1810     ANEURALNETWORKS_TRANSPOSE = 37
1811 }
1812 
1813 /**
1814  * Fused activation function types.
1815  *
1817  * Available since API level 27.
1818  */
1819 enum FuseCode
1820 {
1821     /** NO fused activation function. */
1822     ANEURALNETWORKS_FUSED_NONE = 0,
1823     /** Fused ReLU activation function. */
1824     ANEURALNETWORKS_FUSED_RELU = 1,
1825     /** Fused ReLU1 activation function. */
1826     ANEURALNETWORKS_FUSED_RELU1 = 2,
1827     /** Fused ReLU6 activation function. */
1828     ANEURALNETWORKS_FUSED_RELU6 = 3
1829 }
1830 
1831 /**
1832  * Implicit padding algorithms.
1833  *
1835  * Available since API level 27.
1836  */
1837 enum PaddingCode
1838 {
1839     /**
1840      * SAME padding.
1841      * Padding on both ends is the "same":
1842      *     padding_to_beginning =  total_padding / 2
1843      *     padding_to_end       = (total_padding + 1)/2.
1844      * i.e., for an even amount of total padding, both ends receive exactly the
1845      * same padding; for an odd amount, the padding added to the end is one
1846      * larger than the padding added to the beginning.
1847      *
1848      * total_padding is a function of input size, stride and filter size.
1849      * It can be computed as follows:
1850      *    out_size = (input_size + stride - 1) / stride
1851      *    needed_input = (out_size - 1) * stride + filter_size
1852      *    total_padding = max(0, needed_input - input_size)
1853      * The computation is the same for the horizontal and vertical directions.
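     *
     *  For example, with input_size = 10, stride = 3 and filter_size = 4:
     *     out_size      = (10 + 3 - 1) / 3 = 4
     *     needed_input  = (4 - 1) * 3 + 4  = 13
     *     total_padding = max(0, 13 - 10)  = 3
     *  so padding_to_beginning = 1 and padding_to_end = 2.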
1854      */
1855     ANEURALNETWORKS_PADDING_SAME = 1,
1856 
1857     /**
1858      * VALID padding.
1859      * No padding. When the input size is not evenly divisible by
1860      * the filter size, the input at the end that could not fill
1861      * the whole filter tile will simply be ignored.
1862      */
1863     ANEURALNETWORKS_PADDING_VALID = 2
1864 }
1865 
1866 /**
1867  * Execution preferences.
1868  *
1869  * Available since API level 27.
1870  */
1871 enum PreferenceCode
1872 {
1873     /**
1874      * Prefer executing in a way that minimizes battery drain.
1875      * This is desirable for compilations that will be executed often.
1876      */
1877     ANEURALNETWORKS_PREFER_LOW_POWER = 0,
1878     /**
1879      * Prefer returning a single answer as fast as possible, even if this causes
1880      * more power consumption.
1881      */
1882     ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
1883     /**
1884      * Prefer maximizing the throughput of successive frames, for example when
1885      * processing successive frames coming from the camera.
1886      */
1887     ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2
1888 }
1889 
1890 /**
1891  * Result codes.
1892  *
1893  * <p>Any NNAPI function can return any result code, including result codes not
1894  * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
1895  * indicates a failure of some kind.</p>
1896  *
1897  * <p>Additional information about the nature of a failure can be obtained from
1898  * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
1899  * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
1900  *
1901  * Available since API level 27.
1902  */
1903 enum ResultCode
1904 {
1905     /**
1906      * Operation was successful.
1907      */
1908     ANEURALNETWORKS_NO_ERROR = 0,
1909 
1910     /**
1911      * Failure caused by not enough available memory.
1912      */
1913     ANEURALNETWORKS_OUT_OF_MEMORY = 1,
1914 
1915     ANEURALNETWORKS_INCOMPLETE = 2,
1916 
1917     /**
1918      * Failure caused by unexpected null argument.
1919      */
1920     ANEURALNETWORKS_UNEXPECTED_NULL = 3,
1921 
1922     /**
1923      * Failure caused by invalid function arguments, invalid model definition,
1924      * invalid execution definition or invalid data at execution time.
1925      */
1926     ANEURALNETWORKS_BAD_DATA = 4,
1927 
1928     /**
1929      * Failure caused by failed model execution.
1930      */
1931     ANEURALNETWORKS_OP_FAILED = 5,
1932 
1933     /**
1934      * Failure caused by object being in the wrong state.
1935      */
1936     ANEURALNETWORKS_BAD_STATE = 6,
1937 
1938     /**
1939      * Failure caused by not being able to map a file into memory.
1940      * This may be caused by a file descriptor not being mappable.
1941      * Mitigate by reading its content into memory.
1942      */
1943     ANEURALNETWORKS_UNMAPPABLE = 7
1944 }
1945 
1946 /**
1947  * For {@link ANeuralNetworksModel_setOperandValue}, values with a
1948  * length smaller or equal to this will be immediately copied into
1949  * the model. The size is in bytes.
1950  *
1951  * Available since API level 27.
1952  */
1953 enum
1954 {
1955     ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128
1956 }
1957 
1958 /**
1959  * ANeuralNetworksMemory is an opaque type that represents memory.
1960  *
1961  * This type is used to represent shared memory, memory mapped files,
1962  * and similar memories.
1963  *
1964  * By using shared memory, a program can efficiently communicate to the
1965  * runtime and drivers the tensors that define a model. See
1966  * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
1967  * should typically create one shared memory object that contains every tensor
1968  * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be
1969  * used to create shared memory from a file handle.
1970  *
1971  * Memory objects can also be used to specify the input and output arguments of
1972  * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
1973  * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
1974  *
1975  * Available since API level 27.
1976  */
1977 struct ANeuralNetworksMemory;
1978 
1979 /**
1980  * ANeuralNetworksModel is an opaque type that contains a description of the
1981  * mathematical operations that constitute the model.
1982  *
1983  * <p>Build the model by calling<ul>
1984  * <li>{@link ANeuralNetworksModel_create}</li>
1985  * <li>{@link ANeuralNetworksModel_addOperation}</li>
1986  * <li>{@link ANeuralNetworksModel_addOperand}</li>
1987  * </ul>
1988  *
1989  * This forms a graph in which each operation and operand is a node, a
1990  * directed edge from an operand to an operation indicates that the
1991  * operand is an input to the operation, and a directed edge from an
1992  * operation to an operand indicates that the operand is an output
1993  * from the operation. This graph must be acyclic.
1994  *
1995  * A model is completed by calling {@link ANeuralNetworksModel_finish}.
1996  * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
1997  *
1998  * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish}
1999  * has been called on it.</p>
2000  *
2001  * <p>It is the application's responsibility to make sure that only one thread
2002  * modifies a model at a given time. It is however safe for more than one
2003  * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
2004  *
2005  * <p>It is also the application's responsibility to ensure that there are no other
2006  * uses of the model after calling {@link ANeuralNetworksModel_free}.
2007  * This includes any compilation or execution object created using the model.</p>
2008  *
2009  * Available since API level 27.
2010  */
2011 struct ANeuralNetworksModel;
2012 
2013 /**
2014  * ANeuralNetworksCompilation is an opaque type that can be used to compile
2015  * a machine learning model.
2016  *
2017  * <p>To use:<ul>
2018  *    <li>Create a new compilation instance by calling the
2019  *        {@link ANeuralNetworksCompilation_create} function.</li>
2020  *    <li>Set any desired properties on the compilation (for example,
2021  *        {@link ANeuralNetworksCompilation_setPreference}).</li>
2022  *    <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li>
2023  *    <li>Use the compilation as many times as needed
2024  *        with {@link ANeuralNetworksExecution_create}.</li>
2025  *    <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
2026  *        once all executions using the compilation have completed.</li></ul></p>
2027  *
2028  * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}.
2029  * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}.
2030  *
2031  * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish}
2032  * has been called on it.</p>
2033  *
2034  * <p>It is the application's responsibility to make sure that only
2035  * one thread modifies a compilation at a given time. It is however
2036  * safe for more than one thread to use the compilation once
2037  * {@link ANeuralNetworksCompilation_finish} has returned.</p>
2038  *
2039  * <p>It is also the application's responsibility to ensure that there are no other
2040  * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
2041  * This includes any execution object created using the compilation.</p>
2042  *
2043  * Available since API level 27.
2044  */
2045 struct ANeuralNetworksCompilation;
2046 
2047 /**
2048  * ANeuralNetworksExecution is an opaque type that can be used to apply a machine
2049  * learning model to a set of inputs.
2050  *
2051  * <p>To use:<ul>
2052  *    <li>Create a new execution instance by calling the
2053  *        {@link ANeuralNetworksExecution_create} function.</li>
2054  *    <li>Associate input buffers or memory regions to the model inputs with
2055  *        {@link ANeuralNetworksExecution_setInput} or
2056  *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
2057  *    <li>Associate output buffers or memory regions to the model outputs with
2058  *        {@link ANeuralNetworksExecution_setOutput} or
2059  *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
2060  *    <li>Apply the model with {@link ANeuralNetworksExecution_startCompute}.</li>
2061  *    <li>Wait for the execution to complete with {@link
2062  *        ANeuralNetworksEvent_wait}.</li>
2063  *    <li>Destroy the execution with
2064  *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
2065  *
2066  * <p>An output buffer or memory region must not overlap with any
2067  * other output buffer or memory region, with an input buffer or
2068  * memory region, or with an operand value in a memory object
2069  * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
2070  *
2071  * <p>An execution cannot be modified once {@link ANeuralNetworksExecution_startCompute}
2072  * has been called on it.</p>
2073  *
2074  * <p>An execution can be applied to a model with
2075  * {@link ANeuralNetworksExecution_startCompute} only once. Create new executions
2076  * to do new evaluations of the model.</p>
2077  *
2078  * <p>It is the application's responsibility to make sure that only one thread
2079  * modifies an execution at a given time. It is however safe for more than one
2080  * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
2081  *
2082  * <p>It is also the application's responsibility to ensure that there are no other
2083  * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
2084  *
2085  * Available since API level 27.
2086  */
2087 struct ANeuralNetworksExecution;
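
/*
 * A minimal usage sketch of the execution flow described above. The
 * surrounding function, buffer names and error handling are simplified
 * placeholders; see the documentation of each function below for the exact
 * parameters and return codes.
 *
 *     int runOnce(ANeuralNetworksCompilation* compilation,
 *                 const(float)[] input, float[] output)
 *     {
 *         ANeuralNetworksExecution* execution;
 *         if (ANeuralNetworksExecution_create(compilation, &execution) !=
 *             ResultCode.ANEURALNETWORKS_NO_ERROR)
 *             return -1;
 *
 *         // Bind the caller's buffers to model input 0 and model output 0.
 *         // Passing null for the type uses the operand type given in the model.
 *         ANeuralNetworksExecution_setInput(execution, 0, null, input.ptr,
 *                                           input.length * float.sizeof);
 *         ANeuralNetworksExecution_setOutput(execution, 0, null, output.ptr,
 *                                            output.length * float.sizeof);
 *
 *         // Start the asynchronous computation and wait for it to complete.
 *         ANeuralNetworksEvent* event;
 *         ANeuralNetworksExecution_startCompute(execution, &event);
 *         ANeuralNetworksEvent_wait(event);
 *
 *         ANeuralNetworksEvent_free(event);
 *         ANeuralNetworksExecution_free(execution);
 *         return 0;
 *     }
 */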
2088 
2089 /**
2090  * ANeuralNetworksOperandType describes the type of an operand.
2091  * This structure is used to describe both scalars and tensors.
2092  *
2093  * A tensor operand type must have a specified rank (number of
2094  * dimensions) but may have any of its dimensions unspecified.
2095  *
2096  * A tensor operand type with all dimensions specified is "fully
2097  * specified".  Whenever possible (i.e., whenever the dimensions are
2098  * known at model construction time), a tensor operand type should be
2099  * (but is not required to be) fully specified, in order to enable the
2100  * best possible performance.
2101  *
2102  * If a tensor operand's type is not fully specified, the dimensions
2103  * of the operand are deduced from the operand types and values of the
2104  * operation for which that operand is an output.
2105  *
2106  * <p>In the following situations, a tensor operand type must be fully
2107  * specified:<ul>
2108  *     <li>The operand has a constant value, set by
2109  *         {@link ANeuralNetworksModel_setOperandValue} (with a
2110  *         non-nullptr buffer) or
2111  *         {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
2112  *     <li>The operand is a model input or model output (see
2113  *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}).  A
2114  *         fully specified tensor operand type must either be provided
2115  *         to {@link ANeuralNetworksModel_addOperand}; or it must be
2116  *         provided to the corresponding
2117  *         {@link ANeuralNetworksExecution_setInput},
2118  *         {@link ANeuralNetworksExecution_setInputFromMemory},
2119  *         {@link ANeuralNetworksExecution_setOutput}, or
2120  *         {@link ANeuralNetworksModel_setOperandValueFromMemory}.
2121  *         EXCEPTION: If the input or output is optional and omitted
2122  *         (by passing nullptr for buffer to
2123  *         {@link ANeuralNetworksExecution_setInput} or
2124  *         {@link ANeuralNetworksExecution_setOutput}) then it need
2125  *         not have a fully specified tensor operand type.</li></ul>
2126  *
2127  * A tensor operand type with some number of unspecified dimensions is
2128  * represented by setting each unspecified dimension to 0.
2129  *
2130  * Available since API level 27.
2131  */
2132 struct ANeuralNetworksOperandType
2133 {
2134     /** The data type, e.g. ANEURALNETWORKS_FLOAT32. */
2135     int type;
2136     /** The number of dimensions (rank). It should be 0 for scalars. */
2137     uint dimensionCount;
2138     /** The dimensions of the tensor. It should be nullptr for scalars. */
2139     const(uint)* dimensions;
2140     /** These two fields are only used for quantized tensors.
2141      * They should be zero for scalars and non-fixed point tensors.
2142      * The dequantized value of each entry is (value - zeroPoint) * scale.
2143      */
2144     float scale;
2145     int zeroPoint;
2146 }
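
/*
 * A minimal sketch of filling in this structure; the variable names below are
 * placeholders, not part of the API:
 *
 *     immutable uint[4] dims = [1, 2, 2, 1];
 *
 *     ANeuralNetworksOperandType quantTensor = {
 *         type: OperandCode.ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
 *         dimensionCount: 4,
 *         dimensions: dims.ptr,
 *         scale: 0.5f,      // dequantized value = (value - zeroPoint) * scale
 *         zeroPoint: 128
 *     };
 *
 *     ANeuralNetworksOperandType int32Scalar = {
 *         type: OperandCode.ANEURALNETWORKS_INT32,
 *         dimensionCount: 0, // scalars have rank 0 ...
 *         dimensions: null,  // ... and no dimension array
 *         scale: 0.0f,
 *         zeroPoint: 0
 *     };
 */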
2147 
2148 alias ANeuralNetworksOperationType = int;
2149 
2150 /**
2151  * ANeuralNetworksEvent is an opaque type that represents an event
2152  * that will be signaled once an execution completes.
2153  *
2154  * Available since API level 27.
2155  */
2156 struct ANeuralNetworksEvent;
2157 
2158 /**
2159  * Creates a shared memory object from a file descriptor.
2160  *
2161  * The shared memory is backed by a file descriptor via mmap.
2162  * See {@link ANeuralNetworksMemory} for a description on how to use
2163  * this shared memory.
2164  *
2165  * Available since API level 27.
2166  *
2167  * @param size The requested size in bytes.
2168  *             Must not be larger than the file size.
2169  * @param protect The desired memory protection for the mapping.
2170  *             It is either PROT_NONE or the bitwise OR of one or
2171  *             more of the following flags: PROT_READ, PROT_WRITE.
2172  * @param fd The requested file descriptor.
2173  *           The file descriptor has to be mmap-able. The file
2174  *           descriptor will be duplicated.
2175  * @param offset The offset to the beginning of the file of the area to map.
2176  *               The offset has to be aligned to a page size.
2177  * @param memory The memory object to be created.
2178  *               Set to NULL if unsuccessful.
2179  *
2180  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
2181  */
2182 int ANeuralNetworksMemory_createFromFd (
2183     size_t size,
2184     int protect,
2185     int fd,
2186     size_t offset,
2187     ANeuralNetworksMemory** memory);
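
/*
 * A minimal usage sketch, assuming a hypothetical helper that maps a
 * read-only file holding constant tensor data:
 *
 *     import core.sys.posix.fcntl : open, O_RDONLY;
 *     import core.sys.posix.sys.mman : PROT_READ;
 *     import core.sys.posix.unistd : close;
 *
 *     ANeuralNetworksMemory* mapWeights(const(char)* path, size_t size)
 *     {
 *         int fd = open(path, O_RDONLY);
 *         if (fd < 0)
 *             return null;
 *
 *         ANeuralNetworksMemory* memory;
 *         int status = ANeuralNetworksMemory_createFromFd(size, PROT_READ, fd,
 *                                                         0, &memory);
 *
 *         // The file descriptor is duplicated by the runtime, so the local
 *         // descriptor can be closed once the memory object exists.
 *         close(fd);
 *
 *         return status == ResultCode.ANEURALNETWORKS_NO_ERROR ? memory : null;
 *     }
 */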
2188 
2189 /**
2190  * Delete a memory object.
2191  *
2192  * Destroys the object used by the run time to keep track of the memory.
2193  * This will free the underlying actual memory if no other code has open
2194  * handles to this memory.
2195  *
2196  * Available since API level 27.
2197  *
2198  * @param memory The memory object to be freed.
2199  */
2200 void ANeuralNetworksMemory_free (ANeuralNetworksMemory* memory);
2201 
2202 /**
2203  * Create an empty {@link ANeuralNetworksModel}.
2204  *
2205  * <p>This only creates the object. Computation is performed once
2206  * {@link ANeuralNetworksExecution_startCompute} is invoked.
2207  *
2208  * The model should be constructed with calls to
2209  * {@link ANeuralNetworksModel_addOperation} and
2210  * {@link ANeuralNetworksModel_addOperand}
2211  *
2212  * <p>{@link ANeuralNetworksModel_finish} should be called once the model
2213  * has been fully constructed.</p>
2214  *
2215  * <p>{@link ANeuralNetworksModel_free} should be called once the model
2216  * is no longer needed.</p>
2217  *
2218  * Available since API level 27.
2219  *
2220  * @param model The {@link ANeuralNetworksModel} to be created.
2221  *              Set to NULL if unsuccessful.
2222  *
2223  * @return ANEURALNETWORKS_NO_ERROR if successful.
2224  */
2225 int ANeuralNetworksModel_create (ANeuralNetworksModel** model);
2226 
2227 /**
2228  * Destroy a model.
2229  *
2230  * The model need not have been finished by a call to
2231  * {@link ANeuralNetworksModel_finish}.
2232  *
2233  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2234  *
2235  * Available since API level 27.
2236  *
2237  * @param model The model to be destroyed. Passing NULL is acceptable and
2238  *              results in no operation.
2239  */
2240 void ANeuralNetworksModel_free (ANeuralNetworksModel* model);
2241 
2242 /**
2243  * Indicate that we have finished modifying a model. Required before
2244  * calling {@link ANeuralNetworksCompilation_create}.
2245  *
2246  * An application is responsible for making sure that no other thread uses
2247  * the model at the same time.
2248  *
2249  * This function must only be called once for a given model.
2250  *
2251  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2252  *
2253  * Available since API level 27.
2254  *
2255  * @param model The model to be finished.
2256  *
2257  * @return ANEURALNETWORKS_NO_ERROR if successful.
2258  */
2259 int ANeuralNetworksModel_finish (ANeuralNetworksModel* model);
2260 
2261 /**
2262  * Add an operand to a model.
2263  *
2264  * The order in which the operands are added is important. The first one added
2265  * to a model will have the index value 0, the second 1, etc. These indexes are
2266  * used as operand identifiers in
2267  * {@link ANeuralNetworksModel_addOperation},
2268  * {@link ANeuralNetworksModel_identifyInputsAndOutputs},
2269  * {@link ANeuralNetworksModel_setOperandValue},
2270  * {@link ANeuralNetworksModel_setOperandValueFromMemory},
2271  * {@link ANeuralNetworksExecution_setInput},
2272  * {@link ANeuralNetworksExecution_setInputFromMemory},
2273  * {@link ANeuralNetworksExecution_setOutput} and
2274  * {@link ANeuralNetworksExecution_setOutputFromMemory}.
2276  *
2277  * <p>Every operand must be referenced in exactly one of the following
2278  * ways:<ul>
2279  *    <li>It is identified as a model input with
2280  *        {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>
2281  *    <li>It is identified as a constant with
2282  *        {@link ANeuralNetworksModel_setOperandValue} or
2283  *        {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
2284  *    <li>It is identified as an output of exactly one operation with
2285  *        {@link ANeuralNetworksModel_addOperation}.</li></ul></p>
2286  * <p>An operand that is identified as a model input or as a constant
2287  * must not also be identified as a model output with
2288  * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p>
2289  *
2290  * To build a model that can accommodate inputs of various sizes, as
2291  * you may want to do for a CNN, leave unspecified the dimensions that
2292  * will vary at run time.  If you do so, fully specify dimensions
2293  * when calling {@link ANeuralNetworksExecution_setInput} or
2294  * {@link ANeuralNetworksExecution_setInputFromMemory}.
2295  *
2296  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
2297  * called will return an error.
2298  *
2299  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2300  *
2301  * Available since API level 27.
2302  *
2303  * @param model The model to be modified.
2304  * @param type The {@link ANeuralNetworksOperandType} that describes the shape
2305  *             of the operand.  Neither the {@link ANeuralNetworksOperandType}
2306  *             nor the dimensions it points to need to outlive the call to
2307  *             {@link ANeuralNetworksModel_addOperand}.
2308  *
2309  * @return ANEURALNETWORKS_NO_ERROR if successful.
2310  */
2311 int ANeuralNetworksModel_addOperand (
2312     ANeuralNetworksModel* model,
2313     const(ANeuralNetworksOperandType)* type);
2314 
2315 /**
2316  * Sets an operand to a constant value.
2317  *
2318  * Values of length smaller or equal to
2319  * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}
2320  * are immediately copied into the model.
2321  *
2322  * For values of length greater than {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES},
2323  * a pointer to the buffer is stored within the model. The application is responsible
2324  * for not changing the content of this region until all executions using this model
2325  * have completed. As the data may be copied during processing, modifying the data
2326  * after this call yields undefined results.
2327  *
2328  * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}
2329  * is likely to be more efficient.
2330  *
2331  * To indicate that an optional operand should be considered missing,
2332  * pass nullptr for buffer and 0 for length.
2333  *
2334  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
2335  * called will return an error.
2336  *
2337  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2338  *
2339  * Available since API level 27.
2340  *
2341  * @param model The model to be modified.
2342  * @param index The index of the model operand we're setting.
2343  * @param buffer A pointer to the data to use.
2344  * @param length The size in bytes of the data value.
2345  *
2346  * @return ANEURALNETWORKS_NO_ERROR if successful.
2347  */
2348 int ANeuralNetworksModel_setOperandValue (
2349     ANeuralNetworksModel* model,
2350     int index,
2351     const(void)* buffer,
2352     size_t length);
2353 
2354 /**
2355  * Sets an operand to a value stored in a memory object.
2356  *
2357  * The content of the memory is not copied. A reference to that memory is stored
2358  * inside the model. The application is responsible for not changing the content
2359  * of the memory region until all executions using this model have completed.
2360  * As the data may be copied during processing, modifying the data after this call
2361  * yields undefined results.
2362  *
2363  * To indicate that an optional operand should be considered missing,
2364  * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.
2365  *
2366  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
2367  * called will return an error.
2368  *
2369  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2370  *
2371  * Available since API level 27.
2372  *
2373  * @param model The model to be modified.
2374  * @param index The index of the model operand we're setting.
2376  * @param memory The memory containing the data.
2377  * @param offset This specifies the location of the data within the memory.
2378  *               The offset is in bytes from the start of memory.
2379  * @param length The size in bytes of the data value.
2380  *
2381  * @return ANEURALNETWORKS_NO_ERROR if successful.
2382  */
2383 int ANeuralNetworksModel_setOperandValueFromMemory (
2384     ANeuralNetworksModel* model,
2385     int index,
2386     const(ANeuralNetworksMemory)* memory,
2387     size_t offset,
2388     size_t length);
2389 
2390 /**
2391  * Add an operation to a model.
2392  *
2393  * @param model The model to be modified.
2394  * @param type The {@link ANeuralNetworksOperationType} of the operation.
2395  * @param inputCount The number of entries in the inputs array.
2396  * @param inputs An array of indexes identifying each operand.
2397  * @param outputCount The number of entries in the outputs array.
2398  * @param outputs An array of indexes identifying each operand.
2399  *
2400  * The operands specified by inputs and outputs must have been
2401  * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
2402  *
2403  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
2404  * called will return an error.
2405  *
2406  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2407  *
2408  * Available since API level 27.
2409  *
2410  * @return ANEURALNETWORKS_NO_ERROR if successful.
2411  */
2412 int ANeuralNetworksModel_addOperation (
2413     ANeuralNetworksModel* model,
2414     ANeuralNetworksOperationType type,
2415     uint inputCount,
2416     const(uint)* inputs,
2417     uint outputCount,
2418     const(uint)* outputs);
2419 
2420 /**
2421  * Specifies which operands will be the model's inputs and
2422  * outputs. Every model must have at least one input and one output.
2423  *
2424  * An operand cannot be used for both input and output. Doing so will
2425  * return an error.
2426  *
2427  * @param model The model to be modified.
2428  * @param inputCount The number of entries in the inputs array.
2429  * @param inputs An array of indexes identifying the input operands.
2430  * @param outputCount The number of entries in the outputs array.
2431  * @param outputs An array of indexes identifying the output operands.
2432  *
2433  * The operands specified by inputs and outputs must have been
2434  * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
2435  *
2436  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
2437  * called will return an error.
2438  *
2439  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2440  *
2441  * Available since API level 27.
2442  *
2443  */
2444 int ANeuralNetworksModel_identifyInputsAndOutputs (
2445     ANeuralNetworksModel* model,
2446     uint inputCount,
2447     const(uint)* inputs,
2448     uint outputCount,
2449     const(uint)* outputs);
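
/*
 * A minimal end-to-end sketch of building and finishing a model that
 * multiplies two [1, 2] float tensors with ANEURALNETWORKS_MUL. The function
 * name and operand indexes are placeholders, OperationCode is assumed to be
 * the name of the operation enum declared above, and return codes are only
 * checked at the end for brevity:
 *
 *     bool buildMulModel(ANeuralNetworksModel** outModel)
 *     {
 *         immutable uint[2] dims = [1, 2];
 *         ANeuralNetworksOperandType tensorType = {
 *             type: OperandCode.ANEURALNETWORKS_TENSOR_FLOAT32,
 *             dimensionCount: 2, dimensions: dims.ptr, scale: 0.0f, zeroPoint: 0
 *         };
 *         ANeuralNetworksOperandType scalarType = {
 *             type: OperandCode.ANEURALNETWORKS_INT32,
 *             dimensionCount: 0, dimensions: null, scale: 0.0f, zeroPoint: 0
 *         };
 *
 *         ANeuralNetworksModel* model;
 *         ANeuralNetworksModel_create(&model);
 *
 *         // Operands 0 and 1: the two inputs; operand 2: the activation
 *         // (FuseCode) scalar; operand 3: the output.
 *         ANeuralNetworksModel_addOperand(model, &tensorType); // index 0
 *         ANeuralNetworksModel_addOperand(model, &tensorType); // index 1
 *         ANeuralNetworksModel_addOperand(model, &scalarType); // index 2
 *         ANeuralNetworksModel_addOperand(model, &tensorType); // index 3
 *
 *         // The activation operand is a constant; values this small are
 *         // copied directly into the model.
 *         int fuse = FuseCode.ANEURALNETWORKS_FUSED_NONE;
 *         ANeuralNetworksModel_setOperandValue(model, 2, &fuse, fuse.sizeof);
 *
 *         immutable uint[3] opInputs = [0, 1, 2];
 *         immutable uint[1] opOutputs = [3];
 *         ANeuralNetworksModel_addOperation(model, OperationCode.ANEURALNETWORKS_MUL,
 *                                           3, opInputs.ptr, 1, opOutputs.ptr);
 *
 *         // Operands 0 and 1 are the model inputs; operand 3 is the model output.
 *         immutable uint[2] modelInputs = [0, 1];
 *         immutable uint[1] modelOutputs = [3];
 *         ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs.ptr,
 *                                                       1, modelOutputs.ptr);
 *
 *         *outModel = model;
 *         return ANeuralNetworksModel_finish(model) ==
 *                ResultCode.ANEURALNETWORKS_NO_ERROR;
 *     }
 */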
2450 
2451 /**
2452  * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
2453  * calculated with range and/or precision as low as that of the IEEE 754 16-bit
2454  * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2455  * must be calculated using at least the range and precision of the IEEE 754
2456  * 32-bit floating-point format.
2457  *
2458  * @param model The model to be modified.
2459  * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
2460  *              calculated with range and/or precision as low as that of the
2461  *              IEEE 754 16-bit floating point format. 'false' indicates
2462  *              {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
2463  *              at least the range and precision of the IEEE 754 32-bit floating
2464  *              point format.
2465  *
2466  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
2467  * called will return an error.
2468  *
2469  * Available since API level 28.
2470  *
2471  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
2472  */
2473 int ANeuralNetworksModel_relaxComputationFloat32toFloat16 (ANeuralNetworksModel* model, bool allow);
2474 
2475 // __ANDROID_API__ >= 28
2476 
2477 /**
2478  * Create a {@link ANeuralNetworksCompilation} to compile the given model.
2479  *
2480  * <p>This only creates the object. Compilation is only performed once
2481  * {@link ANeuralNetworksCompilation_finish} is invoked.</p>
2482  *
2483  * <p>{@link ANeuralNetworksCompilation_finish} should be called once
2484  * all desired properties have been set on the compilation.</p>
2485  *
2486  * <p>{@link ANeuralNetworksCompilation_free} should be called once the compilation
2487  * is no longer needed.</p>
2488  *
2489  * <p>The provided model must outlive the compilation.</p>
2490  *
2491  * The model must already have been finished by a call to
2492  * {@link ANeuralNetworksModel_finish}.
2493  *
2494  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2495  *
2496  * Available since API level 27.
2497  *
2498  * @param model The {@link ANeuralNetworksModel} to be compiled.
2499  * @param compilation The newly created object or NULL if unsuccessful.
2500  *
2501  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
2502  *         if the model is invalid.
2503  */
2504 int ANeuralNetworksCompilation_create (
2505     ANeuralNetworksModel* model,
2506     ANeuralNetworksCompilation** compilation);
2507 
2508 /**
2509  * Destroy a compilation.
2510  *
2511  * The compilation need not have been finished by a call to
2512  * {@link ANeuralNetworksCompilation_finish}.
2513  *
2514  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2515  *
2516  * Available since API level 27.
2517  *
2518  * @param compilation The compilation to be destroyed. Passing NULL is acceptable and
2519  *                    results in no operation.
2520  */
2521 void ANeuralNetworksCompilation_free (ANeuralNetworksCompilation* compilation);
2522 
2523 /**
2524  * Sets the execution preference.
2525  *
2526  * <p>Provides guidance to the runtime when trade-offs are possible.</p>
2527  *
2528  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2529  *
2530  * Available since API level 27.
2531  *
2532  * @param compilation The compilation to be modified.
 * @param preference Either {@link ANEURALNETWORKS_PREFER_LOW_POWER},
 *                  {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}, or
 *                  {@link ANEURALNETWORKS_PREFER_SUSTAINED_SPEED}.
2536  *
2537  * @return ANEURALNETWORKS_NO_ERROR if successful.
2538  */
2539 int ANeuralNetworksCompilation_setPreference (
2540     ANeuralNetworksCompilation* compilation,
2541     int preference);
2542 
2543 /**
2544  * Indicate that we have finished modifying a compilation. Required before
2545  * calling {@link ANeuralNetworksExecution_create}.
2546  *
 * An application must ensure that no other thread uses the compilation
 * at the same time.
2549  *
2550  * This function must only be called once for a given compilation.
2551  *
2552  * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2553  *
2554  * Available since API level 27.
2555  *
2556  * @param compilation The compilation to be finished.
2557  *
2558  * @return ANEURALNETWORKS_NO_ERROR if successful.
2559  */
2560 int ANeuralNetworksCompilation_finish (ANeuralNetworksCompilation* compilation);
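
// Illustrative sketch only (excluded from the build), tying the compilation calls
// above together: create a compilation from a finished model, state a preference,
// and finish it. The sustained-speed preference and the error handling are example
// choices, not requirements of the API.
version (none) int example_compile(
    ANeuralNetworksModel* model,
    ANeuralNetworksCompilation** outCompilation)
{
    ANeuralNetworksCompilation* compilation = null;
    int status = ANeuralNetworksCompilation_create(model, &compilation);
    if (status != ResultCode.ANEURALNETWORKS_NO_ERROR)
        return status;

    // Optional: trade single-run latency for sustained throughput.
    status = ANeuralNetworksCompilation_setPreference(
        compilation, PreferenceCode.ANEURALNETWORKS_PREFER_SUSTAINED_SPEED);

    if (status == ResultCode.ANEURALNETWORKS_NO_ERROR)
        status = ANeuralNetworksCompilation_finish(compilation);

    if (status != ResultCode.ANEURALNETWORKS_NO_ERROR)
    {
        ANeuralNetworksCompilation_free(compilation);
        compilation = null;
    }

    *outCompilation = compilation;
    return status;
}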
2561 
2562 /**
2563  * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
2564  * This only creates the object. Computation is only performed once
2565  * {@link ANeuralNetworksExecution_startCompute} is invoked.
2566  *
2567  * <p>The provided compilation must outlive the execution.</p>
2568  *
2569  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2570  *
2571  * Available since API level 27.
2572  *
2573  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
2574  * @param execution The newly created object or NULL if unsuccessful.
2575  *
2576  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
2577  *         if the compilation is invalid.
2578  */
2579 int ANeuralNetworksExecution_create (
2580     ANeuralNetworksCompilation* compilation,
2581     ANeuralNetworksExecution** execution);
2582 
2583 /**
2584  * Destroy an execution.
2585  *
2586  * <p>If called on an execution for which
2587  * {@link ANeuralNetworksExecution_startCompute} has been called, the
2588  * function will return immediately but will mark the execution to be deleted
2589  * once the computation completes. The related {@link ANeuralNetworksEvent}
 * will be signaled and {@link ANeuralNetworksEvent_wait} will return
 * ANEURALNETWORKS_ERROR_DELETED.</p>
2592  *
2593  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2594  *
2595  * Available since API level 27.
2596  *
2597  * @param execution The execution to be destroyed. Passing NULL is acceptable and
2598  *                  results in no operation.
2599  */
2600 void ANeuralNetworksExecution_free (ANeuralNetworksExecution* execution);
2601 
2602 /**
2603  * Associate a user buffer with an input of the model of the
2604  * {@link ANeuralNetworksExecution}.
2605  *
2606  * <p>The provided buffer must outlive the execution.</p>
2607  *
2608  * If the input is optional, you can indicate that it is omitted by
 * passing NULL for buffer and 0 for length.
2610  *
2611  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2612  *
2613  * Available since API level 27.
2614  *
2615  * @param execution The execution to be modified.
2616  * @param index The index of the input argument we are setting. It is
2617  *              an index into the lists passed to
2618  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2619  *              the index associated with
2620  *              {@link ANeuralNetworksModel_addOperand}.
2621  * @param type The {@link ANeuralNetworksOperandType} of the
2622  *             operand. Unless the input is omitted, this should be
2623  *             used to specify the dimensions that were left
2624  *             unspecified when the operand was added to the
2625  *             model. All other properties of the type must be the
2626  *             same as specified in the model. If the type is the same
2627  *             as specified when the model was built, NULL can be
2628  *             passed. Neither the {@link ANeuralNetworksOperandType}
2629  *             nor the dimensions it points to need to outlive the call
2630  *             to {@link ANeuralNetworksExecution_setInput}.
2631  * @param buffer The buffer containing the data.
2632  * @param length The length in bytes of the buffer.
2633  *
2634  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the input.
2636  */
2637 int ANeuralNetworksExecution_setInput (
2638     ANeuralNetworksExecution* execution,
2639     int index,
2640     const(ANeuralNetworksOperandType)* type,
2641     const(void)* buffer,
2642     size_t length);
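
// Illustrative sketch only (excluded from the build): bind a caller-owned float
// slice to model input 0. Passing null for the type keeps the operand type that
// was recorded in the model (all dimensions already known); the buffer must stay
// alive until the execution has completed.
version (none) int example_setInput(
    ANeuralNetworksExecution* execution,
    const(float)[] inputData)
{
    return ANeuralNetworksExecution_setInput(
        execution,
        0,                                // input index 0, per identifyInputsAndOutputs
        null,                             // operand type unchanged from the model
        inputData.ptr,                    // caller-owned buffer
        inputData.length * float.sizeof); // length in bytes
}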
2643 
2644 /**
2645  * Associate part of a memory object with an input of the model of the
2646  * {@link ANeuralNetworksExecution}.
2647  *
2648  * <p>The provided memory must outlive the execution.</p>
2649  *
2650  * If the input is optional, you can indicate that it is omitted by
 * using {@link ANeuralNetworksExecution_setInput} instead, passing NULL for buffer
2652  * and 0 for length.
2653  *
2654  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2655  *
2656  * Available since API level 27.
2657  *
2658  * @param execution The execution to be modified.
2659  * @param index The index of the input argument we are setting. It is
2660  *              an index into the lists passed to
2661  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2662  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2663  * @param type The {@link ANeuralNetworksOperandType} of the
2664  *             operand. This should be used to specify the dimensions
2665  *             that were left unspecified when the operand was added
2666  *             to the model. All other properties of the type must be
2667  *             the same as specified in the model. If the type is the
2668  *             same as specified when the model was built, NULL can be
2669  *             passed. Neither the {@link ANeuralNetworksOperandType}
2670  *             nor the dimensions it points to need to outlive the call
2671  *             to {@link ANeuralNetworksExecution_setInputFromMemory}.
2672  * @param memory The memory containing the data.
2673  * @param offset This specifies the location of the data within the memory.
2674  *               The offset is in bytes from the start of memory.
2675  * @param length The size in bytes of the data value.
2676  *
2677  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the input.
2679  */
2680 int ANeuralNetworksExecution_setInputFromMemory (
2681     ANeuralNetworksExecution* execution,
2682     int index,
2683     const(ANeuralNetworksOperandType)* type,
2684     const(ANeuralNetworksMemory)* memory,
2685     size_t offset,
2686     size_t length);
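
// Illustrative sketch only (excluded from the build): bind a region of a shared
// memory object (for example one created earlier with
// ANeuralNetworksMemory_createFromFd) to model input 0. The offset and byte length
// are placeholders for wherever the tensor data lives inside that memory object.
version (none) int example_setInputFromMemory(
    ANeuralNetworksExecution* execution,
    const(ANeuralNetworksMemory)* memory,
    size_t offset,
    size_t byteLength)
{
    return ANeuralNetworksExecution_setInputFromMemory(
        execution,
        0,           // input index 0
        null,        // operand type unchanged from the model
        memory,      // memory object wrapping the data
        offset,      // byte offset of the tensor within the memory object
        byteLength); // size of the tensor data in bytes
}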
2687 
2688 /**
2689  * Associate a user buffer with an output of the model of the
2690  * {@link ANeuralNetworksExecution}.
2691  *
2692  * If the output is optional, you can indicate that it is omitted by
 * passing NULL for buffer and 0 for length.
2694  *
2695  * <p>The provided buffer must outlive the execution.</p>
2696  *
2697  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2698  *
2699  * Available since API level 27.
2700  *
2701  * @param execution The execution to be modified.
2702  * @param index The index of the output argument we are setting. It is
2703  *              an index into the lists passed to
2704  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2705  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2706  * @param type The {@link ANeuralNetworksOperandType} of the
2707  *             operand. Unless the output is omitted, this should be
2708  *             used to specify the dimensions that were left
2709  *             unspecified when the operand was added to the
2710  *             model. All other properties of the type must be the
2711  *             same as specified in the model. If the type is the same
2712  *             as specified when the model was built, NULL can be
2713  *             passed. Neither the {@link ANeuralNetworksOperandType}
2714  *             nor the dimensions it points to need to outlive the call
2715  *             to {@link ANeuralNetworksExecution_setOutput}.
2716  * @param buffer The buffer where the data is to be written.
2717  * @param length The length in bytes of the buffer.
2718  *
2719  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
2721  */
2722 int ANeuralNetworksExecution_setOutput (
2723     ANeuralNetworksExecution* execution,
2724     int index,
2725     const(ANeuralNetworksOperandType)* type,
2726     void* buffer,
2727     size_t length);
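
// Illustrative sketch only (excluded from the build): receive model output 0 into a
// caller-owned buffer while supplying dimensions that were left unspecified when the
// operand was added. The 1x4 shape is an example, and the field names are assumed to
// mirror the NDK's ANeuralNetworksOperandType layout.
version (none) int example_setOutput(
    ANeuralNetworksExecution* execution,
    float[] outputData)
{
    immutable uint[2] dims = [1, 4];

    ANeuralNetworksOperandType outputType;
    outputType.type = OperandCode.ANEURALNETWORKS_TENSOR_FLOAT32;
    outputType.dimensionCount = cast(uint) dims.length;
    outputType.dimensions = dims.ptr;
    outputType.scale = 0.0f;  // only meaningful for quantized operands
    outputType.zeroPoint = 0; // only meaningful for quantized operands

    return ANeuralNetworksExecution_setOutput(
        execution,
        0,                                 // output index 0, per identifyInputsAndOutputs
        &outputType,                       // completes the previously unspecified dimensions
        outputData.ptr,                    // caller-owned destination buffer
        outputData.length * float.sizeof); // length in bytes
}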
2728 
2729 /**
2730  * Associate part of a memory object with an output of the model of the
2731  * {@link ANeuralNetworksExecution}.
2732  *
2733  * If the output is optional, you can indicate that it is omitted by
 * using {@link ANeuralNetworksExecution_setOutput} instead, passing NULL for buffer
2735  * and 0 for length.
2736  *
2737  * <p>The provided memory must outlive the execution.</p>
2738  *
2739  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2740  *
2741  * Available since API level 27.
2742  *
2743  * @param execution The execution to be modified.
2744  * @param index The index of the output argument we are setting. It is
2745  *              an index into the lists passed to
2746  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2747  *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2748  * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be
2749  *             used to specify the dimensions that were left
2750  *             unspecified when the operand was added to the
2751  *             model. All other properties of the type must be the
2752  *             same as specified in the model. If the type is the same
2753  *             as specified when the model was built, NULL can be
2754  *             passed. Neither the {@link ANeuralNetworksOperandType}
2755  *             nor the dimensions it points to need to outlive the call
2756  *             to {@link ANeuralNetworksExecution_setOutputFromMemory}.
2757  * @param memory The memory where the data is to be stored.
2758  * @param offset This specifies the location of the data within the memory.
2759  *               The offset is in bytes from the start of memory.
2760  * @param length The length in bytes of the data value.
2761  *
2762  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
 *         index is not recognized or the buffer is too small for the output.
2764  */
2765 int ANeuralNetworksExecution_setOutputFromMemory (
2766     ANeuralNetworksExecution* execution,
2767     int index,
2768     const(ANeuralNetworksOperandType)* type,
2769     const(ANeuralNetworksMemory)* memory,
2770     size_t offset,
2771     size_t length);
2772 
2773 /**
2774  * Schedule evaluation of the execution.
2775  *
2776  * <p>Schedules evaluation of the execution. Once the model has been
2777  * applied and the outputs are ready to be consumed, the returned event will be
2778  * signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that event.
2779  * </p>
2780  *
2781  * Multiple executions can be scheduled and evaluated concurrently. The
2782  * runtime makes no guarantee on the ordering of completion of
2783  * executions. If it's important to the application, the application
2784  * should enforce the ordering by using
2785  * {@link ANeuralNetworksEvent_wait}.
2786  *
 * {@link ANeuralNetworksEvent_wait} must be called to reclaim the resources used
 * by the execution.
2789  *
2790  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2791  *
2792  * Available since API level 27.
2793  *
2794  * @param execution The execution to be scheduled and executed.
2795  * @param event The event that will be signaled on completion. event is set to
2796  *              NULL if there's an error.
2797  *
2798  * @return ANEURALNETWORKS_NO_ERROR if successful.
2799  */
2800 int ANeuralNetworksExecution_startCompute (
2801     ANeuralNetworksExecution* execution,
2802     ANeuralNetworksEvent** event);
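
// Illustrative sketch only (excluded from the build): run one inference
// synchronously by creating an execution, scheduling it, and immediately waiting on
// its event. Binding of inputs and outputs is elided; see the setInput/setOutput
// sketches above.
version (none) int example_computeAndWait(ANeuralNetworksCompilation* compilation)
{
    ANeuralNetworksExecution* execution = null;
    int status = ANeuralNetworksExecution_create(compilation, &execution);
    if (status != ResultCode.ANEURALNETWORKS_NO_ERROR)
        return status;

    // ... bind inputs and outputs on `execution` here ...

    ANeuralNetworksEvent* event = null;
    status = ANeuralNetworksExecution_startCompute(execution, &event);
    if (status == ResultCode.ANEURALNETWORKS_NO_ERROR)
    {
        // Blocks until the outputs are ready and releases the resources
        // held by the scheduled computation.
        status = ANeuralNetworksEvent_wait(event);
        ANeuralNetworksEvent_free(event);
    }

    ANeuralNetworksExecution_free(execution);
    return status;
}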
2803 
2804 /**
2805  * Waits until the execution completes.
2806  *
2807  * More than one thread can wait on an event. When the execution completes,
2808  * all threads will be released.
2809  *
2810  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2811  *
2812  * Available since API level 27.
2813  *
 * @param event The event that will be signaled once the execution completes.
 *
 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
2815  */
2816 int ANeuralNetworksEvent_wait (ANeuralNetworksEvent* event);
2817 
2818 /**
2819  * Destroys the event.
2820  *
2821  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2822  *
 *
 * @param event The event to be destroyed.
 */
2825 void ANeuralNetworksEvent_free (ANeuralNetworksEvent* event);
2826 
2827 // __ANDROID_API__ >= 27
2828 
2829 // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H
2830 
2831 /** @} */