1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @addtogroup NeuralNeworkRuntime
18  * @{
19  *
 * @brief Provides the APIs of Neural Network Runtime for accelerating model inference.
21  *
22  * @since 9
23  * @version 2.0
24  */
25 
26 /**
27  * @file neural_network_runtime_type.h
28  *
 * @brief Defines the structures and enumerations used by Neural Network Runtime.
30  *
31  * include "neural_network_runtime/neural_network_runtime_type.h"
32  * @library libneural_network_runtime.so
33  * @kit Neural Network Runtime Kit
34  * @Syscap SystemCapability.Ai.NeuralNetworkRuntime
35  * @since 9
36  * @version 2.0
37  */
38 
39 #ifndef NEURAL_NETWORK_RUNTIME_TYPE_H
40 #define NEURAL_NETWORK_RUNTIME_TYPE_H
41 
42 #ifdef __cplusplus
43 #include <cstddef>
44 #include <cstdint>
45 #else
46 #include <stddef.h>
47 #include <stdint.h>
48 #endif
49 
50 #ifdef __cplusplus
51 extern "C" {
52 #endif
53 
54 /**
55  * @brief Defines the handles of models.
56  *
57  * @since 9
58  * @version 1.0
59  */
60 typedef struct OH_NNModel OH_NNModel;
61 
62 /**
63  * @brief Defines the compilation handle.
64  *
65  * @since 9
66  * @version 1.0
67  */
68 typedef struct OH_NNCompilation OH_NNCompilation;
69 
70 /**
71  * @brief Defines the executor handle.
72  *
73  * @since 9
74  * @version 1.0
75  */
76 typedef struct OH_NNExecutor OH_NNExecutor;
77 
78 /**
79  * @brief Defines the quantization parameter handle.
80  *
81  * @since 11
82  * @version 1.0
83  */
84 typedef struct NN_QuantParam NN_QuantParam;
85 
86 /**
87  * @brief Defines the tensor descriptor handle.
88  *
89  * @since 11
90  * @version 1.0
91  */
92 typedef struct NN_TensorDesc NN_TensorDesc;
93 
94 /**
95  * @brief Defines the tensor handle.
96  *
97  * @since 11
98  * @version 1.0
99  */
100 typedef struct NN_Tensor NN_Tensor;
101 
102 /**
103  * @brief Defines the hardware performance mode.
104  *
105  * @since 9
106  * @version 1.0
107  */
108 typedef enum {
109     /** No performance mode preference */
110     OH_NN_PERFORMANCE_NONE = 0,
    /** Low power consumption mode */
112     OH_NN_PERFORMANCE_LOW = 1,
113     /** Medium performance mode */
114     OH_NN_PERFORMANCE_MEDIUM = 2,
115     /** High performance mode */
116     OH_NN_PERFORMANCE_HIGH = 3,
117     /** Ultimate performance mode */
118     OH_NN_PERFORMANCE_EXTREME = 4
119 } OH_NN_PerformanceMode;
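
/*
 * Illustrative sketch (not part of this header): a typical way to request one of the performance modes
 * above is to pass it to the compilation API declared elsewhere in this kit. The function name
 * OH_NNCompilation_SetPerformanceMode is assumed here; check the compilation header for the exact
 * declaration.
 *
 *     OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_HIGH);
 *     if (ret != OH_NN_SUCCESS) {
 *         // The device rejected the mode; fall back to OH_NN_PERFORMANCE_NONE or report the error.
 *     }
 */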
120 
121 /**
122  * @brief Defines the model inference task priority.
123  *
124  * @since 9
125  * @version 1.0
126  */
127 typedef enum {
128     /** No priority preference */
129     OH_NN_PRIORITY_NONE = 0,
130     /** Low priority */
131     OH_NN_PRIORITY_LOW = 1,
132     /** Medium priority */
133     OH_NN_PRIORITY_MEDIUM = 2,
134     /** High priority */
135     OH_NN_PRIORITY_HIGH = 3
136 } OH_NN_Priority;
137 
138 /**
139  * @brief Defines error codes.
140  *
141  * @since 9
142  * @version 2.0
143  */
144 typedef enum {
145     /** The operation is successful. */
146     OH_NN_SUCCESS = 0,
147     /** The operation failed. */
148     OH_NN_FAILED = 1,
149     /** Invalid parameter. */
150     OH_NN_INVALID_PARAMETER = 2,
151     /** Memory-related error, for example, insufficient memory, memory data copy failure,
152      *  or memory application failure. */
153     OH_NN_MEMORY_ERROR = 3,
154     /** Invalid operation. */
155     OH_NN_OPERATION_FORBIDDEN = 4,
    /** Null pointer exception. */
157     OH_NN_NULL_PTR = 5,
158     /** Invalid file. */
159     OH_NN_INVALID_FILE = 6,
160     /** A hardware error occurs, for example, HDL service crash.
161      * @deprecated since 11
162      * @useinstead {@link OH_NN_UNAVAILABLE_DEVICE}
163      */
164     OH_NN_UNAVALIDABLE_DEVICE = 7,
165     /** Invalid path. */
166     OH_NN_INVALID_PATH = 8,
167     /** Timeout.
168      * @since 11
169      */
170     OH_NN_TIMEOUT = 9,
171     /** Unsupported.
172      * @since 11
173      */
174     OH_NN_UNSUPPORTED = 10,
    /** Connection exception.
176      * @since 11
177      */
178     OH_NN_CONNECTION_EXCEPTION = 11,
179     /** Save cache exception.
180      * @since 11
181      */
182     OH_NN_SAVE_CACHE_EXCEPTION = 12,
183     /** Dynamic shape.
184      * @since 11
185      */
186     OH_NN_DYNAMIC_SHAPE = 13,
187     /** A hardware error occurs, for example, HDL service crash.
188      * @since 11
189      */
190     OH_NN_UNAVAILABLE_DEVICE = 14
191 } OH_NN_ReturnCode;
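
/*
 * Illustrative sketch (not part of this header): most Neural Network Runtime functions report their
 * status through OH_NN_ReturnCode, so callers usually check every return value. someNnrtCall() below
 * is a hypothetical placeholder for any function in this kit.
 *
 *     OH_NN_ReturnCode ret = someNnrtCall();
 *     switch (ret) {
 *         case OH_NN_SUCCESS:
 *             break;                          // continue normally
 *         case OH_NN_UNAVAILABLE_DEVICE:
 *             // The device or driver is unavailable; consider selecting another device.
 *             break;
 *         default:
 *             // Log or propagate the remaining error codes.
 *             break;
 *     }
 */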
192 
193 
194 /**
 * @brief Defines the handle of the callback function invoked when an asynchronous execution is done.
196  *
197  * Use <b>userData</b> to identify the asynchronous execution you want to get.
198  * It is the argument <b>userData</b> passed to {@link OH_NNExecutor_RunAsync}.\n
199  *
200  * Use <b>errCode</b> of type {@link OH_NN_ReturnCode} to get the error code returned by the asynchronous execution.\n
201  *
 * The <b>outputTensor</b> and <b>outputCount</b> are the inference results, which are the same as those passed to
203  * {@link OH_NNExecutor_RunAsync}.\n
204  *
205  * @param userData Asynchronous execution identifier, which is the argument <b>userData</b> passed to
206  *                 {@link OH_NNExecutor_RunAsync}.
207  * @param errCode Error code {@link OH_NN_ReturnCode} returned by the asynchronous execution.
208  * @param outputTensor An array of output tensors {@link NN_Tensor} of the model, which is the same as the argument
209  *                     <b>outputTensor</b> passed to {@link OH_NNExecutor_RunAsync}.
210  * @param outputCount Output tensor count, which is the same as the argument <b>outputCount</b> passed to
211  *                    {@link OH_NNExecutor_RunAsync}.
212  * @since 11
213  * @version 1.0
214  */
215 typedef void (*NN_OnRunDone)(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount);
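
/*
 * Illustrative sketch (not part of this header): a callback matching the NN_OnRunDone typedef above.
 * Only the signature comes from this header; the body is an assumed outline of typical usage.
 *
 *     static void MyOnRunDone(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount)
 *     {
 *         if (errCode != OH_NN_SUCCESS) {
 *             // The asynchronous execution identified by userData failed; handle the error code.
 *             return;
 *         }
 *         // outputTensor and outputCount are the same as those passed to OH_NNExecutor_RunAsync;
 *         // read the inference results from them here.
 *         (void)userData;
 *         (void)outputTensor;
 *         (void)outputCount;
 *     }
 */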
216 
217 /**
 * @brief Defines the handle of the callback function invoked when the device driver service dies during
 *        asynchronous execution.
220  *
221  * You should recompile the model if this callback function is called.\n
222  *
223  * Use <b>userData</b> to identify the asynchronous execution you want to get.
224  * It is the argument <b>userData</b> passed to {@link OH_NNExecutor_RunAsync}.\n
225  *
226  * @param userData Asynchronous execution identifier, which is the argument <b>userData</b> passed to
227  *                 {@link OH_NNExecutor_RunAsync}.
228  * @since 11
229  * @version 1.0
230  */
231 typedef void (*NN_OnServiceDied)(void *userData);
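
/*
 * Illustrative sketch (not part of this header): a callback matching the NN_OnServiceDied typedef above.
 * As noted in the description, the model should be recompiled when this callback fires; the body below
 * is only an assumed outline.
 *
 *     static void MyOnServiceDied(void *userData)
 *     {
 *         // Mark the execution identified by userData as invalid and schedule model recompilation.
 *         (void)userData;
 *     }
 */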
232 
233 /**
234  * @brief Defines activation function types in the fusion operator.
235  *
236  * @since 9
237  * @version 1.0
238  */
239 typedef enum : int8_t {
240     /** The fusion activation function is not specified. */
241     OH_NN_FUSED_NONE = 0,
242     /** Fusion relu activation function */
243     OH_NN_FUSED_RELU = 1,
244     /** Fusion relu6 activation function */
245     OH_NN_FUSED_RELU6 = 2
246 } OH_NN_FuseType;
247 
248 /**
249  * @brief Defines the layout type of tensor data.
250  *
251  * @since 9
252  * @version 2.0
253  */
254 typedef enum {
255     /** The tensor does not have a specific layout type (such as scalar or vector). */
256     OH_NN_FORMAT_NONE = 0,
    /** The tensor arranges data in NCHW format. */
    OH_NN_FORMAT_NCHW = 1,
    /** The tensor arranges data in NHWC format. */
260     OH_NN_FORMAT_NHWC = 2,
261     /** The tensor arranges data in ND format.
262      * @since 11
263      */
264     OH_NN_FORMAT_ND = 3
265 } OH_NN_Format;
266 
267 /**
268  * @brief Defines device types.
269  *
270  * @since 9
271  * @version 1.0
272  */
273 typedef enum {
    /** Devices other than the CPU, GPU, or dedicated accelerators */
275     OH_NN_OTHERS = 0,
276     /** CPU device */
277     OH_NN_CPU = 1,
278     /** GPU device */
279     OH_NN_GPU = 2,
280     /** Dedicated hardware accelerator */
281     OH_NN_ACCELERATOR = 3,
282 } OH_NN_DeviceType;
283 
284 /**
285  * @brief Defines tensor data types.
286  *
287  * @since 9
288  * @version 1.0
289  */
290 typedef enum {
291     /** Unknown type */
292     OH_NN_UNKNOWN = 0,
293     /** bool */
294     OH_NN_BOOL = 1,
295     /** int8 */
296     OH_NN_INT8 = 2,
297     /** int16 */
298     OH_NN_INT16 = 3,
299     /** int32 */
300     OH_NN_INT32 = 4,
301     /** int64 */
302     OH_NN_INT64 = 5,
303     /** uint8 */
304     OH_NN_UINT8 = 6,
305     /** uint16 */
306     OH_NN_UINT16 = 7,
307     /** uint32 */
308     OH_NN_UINT32 = 8,
309     /** uint64 */
310     OH_NN_UINT64 = 9,
311     /** float16 */
312     OH_NN_FLOAT16 = 10,
313     /** float32 */
314     OH_NN_FLOAT32 = 11,
315     /** float64 */
316     OH_NN_FLOAT64 = 12
317 } OH_NN_DataType;
318 
319 
320 /**
321  * @brief Defines operator types.
322  *
323  * @since 9
324  * @version 2.0
325  */
326 typedef enum {
327     /**
     * Returns a tensor containing the element-wise sum of the two input tensors.
329      *
330      * Inputs:
331      *
332      * * <b>input1</b>: first input tensor, of the Boolean or number type.
333      * * <b>input2</b>: second input tensor, whose data type must be the same as that of the first tensor.
334      *
335      * Parameters:
336      *
337      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
338      *       The specified activation function is called before output.
339      *
340      * Outputs:
341      *
342      * * <b>output</b>: sum of <b>input1</b> and <b>input2</b>.
343      *       The data shape is the same as that of the input after broadcasting,
344      *       and the data type is the same as that of the input with a higher precision.
345      */
346     OH_NN_OPS_ADD = 1,
347 
348     /**
     * Applies 2D average pooling to the input tensor, which must be in NHWC format.
350      * The int8 quantization input is supported.
351      *
352      * If the input contains the <b>padMode</b> parameter:
353      *
354      * Inputs:
355      *
356      * * <b>input</b>: tensor.
357      *
358      * Parameters:
359      *
360      * * <b>kernelSize</b> indicates the kernel size used to obtain the average value.
361      *       It is an int array [kernelHeight, kernelWidth].
362      *       The first number indicates the kernel height, and the second number indicates the kernel width.
363      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array
364      *       [strideHeight, strideWidth]. The first number indicates the moving step in height,
365      *       and the second number indicates the moving step in width.
366      * * <b>padMode</b>: padding mode, which is optional. The value is of the int type and can be <b>0</b> (same)
367      *       or <b>1</b> (valid). The nearest neighbor value is used for padding.
368      *       <b>0</b> (same): The height and width of the output are the same as those of the input.
     *       The total padding quantity is calculated horizontally and vertically and
     *       evenly distributed to the top, bottom, left, and right if possible.
371      *       Otherwise, the last additional padding will be completed from the bottom and right.
372      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of no
373      *       padding. Excessive pixels will be discarded.
374      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
375      *       The specified activation function is called before output.
     * * <b>global</b>: whether to perform global pooling.
     * * <b>roundMode</b>: boundary handling mode used when the pooling window cannot completely cover the
     *       input feature map: <b>0</b> means the output size is rounded down, <b>1</b> means it is rounded up.
379      *
380      * If the input contains the <b>padList</b> parameter:
381      *
382      * Inputs:
383      *
384      * * <b>input</b>: tensor.
385      *
386      * Parameters:
387      *
388      * * <b>kernelSize</b> indicates the kernel size used to obtain the average value.
389      *       It is an int array [kernelHeight, kernelWidth].
390      *       The first number indicates the kernel height, and the second number indicates the kernel width.
391      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array
392      *       [strideHeight, strideWidth]. The first number indicates the moving step in height,
393      *       and the second number indicates the moving step in width.
394      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right],
395      *       and the nearest neighbor values are used for padding.
396      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
397      *       The specified activation function is called before output.
     * * <b>global</b>: whether to perform global pooling.
     * * <b>roundMode</b>: boundary handling mode used when the pooling window cannot completely cover the
     *       input feature map: <b>0</b> means the output size is rounded down, <b>1</b> means it is rounded up.
401      *
402      * Outputs:
403      *
404      * * <b>output</b>: average pooling result of the input.
405      */
406     OH_NN_OPS_AVG_POOL = 2,
407 
408     /**
     * Performs batch normalization on the input tensor, applying a transformation to keep the mean output
     * close to 0 and the output standard deviation close to 1.
411      *
412      * Inputs:
413      *
414      * * <b>input</b>: <i>n</i>-dimensional tensor of shape [N, ..., C].
415      *       The <i>n</i>th dimension is the number of channels.
416      * * <b>scale</b>: 1D tensor of the scaling factor used to scale the first normalized tensor.
     * * <b>offset</b>: 1D tensor of the offset added to the first normalized tensor.
418      * * <b>mean</b>: 1D tensor of the overall mean value. It is used only for inference.
419      *       In case of training, this parameter must be left empty.
420      * * <b>variance</b>: 1D tensor used for the overall variance. It is used only for inference.
421      *       In case of training, this parameter must be left empty.
422      *
423      * Parameters:
424      *
425      * * <b>epsilon</b>: fixed small additional value.
426      *
427      * Outputs:
428      *
429      * * <b>output</b>: <i>n</i>-dimensional output tensor whose shape
430      *       and data type are the same as those of the input.
431      */
432     OH_NN_OPS_BATCH_NORM = 3,
433 
434     /**
     * Divides the batch dimension of a 4D tensor into small blocks by <b>blockSize</b>,
     * and interleaves these blocks back into the spatial dimension.
437      *
     * Inputs:
439      *
440      * * <b>input</b>: input tensor. The dimension will be divided into small blocks,
441      *       and these blocks will be interleaved into the spatial dimension.
442      *
     * Parameters:
444      *
445      * * <b>blockSize</b>: size of each block to be interleaved into the spatial dimension.
446      *       The value is an array [heightBlock, widthBlock].
447      * * <b>crops</b>: elements truncated from the spatial dimension of the output. The value is a 2D array
448      *       [[crop0Start, crop0End], [crop1Start, crop1End]] with the shape of (2, 2).
449      *
450      *
451      * Outputs:
452      *
     * * <b>output</b>. Assume that the shape of <b>input</b> is (n,h,w,c) and
     *       the shape of <b>output</b> is (n',h',w',c'):
     *       n' = n / (blockSize[0] * blockSize[1])
     *       h' = h * blockSize[0] - crops[0][0] - crops[0][1]
     *       w' = w * blockSize[1] - crops[1][0] - crops[1][1]
     *       c' = c
459      */
460     OH_NN_OPS_BATCH_TO_SPACE_ND = 4,
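
    /*
     * Worked example (illustrative): for an input of shape (4, 2, 2, 1) with a 2 x 2 block size and
     * zero crops, the formulas above give n' = 4 / (2 * 2) = 1, h' = 2 * 2 - 0 - 0 = 4,
     * w' = 2 * 2 - 0 - 0 = 4 and c' = 1, so the output shape is (1, 4, 4, 1).
     */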
461 
462     /**
463      * Offsets the data in each dimension of the input tensor.
464      *
465      * Inputs:
466      *
467      * * <b>input</b>: input tensor, which can have two to five dimensions.
468      * * <b>bias</b>: offset of the number of input dimensions.
469      *
470      * Outputs:
471      *
472      * * <b>output</b>: sum of the input tensor and the bias in each dimension.
473      */
474     OH_NN_OPS_BIAS_ADD = 5,
475 
476     /**
477      * Converts the data type in the input tensor.
478      *
479      * Inputs:
480      *
481      * * <b>input</b>: input tensor.
482      * * <b>type</b>: converted data type.
483      *
484      * Outputs:
485      *
486      * * <b>output</b>: converted tensor.
487      */
488     OH_NN_OPS_CAST = 6,
489 
490     /**
491      * Connects tensors in a specified dimension.
492      *
493      * Inputs:
494      *
495      * * <b>input</b>: <i>N</i> input tensors.
496      *
497      * Parameters:
498      *
499      * * <b>axis</b>: dimension for connecting tensors.
500      *
501      * Outputs:
502      *
503      * * <b>output</b>: result of connecting <i>N</i> tensors along the axis.
504      */
505     OH_NN_OPS_CONCAT = 7,
506 
507     /**
508      * 2D convolutional layer.
509      *
510      * If the input contains the <b>padMode</b> parameter:
511      *
512      * Inputs:
513      *
514      * * <b>input</b>: input tensor.
515      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
     *       The value of <b>inChannel</b> must be exactly divisible by the value of <b>group</b>.
517      *
518      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
519      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
520      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
521      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
522      *
523      * Parameters:
524      *
525      * * <b>stride</b>: movement stride of the convolution kernel in height and width.
526      *       It is an int array [strideHeight, strideWidth].
527      * * <b>dilation</b>: dilation size of the convolution kernel in height and width.
528      *       It is an int array [dilationHeight, dilationWidth]. The value must be greater than
529      *       or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
530      *
531      * * <b>padMode</b>: padding mode of <b>input</b>.
532      *       The value is of the int type and can be <b>0</b> (same) or <b>1</b> (valid).
533      *       <b>0</b> (same): The height and width of the output are the same as those of the input.
534      *       The total padding quantity is calculated horizontally and vertically
535      *       and evenly distributed to the top, bottom, left, and right if possible.
536      *       Otherwise, the last additional padding will be completed from the bottom and right.
537      *
538      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned
539      *       in case of no padding. The excessive pixels will be discarded.
540      * * <b>group</b>: number of groups in which the input is divided by <b>inChannel</b>. The value is of the
541      *       int type. If <b>group</b> is <b>1</b>, it is a conventional convolution. If <b>group</b> is greater
542      *       than <b>1</b> and less than or equal to <b>inChannel</b>, it is a group convolution.
543      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
544      *       The specified activation function is called before output.
545      *
546      * If the input contains the <b>padList</b> parameter:
547      *
548      * Inputs:
549      *
550      * * <b>input</b>: input tensor.
551      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
     *       The value of <b>inChannel</b> must be exactly divisible by the value of <b>group</b>.
553      *
554      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
555      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
556      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
557      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
558      *
559      * Parameters:
560      *
561      * * <b>stride</b>: movement stride of the convolution kernel in height and width.
562      *       It is an int array [strideHeight, strideWidth].
563      * * <b>dilation</b>: dilation size of the convolution kernel in height and width.
564      *       It is an int array [dilationHeight, dilationWidth]. The value must be greater than
565      *       or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
566      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right].
567      * * <b>group</b>: number of groups in which the input is divided by <b>inChannel</b>.
568      *       The value is of the int type. If <b>group</b> is <b>1</b>, it is a conventional convolution.
569      *       If <b>group</b> is <b>inChannel</b>, it is depthwiseConv2d. In this case, group==inChannel==outChannel.
570      *       If <b>group</b> is greater than <b>1</b> and less than <b>inChannel</b>, it is a group convolution.
571      *       In this case, outChannel==group.
572      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
573      *       The specified activation function is called before output.
574      *
575      * Outputs:
576      *
577      * * <b>output</b>: convolution computing result.
578      */
579     OH_NN_OPS_CONV2D = 8,
580 
581     /**
582      * 2D convolution transposition.
583      *
584      * If the input contains the <b>padMode</b> parameter:
585      *
586      * Inputs:
587      *
588      * * <b>input</b>: input tensor.
589      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
     *       The value of <b>inChannel</b> must be exactly divisible by the value of <b>group</b>.
591      *
592      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
593      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
594      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
595      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
596      *
     * Parameters:
     *
     * * <b>stride</b>: movement stride of the convolution kernel in height and width.
     *       It is an int array [strideHeight, strideWidth].
     * * <b>dilation</b>: dilation size of the convolution kernel in height and width.
603      *       It is an int array [dilationHeight, dilationWidth]. The value must be greater than
604      *       or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
605      * * <b>padMode</b>: padding mode of <b>input</b>. The value is of the int type and can be <b>0</b> (same) or
606      *       <b>1</b> (valid). <b>0</b> (same): The height and width of the output are the same as those of the input.
607      *       The total padding quantity is calculated horizontally and vertically and evenly distributed to the top,
608      *       bottom, left, and right if possible.
609      *       Otherwise, the last additional padding will be completed from the bottom and right.
610      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of
611      *       no padding. The excessive pixels will be discarded.
612      * * <b>group</b>: number of groups in which the input is divided by <b>inChannel</b>. The value is of the int
613      *       type. If <b>group</b> is <b>1</b>, it is a conventional convolution. If <b>group</b> is greater than
614      *       <b>1</b> and less than or equal to <b>inChannel</b>, it is a group convolution.
615      * * <b>outputPads</b>: padding along the height and width of the output tensor. The value is an int or a tuple.
616      *       It can be a single integer to specify the same value for all spatial dimensions. The amount of output
617      *       padding along a dimension must be less than the stride along this dimension.
618      *
619      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
620      *       The specified activation function is called before output.
621      *
622      * If the input contains the <b>padList</b> parameter:
623      *
624      * Inputs:
625      *
626      * * <b>input</b>: input tensor.
627      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
     *       The value of <b>inChannel</b> must be exactly divisible by the value of <b>group</b>.
629      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
630      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
631      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
632      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
633      *
634      * Parameters:
635      *
636      * * <b>stride</b>: movement stride of the convolution kernel in height and width.
637      *       It is an int array [strideHeight, strideWidth].
638      * * <b>dilation</b>: dilation size of the convolution kernel in height and width.
639      *       It is an int array [dilationHeight, dilationWidth]. The value must be greater than
640      *       or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
641      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right].
642      * * <b>group</b>: number of groups in which the input is divided by <b>inChannel</b>. The value is of the int
643      *       type. If <b>group</b> is <b>1</b>, it is a conventional convolution. If <b>group</b> is greater than
644      *       <b>1</b> and less than or equal to <b>inChannel</b>, it is a group convolution.
645      * * <b>outputPads</b>: padding along the height and width of the output tensor. The value is an int or a tuple.
646      *       It can be a single integer to specify the same value for all spatial dimensions. The amount of output
647      *       padding along a dimension must be less than the stride along this dimension.
648      *
649      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
650      *       The specified activation function is called before output.
651      *
652      * Outputs:
653      *
654      * * <b>output</b>: computing result after convolution and transposition.
655      */
656     OH_NN_OPS_CONV2D_TRANSPOSE = 9,
657 
658     /**
659      * 2D depthwise separable convolution.
660      *
661      * If the input contains the <b>padMode</b> parameter:
662      *
663      * Inputs:
664      *
665      * * <b>input</b>: input tensor.
666      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
667      *       <b>outChannel</b> is equal to <b>channelMultiplier</b> multiplied by <b>inChannel</b>.
668      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
669      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
670      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
671      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
672      *
673      * Parameters:
674      *
675      * * <b>stride</b>: movement stride of the convolution kernel in height and width.
676      *       It is an int array [strideHeight, strideWidth].
677      * * <b>dilation</b>: dilation size of the convolution kernel in height and width.
678      *       It is an int array [dilationHeight, dilationWidth]. The value must be greater than
679      *       or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
680      * * <b>padMode</b>: padding mode of <b>input</b>.
681      *       The value is of the int type and can be <b>0</b> (same) or <b>1</b> (valid).
682      *       <b>0</b> (same): The height and width of the output are the same as those of the input. The total padding
683      *       quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and
684      *       right if possible. Otherwise, the last additional padding will be completed from the bottom and right.
685      *
686      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of no
687      *       padding. The excessive pixels will be discarded.
688      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
689      *       The specified activation function is called before output.
690      *
691      * If the input contains the <b>padList</b> parameter:
692      *
693      * Inputs:
694      *
695      * * <b>input</b>: input tensor.
696      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
697      *       <b>outChannel</b> is equal to <b>channelMultiplier</b> multiplied by <b>inChannel</b>.
698      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
699      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
700      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
701      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
702      *
703      * Parameters:
704      *
705      * * <b>stride</b>: movement stride of the convolution kernel in height and width.
706      *       It is an int array [strideHeight, strideWidth].
707      * * <b>dilation</b>: dilation size of the convolution kernel in height and width.
708      *       It is an int array [dilationHeight, dilationWidth]. The value must be greater than
709      *       or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
710      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right].
711      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
712      *       The specified activation function is called before output.
713      *
714      * Outputs:
715      *
716      * * <b>output</b>: convolution computing result.
717      */
718     OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE = 10,
719 
720     /**
721      * Divides two input scalars or tensors.
722      *
723      * Inputs:
724      *
725      * * <b>input1</b>: first input, which is a number, a bool, or a tensor whose data type is number or Boolean.
726      * * <b>input2</b>: second input, which must meet the following requirements:
727      *       If the first input is a tensor, the second input can be a real number, a Boolean value, or a tensor whose
728      *       data type is real number or Boolean value. If the first input is a real number or Boolean value,
729      *       the second input must be a tensor whose data type is real number or Boolean value.
730      *
731      * Parameters:
732      *
733      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
734      *       The specified activation function is called before output.
735      *
736      * Outputs:
737      *
738      * * <b>output</b>: result of dividing <b>input1</b> by <b>input2</b>.
739      */
740     OH_NN_OPS_DIV = 11,
741 
742     /**
743      * Sets parameters to perform product (dot product), sum (addition and subtraction),
744      * or max (larger value) on the input.
745      *
746      * Inputs:
747      *
748      * * <b>input1</b>: first input tensor.
749      * * <b>input2</b>: second input tensor.
750      *
751      * Parameters:
752      *
753      * * <b>mode</b>: operation mode. The value is an enumerated value.
754      *
755      * Outputs:
756      *
     * * <b>output</b>: computing result, which has the same data type and shape as <b>input1</b>.
758      */
759     OH_NN_OPS_ELTWISE = 12,
760 
761     /**
762      * Adds an additional dimension to a tensor in the given dimension.
763      *
764      * Inputs:
765      *
766      * * <b>input</b>: input tensor.
767      * * <b>axis</b>: index of the dimension to be added.
768      *       The value is of the int32_t type and must be a constant in the range [-dim-1, dim].
769      *
770      * Outputs:
771      *
772      * * <b>output</b>: tensor after dimension expansion.
773      */
774     OH_NN_OPS_EXPAND_DIMS = 13,
775 
776     /**
777      * Creates a tensor of the specified dimensions and fills it with a scalar.
778      *
779      * Inputs:
780      *
781      * * <b>value</b>: scalar used to fill the tensor.
782      * * <b>shape</b>: dimensions of the tensor to be created.
783      *
784      * Outputs:
785      *
786      * * <b>output</b>: generated tensor, which has the same data type as <b>value</b>.
787      *       The tensor shape is specified by the <b>shape</b> parameter.
788      */
789     OH_NN_OPS_FILL = 14,
790 
791     /**
792      * Full connection. The entire input is used as the feature map for feature extraction.
793      *
794      * Inputs:
795      *
796      * * <b>input</b>: full-connection input tensor.
797      * * <b>weight</b>: weight tensor for a full connection.
798      * * <b>bias</b>: full-connection bias. In quantization scenarios, no quantized parameter is required
799      *       for this parameter. If quantization is required, the data must be of the OH_NN_INT32 type.
800      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
801      *
802      * Parameters:
803      *
804      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
805      *       The specified activation function is called before output.
     * * <b>hasBias</b>: whether to use the bias.
807      *
808      * Outputs:
809      *
810      * * <b>output</b>: computed tensor.
811      *
812      * If the input contains the <b>axis</b> parameter or <b>useAxis</b> parameter:
813      *
814      * Inputs:
815      *
816      * * <b>input</b>: full-connection input tensor.
817      * * <b>weight</b>: weight tensor for a full connection.
818      * * <b>bias</b>: full-connection bias. In quantization scenarios, no quantized parameter is required
819      *       for this parameter. If quantization is required, the data must be of the OH_NN_INT32 type.
820      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
821      *
822      * Parameters:
823      *
824      * * <b>axis</b>: axis in which the full connection is applied. The specified axis and its following axes are
825      *       converted into a 1D tensor for applying the full connection.
826      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
827      *       The specified activation function is called before output.
     * * <b>useAxis</b>: whether to use the axis.
     * * <b>hasBias</b>: whether to use the bias.
830      *
831      * Outputs:
832      *
833      * * <b>output</b>: computed tensor.
834      */
835     OH_NN_OPS_FULL_CONNECTION = 15,
836 
837     /**
838      * Returns the slice of the input tensor based on the specified index and axis.
839      *
840      * Inputs:
841      *
842      * * <b>input</b>: tensor to be sliced.
843      * * <b>inputIndices</b>: indices of the specified input on the axis. The value is an array of the int type
844      *       and must be in the range [0,input.shape[axis]).
845      * * <b>axis</b>: axis on which <b>input</b> is sliced. The value is an array with one element of the int32_t type.
846      *
847      * Outputs:
848      *
849      * * <b>output</b>: sliced tensor.
850      */
851     OH_NN_OPS_GATHER = 16,
852 
853     /**
     * Calculates the <b>Hswish</b> activation value of the input.
855      *
856      * Inputs:
857      *
858      * * An <i>n</i>-dimensional input tensor.
859      *
860      * Outputs:
861      *
     * * <b>output</b>: <i>n</i>-dimensional <b>Hswish</b> activation value.
     *       The data type and shape are the same as those of <b>input</b>.
864      */
865     OH_NN_OPS_HSWISH = 17,
866 
867     /**
     * For <b>input1</b> and <b>input2</b>, calculates the result of input1[i]<=input2[i] for each pair of elements,
869      * where i is the index of each element in the input tensor.
870      *
871      * Inputs:
872      *
873      * * <b>input1</b>, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
874      * * <b>input2</b>, can be a real number or a Boolean value if <b>input1</b> is a tensor and must be a tensor
875      *       with the data type of real number or OH_NN_BOOL if <b>input1</b> is not a tensor.
876      *
877      * Outputs:
878      *
879      * * <b>output</b>: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
880      *       the quantization parameters of the output cannot be omitted.
881      *       However, values of the quantization parameters do not affect the result.
882      */
883     OH_NN_OPS_LESS_EQUAL = 18,
884 
885     /**
     * Calculates the inner product of <b>input1</b> and <b>input2</b>.
887      *
888      * Inputs:
889      *
890      * * <b>input1</b>: <i>n</i>-dimensional input tensor.
891      * * <b>input2</b>: <i>n</i>-dimensional input tensor.
892      *
893      * Parameters:
894      *
895      * * <b>TransposeX</b>: Boolean value indicating whether to transpose <b>input1</b>.
896      * * <b>TransposeY</b>: Boolean value indicating whether to transpose <b>input2</b>.
897      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
898      *       The specified activation function is called before output.
899      *
900      * Outputs:
901      *
902      * * <b>output</b>: inner product obtained after calculation. In case of type!=NN_UNKNOWN, the output data type is
903      *       determined by <b>type</b>. In case of type==NN_UNKNOWN, the output data type depends on the data type
904      *       converted during computing of <b>inputX</b> and <b>inputY</b>.
905      *
906      */
907     OH_NN_OPS_MATMUL = 19,
908 
909     /**
910      * Calculates the maximum of <b>input1</b> and <b>input2</b> element-wise. The inputs of <b>input1</b>\n
911      * and <b>input2</b> comply with the implicit type conversion rules to make the data types consistent.
912      * The inputs must be two tensors or one tensor and one scalar.
913      * When the inputs are two tensors, their data types cannot be both OH_NN_BOOL.
914      * Their shapes can be broadcast to the same size.
915      * When the inputs are one tensor and one scalar, the scalar must be a constant.
916      *
917      * Inputs:
918      *
919      * * <b>input1</b>: <i>n</i>-dimensional input tensor of the real number or OH_NN_BOOL type.
920      * * <b>input2</b>: <i>n</i>-dimensional input tensor of the real number or OH_NN_BOOL type.
921      *
922      * Outputs:
923      *
924      * * <b>output</b>: <i>n</i>-dimensional output tensor. The <b>shape</b> and data type of
925      *       <b>output</b> are the same as those of the two inputs with a higher precision.
926      */
927     OH_NN_OPS_MAXIMUM = 20,
928 
929     /**
930      * Applies 2D maximum pooling to the input tensor.
931      *
932      * If the input contains the <b>padMode</b> parameter:
933      *
934      * Inputs:
935      *
936      * * <b>input</b>: tensor.
937      *
938      * Parameters:
939      *
940      * * <b>kernelSize</b>: kernel size used to obtain the maximum. It is an int array [kernelHeight, kernelWidth].
941      *       The first number indicates the kernel height, and the second number indicates the kernel width.
942      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array
943      *       [strideHeight, strideWidth]. The first number indicates the moving step in height,
944      *       and the second number indicates the moving step in width.
945      * * <b>padMode</b>: padding mode, which is optional. The value is of the int type and can be <b>0</b> (same)
946      *       or <b>1</b> (valid). The nearest neighbor value is used for padding.
947      *       <b>0</b> (same): The height and width of the output are the same as those of the input. The total padding
948      *       quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and
949      *       right if possible. Otherwise, the last additional padding will be completed from the bottom and right.
950      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of
951      *       no padding. The excessive pixels will be discarded.
952      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
953      *       The specified activation function is called before output.
     * * <b>global</b>: whether to perform global pooling.
     * * <b>roundMode</b>: boundary handling mode used when the pooling window cannot completely cover the
     *       input feature map: <b>0</b> means the output size is rounded down, <b>1</b> means it is rounded up.
957      *
958      * If the input contains the <b>padList</b> parameter:
959      *
960      * Inputs:
961      *
962      * * <b>input</b>: tensor.
963      *
964      * Parameters:
965      *
966      * * <b>kernelSize</b>: kernel size used to obtain the maximum. It is an int array [kernelHeight, kernelWidth].
967      *       The first number indicates the kernel height, and the second number indicates the kernel width.
968      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array
969      *       [strideHeight, strideWidth]. The first number indicates the moving step in height,
970      *       and the second number indicates the moving step in width.
971      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right],
972      *       and the nearest neighbor values are used for padding.
     * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
     *       The specified activation function is called before output.
     * * <b>global</b>: whether to perform global pooling.
     * * <b>roundMode</b>: boundary handling mode used when the pooling window cannot completely cover the
     *       input feature map: <b>0</b> means the output size is rounded down, <b>1</b> means it is rounded up.
978      *
979      * Outputs:
980      *
981      * * <b>output</b>: tensor obtained after maximum pooling is applied to the input.
982      */
983     OH_NN_OPS_MAX_POOL = 21,
984 
985     /**
986      * Multiplies elements in the same positions of <b>input1</b> and <b>input2</b> to obtain the output.
987      * If <b>input1</b> and <b>input2</b> have different shapes, expand them to the same shape
988      * through broadcast and then perform multiplication.
989      *
990      * Inputs:
991      *
992      * * <b>input1</b>: <i>n</i>-dimensional tensor.
993      * * <b>input2</b>: <i>n</i>-dimensional tensor.
994      *
995      * Parameters:
996      *
997      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
998      *       The specified activation function is called before output.
999      *
1000      * Outputs:
1001      *
1002      * * <b>output</b>: Product of each element of <b>input1</b> and <b>input2</b>.
1003      */
1004     OH_NN_OPS_MUL = 22,
1005 
1006     /**
1007      * Generates a one-hot tensor based on the positions specified by <b>indices</b>. The positions specified by
1008      * <b>indices</b> are determined by <b>onValue</b>, and other positions are determined by <b>offValue</b>.
1009      *
1010      * Inputs:
1011      *
1012      * * <b>indices</b>: <i>n</i>-dimensional tensor. Each element in <b>indices</b> determines the position of
1013      *       <b>onValue</b> in each one-hot vector.
1014      * * <b>depth</b>: integer scalar that determines the depth of the one-hot vector. The value of <b>depth</b>
1015      *       must be greater than <b>0</b>.
1016      * * <b>onValue</b>: scalar that specifies a valid value in the one-hot vector.
     * * <b>offValue</b>: scalar that specifies the values of other positions in the one-hot vector except
1018      *       the valid value.
1019      *
1020      * Parameters:
1021      *
1022      * * <b>axis</b>: integer scalar that specifies the dimension for inserting the one-hot. Assume that the shape
1023      *       of <b>indices</b> is [N, C], and the value of <b>depth</b> is D.
1024      *       When <b>axis</b> is <b>0</b>, the shape of the output is [D, N, C].
1025      *       When <b>axis</b> is <b>-1</b>, the shape of the output is [N, C, D].
1026      *       When <b>axis</b> is <b>1</b>, the shape of the output is [N, D, C].
1027      *
1028      * Outputs:
1029      *
1030      * * <b>output</b>: (<i>n</i>+1)-dimensional tensor if <b>indices</b> is an <i>n</i>-dimensional tensor.
1031      *       The output shape is determined by <b>indices</b> and <b>axis</b>.
1032      */
1033     OH_NN_OPS_ONE_HOT = 23,
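
    /*
     * Worked example (illustrative): if indices has shape [N, C] = [2, 3] and depth is D = 4, the
     * output shape is [4, 2, 3] for axis = 0, [2, 3, 4] for axis = -1, and [2, 4, 3] for axis = 1,
     * matching the rules listed above.
     */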
1034 
1035     /**
1036      * Pads <b>inputX</b> in the specified dimensions.
1037      *
1038      * Inputs:
1039      *
1040      * * <b>inputX</b>: <i>n</i>-dimensional tensor in [BatchSize, ...] format.
1041      * * <b>paddings</b>: 2D tensor that specifies the length to pad in each dimension. The shape is [n, 2].
1042      *       For example, <b>paddings[i][0]</b> indicates the number of paddings to be added preceding
1043      *       <b>inputX</b> in the <i>i</i>th dimension.
1044      *       <b>paddings[i][1]</b> indicates the number of paddings to be added following <b>inputX</b>
1045      *       in the <i>i</i>th dimension.
1046      *
1047      * Parameters:
1048      *
     * * <b>constantValue</b>: value used to fill the padded positions.
1050      *       The value is a constant with the same data type as <b>inputX</b>.
1051      * * <b>paddingMode</b>: Padding mode.
1052      *
1053      * Outputs:
1054      *
1055      * * <b>output</b>: <i>n</i>-dimensional tensor after padding, with the same dimensions and data type as
1056      *       <b>inputX</b>. The shape is determined by <b>inputX</b> and <b>paddings</b>.
     *       output.shape[i] = input.shape[i] + paddings[i][0] + paddings[i][1]
1058      */
1059     OH_NN_OPS_PAD = 24,
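
    /*
     * Worked example (illustrative): for inputX of shape [2, 3] with paddings = [[1, 1], [2, 2]],
     * the formula above gives output.shape[0] = 2 + 1 + 1 = 4 and output.shape[1] = 3 + 2 + 2 = 7,
     * so the output shape is [4, 7].
     */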
1060 
1061     /**
     * Calculates each element in <b>input</b> raised to the power of <b>y</b>.
1063      * The inputs must be two tensors or one tensor and one scalar.
1064      * When the inputs are two tensors, their data types cannot be both OH_NN_BOOL, and their shapes must be the same.
1065      * When the inputs are one tensor and one scalar, the scalar must be a constant.
1066      *
1067      * Inputs:
1068      *
1069      * * <b>input</b>: real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
1070      * * <b>y</b>: real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
1071      *
1072      * Parameters:
     * * <b>scale</b>: an OH_NN_FLOAT32 scalar that represents the factor of the scale blend.
     * * <b>shift</b>: an OH_NN_FLOAT32 scalar that represents the bias of the scale blend.
1075      *
1076      * Outputs:
1077      *
1078      * * <b>output</b>: tensor, whose shape is determined by the shape of <b>input</b> and <b>y</b> after broadcasting.
1079      */
1080     OH_NN_OPS_POW = 25,
1081 
1082     /**
1083      * Scales a tensor.
1084      *
1085      * Inputs:
1086      *
1087      * * <b>input</b>: <i>n</i>-dimensional tensor.
1088      * * <b>scale</b>: scaling tensor.
1089      * * <b>bias</b>: bias tensor.
1090      *
1091      * Parameters:
1092      *
1093      * * <b>axis</b>: dimensions to be scaled.
1094      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
1095      *       The specified activation function is called before output.
1096      *
1097      * Outputs:
1098      *
1099      * * <b>output</b>: scaled <i>n</i>-dimensional tensor, whose data type is the same as that of <b>input</b> and
1100      *       shape is determined by <b>axis</b>.
1101      */
1102     OH_NN_OPS_SCALE = 26,
1103 
1104     /**
1105      * Calculates the shape of the input tensor.
1106      *
1107      * Inputs:
1108      *
1109      * * <b>input</b>: <i>n</i>-dimensional tensor.
1110      *
1111      * Outputs:
1112      *
1113      * * <b>output</b>: integer array representing the dimensions of the input tensor.
1114      */
1115     OH_NN_OPS_SHAPE = 27,
1116 
1117     /**
1118      * Applies the <b>sigmoid</b> operation to the input tensor.
1119      *
1120      * Inputs:
1121      *
1122      * * <b>input</b>: <i>n</i>-dimensional tensor.
1123      *
1124      * Outputs:
1125      *
1126      * * <b>output</b>: result of the <b>sigmoid</b> operation. It is an <i>n</i>-dimensional tensor
1127      *       with the same data type and shape as <b>input</b>.
1128      */
1129     OH_NN_OPS_SIGMOID = 28,
1130 
1131     /**
1132      * Slices a tensor of the specified size from the input in each dimension.
1133      *
1134      * Inputs:
1135      *
1136      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1137      * * <b>begin</b>: start of the slice, which is an array of integers greater than or equal to 0.
1138      * * <b>size</b>: slice length, which is an array of integers greater than or equal to 0.
     *       For each dimension <i>i</i>, the value must satisfy 1<=size[i]<=input.shape[i]-begin[i].
1140      *
1141      * Parameters:
1142      *
1143      * * <b>axes</b>: Dimensions on which the tensor is sliced.
1144      *
1145      * Outputs:
1146      *
1147      * * <b>output</b>: <i>n</i>-dimensional tensor obtained by slicing.
     *       The <b>TensorType</b> of the output is the same as that of the input, and the output shape is specified by <b>size</b>.
1149      */
1150     OH_NN_OPS_SLICE = 29,
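
    /*
     * Worked example (illustrative): for an input of shape [3, 4] with begin = [1, 0] and size = [2, 4],
     * the output is the [2, 4] sub-tensor that starts at row 1 and spans all four columns.
     */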
1151 
1152     /**
1153      * Applies the <b>softmax</b> operation to the input tensor.
1154      *
1155      * Inputs:
1156      *
1157      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1158      *
1159      * Parameters:
1160      *
1161      * * <b>axis</b>: dimension in which the <b>softmax</b> operation is performed.
1162      *       The value is of the int64 type. It is an integer in the range [-n, n).
1163      *
1164      * Outputs:
1165      *
1166      * * <b>output</b>: result of the <b>softmax</b> operation. It is an <i>n</i>-dimensional tensor with
1167      *       the same data type and shape as <b>input</b>.
1168      */
1169     OH_NN_OPS_SOFTMAX = 30,
1170 
1171     /**
1172      * Divides a 4D tensor into small blocks and combines these blocks in the original batch.
1173      * The number of blocks is <b>blockShape[0]</b> multiplied by <b>blockShape[1]</b>.
1174      *
1175      * Inputs:
1176      *
1177      * * <b>input</b>: 4D tensor.
1178      *
1179      * Parameters:
1180      *
1181      * * <b>blockShape</b>: a pair of integers. Each of them is greater than or equal to <b>1</b>.
     * * <b>paddings</b>: a pair of arrays. Each of them consists of two integers. The four integers that form
     *       <b>paddings</b> must be greater than or equal to <b>0</b>. <b>paddings[0][0]</b> and <b>paddings[0][1]</b>
1184      *       specify the number of paddings in the third dimension, and <b>paddings[1][0]</b> and <b>paddings[1][1]</b>
1185      *       specify the number of paddings in the fourth dimension.
1186      *
1187      * Outputs:
1188      *
1189      * * <b>output</b>: 4D tensor with the same data type as <b>input</b>. The shape is determined by <b>input</b>,
1190      *       <b>blockShape</b>, and <b>paddings</b>. Assume that the input shape is [n,c,h,w], then:
1191      *       output.shape[0] = n * blockShape[0] * blockShape[1]
1192      *       output.shape[1] = c
1193      *       output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0]
1194      *       output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1]
     *       (h + paddings[0][0] + paddings[0][1]) must be exactly divisible by blockShape[0], and
     *       (w + paddings[1][0] + paddings[1][1]) must be exactly divisible by blockShape[1].
1197      *
1198      */
1199     OH_NN_OPS_SPACE_TO_BATCH_ND = 31,
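
    /*
     * Worked example (illustrative): for an input of shape [n, c, h, w] = [1, 1, 4, 4] with
     * blockShape = [2, 2] and zero paddings, the formulas above give an output shape of
     * [1 * 2 * 2, 1, 4 / 2, 4 / 2] = [4, 1, 2, 2].
     */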
1200 
1201     /**
1202      * Splits the input into multiple tensors along the axis dimension.
1203      * The number of tensors is specified by <b>outputNum</b>.
1204      *
1205      * Inputs:
1206      *
1207      * * <b>input</b>: <i>n</i>-dimensional tensor.
1208      *
1209      * Parameters:
1210      *
1211      * * <b>outputNum</b>: number of output tensors. The data type is long.
1212      * * <b>sizeSplits</b>: size of each tensor split from the input. The value is a 1D tensor of the int type. If
1213      *       <b>sizeSplits</b> is empty, the input will be evenly split into tensors of the same size. In this case,
     *       <b>input.shape[axis]</b> must be exactly divisible by <b>outputNum</b>.
1215      *       If <b>sizeSplits</b> is not empty, the sum of all its elements must be equal to <b>input.shape[axis]</b>.
1216      * * <b>axis</b>: splitting dimension of the int type.
1217      *
1218      * Outputs:
1219      *
1220      * * <b>outputs</b>: array of <i>n</i>-dimensional tensors, with the same data type and dimensions.
1221      *       The data type of each tensor is the same as that of <b>input</b>.
1222      */
1223     OH_NN_OPS_SPLIT = 32,
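
    /*
     * Worked example (illustrative): for an input of shape [4, 6] with axis = 1, outputNum = 3, and an
     * empty sizeSplits, the input is split evenly into three tensors of shape [4, 2].
     */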
1224 
1225     /**
1226      * Calculates the square root of a tensor.
1227      *
1228      * Inputs:
1229      *
1230      * * <b>input</b>: <i>n</i>-dimensional tensor.
1231      *
1232      * Outputs:
1233      *
1234      * * <b>output</b>: square root of the input.
1235      *       It is an <i>n</i>-dimensional tensor with the same data type and shape as <b>input</b>.
1236      */
1237     OH_NN_OPS_SQRT = 33,
1238 
1239     /**
1240      * Calculates the square of the difference between two tensors. The <b>SquaredDifference</b> operator supports
1241      * tensor and tensor subtraction. If two tensors have different <b>TensorTypes</b>, the Sub operator
1242      * converts the low-precision tensor to a high-precision one. If two tensors have different shapes,
1243      * the two tensors can be extended to tensors with the same shape through broadcast.
1244      *
1245      * Inputs:
1246      *
1247      * * <b>input1</b>: minuend, which is a tensor of the OH_NN_FLOAT16, OH_NN_FLOAT32, OH_NN_INT32,
1248      *       or OH_NN_BOOL type.
1249      * * <b>input2</b>: subtrahend, which is a tensor of the OH_NN_FLOAT16, OH_NN_FLOAT32, OH_NN_INT32,
1250      *       or OH_NN_BOOL type.
1251      *
1252      * Outputs:
1253      *
1254      * * <b>output</b>: square of the difference between two inputs. The output shape is determined
     *       by <b>input1</b> and <b>input2</b>. If they have the same shape, the output tensor has the same
1256      *       shape as them. If they have different shapes, perform the broadcast operation on
1257      *       <b>input1</b> and <b>input2</b> and perform subtraction.
1258      *       <b>TensorType</b> of the output is the same as that of the input tensor with higher precision.
1259      */
1260     OH_NN_OPS_SQUARED_DIFFERENCE = 34,
1261 
1262     /**
1263      * Removes the dimension with a length of 1 from the specified axis. The int8 quantization input is supported.
     * For example, if the input shape is [2, 1, 1, 2, 2] and axis is [0, 1], the output shape is [2, 1, 2, 2],
     * which means a dimension whose length is 1 has been removed.
1266      *
1267      * Inputs:
1268      *
1269      * * <b>input</b>: <i>n</i>-dimensional tensor.
1270      *
1271      * Parameters:
1272      *
1273      * * <b>axis</b>: dimension to be removed.
1274      *       The value is of int64_t type and can be an integer in the range [-n, n) or an array.
1275      *
1276      * Outputs:
1277      *
1278      * * <b>output</b>: output tensor.
1279      */
1280     OH_NN_OPS_SQUEEZE = 35,
1281 
1282     /**
1283      * Stacks multiple tensors along the specified axis. If each tensor has <i>n</i> dimensions before stacking,
1284      * the output tensor will have <i>n</i>+1 dimensions.
1285      *
1286      * Inputs:
1287      *
1288      * * <b>input</b>: input for stacking, which can contain multiple <i>n</i>-dimensional tensors.
1289      *       Each of them must have the same shape and type.
1290      *
1291      * Parameters:
1292      *
1293      * * <b>axis</b>: dimension for tensor stacking, which is an integer. The value range is [-(n+1),(n+1)),
1294      *       which means a negative number is allowed.
1295      *
1296      * Outputs:
1297      *
1298      * * <b>output</b>: stacking result of the input along the axis dimension.
1299      *       The value is an <i>n</i>+1-dimensional tensor and has the same <b>TensorType</b> as the input.
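     *
     *       For example (illustrative values): stacking two tensors of shape [2, 3] along axis 0
     *       produces an output of shape [2, 2, 3].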
1300      */
1301     OH_NN_OPS_STACK = 36,
1302 
1303     /**
1304      * Slices a tensor with the specified stride.
1305      *
1306      * Inputs:
1307      *
1308      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1309      * * <b>begin</b>: start of slicing, which is a 1D tensor. The length of <b>begin</b> is <i>n</i>.
1310      *       <b>begin[i]</b> specifies the start of slicing in the <i>i</i>th dimension.
1311      * * <b>end</b>: end of slicing, which is a 1D tensor. The length of <b>end</b> is <i>n</i>.
1312      *       <b>end[i]</b> specifies the end of slicing in the <i>i</i>th dimension.
1313      * * <b>strides</b>: slicing stride, which is a 1D tensor. The length of <b>strides</b> is <i>n</i>.
1314      *       strides[i] specifies the stride at which the tensor is sliced in the <i>i</i>th dimension.
1315      *
1316      * Parameters:
1317      *
1318      * * <b>beginMask</b>: an integer used to mask <b>begin</b>. <b>beginMask</b> is represented in binary code.
1319      *       In case of binary(beginMask)[i]==1, for the <i>i</i>th dimension,
1320      *       elements are sliced from the first element at <b>strides[i]</b> until the end[i]-1 element.
1321      *
1322      * * <b>endMask</b>: an integer used to mask <b>end</b>. <b>endMask</b> is represented in binary code.
1323      *       In case of binary(endMask)[i]==1, elements are sliced from the element at the <b>begin[i]</b> position
1324      *       in the <i>i</i>th dimension until the tensor boundary at <b>strides[i]</b>.
1325      *
1326      * * <b>ellipsisMask</b>: integer used to mask <b>begin</b> and <b>end</b>.
1327      *       <b>ellipsisMask</b> is represented in binary code. In case of binary(ellipsisMask)[i]==1,
1328      *       elements are sliced from the first element at <b>strides[i]</b> in the <i>i</i>th dimension
1329      *       until the tensor boundary. Only one bit of <b>binary(ellipsisMask)</b> can be a non-zero value.
1330      *
1331      * * <b>newAxisMask</b>: new dimension, which is an integer. <b>newAxisMask</b> is represented in binary code.
1332      *       In case of binary(newAxisMask)[i]==1,
1333      *       a new dimension whose length is 1 is inserted into the <i>i</i>th dimension.
     * * <b>shrinkAxisMask</b>: shrinking dimension, which is an integer. <b>shrinkAxisMask</b> is
1335      *       represented in binary code. In the case of binary(shrinkAxisMask)[i]==1, all elements in the
1336      *       <i>i</i>th dimension will be discarded, and the length of the <i>i</i>th dimension is shrunk to <b>1</b>.
1337      *
1338      * Outputs:
1339      *
1340      * * <b>output</b>: A tensor, with the same data type as <b>input</b>.
1341      *       The number of dimensions of the output tensor is rank(input[0])+1.
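     *
     *       For example (illustrative values): slicing the 1D input [1, 2, 3, 4, 5] with begin = [1],
     *       end = [4], strides = [2], and all masks set to 0 selects the elements at indices 1 and 3,
     *       giving [2, 4].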
1342      */
1343     OH_NN_OPS_STRIDED_SLICE = 37,
1344 
1345     /**
1346      * Calculates the difference between two tensors.
1347      *
1348      * Inputs:
1349      *
1350      * * <b>input1</b>: minuend, which is a tensor.
1351      * * <b>input2</b>: subtrahend, which is a tensor.
1352      *
1353      * Parameters:
1354      *
1355      * * <b>activationType</b> is an integer constant which is contained in <b>OH_NN_FuseType</b>.
1356      *       The specified activation function is called before output.
1357      *
1358      * Outputs:
1359      *
     * * <b>output</b>: difference between the two tensors. The output shape is determined by <b>input1</b> and
1361      *       <b>input2</b>. If they have the same shape, the output tensor has the same shape as them.
1362      *       If they have different shapes,
1363      *       perform the broadcast operation on <b>input1</b> and <b>input2</b> and perform subtraction.
1364      *       <b>TensorType</b> of the output is the same as that of the input tensor with higher precision.
1365      */
1366     OH_NN_OPS_SUB = 38,
1367 
1368     /**
1369      * Computes hyperbolic tangent of the input tensor.
1370      *
1371      * Inputs:
1372      *
1373      * * <b>input</b>: <i>n</i>-dimensional tensor.
1374      *
1375      * Outputs:
1376      *
1377      * * <b>output</b>: hyperbolic tangent of the input.
1378      *       The <b>TensorType</b> and tensor shape are the same as those of the input.
1379      */
1380     OH_NN_OPS_TANH = 39,
1381 
1382     /**
1383      * Copies a tensor the specified times.
1384      *
1385      * Inputs:
1386      * * <b>input</b>: <i>n</i>-dimensional tensor.
1387      * * <b>multiples</b>: number of times that the input tensor is copied in each dimension. The value is a 1D tensor.
1388      *       The length <i>m</i> is not less than the number of dimensions, that is, <i>n</i>.
1389      *
1390      * Parameters:
1391      *
     * * <b>dims</b>: a 1D tensor that specifies the number of times that data is copied in each dimension.
     *       The length <i>m</i> is not less than the number of dimensions of <b>input</b>.
1394      *
1395      * Outputs:
1396      * * An <i>m</i>-dimensional tensor whose <b>TensorType</b> is the same as that of the input. If <b>input</b> and
1397      *       <b>multiples</b> have the same length, <b>input</b> and <b>output</b> have the same number of dimensions.
1398      *       If the length of <b>multiples</b> is greater than <i>n</i>, 1 is used to fill the input dimension, and
1399      *       then the input is copied in each dimension the specified times to obtain the <i>m</i>-dimensional tensor.
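     *
     *       For example (illustrative values): tiling an input of shape [2, 3] with multiples = [2, 1]
     *       produces an output of shape [4, 3].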
1400      */
1401     OH_NN_OPS_TILE = 40,
1402 
1403     /**
1404      * Transposes data of <b>input</b> based on <b>permutation</b>.
1405      *
1406      * Inputs:
1407      *
1408      * * <b>input</b>: <i>n</i>-dimensional tensor to be transposed.
1409      * * <b>permutation</b>: The value is a 1D tensor whose length is the same as the number of
1410      *       dimensions of <b>input</b>.
1411      *
1412      * Outputs:
1413      *
1414      * * <b>output</b>: <i>n</i>-dimensional tensor. <b>TensorType</b> of <b>output</b> is the same as that of
1415      *       <b>input</b>, and the output shape is determined by the shape and <b>permutation</b> of <b>input</b>.
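     *
     *       For example (illustrative values): transposing an input of shape [2, 3, 4] with
     *       permutation = [2, 0, 1] produces an output of shape [4, 2, 3].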
1416      */
1417     OH_NN_OPS_TRANSPOSE = 41,
1418 
1419     /**
1420      * Calculates the average value in the specified dimension.
1421      * If <b>keepDims</b> is set to <b>false</b>, the number of dimensions is reduced for the input;
1422      * if <b>keepDims</b> is set to <b>true</b>, the number of dimensions is retained.
1423      *
1424      * Inputs:
1425      *
1426      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
1427      * * <b>axis</b>: dimension used to calculate the average value. The value is a 1D tensor.
     *       The value range of each element in <b>axis</b> is [-n, n).
1429      *
1430      * Parameters:
1431      *
1432      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
1433      * * <b>reduceToEnd</b>: boolean value, indicates whether the reduce operation needs to be performed
1434      *       until the last axis.
     * * <b>coeff</b>: an OH_NN_FLOAT32 scalar that represents the scale factor of the output.
1436      *
1437      * Outputs:
1438      *
1439      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
1440      *       If <b>keepDims</b> is <b>false</b>, m<n. If <b>keepDims</b> is <b>true</b>, m==n.
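     *
     *       For example (illustrative values): averaging an input of shape [2, 3] along axis = [1] yields
     *       an output of shape [2] if <b>keepDims</b> is <b>false</b>, or [2, 1] if <b>keepDims</b> is <b>true</b>.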
1441      */
1442     OH_NN_OPS_REDUCE_MEAN = 42,
1443 
1444     /**
     * Resizes the input using bilinear interpolation based on the given parameters.
1446      *
1447      * Inputs:
1448      *
1449      * * <b>input</b>: 4D input tensor. Each element in the input cannot be less than 0.
1450      *       The input layout must be [batchSize, height, width, channels].
1451      *
1452      * Parameters:
1453      *
1454      * * <b>newHeight</b>: resized height of the 4D tensor.
1455      * * <b>newWidth</b>: resized width of the 4D tensor.
1456      * * <b>preserveAspectRatio</b>: indicates whether to maintain the height/width
1457      *       ratio of <b>input</b> after resizing.
1458      * * <b>coordinateTransformMode</b>: coordinate transformation method used by the resize operation.
1459      *       The value is an int32 integer. Currently, the following methods are supported:
1460      *       0 means ASYMMETRIC, 1 means ALIGN_CORNERS, 2 means HALF_PIXEL.
     * * <b>excludeOutside</b>: an int64 value. When its value is <b>1</b>,
     *       the sampling weights of the part that
     *       exceeds the boundary of <b>input</b> are set to <b>0</b>, and the remaining weights are renormalized.
1464      *
1465      * Outputs:
1466      *
     * * <b>output</b>: 4D tensor, with the same data type as <b>input</b>. Its height and width are
     *       determined by <b>newHeight</b> and <b>newWidth</b>.
1468      */
1469     OH_NN_OPS_RESIZE_BILINEAR = 43,
1470 
1471     /**
1472      * Calculates the reciprocal of the square root of a tensor.
1473      *
1474      * Inputs:
1475      *
1476      * * <b>input</b>: <i>n</i>-dimensional tensor, where <i>n</i> is less than 8.
1477      *       Each element of the tensor cannot be less than 0.
1478      *
1479      * Outputs:
1480      *
1481      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same shape and data type as <b>input</b>.
1482      */
1483     OH_NN_OPS_RSQRT = 44,
1484 
1485     /**
1486      * Reshapes a tensor.
1487      *
1488      * Inputs:
1489      *
1490      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1491      * * <b>InputShape</b>: shape of the output tensor. The value is a 1D constant tensor.
1492      *
1493      * Outputs:
1494      *
1495      * * <b>output</b>: tensor whose data type is the same as that of <b>input</b>
1496      *       and shape is determined by <b>InputShape</b>.
1497      */
1498     OH_NN_OPS_RESHAPE = 45,
1499 
1500     /**
1501      * Calculates the PReLU activation value of <b>input</b> and <b>weight</b>.
1502      *
1503      * Inputs:
1504      *
1505      * * <b>input</b>: <i>n</i>-dimensional tensor. If <i>n</i> is greater than or equal to 2,
1506      *       <b>input</b> must be [BatchSize, ..., Channels]. The second dimension is the number of channels.
1507      * * <b>weight</b>: 1D tensor. The length of <b>weight</b> must be 1 or equal to the number of channels.
1508      *       If the length of <b>weight</b> is 1, all channels share the same weight.
1509      *       If the length of <b>weight</b> is equal to the number of channels, each channel exclusively has a weight.
1510      *       If <i>n</i> is less than 2 for <b>input</b>, the <b>weight</b> length must be 1.
1511      *
1512      * Outputs:
1513      *
1514      * * <b>output</b>: PReLU activation value of <b>input</b>, with the same shape and data type as <b>input</b>.
1515      */
1516     OH_NN_OPS_PRELU = 46,
1517 
1518     /**
1519      * Calculates the Relu activation value of <b>input</b>.
1520      *
1521      * Inputs:
1522      *
1523      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1524      *
1525      * Outputs:
1526      *
1527      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same data type and shape as the input tensor.
1528      */
1529     OH_NN_OPS_RELU = 47,
1530 
1531     /**
1532      * Calculates the Relu6 activation value of the input, that is,
     * min(max(x, 0), 6) is calculated for each element x in the input.
1534      *
1535      * Inputs:
1536      *
1537      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1538      *
1539      * Outputs:
1540      *
1541      * * <b>output</b>: <i>n</i>-dimensional Relu6 tensor, with the same data type and shape as the input tensor.
1542      */
1543     OH_NN_OPS_RELU6 = 48,
1544 
1545     /**
1546      * Applies layer normalization for a tensor from the specified axis.
1547      *
1548      * Inputs:
1549      *
1550      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1551      * * <b>gamma</b>: <i>m</i>-dimensional tensor. The dimensions of <b>gamma</b> must be the same as
1552      *       the shape of the part of the input tensor to normalize.
1553      * * <b>beta</b>: <i>m</i>-dimensional tensor with the same shape as <b>gamma</b>.
1554      *
1555      * Parameters:
1556      *
1557      * * <b>beginAxis</b>: an OH_NN_INT32 scalar that specifies the axis from which normalization starts.
1558      *       The value range is [1, rank(input)).
     * * <b>epsilon</b>: an OH_NN_FLOAT32 scalar. It is a small value added to the denominator in the
     *       normalization formula to avoid division by zero. A common value is 0.00001f.
1561      * * <b>beginParamsAxis</b>: an OH_NN_INT32 scalar that specifies the start axis of layer normalization
1562      *       of input parameter (gamma, beta).
1563      *
1564      * Outputs:
1565      *
1566      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same data type and shape as the input tensor.
1567      */
1568     OH_NN_OPS_LAYER_NORM = 49,
1569 
1570     /**
     * Calculates the product of the elements of a tensor along the specified dimension. If <b>keepDims</b> is set to
1572      * <b>false</b>, the number of dimensions is reduced for the input; if <b>keepDims</b> is set to <b>true</b>,
1573      * the number of dimensions is retained.
1574      *
1575      * Inputs:
1576      *
1577      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
1578      * * <b>axis</b>: dimension used to calculate the product. The value is a 1D tensor.
     *       The value range of each element in <b>axis</b> is [-n, n).
1580      *
1581      * Parameters:
1582      *
1583      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
1584      * * <b>reduceToEnd</b>: boolean value, indicates whether the reduce operation needs to be performed
1585      *       until the last axis.
     * * <b>coeff</b>: an OH_NN_FLOAT32 scalar that represents the scale factor of the output.
1587      *
1588      * Outputs:
1589      *
1590      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
1591      *       If <b>keepDims</b> is <b>false</b>, m<n. If <b>keepDims</b> is <b>true</b>, m==n.
1592      */
1593     OH_NN_OPS_REDUCE_PROD = 50,
1594 
1595     /**
     * Calculates the logical AND of elements of the input tensor along the specified dimension. If <b>keepDims</b> is set to
1597      * <b>false</b>, the number of dimensions is reduced for the input; if <b>keepDims</b> is set to <b>true</b>,
1598      * the number of dimensions is retained.
1599      *
1600      * Inputs:
1601      *
1602      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
1603      * * <b>axis</b>: dimension used to calculate the logical and value. The value is a 1D tensor.
     *       The value range of each element in <b>axis</b> is [-n, n).
1605      *
1606      * Parameters:
1607      *
1608      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
1609      * * <b>reduceToEnd</b>: boolean value, indicates whether the reduce operation needs to be performed
1610      *       until the last axis.
     * * <b>coeff</b>: an OH_NN_FLOAT32 scalar that represents the scale factor of the output.
1612      *
1613      * Outputs:
1614      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
1615      *       If <b>keepDims</b> is <b>false</b>, m<n. If <b>keepDims</b> is <b>true</b>, m==n.
1616      */
1617     OH_NN_OPS_REDUCE_ALL = 51,
1618 
1619     /**
1620      * Converts the data type.
1621      *
1622      * Inputs:
1623      *
1624      * * <b>input</b>: <i>n</i>-dimensional tensor. If it is a conversion between a quantized type and
     *       a floating-point type, the input tensor should contain quantization parameters.
1626      *
1627      * Parameters:
1628      *
1629      * * <b>srcT</b>: data type of the input.
1630      * * <b>dstT</b>: data type of the output.
     * * <b>axis</b>: specifies the dimension from which the quantization parameters are extracted.
     *       If the input tensor carries a single set of quantization parameters, the operator performs
     *       per-layer quantization conversion and this parameter does not take effect. If the input
     *       tensor carries more than one set of quantization parameters, the operator performs
     *       per-channel quantization conversion along the specified axis and this parameter takes effect.
1636      *
1637      * Outputs:
1638      *
1639      * * <b>output</b>: <i>n</i>-dimensional tensor. The data type is determined by <b>dstT</b>.
1640      *       The output shape is the same as the input shape.
1641      */
1642     OH_NN_OPS_QUANT_DTYPE_CAST = 52,
1643 
1644     /**
1645      * Obtains the values and indices of the largest <i>k</i> entries in the last dimension.
1646      *
1647      * Inputs:
1648      *
1649      * * <b>input</b>: <i>n</i>-dimensional tensor.
     * * <b>k</b>: number of largest elements (and their indices) to obtain along the last dimension.
1651      *
1652      * Parameters:
1653      *
1654      * * <b>sorted</b>: order of sorting. The value <b>true</b> means descending and <b>false</b> means ascending.
     * * <b>axis</b>: an OH_NN_INT32 scalar that specifies the dimension that needs to be sorted, default -1,
1656      *       pointing to the last dimension.
1657      *
1658      * Outputs:
1659      *
1660      * * <b>output0</b>: largest <i>k</i> elements in each slice of the last dimension.
1661      * * <b>output1</b>: index of the value in the last dimension of the input.
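     *
     *       For example (illustrative values): with input [1, 4, 3, 2] and k = 2, <b>output0</b> is [4, 3]
     *       and <b>output1</b> is [1, 2].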
1662      */
1663     OH_NN_OPS_TOP_K = 53,
1664 
1665     /**
1666      * Returns the index of the maximum tensor value across axes.
1667      *
1668      * Inputs:
1669      *
1670      * * <b>input</b>: <i>n</i>-dimensional tensor (N, ∗), where ∗ means any number of additional dimensions.
1671      *
1672      * Parameters:
1673      *
1674      * * <b>axis</b>: dimension for calculating the index of the maximum.
1675      * * <b>keepDims</b>: indicates whether to maintain the input tensor dimension. The value is a Boolean value.
1676      * * <b>topK</b>: Whether to keep the output dimensions the same as the input dimensions.
1677      * * <b>outMaxValue</b>: Return the index if the value is <b>false</b>.
1678      *       Return the value if the value is <b>true</b>. The default value is <b>false</b>.
1679      *
1680      * Outputs:
1681      * * <b>output</b>: index of the maximum input tensor on the axis. The value is a tensor.
1682      */
1683     OH_NN_OPS_ARG_MAX = 54,
1684 
1685     /**
1686      * Adds a dimension based on the value of <b>axis</b>.
1687      *
1688      * Inputs:
1689      *
1690      * * <b>input</b>: <i>n</i>-dimensional tensor.
1691      *
1692      * Parameters:
1693      *
1694      * * <b>axis</b>: dimension to be added. The value of <b>axis</b> can be an integer or an array of integers.
1695      *       The value range of the integer is [-n, n).
1696      *
1697      * Outputs:
1698      *
1699      * * <b>output</b>: output tensor.
1700      */
1701     OH_NN_OPS_UNSQUEEZE = 55,
1702 
1703     /**
1704      * Gaussian error linear unit activation function. The int quantization input is not supported.
     * output = 0.5 * input * (1 + erf(input / sqrt(2)))
1706      *
1707      * Inputs:
1708      *
1709      * * <b>input</b>: An <i>n</i>-dimensional input tensor.
1710      *
1711      * Parameters:
1712      * * <b>approximate</b>: Whether to use the approximation algorithm.
1713      *
1714      * Outputs:
1715      *
1716      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same data type and shape as the input tensor.
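     *
     *       For example (illustrative value): for input = 1.0, output = 0.5 * 1.0 * (1 + erf(1 / sqrt(2))),
     *       which is approximately 0.8413.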
1717      */
1718     OH_NN_OPS_GELU = 56,
1719 
1720     /**
     * Unpacks the input tensor based on the given axis.
1722      * Unpacks tensors from <b>input</b> by chipping it along the <b>axis</b> dimension.
1723      * For example, given a tensor of shape (A, B, C, D);
1724      * If axis == 0, then the i'th tensor in output is the slice value[i, :, :, :],\n
1725      * and each tensor in output will have shape (B, C, D).
1726      * If axis == 1, then the i'th tensor in output is the slice value[:, i, :, :],\n
1727      * and each tensor in output will have shape (A, C, D). Etc.
1728      * This is the opposite of <b>OH_NN_OPS_STACK</b>.
1729      *
1730      * Inputs:
1731      *
1732      * * <b>input</b>: <i>n</i>-dimensional tensor.
1733      *
1734      * Parameters:
1735      *
     * * <b>axis</b>: dimension along which to unpack. Default 0. The range is [-n, n).
1737      *
1738      * Outputs:
1739      *
     * * <b>output</b>: A tuple of tensors, each with the same shape.
1741      *
1742      * @since 12
1743      */
1744     OH_NN_OPS_UNSTACK = 57,
1745 
1746     /**
1747      * Obtains the absolute value of the input tensor.
1748      *
1749      * Inputs:
1750      *
1751      * * <b>input</b>: <i>n</i>-dimensional tensor.
1752      *
1753      * Outputs:
1754      *
1755      * * <b>output</b>: <i>n</i>-dimensional tensor. The absolute value of the input tensor.
     *       The shape and data type are the same as those of the input.
1757      *
1758      * @since 12
1759      */
1760     OH_NN_OPS_ABS = 58,
1761 
1762     /**
1763      * Computes the Gauss error function of input element-wise.
1764      *
1765      * Inputs:
1766      *
     * * <b>input</b>: <i>n</i>-dimensional tensor. The number of dimensions must be less than 8,
     *       and only the OH_NN_FLOAT32 and OH_NN_FLOAT16 data types are supported.
1769      *
1770      * Outputs:
1771      *
     * * <b>output</b>: <i>n</i>-dimensional tensor. The shape and data type are the same as those of the input.
1773      *
1774      * @since 12
1775      */
1776     OH_NN_OPS_ERF = 59,
1777 
1778     /**
1779      * Calculates the exponential of the given input tensor element-wise.
     * ExpFusion computes the output by the formula <b>output = base ^ (shift + scale * input)</b> for base > 0.
     * The base defaults to -1, which means the natural constant <i>e</i> is used,
     * and the formula becomes <b>output = exp(shift + scale * input)</b>.
1783      *
1784      * Inputs:
1785      *
1786      * * <b>input</b>: <i>n</i>-dimensional tensor.
1787      *
1788      * Parameters:
1789      *
     * * <b>base</b>: The base of the exponential function. The default is -1, representing the natural
     *       constant <i>e</i>; otherwise, the value must be greater than 0.
     * * <b>scale</b>: The amplification factor applied to <b>input</b> inside the exponent, default 1.
     * * <b>shift</b>: The offset added to the scaled <b>input</b> inside the exponent, default 0.
1794      *
1795      * Outputs:
1796      *
1797      * * <b>output</b>: <i>n</i>-dimensional tensor. The element-wise exponential result of the input tensor.
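     *
     *       For example (illustrative values): with the default base = -1, scale = 1, and shift = 0,
     *       an input of 1.0 produces exp(1.0), which is approximately 2.718.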
1798      *
1799      * @since 12
1800      */
1801     OH_NN_OPS_EXP = 60,
1802 
1803     /**
1804      * For <b>input1</b> and <b>input2</b>, calculate the result of input1[i] < input2[i] for each pair of elements,
1805      * where i is the index of each element in the input tensor.
1806      *
1807      * Inputs:
1808      *
1809      * * <b>input1</b>: can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
1810      * * <b>input2</b>: can be a real number or a Boolean value if <b>input1</b> is a tensor and must be a tensor
1811      *       with the data type of real number or OH_NN_BOOL if <b>input1</b> is not a tensor.
1812      *
1813      * Outputs:
1814      *
1815      * * <b>output</b>: A tensor of the data type OH_NN_BOOL. When a quantization model is used, the quantization
1816      *       parameters of the output cannot be omitted. However, values of the quantization parameters do not
1817      *       affect the result.
1818      *
1819      * @since 12
1820      */
1821     OH_NN_OPS_LESS = 61,
1822 
1823     /**
1824      * Selects output elements from input1 or input2, depending on condition.
     * If the condition is true, the element is taken from input1; otherwise, it is taken from input2.
     * The three inputs, <b>condition</b>, <b>input1</b> and <b>input2</b>, must share the same shape.
1827      *
1828      * Inputs:
1829      *
1830      * * <b>condition</b>: <i>n</i>-dimensional tensor or scalar.
1831      *       The condition tensor, decides which element is chosen.
1832      * * <b>input1</b>: <i>n</i>-dimensional tensor. First input tensor to be chosen.
1833      *       If condition is rank 1, input1 may have higher rank, but its first dimension must match the
1834      *       size of condition.
1835      * * <b>input2</b>: <i>n</i>-dimensional tensor. Second input tensor to be chosen.
1836      *
1837      * Outputs:
1838      *
1839      * * <b>output</b>: A tensor, has the same shape and data type as the input.
1840      *
1841      * @since 12
1842      */
1843     OH_NN_OPS_SELECT = 62,
1844 
1845     /**
1846      * Calculates the square of input tensor element-wise.
1847      *
1848      * Inputs:
1849      *
1850      * * <b>input</b>: <i>n</i>-dimensional tensor.
1851      *
1852      * Outputs:
1853      *
1854      * * <b>output</b>: <i>n</i>-dimensional tensor, has the same shape and dtype as the input.
1855      *
1856      * @since 12
1857      */
1858     OH_NN_OPS_SQUARE = 63,
1859 
1860     /**
1861      * Flattens the input tensor into a 2D matrix. If input tensor has shape (d_0, d_1, … d_n),
1862      * then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X dn).
1863      *
1864      * Inputs:
1865      *
1866      * * <b>input</b>: <i>n</i>-dimensional tensor. The rank of input should be greater or equal to axis.
1867      *
1868      * Parameters:
1869      *
1870      * * <b>axis</b>: Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension
1871      *       of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor.
1872      *       Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is
1873      *       (1, (d_0 X d_1 … d_n)), where the shape of the input tensor is (d_0, d_1, … d_n).
1874      *
1875      * Outputs:
1876      *
1877      * * <b>output</b>: 2-dimensional tensor after flattened.
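     *
     *       For example (illustrative values): flattening an input of shape [2, 3, 4] with axis = 1 produces
     *       an output of shape [2, 12]; with axis = 2, the output shape is [6, 4].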
1878      *
1879      * @since 12
1880      */
1881     OH_NN_OPS_FLATTEN = 64,
1882 
1883     /**
1884      * DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.
     * This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor
1886      * where values from the depth dimension are moved in spatial blocks to the height and width dimensions.
1887      * By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged
1888      * in the following order: depth, column, and then row.
1889      *
1890      * Inputs:
1891      *
1892      * * <b>input</b>: 4-dimensional tensor with specific format of NHWC or NCHW.
1893      *       where N is the batch axis, H is the height, W is the width and C is the channel or depth.
1894      *
1895      * Parameters:
1896      *
1897      * * <b>blockSize</b>: Blocks of [blocksize, blocksize] are moved.
1898      * * <b>mode</b>: DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order.
1899      *
1900      * Outputs:
1901      *
1902      * * <b>output</b>: Output tensor of [N, H * blocksize, W * blocksize, C/(blocksize * blocksize)] for NHWC format
1903      *       or [N, C/(blocksize * blocksize), H * blocksize, W * blocksize] for NCHW format.
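     *
     *       For example (illustrative values): with blockSize = 2, an NHWC input of shape [1, 2, 2, 8]
     *       produces an output of shape [1, 4, 4, 2].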
1904      *
1905      * @since 12
1906      */
1907     OH_NN_OPS_DEPTH_TO_SPACE = 65,
1908 
1909     /**
     * Generates a tensor containing a sequence of numbers that begins at <b>start</b>\n
     * and extends by increments of <b>delta</b> up to, but not including, <b>limit</b>.
1912      *
1913      * Inputs:
1914      *
1915      * * <b>input</b>: <i>n</i>-dimensional tensor.
1916      *
1917      * Parameters:
1918      *
1919      * * <b>start</b>: Scalar. First entry for the range of output values.
1920      * * <b>limit</b>: Scalar. Exclusive upper limit for the range of output values.
1921      * * <b>delta</b>: Scalar. Value to step by.
1922      *
1923      * Outputs:
1924      *
1925      * * <b>output</b>: <i>1</i>-dimensional tensor with specific data type containing generated range of values.
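     *
     *       For example (illustrative values): start = 0, limit = 10, and delta = 3 produce the
     *       output [0, 3, 6, 9].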
1926      *
1927      * @since 12
1928      */
1929     OH_NN_OPS_RANGE = 66,
1930 
1931     /**
     * Normalizes each channel of the input so that the mean of each channel is 0 and the variance is 1.
1933      *
1934      * Inputs:
1935      *
1936      * * <b>input</b>: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W),
1937      *       where N is the batch size, C is the number of channels, and H and W are the height and the width of
1938      *       the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the
1939      *       batch size.
1940      * * <b>scale</b>: The input 1-dimensional scale tensor of channel size.
1941      * * <b>bias</b>: The input 1-dimensional bias tensor of channel size.
1942      *
1943      * Parameters:
1944      *
1945      * * <b>epsilon</b>: The epsilon value to use to avoid division by zero.
1946      *
1947      * Outputs:
1948      *
1949      * * <b>output</b>: The output tensor of the same shape as input.
1950      *
1951      * @since 12
1952      */
1953     OH_NN_OPS_INSTANCE_NORM = 67,
1954 
1955     /**
1956      * Generate a tensor with given value and shape.
1957      *
1958      * Inputs:
1959      *
1960      * * <b>input</b>: <i>1</i>-dimensional tensor. Indicates the shape of the expected output tensor.
1961      *       All values must be >= 0.
1962      *
1963      * Parameters:
1964      *
1965      * * <b>dataType</b>: The data type of the output tensor.
1966      * * <b>value</b>: The value of the output elements.
1967      *
1968      * Outputs:
1969      *
     * * <b>output</b>: A tensor whose shape is given by the values of <b>input</b> and whose elements
     *       are all equal to <b>value</b>.
1971      *
1972      * @since 12
1973      */
1974     OH_NN_OPS_CONSTANT_OF_SHAPE = 68,
1975 
1976     /**
     * Broadcasts a tensor to a compatible shape.
1978      *
1979      * Inputs:
1980      *
1981      * * <b>input</b>: <i>n</i>-dimensional tensor.
1982      *
1983      * Parameters:
1984      *
1985      * * <b>shape</b>: A 1-dimensional Tensor, the shape of the desired output.
1986      *
1987      * Outputs:
1988      *
     * * <b>output</b>: The tensor after broadcasting.
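     *
     *       For example (illustrative values): broadcasting an input of shape [1, 3] with shape = [2, 3]
     *       produces an output of shape [2, 3] in which the single input row is repeated twice.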
1990      *
1991      * @since 12
1992      */
1993     OH_NN_OPS_BROADCAST_TO = 69,
1994 
1995     /**
     * For <b>input1</b> and <b>input2</b>, calculate the result of input1[i] == input2[i] for each pair of elements,
1997      * where i is the index of each element in the input tensor.
1998      *
1999      * Inputs:
2000      *
2001      * * <b>input1</b>, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
2002      * * <b>input2</b>, can be a real number or a Boolean value if <b>input1</b> is a tensor and must be a tensor
2003      *       with the data type of real number or OH_NN_BOOL if <b>input1</b> is not a tensor.
2004      *
2005      * Outputs:
2006      *
2007      * * <b>output</b>: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
     *       the quantization parameters of the output cannot be omitted. However, values of the quantization
2009      *       parameters do not affect the result.
2010      *
2011      * @since 12
2012      */
2013     OH_NN_OPS_EQUAL = 70,
2014 
    /**
2016      * For <b>input1</b> and <b>input2</b>, calculate the result of input1[i] > input2[i] for each pair of elements,
2017      * where i is the index of each element in the input tensor.
2018      *
2019      * Inputs:
2020      *
2021      * * <b>input1</b>, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
2022      * * <b>input2</b>, can be a real number or a Boolean value if <b>input1</b> is a tensor and must be a tensor
2023      *       with the data type of real number or OH_NN_BOOL if <b>input1</b> is not a tensor.
2024      *
2025      * Outputs:
2026      *
2027      * * <b>output</b>: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
2028      *       the quantization parameters of the output cannot be omitted. However,
2029      *       values of the quantization parameters do not affect the result.
2030      *
2031      * @since 12
2032      */
2033     OH_NN_OPS_GREATER = 71,
2034 
2035     /**
2036      * For <b>input1</b> and <b>input2</b>, calculate the result of input1[i] != input2[i] for each pair of elements,
2037      * where i is the index of each element in the input tensor.
2038      *
2039      * Inputs:
2040      *
2041      * * <b>input1</b>, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
2042      * * <b>input2</b>, can be a real number or a Boolean value if <b>input1</b> is a tensor and must be a tensor
2043      *       with the data type of real number or OH_NN_BOOL if <b>input1</b> is not a tensor.
2044      *
2045      * Outputs:
2046      *
2047      * * <b>output</b>: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
2048      *       the quantization parameters of the output cannot be omitted. However,
2049      *       values of the quantization parameters do not affect the result.
2050      *
2051      * @since 12
2052      */
2053     OH_NN_OPS_NOT_EQUAL = 72,
2054 
2055     /**
2056      * For <b>input1</b> and <b>input2</b>, calculate the result of input1[i] >= input2[i] for each pair of elements,
2057      * where i is the index of each element in the input tensor.
2058      *
2059      * Inputs:
2060      *
2061      * * <b>input1</b>, can be a real number, Boolean value, or tensor whose data type is real number or OH_NN_BOOL.
2062      * * <b>input2</b>, can be a real number or a Boolean value if <b>input1</b> is a tensor and must be a tensor
2063      *       with the data type of real number or OH_NN_BOOL if <b>input1</b> is not a tensor.
2064      *
2065      * Outputs:
2066      *
2067      * * <b>output</b>: A tensor of the data type OH_NN_BOOL. When a quantization model is used,
2068      *       the quantization parameters of the output cannot be omitted. However,
2069      *       values of the quantization parameters do not affect the result.
2070      *
2071      * @since 12
2072      */
2073     OH_NN_OPS_GREATER_EQUAL = 73,
2074 
2075     /**
2076      * LeakyRelu takes input data (Tensor) and an argument alpha, and produces one output data (Tensor)
2077      * where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0,
2078      * is applied to the data tensor elementwise.
2079      *
2080      * Inputs:
2081      *
2082      * * <b>input</b>: <i>n</i>-dimensional input tensor.
2083      *
2084      * Parameters:
2085      *
2086      * * <b>negativeSlope</b>: Coefficient of leakage.
2087      *
2088      * Outputs:
2089      *
2090      * * <b>output</b>: A tensor, with the same data type and shape as the input tensor.
2091      *
2092      * @since 12
2093      */
2094     OH_NN_OPS_LEAKY_RELU = 74,
2095 
2096     /**
     * Computes a one-layer LSTM. This operator is usually supported via some custom implementation.
2098      *
2099      * Inputs:
2100      *
2101      * * <b>input</b>: <i>n</i>-dimensional tensor, shape is [seqLen, batchSize, inputSize].
2102      * * <b>wIh</b>: Weight tensor of input-layer to hidden-layer,
     *       shape is [numDirections * numLayers, 4 * hiddenSize, inputSize].
2104      * * <b>wHh</b>: Weight tensor of hidden-layer to hidden-layer,
     *       shape is [numDirections * numLayers, 4 * hiddenSize, hiddenSize].
2106      * * <b>bias</b>: Bias tensor of input-layer and hidden-layer to hidden-layer,
     *       shape is [numDirections * numLayers, 8 * hiddenSize].
2108      * * <b>hx</b>: Init state of hidden-layer, shape is [numDirections * numLayers, batchSize, hiddenSize].
2109      * * <b>cx</b>: Init state of cell, shape is [numDirections * numLayers, batchSize, hiddenSize].
2110      *
2111      * Parameters:
2112      *
2113      * * <b>bidirectional</b>: Whether the LSTM operation is bidirectional.
2114      * * <b>hasBias</b>: Whether the operation contains bias.
2115      * * <b>inputSize</b>: Size of input tensor.
2116      * * <b>hiddenSize</b>: Size of hidden state tensor.
2117      * * <b>numLayers</b>: Layers of LSTM network.
2118      * * <b>numDirections</b>: Number of directions, value is 2 if direction == bidirectional else 1.
     * * <b>dropout</b>: Dropout probability of each layer except the first layer.
     * * <b>zoneoutCell</b>: Probability that the cell state retains the previous state. Default: 0.
     * * <b>zoneoutHidden</b>: Probability that the hidden state retains the previous state. Default: 0.
2122      * * <b>projSize</b>: If projSize > 0, will use LSTM with projections of corresponding size. Default: 0.
2123      *
2124      * Outputs:
2125      *
2126      * * <b>output</b>: A tensor that concats all the intermediate output tensor of the hidden,
2127      *       shape is [seqLen, batchSize, numDirections * realHiddenSize].
2128      * * <b>hy</b>: The last output tensor of the hidden-layer,
2129      *       shape is [numDirections * numLayers, batchSize, realHiddenSize].
2130      * * <b>cy</b>: The last output tensor of the cell,
2131      *       shape is [numDirections * numLayers, batchSize, hiddenSize].
2132      *
2133      * @since 12
2134      */
2135     OH_NN_OPS_LSTM = 75,
2136 
2137     /**
2138      * Returns a tensor of the same type and shape as input tensor with its value clipped to min and max.
2139      * Any values less than <b>min</b> are set to <b>min</b>. Any values greater than <b>max</b> are set to <b>max</b>.
2140      *
2141      * Inputs:
2142      *
2143      * * <b>input</b>: <i>n</i>-dimensional tensor.
2144      *
2145      * Parameters:
2146      *
     * * <b>max</b>: Maximum value, above which an element is replaced by max. It must be a scalar (tensor of empty shape).
     * * <b>min</b>: Minimum value, below which an element is replaced by min. It must be a scalar (tensor of empty shape).
2149      *
2150      * Outputs:
2151      *
     * * <b>output</b>: <i>n</i>-dimensional tensor, with the same data type and shape as the input tensor.
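     *
     *       For example (illustrative values): clipping the input [-2.0, 0.5, 7.0] with min = 0 and max = 6
     *       produces [0.0, 0.5, 6.0].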
2153      *
2154      * @since 12
2155      */
2156     OH_NN_OPS_CLIP = 76,
2157 
2158     /**
     * Determines whether all elements in a given tensor are non-zero. It returns a Boolean tensor
     * where each element is 'True' if the corresponding element in the input tensor is non-zero, and 'False' otherwise.
2161      *
2162      * Inputs:
2163      *
2164      * * <b>input</b>: <i>n</i>-dimensional tensor of shape <b>(N,*)</b>,
2165      *       where * indicates any number of additional dimensions.
     * * <b>axis</b>: scalar or tensor, indicating the dimension to be computed.
2167      *
2168      * Parameters:
2169      *
2170      * * <b>keepDims</b>: Whether to keep dimension info.
2171      *
2172      * Outputs:
2173      *
2174      * * <b>output</b>: 1-dimension or n-dimension tensor with boolean data type.
2175      *
2176      * @since 12
2177      */
2178     OH_NN_OPS_ALL = 77,
2179 
2180     /**
     * Asserts that the given condition is true.
     * If <b>condition</b> evaluates to false, the list of tensors in <b>data</b> is printed.
     * <b>summarize</b> determines how many entries of the tensors to print.
2184      *
2185      * Inputs:
2186      *
     * * <b>condition</b>: The condition to evaluate.
2188      * * <b>data</b>: The tensors to print out when condition is false.
2189      *
2190      * Parameters:
2191      *
     * * <b>summarize</b>: The number of entries of each tensor to print.
2193      *
2194      * Outputs:
2195      *
2196      * * <b>output</b>: Result value judged by condition. If the condition is not true, an Error is returned.
2197      *
2198      * @since 12
2199      */
2200     OH_NN_OPS_ASSERT = 78,
2201 
2202     /**
2203      * Calculates the cosine of the given input tensor, element-wise.
2204      *
2205      * Inputs:
2206      *
2207      * * <b>input</b>: <i>n</i>-dimensional tensor.
2208      *
2209      * Outputs:
2210      *
2211      * * <b>output</b>: <i>n</i>-dimensional tensor. The cosine of the input tensor computed element-wise.
2212      *
2213      * @since 12
2214      */
2215     OH_NN_OPS_COS = 79,
2216 
2217     /**
     * Calculates the natural logarithm of the input.
2219      *
2220      * Inputs:
2221      *
2222      * * <b>input</b>: <i>n</i>-dimensional tensor. The value must be greater than 0.
2223      *
2224      * Outputs:
2225      *
2226      * * <b>output</b>: <i>n</i>-dimensional tensor with the same shape as the input tensor.
2227      *
2228      * @since 12
2229      */
2230     OH_NN_OPS_LOG = 80,
2231 
2232     /**
2233      * Calculates the logical value of <b>input1</b> and <b>input2</b> element-wise.
2234      *
2235      * Inputs:
2236      *
     * * <b>input1</b>: Tensor of Boolean type, or a tensor that can be implicitly converted to Boolean.
     * * <b>input2</b>: Tensor of Boolean type, or a tensor that can be implicitly converted to Boolean.
2239      *
2240      * Outputs:
2241      *
2242      * * <b>output</b>: <i>n</i>-dimensional tensor. The calculation result of logical-and
2243      *       and the numeric type is OH_NN_BOOL.
2244      *
2245      * @since 12
2246      */
2247     OH_NN_OPS_LOGICAL_AND = 81,
2248 
2249     /**
2250      * Calculates the logical value of NOT <b>input</b> element-wise.
2251      *
2252      * Inputs:
2253      *
     * * <b>input</b>: Tensor of Boolean type, or a tensor that can be implicitly converted to Boolean.
2255      *
2256      * Outputs:
2257      *
2258      * * <b>output</b>: <i>n</i>-dimensional tensor. The calculation result of logical-not
2259      *       and the numeric type is OH_NN_BOOL.
2260      *
2261      * @since 12
2262      */
2263     OH_NN_OPS_LOGICAL_NOT = 82,
2264 
2265     /**
2266      * Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
2267      * Inputs of input1 and input2 comply with the implicit type conversion rules to make the data types consistent.
2268      * The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
2269      * both dtypes cannot be bool, and the shapes of them could be broadcast.
2270      * When the inputs are one tensor and one scalar, the scalar could only be a constant.
2271      *
2272      * Inputs:
2273      *
     * * <b>input1</b>: dividend, which is a numeric or OH_NN_BOOL scalar,
     *       or an <i>n</i>-dimensional tensor of a numeric data type.
     * * <b>input2</b>: divisor. When the first input is an <i>n</i>-dimensional tensor,
     *       the second input can be a numeric or OH_NN_BOOL scalar, or an <i>n</i>-dimensional
     *       tensor of a numeric data type; when the first input is a numeric or OH_NN_BOOL scalar,
     *       the second input must be an <i>n</i>-dimensional tensor of a numeric data type.
2280      *
2281      * Outputs:
2282      *
2283      * * <b>output</b>: <i>n</i>-dimensional tensor. The shape is the same as the input after broadcasting,
2284      *       and the data type is the data type with the highest accuracy of the two inputs.
2285      *
2286      * @since 12
2287      */
2288     OH_NN_OPS_MOD = 83,
2289 
2290     /**
     * Calculates the negative (additive inverse) value of the input tensor element-wise.
2292      *
2293      * Inputs:
2294      *
2295      * * <b>input</b>: <i>n</i>-dimensional tensor with numeric data type.
2296      *
2297      * Outputs:
2298      *
2299      * * <b>output</b>: <i>n</i>-dimensional tensor with the same shape and data type as the input tensor.
2300      *
2301      * @since 12
2302      */
2303     OH_NN_OPS_NEG = 84,
2304 
2305     /**
     * Calculates the reciprocal of a tensor element-wise.
2307      *
2308      * Inputs:
2309      *
2310      * * <b>input</b>: <i>n</i>-dimensional tensor.
2311      *
2312      * Outputs:
2313      *
2314      * * <b>output</b>: <i>n</i>-dimensional tensor with the same shape and data type as the input tensor.
2315      *
2316      * @since 12
2317      */
2318     OH_NN_OPS_RECIPROCAL = 85,
2319 
2320     /**
     * Calculates the sine of the input element-wise.
2322      *
2323      * Inputs:
2324      *
2325      * * <b>input</b>: <i>n</i>-dimensional tensor.
2326      *
2327      * Outputs:
2328      *
     * * <b>output</b>: <i>n</i>-dimensional tensor with the same data type and shape as the input tensor.
2330      *
2331      * @since 12
2332      */
2333     OH_NN_OPS_SIN = 86,
2334 
2335     /**
2336      * Selects elements from input1 or input2 based on condition and returns a tensor.
2337      *
2338      * Inputs:
2339      *
2340      * * <b>condition</b>: <i>n</i>-dimensional tensor or scalar. Judging conditions. If the OH_NN_BOOL element
2341      *       is True, then the element corresponding to the position of input1 is selected, and if the OH_NN_BOOL
2342      *       element is False, the element corresponding to the position of input2 is selected.
2343      * * <b>input1</b>: <i>n</i>-dimensional tensor. First tensor to be chosen.
2344      * * <b>input2</b>: <i>n</i>-dimensional tensor. Second tensor to be chosen.
2345      *
2346      * Outputs:
2347      *
2348      * * <b>output</b>: <i>n</i>-dimensional tensor with the same shape and data type as the input1 and input2.
2349      *
2350      * @since 12
2351      */
2352     OH_NN_OPS_WHERE = 87,
2353 
2354     /**
2355      * Converts a sparse tensor into a dense tensor.
2356      *
2357      * Inputs:
2358      *
     * * <b>indices</b>: 2-dimensional tensor. Position of an element in the sparse tensor.
2360      *       Each element value must be non-negative. The shape is (N, 2).
2361      * * <b>values</b>: 1-dimensional tensor. The value corresponding to the location of indices. The shape is (N).
2362      * * <b>sparseShape</b>: 2-dimensional tensor. The shape of a sparse tensor. The value consists of
2363      *       two positive integers, indicating that the shape of the sparse tensor is (N, C).
2364      *
2365      * Outputs:
2366      *
2367      * * <b>output</b>: A tensor. The data type is the same as values, and the shape is specified by sparseShape.
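     *
     *       For example (illustrative values): indices = [[0, 1], [1, 2]], values = [5, 8], and
     *       sparseShape = [2, 3] produce the dense output [[0, 5, 0], [0, 0, 8]].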
2368      *
2369      * @since 12
2370      */
2371     OH_NN_OPS_SPARSE_TO_DENSE = 88,
2372 
2373     /**
2374      * Calculates the logical value of <b>input1</b> or <b>input2</b> element-wise.
2375      *
2376      * Inputs:
2377      *
2378      * * <b>input1</b>: Tensor of type boolean or convert to boolean implicitly.
2379      * * <b>input2</b>: Tensor of type boolean or convert to boolean implicitly.
2380      *
2381      * Outputs:
2382      *
     * * <b>output</b>: <i>n</i>-dimensional tensor. The calculation result of logical-or
2384      *       and the numeric type is OH_NN_BOOL.
2385      *
2386      * @since 12
2387      */
2388     OH_NN_OPS_LOGICAL_OR = 89,
2389 
2390     /**
     * Returns, element-wise, the smallest integer that is not less than the input.
2392      *
2393      * Inputs:
2394      *
2395      * * <b>input</b>: <i>n</i>-dimensional tensor.
2396      *
2397      * Outputs:
2398      *
     * * <b>output</b>: The tensor after the ceiling operation.
2400      *
2401      * @since 12
2402      */
2403     OH_NN_OPS_CEIL = 90,
2404 
2405     /**
     * Crops the given tensor according to the axis and offset.
2407      *
2408      * Inputs:
2409      *
2410      * * <b>input</b>: <i>n</i>-dimensional tensor.
     * * <b>shape</b>: <i>1</i>-dimensional tensor indicating the dimensions of the cropped window.
2412      *
2413      * Parameters:
2414      *
2415      * * <b>axis</b>: Cropped dimension.
2416      * * <b>offset</b>: Cropped offset per dimension.
2417      *
2418      * Outputs:
2419      *
2420      * * <b>output</b>: Cropped output tensor.
2421      *
2422      * @since 12
2423      */
2424     OH_NN_OPS_CROP = 91,
2425 
2426     /**
2427      * The output of the object detection model is post-processed, including decoding the bounding box,
2428      * class probability and score of the model output, and then performing non-maximum suppression (NMS)
2429      * to remove the overlapping bounding box, and finally outputting the detection result.
2430      *
2431      * Inputs:
2432      *
2433      * * <b>bbox</b>: Boxes to be predicted.
     * * <b>scores</b>: Scores of all boxes.
2435      * * <b>anchors</b>: Information of boxes, includes box, variance and coordinates.
2436      *
2437      * Parameters:
2438      * * <b>inputSize</b>: The size of the input tensor.
2439      * * <b>scale</b>: The scaling factor used to convert the output from
2440      *       the normalized form to the original image coordinates.
2441      * * <b>nmsIoUThreshold</b>: The threshold of overlapping region during NMS.
     * * <b>nmsScoreThreshold</b>: The score threshold used to select the target bbox during NMS.
2443      * * <b>maxDetections</b>: Maximum of bboxes per image.
2444      * * <b>detectionsPerClass</b>: Maximum of bboxes per class.
2445      * * <b>maxClassesPerDetection</b>: Maximum of reserved classes per bboxes.
2446      * * <b>numClasses</b>: Number of target classes to be detected.
     * * <b>useRegularNms</b>: Whether to use regular NMS based on the IoU threshold.
     * * <b>outQuantized</b>: Whether the output needs to be quantized.
2449      *
2450      * Outputs:
2451      *
     * * <b>bboxes</b>: The coordinates of the detected target bboxes.
2453      * * <b>classes</b>: The target class index of target detected bboxes.
2454      * * <b>confidences</b>: The score of target detected bboxes.
2455      * * <b>numDetections</b>: The number of target detected bboxes.
2456      *
2457      * @since 12
2458      */
2459     OH_NN_OPS_DETECTION_POST_PROCESS = 92,
2460 
2461     /**
     * Returns, element-wise, the largest integer that is not greater than the input.
2463      *
2464      * Inputs:
2465      *
2466      * * <b>input</b>: <i>n</i>-dimensional tensor.
2467      *
2468      * Outputs:
2469      *
     * * <b>output</b>: The tensor after the floor operation.
2471      *
2472      * @since 12
2473      */
2474     OH_NN_OPS_FLOOR = 93,
2475 
2476     /**
     * Calculates the L2 normalization of the input along the given axis.
2478      *
2479      * Inputs:
2480      *
2481      * * <b>input</b>: Input to compute the L2-normalization.
2482      *
2483      * Parameters:
2484      *
2485      * * <b>axis</b>: The axis on which to apply normalization, -1 means last axis, default: 0.
     * * <b>epsilon</b>: Value added for numerical stability. Default: 1e-6.
2487      * * <b>activationType</b>: Activation function type.
2488      *
2489      * Outputs:
2490      *
2491      * * <b>output</b>: Result tensor with the same type and shape as input <b>input</b>.
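     *
     *       For example (illustrative values): L2-normalizing the vector [3.0, 4.0] along axis 0
     *       produces [0.6, 0.8].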
2492      *
2493      * @since 12
2494      */
2495     OH_NN_OPS_L2_NORMALIZE = 94,
2496 
2497     /**
2498      * Computes the log-softmax function to n-dimensional input tensor.
     * The input is transformed by the Softmax function and then by the log function to lie in the range [-inf, 0).
2500      *
2501      * Inputs:
2502      *
2503      * * <b>input</b>: <i>n</i>-dimensional tensor.
2504      *
2505      * Parameters:
2506      *
2507      * * <b>axis</b>: The axis to apply LogSoftmax operation, -1 means the last dimension.
2508      *
2509      * Outputs:
2510      *
2511      * * <b>output</b>: Tensor output. Has the same data type and shape as input.
2512      *
2513      * @since 12
2514      */
2515     OH_NN_OPS_LOG_SOFTMAX = 95,
2516 
2517     /**
2518      * Normalize over local input regions.
2519      *
2520      * Inputs:
2521      *
2522      * * <b>input</b>: <i>n</i>-dimensional tensor.
2523      *
2524      * Parameters:
2525      *
2526      * * <b>depthRadius</b>: Half-width of the 1-dimension normalization window.
2527      * * <b>bias</b>: Offset.
2528      * * <b>alpha</b>: Scale factor.
2529      * * <b>beta</b>: Exponent.
2530      * * <b>normRegion</b>: Specifies normalization region. Options: "ACROSS_CHNNEL".
2531      *
2532      * Outputs:
2533      *
2534      * * <b>output</b>: Result output tensor.
2535      *
2536      * @since 12
2537      */
2538     OH_NN_OPS_LRN = 96,
2539 
2540     /**
2541      * Calculates the minimum of <b>input1</b> and <b>input2</b> element-wise. The inputs of <b>input1</b> and
     * <b>input2</b> comply with the implicit type conversion rules to make the data types consistent.
2543      *
2544      * The input must be two tensors or one tensor and one scalar. When the input is two tensors, the data types
2545      * cannot be Boolean at the same time, and their shapes can be broadcast to the same size. When the inputs are
2546      * one tensor and one scalar, the scalar must be a constant.
2547      *
2548      * Inputs:
2549      *
2550      * * <b>input1</b>: <i>n</i>-dimensional tensor, whose data type can be number or Boolean.
2551      * * <b>input2</b>: <i>n</i>-dimensional tensor, whose data type can be number or Boolean.
2552      *
2553      * Outputs:
2554      *
2555      * * <b>output</b>: Minimum value of the elements of the two tensors.
2556      *
2557      * @since 12
2558      */
2559     OH_NN_OPS_MINIMUM = 97,
2560 
2561     /**
     * Calculates the rank of a tensor.
2563      * The rank of a tensor is the number of indices required to uniquely select each element of the tensor.
2564      *
2565      * Inputs:
2566      *
2567      * * <b>input</b>: <i>n</i>-dimensional tensor.
2568      *
2569      * Outputs:
2570      *
2571      * * <b>output</b>: Result tensor. 0-D int32 Tensor representing the rank of input.
2572      *
2573      * @since 12
2574      */
2575     OH_NN_OPS_RANK = 98,
2576 
2577     /**
2578      * Calculates the maximum value for input tensor along the specified dimension. If <b>keepDims</b> is set to
2579      * <b>false</b>, the number of dimensions is reduced for the input; if <b>keepDims</b> is set to <b>true</b>,
2580      * the number of dimensions is retained.
2581      *
2582      * Inputs:
2583      *
2584      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
2585      * * <b>axis</b>: dimension used to calculate the maximum value. The value is a 1D tensor.
     *       The value range of each element in <b>axis</b> is [-n, n).
2587      *
2588      * Parameters:
2589      *
2590      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
2591      * * <b>reduceToEnd</b>: boolean value, indicates whether the reduce operation needs to be performed
2592      *       until the last axis.
     * * <b>coeff</b>: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
2594      *
2595      * Outputs:
2596      *
2597      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
2598      *       If <b>keepDims</b> is <b>false</b>, m<n. If <b>keepDims</b> is <b>true</b>, m==n.
2599      *
2600      * @since 12
2601      */
2602     OH_NN_OPS_REDUCE_MAX = 99,
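
    /*
     * Shape example (illustrative, and equally applicable to the other reduce operators below):
     * for an input of shape [2, 3, 4] reduced along axis = [1], the output shape is [2, 4] when
     * keepDims is false and [2, 1, 4] when keepDims is true. If reduceToEnd were set to true,
     * the reduction would typically continue from the given axis through the last axis of the input.
     */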
2603 
2604     /**
2605      * Calculates the minimum value for input tensor along the specified dimension. If <b>keepDims</b> is set to
2606      * <b>false</b>, the number of dimensions is reduced for the input; if <b>keepDims</b> is set to <b>true</b>,
2607      * the number of dimensions is retained.
2608      *
2609      * Inputs:
2610      *
2611      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
2612      * * <b>axis</b>: dimension used to calculate the minimum value. The value is a 1D tensor.
     *       The value range of each element in <b>axis</b> is [-n, n).
2614      *
2615      * Parameters:
2616      *
2617      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
2618      * * <b>reduceToEnd</b>: boolean value, indicates whether the reduce operation needs to be performed
2619      *       until the last axis.
     * * <b>coeff</b>: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
2621      *
2622      * Outputs:
2623      *
2624      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
2625      *       If <b>keepDims</b> is <b>false</b>, m<n. If <b>keepDims</b> is <b>true</b>, m==n.
2626      *
2627      * @since 12
2628      */
2629     OH_NN_OPS_REDUCE_MIN = 100,
2630 
2631     /**
2632      * Calculates the numerical sum value for input tensor along the specified dimension. If <b>keepDims</b> is set to
2633      * <b>false</b>, the number of dimensions is reduced for the input; if <b>keepDims</b> is set to <b>true</b>,
2634      * the number of dimensions is retained.
2635      *
2636      * Inputs:
2637      *
2638      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
2639      * * <b>axis</b>: dimension used to calculate the sum value. The value is a 1D tensor.
     *       The value range of each element in <b>axis</b> is [-n, n).
2641      *
2642      * Parameters:
2643      *
2644      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
2645      * * <b>reduceToEnd</b>: boolean value, indicates whether the reduce operation needs to be performed
2646      *       until the last axis.
     * * <b>coeff</b>: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
2648      *
2649      * Outputs:
2650      *
2651      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
2652      *       If <b>keepDims</b> is <b>false</b>, m<n. If <b>keepDims</b> is <b>true</b>, m==n.
2653      *
2654      * @since 12
2655      */
2656     OH_NN_OPS_REDUCE_SUM = 101,
2657 
2658     /**
     * Rounds each element of a tensor to the nearest integer, with ties rounded to the nearest even integer
     * (round half to even).
2660      *
2661      * Inputs:
2662      *
2663      * * <b>input</b>: <i>n</i>-dimensional tensor.
2664      *
2665      * Outputs:
2666      *
2667      * * <b>output</b>: Result tensor with the same shape as the input.
2668      *
2669      * @since 12
2670      */
2671     OH_NN_OPS_ROUND = 102,
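
    /*
     * Example (illustrative): round-half-to-even maps 0.5 to 0, 1.5 to 2, 2.5 to 2, and -1.5 to -2,
     * while values that are not ties round to the nearest integer as usual (for example, 1.4 to 1).
     */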
2672 
2673     /**
2674      * Scatters a tensor into a new tensor depending on the specified indices.
2675      *
2676      * Inputs:
2677      *
2678      * * <b>indices</b>: The index of scattering in the new tensor with int32 or int64 data type.
2679      *       The rank of indices must be at least 2 and indicesShape[-1] <= len(shape).
2680      * * <b>updates</b>: The source tensor to be scattered. It has shape indicesShape[:-1]+shape[indicesShape[-1]:].
     * * <b>shape</b>: The shape of the output tensor; it has the same data type as <b>indices</b>.
2682      *
2683      * Outputs:
2684      *
     * * <b>output</b>: Result tensor with the same data type as <b>updates</b> and the same shape as <b>shape</b>.
2686      *
2687      * @since 12
2688      */
2689     OH_NN_OPS_SCATTER_ND = 103,
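
    /*
     * Shape example (illustrative): with indices of shape [4, 1] containing [[0], [2], [5], [7]],
     * updates of shape [4] containing [9, 10, 11, 12], and shape = [8], the output has shape [8]
     * and equals [9, 0, 10, 0, 0, 11, 0, 12]; positions not referenced by indices are zero.
     */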
2690 
2691     /**
     * Rearranges blocks of spatial data into depth.
     * The output tensor's height is inputHeight / blocksize;
     * the output tensor's width is inputWidth / blocksize;
     * the output tensor's depth is blocksize * blocksize * inputDepth.
     * The input tensor's height and width must be divisible by blocksize.
2697      *
2698      * Inputs:
2699      *
2700      * * <b>input</b>: <i>4</i>-dimensional tensor.
2701      *
2702      * Parameters:
2703      *
2704      * * <b>blocksize</b>: The block size used to divide spatial data. It must be >= 2.
2705      *
2706      * Outputs:
2707      *
2708      * * <b>output</b>: Result tensor with the same dataType as the input.
2709      *
2710      * @since 12
2711      */
2712     OH_NN_OPS_SPACE_TO_DEPTH = 104,
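
    /*
     * Shape example (illustrative, assuming an NHWC layout): for an input of shape [1, 4, 6, 3] and
     * blocksize = 2, each 2 x 2 spatial block is folded into the channel dimension, giving an output
     * of shape [1, 2, 3, 12]; the layout actually used depends on the backend's tensor format.
     */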
2713 
2714     /**
     * Computes the Swish activation function element-wise.
2716      *
2717      * Inputs:
2718      *
2719      * * <b>input</b>: <i>n</i>-dimensional tensor.
2720      *
2721      * Outputs:
2722      *
2723      * * <b>output</b>: Output tensor.
2724      *
2725      * @since 12
2726      */
2727     OH_NN_OPS_SWISH = 105,
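
    /*
     * For reference: Swish is commonly defined element-wise as swish(x) = x * sigmoid(x);
     * some formulations add a scaling factor beta inside the sigmoid.
     */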
2728 
2729     /**
     * Calculates the L2 norm of the input tensor along the specified axis. The reduced dimension is either
     * removed or kept with size 1, depending on the <b>keepDims</b> parameter, which controls whether the
     * output has the same number of dimensions as the input.
2734      *
2735      * Inputs:
2736      *
2737      * * <b>input</b>: input tensor.
     * * <b>axis</b>: Dimensions along which the L2 norm is calculated.
2739      *
2740      * Parameters:
2741      *
2742      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
2743      * * <b>reduceToEnd</b>: boolean value, indicates whether the reduce operation needs to be performed
2744      *       until the last axis.
     * * <b>coeff</b>: An OH_NN_FLOAT32 scalar that represents the scale factor of the output.
2746      *
2747      * Outputs:
2748      *
2749      * * <b>output</b>: Result tensor with the same dataType as the input.
2750      *
2751      * @since 12
2752      */
2753     OH_NN_OPS_REDUCE_L2 = 106,
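
    /*
     * Worked example (illustrative): for the input [[3.0, 4.0]] reduced along axis = [1], the L2 norm is
     * sqrt(3^2 + 4^2) = 5.0, so the output is [5.0] when keepDims is false and [[5.0]] when keepDims is true.
     */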
2754 
2755     /**
     * Computes the HardSigmoid activation function element-wise.
2757      *
2758      * Inputs:
2759      *
2760      * * <b>input</b>: <i>n</i>-dimensional tensor.
2761      *
2762      * Outputs:
2763      *
2764      * * <b>output</b>: Result tensor with the same shape and dataType as the input.
2765      *
2766      * @since 12
2767      */
2768     OH_NN_OPS_HARD_SIGMOID = 107,
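
    /*
     * For reference: a widely used definition is hard_sigmoid(x) = max(0, min(1, x / 6 + 0.5)), a
     * piecewise-linear approximation of the sigmoid. The exact slope and offset vary between frameworks,
     * so consult the backend documentation for the variant it implements.
     */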
2769 
2770     /**
     * Gathers elements of the input tensor at the locations specified by the index tensor.
2772      *
2773      * Inputs:
2774      *
2775      * * <b>input</b>: <i>n</i>-dimensional tensor.
2776      * * <b>indices</b>: index tensor.
2777      *
2778      * Outputs:
2779      *
     * * <b>output</b>: Result tensor with the same data type as the input.
2781      *
2782      * @since 12
2783      */
2784     OH_NN_OPS_GATHER_ND = 108,
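
    /*
     * Example (illustrative): for input [[1, 2], [3, 4]] and indices [[0, 0], [1, 1]], the output is
     * [1, 4]; in general the output shape is indicesShape[:-1] + inputShape[indicesShape[-1]:].
     */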
2785 } OH_NN_OperationType;
2786 
2787 /**
2788  * @brief Enumerates the tensor data types.
2789  *
2790  * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used
2791  * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}.
2792  * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the
2793  * tensor type. Assume that the <b>pad</b> parameter of the {@link OH_NN_OPS_CONV2D} operator is being set.
2794  * You need to set the <b>type</b> attribute of the {@link OH_NN_Tensor} instance to {@link OH_NN_CONV2D_PAD}.
2795  * The settings of other operator parameters are similar. The enumerated values are named
2796  * in the format OH_NN_{<i>Operator name</i>}_{<i>Attribute name</i>}.
2797  *
2798  * @since 9
2799  * @version 2.0
2800  */
2801 typedef enum {
2802     /** This enumerated value is used when the tensor is used as the input or output of a model (or operator). */
2803     OH_NN_TENSOR = 0,
2804 
2805     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2806      *  of the Add operator. */
2807     OH_NN_ADD_ACTIVATIONTYPE = 1,
2808 
2809     /** This enumerated value is used when the tensor is used as the <b>kernelSize</b> parameter
2810      *  of the AvgPool operator. */
2811     OH_NN_AVG_POOL_KERNEL_SIZE = 2,
2812     /** This enumerated value is used when the tensor is used as the <b>stride</b> parameter
2813      *  of the AvgPool operator. */
2814     OH_NN_AVG_POOL_STRIDE = 3,
2815     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter
2816      *  of the AvgPool operator. */
2817     OH_NN_AVG_POOL_PAD_MODE = 4,
2818     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the AvgPool operator. */
2819     OH_NN_AVG_POOL_PAD = 5,
2820     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2821      *  of the AvgPool operator. */
2822     OH_NN_AVG_POOL_ACTIVATION_TYPE = 6,
2823 
    /** This enumerated value is used when the tensor is used as the <b>epsilon</b> parameter
2825      *  of the BatchNorm operator. */
2826     OH_NN_BATCH_NORM_EPSILON = 7,
2827 
2828     /** This enumerated value is used when the tensor is used as the <b>blockSize</b> parameter
2829      *  of the BatchToSpaceND operator. */
2830     OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE = 8,
2831     /** This enumerated value is used when the tensor is used as the <b>crops</b> parameter
2832      *  of the BatchToSpaceND operator. */
2833     OH_NN_BATCH_TO_SPACE_ND_CROPS = 9,
2834 
2835     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Concat operator. */
2836     OH_NN_CONCAT_AXIS = 10,
2837 
2838     /** This enumerated value is used when the tensor is used as the <b>strides</b> parameter
2839      *  of the Conv2D operator. */
2840     OH_NN_CONV2D_STRIDES = 11,
2841     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the Conv2D operator. */
2842     OH_NN_CONV2D_PAD = 12,
2843     /** This enumerated value is used when the tensor is used as the <b>dilation</b> parameter
2844      *  of the Conv2D operator. */
2845     OH_NN_CONV2D_DILATION = 13,
2846     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter
2847      *  of the Conv2D operator. */
2848     OH_NN_CONV2D_PAD_MODE = 14,
2849     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2850      *  of the Conv2D operator. */
2851     OH_NN_CONV2D_ACTIVATION_TYPE = 15,
2852     /** This enumerated value is used when the tensor is used as the <b>group</b> parameter of the Conv2D operator. */
2853     OH_NN_CONV2D_GROUP = 16,
2854 
2855     /** This enumerated value is used when the tensor is used as the <b>strides</b> parameter
2856      *  of the Conv2DTranspose operator. */
2857     OH_NN_CONV2D_TRANSPOSE_STRIDES = 17,
2858     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter
2859      *  of the Conv2DTranspose operator. */
2860     OH_NN_CONV2D_TRANSPOSE_PAD = 18,
2861     /** This enumerated value is used when the tensor is used as the <b>dilation</b> parameter
2862      *  of the Conv2DTranspose operator. */
2863     OH_NN_CONV2D_TRANSPOSE_DILATION = 19,
2864     /** This enumerated value is used when the tensor is used as the <b>outputPaddings</b> parameter
2865      *  of the Conv2DTranspose operator. */
2866     OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS = 20,
2867     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter
2868      *  of the Conv2DTranspose operator. */
2869     OH_NN_CONV2D_TRANSPOSE_PAD_MODE = 21,
2870     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2871      *  of the Conv2DTranspose operator. */
2872     OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE = 22,
2873     /** This enumerated value is used when the tensor is used as the <b>group</b> parameter
2874      *  of the Conv2DTranspose operator. */
2875     OH_NN_CONV2D_TRANSPOSE_GROUP = 23,
2876 
2877     /** This enumerated value is used when the tensor is used as the <b>strides</b> parameter
2878      *  of the DepthwiseConv2dNative operator. */
2879     OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES = 24,
2880     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter
2881      *  of the DepthwiseConv2dNative operator. */
2882     OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD = 25,
2883     /** This enumerated value is used when the tensor is used as the <b>dilation</b> parameter
2884      *  of the DepthwiseConv2dNative operator. */
2885     OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION = 26,
2886     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter
2887      *  of the DepthwiseConv2dNative operator. */
2888     OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE = 27,
2889     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2890      *  of the DepthwiseConv2dNative operator. */
2891     OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE = 28,
2892 
2893     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2894      *  of the Div operator. */
2895     OH_NN_DIV_ACTIVATIONTYPE = 29,
2896 
2897     /** This enumerated value is used when the tensor is used as the <b>mode</b> parameter of the Eltwise operator. */
2898     OH_NN_ELTWISE_MODE = 30,
2899 
2900     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter
2901      *  of the FullConnection operator. */
2902     OH_NN_FULL_CONNECTION_AXIS = 31,
2903     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2904      *  of the FullConnection operator. */
2905     OH_NN_FULL_CONNECTION_ACTIVATIONTYPE = 32,
2906 
2907     /** This enumerated value is used when the tensor is used as the <b>transposeA</b> parameter
2908      *  of the Matmul operator. */
2909     OH_NN_MATMUL_TRANSPOSE_A = 33,
2910     /** This enumerated value is used when the tensor is used as the <b>transposeB</b> parameter
2911      *  of the Matmul operator. */
2912     OH_NN_MATMUL_TRANSPOSE_B = 34,
2913     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2914      *  of the Matmul operator. */
2915     OH_NN_MATMUL_ACTIVATION_TYPE = 35,
2916 
2917     /** This enumerated value is used when the tensor is used as the <b>kernelSize</b> parameter
2918      *  of the MaxPool operator. */
2919     OH_NN_MAX_POOL_KERNEL_SIZE = 36,
2920     /** This enumerated value is used when the tensor is used as the <b>stride</b> parameter
2921      *  of the MaxPool operator. */
2922     OH_NN_MAX_POOL_STRIDE = 37,
2923     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter
2924      *  of the MaxPool operator. */
2925     OH_NN_MAX_POOL_PAD_MODE = 38,
2926     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the MaxPool operator. */
2927     OH_NN_MAX_POOL_PAD = 39,
2928     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2929      *  of the MaxPool operator. */
2930     OH_NN_MAX_POOL_ACTIVATION_TYPE = 40,
2931 
2932     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2933      *  of the Mul operator. */
2934     OH_NN_MUL_ACTIVATION_TYPE = 41,
2935 
2936     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the OneHot operator. */
2937     OH_NN_ONE_HOT_AXIS = 42,
2938 
2939     /** This enumerated value is used when the tensor is used as the <b>constantValue</b> parameter
2940      *  of the Pad operator. */
2941     OH_NN_PAD_CONSTANT_VALUE = 43,
2942 
2943     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
2944      *  of the Scale operator. */
2945     OH_NN_SCALE_ACTIVATIONTYPE = 44,
2946     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Scale operator. */
2947     OH_NN_SCALE_AXIS = 45,
2948 
2949     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Softmax operator. */
2950     OH_NN_SOFTMAX_AXIS = 46,
2951 
2952     /** This enumerated value is used when the tensor is used as the <b>BlockShape</b> parameter
2953      *  of the SpaceToBatchND operator. */
2954     OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE = 47,
2955     /** This enumerated value is used when the tensor is used as the <b>Paddings</b> parameter
2956      *  of the SpaceToBatchND operator. */
2957     OH_NN_SPACE_TO_BATCH_ND_PADDINGS = 48,
2958 
2959     /** This enumerated value is used when the tensor is used as the <b>Axis</b> parameter of the Split operator. */
2960     OH_NN_SPLIT_AXIS = 49,
2961     /** This enumerated value is used when the tensor is used as the <b>OutputNum</b> parameter
2962      *  of the Split operator. */
2963     OH_NN_SPLIT_OUTPUT_NUM = 50,
2964     /** This enumerated value is used when the tensor is used as the <b>SizeSplits</b> parameter
2965      *  of the Split operator. */
2966     OH_NN_SPLIT_SIZE_SPLITS = 51,
2967 
2968     /** This enumerated value is used when the tensor is used as the <b>Axis</b> parameter of the Squeeze operator. */
2969     OH_NN_SQUEEZE_AXIS = 52,
2970 
2971     /** This enumerated value is used when the tensor is used as the <b>Axis</b> parameter of the Stack operator. */
2972     OH_NN_STACK_AXIS = 53,
2973 
2974     /** This enumerated value is used when the tensor is used as the <b>BeginMask</b> parameter
2975      *  of the StridedSlice operator. */
2976     OH_NN_STRIDED_SLICE_BEGIN_MASK = 54,
2977     /** This enumerated value is used when the tensor is used as the <b>EndMask</b> parameter
2978      *  of the StridedSlice operator. */
2979     OH_NN_STRIDED_SLICE_END_MASK = 55,
2980     /** This enumerated value is used when the tensor is used as the <b>EllipsisMask</b> parameter
2981      *  of the StridedSlice operator. */
2982     OH_NN_STRIDED_SLICE_ELLIPSIS_MASK = 56,
2983     /** This enumerated value is used when the tensor is used as the <b>NewAxisMask</b> parameter
2984      *  of the StridedSlice operator. */
2985     OH_NN_STRIDED_SLICE_NEW_AXIS_MASK = 57,
2986     /** This enumerated value is used when the tensor is used as the <b>ShrinkAxisMask</b> parameter
2987      *  of the StridedSlice operator. */
2988     OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK = 58,
2989 
2990     /** This enumerated value is used when the tensor is used as the <b>ActivationType</b> parameter
2991      *  of the Sub operator. */
2992     OH_NN_SUB_ACTIVATIONTYPE = 59,
2993 
2994     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
2995      *  of the ReduceMean operator. */
2996     OH_NN_REDUCE_MEAN_KEEP_DIMS = 60,
2997 
2998     /** This enumerated value is used when the tensor is used as the <b>newHeight</b> parameter
2999      *  of the ResizeBilinear operator. */
3000     OH_NN_RESIZE_BILINEAR_NEW_HEIGHT = 61,
3001     /** This enumerated value is used when the tensor is used as the <b>newWidth</b> parameter
3002      *  of the ResizeBilinear operator. */
3003     OH_NN_RESIZE_BILINEAR_NEW_WIDTH = 62,
3004     /** This enumerated value is used when the tensor is used as the <b>preserveAspectRatio</b> parameter
3005      *  of the ResizeBilinear operator. */
3006     OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO = 63,
3007     /** This enumerated value is used when the tensor is used as the <b>coordinateTransformMode</b> parameter
3008      *  of the ResizeBilinear operator. */
3009     OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE = 64,
3010     /** This enumerated value is used when the tensor is used as the <b>excludeOutside</b> parameter
3011      *  of the ResizeBilinear operator. */
3012     OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE = 65,
3013 
3014     /** This enumerated value is used when the tensor is used as the <b>beginNormAxis</b> parameter
3015      *  of the LayerNorm operator. */
3016     OH_NN_LAYER_NORM_BEGIN_NORM_AXIS = 66,
3017     /** This enumerated value is used when the tensor is used as the <b>epsilon</b> parameter
3018      *  of the LayerNorm operator. */
3019     OH_NN_LAYER_NORM_EPSILON = 67,
3020     /** This enumerated value is used when the tensor is used as the <b>beginParamsAxis</b> parameter
3021      *  of the LayerNorm operator. */
3022     OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS = 68,
3023     /** This enumerated value is used when the tensor is used as the <b>elementwiseAffine</b> parameter
3024      *  of the LayerNorm operator. */
3025     OH_NN_LAYER_NORM_ELEMENTWISE_AFFINE = 69,
3026 
3027     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
3028      *  of the ReduceProd operator. */
3029     OH_NN_REDUCE_PROD_KEEP_DIMS = 70,
3030 
3031     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
3032      *  of the ReduceAll operator. */
3033     OH_NN_REDUCE_ALL_KEEP_DIMS = 71,
3034 
3035     /** This enumerated value is used when the tensor is used as the <b>src_t</b> parameter
3036      *  of the QuantDTypeCast operator. */
3037     OH_NN_QUANT_DTYPE_CAST_SRC_T = 72,
3038     /** This enumerated value is used when the tensor is used as the <b>dst_t</b> parameter
3039      *  of the QuantDTypeCast operator. */
3040     OH_NN_QUANT_DTYPE_CAST_DST_T = 73,
3041 
3042     /** This enumerated value is used when the tensor is used as the <b>Sorted</b> parameter
3043      *  of the Topk operator. */
3044     OH_NN_TOP_K_SORTED = 74,
3045 
3046     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter
3047      *  of the ArgMax operator. */
3048     OH_NN_ARG_MAX_AXIS = 75,
3049     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
3050      *  of the ArgMax operator. */
3051     OH_NN_ARG_MAX_KEEPDIMS = 76,
3052 
3053     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter
3054      *  of the Unsqueeze operator. */
3055     OH_NN_UNSQUEEZE_AXIS = 77,
3056 
3057     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Unstack operator.
3058      *  @since 12
3059      */
3060     OH_NN_UNSTACK_AXIS = 78,
3061 
3062     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Flatten operator.
3063      *  @since 12
3064      */
3065     OH_NN_FLATTEN_AXIS = 79,
3066 
3067     /** This enumerated value is used when the tensor is used as the <b>blockSize</b> parameter
3068      *  of the DepthToSpace operator.
3069      *  @since 12
3070      */
3071     OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE = 80,
3072     /** This enumerated value is used when the tensor is used as the <b>mode</b> parameter
3073      *  of the DepthToSpace operator.
3074      *  @since 12
3075      */
3076     OH_NN_DEPTH_TO_SPACE_MODE = 81,
3077 
3078     /** This enumerated value is used when the tensor is used as the <b>start</b> parameter of the Range operator.
3079      *  @since 12
3080      */
3081     OH_NN_RANGE_START = 82,
3082     /** This enumerated value is used when the tensor is used as the <b>limit</b> parameter of the Range operator.
3083      *  @since 12
3084      */
3085     OH_NN_RANGE_LIMIT = 83,
3086     /** This enumerated value is used when the tensor is used as the <b>delta</b> parameter of the Range operator.
3087      *  @since 12
3088      */
3089     OH_NN_RANGE_DELTA = 84,
3090 
3091     /** This enumerated value is used when the tensor is used as the <b>dataType</b> parameter
3092      *  of the ConstantOfShape operator.
3093      *  @since 12
3094      */
3095     OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE = 85,
3096     /** This enumerated value is used when the tensor is used as the <b>value</b> parameter
3097      *  of the ConstantOfShape operator.
3098      *  @since 12
3099      */
3100     OH_NN_CONSTANT_OF_SHAPE_VALUE = 86,
3101 
3102     /** This enumerated value is used when the tensor is used as the <b>shape</b> parameter
3103      *  of the BroadcastTo operator.
3104      *  @since 12
3105      */
3106     OH_NN_BROADCAST_TO_SHAPE = 87,
3107 
3108     /** This enumerated value is used when the tensor is used as the <b>epsilon</b> parameter
3109      *  of the InstanceNorm operator.
3110      *  @since 12
3111      */
3112     OH_NN_INSTANCE_NORM_EPSILON = 88,
3113 
3114     /** This enumerated value is used when the tensor is used as the <b>base</b> parameter of the Exp operator.
3115      *  @since 12
3116      */
3117     OH_NN_EXP_BASE = 89,
3118     /** This enumerated value is used when the tensor is used as the <b>scale</b> parameter of the Exp operator.
3119      *  @since 12
3120      */
3121     OH_NN_EXP_SCALE = 90,
3122     /** This enumerated value is used when the tensor is used as the <b>shift</b> parameter of the Exp operator.
3123      *  @since 12
3124      */
3125     OH_NN_EXP_SHIFT = 91,
3126 
3127     /** This enumerated value is used when the tensor is used as the <b>negativeSlope</b> parameter
3128      *  of the LeakyRelu operator.
3129      *  @since 12
3130      */
3131     OH_NN_LEAKY_RELU_NEGATIVE_SLOPE = 92,
3132 
3133     /** This enumerated value is used when the tensor is used as the <b>bidirectional</b> parameter
3134      *  of the LSTM operator.
3135      *  @since 12
3136      */
3137     OH_NN_LSTM_BIDIRECTIONAL = 93,
3138     /** This enumerated value is used when the tensor is used as the <b>hasBias</b> parameter of the LSTM operator.
3139      *  @since 12
3140      */
3141     OH_NN_LSTM_HAS_BIAS = 94,
3142     /** This enumerated value is used when the tensor is used as the <b>inputSize</b> parameter
3143      *  of the LSTM operator.
3144      *  @since 12
3145      */
3146     OH_NN_LSTM_INPUT_SIZE = 95,
3147     /** This enumerated value is used when the tensor is used as the <b>hiddenSize</b> parameter
3148      *  of the LSTM operator.
3149      *  @since 12
3150      */
3151     OH_NN_LSTM_HIDDEN_SIZE = 96,
3152     /** This enumerated value is used when the tensor is used as the <b>numLayers</b> parameter
3153      *  of the LSTM operator.
3154      *  @since 12
3155      */
3156     OH_NN_LSTM_NUM_LAYERS = 97,
3157     /** This enumerated value is used when the tensor is used as the <b>numDirections</b> parameter
3158      *  of the LSTM operator.
3159      *  @since 12
3160      */
3161     OH_NN_LSTM_NUM_DIRECTIONS = 98,
3162     /** This enumerated value is used when the tensor is used as the <b>dropout</b> parameter of the LSTM operator.
3163      *  @since 12
3164      */
3165     OH_NN_LSTM_DROPOUT = 99,
3166     /** This enumerated value is used when the tensor is used as the <b>zoneoutCell</b> parameter
3167      *  of the LSTM operator.
3168      *  @since 12
3169      */
3170     OH_NN_LSTM_ZONEOUT_CELL = 100,
3171     /** This enumerated value is used when the tensor is used as the <b>zoneoutHidden</b> parameter
3172      *  of the LSTM operator.
3173      *  @since 12
3174      */
3175     OH_NN_LSTM_ZONEOUT_HIDDEN = 101,
3176     /** This enumerated value is used when the tensor is used as the <b>projSize</b> parameter
3177      *  of the LSTM operator.
3178      *  @since 12
3179      */
3180     OH_NN_LSTM_PROJ_SIZE = 102,
3181 
3182     /** This enumerated value is used when the tensor is used as the <b>max</b> parameter of the Clip operator.
3183      *  @since 12
3184      */
3185     OH_NN_CLIP_MAX = 103,
3186     /** This enumerated value is used when the tensor is used as the <b>min</b> parameter of the Clip operator.
3187      *  @since 12
3188      */
3189     OH_NN_CLIP_MIN = 104,
3190 
3191     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter of the All operator.
3192      *  @since 12
3193      */
3194     OH_NN_ALL_KEEP_DIMS = 105,
3195 
3196     /** This enumerated value is used when the tensor is used as the <b>summarize</b> parameter
3197      *  of the Assert operator.
3198      *  @since 12
3199      */
3200     OH_NN_ASSERT_SUMMARIZE = 106,
3201 
3202     /** This enumerated value is used when the tensor is used as the <b>scale</b> parameter of the pow operator.
3203      *  @since 12
3204      */
3205     OH_NN_POW_SCALE = 107,
3206     /** This enumerated value is used when the tensor is used as the <b>shift</b> parameter of the pow operator.
3207      *  @since 12
3208      */
3209     OH_NN_POW_SHIFT = 108,
3210 
3211     /** This enumerated value is used when the tensor is used as the <b>roundMode</b> parameter
3212      *  of the AvgPool operator.
3213      *  @since 12
3214      */
3215     OH_NN_AVG_POOL_ROUND_MODE = 109,
3216     /** This enumerated value is used when the tensor is used as the <b>global</b> parameter
3217      *  of the AvgPool operator.
3218      *  @since 12
3219      */
3220     OH_NN_AVG_POOL_GLOBAL = 110,
3221 
3222     /** This enumerated value is used when the tensor is used as the <b>hasBias</b> parameter
3223      *  of the FullConnection operator.
3224      *  @since 12
3225      */
3226     OH_NN_FULL_CONNECTION_HAS_BIAS = 111,
3227     /** This enumerated value is used when the tensor is used as the <b>useAxis</b> parameter
3228      *  of the FullConnection operator.
3229      *  @since 12
3230      */
3231     OH_NN_FULL_CONNECTION_USE_AXIS = 112,
3232 
3233     /** This enumerated value is used when the tensor is used as the <b>approximate</b> parameter
3234      *  of the GeLU operator.
3235      *  @since 12
3236      */
3237     OH_NN_GELU_APPROXIMATE = 113,
3238 
3239     /** This enumerated value is used when the tensor is used as the <b>roundMode</b> parameter
3240      *  of the MaxPool operator.
3241      *  @since 12
3242      */
3243     OH_NN_MAX_POOL_ROUND_MODE = 114,
3244     /** This enumerated value is used when the tensor is used as the <b>global</b> parameter
3245      *  of the MaxPool operator.
3246      *  @since 12
3247      */
3248     OH_NN_MAX_POOL_GLOBAL = 115,
3249 
3250     /** This enumerated value is used when the tensor is used as the <b>paddingMode</b> parameter
3251      *  of the Pad operator.
3252      *  @since 12
3253      */
3254     OH_NN_PAD_PADDING_MODE = 116,
3255 
3256     /** This enumerated value is used when the tensor is used as the <b>reduceToEnd</b> parameter
3257      *  of the ReduceMean operator.
3258      *  @since 12
3259      */
3260     OH_NN_REDUCE_MEAN_REDUCE_TO_END = 117,
3261     /** This enumerated value is used when the tensor is used as the <b>coeff</b> parameter
3262      *  of the ReduceMean operator.
3263      *  @since 12
3264      */
3265     OH_NN_REDUCE_MEAN_COEFF = 118,
3266 
3267     /** This enumerated value is used when the tensor is used as the <b>reduceToEnd</b> parameter
3268      *  of the ReduceProd operator.
3269      *  @since 12
3270      */
3271     OH_NN_REDUCE_PROD_REDUCE_TO_END = 119,
3272     /** This enumerated value is used when the tensor is used as the <b>coeff</b> parameter
3273      *  of the ReduceProd operator.
3274      *  @since 12
3275      */
3276     OH_NN_REDUCE_PROD_COEFF = 120,
3277 
3278     /** This enumerated value is used when the tensor is used as the <b>reduceToEnd</b> parameter
3279      *  of the ReduceAll operator.
3280      *  @since 12
3281      */
3282     OH_NN_REDUCE_ALL_REDUCE_TO_END = 121,
3283     /** This enumerated value is used when the tensor is used as the <b>coeff</b> parameter
3284      *  of the ReduceAll operator.
3285      *  @since 12
3286      */
3287     OH_NN_REDUCE_ALL_COEFF = 122,
3288 
3289     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter
3290      *  of the Topk operator.
3291      *  @since 12
3292      */
3293     OH_NN_TOP_K_AXIS = 123,
3294 
3295     /** This enumerated value is used when the tensor is used as the <b>topK</b> parameter
3296      *  of the ArgMax operator.
3297      *  @since 12
3298      */
3299     OH_NN_ARG_MAX_TOP_K = 124,
3300     /** This enumerated value is used when the tensor is used as the <b>outMaxValue</b> parameter
3301      *  of the ArgMax operator.
3302      *  @since 12
3303      */
3304     OH_NN_ARG_MAX_OUT_MAX_VALUE = 125,
3305 
3306     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter
3307      *  of the QuantDTypeCast operator.
3308      *  @since 12
3309      */
3310     OH_NN_QUANT_DTYPE_CAST_AXIS = 126,
3311 
3312     /** This enumerated value is used when the tensor is used as the <b>axes</b> parameter of the Slice operator.
3313      *  @since 12
3314      */
3315     OH_NN_SLICE_AXES = 127,
3316 
3317     /** This enumerated value is used when the tensor is used as the <b>dims</b> parameter of the Tile operator.
3318      *  @since 12
3319      */
3320     OH_NN_TILE_DIMS = 128,
3321 
3322     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the crop operator.
3323      *  @since 12
3324      */
3325     OH_NN_CROP_AXIS = 129,
3326     /** This enumerated value is used when the tensor is used as the <b>offset</b> parameter of the crop operator.
3327      *  @since 12
3328      */
3329     OH_NN_CROP_OFFSET = 130,
3330 
3331     /** This enumerated value is used when the tensor is used as the <b>inputSize</b> parameter
3332      *  of the detectionPostProcess operator.
3333      *  @since 12
3334      */
3335     OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE = 131,
3336     /** This enumerated value is used when the tensor is used as the <b>scale</b> parameter
3337      *  of the detectionPostProcess operator.
3338      *  @since 12
3339      */
3340     OH_NN_DETECTION_POST_PROCESS_SCALE = 132,
3341     /** This enumerated value is used when the tensor is used as the <b>nmsIoUThreshold</b>
3342      *  parameter of the detectionPostProcess operator.
3343      *  @since 12
3344      */
3345     OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD = 133,
3346     /** This enumerated value is used when the tensor is used as the <b>nmsScoreThreshold</b> parameter
3347      *  of the detectionPostProcess operator.
3348      *  @since 12
3349      */
3350     OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD = 134,
3351     /** This enumerated value is used when the tensor is used as the <b>maxDetections</b> parameter
3352      *  of the detectionPostProcess operator.
3353      *  @since 12
3354      */
3355     OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS = 135,
3356     /** This enumerated value is used when the tensor is used as the <b>detectionsPerClass</b> parameter
3357      *  of the detectionPostProcess operator.
3358      *  @since 12
3359      */
3360     OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS = 136,
3361     /** This enumerated value is used when the tensor is used as the <b>maxClassesPerDetection</b> parameter
3362      *  of the detectionPostProcess operator.
3363      *  @since 12
3364      */
3365     OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION = 137,
3366     /** This enumerated value is used when the tensor is used as the <b>numClasses</b> parameter
3367      *  of the detectionPostProcess operator.
3368      *  @since 12
3369      */
3370     OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES = 138,
3371     /** This enumerated value is used when the tensor is used as the <b>useRegularNms</b> parameter
3372      *  of the detectionPostProcess operator.
3373      *  @since 12
3374      */
3375     OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS = 139,
3376     /** This enumerated value is used when the tensor is used as the <b>outQuantized</b> parameter
3377      *  of the detectionPostProcess operator.
3378      *  @since 12
3379      */
3380     OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED = 140,
3381 
3382     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter
3383      *  of the L2Normalize operator.
3384      *  @since 12
3385      */
3386     OH_NN_L2_NORMALIZE_AXIS = 141,
3387     /** This enumerated value is used when the tensor is used as the <b>epsilon</b> parameter
3388      *  of the L2Normalize operator.
3389      *  @since 12
3390      */
3391     OH_NN_L2_NORMALIZE_EPSILON = 142,
3392     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter
3393      *  of the L2Normalize operator.
3394      *  @since 12
3395      */
3396     OH_NN_L2_NORMALIZE_ACTIVATION_TYPE = 143,
3397 
    /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the LogSoftmax operator.
3399      *  @since 12
3400      */
3401     OH_NN_LOG_SOFTMAX_AXIS = 144,
3402 
    /** This enumerated value is used when the tensor is used as the <b>depthRadius</b>
3404      *  parameter of the LRN operator.
3405      *  @since 12
3406      */
3407     OH_NN_LRN_DEPTH_RADIUS = 145,
3408     /** This enumerated value is used when the tensor is used as the <b>bias</b> parameter of the LRN operator.
3409      *  @since 12
3410      */
3411     OH_NN_LRN_BIAS = 146,
3412     /** This enumerated value is used when the tensor is used as the <b>alpha</b> parameter of the LRN operator.
3413      *  @since 12
3414      */
3415     OH_NN_LRN_ALPHA = 147,
3416     /** This enumerated value is used when the tensor is used as the <b>beta</b> parameter of the LRN operator.
3417      *  @since 12
3418      */
3419     OH_NN_LRN_BETA = 148,
3420     /** This enumerated value is used when the tensor is used as the <b>normRegion</b> parameter
3421      *  of the LRN operator.
3422      *  @since 12
3423      */
3424     OH_NN_LRN_NORM_REGION = 149,
3425 
3426     /** This enumerated value is used when the tensor is used as the <b>blockSize</b> parameter
3427      *  of the spaceToDepth operator.
3428      *  @since 12
3429      */
3430     OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE = 150,
3431 
3432     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
3433      *  of the ReduceMax operator.
3434      *  @since 12
3435      */
3436     OH_NN_REDUCE_MAX_KEEP_DIMS = 151,
3437     /** This enumerated value is used when the tensor is used as the <b>reduceToEnd</b> parameter
3438      *  of the ReduceMax operator.
3439      *  @since 12
3440      */
3441     OH_NN_REDUCE_MAX_REDUCE_TO_END = 152,
3442     /** This enumerated value is used when the tensor is used as the <b>coeff</b> parameter
3443      *  of the ReduceMax operator.
3444      *  @since 12
3445      */
3446     OH_NN_REDUCE_MAX_COEFF = 153,
3447 
3448     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
3449      *  of the ReduceMin operator.
3450      *  @since 12
3451      */
3452     OH_NN_REDUCE_MIN_KEEP_DIMS = 154,
3453     /** This enumerated value is used when the tensor is used as the <b>reduceToEnd</b> parameter
3454      *  of the ReduceMin operator.
3455      *  @since 12
3456      */
3457     OH_NN_REDUCE_MIN_REDUCE_TO_END = 155,
3458     /** This enumerated value is used when the tensor is used as the <b>coeff</b> parameter
3459      *  of the ReduceMin operator.
3460      *  @since 12
3461      */
3462     OH_NN_REDUCE_MIN_COEFF = 156,
3463 
3464     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
3465      *  of the ReduceSum operator.
3466      *  @since 12
3467      */
3468     OH_NN_REDUCE_SUM_KEEP_DIMS = 157,
3469     /** This enumerated value is used when the tensor is used as the <b>reduceToEnd</b> parameter
3470      *  of the ReduceSum operator.
3471      *  @since 12
3472      */
3473     OH_NN_REDUCE_SUM_REDUCE_TO_END = 158,
3474     /** This enumerated value is used when the tensor is used as the <b>coeff</b> parameter
3475      *  of the ReduceSum operator.
3476      *  @since 12
3477      */
3478     OH_NN_REDUCE_SUM_COEFF = 159,
3479 
3480     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter
3481      *  of the ReduceL2 operator.
3482      *  @since 12
3483      */
3484     OH_NN_REDUCE_L2_KEEP_DIMS = 160,
3485     /** This enumerated value is used when the tensor is used as the <b>reduceToEnd</b> parameter
3486      *  of the ReduceL2 operator.
3487      *  @since 12
3488      */
3489     OH_NN_REDUCE_L2_REDUCE_TO_END = 161,
3490     /** This enumerated value is used when the tensor is used as the <b>coeff</b> parameter
3491      *  of the ReduceL2 operator.
3492      *  @since 12
3493      */
3494     OH_NN_REDUCE_L2_COEFF = 162,
3495 } OH_NN_TensorType;
3496 
3497 /**
3498  * @brief This structure is used to store a 32-bit unsigned integer array.
3499  *
3500  * @since 9
3501  * @version 1.0
3502  */
3503 typedef struct OH_NN_UInt32Array {
3504     /** Pointer to the unsigned integer array */
3505     uint32_t *data;
3506     /** Array length */
3507     uint32_t size;
3508 } OH_NN_UInt32Array;
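
/*
 * Minimal usage sketch (illustrative names; assumes the caller keeps the buffer alive for as long as
 * the array is used):
 *
 *     uint32_t indices[3] = {0, 1, 2};
 *     OH_NN_UInt32Array inputIndices = {indices, 3};
 *
 * The structure itself only records a pointer and an element count; the caller manages the underlying buffer.
 */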
3509 
3510 /**
3511  * @brief Quantization information.
3512  *
3513  * In quantization scenarios, the 32-bit floating-point data type is quantized into
3514  * the fixed-point data type according to the following formula:
3515  \f[
3516     q = clamp(round(\frac{r}{s}+z), q_{min}, q_{max})
3517  \f]
3518  * s and z are quantization parameters, which are stored by <b>scale</b> and <b>zeroPoint</b>
3519  * in {@link OH_NN_QuantParam}.
 * r is a floating-point number, q is the quantization result, q_min is the lower bound of the quantization result,
 * and q_max is the upper bound of the quantization result. The calculation method is as follows:
3522  *
 \f[
  \begin{cases}
       q_{min} = -(1 << (numBits - 1)) \\
       q_{max} = (1 << (numBits - 1)) - 1 \\
   \end{cases}
 \f]
3530  * The clamp function is defined as follows:
3531  \f[
3532   \text{clamp}(x,min,max) =
3533   \begin{cases}
3534        \text{max} & \text{ if } x > \text{ max } \\
3535        \text{min} & \text{ if } x < \text{ min } \\
3536        x & \text{ otherwise } \\
3537    \end{cases}
3538  \f]
3539  *
3540  * @deprecated since 11
3541  * @useinstead {@link NN_QuantParam}
3542  * @since 9
3543  * @version 1.0
3544  */
3545 typedef struct OH_NN_QuantParam {
3546     /** Specifies the length of the numBits, scale, and zeroPoint arrays. In the per-layer quantization scenario,
3547      *  <b>quantCount</b> is usually set to <b>1</b>.
3548      *       That is, all channels of a tensor share a set of quantization parameters.
3549      *  In the per-channel quantization scenario, <b>quantCount</b> is usually the same as the number of tensor
3550      *  channels, and each channel uses its own quantization parameters.
3551      */
3552     uint32_t quantCount;
3553     /** Number of quantization bits */
3554     const uint32_t *numBits;
3555     /** Pointer to the scale data in the quantization formula */
3556     const double *scale;
3557     /** Pointer to the zero point data in the quantization formula */
3558     const int32_t *zeroPoint;
3559 } OH_NN_QuantParam;
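
/*
 * Worked example (illustrative values): with scale s = 0.1, zeroPoint z = 0, and numBits = 8,
 * the value r = 1.23 quantizes to q = clamp(round(1.23 / 0.1 + 0), q_min, q_max) = 12.
 *
 * A minimal per-layer parameter sketch (one set of parameters shared by all channels; the arrays
 * must remain valid while the structure is in use):
 *
 *     const uint32_t bits[1] = {8};
 *     const double scales[1] = {0.1};
 *     const int32_t zeroPoints[1] = {0};
 *     OH_NN_QuantParam quantParam = {1, bits, scales, zeroPoints};
 */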
3560 
3561 /**
3562  * @brief Defines the tensor structure.
3563  *
3564  * It is usually used to construct data nodes and operator parameters in a model graph. When constructing a tensor,
3565  * you need to specify the data type, number of dimensions, dimension information, and quantization information.
3566  *
3567  * @deprecated since 11
3568  * @useinstead {@link NN_TensorDesc}
3569  * @since 9
3570  * @version 1.0
3571  */
3572 typedef struct OH_NN_Tensor {
3573     /** Data type of the specified tensor. The value must be an enumerated value of {@link OH_NN_DataType}. */
3574     OH_NN_DataType dataType;
3575     /** Number of dimensions of the specified tensor */
3576     uint32_t dimensionCount;
    /** Dimension information (shape) of the specified tensor */
3578     const int32_t *dimensions;
3579     /** Quantization information of the specified tensor. The data type must be {@link OH_NN_QuantParam}. */
3580     const OH_NN_QuantParam *quantParam;
3581     /** Specifies the tensor type. The value of <b>type</b> is related to the tensor usage.
3582      *  When the tensor is used as the input or output of the model, set <b>type</b> to {@link OH_NN_TENSOR}.
3583      *  When a tensor is used as an operator parameter, select any enumerated value except {@link OH_NN_TENSOR}
3584      *  from {@link OH_NN_TensorType}.
3585      */
3586     OH_NN_TensorType type;
3587 } OH_NN_Tensor;
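
/*
 * Minimal sketch (illustrative values, not a complete model-building example) of describing an operator
 * parameter with this structure, here the pad parameter of Conv2D: only the type field distinguishes a
 * parameter tensor from a data node. The data type and shape below are assumptions for illustration.
 *
 *     const int32_t padDims[1] = {4};            // shape of the parameter tensor (assumed)
 *     OH_NN_Tensor padParam = {
 *         OH_NN_INT64,                           // dataType (assumed here for illustration)
 *         1,                                     // dimensionCount
 *         padDims,                               // dimensions
 *         NULL,                                  // quantParam (none)
 *         OH_NN_CONV2D_PAD                       // type: marks this tensor as the Conv2D pad parameter
 *     };
 */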
3588 
3589 /**
3590  * @brief Defines the memory structure.
3591  *
3592  * @deprecated since 11
3593  * @useinstead {@link NN_Tensor}
3594  * @since 9
3595  * @version 1.0
3596  */
3597 typedef struct OH_NN_Memory {
3598     /** Pointer to the shared memory. The shared memory is usually allocated by the underlying hardware driver. */
3599     void * const data;
3600     /** Records the length of the shared memory, in bytes. */
3601     const size_t length;
3602 } OH_NN_Memory;
3603 
3604 #ifdef __cplusplus
3605 }
3606 #endif // __cplusplus
3607 
3608 /** @} */
3609 #endif // NEURAL_NETWORK_RUNTIME_TYPE_H
3610