%% template file for generating NeuralNetworksTypes.h.
%% see README.md.
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup NeuralNetworks
 * @{
 */

/**
 * @file NeuralNetworksTypes.h
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H
#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H

/******************************************************************
 *
 * IMPORTANT NOTICE:
 *
 *   This file is part of Android's set of stable system headers
 *   exposed by the Android NDK (Native Development Kit).
 *
 *   Third-party source AND binary code relies on the definitions
 *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
 *
 *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
 *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
 *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
 *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
 */

#include <android/hardware_buffer.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/cdefs.h>

__BEGIN_DECLS

%insert Operand_1.0_Comment
typedef enum {
%insert Operand_1.0
%insert Operand_1.2
%insert Operand_1.3
} OperandCode;

%insert Operation_1.0_Comment
typedef enum {
    // Operations below are available since NNAPI feature level 1.

%insert Operation_1.0

    // Operations below are available since NNAPI feature level 2.

%insert Operation_1.1

    // Operations below are available since NNAPI feature level 3.

%insert Operation_1.2

    // Operations below are available since NNAPI feature level 4.

%insert Operation_1.3
} OperationCode;

%insert FusedActivationFunc

/**
 * Implicit padding algorithms.
 *
 * Available since NNAPI feature level 1.
 */
typedef enum {
    /**
     * SAME padding.
     * Padding on both ends is the "same":
     *     padding_to_beginning = total_padding / 2
     *     padding_to_end = (total_padding + 1) / 2
     * i.e., for an even amount of total padding, the padding on both ends is
     * exactly the same; for an odd amount, the padding at the end is bigger
     * than the padding at the beginning by 1.
     *
     * total_padding is a function of input size, stride, dilation and filter size.
     * It could be computed as follows:
     *     out_size = (input_size + stride - 1) / stride
     *     effective_filter_size = (filter_size - 1) * dilation + 1
     *     needed_input = (out_size - 1) * stride + effective_filter_size
     *     total_padding = max(0, needed_input - input_size)
     * The computation is the same for the horizontal and vertical directions.
     */
    ANEURALNETWORKS_PADDING_SAME = 1,

    /**
     * VALID padding.
     * No padding. When the input size is not evenly divisible by
     * the filter size, the input at the end that could not fill
     * the whole filter tile will simply be ignored.
     */
    ANEURALNETWORKS_PADDING_VALID = 2,
} PaddingCode;
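
/*
 * Example (a minimal, non-normative sketch): computing the implicit SAME
 * padding for one spatial dimension from the formulas documented above. The
 * function and variable names are local to this example.
 *
 *     static void computeSamePadding(uint32_t input_size, uint32_t stride,
 *                                    uint32_t dilation, uint32_t filter_size,
 *                                    uint32_t* padding_to_beginning,
 *                                    uint32_t* padding_to_end) {
 *         uint32_t out_size = (input_size + stride - 1) / stride;
 *         uint32_t effective_filter_size = (filter_size - 1) * dilation + 1;
 *         uint32_t needed_input = (out_size - 1) * stride + effective_filter_size;
 *         uint32_t total_padding =
 *                 needed_input > input_size ? needed_input - input_size : 0;
 *         *padding_to_beginning = total_padding / 2;
 *         *padding_to_end = (total_padding + 1) / 2;
 *     }
 *
 * For example, input_size = 10, stride = 2, dilation = 1, filter_size = 3
 * gives out_size = 5, needed_input = 11 and total_padding = 1, so the padding
 * is 0 at the beginning and 1 at the end.
 */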

%insert ExecutionPreference

%insert DeviceType

/**
 * NNAPI feature levels.
 *
 * Each update of the NNAPI specification yields a new NNAPI feature level enum value.
 * NNAPI feature level corresponds to an NNAPI specification version that a driver
 * and/or the NNAPI runtime can implement.
 *
 * A feature level up to and including "FEATURE_LEVEL_5" maps directly to
 * the Android API level that introduced the corresponding update of the NNAPI
 * specification. Feature levels after Android API level 31 have no association with
 * API level because the NNAPI specification can be updated between Android API
 * releases. Outputs of {@link ANeuralNetworksDevice_getFeatureLevel} and
 * {@link ANeuralNetworks_getRuntimeFeatureLevel} must be compared against
 * these enum values instead of the Android API level.
 */
typedef enum {
    /** NNAPI specification available in Android O-MR1, Android NNAPI feature level 1 */
    ANEURALNETWORKS_FEATURE_LEVEL_1 = 27,
    /** NNAPI specification available in Android P, Android NNAPI feature level 2 */
    ANEURALNETWORKS_FEATURE_LEVEL_2 = 28,
    /** NNAPI specification available in Android Q, Android NNAPI feature level 3 */
    ANEURALNETWORKS_FEATURE_LEVEL_3 = 29,
    /** NNAPI specification available in Android R, Android NNAPI feature level 4 */
    ANEURALNETWORKS_FEATURE_LEVEL_4 = 30,
    /**
     * NNAPI specification available in Android S, Android NNAPI feature level 5.
     * After Android S, the NNAPI specification can be updated between Android
     * API releases.
     */
    ANEURALNETWORKS_FEATURE_LEVEL_5 = 31,
} FeatureLevelCode;

/**
 * Result codes.
 *
 * <p>Any NNAPI function can return any result code, including result codes not
 * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
 * indicates a failure of some kind.</p>
 *
 * <p>Additional information about the nature of a failure can be obtained from
 * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
 * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef enum {
    /**
     * Operation was successful.
     */
    ANEURALNETWORKS_NO_ERROR = 0,

    /**
     * Failure caused by not enough available memory.
     */
    ANEURALNETWORKS_OUT_OF_MEMORY = 1,

    ANEURALNETWORKS_INCOMPLETE = 2,

    /**
     * Failure caused by unexpected null argument.
     */
    ANEURALNETWORKS_UNEXPECTED_NULL = 3,

    /**
     * Failure caused by invalid function arguments, invalid model definition,
     * invalid execution definition or invalid data at execution time.
     */
    ANEURALNETWORKS_BAD_DATA = 4,

    /**
     * Failure caused by failed model execution.
     */
    ANEURALNETWORKS_OP_FAILED = 5,

    /**
     * Failure caused by object being in the wrong state.
     */
    ANEURALNETWORKS_BAD_STATE = 6,

    /**
     * Failure caused by not being able to map a file into memory.
     * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer
     * not supported by the device.
     * Mitigate by reading its content into memory.
     */
    ANEURALNETWORKS_UNMAPPABLE = 7,

    /**
     * Failure caused by insufficient buffer size provided to a model output.
     */
    ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,

    /**
     * Failure caused by a device not being available.
     */
    ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,

    /**
     * Failure because a deadline could not be met for a task, but future
     * deadlines may still be met for the same task after a short delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,

    /**
     * Failure because a deadline could not be met for a task, and future
     * deadlines will likely also not be met for the same task even after a
     * short delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,

    /**
     * Failure because of a resource limitation within the driver, but future
     * calls for the same task may still succeed after a short delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,

    /**
     * Failure because of a resource limitation within the driver, and future
     * calls for the same task will likely also fail even after a short
     * delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,

    /**
     * Failure indicating an object is in a dead state.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_DEAD_OBJECT = 14,
} ResultCode;
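
/*
 * Example (a minimal, non-normative sketch): a typical pattern for checking
 * NNAPI result codes. The helper name is a placeholder chosen for this
 * example.
 *
 *     #include <stdio.h>
 *
 *     // Returns true on ANEURALNETWORKS_NO_ERROR, otherwise logs and returns false.
 *     static bool checkNnapiStatus(int status, const char* what) {
 *         if (status == ANEURALNETWORKS_NO_ERROR) {
 *             return true;
 *         }
 *         fprintf(stderr, "%s failed with result code %d\n", what, status);
 *         return false;
 *     }
 *
 * Failures reported with the _TRANSIENT codes may be retried after a short
 * delay; failures reported with the _PERSISTENT codes are unlikely to succeed
 * on retry.
 */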

/**
 * For {@link ANeuralNetworksModel_setOperandValue}, values with a
 * length smaller or equal to this will be immediately copied into
 * the model. The size is in bytes.
 *
 * Available since NNAPI feature level 1.
 */
enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 };

/**
 * For {@link ANeuralNetworksCompilation_setCaching}, specify the size
 * of the cache token required from the application. The size is in bytes.
 *
 * Available since NNAPI feature level 3.
 */
enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 };

/**
 * Different duration measurements.
 *
 * Durations are measured in nanoseconds.
 *
 * Available since NNAPI feature level 3.
 */
typedef enum {
    // Execution time on hardware (not driver, which runs on host processor).
    ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
    // Execution time in driver (including time on hardware). Excludes overhead
    // such as that of the runtime itself and the IPC needed for the runtime to
    // communicate with the driver.
    ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
    // Execution time on hardware, after all dependencies have been signaled.
    // If no dependencies are specified (for example, if the execution was scheduled other
    // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
    // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
    // Available since NNAPI feature level 4.
    ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
    // Execution time in driver, after all dependencies have been signaled. Excludes
    // overhead such as that of the runtime itself and the IPC needed for the runtime
    // to communicate with the driver.
    // If no dependencies are specified (for example, if the execution was scheduled other
    // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
    // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
    // Available since NNAPI feature level 4.
    ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
} DurationCode;
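
/*
 * Example (a minimal, non-normative sketch): querying the measured durations
 * of a completed execution. The functions used below are declared in
 * <android/NeuralNetworks.h>; "execution" is assumed to be an
 * ANeuralNetworksExecution created from a compilation that was restricted to
 * a single device with ANeuralNetworksCompilation_createForDevices, since
 * timing measurement requires that. Error checking is abbreviated.
 *
 *     uint64_t onHardwareNanos = 0;
 *     uint64_t inDriverNanos = 0;
 *     // Timing must be requested before the computation is scheduled.
 *     ANeuralNetworksExecution_setMeasureTiming(execution, true);
 *     if (ANeuralNetworksExecution_compute(execution) == ANEURALNETWORKS_NO_ERROR) {
 *         ANeuralNetworksExecution_getDuration(
 *                 execution, ANEURALNETWORKS_DURATION_ON_HARDWARE, &onHardwareNanos);
 *         ANeuralNetworksExecution_getDuration(
 *                 execution, ANEURALNETWORKS_DURATION_IN_DRIVER, &inDriverNanos);
 *     }
 */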

%insert Priority

/**
 * ANeuralNetworksMemory is an opaque type that represents memory.
 *
 * This type is used to represent shared memory, memory mapped files,
 * and similar memories.
 *
 * By using shared memory, a program can efficiently communicate to the
 * runtime and drivers the tensors that define a model. See
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
 * should typically create one shared memory object that contains every constant tensor
 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to
 * create shared memory from a file handle.
 * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to
 * create shared memory from an AHardwareBuffer handle.
 *
 * Memory objects can also be used to specify the input and output arguments of
 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
 *
 * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory},
 * {@link ANeuralNetworksExecution_setInputFromMemory} and
 * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared
 * memory object must be aligned on a boundary of a byte size that is a multiple
 * of the element type byte size, e.g., a tensor with
 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on a 4-byte boundary.
 *
 * It is the application's responsibility to ensure that there are no uses of
 * the memory after calling {@link ANeuralNetworksMemory_free}. This includes
 * any model which references this memory because of a call to
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation
 * created using such a model, any execution object or burst object created
 * using such a compilation, or any execution which references this memory
 * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or
 * {@link ANeuralNetworksExecution_setOutputFromMemory}.
 *
 * Available since NNAPI feature level 1.
 *
 * Starting at NNAPI feature level 4, the application may request creation of device native memory
 * from {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation
 * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and
 * {@link ANeuralNetworksMemory_createFromDesc}.
 */
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
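
/*
 * Example (a minimal, non-normative sketch): wrapping a read-only file of
 * constant tensor data in an ANeuralNetworksMemory. The functions used below
 * are declared in <android/NeuralNetworks.h>; the file name and size are
 * placeholders and error handling is abbreviated.
 *
 *     #include <fcntl.h>
 *     #include <sys/mman.h>
 *     #include <unistd.h>
 *
 *     const size_t kWeightsSize = 4 * 1024;  // Placeholder size in bytes.
 *     ANeuralNetworksMemory* memory = NULL;
 *     int fd = open("weights.bin", O_RDONLY);
 *     if (fd >= 0 &&
 *         ANeuralNetworksMemory_createFromFd(kWeightsSize, PROT_READ, fd, 0, &memory) ==
 *                 ANEURALNETWORKS_NO_ERROR) {
 *         // The memory can now back constant operands via
 *         // ANeuralNetworksModel_setOperandValueFromMemory().
 *     }
 *     // ... after every model, compilation and execution using it is freed:
 *     ANeuralNetworksMemory_free(memory);
 *     if (fd >= 0) close(fd);
 */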

/**
 * ANeuralNetworksModel is an opaque type that contains a description of the
 * mathematical operations that constitute the model.
 *
 * <p>Build the model by calling<ul>
 * <li>{@link ANeuralNetworksModel_create}</li>
 * <li>{@link ANeuralNetworksModel_addOperation}</li>
 * <li>{@link ANeuralNetworksModel_addOperand}</li>
 * </ul>
 *
 * This forms a graph in which each operation and operand is a node, a
 * directed edge from an operand to an operation indicates that the
 * operand is an input to the operation, and a directed edge from an
 * operation to an operand indicates that the operand is an output
 * from the operation. This graph must be acyclic.
 *
 * A model is completed by calling {@link ANeuralNetworksModel_finish}.
 * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
 *
 * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish}
 * has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a model at a given time. It is however safe for more than one
 * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the model after calling {@link ANeuralNetworksModel_free}.
 * This includes any compilation, execution object or burst object created using
 * the model.</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
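
/*
 * Example (a minimal, non-normative sketch): building a model that adds two
 * 2x2 float tensors. The functions and operand/operation codes used below are
 * declared in <android/NeuralNetworks.h>; error checking is omitted for
 * brevity.
 *
 *     uint32_t dims[] = {2, 2};
 *     ANeuralNetworksOperandType tensorType = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
 *                                              .dimensionCount = 2,
 *                                              .dimensions = dims,
 *                                              .scale = 0.0f,
 *                                              .zeroPoint = 0};
 *     ANeuralNetworksOperandType activationType = {
 *             .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = NULL};
 *
 *     ANeuralNetworksModel* model = NULL;
 *     ANeuralNetworksModel_create(&model);
 *     ANeuralNetworksModel_addOperand(model, &tensorType);      // operand 0: input a
 *     ANeuralNetworksModel_addOperand(model, &tensorType);      // operand 1: input b
 *     ANeuralNetworksModel_addOperand(model, &activationType);  // operand 2: fused activation
 *     ANeuralNetworksModel_addOperand(model, &tensorType);      // operand 3: output
 *
 *     int32_t noActivation = ANEURALNETWORKS_FUSED_NONE;
 *     ANeuralNetworksModel_setOperandValue(model, 2, &noActivation, sizeof(noActivation));
 *
 *     uint32_t addInputs[] = {0, 1, 2};
 *     uint32_t addOutputs[] = {3};
 *     ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, addInputs, 1, addOutputs);
 *
 *     uint32_t modelInputs[] = {0, 1};
 *     uint32_t modelOutputs[] = {3};
 *     ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs, 1, modelOutputs);
 *     ANeuralNetworksModel_finish(model);
 */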

/**
 * ANeuralNetworksCompilation is an opaque type that can be used to compile
 * a machine learning model.
 *
 * <p>To use:<ul>
 * <li>Create a new compilation instance by calling the
 *     {@link ANeuralNetworksCompilation_create} function or
 *     {@link ANeuralNetworksCompilation_createForDevices}.</li>
 * <li>Set any desired properties on the compilation (for example,
 *     {@link ANeuralNetworksCompilation_setPreference}).</li>
 * <li>Optionally, set the caching signature and the cache directory on the
 *     compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li>
 * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li>
 * <li>Use the compilation as many times as needed
 *     with {@link ANeuralNetworksExecution_create} and
 *     {@link ANeuralNetworksBurst_create}.</li>
 * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
 *     once all executions using the compilation have completed.</li></ul></p>
 *
 * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}.
 * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}.
 *
 * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish}
 * has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only
 * one thread modifies a compilation at a given time. It is however
 * safe for more than one thread to use the compilation once
 * {@link ANeuralNetworksCompilation_finish} has returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no other
 * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
 * This includes any execution object or burst object created using the compilation,
 * or any memory descriptor with the compilation as part of one of the roles specified by
 * {@link ANeuralNetworksMemoryDesc_addInputRole} or
 * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
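
/*
 * Example (a minimal, non-normative sketch): compiling a finished model. The
 * functions used below are declared in <android/NeuralNetworks.h>; "model" is
 * assumed to be a finished ANeuralNetworksModel and error checking is omitted
 * for brevity.
 *
 *     ANeuralNetworksCompilation* compilation = NULL;
 *     ANeuralNetworksCompilation_create(model, &compilation);
 *     ANeuralNetworksCompilation_setPreference(compilation,
 *                                              ANEURALNETWORKS_PREFER_SUSTAINED_SPEED);
 *     ANeuralNetworksCompilation_finish(compilation);
 *     // ... create executions or bursts from the compilation ...
 *     ANeuralNetworksCompilation_free(compilation);
 */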

/**
 * ANeuralNetworksExecution is an opaque type that can be used to apply a machine
 * learning model to a set of inputs.
 *
 * <p>To use:<ul>
 * <li>Create a new execution instance by calling the
 *     {@link ANeuralNetworksExecution_create} function.</li>
 * <li>Associate input buffers or memory regions to the model inputs with
 *     {@link ANeuralNetworksExecution_setInput} or
 *     {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
 * <li>Associate output buffers or memory regions to the model outputs with
 *     {@link ANeuralNetworksExecution_setOutput} or
 *     {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
 * <li>Optionally, configure the execution with
 *     {@link ANeuralNetworksExecution_setLoopTimeout},
 *     {@link ANeuralNetworksExecution_setMeasureTiming},
 *     {@link ANeuralNetworksExecution_setReusable}, or
 *     {@link ANeuralNetworksExecution_setTimeout}.</li>
 * <li>Apply the model with one of the following:</li><ul>
 *     <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute}
 *         or with {@link ANeuralNetworksExecution_startComputeWithDependencies},
 *         waiting for the execution to complete with
 *         {@link ANeuralNetworksEvent_wait}.</li>
 *     <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li>
 *     <li>Synchronously as part of an execution burst with
 *         {@link ANeuralNetworksExecution_burstCompute}.</li></ul>
 *     If the execution has been marked as reusable, then you can
 *     apply the model more than once.
 * <li>Destroy the execution with
 *     {@link ANeuralNetworksExecution_free}.</li></ul></p>
 *
 * <p>An output buffer or memory region must not overlap with any
 * other output buffer or memory region, with an input buffer or
 * memory region, or with an operand value in a memory object
 * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
 *
 * <p>An execution is in the preparation state after it is created by
 * {@link ANeuralNetworksExecution_create}. An execution may only be modified in the preparation
 * state. Scheduling a computation by calling {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute}, {@link ANeuralNetworksExecution_startCompute},
 * or {@link ANeuralNetworksExecution_startComputeWithDependencies} will change the state of
 * the execution object to the computation state. When the computation completes, the state of
 * the execution object will change from the computation state to the completed state.
 * The computation is completed when {@link ANeuralNetworksExecution_compute},
 * {@link ANeuralNetworksExecution_burstCompute}, or {@link ANeuralNetworksEvent_wait}
 * has returned.</p>
 *
 * <p>An execution can be applied to a model with
 * {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute},
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new
 * executions to do new evaluations of the model.</p>
 *
 * <p>Starting at NNAPI feature level 5, the application may call
 * {@link ANeuralNetworksExecution_setReusable} to set an execution to be reusable for multiple
 * computations. The application may schedule and evaluate a computation again from the completed
 * state of a reusable execution. The execution cannot be modified between computations.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies an execution at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
 *
 * <p>It is also the application's responsibility to ensure that the execution
 * either has never been scheduled or has completed (i.e., that
 * {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute}, or
 * {@link ANeuralNetworksEvent_wait} has returned) before calling
 * {@link ANeuralNetworksExecution_free}.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no other
 * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
 *
 * <p>It is the application's responsibility to ensure that there are no concurrent computations
 * scheduled and evaluated on the same execution, either by means of
 * {@link ANeuralNetworksExecution_compute} or
 * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous)
 * in different threads, or by means of
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous).
 * It is however safe to schedule and evaluate multiple computations on different executions
 * concurrently. (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
 * different burst objects.) The runtime makes no guarantee on the ordering of
 * completion of executions. If it's important to the application, the
 * application should enforce the ordering by ensuring that one execution
 * completes before the next is scheduled (for example, by scheduling all
 * executions synchronously within a single thread, or by scheduling all
 * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
 * calls to {@link ANeuralNetworksExecution_startCompute}); or by using
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a
 * list of events to be signaled before starting the actual evaluation.</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
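
/*
 * Example (a minimal, non-normative sketch): running one synchronous
 * computation for the two-input model sketched earlier. The functions used
 * below are declared in <android/NeuralNetworks.h>; "compilation" is assumed
 * to be a finished ANeuralNetworksCompilation and error checking is omitted
 * for brevity.
 *
 *     float a[4] = {1, 2, 3, 4};
 *     float b[4] = {5, 6, 7, 8};
 *     float sum[4];
 *
 *     ANeuralNetworksExecution* execution = NULL;
 *     ANeuralNetworksExecution_create(compilation, &execution);
 *     // NULL operand types: the fully specified types from the model are used.
 *     ANeuralNetworksExecution_setInput(execution, 0, NULL, a, sizeof(a));
 *     ANeuralNetworksExecution_setInput(execution, 1, NULL, b, sizeof(b));
 *     ANeuralNetworksExecution_setOutput(execution, 0, NULL, sum, sizeof(sum));
 *     ANeuralNetworksExecution_compute(execution);  // Blocks until completion.
 *     ANeuralNetworksExecution_free(execution);
 */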

%insert SymmPerChannelQuantParams_Comment
typedef struct ANeuralNetworksSymmPerChannelQuantParams {
    /** The index of the channel dimension. */
    uint32_t channelDim;
    /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */
    uint32_t scaleCount;
    /** The array of scaling values for each channel. Each value must be greater than zero. */
    const float* scales;
} ANeuralNetworksSymmPerChannelQuantParams;

/**
 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
 * of a rapid sequence of executions. It will likely cause overhead if only used
 * for a single execution.
 *
 * ANeuralNetworksBurst serves as a context object for any number of inferences
 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
 * object and the {@link ANeuralNetworksExecution} objects used with it must all
 * have been created from the same {@link ANeuralNetworksCompilation} object.
 *
 * This object is also used as a hint to drivers, providing insight to the
 * lifetime of a rapid sequence of executions. For example, a driver may choose
 * to increase the clock frequency of its accelerator for the lifetime of a
 * burst object.
 *
 * <p>To use:<ul>
 * <li>Create a new burst object by calling the
 *     {@link ANeuralNetworksBurst_create} function.</li>
 * <li>For each execution:</li><ul>
 *     <li>Create {@link ANeuralNetworksExecution} and configure its
 *         properties (see {@link ANeuralNetworksExecution} for details).</li>
 *     <li>Apply the model synchronously with
 *         {@link ANeuralNetworksExecution_burstCompute}, reusing the same
 *         {@link ANeuralNetworksBurst} with the new
 *         {@link ANeuralNetworksExecution}.</li>
 *     <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
 * <li>Destroy the burst with
 *     {@link ANeuralNetworksBurst_free}.</li></ul></p>
 *
 * Available since NNAPI feature level 3.
 */
typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
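
/*
 * Example (a minimal, non-normative sketch): issuing a rapid sequence of
 * executions through a burst object. The functions used below are declared in
 * <android/NeuralNetworks.h>; "compilation" is assumed to be a finished
 * ANeuralNetworksCompilation, per-frame input/output setup is elided, and
 * error checking is omitted for brevity.
 *
 *     ANeuralNetworksBurst* burst = NULL;
 *     ANeuralNetworksBurst_create(compilation, &burst);
 *     for (int frame = 0; frame < 100; ++frame) {
 *         ANeuralNetworksExecution* execution = NULL;
 *         ANeuralNetworksExecution_create(compilation, &execution);
 *         // ... set the inputs and outputs for this frame ...
 *         ANeuralNetworksExecution_burstCompute(execution, burst);
 *         ANeuralNetworksExecution_free(execution);
 *     }
 *     ANeuralNetworksBurst_free(burst);
 */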

/**
 * ANeuralNetworksOperandType describes the type of an operand.
 *
 * This structure is used to describe both scalars and tensors.
 *
 * A tensor operand type with all dimensions specified is "fully
 * specified". Whenever possible (i.e., whenever the dimensions are
 * known at model construction time), a tensor operand type should be
 * (but is not required to be) fully specified, in order to enable the
 * best possible performance.
 *
 * If a tensor operand's type is not fully specified, the dimensions
 * of the operand are deduced from the operand types and values of the
 * operation for which that operand is an output or from the corresponding
 * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
 * operand type in the case of referenced model input operands.
 *
 * <p>In the following situations, a tensor operand type must be fully
 * specified:<ul>
 * <li>The operand has a constant value, set by
 *     {@link ANeuralNetworksModel_setOperandValue} (with a
 *     non-nullptr buffer) or
 *     {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
 * <li>The operand is a model input (see
 *     {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
 *     model within a compilation. A fully specified tensor operand type
 *     must either be provided to {@link ANeuralNetworksModel_addOperand};
 *     or it must be provided to the corresponding
 *     {@link ANeuralNetworksExecution_setInput}, or
 *     {@link ANeuralNetworksExecution_setInputFromMemory}.
 *     EXCEPTION: If the input is optional and omitted
 *     (by passing nullptr for buffer to
 *     {@link ANeuralNetworksExecution_setInput}) then it need
 *     not have a fully specified tensor operand type.</li>
 * <li>The operand is a model output (see
 *     {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
 *     model within a compilation and is to be used with {@link
 *     ANeuralNetworksExecution_startComputeWithDependencies}.
 *     A fully specified tensor operand type must either be provided
 *     to {@link ANeuralNetworksModel_addOperand}; or it must be
 *     provided to the corresponding
 *     {@link ANeuralNetworksExecution_setOutput}, or
 *     {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul>
 *
 * A tensor operand type of specified rank but some number of
 * unspecified dimensions is represented by setting dimensionCount to
 * the rank and each unspecified dimension to 0.
 *
 * Available since NNAPI feature level 1.
 *
 * Starting at NNAPI feature level 3, a tensor operand type of unspecified rank is
 * represented by setting dimensionCount to 0 and dimensions to NULL (just as if
 * it were a scalar operand type).
 */
typedef struct ANeuralNetworksOperandType {
    /**
     * The data type, e.g., ANEURALNETWORKS_FLOAT32.
     */
    int32_t type;

    /**
     * The number of dimensions (rank).
     *
     * Must be 0 for scalars.
     */
    uint32_t dimensionCount;

    /**
     * The dimensions of the tensor.
     *
     * Must be nullptr for scalars.
     */
    const uint32_t* dimensions;

    /**
     * The quantization scale.
     *
     * Must be 0 when not applicable to an operand type.
     *
     * See {@link OperandCode}.
     */
    float scale;

    /**
     * The quantization zero point.
     *
     * Must be 0 when not applicable to an operand type.
     *
     * See {@link OperandCode}.
     */
    int32_t zeroPoint;
} ANeuralNetworksOperandType;

/**
 * Aliasing to {@link OperationCode}, used in function
 * {@link ANeuralNetworksModel_addOperation}.
 */
typedef int32_t ANeuralNetworksOperationType;

/**
 * ANeuralNetworksEvent is an opaque type that represents an event
 * that will be signaled once an execution completes.
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;

/**
 * ANeuralNetworksDevice is an opaque type that represents a device.
 *
 * This type is used to query basic properties and supported operations of the corresponding
 * device, and control which device(s) a model is to be run on.
 *
 * Available since NNAPI feature level 3.
 */
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;

/**
 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor.
 *
 * A memory descriptor describes the properties of a memory object, and is used by
 * {@link ANeuralNetworksMemory_createFromDesc}.
 *
 * To use:
 *   - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}.
 *   - Specify all of the intended input and output roles by calling
 *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
 *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
 *   - Optionally, specify the memory dimensions by calling
 *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
 *   - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}.
 *   - Use the memory descriptor as many times as needed with
 *     {@link ANeuralNetworksMemory_createFromDesc}.
 *   - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}.
 * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish}
 * has been called on it.
 *
 * It is the application's responsibility to make sure that only
 * one thread modifies a memory descriptor at a given time. It is however
 * safe for more than one thread to use the memory descriptor once
 * {@link ANeuralNetworksMemoryDesc_finish} has returned.
 *
 * It is also the application's responsibility to ensure that there are no other
 * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}.
 * It is however safe to continue using a {@link ANeuralNetworksMemory} object created
 * from the memory descriptor.
 *
 * Available since NNAPI feature level 4.
 */
typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
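
/*
 * Example (a minimal, non-normative sketch): creating a device native memory
 * object from a memory descriptor so it can serve as an output of one
 * compilation and an input of another. The functions used below are declared
 * in <android/NeuralNetworks.h>; "compilation1" and "compilation2" are assumed
 * to be finished compilations and error checking is omitted for brevity.
 *
 *     ANeuralNetworksMemoryDesc* desc = NULL;
 *     ANeuralNetworksMemoryDesc_create(&desc);
 *     // The memory will be used as output 0 of compilation1 and input 0 of
 *     // compilation2, each with frequency 1.0f.
 *     ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation1, 0, 1.0f);
 *     ANeuralNetworksMemoryDesc_addInputRole(desc, compilation2, 0, 1.0f);
 *     ANeuralNetworksMemoryDesc_finish(desc);
 *
 *     ANeuralNetworksMemory* memory = NULL;
 *     ANeuralNetworksMemory_createFromDesc(desc, &memory);
 *     ANeuralNetworksMemoryDesc_free(desc);  // The memory object remains valid.
 *     // ... use the memory with ANeuralNetworksExecution_setOutputFromMemory()
 *     // and ANeuralNetworksExecution_setInputFromMemory(), passing 0 for both
 *     // offset and length ...
 *     ANeuralNetworksMemory_free(memory);
 */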

__END_DECLS

#endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H

/** @} */