OSDN Git Service

Merge "Relax tolerance for FP comparison in test harness" into oc-mr1-dev
[android-x86/hardware-interfaces.git] / neuralnetworks / 1.0 / types.hal
index ccc17f1..5e3cf54 100644 (file)
 package android.hardware.neuralnetworks@1.0;
 
 // The types an operand can have.
-// These values are the same as found in the NeuralNetworks.h file.
-// When modifying, be sure to update HAL_NUM_OPERAND_TYPES in HalIntefaces.h.
-enum OperandType : uint32_t {
-    FLOAT16                   = 0,
-    FLOAT32                   = 1,
-    INT8                      = 2,
-    UINT8                     = 3,
-    INT16                     = 4,
-    UINT16                    = 5,
-    INT32                     = 6,
-    UINT32                    = 7,
-    TENSOR_FLOAT16            = 8,
-    TENSOR_FLOAT32            = 9,
-    TENSOR_SYMMETRICAL_QUANT8 = 10,
+// These values are the same as found in the NeuralNetworks.h and NeuralNetworksOEM.h files.
+enum OperandType : int32_t {
+    FLOAT32                   = 0,
+    INT32                     = 1,
+    UINT32                    = 2,
+    TENSOR_FLOAT32            = 3,
+    TENSOR_INT32              = 4,
+    TENSOR_QUANT8_ASYMM       = 5,
+
+    OEM                       = 10000,
+    TENSOR_OEM_BYTE           = 10001,
 };
 
-// The type of operations.  Unlike the operation types found in
-// NeuralNetworks.h file, these specify the data type they operate on.
+// The type of operations.  Unlike the operation types found in the
+// NeuralNetworks.h and NeuralNetworksOEM.h files, these specify the data type they operate on.
 // This is done to simplify the work of drivers.
 // TODO: Currently they are the same.  Add a conversion when finalizing the model.
-// When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalIntefaces.h.
-enum OperationType : uint32_t {
-    AVERAGE_POOL_FLOAT32                 = 0,
-    CONCATENATION_FLOAT32                = 1,
-    CONV_FLOAT32                         = 2,
-    DEPTHWISE_CONV_FLOAT32               = 3,
-    MAX_POOL_FLOAT32                     = 4,
-    L2_POOL_FLOAT32                      = 5,
-    DEPTH_TO_SPACE_FLOAT32               = 6,
-    SPACE_TO_DEPTH_FLOAT32               = 7,
-    LOCAL_RESPONSE_NORMALIZATION_FLOAT32 = 8,
-    SOFTMAX_FLOAT32                      = 9,
-    RESHAPE_FLOAT32                      = 10,
-    SPLIT_FLOAT32                        = 11,
-    FAKE_QUANT_FLOAT32                   = 12,
-    ADD_FLOAT32                          = 13,
-    FULLY_CONNECTED_FLOAT32              = 14,
-    CAST_FLOAT32                         = 15,
-    MUL_FLOAT32                          = 16,
-    L2_NORMALIZATION_FLOAT32             = 17,
-    LOGISTIC_FLOAT32                     = 18,
-    RELU_FLOAT32                         = 19,
-    RELU6_FLOAT32                        = 20,
-    RELU1_FLOAT32                        = 21,
-    TANH_FLOAT32                         = 22,
-    DEQUANTIZE_FLOAT32                   = 23,
-    FLOOR_FLOAT32                        = 24,
-    GATHER_FLOAT32                       = 25,
-    RESIZE_BILINEAR_FLOAT32              = 26,
-    LSH_PROJECTION_FLOAT32               = 27,
-    LSTM_FLOAT32                         = 28,
-    SVDF_FLOAT32                         = 29,
-    RNN_FLOAT32                          = 30,
-    N_GRAM_FLOAT32                       = 31,
-    LOOKUP_FLOAT32                       = 32,
+enum OperationType : int32_t {
+    ADD                          = 0,
+    AVERAGE_POOL_2D              = 1,
+    CONCATENATION                = 2,
+    CONV_2D                      = 3,
+    DEPTHWISE_CONV_2D            = 4,
+    DEPTH_TO_SPACE               = 5,
+    DEQUANTIZE                   = 6,
+    EMBEDDING_LOOKUP             = 7,
+    FLOOR                        = 8,
+    FULLY_CONNECTED              = 9,
+    HASHTABLE_LOOKUP             = 10,
+    L2_NORMALIZATION             = 11,
+    L2_POOL_2D                   = 12,
+    LOCAL_RESPONSE_NORMALIZATION = 13,
+    LOGISTIC                     = 14,
+    LSH_PROJECTION               = 15,
+    LSTM                         = 16,
+    MAX_POOL_2D                  = 17,
+    MUL                          = 18,
+    RELU                         = 19,
+    RELU1                        = 20,
+    RELU6                        = 21,
+    RESHAPE                      = 22,
+    RESIZE_BILINEAR              = 23,
+    RNN                          = 24,
+    SOFTMAX                      = 25,
+    SPACE_TO_DEPTH               = 26,
+    SVDF                         = 27,
+    TANH                         = 28,
+
+    OEM_OPERATION                = 10000,
 };
 
-// Two special values that can be used instead of a regular poolIndex.
-enum LocationValues : uint32_t {
-    // The location will be specified at runtime. It's either a temporary
-    // variable, an input, or an output.
-    LOCATION_AT_RUN_TIME = 0xFFFFFFFF,
-    // The operand's value is stored in the
-    // TODO: Only for old
-    LOCATION_SAME_BLOCK = 0xFFFFFFFE
+// Fused activation functions
+enum FusedActivationFunc : int32_t {
+    NONE  = 0,
+    RELU  = 1,
+    RELU1 = 2,
+    RELU6 = 3,
+};
+
+// How an operand is used.
+enum OperandLifeTime : int32_t {
+    // The operand is internal to the model.  It's created by an operation
+    // and consumed by other operations.
+    TEMPORARY_VARIABLE,
+    // The operand is an input of the model. An operand can't be both
+    // input and output of a model.
+    MODEL_INPUT,
+    // The operand is an output of the model.
+    MODEL_OUTPUT,
+    // The operand is a constant found in Model.operandValues.
+    CONSTANT_COPY,
+    // The operand is a constant that was specified via a Memory object.
+    CONSTANT_REFERENCE
 };
 
 // Status of a device.
-enum DeviceStatus : uint32_t {
+enum DeviceStatus : int32_t {
     AVAILABLE,
     BUSY,
     OFFLINE,
@@ -102,14 +110,18 @@ struct PerformanceInfo {
     float powerUsage;  // in picoJoules
 };
 
+struct OperationTuple {
+    // The type of operation.
+    OperationType operationType;
+    // The input data type of operation.
+    OperandType operandType;
+};
+
 // The capabilities of a driver.
 struct Capabilities {
-    vec<OperationType> supportedOperationTypes;
-    // TODO Do the same for baseline model IDs
+    vec<OperationTuple> supportedOperationTuples;
     bool cachesCompilation;
     // TODO revisit the data types and scales.
-    float bootupTime;  // in nanoseconds
-    PerformanceInfo float16Performance;
     PerformanceInfo float32Performance;
     PerformanceInfo quantized8Performance;
 };
@@ -136,14 +148,27 @@ struct Operand {
     float scale;
     int32_t zeroPoint;
 
+    // How the operand is used.
+    OperandLifeTime lifetime;
+
     // Where to find the data for this operand.
+    // If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, or MODEL_OUTPUT:
+    // - All the fields will be 0.
+    // If the lifetime is CONSTANT_COPY:
+    // - location.poolIndex is 0.
+    // - location.offset is the offset in bytes into Model.operandValues.
+    // - location.length is set.
+    // If the lifetime is CONSTANT_REFERENCE:
+    // - location.poolIndex is set.
+    // - location.offset is the offset in bytes into the specified pool.
+    // - location.length is set.
     DataLocation location;
 };
 
 // Describes one operation of the graph.
 struct Operation {
-    // The type of operation.
-    OperationType type;
+    // The tuple describing the operation type and input type.
+    OperationTuple opTuple;
     // Describes the table that contains the indexes of the inputs of the
     // operation. The offset is the index in the operandIndexes table.
     vec<uint32_t> inputs;
@@ -152,12 +177,6 @@ struct Operation {
     vec<uint32_t> outputs;
 };
 
-struct InputOutputInfo {
-    DataLocation location;
-    // If dimensions.size() > 0, we have updated dimensions.
-    vec<uint32_t> dimensions;
-};
-
 struct Model {
     vec<Operand> operands;
     vec<Operation> operations;
@@ -167,8 +186,26 @@ struct Model {
     vec<memory> pools;
 };
 
+struct RequestArgument {
+    // The location of the argument within one of the memory pools specified in Request.pools.
+    DataLocation location;
+    // If dimensions.size() > 0, dimension information was provided along with the
+    // argument.  This can be the case for models that accept inputs of varying size.
+    // This can't change the rank, just the value of the dimensions that were
+    // unspecified in the model.
+    vec<uint32_t> dimensions;
+};
+
 struct Request {
-    vec<InputOutputInfo> inputs;
-    vec<InputOutputInfo> outputs;
+    vec<RequestArgument> inputs;
+    vec<RequestArgument> outputs;
     vec<memory> pools;
 };
+
+enum ErrorStatus : int32_t {
+    NONE,
+    DEVICE_UNAVAILABLE,
+    GENERAL_FAILURE,
+    OUTPUT_INSUFFICIENT_SIZE,
+    INVALID_ARGUMENT,
+};