Add negative tests to the Neuralnetworks HAL
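
Move the shared test model and request builders into Models.h, add a
doPrepareModelShortcut() helper for preparing a model, and split the
getSupportedOperations, prepareModel, and execute tests into positive and
negative variants. The negative variants pass invalid models and requests
to the driver and expect ErrorStatus::INVALID_ARGUMENT.
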
[android-x86/hardware-interfaces.git] / neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp
index 453e3e5..59d66ba 100644
@@ -18,6 +18,7 @@
 
 #include "VtsHalNeuralnetworksV1_0TargetTest.h"
 #include "Event.h"
+#include "Models.h"
 #include "TestHarness.h"
 
 #include <android-base/logging.h>
@@ -65,6 +66,32 @@ void NeuralnetworksHidlTest::SetUp() {
 
 void NeuralnetworksHidlTest::TearDown() {}
 
+sp<IPreparedModel> NeuralnetworksHidlTest::doPrepareModelShortcut(const Model& model) {
+    sp<IPreparedModel> preparedModel;
+    ErrorStatus prepareStatus;
+    sp<Event> preparationEvent = new Event();
+    if (preparationEvent.get() == nullptr) {
+        return nullptr;
+    }
+
+    Return<void> prepareRet = device->prepareModel(
+        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+            prepareStatus = status;
+            preparedModel = prepared;
+        });
+
+    if (!prepareRet.isOk() || prepareStatus != ErrorStatus::NONE ||
+        preparedModel.get() == nullptr) {
+        return nullptr;
+    }
+    Event::Status eventStatus = preparationEvent->wait();
+    if (eventStatus != Event::Status::SUCCESS) {
+        return nullptr;
+    }
+
+    return preparedModel;
+}
+
 // create device test
 TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
 
@@ -91,185 +118,137 @@ TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
     EXPECT_TRUE(ret.isOk());
 }
 
-namespace {
-// create the model
-Model createTestModel() {
-    const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
-    const uint32_t size = operand2Data.size() * sizeof(float);
-
-    const uint32_t operand1 = 0;
-    const uint32_t operand2 = 1;
-    const uint32_t operand3 = 2;
-    const uint32_t operand4 = 3;
-
-    const std::vector<Operand> operands = {
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 1},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_INPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 1},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = 0, .length = size},
-        },
-        {
-            .type = OperandType::INT32,
-            .dimensions = {},
-            .numberOfConsumers = 1,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::CONSTANT_COPY,
-            .location = {.poolIndex = 0, .offset = size, .length = sizeof(int32_t)},
-        },
-        {
-            .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {1, 2, 2, 1},
-            .numberOfConsumers = 0,
-            .scale = 0.0f,
-            .zeroPoint = 0,
-            .lifetime = OperandLifeTime::MODEL_OUTPUT,
-            .location = {.poolIndex = 0, .offset = 0, .length = 0},
-        },
-    };
-
-    const std::vector<Operation> operations = {{
-        .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
-        .inputs = {operand1, operand2, operand3},
-        .outputs = {operand4},
-    }};
-
-    const std::vector<uint32_t> inputIndexes = {operand1};
-    const std::vector<uint32_t> outputIndexes = {operand4};
-    std::vector<uint8_t> operandValues(
-        reinterpret_cast<const uint8_t*>(operand2Data.data()),
-        reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
-    int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
-    operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
-                         reinterpret_cast<const uint8_t*>(&activation[1]));
-
-    const std::vector<hidl_memory> pools = {};
-
-    return {
-        .operands = operands,
-        .operations = operations,
-        .inputIndexes = inputIndexes,
-        .outputIndexes = outputIndexes,
-        .operandValues = operandValues,
-        .pools = pools,
-    };
+// supported operations positive test
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsPositiveTest) {
+    Model model = createValidTestModel();
+    Return<void> ret = device->getSupportedOperations(
+        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+            EXPECT_EQ(ErrorStatus::NONE, status);
+            EXPECT_EQ(model.operations.size(), supported.size());
+        });
+    EXPECT_TRUE(ret.isOk());
 }
-}  // anonymous namespace
 
-// allocator helper
-hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
-    hidl_memory memory;
-
-    sp<IAllocator> allocator = IAllocator::getService(type);
-    if (!allocator.get()) {
-        return {};
-    }
-
-    Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
-        ASSERT_TRUE(success);
-        memory = mem;
-    });
-    if (!ret.isOk()) {
-        return {};
-    }
-
-    return memory;
+// supported operations negative test 1
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest1) {
+    Model model = createInvalidTestModel1();
+    Return<void> ret = device->getSupportedOperations(
+        model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            (void)supported;
+        });
+    EXPECT_TRUE(ret.isOk());
 }
 
-// supported subgraph test
-TEST_F(NeuralnetworksHidlTest, SupportedOperationsTest) {
-    Model model = createTestModel();
+// supported operations negative test 2
+TEST_F(NeuralnetworksHidlTest, SupportedOperationsNegativeTest2) {
+    Model model = createInvalidTestModel2();
     Return<void> ret = device->getSupportedOperations(
         model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
-            EXPECT_EQ(ErrorStatus::NONE, status);
-            EXPECT_EQ(model.operations.size(), supported.size());
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            (void)supported;
         });
     EXPECT_TRUE(ret.isOk());
 }
 
-// execute simple graph
-TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
-    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
-    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
-    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
-    const uint32_t INPUT = 0;
-    const uint32_t OUTPUT = 1;
-
-    // prepare request
-    Model model = createTestModel();
-    sp<IPreparedModel> preparedModel;
+// prepare simple model positive test
+TEST_F(NeuralnetworksHidlTest, SimplePrepareModelPositiveTest) {
+    Model model = createValidTestModel();
     sp<Event> preparationEvent = new Event();
     ASSERT_NE(nullptr, preparationEvent.get());
     Return<void> prepareRet = device->prepareModel(
         model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
             EXPECT_EQ(ErrorStatus::NONE, status);
-            preparedModel = prepared;
+            (void)prepared;
+        });
+    ASSERT_TRUE(prepareRet.isOk());
+}
+
+// prepare simple model negative test 1
+TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest1) {
+    Model model = createInvalidTestModel1();
+    sp<Event> preparationEvent = new Event();
+    ASSERT_NE(nullptr, preparationEvent.get());
+    Return<void> prepareRet = device->prepareModel(
+        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            (void)prepared;
+        });
+    ASSERT_TRUE(prepareRet.isOk());
+}
+
+// prepare simple model negative test 2
+TEST_F(NeuralnetworksHidlTest, SimplePrepareModelNegativeTest2) {
+    Model model = createInvalidTestModel2();
+    sp<Event> preparationEvent = new Event();
+    ASSERT_NE(nullptr, preparationEvent.get());
+    Return<void> prepareRet = device->prepareModel(
+        model, preparationEvent, [&](ErrorStatus status, const sp<IPreparedModel>& prepared) {
+            EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+            (void)prepared;
         });
     ASSERT_TRUE(prepareRet.isOk());
+}
+
+// execute simple graph positive test
+TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphPositiveTest) {
+    Model model = createValidTestModel();
+    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
     ASSERT_NE(nullptr, preparedModel.get());
-    Event::Status preparationStatus = preparationEvent->wait();
-    EXPECT_EQ(Event::Status::SUCCESS, preparationStatus);
-
-    // prepare inputs
-    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
-    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
-    std::vector<RequestArgument> inputs = {{
-        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
-    }};
-    std::vector<RequestArgument> outputs = {{
-        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
-    }};
-    std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
-                                      allocateSharedMemory(outputSize)};
-    ASSERT_NE(0ull, pools[INPUT].size());
-    ASSERT_NE(0ull, pools[OUTPUT].size());
-
-    // load data
-    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
-    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
-    ASSERT_NE(nullptr, inputMemory.get());
-    ASSERT_NE(nullptr, outputMemory.get());
-    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
-    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
-    ASSERT_NE(nullptr, inputPtr);
-    ASSERT_NE(nullptr, outputPtr);
-    inputMemory->update();
-    outputMemory->update();
-    std::copy(inputData.begin(), inputData.end(), inputPtr);
-    std::copy(outputData.begin(), outputData.end(), outputPtr);
-    inputMemory->commit();
-    outputMemory->commit();
+    Request request = createValidTestRequest();
 
-    // execute request
     sp<Event> executionEvent = new Event();
     ASSERT_NE(nullptr, executionEvent.get());
-    Return<ErrorStatus> executeStatus = preparedModel->execute(
-        {.inputs = inputs, .outputs = outputs, .pools = pools}, executionEvent);
+    Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
     ASSERT_TRUE(executeStatus.isOk());
     EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executeStatus));
     Event::Status eventStatus = executionEvent->wait();
     EXPECT_EQ(Event::Status::SUCCESS, eventStatus);
 
-    // validate results { 1+5, 2+6, 3+7, 4+8 }
+    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
+    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
+    const uint32_t OUTPUT = 1;
+
+    sp<IMemory> outputMemory = mapMemory(request.pools[OUTPUT]);
+    ASSERT_NE(nullptr, outputMemory.get());
+    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
+    ASSERT_NE(nullptr, outputPtr);
     outputMemory->read();
     std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
     outputMemory->commit();
     EXPECT_EQ(expectedData, outputData);
 }
 
+// execute simple graph negative test 1
+TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest1) {
+    Model model = createValidTestModel();
+    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
+    ASSERT_NE(nullptr, preparedModel.get());
+    Request request = createInvalidTestRequest1();
+
+    sp<Event> executionEvent = new Event();
+    ASSERT_NE(nullptr, executionEvent.get());
+    Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
+    ASSERT_TRUE(executeStatus.isOk());
+    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
+    executionEvent->wait();
+}
+
+// execute simple graph negative test 2
+TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphNegativeTest2) {
+    Model model = createValidTestModel();
+    sp<IPreparedModel> preparedModel = doPrepareModelShortcut(model);
+    ASSERT_NE(nullptr, preparedModel.get());
+    Request request = createInvalidTestRequest2();
+
+    sp<Event> executionEvent = new Event();
+    ASSERT_NE(nullptr, executionEvent.get());
+    Return<ErrorStatus> executeStatus = preparedModel->execute(request, executionEvent);
+    ASSERT_TRUE(executeStatus.isOk());
+    EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeStatus));
+    executionEvent->wait();
+}
+
 // Mixed-typed examples
 typedef MixedTypedExampleType MixedTypedExample;