OSDN Git Service

Refactor how arguments are tracked.
[android-x86/hardware-interfaces.git] / neuralnetworks / 1.0 / vts / functional / VtsHalNeuralnetworksV1_0TargetTest.cpp
1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19 #include "Event.h"
20 #include "VtsHalNeuralnetworksV1_0TargetTest.h"
21 #include <android-base/logging.h>
22 #include <android/hidl/memory/1.0/IMemory.h>
23 #include <hidlmemory/mapping.h>
24 #include <string>
25
26 namespace android {
27 namespace hardware {
28 namespace neuralnetworks {
29 namespace V1_0 {
30 namespace vts {
31 namespace functional {
32
33 using ::android::hardware::neuralnetworks::V1_0::implementation::Event;
34
35 // A class for test environment setup
36 NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
37
38 NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
39
40 NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
41     // This has to return a "new" object because it is freed inside
42     // ::testing::AddGlobalTestEnvironment when the gtest is being torn down
43     static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
44     return instance;
45 }
46
// Registers the HAL interfaces this test suite exercises, so the VTS
// framework can enumerate and target IDevice service instances.
void NeuralnetworksHidlEnvironment::registerTestServices() {
    registerTestService<IDevice>();
}
50
51 // The main test class for NEURALNETWORK HIDL HAL.
52 NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {}
53
54 void NeuralnetworksHidlTest::SetUp() {
55     device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>(
56         NeuralnetworksHidlEnvironment::getInstance());
57     ASSERT_NE(nullptr, device.get());
58 }
59
60 void NeuralnetworksHidlTest::TearDown() {}
61
// create device test
// Empty body on purpose: SetUp() already asserts the service is reachable,
// so this test passes iff the IDevice handle could be acquired.
TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
64
65 // status test
66 TEST_F(NeuralnetworksHidlTest, StatusTest) {
67     DeviceStatus status = device->getStatus();
68     EXPECT_EQ(DeviceStatus::AVAILABLE, status);
69 }
70
71 // initialization
72 TEST_F(NeuralnetworksHidlTest, InitializeTest) {
73     Return<void> ret = device->initialize([](const Capabilities& capabilities) {
74         EXPECT_NE(nullptr, capabilities.supportedOperationTuples.data());
75         EXPECT_NE(0ull, capabilities.supportedOperationTuples.size());
76         EXPECT_EQ(0u, static_cast<uint32_t>(capabilities.cachesCompilation) & ~0x1);
77         EXPECT_LT(0.0f, capabilities.bootupTime);
78         EXPECT_LT(0.0f, capabilities.float16Performance.execTime);
79         EXPECT_LT(0.0f, capabilities.float16Performance.powerUsage);
80         EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
81         EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
82         EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
83         EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
84     });
85     EXPECT_TRUE(ret.isOk());
86 }
87
88 namespace {
// create the model
//
// Builds a minimal one-operation model computing an element-wise ADD over
// [1, 2, 2, 1] float tensors:
//     operand4 (output) = operand1 (input) + operand2 (constant {5,6,7,8})
// with operand3 holding the fused-activation scalar (NONE).  The constant
// tensor data and the activation scalar are packed back-to-back into
// operandValues, so the model needs no memory pools.
Model createTestModel() {
    // Constant addend; copied into operandValues at offset 0 below.
    const std::vector<float> operand2Data = {5.0f, 6.0f, 7.0f, 8.0f};
    const uint32_t size = operand2Data.size() * sizeof(float);

    // Indexes into the operand table defined next.
    const uint32_t operand1 = 0;
    const uint32_t operand2 = 1;
    const uint32_t operand3 = 2;
    const uint32_t operand4 = 3;

    const std::vector<Operand> operands = {
        // operand1: the model input tensor, supplied per-request.
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0,
                         .offset = 0,
                         .length = 0},
        },
        // operand2: constant tensor; bytes live in operandValues[0, size).
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0,
                         .offset = 0,
                         .length = size},
        },
        // operand3: fused-activation scalar; stored right after the tensor
        // data, at operandValues[size, size + sizeof(int32_t)).
        {
            .type = OperandType::INT32,
            .dimensions = {},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::CONSTANT_COPY,
            .location = {.poolIndex = 0,
                         .offset = size,
                         .length = sizeof(int32_t)},
        },
        // operand4: the model output tensor, written per-request.
        {
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2, 2, 1},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0,
                         .offset = 0,
                         .length = 0},
        },
    };

    // Single ADD operation: (input, constant, activation) -> output.
    const std::vector<Operation> operations = {{
        .opTuple = {OperationType::ADD, OperandType::TENSOR_FLOAT32},
        .inputs = {operand1, operand2, operand3},
        .outputs = {operand4},
    }};

    const std::vector<uint32_t> inputIndexes = {operand1};
    const std::vector<uint32_t> outputIndexes = {operand4};
    // Pack the constant tensor bytes, then append the activation scalar
    // (NONE) so the offsets declared above line up.
    std::vector<uint8_t> operandValues(
        reinterpret_cast<const uint8_t*>(operand2Data.data()),
        reinterpret_cast<const uint8_t*>(operand2Data.data()) + size);
    int32_t activation[1] = {static_cast<int32_t>(FusedActivationFunc::NONE)};
    operandValues.insert(operandValues.end(), reinterpret_cast<const uint8_t*>(&activation[0]),
                         reinterpret_cast<const uint8_t*>(&activation[1]));

    // All constants are CONSTANT_COPY, so no shared-memory pools are needed.
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}
172
173 // allocator helper
174 hidl_memory allocateSharedMemory(int64_t size, const std::string& type = "ashmem") {
175     hidl_memory memory;
176
177     sp<IAllocator> allocator = IAllocator::getService(type);
178     if (!allocator.get()) {
179         return {};
180     }
181
182     Return<void> ret = allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
183         ASSERT_TRUE(success);
184         memory = mem;
185     });
186     if (!ret.isOk()) {
187         return {};
188     }
189
190     return memory;
191 }
192 }  // anonymous namespace
193
194 // supported subgraph test
195 TEST_F(NeuralnetworksHidlTest, SupportedSubgraphTest) {
196     Model model = createTestModel();
197     std::vector<bool> supported;
198     Return<void> ret = device->getSupportedSubgraph(
199         model, [&](const hidl_vec<bool>& hidl_supported) { supported = hidl_supported; });
200     ASSERT_TRUE(ret.isOk());
201     EXPECT_EQ(/*model.operations.size()*/ 0ull, supported.size());
202 }
203
// execute simple graph
//
// End-to-end execution test: prepares the ADD model from createTestModel(),
// feeds {1,2,3,4} through an ashmem input pool, runs it asynchronously, and
// checks that the output pool contains {1+5, 2+6, 3+7, 4+8} = {6,8,10,12}.
TEST_F(NeuralnetworksHidlTest, SimpleExecuteGraphTest) {
    std::vector<float> inputData = {1.0f, 2.0f, 3.0f, 4.0f};
    // Pre-fill the output with sentinel values so a no-op execution is caught.
    std::vector<float> outputData = {-1.0f, -1.0f, -1.0f, -1.0f};
    std::vector<float> expectedData = {6.0f, 8.0f, 10.0f, 12.0f};
    // Pool indexes for the request's input and output memory.
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    // prepare request
    Model model = createTestModel();
    sp<IPreparedModel> preparedModel = device->prepareModel(model);
    ASSERT_NE(nullptr, preparedModel.get());

    // prepare inputs
    uint32_t inputSize = static_cast<uint32_t>(inputData.size() * sizeof(float));
    uint32_t outputSize = static_cast<uint32_t>(outputData.size() * sizeof(float));
    // Each argument occupies its whole pool, starting at offset 0.
    std::vector<RequestArgument> inputs = {{
        .location = {.poolIndex = INPUT, .offset = 0, .length = inputSize}, .dimensions = {},
    }};
    std::vector<RequestArgument> outputs = {{
        .location = {.poolIndex = OUTPUT, .offset = 0, .length = outputSize}, .dimensions = {},
    }};
    std::vector<hidl_memory> pools = {allocateSharedMemory(inputSize),
                                      allocateSharedMemory(outputSize)};
    // allocateSharedMemory returns an empty hidl_memory on failure.
    ASSERT_NE(0ull, pools[INPUT].size());
    ASSERT_NE(0ull, pools[OUTPUT].size());

    // load data
    sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
    sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
    ASSERT_NE(nullptr, inputMemory.get());
    ASSERT_NE(nullptr, outputMemory.get());
    float* inputPtr = reinterpret_cast<float*>(static_cast<void*>(inputMemory->getPointer()));
    float* outputPtr = reinterpret_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
    ASSERT_NE(nullptr, inputPtr);
    ASSERT_NE(nullptr, outputPtr);
    // HIDL memory protocol: bracket writes with update()/commit().
    inputMemory->update();
    outputMemory->update();
    std::copy(inputData.begin(), inputData.end(), inputPtr);
    std::copy(outputData.begin(), outputData.end(), outputPtr);
    inputMemory->commit();
    outputMemory->commit();

    // execute request
    // Execution is asynchronous: completion is signaled through the Event.
    sp<Event> event = sp<Event>(new Event());
    ASSERT_NE(nullptr, event.get());
    bool success = preparedModel->execute({.inputs = inputs, .outputs = outputs, .pools = pools},
                                          event);
    EXPECT_TRUE(success);
    Event::Status status = event->wait();
    EXPECT_EQ(Event::Status::SUCCESS, status);

    // validate results { 1+5, 2+6, 3+7, 4+8 }
    // Bracket reads with read()/commit() per the HIDL memory protocol.
    outputMemory->read();
    std::copy(outputPtr, outputPtr + outputData.size(), outputData.begin());
    outputMemory->commit();
    EXPECT_EQ(expectedData, outputData);
}
262
263 // TODO: Add tests for execution failure, or wait_for/wait_until timeout.
264 //       Discussion: https://googleplex-android-review.git.corp.google.com/#/c/platform/hardware/interfaces/+/2654636/5/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworksV1_0TargetTest.cpp@222
265
266 }  // namespace functional
267 }  // namespace vts
268 }  // namespace V1_0
269 }  // namespace neuralnetworks
270 }  // namespace hardware
271 }  // namespace android
272
273 using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
274
275 int main(int argc, char** argv) {
276     ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
277     ::testing::InitGoogleTest(&argc, argv);
278     NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
279
280     int status = RUN_ALL_TESTS();
281     return status;
282 }