1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "neuralnetworks_hidl_hal_test"
18
19 #include <android-base/logging.h>
20 #include <gtest/gtest.h>
21
22 #include "1.3/Callbacks.h"
23 #include "1.3/Utils.h"
24 #include "GeneratedTestHarness.h"
25 #include "MemoryUtils.h"
26 #include "TestHarness.h"
27 #include "Utils.h"
28 #include "VtsHalNeuralnetworks.h"
29
30 namespace android::hardware::neuralnetworks::V1_3::vts::functional {
31
32 using namespace test_helper;
33 using implementation::ExecutionCallback;
34 using implementation::PreparedModelCallback;
35 using V1_0::RequestArgument;
36 using V1_1::ExecutionPreference;
37 using V1_2::Constant;
38 using V1_2::MeasureTiming;
39 using V1_2::OutputShape;
40 using V1_2::Timing;
41
42 namespace {
43
// The set of devices (by name) that every parameterized test below runs against.
const auto kNamedDeviceChoices = testing::ValuesIn(getNamedDevices());

// A 1.3 driver is likely to support at least one of the following operand types.
const std::vector<TestOperandType> kTestOperandTypeChoicesVector = {
        TestOperandType::TENSOR_FLOAT32,
        TestOperandType::TENSOR_FLOAT16,
        TestOperandType::TENSOR_QUANT8_ASYMM,
        TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
};
// gtest parameter generator over the operand types above.
const auto kTestOperandTypeChoices = testing::ValuesIn(kTestOperandTypeChoicesVector);
54
isInChoices(TestOperandType type)55 bool isInChoices(TestOperandType type) {
56 return std::count(kTestOperandTypeChoicesVector.begin(), kTestOperandTypeChoicesVector.end(),
57 type) > 0;
58 }
59
isFloat(TestOperandType type)60 bool isFloat(TestOperandType type) {
61 CHECK(isInChoices(type));
62 return type == TestOperandType::TENSOR_FLOAT32 || type == TestOperandType::TENSOR_FLOAT16;
63 }
64
65 // Create dummy buffers for model constants as well as inputs and outputs.
66 // We only care about the size here because we will not check accuracy in validation tests.
createDummyData(TestModel * testModel)67 void createDummyData(TestModel* testModel) {
68 for (auto& operand : testModel->main.operands) {
69 if (operand.data != nullptr) continue;
70 switch (operand.lifetime) {
71 case TestOperandLifeTime::SUBGRAPH_INPUT:
72 case TestOperandLifeTime::SUBGRAPH_OUTPUT:
73 case TestOperandLifeTime::CONSTANT_COPY:
74 case TestOperandLifeTime::CONSTANT_REFERENCE: {
75 const uint32_t size = nn::nonExtensionOperandSizeOfData(
76 static_cast<OperandType>(operand.type), operand.dimensions);
77 operand.data = TestBuffer(size);
78 } break;
79 default:
80 break;
81 }
82 }
83 }
84
createInt32Scalar(int32_t value)85 TestOperand createInt32Scalar(int32_t value) {
86 return {
87 .type = TestOperandType::INT32,
88 .dimensions = {},
89 .numberOfConsumers = 1,
90 .scale = 0.0f,
91 .zeroPoint = 0,
92 .lifetime = TestOperandLifeTime::CONSTANT_COPY,
93 .data = TestBuffer::createFromVector<int32_t>({value}),
94 };
95 }
96
97 // Construct a test model with multiple CONV_2D operations with the given operand as inputs.
98 // The dimensions of the filters are chosen to ensure outputs has the same dimensions as inputs.
99 // We choose CONV_2D operation because it is commonly supported by most drivers.
createConvModel(const TestOperand & operand,uint32_t numOperations)100 TestModel createConvModel(const TestOperand& operand, uint32_t numOperations) {
101 CHECK(isInChoices(operand.type));
102
103 TestOperand weight = {.type = operand.type,
104 .dimensions = {operand.dimensions[3], 3, 3, operand.dimensions[3]},
105 .numberOfConsumers = 1,
106 .scale = isFloat(operand.type) ? 0.0f : 1.0f,
107 .zeroPoint = 0,
108 .lifetime = TestOperandLifeTime::CONSTANT_COPY};
109
110 TestOperand bias = {
111 .type = isFloat(operand.type) ? operand.type : TestOperandType::TENSOR_INT32,
112 .dimensions = {operand.dimensions[3]},
113 .numberOfConsumers = 1,
114 .scale = operand.scale * weight.scale,
115 .zeroPoint = 0,
116 .lifetime = TestOperandLifeTime::CONSTANT_COPY};
117
118 TestOperand output = operand;
119 output.numberOfConsumers = 0;
120 output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;
121
122 const std::vector<TestOperand> operands = {
123 operand,
124 std::move(weight),
125 std::move(bias),
126 createInt32Scalar(1), // same padding
127 createInt32Scalar(1), // width stride
128 createInt32Scalar(1), // height stride
129 createInt32Scalar(0), // activation = NONE
130 std::move(output),
131 };
132
133 TestModel model;
134 for (uint32_t i = 0; i < numOperations; i++) {
135 model.main.operands.insert(model.main.operands.end(), operands.begin(), operands.end());
136 const uint32_t inputIndex = operands.size() * i;
137 const uint32_t outputIndex = inputIndex + operands.size() - 1;
138 std::vector<uint32_t> inputs(operands.size() - 1);
139 std::iota(inputs.begin(), inputs.end(), inputIndex);
140 model.main.operations.push_back({.type = TestOperationType::CONV_2D,
141 .inputs = std::move(inputs),
142 .outputs = {outputIndex}});
143 model.main.inputIndexes.push_back(inputIndex);
144 model.main.outputIndexes.push_back(outputIndex);
145 }
146 createDummyData(&model);
147 return model;
148 }
149
150 // Construct a test model with a single ADD operation with the given operand as input0 and input1.
151 // This is to cover additional cases that the CONV_2D model does not support, e.g. arbitrary input
152 // operand rank, scalar input operand. We choose ADD operation because it is commonly supported by
153 // most drivers.
createSingleAddModel(const TestOperand & operand)154 TestModel createSingleAddModel(const TestOperand& operand) {
155 CHECK(isInChoices(operand.type));
156
157 TestOperand act = {
158 .type = TestOperandType::INT32,
159 .dimensions = {},
160 .numberOfConsumers = 1,
161 .scale = 0.0f,
162 .zeroPoint = 0,
163 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
164 };
165
166 TestOperand output = operand;
167 output.numberOfConsumers = 0;
168 output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;
169
170 TestModel model = {
171 .main =
172 {
173 .operands =
174 {
175 operand,
176 operand,
177 std::move(act),
178 output,
179 },
180 .operations = {{.type = TestOperationType::ADD,
181 .inputs = {0, 1, 2},
182 .outputs = {3}}},
183 .inputIndexes = {0, 1, 2},
184 .outputIndexes = {3},
185 },
186 };
187 createDummyData(&model);
188 return model;
189 }
190
// A dummy invalid IPreparedModel class for MemoryDomainAllocateTest.InvalidPreparedModel
// Every execute* method reports GENERAL_FAILURE through its synchronous return value, and the
// remaining methods return immediately without ever invoking their result callbacks.
class InvalidPreparedModel : public IPreparedModel {
  public:
    Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
                                      const sp<V1_0::IExecutionCallback>&) override {
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, V1_2::MeasureTiming,
                                          const sp<V1_2::IExecutionCallback>&) override {
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, V1_2::MeasureTiming,
                                          const V1_3::OptionalTimePoint&,
                                          const V1_3::OptionalTimeoutDuration&,
                                          const sp<V1_3::IExecutionCallback>&) override {
        return V1_3::ErrorStatus::GENERAL_FAILURE;
    }
    // The *_cb result callbacks below are deliberately never called.
    Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming,
                                      executeSynchronously_cb) override {
        return Void();
    }
    Return<void> executeSynchronously_1_3(const V1_3::Request&, V1_2::MeasureTiming,
                                          const V1_3::OptionalTimePoint&,
                                          const V1_3::OptionalTimeoutDuration&,
                                          executeSynchronously_1_3_cb) override {
        return Void();
    }
    Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&,
                                         const MQDescriptorSync<V1_2::FmqRequestDatum>&,
                                         const MQDescriptorSync<V1_2::FmqResultDatum>&,
                                         configureExecutionBurst_cb) override {
        return Void();
    }
    Return<void> executeFenced(const V1_3::Request&, const hidl_vec<hidl_handle>&,
                               V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
                               const V1_3::OptionalTimeoutDuration&,
                               const V1_3::OptionalTimeoutDuration&, executeFenced_cb) override {
        return Void();
    }
};
231
232 } // namespace
233
234 class MemoryDomainTestBase : public testing::Test {
235 protected:
MemoryDomainTestBase(sp<IDevice> device,TestOperandType type)236 MemoryDomainTestBase(sp<IDevice> device, TestOperandType type)
237 : kDevice(std::move(device)),
238 kTestOperandType(type),
239 kTestOperand(kTestOperandMap.at(type)),
240 kTestOperandDataSize(nn::nonExtensionOperandSizeOfData(static_cast<OperandType>(type),
241 kTestOperand.dimensions)) {}
242
SetUp()243 void SetUp() override {
244 testing::Test::SetUp();
245 ASSERT_NE(kDevice, nullptr);
246 }
247
createConvPreparedModel(const TestOperand & testOperand,uint32_t numOperations=1)248 sp<IPreparedModel> createConvPreparedModel(const TestOperand& testOperand,
249 uint32_t numOperations = 1) {
250 const TestModel testModel = createConvModel(testOperand, numOperations);
251 const Model model = createModel(testModel);
252 sp<IPreparedModel> preparedModel;
253 createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
254 return preparedModel;
255 }
256
createAddPreparedModel(const TestOperand & testOperand)257 sp<IPreparedModel> createAddPreparedModel(const TestOperand& testOperand) {
258 const TestModel testModel = createSingleAddModel(testOperand);
259 const Model model = createModel(testModel);
260 sp<IPreparedModel> preparedModel;
261 createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
262 return preparedModel;
263 }
264
265 static const std::map<TestOperandType, TestOperand> kTestOperandMap;
266
267 const sp<IDevice> kDevice;
268 const TestOperandType kTestOperandType;
269 const TestOperand& kTestOperand;
270 const uint32_t kTestOperandDataSize;
271 };
272
// Prototype test operand for each tested operand type: a 1x32x32x8 SUBGRAPH_INPUT tensor.
// Note: the quantized entries use scale 0.5 (not 1.0) and zeroPoint 0 (not 10), which the
// ConflictScale / ConflictZeroPoint tests below rely on when building conflicting operands.
const std::map<TestOperandType, TestOperand> MemoryDomainTestBase::kTestOperandMap = {
        {TestOperandType::TENSOR_FLOAT32,
         {
                 .type = TestOperandType::TENSOR_FLOAT32,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_FLOAT16,
         {
                 .type = TestOperandType::TENSOR_FLOAT16,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.0f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
        {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
         {
                 .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                 .dimensions = {1, 32, 32, 8},
                 .numberOfConsumers = 1,
                 .scale = 0.5f,
                 .zeroPoint = 0,
                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
         }},
};
311
312 using MemoryDomainAllocateTestParam = std::tuple<NamedDevice, TestOperandType>;
313 class MemoryDomainAllocateTest : public MemoryDomainTestBase,
314 public testing::WithParamInterface<MemoryDomainAllocateTestParam> {
315 protected:
MemoryDomainAllocateTest()316 MemoryDomainAllocateTest()
317 : MemoryDomainTestBase(getData(std::get<NamedDevice>(GetParam())),
318 std::get<TestOperandType>(GetParam())) {}
319
320 struct AllocateTestArgs {
321 hidl_vec<uint32_t> dimensions;
322 hidl_vec<sp<IPreparedModel>> preparedModels;
323 hidl_vec<BufferRole> inputRoles;
324 hidl_vec<BufferRole> outputRoles;
325 };
326
327 // Validation test for IDevice::allocate. The driver is expected to fail with INVALID_ARGUMENT,
328 // or GENERAL_FAILURE if memory domain is not supported.
validateAllocate(AllocateTestArgs args)329 void validateAllocate(AllocateTestArgs args) {
330 const auto ret = kDevice->allocate(
331 {.dimensions = std::move(args.dimensions)}, std::move(args.preparedModels),
332 std::move(args.inputRoles), std::move(args.outputRoles),
333 [](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
334 EXPECT_TRUE(status == ErrorStatus::INVALID_ARGUMENT ||
335 status == ErrorStatus::GENERAL_FAILURE);
336 EXPECT_EQ(buffer, nullptr);
337 EXPECT_EQ(token, 0);
338 });
339 ASSERT_TRUE(ret.isOk());
340 }
341
testConflictOperands(const sp<IPreparedModel> & model1,const sp<IPreparedModel> & model2)342 void testConflictOperands(const sp<IPreparedModel>& model1, const sp<IPreparedModel>& model2) {
343 validateAllocate({
344 .preparedModels = {model1, model2},
345 .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
346 {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
347 });
348 validateAllocate({
349 .preparedModels = {model1, model2},
350 .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
351 .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
352 });
353 validateAllocate({
354 .preparedModels = {model1, model2},
355 .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
356 {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
357 });
358 }
359 };
360
TEST_P(MemoryDomainAllocateTest, EmptyRole) {
    // An allocation with no prepared models and no roles must fail validation.
    validateAllocate({});

    auto convModel = createConvPreparedModel(kTestOperand);
    if (convModel == nullptr) return;

    // A non-empty prepared model list with no roles must fail as well.
    validateAllocate({
            .preparedModels = {convModel},
    });
}
373
TEST_P(MemoryDomainAllocateTest, NullptrPreparedModel) {
    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f};

    // A null prepared model must be rejected whether it backs an input role...
    validateAllocate({.preparedModels = {nullptr}, .inputRoles = {role}});

    // ...or an output role.
    validateAllocate({.preparedModels = {nullptr}, .outputRoles = {role}});
}
387
TEST_P(MemoryDomainAllocateTest, InvalidPreparedModel) {
    sp<InvalidPreparedModel> badModel = new InvalidPreparedModel();
    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f};

    // A prepared model that fails every call must be rejected as an input role...
    validateAllocate({.preparedModels = {badModel}, .inputRoles = {role}});

    // ...and as an output role.
    validateAllocate({.preparedModels = {badModel}, .outputRoles = {role}});
}
403
TEST_P(MemoryDomainAllocateTest, InvalidModelIndex) {
    auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // Only one prepared model is supplied, so modelIndex = 1 is out of bounds and must be
    // rejected for both input and output roles.
    const BufferRole outOfBoundsRole = {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f};
    validateAllocate({.preparedModels = {model}, .inputRoles = {outOfBoundsRole}});
    validateAllocate({.preparedModels = {model}, .outputRoles = {outOfBoundsRole}});
}
420
TEST_P(MemoryDomainAllocateTest, InvalidIOIndex) {
    auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // The single-operation conv model has exactly one input and one output, so ioIndex = 1
    // is out of bounds in both directions.
    const BufferRole outOfBoundsRole = {.modelIndex = 0, .ioIndex = 1, .frequency = 1.0f};
    validateAllocate({.preparedModels = {model}, .inputRoles = {outOfBoundsRole}});
    validateAllocate({.preparedModels = {model}, .outputRoles = {outOfBoundsRole}});
}
437
TEST_P(MemoryDomainAllocateTest, InvalidFrequency) {
    auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // Exercise an above-range, a zero, and a negative frequency; all must be rejected for
    // both input and output roles.
    for (float badFreq : {10.0f, 0.0f, -0.5f}) {
        const BufferRole badRole = {.modelIndex = 0, .ioIndex = 0, .frequency = badFreq};
        validateAllocate({.preparedModels = {model}, .inputRoles = {badRole}});
        validateAllocate({.preparedModels = {model}, .outputRoles = {badRole}});
    }
}
455
TEST_P(MemoryDomainAllocateTest, SameRoleSpecifiedTwice) {
    auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    const BufferRole role0 = {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f};
    const BufferRole role1 = {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f};

    // The same role listed twice under the same model index must be rejected.
    validateAllocate({.preparedModels = {model}, .inputRoles = {role0, role0}});
    validateAllocate({.preparedModels = {model}, .outputRoles = {role0, role0}});

    // Two different model indexes that point at the same prepared model still describe the
    // same role, and must be rejected as well.
    validateAllocate({.preparedModels = {model, model}, .inputRoles = {role0, role1}});
    validateAllocate({.preparedModels = {model, model}, .outputRoles = {role0, role1}});
}
484
TEST_P(MemoryDomainAllocateTest, ConflictOperandType) {
    // For each tested type, the type it must not share a single allocation with.
    const std::map<TestOperandType, TestOperandType> conflictTypeMap = {
            {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
            {TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_FLOAT32},
            {TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
            {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, TestOperandType::TENSOR_QUANT8_ASYMM},
    };
    const auto it = conflictTypeMap.find(kTestOperandType);
    ASSERT_FALSE(it == conflictTypeMap.end());

    TestOperand conflictTestOperand = kTestOperand;
    conflictTestOperand.type = it->second;

    auto model = createConvPreparedModel(kTestOperand);
    auto conflictModel = createConvPreparedModel(conflictTestOperand);
    if (model == nullptr || conflictModel == nullptr) return;
    testConflictOperands(model, conflictModel);
}
503
TEST_P(MemoryDomainAllocateTest, ConflictScale) {
    // Scale is only meaningful for the quantized operand types.
    if (isFloat(kTestOperandType)) return;

    // Switch the scale to a value the prototype operand does not use.
    TestOperand conflictTestOperand = kTestOperand;
    ASSERT_NE(conflictTestOperand.scale, 1.0f);
    conflictTestOperand.scale = 1.0f;

    auto model = createConvPreparedModel(kTestOperand);
    auto conflictModel = createConvPreparedModel(conflictTestOperand);
    if (model == nullptr || conflictModel == nullptr) return;
    testConflictOperands(model, conflictModel);
}
516
TEST_P(MemoryDomainAllocateTest, ConflictZeroPoint) {
    // Zero point is only meaningful for the quantized operand types.
    if (isFloat(kTestOperandType)) return;

    // Switch the zero point to a value the prototype operand does not use.
    TestOperand conflictTestOperand = kTestOperand;
    ASSERT_NE(conflictTestOperand.zeroPoint, 10);
    conflictTestOperand.zeroPoint = 10;

    auto model = createConvPreparedModel(kTestOperand);
    auto conflictModel = createConvPreparedModel(conflictTestOperand);
    if (model == nullptr || conflictModel == nullptr) return;
    testConflictOperands(model, conflictModel);
}
529
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoles) {
    // Drop the last dimension so the two ADD models disagree on operand rank.
    TestOperand lowerRankOperand = kTestOperand;
    lowerRankOperand.dimensions.pop_back();

    auto model = createAddPreparedModel(kTestOperand);
    auto conflictModel = createAddPreparedModel(lowerRankOperand);
    if (model == nullptr || conflictModel == nullptr) return;
    testConflictOperands(model, conflictModel);
}
539
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoles) {
    // Change the batch dimension so the two conv models disagree on operand dimensions.
    TestOperand mismatchedOperand = kTestOperand;
    mismatchedOperand.dimensions[0] = 4;

    auto model = createConvPreparedModel(kTestOperand);
    auto conflictModel = createConvPreparedModel(mismatchedOperand);
    if (model == nullptr || conflictModel == nullptr) return;
    testConflictOperands(model, conflictModel);
}
549
TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoleAndDesc) {
    auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // Descriptor dimensions with one dimension fewer than the role's operand must be rejected
    // for both input and output roles.
    auto badDimensions = kTestOperand.dimensions;
    badDimensions.pop_back();

    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f};
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {model},
                      .inputRoles = {role}});
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {model},
                      .outputRoles = {role}});
}
568
TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoleAndDesc) {
    auto model = createConvPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // Descriptor dimensions that disagree with the role's operand on the batch dimension must
    // be rejected for both input and output roles.
    auto badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 4;

    const BufferRole role = {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f};
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {model},
                      .inputRoles = {role}});
    validateAllocate({.dimensions = badDimensions,
                      .preparedModels = {model},
                      .outputRoles = {role}});
}
587
TEST_P(MemoryDomainAllocateTest, ConflictRankWithScalarRole) {
    auto model = createAddPreparedModel(kTestOperand);
    if (model == nullptr) return;

    // Input 2 of the ADD model is the scalar activation operand. Requesting a non-empty
    // dimension for a scalar target must fail.
    validateAllocate({
            .dimensions = {1},
            .preparedModels = {model},
            .inputRoles = {{.modelIndex = 0, .ioIndex = 2, .frequency = 1.0f}},
    });
}
600
printMemoryDomainAllocateTest(const testing::TestParamInfo<MemoryDomainAllocateTestParam> & info)601 std::string printMemoryDomainAllocateTest(
602 const testing::TestParamInfo<MemoryDomainAllocateTestParam>& info) {
603 const auto& [namedDevice, operandType] = info.param;
604 const std::string type = toString(static_cast<OperandType>(operandType));
605 return gtestCompliantName(getName(namedDevice) + "_" + type);
606 }
607
// Instantiate MemoryDomainAllocateTest for every (device, operand type) combination.
INSTANTIATE_TEST_CASE_P(TestMemoryDomain, MemoryDomainAllocateTest,
                        testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices),
                        printMemoryDomainAllocateTest);
611
// Shared helpers for the IBuffer copyTo/copyFrom validation tests below.
class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
  protected:
    MemoryDomainCopyTestBase(sp<IDevice> device, TestOperandType type)
        : MemoryDomainTestBase(std::move(device), type) {}

    // Allocates device memory for roles of a single prepared model.
    // Returns {IBuffer, token} if success; returns {nullptr, 0} if not supported.
    std::pair<sp<IBuffer>, uint32_t> allocateBuffer(const sp<IPreparedModel>& preparedModel,
                                                    const std::vector<uint32_t>& inputIndexes,
                                                    const std::vector<uint32_t>& outputIndexes,
                                                    const std::vector<uint32_t>& dimensions) {
        if (preparedModel == nullptr) {
            return {nullptr, 0};
        }

        // Every role refers to model index 0 (the single prepared model) at full frequency.
        hidl_vec<BufferRole> inputRoles(inputIndexes.size()), outputRoles(outputIndexes.size());
        auto trans = [](uint32_t ind) -> BufferRole {
            return {.modelIndex = 0, .ioIndex = ind, .frequency = 1.0f};
        };
        std::transform(inputIndexes.begin(), inputIndexes.end(), inputRoles.begin(), trans);
        std::transform(outputIndexes.begin(), outputIndexes.end(), outputRoles.begin(), trans);

        sp<IBuffer> buffer;
        uint32_t token = 0;
        // On success the driver must hand back a non-null buffer and a positive token;
        // otherwise the only acceptable status is GENERAL_FAILURE (memory domain unsupported)
        // with a null buffer and zero token.
        const auto ret = kDevice->allocate(
                {.dimensions = dimensions}, {preparedModel}, std::move(inputRoles),
                std::move(outputRoles),
                [&buffer, &token](ErrorStatus err, const sp<IBuffer>& buf, uint32_t tok) {
                    if (err == ErrorStatus::NONE) {
                        EXPECT_NE(buf, nullptr);
                        EXPECT_GT(tok, 0);
                        buffer = buf;
                        token = tok;
                    } else {
                        EXPECT_EQ(err, ErrorStatus::GENERAL_FAILURE);
                        EXPECT_EQ(buf, nullptr);
                        EXPECT_EQ(tok, 0);
                    }
                });
        EXPECT_TRUE(ret.isOk());
        return {std::move(buffer), token};
    }

    // Convenience overload: allocate with unspecified (empty) descriptor dimensions.
    std::pair<sp<IBuffer>, uint32_t> allocateBuffer(const sp<IPreparedModel>& preparedModel,
                                                    const std::vector<uint32_t>& inputIndexes,
                                                    const std::vector<uint32_t>& outputIndexes) {
        return allocateBuffer(preparedModel, inputIndexes, outputIndexes, {});
    }

    // Allocates shared memory of exactly |size| bytes and checks the allocation succeeded.
    // Note: deliberately shadows nn::allocateSharedMemory to add the size expectation.
    hidl_memory allocateSharedMemory(uint32_t size) {
        hidl_memory memory = nn::allocateSharedMemory(size);
        EXPECT_EQ(memory.size(), size);
        return memory;
    }

    // Copies |memory| into |buffer| with the given |dimensions| and asserts the returned
    // status matches |expectedStatus|.
    void testCopyFrom(const sp<IBuffer>& buffer, const hidl_memory& memory,
                      const std::vector<uint32_t>& dimensions, ErrorStatus expectedStatus) {
        const auto ret = buffer->copyFrom(memory, dimensions);
        ASSERT_TRUE(ret.isOk());
        ASSERT_EQ(static_cast<ErrorStatus>(ret), expectedStatus);
    }

    // Copies |buffer| out into |memory| and asserts the returned status matches
    // |expectedStatus|.
    void testCopyTo(const sp<IBuffer>& buffer, const hidl_memory& memory,
                    ErrorStatus expectedStatus) {
        const auto ret = buffer->copyTo(memory);
        ASSERT_TRUE(ret.isOk());
        ASSERT_EQ(static_cast<ErrorStatus>(ret), expectedStatus);
    }

    // Initializes |buffer| by copying in kTestOperandDataSize bytes with the test operand's
    // dimensions; the copy is required to succeed.
    void initializeDeviceMemory(const sp<IBuffer>& buffer) {
        hidl_memory memory = nn::allocateSharedMemory(kTestOperandDataSize);
        ASSERT_EQ(memory.size(), kTestOperandDataSize);
        testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
    }
};
687
using MemoryDomainCopyTestParam = std::tuple<NamedDevice, TestOperandType>;
// Concrete fixture for the copyTo/copyFrom tests, parameterized over (device, operand type);
// see MemoryDomainCopyTestBase for the shared helpers.
class MemoryDomainCopyTest : public MemoryDomainCopyTestBase,
                             public testing::WithParamInterface<MemoryDomainCopyTestParam> {
  protected:
    MemoryDomainCopyTest()
        : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
                                   std::get<TestOperandType>(GetParam())) {}
};
696
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    // Copying from a memory region that is too small or too large for the operand must be
    // rejected.
    hidl_memory tooSmall = allocateSharedMemory(kTestOperandDataSize / 2);
    hidl_memory tooLarge = allocateSharedMemory(kTestOperandDataSize * 2);
    testCopyFrom(buffer, tooSmall, {}, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, tooLarge, {}, ErrorStatus::INVALID_ARGUMENT);
}
708
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize_DynamicShape) {
    // Allocate a buffer whose first (batch) dimension is dynamic.
    TestOperand dynamicOperand = kTestOperand;
    dynamicOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(dynamicOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    hidl_memory tooSmall = allocateSharedMemory(kTestOperandDataSize / 2);
    hidl_memory tooLarge = allocateSharedMemory(kTestOperandDataSize * 2);
    hidl_memory goodMemory = allocateSharedMemory(kTestOperandDataSize);

    // Fully-specified dimensions whose total size does not match the memory size.
    auto badDimensions = kTestOperand.dimensions;
    badDimensions[0] = 2;

    // Only the copy where both the memory size and the dimensions agree may succeed.
    testCopyFrom(buffer, tooSmall, kTestOperand.dimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, tooLarge, kTestOperand.dimensions, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, goodMemory, kTestOperand.dimensions, ErrorStatus::NONE);
    testCopyFrom(buffer, goodMemory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
}
729
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    hidl_memory memory = allocateSharedMemory(kTestOperandDataSize);

    {
        // Wrong rank.
        auto dims = kTestOperand.dimensions;
        dims.pop_back();
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    {
        // Mismatched batch dimension.
        auto dims = kTestOperand.dimensions;
        dims[0] = 2;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    {
        // A zero (dynamic) dimension is not acceptable for this fully-specified buffer.
        auto dims = kTestOperand.dimensions;
        dims[0] = 0;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }

    // Both empty dimensions and the exact operand dimensions are acceptable.
    testCopyFrom(buffer, memory, {}, ErrorStatus::NONE);
    testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
}
753
TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions_DynamicShape) {
    // Allocate a buffer whose first (batch) dimension is dynamic.
    TestOperand dynamicOperand = kTestOperand;
    dynamicOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(dynamicOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    hidl_memory memory = allocateSharedMemory(kTestOperandDataSize);

    {
        // Wrong rank.
        auto dims = kTestOperand.dimensions;
        dims.pop_back();
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    {
        // Dimensions whose total size does not match the memory size.
        auto dims = kTestOperand.dimensions;
        dims[0] = 2;
        dims[3] = 4;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }
    {
        // A zero dimension in a position that is fixed in the model must be rejected.
        auto dims = kTestOperand.dimensions;
        dims[0] = 1;
        dims[3] = 0;
        testCopyFrom(buffer, memory, dims, ErrorStatus::INVALID_ARGUMENT);
    }

    // With a dynamic buffer shape, empty dimensions are insufficient; fully-specified
    // matching dimensions are required.
    testCopyFrom(buffer, memory, {}, ErrorStatus::INVALID_ARGUMENT);
    testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
}
781
TEST_P(MemoryDomainCopyTest, CopyTo_UninitializedMemory) {
    // Reading back a device buffer that has never been written must fail.
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    const hidl_memory destination = allocateSharedMemory(kTestOperandDataSize);
    testCopyTo(buffer, destination, ErrorStatus::GENERAL_FAILURE);
}
790
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize) {
    // copyTo must validate that the destination pool size exactly matches the
    // byte size of the buffer contents.
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    const hidl_memory tooSmall = allocateSharedMemory(kTestOperandDataSize / 2);
    const hidl_memory tooLarge = allocateSharedMemory(kTestOperandDataSize * 2);
    const hidl_memory exactFit = allocateSharedMemory(kTestOperandDataSize);

    initializeDeviceMemory(buffer);
    testCopyTo(buffer, tooSmall, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, tooLarge, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, exactFit, ErrorStatus::NONE);
}
806
TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize_DynamicShape) {
    // Same size checks as CopyTo_InvalidMemorySize, but the buffer is allocated
    // from a model whose operand has a dynamic extent on dimension 0.
    TestOperand dynamicOperand = kTestOperand;
    dynamicOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(dynamicOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    const hidl_memory tooSmall = allocateSharedMemory(kTestOperandDataSize / 2);
    const hidl_memory tooLarge = allocateSharedMemory(kTestOperandDataSize * 2);
    const hidl_memory exactFit = allocateSharedMemory(kTestOperandDataSize);

    initializeDeviceMemory(buffer);
    testCopyTo(buffer, tooSmall, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, tooLarge, ErrorStatus::INVALID_ARGUMENT);
    testCopyTo(buffer, exactFit, ErrorStatus::NONE);
}
824
printMemoryDomainCopyTest(const testing::TestParamInfo<MemoryDomainCopyTestParam> & info)825 std::string printMemoryDomainCopyTest(
826 const testing::TestParamInfo<MemoryDomainCopyTestParam>& info) {
827 const auto& [namedDevice, operandType] = info.param;
828 const std::string type = toString(static_cast<OperandType>(operandType));
829 return gtestCompliantName(getName(namedDevice) + "_" + type);
830 }
831
// Instantiate the copy tests over every (device, operand type) combination.
// NOTE(review): INSTANTIATE_TEST_CASE_P is the deprecated spelling of
// INSTANTIATE_TEST_SUITE_P; kept as-is for compatibility with the gtest
// version used by this branch.
INSTANTIATE_TEST_CASE_P(TestMemoryDomain, MemoryDomainCopyTest,
                        testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices),
                        printMemoryDomainCopyTest);
835
836 using MemoryDomainExecutionTestParam = std::tuple<NamedDevice, TestOperandType, Executor>;
837 class MemoryDomainExecutionTest
838 : public MemoryDomainCopyTestBase,
839 public testing::WithParamInterface<MemoryDomainExecutionTestParam> {
840 protected:
MemoryDomainExecutionTest()841 MemoryDomainExecutionTest()
842 : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
843 std::get<TestOperandType>(GetParam())) {}
844
createSharedMemoryPool(uint32_t size)845 Request::MemoryPool createSharedMemoryPool(uint32_t size) {
846 hidl_memory memory = allocateSharedMemory(size);
847 Request::MemoryPool pool;
848 pool.hidlMemory(memory);
849 return pool;
850 }
851
createDeviceMemoryPool(uint32_t token)852 Request::MemoryPool createDeviceMemoryPool(uint32_t token) {
853 Request::MemoryPool pool;
854 pool.token(token);
855 return pool;
856 }
857
testExecution(const sp<IPreparedModel> & preparedModel,const Request & request,ErrorStatus expectedStatus)858 void testExecution(const sp<IPreparedModel>& preparedModel, const Request& request,
859 ErrorStatus expectedStatus) {
860 switch (kExecutor) {
861 case Executor::ASYNC:
862 EXPECT_EQ(executeAsync(preparedModel, request), expectedStatus);
863 break;
864 case Executor::SYNC:
865 EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
866 break;
867 case Executor::FENCED:
868 EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
869 break;
870 default:
871 ASSERT_TRUE(false);
872 }
873 }
874
executeAsync(const sp<IPreparedModel> & preparedModel,const Request & request)875 ErrorStatus executeAsync(const sp<IPreparedModel>& preparedModel, const Request& request) {
876 ErrorStatus executionStatus;
877
878 // launch execution
879 sp<ExecutionCallback> executionCallback = new ExecutionCallback();
880 const auto ret =
881 preparedModel->execute_1_3(request, MeasureTiming::NO, {}, {}, executionCallback);
882 EXPECT_TRUE(ret.isOk());
883 executionStatus = static_cast<ErrorStatus>(ret);
884
885 // retrieve execution status
886 executionCallback->wait();
887 if (executionStatus == ErrorStatus::NONE) {
888 executionStatus = executionCallback->getStatus();
889 } else {
890 EXPECT_EQ(executionStatus, executionCallback->getStatus());
891 }
892 const auto timing = executionCallback->getTiming();
893 EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
894 EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
895 if (executionStatus != ErrorStatus::NONE) {
896 EXPECT_EQ(executionCallback->getOutputShapes().size(), 0);
897 }
898 return executionStatus;
899 }
900
executeSync(const sp<IPreparedModel> & preparedModel,const Request & request)901 ErrorStatus executeSync(const sp<IPreparedModel>& preparedModel, const Request& request) {
902 ErrorStatus executionStatus;
903 const auto ret = preparedModel->executeSynchronously_1_3(
904 request, MeasureTiming::NO, {}, {},
905 [&executionStatus](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
906 const Timing& time) {
907 executionStatus = error;
908 EXPECT_EQ(UINT64_MAX, time.timeOnDevice);
909 EXPECT_EQ(UINT64_MAX, time.timeInDriver);
910 if (executionStatus != ErrorStatus::NONE) {
911 EXPECT_EQ(shapes.size(), 0);
912 }
913 });
914 EXPECT_TRUE(ret.isOk());
915 return executionStatus;
916 }
917
executeFenced(const sp<IPreparedModel> & preparedModel,const Request & request)918 ErrorStatus executeFenced(const sp<IPreparedModel>& preparedModel, const Request& request) {
919 ErrorStatus executionStatus;
920 hidl_handle syncFenceHandle;
921 sp<IFencedExecutionCallback> fencedCallback;
922 const auto callbackFunc = [&executionStatus, &syncFenceHandle, &fencedCallback](
923 ErrorStatus error, const hidl_handle& handle,
924 const sp<IFencedExecutionCallback>& callback) {
925 executionStatus = error;
926 syncFenceHandle = handle;
927 fencedCallback = callback;
928 };
929 Return<void> ret = preparedModel->executeFenced(request, {}, MeasureTiming::NO, {}, {}, {},
930 callbackFunc);
931 EXPECT_TRUE(ret.isOk());
932 if (executionStatus != ErrorStatus::NONE) {
933 EXPECT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
934 EXPECT_EQ(fencedCallback, nullptr);
935 return executionStatus;
936 }
937 if (syncFenceHandle.getNativeHandle()) {
938 waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
939 }
940 EXPECT_NE(fencedCallback, nullptr);
941 ret = fencedCallback->getExecutionInfo(
942 [&executionStatus](ErrorStatus error, Timing t, Timing) {
943 executionStatus = error;
944 EXPECT_EQ(UINT64_MAX, t.timeOnDevice);
945 EXPECT_EQ(UINT64_MAX, t.timeInDriver);
946 });
947 EXPECT_TRUE(ret.isOk());
948 return executionStatus;
949 }
950
951 const Executor kExecutor = std::get<Executor>(GetParam());
952 };
953
TEST_P(MemoryDomainExecutionTest, InvalidToken) {
    auto preparedModel = createConvPreparedModel(kTestOperand);
    if (preparedModel == nullptr) return;

    Request::MemoryPool sharedMemory = createSharedMemoryPool(kTestOperandDataSize);
    RequestArgument sharedMemoryArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 1}};

    // Neither token 0 (invalid) nor token 100 (never allocated) may be used,
    // whether the device memory serves as an input or as an output.
    for (bool deviceMemoryIsInput : {true, false}) {
        for (uint32_t badToken : {0u, 100u}) {
            Request::MemoryPool badDeviceMemory = createDeviceMemoryPool(badToken);
            const Request request =
                    deviceMemoryIsInput
                            ? Request{.inputs = {deviceMemoryArg},
                                      .outputs = {sharedMemoryArg},
                                      .pools = {sharedMemory, badDeviceMemory}}
                            : Request{.inputs = {sharedMemoryArg},
                                      .outputs = {deviceMemoryArg},
                                      .pools = {sharedMemory, badDeviceMemory}};
            testExecution(preparedModel, request, ErrorStatus::INVALID_ARGUMENT);
        }
    }
}
986
TEST_P(MemoryDomainExecutionTest, InvalidPreparedModel) {
    // Allocate the device memory against one prepared model, then attempt to
    // use it with a different (but otherwise identical) prepared model.
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;
    auto badPreparedModel = createConvPreparedModel(kTestOperand);
    if (badPreparedModel == nullptr) return;

    Request::MemoryPool sharedMemory = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool deviceMemory = createDeviceMemoryPool(token);
    RequestArgument sharedMemoryArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 1}};

    // The buffer was not allocated for badPreparedModel, so both directions
    // must be rejected.
    initializeDeviceMemory(buffer);
    for (bool deviceMemoryIsInput : {true, false}) {
        const Request request =
                deviceMemoryIsInput
                        ? Request{.inputs = {deviceMemoryArg},
                                  .outputs = {sharedMemoryArg},
                                  .pools = {sharedMemory, deviceMemory}}
                        : Request{.inputs = {sharedMemoryArg},
                                  .outputs = {deviceMemoryArg},
                                  .pools = {sharedMemory, deviceMemory}};
        testExecution(badPreparedModel, request, ErrorStatus::INVALID_ARGUMENT);
    }
}
1013
TEST_P(MemoryDomainExecutionTest, InvalidIOIndex) {
    // The device memory below is allocated for input 0 only; binding it to any
    // other input or output index of the two-input/two-output model must fail.
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {});
    if (buffer == nullptr) return;

    Request::MemoryPool sharedPool1 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool sharedPool2 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool sharedPool3 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg1 = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument sharedArg2 = {
            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument sharedArg3 = {
            .location = {.poolIndex = 2, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 3}};

    initializeDeviceMemory(buffer);

    // Device memory bound to input 1 -> rejected.
    testExecution(preparedModel,
                  {.inputs = {sharedArg1, deviceArg},
                   .outputs = {sharedArg2, sharedArg3},
                   .pools = {sharedPool1, sharedPool2, sharedPool3, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Device memory bound to output 1 -> rejected.
    testExecution(preparedModel,
                  {.inputs = {sharedArg1, sharedArg2},
                   .outputs = {sharedArg3, deviceArg},
                   .pools = {sharedPool1, sharedPool2, sharedPool3, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);
}
1046
TEST_P(MemoryDomainExecutionTest, InvalidIOType) {
    // A device memory carries the role it was allocated for: input-only memory
    // may not be used as an output, and output-only memory not as an input.
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [inputBuffer, inputToken] = allocateBuffer(preparedModel, {0}, {});
    auto [outputBuffer, outputToken] = allocateBuffer(preparedModel, {}, {0});
    if (inputBuffer == nullptr || outputBuffer == nullptr) return;

    Request::MemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(inputToken);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // Input-role memory used as an output -> rejected.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Output-role memory used as an input -> rejected.
    devicePool.token(outputToken);
    initializeDeviceMemory(outputBuffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);
}
1075
TEST_P(MemoryDomainExecutionTest, UninitializedMemory) {
    // Reading an uninitialized device memory must fail; once an execution has
    // written to it, it may be read back.
    auto preparedModel = createConvPreparedModel(kTestOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
    if (buffer == nullptr) return;

    Request::MemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};

    // Using the uninitialized device memory as an input must fail.
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::GENERAL_FAILURE);

    // Writing to the device memory as an output initializes it.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::NONE);

    // The same input execution must now succeed.
    testExecution(preparedModel,
                  {.inputs = {deviceArg},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::NONE);
}
1108
TEST_P(MemoryDomainExecutionTest, SameRequestMultipleRoles) {
    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
    auto [buffer, token] = allocateBuffer(preparedModel, {0, 1}, {0, 1});
    if (buffer == nullptr) return;

    Request::MemoryPool sharedPool1 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool sharedPool2 = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    RequestArgument sharedArg1 = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument sharedArg2 = {
            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
    RequestArgument deviceArg = {.location = {.poolIndex = 2}};

    // One device memory may not serve as both an input and an output of the
    // same execution.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg, sharedArg1},
                   .outputs = {deviceArg, sharedArg2},
                   .pools = {sharedPool1, sharedPool2, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Nor may it serve as two outputs at once.
    testExecution(preparedModel,
                  {.inputs = {sharedArg1, sharedArg2},
                   .outputs = {deviceArg, deviceArg},
                   .pools = {sharedPool1, sharedPool2, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Serving as multiple inputs, however, is allowed.
    initializeDeviceMemory(buffer);
    testExecution(preparedModel,
                  {.inputs = {deviceArg, deviceArg},
                   .outputs = {sharedArg1, sharedArg2},
                   .pools = {sharedPool1, sharedPool2, devicePool}},
                  ErrorStatus::NONE);
}
1146
TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
    // FENCED execution does not support dynamic shape.
    if (kExecutor == Executor::FENCED) return;

    // The buffer is allocated with the fully-specified shape; a request that
    // supplies a conflicting shape for it must be rejected.
    TestOperand dynamicOperand = kTestOperand;
    dynamicOperand.dimensions[0] = 0;
    auto preparedModel = createConvPreparedModel(dynamicOperand);
    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0}, kTestOperand.dimensions);
    if (buffer == nullptr) return;

    Request::MemoryPool sharedPool = createSharedMemoryPool(kTestOperandDataSize);
    Request::MemoryPool devicePool = createDeviceMemoryPool(token);
    auto conflictingDimensions = kTestOperand.dimensions;
    conflictingDimensions[0] = 2;
    RequestArgument sharedArg = {
            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize},
            .dimensions = conflictingDimensions};
    RequestArgument deviceArg = {.location = {.poolIndex = 1}};
    RequestArgument deviceArgBadDimensions = {.location = {.poolIndex = 1},
                                              .dimensions = conflictingDimensions};

    initializeDeviceMemory(buffer);

    // Conflicting dimensions on the device-memory input -> rejected up front.
    testExecution(preparedModel,
                  {.inputs = {deviceArgBadDimensions},
                   .outputs = {sharedArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Conflicting dimensions on the device-memory output -> rejected up front.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArgBadDimensions},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::INVALID_ARGUMENT);

    // Here only the shared-memory input carries the conflicting shape; the
    // mismatch with the device-memory output surfaces during execution as
    // GENERAL_FAILURE rather than at validation time.
    testExecution(preparedModel,
                  {.inputs = {sharedArg},
                   .outputs = {deviceArg},
                   .pools = {sharedPool, devicePool}},
                  ErrorStatus::GENERAL_FAILURE);
}
1187
1188 const auto kExecutorChoices = testing::Values(Executor::ASYNC, Executor::SYNC, Executor::FENCED);
1189
printMemoryDomainExecutionTest(const testing::TestParamInfo<MemoryDomainExecutionTestParam> & info)1190 std::string printMemoryDomainExecutionTest(
1191 const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {
1192 const auto& [namedDevice, operandType, executor] = info.param;
1193 const std::string type = toString(static_cast<OperandType>(operandType));
1194 const std::string executorStr = toString(executor);
1195 return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + executorStr);
1196 }
1197
// Instantiate the execution tests over every (device, operand type, executor)
// combination. NOTE(review): INSTANTIATE_TEST_CASE_P is the deprecated
// spelling of INSTANTIATE_TEST_SUITE_P; kept as-is for compatibility with the
// gtest version used by this branch.
INSTANTIATE_TEST_CASE_P(TestMemoryDomain, MemoryDomainExecutionTest,
                        testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices,
                                         kExecutorChoices),
                        printMemoryDomainExecutionTest);
1202
1203 } // namespace android::hardware::neuralnetworks::V1_3::vts::functional
1204