1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <gtest/gtest.h>
18 
19 #include <vector>
20 
21 #include "FibonacciDriver.h"
22 #include "FibonacciExtension.h"
23 #include "HalInterfaces.h"
24 #include "Manager.h"
25 #include "NeuralNetworks.h"
26 #include "NeuralNetworksExtensions.h"
27 #include "NeuralNetworksWrapperExtensions.h"
28 #include "TestNeuralNetworksWrapper.h"
29 #include "TypeManager.h"
30 #include "Utils.h"
31 #include "ValidateHal.h"
32 
33 namespace android {
34 namespace nn {
35 namespace {
36 
37 using ::android::nn::test_wrapper::ExtensionModel;
38 using ::android::nn::test_wrapper::ExtensionOperandParams;
39 using ::android::nn::test_wrapper::ExtensionOperandType;
40 using ::android::nn::test_wrapper::Type;
41 
42 class FibonacciExtensionTest : public ::testing::Test {
43    protected:
SetUp()44     virtual void SetUp() {
45         if (DeviceManager::get()->getUseCpuOnly()) {
46             // This test requires the use a custom driver.
47             GTEST_SKIP();
48         }
49 
50         // Real world extension tests should run against actual hardware
51         // implementations, but there is no hardware supporting the test
52         // extension. Hence the sample software driver.
53         DeviceManager::get()->forTest_registerDevice(sample_driver::FibonacciDriver::kDriverName,
54                                                      new sample_driver::FibonacciDriver());
55         // Discover extensions provided by registered devices.
56         TypeManager::get()->forTest_reset();
57 
58         uint32_t numDevices = 0;
59         ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
60         for (uint32_t i = 0; i < numDevices; i++) {
61             ANeuralNetworksDevice* device = nullptr;
62             EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
63             mAllDevices.push_back(device);
64             bool supportsFibonacciExtension;
65             ASSERT_EQ(
66                     ANeuralNetworksDevice_getExtensionSupport(
67                             device, EXAMPLE_FIBONACCI_EXTENSION_NAME, &supportsFibonacciExtension),
68                     ANEURALNETWORKS_NO_ERROR);
69             if (supportsFibonacciExtension) {
70                 ASSERT_EQ(mFibonacciDevice, nullptr) << "Found multiple Fibonacci drivers";
71                 mFibonacciDevice = device;
72             } else if (DeviceManager::get()->forTest_isCpuDevice(device)) {
73                 ASSERT_EQ(mCpuDevice, nullptr) << "Found multiple CPU drivers";
74                 mCpuDevice = device;
75             }
76         }
77         ASSERT_NE(mFibonacciDevice, nullptr) << "Expecting Fibonacci driver to be available";
78         ASSERT_NE(mCpuDevice, nullptr) << "Expecting CPU driver to be available";
79         mDevices = {mFibonacciDevice, mCpuDevice};
80     }
81 
TearDown()82     virtual void TearDown() {
83         if (mExecution) {
84             ANeuralNetworksExecution_free(mExecution);
85         }
86         if (mCompilation) {
87             ANeuralNetworksCompilation_free(mCompilation);
88         }
89         DeviceManager::get()->forTest_reInitializeDeviceList();
90         TypeManager::get()->forTest_reset();
91     }
92 
checkSupportedOperations(const std::vector<bool> & expected,const std::vector<ANeuralNetworksDevice * > devices)93     void checkSupportedOperations(const std::vector<bool>& expected,
94                                   const std::vector<ANeuralNetworksDevice*> devices) {
95         const uint32_t kMaxNumberOperations = 256;
96         EXPECT_LE(expected.size(), kMaxNumberOperations);
97         bool supported[kMaxNumberOperations] = {false};
98         EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(
99                           mModel.getHandle(), devices.data(), devices.size(), supported),
100                   ANEURALNETWORKS_NO_ERROR);
101         for (size_t i = 0; i < expected.size(); ++i) {
102             SCOPED_TRACE(::testing::Message() << "i = " << i);
103             EXPECT_EQ(supported[i], expected[i]);
104         }
105     }
106 
checkSupportedOperations(const std::vector<bool> & expected)107     void checkSupportedOperations(const std::vector<bool>& expected) {
108         checkSupportedOperations(expected, mDevices);
109     }
110 
prepareForExecution()111     void prepareForExecution() {
112         ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
113                                                               mDevices.size(), &mCompilation),
114                   ANEURALNETWORKS_NO_ERROR);
115         ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
116         ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
117                   ANEURALNETWORKS_NO_ERROR);
118     }
119 
120     ANeuralNetworksDevice* mFibonacciDevice = nullptr;
121     ANeuralNetworksDevice* mCpuDevice = nullptr;
122     std::vector<ANeuralNetworksDevice*> mDevices;  // Fibonacci and CPU devices.
123     std::vector<ANeuralNetworksDevice*> mAllDevices;
124     ANeuralNetworksExecution* mExecution = nullptr;
125     ANeuralNetworksCompilation* mCompilation = nullptr;
126     ExtensionModel mModel;
127 };
128 
addNopOperation(ExtensionModel * model,ExtensionOperandType inputType,uint32_t input,uint32_t output)129 void addNopOperation(ExtensionModel* model, ExtensionOperandType inputType, uint32_t input,
130                      uint32_t output) {
131     // Our NOP operation is ADD, which has no extension type support.
132     ASSERT_EQ(inputType.operandType.type, ANEURALNETWORKS_TENSOR_FLOAT32);
133     ASSERT_EQ(inputType.dimensions.size(), 1u);
134 
135     uint32_t inputZeros = model->addOperand(&inputType);
136     uint32_t inputSize = inputType.dimensions[0];
137     uint32_t inputLength = sizeof(float) * inputSize;
138     const float kZeros[100] = {};
139     ASSERT_GE(sizeof(kZeros), inputLength);
140     model->setOperandValue(inputZeros, &kZeros, inputLength);
141 
142     ExtensionOperandType scalarType(Type::INT32, {});
143     uint32_t activation = model->addOperand(&scalarType);
144     int32_t kNoActivation = ANEURALNETWORKS_FUSED_NONE;
145     model->setOperandValue(activation, &kNoActivation, sizeof(kNoActivation));
146 
147     model->addOperation(ANEURALNETWORKS_ADD, {input, inputZeros, activation}, {output});
148 }
149 
createModel(ExtensionModel * model,ExtensionOperandType inputType,ExtensionOperandType outputType,bool addNopOperations)150 void createModel(ExtensionModel* model, ExtensionOperandType inputType,
151                  ExtensionOperandType outputType, bool addNopOperations) {
152     uint32_t fibonacciInput = model->addOperand(&inputType);
153     uint32_t fibonacciOutput = model->addOperand(&outputType);
154 
155     uint32_t modelInput = addNopOperations ? model->addOperand(&inputType) : fibonacciInput;
156     uint32_t modelOutput = addNopOperations ? model->addOperand(&outputType) : fibonacciOutput;
157 
158     if (addNopOperations) {
159         addNopOperation(model, inputType, modelInput, fibonacciInput);
160     }
161     model->addOperation(
162             model->getExtensionOperationType(EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_FIBONACCI),
163             {fibonacciInput}, {fibonacciOutput});
164     if (addNopOperations) {
165         addNopOperation(model, outputType, fibonacciOutput, modelOutput);
166     }
167 
168     model->identifyInputsAndOutputs({modelInput}, {modelOutput});
169     model->finish();
170     ASSERT_TRUE(model->isValid());
171 }
172 
// Runs the Fibonacci operation end-to-end using the extension's own operand
// types (EXAMPLE_INT64 input, EXAMPLE_TENSOR_QUANT64_ASYMM output).
TEST_F(FibonacciExtensionTest, ModelWithExtensionOperandTypes) {
    constexpr uint32_t N = 10;
    constexpr double scale = 0.5;
    constexpr int64_t zeroPoint = 10;

    ExtensionOperandType inputType(static_cast<Type>(mModel.getExtensionOperandType(
                                           EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_INT64)),
                                   {});
    ExtensionOperandType outputType(
            static_cast<Type>(mModel.getExtensionOperandType(EXAMPLE_FIBONACCI_EXTENSION_NAME,
                                                             EXAMPLE_TENSOR_QUANT64_ASYMM)),
            {N},
            ExtensionOperandParams(ExampleQuant64AsymmParams{
                    .scale = scale,
                    .zeroPoint = zeroPoint,
            }));
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({true});
    prepareForExecution();

    int64_t input = N;
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    int64_t output[N] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);

    // The first N Fibonacci numbers, quantized as value / scale + zeroPoint.
    const int64_t kFibonacci[N] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55};
    for (uint32_t i = 0; i < N; ++i) {
        SCOPED_TRACE(::testing::Message() << "i = " << i);
        EXPECT_EQ(output[i], kFibonacci[i] / scale + zeroPoint);
    }
}
214 
// Runs the Fibonacci operation sandwiched between NOPs, so the extension
// operation communicates with the rest of the model through temporaries.
TEST_F(FibonacciExtensionTest, ModelWithTemporaries) {
    constexpr uint32_t N = 10;

    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {N});
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/true);
    // Three operations: NOP, FIBONACCI, NOP.
    checkSupportedOperations({true, true, true});
    prepareForExecution();

    float input[] = {N};
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    float output[N] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);

    // The first N Fibonacci numbers.
    const float kFibonacci[N] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55};
    for (uint32_t i = 0; i < N; ++i) {
        SCOPED_TRACE(::testing::Message() << "i = " << i);
        EXPECT_EQ(output[i], kFibonacci[i]);
    }
}
245 
// A Fibonacci operation with an unsupported input type must be reported as
// unsupported and must fail model validation at compilation time.
TEST_F(FibonacciExtensionTest, InvalidInputType) {
    ExtensionOperandType badInputType(Type::TENSOR_INT32, {1});  // Unsupported type.
    ExtensionOperandType goodOutputType(Type::TENSOR_FLOAT32, {1});
    createModel(&mModel, badInputType, goodOutputType, /*addNopOperations=*/false);
    // The driver reports that it doesn't support the operation.
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    // Compiling anyway must fail validation.
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}
256 
// A Fibonacci operation with an unsupported output type must be reported as
// unsupported and must fail model validation at compilation time.
TEST_F(FibonacciExtensionTest, InvalidOutputType) {
    ExtensionOperandType goodInputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType badOutputType(Type::TENSOR_INT32, {1});  // Unsupported type.
    createModel(&mModel, goodInputType, badOutputType, /*addNopOperations=*/false);
    // The driver reports that it doesn't support the operation.
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    // Compiling anyway must fail validation.
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}
267 
// A structurally valid model with a bad runtime input value compiles fine but
// must fail at execution time.
TEST_F(FibonacciExtensionTest, InvalidInputValue) {
    ExtensionOperandType scalarInType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType scalarOutType(Type::TENSOR_FLOAT32, {1});
    createModel(&mModel, scalarInType, scalarOutType, /*addNopOperations=*/false);
    checkSupportedOperations({true});
    prepareForExecution();

    float badInput[] = {-1};  // Invalid input value.
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &badInput, sizeof(badInput)),
              ANEURALNETWORKS_NO_ERROR);

    float result[1] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &result, sizeof(result)),
              ANEURALNETWORKS_NO_ERROR);

    // The bad value is only detected when the driver runs the operation.
    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_OP_FAILED);
}
285 
// A Fibonacci operation with too many inputs must be reported as unsupported
// and must fail model validation at compilation time.
TEST_F(FibonacciExtensionTest, InvalidNumInputs) {
    ExtensionOperandType inType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outType(Type::TENSOR_FLOAT32, {1});
    const uint32_t firstInput = mModel.addOperand(&inType);
    const uint32_t extraInput = mModel.addOperand(&inType);  // FIBONACCI takes exactly one input.
    const uint32_t result = mModel.addOperand(&outType);
    mModel.addOperation(
            mModel.getExtensionOperationType(EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_FIBONACCI),
            {firstInput, extraInput}, {result});
    mModel.identifyInputsAndOutputs({firstInput, extraInput}, {result});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}
304 
// A Fibonacci operation with too many outputs must be reported as unsupported
// and must fail model validation at compilation time.
TEST_F(FibonacciExtensionTest, InvalidNumOutputs) {
    ExtensionOperandType inType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outType(Type::TENSOR_FLOAT32, {1});
    const uint32_t operand = mModel.addOperand(&inType);
    const uint32_t firstOutput = mModel.addOperand(&outType);
    const uint32_t extraOutput = mModel.addOperand(&outType);  // FIBONACCI has exactly one output.
    mModel.addOperation(
            mModel.getExtensionOperationType(EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_FIBONACCI),
            {operand}, {firstOutput, extraOutput});
    mModel.identifyInputsAndOutputs({operand}, {firstOutput, extraOutput});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}
323 
// An operation code the extension does not define must be reported as
// unsupported and must fail model validation at compilation time.
TEST_F(FibonacciExtensionTest, InvalidOperation) {
    ExtensionOperandType inType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outType(Type::TENSOR_FLOAT32, {1});
    const uint32_t operand = mModel.addOperand(&inType);
    const uint32_t result = mModel.addOperand(&outType);
    // EXAMPLE_FIBONACCI + 1 is an operation code that should not exist.
    const auto nonexistentOp = mModel.getExtensionOperationType(EXAMPLE_FIBONACCI_EXTENSION_NAME,
                                                                EXAMPLE_FIBONACCI + 1);
    mModel.addOperation(nonexistentOp, {operand}, {result});
    mModel.identifyInputsAndOutputs({operand}, {result});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}
342 
// Queries each discovered device individually: only the Fibonacci driver
// should claim support for the Fibonacci operation.
TEST_F(FibonacciExtensionTest, GetSupportedOperations) {
    ExtensionOperandType scalarInType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType scalarOutType(Type::TENSOR_FLOAT32, {1});
    createModel(&mModel, scalarInType, scalarOutType, /*addNopOperations=*/false);

    for (ANeuralNetworksDevice* device : mAllDevices) {
        const char* deviceName = nullptr;
        ASSERT_EQ(ANeuralNetworksDevice_getName(device, &deviceName), ANEURALNETWORKS_NO_ERROR);
        SCOPED_TRACE(::testing::Message() << "device = " << deviceName);
        const bool expectSupported = (device == mFibonacciDevice);
        checkSupportedOperations({expectSupported}, {device});
    }
}
356 
357 }  // namespace
358 }  // namespace nn
359 }  // namespace android
360