1 /**
2  * Copyright 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "NN_RAND_MODEL"
18 
#include <android-base/logging.h>
#include <jni.h>

#include <algorithm>
#include <ctime>
#include <fstream>
#include <memory>
#include <optional>
#include <random>
#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "GeneratedTestUtils.h"
#include "fuzzing/OperationManager.h"
#include "fuzzing/RandomGraphGenerator.h"
#include "fuzzing/RandomGraphGeneratorUtils.h"
36 
// Called by the JVM when this native library is loaded. Configures
// android-base logging (logd backend, INFO minimum severity) before any test
// code runs, then reports the JNI version this library requires.
extern "C" JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) {
  android::base::InitLogging(nullptr, android::base::LogdLogger());
  android::base::SetMinimumLogSeverity(android::base::INFO);
  return JNI_VERSION_1_6;
}
42 
// Outcome of one attempt to generate/compile/execute a random model.
// Returned across JNI as a jint, so the numeric values are part of the
// contract with the Java side.
enum RandomModelExecutionResult {
  kSuccess = 0,
  kFailedCompilation,       // Compilation creation or finish() failed.
  kFailedExecution,         // compute() returned an error.
  kFailedOtherNnApiCall,    // A supporting NNAPI query failed.
  // The following conditions are for internal retry
  kInvalidModelGenerated,     // Generator produced an invalid model.
  kUnsupportedModelGenerated  // No op of the model is supported by the device.
};
52 
53 class FuzzerLogRAII {
54  public:
FuzzerLogRAII(const std::string & nnapiLogPath)55   FuzzerLogRAII(const std::string& nnapiLogPath) {
56     using android::nn::fuzzing_test::alignedString;
57     using android::nn::fuzzing_test::Logger;
58     using android::nn::fuzzing_test::LoggerStream;
59 
60     NN_FUZZER_LOG_WRITE_FATAL_TO_SYSLOG(LOG_TAG);
61 
62     mFuzzerLogOpen = false;
63     if (!nnapiLogPath.empty()) {
64       // Checking if we can write to target file
65       std::ofstream os;
66       os.open(nnapiLogPath);
67 
68       if (os.fail()) {
69         LOG(ERROR) << "Opening file " << nnapiLogPath << " failed";
70       } else {
71         NN_FUZZER_LOG_INIT(nnapiLogPath);
72         LOG(INFO) << "Logging NNAPI to file " << nnapiLogPath;
73         mFuzzerLogOpen = true;
74       }
75     }
76   }
~FuzzerLogRAII()77   ~FuzzerLogRAII() {
78     if (mFuzzerLogOpen) {
79       using android::nn::fuzzing_test::alignedString;
80       using android::nn::fuzzing_test::Logger;
81       using android::nn::fuzzing_test::LoggerStream;
82 
83       NN_FUZZER_LOG_CLOSE;
84     }
85   }
86 
87  private:
88   bool mFuzzerLogOpen;
89 };
90 
getOperationsInModel(const test_helper::TestModel & testModel)91 std::vector<test_helper::TestOperationType> getOperationsInModel(
92     const test_helper::TestModel& testModel) {
93   std::vector<test_helper::TestOperationType> result;
94   testModel.forEachSubgraph(
95       [&result](const test_helper::TestSubgraph& subgraph) {
96         for (const auto& operation : subgraph.operations) {
97           result.push_back(operation.type);
98         }
99       });
100 
101   return result;
102 }
103 
findDeviceByName(const char * deviceName)104 const ANeuralNetworksDevice* findDeviceByName(const char* deviceName) {
105   if (!deviceName) return nullptr;
106 
107   std::string deviceNameStr(deviceName);
108   uint32_t numDevices = 0;
109   ANeuralNetworks_getDeviceCount(&numDevices);
110 
111   for (uint32_t i = 0; i < numDevices; i++) {
112     ANeuralNetworksDevice* device = nullptr;
113     const char* buffer = nullptr;
114     int getDeviceResult = ANeuralNetworks_getDevice(i, &device);
115     if (getDeviceResult != ANEURALNETWORKS_NO_ERROR) {
116       LOG(ERROR) << "Unable to get NNAPI device " << i << ": "
117                  << getDeviceResult;
118       return nullptr;
119     }
120 
121     int getDeviceNameResult = ANeuralNetworksDevice_getName(device, &buffer);
122     if (getDeviceNameResult != ANEURALNETWORKS_NO_ERROR) {
123       LOG(ERROR) << "Unable to get name of NNAPI device " << i << ": "
124                  << getDeviceNameResult;
125       return nullptr;
126     }
127 
128     if (deviceNameStr == buffer) {
129       return device;
130     }
131   }
132 
133   LOG(ERROR) << "No device with name " << deviceNameStr;
134   return nullptr;
135 }
136 
getNnApiReferenceDevice()137 const ANeuralNetworksDevice* getNnApiReferenceDevice() {
138   return findDeviceByName("nnapi-reference");
139 }
140 
// Generates random NNAPI models and runs them (compilation and, optionally,
// execution) against a target device. The supported-operations filter built
// by init() keeps the generator from repeatedly producing models the device
// cannot use.
class RandomGraphGenerator {
 public:
  // device may be null, meaning "let the NNAPI runtime choose"; deviceName is
  // empty in that case. nnapiLogPath and failedModelDumpPath may be empty to
  // disable fuzzer logging and failed-model dumping respectively.
  RandomGraphGenerator(const ANeuralNetworksDevice* device,
                       const std::string& deviceName,
                       const std::string& testName, uint32_t numOperations,
                       uint32_t dimensionRange, std::string nnapiLogPath,
                       std::string failedModelDumpPath)
      : mTestName(testName),
        mDevice(device),
        mDeviceName(deviceName),
        mNnApiReference(getNnApiReferenceDevice()),
        mSupportedOpsFilter(),
        mNumOperations(numOperations),
        mDimensionRange(dimensionRange),
        nnapiFuzzerLogRAII(nnapiLogPath),
        mFailedModelDumpPath(failedModelDumpPath) {}

  // Queries the target device's capabilities (HAL versions, operand types,
  // operation codes) and stores them in mSupportedOpsFilter. Returns kSuccess
  // or kFailedOtherNnApiCall if a capability query fails.
  RandomModelExecutionResult init() {
    // Limiting the ops in the generator to a subset we know the target device
    // supports to avoid failing the test because we are unable to find a
    // suitable model to compile.
    RandomModelExecutionResult filterInitResult;
    filterInitResult =
        HalVersionsSupportedByDevice(&mSupportedOpsFilter.versions);
    if (filterInitResult != kSuccess) return filterInitResult;

    filterInitResult =
        OperandTypesSupportedByDevice(&mSupportedOpsFilter.dataTypes);
    if (filterInitResult != kSuccess) return filterInitResult;

    return OperationsSupportedByDevice(mSupportedOpsFilter,
                                       &mSupportedOpsFilter.opcodes);
  }

  // Generates one random model, compiles it and — unless compilationOnly —
  // executes it. Returns kSuccess, an internal retry condition
  // (kInvalidModelGenerated / kUnsupportedModelGenerated), or a failure code.
  // Models that fail compilation or execution are dumped via dumpModel().
  RandomModelExecutionResult runRandomModel(bool compilationOnly) {
    using android::nn::generated_tests::createModel;
    using android::nn::generated_tests::createRequest;
    using android::nn::generated_tests::GeneratedModel;
    using android::nn::test_wrapper::Compilation;
    using android::nn::test_wrapper::Execution;
    using android::nn::wrapper::Result;

    std::optional<test_helper::TestModel> testModel =
        createRandomModel(mSupportedOpsFilter);
    if (!testModel) {
      LOG(ERROR) << mTestName << ": No model generated";
      return kInvalidModelGenerated;
    }

    // Convert the test model into an actual ANeuralNetworksModel.
    GeneratedModel model;
    createModel(*testModel, &model);
    if (!model.isValid()) {
      LOG(ERROR) << mTestName << ": Randomly generated model is not valid";
      return kInvalidModelGenerated;
    }
    auto modelFinishResult = model.finish();
    if (modelFinishResult != Result::NO_ERROR) {
      LOG(ERROR) << mTestName << ": Failed to finish model, result is "
                 << static_cast<int>(modelFinishResult);
      return kInvalidModelGenerated;
    }

    bool fullySupportedModel = false;
    if (mDevice) {
      // One flag per operation; set to true for ops the device supports.
      std::unique_ptr<bool[]> opsSupportedFlags =
          std::make_unique<bool[]>(mNumOperations);
      std::fill(opsSupportedFlags.get(),
                opsSupportedFlags.get() + mNumOperations, false);
      // Check if the device fully supports the graph.
      int supportedOpResult =
          ANeuralNetworksModel_getSupportedOperationsForDevices(
              model.getHandle(), &mDevice, 1, opsSupportedFlags.get());
      if (supportedOpResult != ANEURALNETWORKS_NO_ERROR) {
        return kFailedOtherNnApiCall;
      }

      // accepting the model even if partially supported since we found that it
      // is extremely difficult to have fully supported models.
      // We could consider a minimum number (or percentage of total number) of
      // operations to be supported to consider the model  acceptable. For the
      // moment we just accept any model that has any supported op.
      bool supported = std::any_of(opsSupportedFlags.get(),
                                   opsSupportedFlags.get() + mNumOperations,
                                   [](bool v) { return v; });
      if (!supported) {
        return kUnsupportedModelGenerated;
      }

      fullySupportedModel = std::all_of(
          opsSupportedFlags.get(), opsSupportedFlags.get() + mNumOperations,
          [](bool v) { return v; });
    }

    // Build the device list for compilation: the target device, plus the
    // NNAPI reference device as fallback for partially supported models.
    std::vector<const ANeuralNetworksDevice*> devices;
    if (mDevice) {
      devices.push_back(mDevice);
      if (!fullySupportedModel) {
        // If model is not fully supported we allow NNAPI to use reference
        // implementation. This is to avoid having this test constantly
        // nullified by the inability of finding a fully supported model.
        LOG(VERBOSE) << "Allowing model to be partially executed on NNAPI reference device";
        devices.push_back(mNnApiReference);
      }
    }

    auto [compilationResult, compilation] = CreateCompilation(model, devices);
    if (compilationResult != Result::NO_ERROR) {
      LOG(WARNING) << mTestName << ": Compilation preparation failed with result "
                   << static_cast<int>(compilationResult);

      dumpModel(*testModel);
      return kFailedCompilation;
    }
    compilationResult = compilation.finish();
    if (compilationResult != Result::NO_ERROR) {
      LOG(WARNING) << mTestName << ": Compilation failed with result "
                   << static_cast<int>(compilationResult);

      dumpModel(*testModel);
      return kFailedCompilation;
    }

    if (!compilationOnly) {
      Execution execution(&compilation);
      std::vector<test_helper::TestBuffer> outputs;
      createRequest(*testModel, &execution, &outputs);

      // Compute result.
      Result executeReturn = execution.compute();
      if (executeReturn != Result::NO_ERROR) {
        LOG(WARNING) << mTestName << ": Execution failed with result "
                     << static_cast<int>(executeReturn);

        dumpModel(*testModel);
        return kFailedExecution;
      }
    }

    return kSuccess;
  }

  const std::string mTestName;

 private:
  android::nn::fuzzing_test::RandomGraph mRandomGraph;
  // Fresh non-deterministic seed source for every generated model.
  std::random_device mSeedGenerator;
  const ANeuralNetworksDevice* mDevice;
  // empty string if mDevice is null
  const std::string mDeviceName;
  // Reference CPU implementation, used as compilation fallback.
  const ANeuralNetworksDevice* mNnApiReference;
  android::nn::fuzzing_test::OperationFilter mSupportedOpsFilter;
  const uint32_t mNumOperations;
  const uint32_t mDimensionRange;
  FuzzerLogRAII nnapiFuzzerLogRAII;
  const std::string mFailedModelDumpPath;

  // Applies opFilter to the global OperationManager and generates one random
  // TestModel with mNumOperations ops and dimensions up to mDimensionRange.
  // Returns nullopt if generation fails for the drawn seed.
  std::optional<test_helper::TestModel> createRandomModel(
      const android::nn::fuzzing_test::OperationFilter& opFilter) {
    android::nn::fuzzing_test::OperationManager::get()->applyFilter(opFilter);

    auto seed = mSeedGenerator();
    if (!mRandomGraph.generate(seed, mNumOperations, mDimensionRange)) {
      return std::nullopt;
    }

    return {mRandomGraph.createTestModel()};
  }

  // Maps the device feature level to the set of HAL versions to generate for.
  // Leaves *result untouched (no filter) when mDevice is null or the feature
  // level is above 29.
  RandomModelExecutionResult HalVersionsSupportedByDevice(
      std::vector<test_helper::TestHalVersion>* result) {
    if (!mDevice) {
      return kSuccess;
    }

    int64_t featureLevel;
    auto getDeviceFeatureLevelResult =
        ANeuralNetworksDevice_getFeatureLevel(mDevice, &featureLevel);
    if (getDeviceFeatureLevelResult != ANEURALNETWORKS_NO_ERROR) {
      LOG(ERROR) << mTestName << ": Unable to query device feature level";
      return kFailedOtherNnApiCall;
    }

    // Feature levels correspond to Android API levels 27/28/29.
    if (featureLevel == 27) *result = {test_helper::TestHalVersion::V1_0};
    if (featureLevel == 28) *result = {test_helper::TestHalVersion::V1_1};
    if (featureLevel == 29) *result = {test_helper::TestHalVersion::V1_2};

    return kSuccess;
  }

  // Restricts the operand types to generate based on the device category
  // (e.g. GPUs get no quantized types). An empty *result means "no filter".
  RandomModelExecutionResult OperandTypesSupportedByDevice(
      std::vector<test_helper::TestOperandType>* result) {
    if (!mDevice) {
      return kSuccess;
    }

    int32_t deviceType;
    auto getDeviceTypeResult =
        ANeuralNetworksDevice_getType(mDevice, &deviceType);
    if (getDeviceTypeResult != ANEURALNETWORKS_NO_ERROR) {
      LOG(ERROR) << mTestName << ": Unable to query device type";
      return kFailedOtherNnApiCall;
    }
    using test_helper::TestOperandType;
    switch (deviceType) {
      case ANEURALNETWORKS_DEVICE_GPU:
        // No quantized types
        *result = {
            TestOperandType::FLOAT32,        TestOperandType::INT32,
            TestOperandType::UINT32,         TestOperandType::TENSOR_FLOAT32,
            TestOperandType::TENSOR_INT32,   TestOperandType::BOOL,
            TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_BOOL8,
            TestOperandType::FLOAT16};
        break;
      case ANEURALNETWORKS_DEVICE_CPU:
      case ANEURALNETWORKS_DEVICE_ACCELERATOR:
        result->clear();  // no filter
        break;
      case ANEURALNETWORKS_DEVICE_UNKNOWN:
      case ANEURALNETWORKS_DEVICE_OTHER:
        // Heuristic: device names containing "dsp" get integer/quantized
        // types only (no floating point).
        if (mDeviceName.find("dsp") != std::string::npos) {
          *result = {TestOperandType::INT32,
                     TestOperandType::UINT32,
                     TestOperandType::TENSOR_INT32,
                     TestOperandType::BOOL,
                     TestOperandType::TENSOR_BOOL8,
                     TestOperandType::TENSOR_QUANT8_ASYMM,
                     TestOperandType::TENSOR_QUANT16_SYMM,
                     TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
                     TestOperandType::TENSOR_QUANT16_ASYMM,
                     TestOperandType::TENSOR_QUANT8_SYMM,
                     TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED};
          break;
        }
        FALLTHROUGH_INTENDED;
      default:
        result->clear();  // no filter
    }
    return kSuccess;
  }

  /// Finds some operations supported by the device
  /// by probing it with a number of random models and collecting the ops the
  /// device reports as supported. Best-effort: an empty *result (no filter)
  /// is returned when no supported op was found.
  RandomModelExecutionResult OperationsSupportedByDevice(
      const android::nn::fuzzing_test::OperationFilter& basicFilter,
      std::vector<test_helper::TestOperationType>* result) {
    if (!mDevice) {
      return kSuccess;
    }

    constexpr int kNumOfAttempts = 50;
    std::set<test_helper::TestOperationType> supportedOps;
    for (int i = 0; i < kNumOfAttempts; i++) {
      std::optional<test_helper::TestModel> testModel =
          createRandomModel(basicFilter);
      if (!testModel) {
        LOG(ERROR)
            << mTestName
            << ": Unable to generate a model trying to understand the ops "
               "supported by target device";
        continue;
      }

      android::nn::generated_tests::GeneratedModel model;
      createModel(*testModel, &model);
      if (!model.isValid()) {
        LOG(WARNING) << mTestName << ": Randomly generated model is not valid";
        continue;
      }
      auto modelFinishResult = model.finish();
      if (modelFinishResult != android::nn::wrapper::Result::NO_ERROR) {
        LOG(WARNING) << "Model::finish call failed, result is "
                     << static_cast<int>(modelFinishResult);
        continue;
      }

      std::unique_ptr<bool[]> opsSupportedFlags =
          std::make_unique<bool[]>(mNumOperations);
      std::fill(opsSupportedFlags.get(),
                opsSupportedFlags.get() + mNumOperations, false);

      // Check if the device fully supports the graph.
      int supportedOpResult =
          ANeuralNetworksModel_getSupportedOperationsForDevices(
              model.getHandle(), &mDevice, 1, opsSupportedFlags.get());
      if (supportedOpResult != ANEURALNETWORKS_NO_ERROR) {
        return kFailedOtherNnApiCall;
      }

      // Record the type of every op the device reported as supported.
      // NOTE(review): opIndex is int while mNumOperations is uint32_t —
      // benign for realistic op counts but a signed/unsigned mix.
      std::vector<test_helper::TestOperationType> opsInModel =
          getOperationsInModel(*testModel);
      for (int opIndex = 0; opIndex < mNumOperations; opIndex++) {
        test_helper::TestOperationType currOp = opsInModel[opIndex];
        if (opsSupportedFlags[opIndex]) {
          supportedOps.insert(currOp);
        }
      }
    }
    std::copy(supportedOps.begin(), supportedOps.end(),
              std::back_inserter(*result));

    if (result->empty()) {
      LOG(WARNING)
          << mTestName
          << ": Could not find any operation supported by target device."
          << " Returning no filter.";
    } else {
      LOG(INFO) << mTestName << ": Filtering to " << result->size()
                << " supported operations";
    }

    return kSuccess;
  }

  // Writes testModel as a spec file to mFailedModelDumpPath (no-op when the
  // path is empty) so failing models can be reproduced offline.
  void dumpModel(const test_helper::TestModel& testModel) {
    if (mFailedModelDumpPath.empty()) return;

    LOG(INFO) << mTestName << ": Dumping model failing tests to "
              << mFailedModelDumpPath;

    std::ofstream os(mFailedModelDumpPath);
    ASSERT_TRUE(os.is_open());
    os << "# Generated from " << mTestName << ". Do not edit.\n\n";
    test_helper::SpecDumper dumper(testModel, os);
    dumper.dumpTestModel();
  }

  // Creates a compilation for the given devices; with an empty device list
  // the compilation targets the whole NNAPI runtime.
  std::pair<android::nn::wrapper::Result,
            android::nn::test_wrapper::Compilation>
  CreateCompilation(const android::nn::generated_tests::GeneratedModel& model,
                    const std::vector<const ANeuralNetworksDevice*>& devices) {
    using android::nn::test_wrapper::Compilation;
    if (!devices.empty())
      return Compilation::createForDevices(&model, devices);
    else
      return {android::nn::wrapper::Result::NO_ERROR, Compilation(&model)};
  }
};
477 
478 extern "C" JNIEXPORT jint JNICALL
Java_com_android_nn_crashtest_core_RandomGraphTest_runRandomModel(JNIEnv * env,jclass,jlong _generatorHandle,jboolean _compilationOnly,jlong _maxModelSearchTimeSeconds)479 Java_com_android_nn_crashtest_core_RandomGraphTest_runRandomModel(
480     JNIEnv* env, jclass /* static method */, jlong _generatorHandle,
481     jboolean _compilationOnly, jlong _maxModelSearchTimeSeconds) {
482   RandomGraphGenerator* graphGenerator =
483       reinterpret_cast<RandomGraphGenerator*>(_generatorHandle);
484 
485   std::time_t startTime = std::time(nullptr);
486 
487   int result = kSuccess;
488   int modelSearchAttempt = 0;
489   while (std::difftime(std::time(nullptr), startTime) <
490          _maxModelSearchTimeSeconds) {
491     modelSearchAttempt++;
492 
493     result = graphGenerator->runRandomModel(_compilationOnly);
494 
495     // if by chance we generated an invalid model or a model that couldn't run
496     // on the target accelerator we will try again.
497     if (result != kInvalidModelGenerated &&
498         result != kUnsupportedModelGenerated) {
499       break;
500     }
501   }
502 
503   if (result == kInvalidModelGenerated ||
504       result == kUnsupportedModelGenerated) {
505     LOG(WARNING) << graphGenerator->mTestName
506                  << ": Max time to search for a model of "
507                  << static_cast<long>(_maxModelSearchTimeSeconds)
508                  << "seconds reached. Aborting test at attempt "
509                  << modelSearchAttempt;
510   }
511 
512   return result;
513 }
514 
515 extern "C" JNIEXPORT jlong JNICALL
com_android_nn_crashtest_core_RandomGraphTest_RandomGraphTest_createRandomGraphGenerator(JNIEnv * env,jclass,jstring _nnApiDeviceName,jint _numOperations,jint _dimensionRange,jstring _testName,jstring _nnapiLogPath,jstring _failedModelDumpPath)516 com_android_nn_crashtest_core_RandomGraphTest_RandomGraphTest_createRandomGraphGenerator(
517     JNIEnv* env, jclass /* static method */, jstring _nnApiDeviceName,
518     jint _numOperations, jint _dimensionRange, jstring _testName,
519     jstring _nnapiLogPath, jstring _failedModelDumpPath) {
520   const char* nnApiDeviceName =
521       _nnApiDeviceName ? env->GetStringUTFChars(_nnApiDeviceName, nullptr)
522                        : nullptr;
523 
524   std::string nnApiDeviceNameStr{nnApiDeviceName ? nnApiDeviceName : ""};
525   const ANeuralNetworksDevice* device = nullptr;
526   if (nnApiDeviceName) {
527     device = findDeviceByName(nnApiDeviceName);
528     if (!device) {
529       LOG(ERROR) << ": Unable to find accelerator " << nnApiDeviceName;
530       env->ReleaseStringUTFChars(_nnApiDeviceName, nnApiDeviceName);
531       return reinterpret_cast<jlong>(nullptr);
532     }
533     env->ReleaseStringUTFChars(_nnApiDeviceName, nnApiDeviceName);
534   }
535 
536   std::string testName{"no-test-name"};
537   if (_testName) {
538     const char* testNameBuf = env->GetStringUTFChars(_testName, nullptr);
539     testName = testNameBuf;
540     env->ReleaseStringUTFChars(_testName, testNameBuf);
541   }
542 
543   std::string nnapiLogPath;
544   if (_nnapiLogPath) {
545     const char* nnapiLogPathTmp =
546         env->GetStringUTFChars(_nnapiLogPath, nullptr);
547     nnapiLogPath = nnapiLogPathTmp;
548     env->ReleaseStringUTFChars(_nnapiLogPath, nnapiLogPathTmp);
549   }
550 
551   std::string failedModelDumpPath;
552   if (_failedModelDumpPath) {
553     const char* failedModelDumpPathTmp =
554         env->GetStringUTFChars(_failedModelDumpPath, nullptr);
555     failedModelDumpPath = failedModelDumpPathTmp;
556     env->ReleaseStringUTFChars(_failedModelDumpPath, failedModelDumpPathTmp);
557   }
558 
559   uint32_t numOperations = static_cast<uint32_t>(_numOperations);
560   uint32_t dimensionRange = static_cast<uint32_t>(_dimensionRange);
561 
562   RandomGraphGenerator* result = new RandomGraphGenerator(
563       device, nnApiDeviceNameStr, testName, numOperations, dimensionRange,
564       nnapiLogPath, failedModelDumpPath);
565 
566   if (result->init() != kSuccess) {
567     delete result;
568     return reinterpret_cast<jlong>(nullptr);
569   }
570 
571   return reinterpret_cast<jlong>(result);
572 }
573 
574 extern "C" JNIEXPORT void JNICALL
com_android_nn_crashtest_core_RandomGraphTest_RandomGraphTest_destroyRandomGraphGenerator(JNIEnv * env,jclass,jlong generatorHandle)575 com_android_nn_crashtest_core_RandomGraphTest_RandomGraphTest_destroyRandomGraphGenerator(
576     JNIEnv* env, jclass /* static method */, jlong generatorHandle) {
577   RandomGraphGenerator* graphGenerator =
578       reinterpret_cast<RandomGraphGenerator*>(generatorHandle);
579   delete graphGenerator;
580 }
581