/frameworks/ml/nn/driver/sample/
D | SampleDriverUtils.cpp
     29  const sp<SamplePreparedModel>& preparedModel) {  in notify() argument
     30  const auto ret = callback->notify(convertToV1_0(status), preparedModel);  in notify()
     37  const sp<SamplePreparedModel>& preparedModel) {  in notify() argument
     38  const auto ret = callback->notify_1_2(convertToV1_0(status), preparedModel);  in notify()
     46  const sp<SamplePreparedModel>& preparedModel) {  in notify() argument
     47  const auto ret = callback->notify_1_3(status, preparedModel);  in notify()
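The three notify() overloads above let the sample driver report one preparation result through whichever callback version the caller handed in, down-converting the status where the older interface requires it. Below is a minimal standalone sketch of that dispatch pattern; the enum values, callback structs, and convertToV1_0() helper are simplified stand-ins for the real HAL types, not the actual SampleDriverUtils code.

```cpp
#include <functional>
#include <iostream>
#include <memory>

// Simplified stand-ins for the versioned HAL status enums and callbacks.
enum class ErrorStatusV1_0 { NONE, GENERAL_FAILURE };
enum class ErrorStatusV1_3 { NONE, GENERAL_FAILURE, MISSED_DEADLINE_TRANSIENT };

struct PreparedModel {};  // placeholder for the driver's prepared model

struct PreparedModelCallbackV1_0 {
    std::function<void(ErrorStatusV1_0, std::shared_ptr<PreparedModel>)> notify;
};
struct PreparedModelCallbackV1_3 {
    std::function<void(ErrorStatusV1_3, std::shared_ptr<PreparedModel>)> notify_1_3;
};

// Older interfaces only understand the V1_0 status values, so down-convert.
ErrorStatusV1_0 convertToV1_0(ErrorStatusV1_3 status) {
    return status == ErrorStatusV1_3::NONE ? ErrorStatusV1_0::NONE
                                           : ErrorStatusV1_0::GENERAL_FAILURE;
}

// One overload per callback version: same call site, different wire format.
void notify(const PreparedModelCallbackV1_0& cb, ErrorStatusV1_3 status,
            const std::shared_ptr<PreparedModel>& preparedModel) {
    cb.notify(convertToV1_0(status), preparedModel);
}
void notify(const PreparedModelCallbackV1_3& cb, ErrorStatusV1_3 status,
            const std::shared_ptr<PreparedModel>& preparedModel) {
    cb.notify_1_3(status, preparedModel);  // newest version keeps full status fidelity
}

int main() {
    PreparedModelCallbackV1_3 cb{[](ErrorStatusV1_3 s, std::shared_ptr<PreparedModel> m) {
        std::cout << "prepared, status=" << static_cast<int>(s)
                  << ", model=" << (m != nullptr) << '\n';
    }};
    notify(cb, ErrorStatusV1_3::NONE, std::make_shared<PreparedModel>());
}
```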
D | SampleDriverUtils.h
     29  const sp<SamplePreparedModel>& preparedModel);
     32  const sp<SamplePreparedModel>& preparedModel);
     35  const sp<SamplePreparedModel>& preparedModel);
     78  sp<SamplePreparedModel> preparedModel =
     80  if (!preparedModel->initialize()) {
     84  notify(callback, hal::ErrorStatus::NONE, preparedModel);
D | SampleDriver.cpp
    231  const sp<IPreparedModel>& preparedModel) {  in castToSamplePreparedModel() argument
    232  if (preparedModel->isRemote()) {  in castToSamplePreparedModel()
    237  return static_cast<const SamplePreparedModel*>(preparedModel.get());  in castToSamplePreparedModel()
    250  auto getModel = [](const sp<V1_3::IPreparedModel>& preparedModel) -> const V1_3::Model* {  in allocate() argument
    251  const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel);  in allocate()
    373  const SamplePreparedModel* preparedModel) {  in createRunTimePoolInfos() argument
    396  bufferWrapper->validateRequest(i, request, preparedModel);  in createRunTimePoolInfos()
    451  const SamplePreparedModel* preparedModel,  in asyncExecute() argument
    460  createRunTimePoolInfos(request, driver, preparedModel);  in asyncExecute()
    505  const SampleDriver& driver, const SamplePreparedModel* preparedModel,  in executeBase() argument
    [all …]
/frameworks/ml/nn/runtime/
D | Callbacks.cpp
     36  const sp<V1_0::IPreparedModel>& preparedModel) {  in notifyInternal() argument
     48  mPreparedModel = preparedModel;  in notifyInternal()
     57  const sp<V1_0::IPreparedModel>& preparedModel) {  in notify() argument
     58  return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel);  in notify()
     62  const sp<V1_2::IPreparedModel>& preparedModel) {  in notify_1_2() argument
     63  return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel);  in notify_1_2()
     67  const sp<V1_3::IPreparedModel>& preparedModel) {  in notify_1_3() argument
     68  return notifyInternal(false, errorStatus, preparedModel);  in notify_1_3()
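On the runtime side, the matched lines show the mirror image: each versioned notify override funnels into a single notifyInternal() that records the status and the returned prepared model. The following is a simplified standalone sketch of that funnel with a wait() for the caller; the synchronization details and the omitted deadObject flag (the `false` argument above) are assumptions of the sketch, not a copy of Callbacks.cpp.

```cpp
#include <condition_variable>
#include <memory>
#include <mutex>
#include <thread>

enum class ErrorStatus { NONE, GENERAL_FAILURE };
struct IPreparedModel {};  // stand-in for the HAL prepared-model interface

// One callback object serves every HAL version by funnelling into notifyInternal().
class PreparedModelCallback {
   public:
    void notify(ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
        notifyInternal(status, preparedModel);
    }
    void notify_1_2(ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
        notifyInternal(status, preparedModel);
    }
    void notify_1_3(ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
        notifyInternal(status, preparedModel);
    }

    // Block until one of the notify* overloads has fired, then read the results.
    void wait() {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondition.wait(lock, [this] { return mNotified; });
    }
    ErrorStatus getStatus() {
        std::lock_guard<std::mutex> guard(mMutex);
        return mStatus;
    }
    std::shared_ptr<IPreparedModel> getPreparedModel() {
        std::lock_guard<std::mutex> guard(mMutex);
        return mPreparedModel;
    }

   private:
    void notifyInternal(ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
        {
            std::lock_guard<std::mutex> guard(mMutex);
            if (mNotified) return;  // only the first notification is kept
            mStatus = status;
            mPreparedModel = preparedModel;
            mNotified = true;
        }
        mCondition.notify_all();
    }

    std::mutex mMutex;
    std::condition_variable mCondition;
    bool mNotified = false;
    ErrorStatus mStatus = ErrorStatus::GENERAL_FAILURE;
    std::shared_ptr<IPreparedModel> mPreparedModel;
};

int main() {
    auto callback = std::make_shared<PreparedModelCallback>();
    std::thread driver([callback] {
        callback->notify_1_3(ErrorStatus::NONE, std::make_shared<IPreparedModel>());
    });
    callback->wait();
    driver.join();
    return callback->getStatus() == ErrorStatus::NONE && callback->getPreparedModel() ? 0 : 1;
}
```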
D | Callbacks.h
     89  const sp<hal::V1_0::IPreparedModel>& preparedModel) override;
    115  const sp<hal::V1_2::IPreparedModel>& preparedModel) override;
    143  const sp<hal::V1_3::IPreparedModel>& preparedModel) override;
    195  const sp<hal::V1_0::IPreparedModel>& preparedModel);
D | Memory.cpp
    337  auto callback = [&roles](const auto* preparedModel, IOType type, uint32_t index) {  in addRole() argument
    338  roles.emplace_back(preparedModel, type, index);  in addRole()
    391  for (const auto& [preparedModel, type, ind] : roles) {  in addRole()
    392  uint32_t modelIndex = mDesc.preparedModels.add(preparedModel);  in addRole()
    430  for (const auto* preparedModel : desc.preparedModels) {  in logMemoryDescriptorToInfo() local
    431  LOG(INFO) << " service = " << preparedModel->getDevice()->getName();  in logMemoryDescriptorToInfo()
    446  for (const auto* preparedModel : desc.preparedModels) {  in getDevices() local
    447  const auto* device = preparedModel->getDevice();  in getDevices()
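addRole() gathers (preparedModel, type, index) roles through a callback and then records each prepared model in the descriptor via an add() that hands back a stable index, so a model referenced by many roles is stored once. The tracker below is a sketch of that index-table idea under that assumption; it is not the runtime's actual container.

```cpp
#include <cstdint>
#include <vector>

struct PreparedModel {};

// Keeps each distinct pointer once and hands back its index; repeated add()s of the
// same prepared model return the index assigned the first time.
class PreparedModelTracker {
   public:
    uint32_t add(const PreparedModel* preparedModel) {
        for (uint32_t i = 0; i < mModels.size(); ++i) {
            if (mModels[i] == preparedModel) return i;
        }
        mModels.push_back(preparedModel);
        return static_cast<uint32_t>(mModels.size() - 1);
    }
    const std::vector<const PreparedModel*>& models() const { return mModels; }

   private:
    std::vector<const PreparedModel*> mModels;
};

int main() {
    PreparedModel a, b;
    PreparedModelTracker tracker;
    const uint32_t ia = tracker.add(&a);
    const uint32_t ib = tracker.add(&b);
    const uint32_t iaAgain = tracker.add(&a);  // duplicate add maps to the original slot
    return (ia == iaAgain && ia != ib && tracker.models().size() == 2) ? 0 : 1;
}
```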
D | Manager.cpp
    124  const std::shared_ptr<VersionedIPreparedModel>& preparedModel)  in DriverPreparedModel() argument
    125  : mDevice(device), mPreparedModel(preparedModel) {  in DriverPreparedModel()
    247  const auto [n, preparedModel] = kInterface->prepareModel(makeModel, preference, priority,  in prepareModel()
    252  CHECK(preparedModel != nullptr) << "prepareModel returned nullptr without error code";  in prepareModel()
    253  return {ANEURALNETWORKS_NO_ERROR, std::make_shared<DriverPreparedModel>(this, preparedModel)};  in prepareModel()
    262  [](const auto* preparedModel) {  in allocate() argument
    263  const auto versionedPreparedModel = preparedModel->getInterface();  in allocate()
    686  std::shared_ptr<PreparedModel> preparedModel =  in create() local
    688  return {ANEURALNETWORKS_NO_ERROR, std::move(preparedModel)};  in create()
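Manager.cpp passes preparation results around as a {status, pointer} pair and wraps the vendor handle in a runtime-side DriverPreparedModel. Here is a condensed standalone sketch of that return convention; DriverPreparedModel is reduced to a bare placeholder and the failure code is invented for the sketch.

```cpp
#include <cassert>
#include <memory>
#include <utility>

constexpr int ANEURALNETWORKS_NO_ERROR = 0;  // matches the public NNAPI success code
constexpr int kPrepareFailed = 1;            // placeholder failure code for this sketch

struct VersionedPreparedModel {};  // stand-in for the versioned HAL handle

// Runtime-side wrapper that owns the driver's prepared model.
class DriverPreparedModel {
   public:
    explicit DriverPreparedModel(std::shared_ptr<VersionedPreparedModel> preparedModel)
        : mPreparedModel(std::move(preparedModel)) {}

   private:
    std::shared_ptr<VersionedPreparedModel> mPreparedModel;
};

// Stand-in for the device interface's prepareModel(): status plus (possibly null) handle.
std::pair<int, std::shared_ptr<VersionedPreparedModel>> prepareModelOnDriver(bool succeed) {
    if (!succeed) return {kPrepareFailed, nullptr};
    return {ANEURALNETWORKS_NO_ERROR, std::make_shared<VersionedPreparedModel>()};
}

std::pair<int, std::shared_ptr<DriverPreparedModel>> prepareModel(bool succeed) {
    const auto [n, preparedModel] = prepareModelOnDriver(succeed);
    if (n != ANEURALNETWORKS_NO_ERROR) return {n, nullptr};
    // A success status with a null handle would be a driver bug, so fail loudly.
    assert(preparedModel != nullptr && "prepareModel returned nullptr without error code");
    return {ANEURALNETWORKS_NO_ERROR, std::make_shared<DriverPreparedModel>(preparedModel)};
}

int main() {
    auto [status, model] = prepareModel(/*succeed=*/true);
    return status == ANEURALNETWORKS_NO_ERROR && model != nullptr ? 0 : 1;
}
```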
D | VersionedInterfaces.cpp
    156  sp<V1_0::IPreparedModel> preparedModel) {  in makeVersionedIPreparedModel() argument
    157  CHECK(preparedModel != nullptr)  in makeVersionedIPreparedModel()
    167  const Return<bool> ret = preparedModel->linkToDeath(deathHandler, 0);  in makeVersionedIPreparedModel()
    187  std::move(preparedModel), std::move(deathHandler))};  in makeVersionedIPreparedModel()
    190  VersionedIPreparedModel::VersionedIPreparedModel(sp<V1_0::IPreparedModel> preparedModel,  in VersionedIPreparedModel() argument
    192  : mPreparedModelV1_0(std::move(preparedModel)),  in VersionedIPreparedModel()
   1191  const sp<V1_0::IPreparedModel> preparedModel = callback.getPreparedModel();  in prepareModelResult() local
   1198  if (preparedModel == nullptr) {  in prepareModelResult()
   1203  return makeVersionedIPreparedModel(preparedModel);  in prepareModelResult()
   1476  const auto [n, preparedModel] =  in prepareModel()
   [all …]
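makeVersionedIPreparedModel() rejects a null handle, registers a death recipient with linkToDeath(), and only then builds the VersionedIPreparedModel that owns both the interface and the handler. The sketch below models that wrap-with-death-notification pattern using plain C++ stand-ins; the linkToDeath() signature shown here is an assumption of the sketch, not the HIDL one.

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <utility>

// Simplified stand-ins for the HIDL interface and its death-notification hook.
struct DeathHandler {
    void serviceDied() { std::cerr << "prepared model service died\n"; }
};

struct IPreparedModel {
    // Real HIDL interfaces take a recipient and a cookie; this sketch just records the handler.
    bool linkToDeath(const std::shared_ptr<DeathHandler>& handler, uint64_t /*cookie*/) {
        mDeathHandler = handler;
        return true;  // a transport error would surface as false here
    }
    std::shared_ptr<DeathHandler> mDeathHandler;
};

class VersionedIPreparedModel {
   public:
    VersionedIPreparedModel(std::shared_ptr<IPreparedModel> preparedModel,
                            std::shared_ptr<DeathHandler> deathHandler)
        : mPreparedModel(std::move(preparedModel)), mDeathHandler(std::move(deathHandler)) {}

   private:
    std::shared_ptr<IPreparedModel> mPreparedModel;
    std::shared_ptr<DeathHandler> mDeathHandler;
};

std::shared_ptr<VersionedIPreparedModel> makeVersionedIPreparedModel(
        std::shared_ptr<IPreparedModel> preparedModel) {
    if (preparedModel == nullptr) return nullptr;  // the real code CHECKs instead

    auto deathHandler = std::make_shared<DeathHandler>();
    if (!preparedModel->linkToDeath(deathHandler, /*cookie=*/0)) {
        // If we cannot observe the service dying, do not hand out a wrapper at all.
        return nullptr;
    }
    return std::make_shared<VersionedIPreparedModel>(std::move(preparedModel),
                                                     std::move(deathHandler));
}

int main() {
    auto wrapped = makeVersionedIPreparedModel(std::make_shared<IPreparedModel>());
    return wrapped != nullptr ? 0 : 1;
}
```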
D | ExecutionPlan.cpp
     74  std::shared_ptr<PreparedModel>* preparedModel) {  in compile() argument
     76  CHECK(preparedModel != nullptr);  in compile()
     77  *preparedModel = nullptr;  in compile()
     93  *preparedModel = returnedPreparedModel;  in compile()
    788  if (const auto preparedModel =  in makeBursts() local
    793  preparedModel->configureExecutionBurst(preferPowerOverLatency));  in makeBursts()
    804  if (const auto preparedModel = simpleBody->mPreparedModel) {  in makeBursts() local
    807  burst.push_back(preparedModel->configureExecutionBurst(preferPowerOverLatency));  in makeBursts()
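compile() reports its prepared model through an out parameter: it CHECKs the pointer, clears it to nullptr up front, and assigns only on success, so callers can rely on "non-null iff success". A short sketch of that contract with illustrative names:

```cpp
#include <cassert>
#include <memory>

struct PreparedModel {};

// Returns 0 on success; *preparedModel is cleared first and only set when compilation
// succeeds, so a caller can treat "non-null" and "success" as equivalent.
int compile(bool driverSucceeds, std::shared_ptr<PreparedModel>* preparedModel) {
    assert(preparedModel != nullptr);
    *preparedModel = nullptr;
    if (!driverSucceeds) return 1;  // placeholder failure code
    *preparedModel = std::make_shared<PreparedModel>();
    return 0;
}

int main() {
    std::shared_ptr<PreparedModel> pm;
    return compile(true, &pm) == 0 && pm != nullptr ? 0 : 1;
}
```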
D | ExecutionBuilder.cpp
    980  std::shared_ptr<PreparedModel> preparedModel, const ExecutionStep* step)  in StepExecutor() argument
    985  mPreparedModel(preparedModel),  in StepExecutor()
   1138  auto [n, preparedModel] = mDevice->prepareModel(makeModel, preference, priority, {}, {}, {});  in computeOnCpuFallback()
   1139  mPreparedModel = std::move(preparedModel);  in computeOnCpuFallback()
D | ExecutionBuilder.h
    230  std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel,
D | VersionedInterfaces.h
    594  VersionedIPreparedModel(sp<hal::V1_0::IPreparedModel> preparedModel,
/frameworks/ml/nn/runtime/test/
D | TestVersionedInterfaces.cpp
    317  const sp<MockPreparedModel>& preparedModel) {  in makePreparedModelReturn() argument
    318  return [launchStatus, returnStatus, preparedModel](  in makePreparedModelReturn()
    321  cb->notify(returnStatus, preparedModel).isOk();  in makePreparedModelReturn()
    326  const sp<MockPreparedModel>& preparedModel) {  in makePreparedModel_1_1Return() argument
    327  return [launchStatus, returnStatus, preparedModel](  in makePreparedModel_1_1Return()
    330  cb->notify(returnStatus, preparedModel).isOk();  in makePreparedModel_1_1Return()
    335  const sp<MockPreparedModel>& preparedModel) {  in makePreparedModel_1_2Return() argument
    336  return [launchStatus, returnStatus, preparedModel](  in makePreparedModel_1_2Return()
    340  cb->notify_1_2(returnStatus, preparedModel).isOk();  in makePreparedModel_1_2Return()
    345  const sp<MockPreparedModel>& preparedModel) {  in makePreparedModel_1_3Return() argument
    [all …]
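These helpers build one test action per HAL version: a lambda that captures the desired launch status, return status, and mock prepared model, fires the callback, and reports the launch status to the caller. Below is a simplified standalone sketch of that factory-of-lambdas pattern; the callback and status types are stand-ins rather than the generated HIDL mocks.

```cpp
#include <functional>
#include <memory>
#include <utility>

enum class ErrorStatus { NONE, GENERAL_FAILURE };
struct MockPreparedModel {};

struct PreparedModelCallback {
    std::function<void(ErrorStatus, std::shared_ptr<MockPreparedModel>)> notify;
};

using PrepareModelAction =
        std::function<ErrorStatus(const std::shared_ptr<PreparedModelCallback>&)>;

// Factory: bakes the statuses and the mock model into an action a test can install
// on a mocked prepareModel* call.
PrepareModelAction makePreparedModelReturn(ErrorStatus launchStatus, ErrorStatus returnStatus,
                                           std::shared_ptr<MockPreparedModel> preparedModel) {
    return [launchStatus, returnStatus, preparedModel](
                   const std::shared_ptr<PreparedModelCallback>& cb) -> ErrorStatus {
        cb->notify(returnStatus, preparedModel);  // asynchronous result, delivered via callback
        return launchStatus;                      // synchronous "did the call launch" status
    };
}

int main() {
    auto action = makePreparedModelReturn(ErrorStatus::NONE, ErrorStatus::NONE,
                                          std::make_shared<MockPreparedModel>());
    auto cb = std::make_shared<PreparedModelCallback>();
    ErrorStatus observed = ErrorStatus::GENERAL_FAILURE;
    std::shared_ptr<MockPreparedModel> observedModel;
    cb->notify = [&](ErrorStatus s, std::shared_ptr<MockPreparedModel> m) {
        observed = s;
        observedModel = std::move(m);
    };
    const ErrorStatus launch = action(cb);
    return launch == ErrorStatus::NONE && observed == ErrorStatus::NONE && observedModel ? 0 : 1;
}
```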
D | TestExecution.cpp
     74  TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)  in TestPreparedModelLatest() argument
     75  : mPreparedModelV1_0(preparedModel),  in TestPreparedModelLatest()
     76  mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),  in TestPreparedModelLatest()
     77  mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)),  in TestPreparedModelLatest()
    250  TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)  in TestPreparedModel12() argument
    251  : mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}  in TestPreparedModel12()
    284  TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus)  in TestPreparedModel10() argument
    285  : mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {}  in TestPreparedModel10()
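TestPreparedModelLatest keeps one member per HAL version, populated by castFrom() on the V1_0 handle with nullptr as the fallback when the remote object does not implement the newer interface. In plain C++ the analogous move is a dynamic_pointer_cast at construction time; the sketch below models that idea only, not the HIDL castFrom() mechanics.

```cpp
#include <memory>

// Stand-ins for the versioned interfaces: V1_2 extends V1_0, V1_3 extends V1_2.
struct IPreparedModelV1_0 { virtual ~IPreparedModelV1_0() = default; };
struct IPreparedModelV1_2 : IPreparedModelV1_0 {};
struct IPreparedModelV1_3 : IPreparedModelV1_2 {};

class TestPreparedModelLatest {
   public:
    explicit TestPreparedModelLatest(std::shared_ptr<IPreparedModelV1_0> preparedModel)
        : mPreparedModelV1_0(preparedModel),
          // Downcast once; a null member simply means "this driver stops at an older version".
          mPreparedModelV1_2(std::dynamic_pointer_cast<IPreparedModelV1_2>(preparedModel)),
          mPreparedModelV1_3(std::dynamic_pointer_cast<IPreparedModelV1_3>(preparedModel)) {}

    bool supportsV1_3() const { return mPreparedModelV1_3 != nullptr; }

   private:
    std::shared_ptr<IPreparedModelV1_0> mPreparedModelV1_0;
    std::shared_ptr<IPreparedModelV1_2> mPreparedModelV1_2;
    std::shared_ptr<IPreparedModelV1_3> mPreparedModelV1_3;
};

int main() {
    TestPreparedModelLatest wrapped(std::make_shared<IPreparedModelV1_2>());
    return wrapped.supportsV1_3() ? 1 : 0;  // a V1_2-only model should not report V1_3 support
}
```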
/frameworks/ml/nn/runtime/test/android_fuzzing/
D | DriverFuzzTest.cpp
    263  const sp<V1_3::IPreparedModel>& preparedModel) override {  in notify_1_3() argument
    265  (status == V1_3::ErrorStatus::NONE ? preparedModel : nullptr);  in notify_1_3()
    296  void execute(const sp<V1_3::IPreparedModel>& preparedModel, const V1_3::Request& request) {  in execute() argument
    302  preparedModel->executeSynchronously_1_3(request, V1_2::MeasureTiming::YES, {}, {}, cb);  in execute()
    316  const auto preparedModel = prepareModel(device, model);  in nnapiFuzzTest() local
    317  if (preparedModel == nullptr) return;  in nnapiFuzzTest()
    323  execute(preparedModel, request);  in nnapiFuzzTest()
/frameworks/ml/nn/common/
D | BufferTracker.cpp
     62  const IPreparedModel* preparedModel) const {  in validateRequest()
     73  if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) {  in validateRequest()
    100  if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) {  in validateRequest()
D | ExecutionBurstServer.cpp
     56  DefaultBurstExecutorWithCache(V1_2::IPreparedModel* preparedModel)  in DefaultBurstExecutorWithCache() argument
     57  : mpPreparedModel(preparedModel) {}  in DefaultBurstExecutorWithCache()
    520  const MQDescriptorSync<FmqResultDatum>& resultChannel, V1_2::IPreparedModel* preparedModel,  in create() argument
    523  if (preparedModel == nullptr) {  in create()
    530  std::make_shared<DefaultBurstExecutorWithCache>(preparedModel);  in create()
D | ValidateHal.cpp
    869  const auto& preparedModel = preparedModels[role.modelIndex];  in validateMemoryDesc() local
    870  NN_RET_CHECK(preparedModel != nullptr);  in validateMemoryDesc()
    871  const auto* model = getModel(preparedModel);  in validateMemoryDesc()
    877  const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex);  in validateMemoryDesc()
    883  const auto& preparedModel = preparedModels[role.modelIndex];  in validateMemoryDesc() local
    884  NN_RET_CHECK(preparedModel != nullptr);  in validateMemoryDesc()
    885  const auto* model = getModel(preparedModel);  in validateMemoryDesc()
    891  const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex);  in validateMemoryDesc()
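Both validateMemoryDesc() here and BufferTracker::validateRequest() above key their bookkeeping on (preparedModel, IOType, ioIndex) tuples: the emplace() success flag rejects a role declared twice, and count() later answers whether a given request position was declared at all. A standalone sketch of that tuple-keyed set with simplified types:

```cpp
#include <cstdint>
#include <set>
#include <tuple>

struct IPreparedModel {};
enum class IOType { INPUT, OUTPUT };

using Role = std::tuple<const IPreparedModel*, IOType, uint32_t>;

// Register a role; returns false if the same (model, type, index) was already declared,
// mirroring the check on the emplace() result in validateMemoryDesc().
bool addRole(std::set<Role>& roles, const IPreparedModel* preparedModel, IOType type,
             uint32_t ioIndex) {
    return roles.emplace(preparedModel, type, ioIndex).second;
}

// Request-time check, as in BufferTracker: the memory may only be used in declared roles.
bool hasRole(const std::set<Role>& roles, const IPreparedModel* preparedModel, IOType type,
             uint32_t ioIndex) {
    return roles.count({preparedModel, type, ioIndex}) != 0;
}

int main() {
    IPreparedModel model;
    std::set<Role> roles;
    bool ok = addRole(roles, &model, IOType::INPUT, 0);     // first declaration succeeds
    ok = ok && !addRole(roles, &model, IOType::INPUT, 0);   // duplicate is rejected
    ok = ok && hasRole(roles, &model, IOType::INPUT, 0);    // declared role is found
    ok = ok && !hasRole(roles, &model, IOType::OUTPUT, 3);  // undeclared role is not
    return ok ? 0 : 1;
}
```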
D | ExecutionBurstController.cpp
    475  const sp<V1_2::IPreparedModel>& preparedModel,  in create() argument
    478  if (preparedModel == nullptr) {  in create()
    506  const Return<void> ret = preparedModel->configureExecutionBurst(  in create()
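ExecutionBurstController::create() guards against a null prepared model and treats the transport result of configureExecutionBurst() as a failure mode separate from whatever status the driver reports. A condensed sketch of that two-level check; the Return-like wrapper and the burst signature below are simplified assumptions, not the real HIDL API.

```cpp
#include <memory>
#include <optional>

struct BurstChannels {};  // stand-in for the FMQ request/result channel pair

// Minimal stand-in for a Return<T>: a value that may instead signal a transport error.
template <typename T>
struct Return {
    std::optional<T> value;
    bool isOk() const { return value.has_value(); }
};

struct IPreparedModel {
    // The real configureExecutionBurst() takes a callback plus FMQ descriptors and reports
    // results through another callback; this sketch collapses all of that into one value.
    Return<BurstChannels> configureExecutionBurst() { return {BurstChannels{}}; }
};

struct ExecutionBurstController {
    BurstChannels channels;

    static std::unique_ptr<ExecutionBurstController> create(
            const std::shared_ptr<IPreparedModel>& preparedModel) {
        if (preparedModel == nullptr) return nullptr;  // nothing to configure a burst against

        const Return<BurstChannels> ret = preparedModel->configureExecutionBurst();
        if (!ret.isOk()) return nullptr;  // transport-level failure, distinct from a driver status

        auto controller = std::make_unique<ExecutionBurstController>();
        controller->channels = *ret.value;
        return controller;
    }
};

int main() {
    auto controller = ExecutionBurstController::create(std::make_shared<IPreparedModel>());
    return controller != nullptr ? 0 : 1;
}
```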
/frameworks/ml/nn/common/include/
D | BufferTracker.h
     52  const hal::IPreparedModel* preparedModel) const;
D | ExecutionBurstServer.h
    308  hardware::neuralnetworks::V1_2::IPreparedModel* preparedModel,
D | ExecutionBurstController.h
    294  const sp<hardware::neuralnetworks::V1_2::IPreparedModel>& preparedModel,