/*
 * This file is auto-generated.  DO NOT MODIFY.
 * Using: out/host/linux-x86/bin/aidl --lang=ndk --structured --version 4 --hash 53178f8de9b8861df391cf0593f6f3e08adad33d -t --stability vintf --min_sdk_version 30 -pout/soong/.intermediates/hardware/interfaces/common/aidl/android.hardware.common_interface/2/preprocessed.aidl -pout/soong/.intermediates/hardware/interfaces/graphics/common/aidl/android.hardware.graphics.common_interface/6/preprocessed.aidl --ninja -d out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/staging/android/hardware/neuralnetworks/IPreparedModel.cpp.d -h out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/include/staging -o out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/staging -Nhardware/interfaces/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/4 hardware/interfaces/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/4/android/hardware/neuralnetworks/IPreparedModel.aidl
 *
 * DO NOT CHECK THIS FILE INTO A CODE TREE (e.g. git, etc..).
 * ALWAYS GENERATE THIS FILE FROM UPDATED AIDL COMPILER
 * AS A BUILD INTERMEDIATE ONLY. THIS IS NOT SOURCE CODE.
 */
#pragma once

#include "aidl/android/hardware/neuralnetworks/IPreparedModel.h"

#include <android/binder_ibinder.h>
#include <cassert>

#ifndef __BIONIC__
#ifndef __assert2
#define __assert2(a,b,c,d) ((void)0)
#endif
#endif

namespace aidl {
namespace android {
namespace hardware {
namespace neuralnetworks {
class BnPreparedModel : public ::ndk::BnCInterface<IPreparedModel> {
public:
  BnPreparedModel();
  virtual ~BnPreparedModel();
  ::ndk::ScopedAStatus getInterfaceVersion(int32_t* _aidl_return) final;
  ::ndk::ScopedAStatus getInterfaceHash(std::string* _aidl_return) final;
protected:
  ::ndk::SpAIBinder createBinder() override;
private:
};
class IPreparedModelDelegator : public BnPreparedModel {
public:
  explicit IPreparedModelDelegator(const std::shared_ptr<IPreparedModel> &impl) : _impl(impl) {
     int32_t _impl_ver = 0;
     if (!impl->getInterfaceVersion(&_impl_ver).isOk()) {;
        __assert2(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Delegator failed to get version of the implementation.");
     }
     if (_impl_ver != IPreparedModel::version) {
        __assert2(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Mismatched versions of delegator and implementation is not allowed.");
     }
  }

  ::ndk::ScopedAStatus executeSynchronously(const ::aidl::android::hardware::neuralnetworks::Request& in_request, bool in_measureTiming, int64_t in_deadlineNs, int64_t in_loopTimeoutDurationNs, ::aidl::android::hardware::neuralnetworks::ExecutionResult* _aidl_return) override {
    return _impl->executeSynchronously(in_request, in_measureTiming, in_deadlineNs, in_loopTimeoutDurationNs, _aidl_return);
  }
  ::ndk::ScopedAStatus executeFenced(const ::aidl::android::hardware::neuralnetworks::Request& in_request, const std::vector<::ndk::ScopedFileDescriptor>& in_waitFor, bool in_measureTiming, int64_t in_deadlineNs, int64_t in_loopTimeoutDurationNs, int64_t in_durationNs, ::aidl::android::hardware::neuralnetworks::FencedExecutionResult* _aidl_return) override {
    return _impl->executeFenced(in_request, in_waitFor, in_measureTiming, in_deadlineNs, in_loopTimeoutDurationNs, in_durationNs, _aidl_return);
  }
  ::ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<::aidl::android::hardware::neuralnetworks::IBurst>* _aidl_return) override {
    return _impl->configureExecutionBurst(_aidl_return);
  }
  ::ndk::ScopedAStatus createReusableExecution(const ::aidl::android::hardware::neuralnetworks::Request& in_request, const ::aidl::android::hardware::neuralnetworks::ExecutionConfig& in_config, std::shared_ptr<::aidl::android::hardware::neuralnetworks::IExecution>* _aidl_return) override {
    return _impl->createReusableExecution(in_request, in_config, _aidl_return);
  }
  ::ndk::ScopedAStatus executeSynchronouslyWithConfig(const ::aidl::android::hardware::neuralnetworks::Request& in_request, const ::aidl::android::hardware::neuralnetworks::ExecutionConfig& in_config, int64_t in_deadlineNs, ::aidl::android::hardware::neuralnetworks::ExecutionResult* _aidl_return) override {
    return _impl->executeSynchronouslyWithConfig(in_request, in_config, in_deadlineNs, _aidl_return);
  }
  ::ndk::ScopedAStatus executeFencedWithConfig(const ::aidl::android::hardware::neuralnetworks::Request& in_request, const std::vector<::ndk::ScopedFileDescriptor>& in_waitFor, const ::aidl::android::hardware::neuralnetworks::ExecutionConfig& in_config, int64_t in_deadlineNs, int64_t in_durationNs, ::aidl::android::hardware::neuralnetworks::FencedExecutionResult* _aidl_return) override {
    return _impl->executeFencedWithConfig(in_request, in_waitFor, in_config, in_deadlineNs, in_durationNs, _aidl_return);
  }
protected:
private:
  std::shared_ptr<IPreparedModel> _impl;
};

}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
}  // namespace aidl