/*
 * This file is auto-generated.  DO NOT MODIFY.
 * Using: out/host/linux-x86/bin/aidl --lang=ndk --structured --version 4 --hash 53178f8de9b8861df391cf0593f6f3e08adad33d -t --stability vintf --min_sdk_version 30 -pout/soong/.intermediates/hardware/interfaces/common/aidl/android.hardware.common_interface/2/preprocessed.aidl -pout/soong/.intermediates/hardware/interfaces/graphics/common/aidl/android.hardware.graphics.common_interface/6/preprocessed.aidl --ninja -d out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/staging/android/hardware/neuralnetworks/IDevice.cpp.d -h out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/include/staging -o out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/staging -Nhardware/interfaces/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/4 hardware/interfaces/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/4/android/hardware/neuralnetworks/IDevice.aidl
 *
 * DO NOT CHECK THIS FILE INTO A CODE TREE (e.g. git, etc..).
 * ALWAYS GENERATE THIS FILE FROM UPDATED AIDL COMPILER
 * AS A BUILD INTERMEDIATE ONLY. THIS IS NOT SOURCE CODE.
 */
#pragma once

#include "aidl/android/hardware/neuralnetworks/IDevice.h"

#include <android/binder_ibinder.h>
#include <cassert>

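// __assert2 is Bionic's assertion helper. On non-Bionic (host) builds it is
// stubbed out below so the interface-version checks in the delegator compile.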
#ifndef __BIONIC__
#ifndef __assert2
#define __assert2(a,b,c,d) ((void)0)
#endif
#endif

namespace aidl {
namespace android {
namespace hardware {
namespace neuralnetworks {
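// Server-side (native binder) stub for IDevice in the NDK backend. A service
// implementation derives from BnDevice and overrides the IDevice methods;
// getInterfaceVersion()/getInterfaceHash() are generated and marked final.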
class BnDevice : public ::ndk::BnCInterface<IDevice> {
public:
  BnDevice();
  virtual ~BnDevice();
  ::ndk::ScopedAStatus getInterfaceVersion(int32_t* _aidl_return) final;
  ::ndk::ScopedAStatus getInterfaceHash(std::string* _aidl_return) final;
protected:
  ::ndk::SpAIBinder createBinder() override;
private:
};
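// Pass-through delegator: wraps an existing IDevice implementation and forwards
// every call to it unchanged. The constructor asserts that the wrapped
// implementation reports the same interface version as this generated class.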
class IDeviceDelegator : public BnDevice {
public:
  explicit IDeviceDelegator(const std::shared_ptr<IDevice> &impl) : _impl(impl) {
     int32_t _impl_ver = 0;
     if (!impl->getInterfaceVersion(&_impl_ver).isOk()) {
        __assert2(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Delegator failed to get version of the implementation.");
     }
     if (_impl_ver != IDevice::version) {
        __assert2(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Mismatched versions of delegator and implementation is not allowed.");
     }
  }

  ::ndk::ScopedAStatus allocate(const ::aidl::android::hardware::neuralnetworks::BufferDesc& in_desc, const std::vector<::aidl::android::hardware::neuralnetworks::IPreparedModelParcel>& in_preparedModels, const std::vector<::aidl::android::hardware::neuralnetworks::BufferRole>& in_inputRoles, const std::vector<::aidl::android::hardware::neuralnetworks::BufferRole>& in_outputRoles, ::aidl::android::hardware::neuralnetworks::DeviceBuffer* _aidl_return) override {
    return _impl->allocate(in_desc, in_preparedModels, in_inputRoles, in_outputRoles, _aidl_return);
  }
  ::ndk::ScopedAStatus getCapabilities(::aidl::android::hardware::neuralnetworks::Capabilities* _aidl_return) override {
    return _impl->getCapabilities(_aidl_return);
  }
  ::ndk::ScopedAStatus getNumberOfCacheFilesNeeded(::aidl::android::hardware::neuralnetworks::NumberOfCacheFiles* _aidl_return) override {
    return _impl->getNumberOfCacheFilesNeeded(_aidl_return);
  }
  ::ndk::ScopedAStatus getSupportedExtensions(std::vector<::aidl::android::hardware::neuralnetworks::Extension>* _aidl_return) override {
    return _impl->getSupportedExtensions(_aidl_return);
  }
  ::ndk::ScopedAStatus getSupportedOperations(const ::aidl::android::hardware::neuralnetworks::Model& in_model, std::vector<bool>* _aidl_return) override {
    return _impl->getSupportedOperations(in_model, _aidl_return);
  }
  ::ndk::ScopedAStatus getType(::aidl::android::hardware::neuralnetworks::DeviceType* _aidl_return) override {
    return _impl->getType(_aidl_return);
  }
  ::ndk::ScopedAStatus getVersionString(std::string* _aidl_return) override {
    return _impl->getVersionString(_aidl_return);
  }
  ::ndk::ScopedAStatus prepareModel(const ::aidl::android::hardware::neuralnetworks::Model& in_model, ::aidl::android::hardware::neuralnetworks::ExecutionPreference in_preference, ::aidl::android::hardware::neuralnetworks::Priority in_priority, int64_t in_deadlineNs, const std::vector<::ndk::ScopedFileDescriptor>& in_modelCache, const std::vector<::ndk::ScopedFileDescriptor>& in_dataCache, const std::vector<uint8_t>& in_token, const std::shared_ptr<::aidl::android::hardware::neuralnetworks::IPreparedModelCallback>& in_callback) override {
    return _impl->prepareModel(in_model, in_preference, in_priority, in_deadlineNs, in_modelCache, in_dataCache, in_token, in_callback);
  }
  ::ndk::ScopedAStatus prepareModelFromCache(int64_t in_deadlineNs, const std::vector<::ndk::ScopedFileDescriptor>& in_modelCache, const std::vector<::ndk::ScopedFileDescriptor>& in_dataCache, const std::vector<uint8_t>& in_token, const std::shared_ptr<::aidl::android::hardware::neuralnetworks::IPreparedModelCallback>& in_callback) override {
    return _impl->prepareModelFromCache(in_deadlineNs, in_modelCache, in_dataCache, in_token, in_callback);
  }
  ::ndk::ScopedAStatus prepareModelWithConfig(const ::aidl::android::hardware::neuralnetworks::Model& in_model, const ::aidl::android::hardware::neuralnetworks::PrepareModelConfig& in_config, const std::shared_ptr<::aidl::android::hardware::neuralnetworks::IPreparedModelCallback>& in_callback) override {
    return _impl->prepareModelWithConfig(in_model, in_config, in_callback);
  }
protected:
private:
  std::shared_ptr<IDevice> _impl;
};
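// Illustrative usage only (not part of the generated header): a sketch of
// wrapping a hypothetical IDevice implementation "MyDevice" in the delegator
// before registering it. AServiceManager_addService comes from
// <android/binder_manager.h>; the instance name below is a placeholder.
//
//   std::shared_ptr<IDevice> impl = ndk::SharedRefBase::make<MyDevice>();
//   std::shared_ptr<IDeviceDelegator> wrapped =
//       ndk::SharedRefBase::make<IDeviceDelegator>(impl);
//   binder_status_t status = AServiceManager_addService(
//       wrapped->asBinder().get(), "android.hardware.neuralnetworks.IDevice/example");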

}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android
}  // namespace aidl