/aosp_15_r20/external/ComputeLibrary/src/runtime/NEON/functions/
NEElementwiseOperations.cpp
    34  struct NEElementwiseMax::Impl  (struct in arm_compute::NEElementwiseMax)
    36      const ITensor *src_0{ nullptr };
    37      const ITensor *src_1{ nullptr };
    38      ITensor *dst{ nullptr };
    39      std::unique_ptr<cpu::CpuElementwiseMax> op{ nullptr };
    75  struct NEElementwiseMin::Impl  (struct in arm_compute::NEElementwiseMin)
    77      const ITensor *src_0{ nullptr };
    78      const ITensor *src_1{ nullptr };
    79      ITensor *dst{ nullptr };
    80      std::unique_ptr<cpu::CpuElementwiseMin> op{ nullptr };
    [all …]

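These runtime functions all share the same pimpl layout: the public NE* class owns an Impl that stores the caller's tensors plus a stateless cpu::* operator, and run() simply repacks those pointers into an ITensorPack and forwards to the operator. A minimal sketch of that forwarding for NEElementwiseMax, assuming the usual ACL_SRC_0/ACL_SRC_1/ACL_DST pack slots; this is not the verbatim library code and the activation-info argument is omitted:

    // Sketch of the pimpl forwarding (fragment; pack-slot names and configure() shape are assumptions).
    void NEElementwiseMax::configure(ITensor *input1, ITensor *input2, ITensor *output)
    {
        _impl->src_0 = input1;
        _impl->src_1 = input2;
        _impl->dst   = output;
        _impl->op    = std::make_unique<cpu::CpuElementwiseMax>();
        _impl->op->configure(input1->info(), input2->info(), output->info());
    }

    void NEElementwiseMax::run()
    {
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC_0, _impl->src_0);
        pack.add_const_tensor(TensorType::ACL_SRC_1, _impl->src_1);
        pack.add_tensor(TensorType::ACL_DST, _impl->dst);
        _impl->op->run(pack);
    }

Keeping the operator stateless and passing tensors per call is what lets the same cpu::* operator be shared by the experimental operator API and these legacy function wrappers.
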
NEDepthwiseConvolutionLayer.cpp
    40  struct NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::Impl  (struct in arm_compute::NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal)
    42      ITensor *src{ nullptr };          // SRC_0
    43      ITensor *dst{ nullptr };          // DST_0
    44      const ITensor *weights
    48      const ITensor *biases
    52      Tensor permuted_input{};          // INT_0
    53      Tensor permuted_weights{};        // INT_1
    54      Tensor permuted_output{};         // INT_2
    55      Tensor workspace{};               // INT_3
    56      Tensor packed_weights{};          // INT_4
    [all …]

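The permuted_* temporaries exist because the optimized depthwise kernel works in NHWC: when the caller provides NCHW tensors, configure() wraps the kernel in layout permutes and run() executes them. A sketch of the input side of that arrangement, using NEPermute; the permute-function member name is not shown in the excerpt above and the exact PermutationVector should be treated as an assumption for this tree's version:

    // Sketch only: bring the NCHW input into the NHWC temporary before the optimized kernel.
    NEPermute permute_input;                                                       // assumed member
    permute_input.configure(src, &permuted_input, PermutationVector(2U, 0U, 1U));  // NCHW -> NHWC
    permuted_input.info()->set_data_layout(DataLayout::NHWC);

The output side applies the inverse permutation, so callers keep their original layout end to end.
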
NEWinogradConvolutionLayer.cpp
    42  struct NEWinogradConvolutionLayer::Impl  (struct in arm_compute::NEWinogradConvolutionLayer)
    44      MemoryGroup memory_group{};
    45      std::unique_ptr<cpu::CpuWinogradConv2d> op{ nullptr };
    46      ITensorPack run_pack{};
    47      ITensorPack prep_pack{};
    48      WorkspaceData<Tensor> workspace{};
    49      experimental::MemoryRequirements aux_mem_req{};
    50      const ITensor *original_weights{ nullptr };
    51      bool is_prepared{ false };
    52      bool is_activationlayer_enabled{ false };
    [all …]

NEGEMMLowpMatrixMultiplyCore.cpp
    40  struct NEGEMMLowpMatrixMultiplyCore::Impl  (struct in arm_compute::NEGEMMLowpMatrixMultiplyCore)
    42      const ITensor *b{ nullptr };
    43      std::unique_ptr<cpu::CpuGemmLowpMatrixMultiplyCore> op{ nullptr };
    44      ITensorPack run_pack{};
    45      ITensorPack prep_pack{};
    46      MemoryGroup memory_group{};
    47      IWeightsManager *weights_manager{ nullptr };
    48      MemoryRequirements aux_mem_req{};
    49      WorkspaceData<Tensor> workspace_tensors{};
    50      bool is_prepared{ false };

NEPixelWiseMultiplication.cpp
    33  struct NEPixelWiseMultiplication::Impl  (struct in arm_compute::NEPixelWiseMultiplication)
    35      const ITensor *src_0{ nullptr };
    36      const ITensor *src_1{ nullptr };
    37      ITensor *dst{ nullptr };
    38      std::unique_ptr<cpu::CpuMul> op{ nullptr };
    72  struct NEComplexPixelWiseMultiplication::Impl  (struct in arm_compute::NEComplexPixelWiseMultiplication)
    74      ITensor *src_0{ nullptr };
    75      ITensor *src_1{ nullptr };
    76      ITensor *dst{ nullptr };
    77      std::unique_ptr<cpu::CpuComplexMul> op{ nullptr };

NEGEMM.cpp
    39  struct NEGEMM::Impl  (struct in arm_compute::NEGEMM)
    41      MemoryGroup memory_group{};
    42      IWeightsManager *weights_manager{ nullptr };
    44      std::unique_ptr<cpu::CpuGemm> op{ nullptr };
    46      const ITensor *original_b{ nullptr };
    47      bool is_prepared{ false };
    49      ITensorPack run_pack{};
    50      ITensorPack prep_pack{};
    51      WorkspaceData<Tensor> workspace{};
    52      experimental::MemoryRequirements aux_mem_req{};

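The GEMM-flavoured functions carry two packs: prep_pack for the one-off weight transformation and run_pack for every execution, with aux_mem_req/workspace describing the auxiliary memory the operator asked for. A minimal sketch of how prepare() and run() typically use these members; this is the shape of the pattern, not the verbatim library code:

    // Sketch of the prepare()/run() split; member names follow the Impl above,
    // but treat the exact calls as assumptions.
    void NEGEMM::prepare()
    {
        if (!_impl->is_prepared)
        {
            _impl->op->prepare(_impl->prep_pack);   // e.g. reshape/transform B once
            // Prepare-only temporaries can be released after this point.
            _impl->is_prepared = true;
        }
    }

    void NEGEMM::run()
    {
        prepare();                                               // no-op after the first call
        MemoryGroupResourceScope scope_mg(_impl->memory_group);  // acquire auxiliary memory
        _impl->op->run(_impl->run_pack);                         // stateless operator does the work
    }

The aux_mem_req/workspace pair is how the stateless operator reports its scratch and persistent buffer needs back to the function, which then owns the actual Tensor objects on the operator's behalf.
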
NEGEMMConv2d.cpp
    36  struct NEGEMMConv2d::Impl  (struct in arm_compute::NEGEMMConv2d)
    38      const ITensor *weights{ nullptr };
    39      std::unique_ptr<OperatorType> op{ nullptr };
    40      ITensorPack run_pack{};
    41      ITensorPack prep_pack{};
    42      WorkspaceData<Tensor> workspace{};
    43      MemoryGroup memory_group{};
    44      bool is_prepared{ false };
    45      experimental::MemoryRequirements aux_mem_req{};

NEFullyConnectedLayer.cpp
    38  struct NEFullyConnectedLayer::Impl  (struct in arm_compute::NEFullyConnectedLayer)
    40      MemoryGroup memory_group{};
    41      IWeightsManager *weights_manager{ nullptr };
    43      std::unique_ptr<cpu::CpuFullyConnected> op{ nullptr };
    45      const ITensor *original_weights{ nullptr };
    47      ITensorPack run_pack{};
    48      WorkspaceData<Tensor> workspace{};
    49      experimental::MemoryRequirements aux_mem_req{};
    51      bool is_prepared{ false };

NEPoolingLayer.cpp
    34  struct NEPoolingLayer::Impl  (struct in arm_compute::NEPoolingLayer)
    36      ITensor *src{ nullptr };
    37      ITensor *dst{ nullptr };
    38      ITensor *indices{ nullptr };
    39      std::unique_ptr<cpu::CpuPool2d> op{ nullptr };
    40      MemoryGroup memory_group{};
    41      ITensorPack run_pack{};
    42      WorkspaceData<Tensor> workspace_tensors{};

NEConvolutionLayer.cpp
    42  struct NEConvolutionLayer::Impl  (struct in arm_compute::NEConvolutionLayer)
    44      MemoryGroup memory_group{};
    45      std::shared_ptr<IMemoryManager> memory_manager{};
    46      std::unique_ptr<cpu::ICpuOperator> op{ nullptr };
    47      ITensorPack run_pack{};
    48      ITensorPack prep_pack{};
    49      WorkspaceData<Tensor> workspace{};
    50      experimental::MemoryRequirements aux_mem_req{};
    51      std::unique_ptr<IFunction> func{ nullptr };

NEGEMMConvolutionLayer.cpp
    37  struct NEGEMMConvolutionLayer::Impl  (struct in arm_compute::NEGEMMConvolutionLayer)
    39      const ITensor *weights{ nullptr };
    40      std::unique_ptr<cpu::CpuGemmConv2d> op{ nullptr };
    41      ITensorPack run_pack{};
    42      MemoryGroup memory_group{};
    43      IWeightsManager *weights_manager{ nullptr };
    44      MemoryRequirements aux_mem_req{};
    45      WorkspaceData<Tensor> workspace_tensors{};
    46      bool is_prepared{ false };

NESoftmaxLayer.cpp
    36  struct NESoftmaxLayerGeneric<IS_LOG>::Impl  (struct in arm_compute::NESoftmaxLayerGeneric)
    38      const ITensor *src{ nullptr };
    39      ITensor *dst{ nullptr };
    40      Tensor max{ nullptr };
    41      std::unique_ptr<cpu::CpuSoftmaxGeneric<IS_LOG>> op{ nullptr };
    42      MemoryGroup memory_group{};
    43      ITensorPack run_pack{};
    44      WorkspaceData<Tensor> workspace_tensors{};

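NESoftmaxLayerGeneric is templated on IS_LOG so one Impl serves both softmax and log-softmax; the public API exposes the two instantiations as type aliases. As far as I recall the public header spells them roughly like this (worth checking against the copy in this tree):

    // Aliases assumed from the arm_compute public header, not copied from this tree.
    using NESoftmaxLayer    = NESoftmaxLayerGeneric<false>;  // softmax(x)    = exp(x) / sum(exp(x))
    using NELogSoftmaxLayer = NESoftmaxLayerGeneric<true>;   // logsoftmax(x) = x - log(sum(exp(x)))
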
/aosp_15_r20/external/ComputeLibrary/src/runtime/CL/functions/
CLElementwiseOperations.cpp
    37  struct CLArithmeticAddition::Impl  (struct in arm_compute::CLArithmeticAddition)
    39      const ICLTensor *src_0{ nullptr };
    40      const ICLTensor *src_1{ nullptr };
    41      ICLTensor *dst{ nullptr };
    42      std::unique_ptr<opencl::ClAdd> op{ nullptr };
    83  struct CLArithmeticSubtraction::Impl  (struct in arm_compute::CLArithmeticSubtraction)
    85      const ICLTensor *src_0{ nullptr };
    86      const ICLTensor *src_1{ nullptr };
    87      ICLTensor *dst{ nullptr };
    88      std::unique_ptr<opencl::ClSub> op{ nullptr };
    [all …]

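The CL runtime functions mirror the NEON pattern above, only with ICLTensor pointers and opencl::Cl* operators. One consequence of the pimpl-plus-operator split is that static validation also just forwards to the operator, working purely on ITensorInfo. A short sketch, assuming the usual signature shape (default activation-info argument omitted; not the verbatim library code):

    // Sketch: validation forwards to the operator and needs no allocated CL buffers.
    Status CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2,
                                          const ITensorInfo *output, ConvertPolicy policy)
    {
        return opencl::ClAdd::validate(input1, input2, output, policy);
    }

Validating from ITensorInfo alone is what lets callers reject an unsupported configuration before paying for any OpenCL buffer allocation or kernel compilation.
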
CLElementwiseUnaryLayer.cpp
    33  struct CLRsqrtLayer::Impl  (struct in arm_compute::CLRsqrtLayer)
    35      const ICLTensor *src{ nullptr };
    36      ICLTensor *dst{ nullptr };
    37      std::unique_ptr<opencl::ClRsqrt> op{ nullptr };
    75  struct CLExpLayer::Impl  (struct in arm_compute::CLExpLayer)
    77      const ICLTensor *src{ nullptr };
    78      ICLTensor *dst{ nullptr };
    79      std::unique_ptr<opencl::ClExp> op{ nullptr };
    117 struct CLNegLayer::Impl  (struct in arm_compute::CLNegLayer)
    119     const ICLTensor *src{ nullptr };
    [all …]

CLWinogradConvolutionLayer.cpp
    36  struct CLWinogradConvolutionLayer::Impl  (struct in arm_compute::CLWinogradConvolutionLayer)
    38      const ICLTensor *src{ nullptr };
    39      const ICLTensor *weights{ nullptr };
    40      const ICLTensor *biases{ nullptr };
    41      ICLTensor *dst{ nullptr };
    42      std::unique_ptr<opencl::ClWinogradConv2d> op{ nullptr };
    43      ITensorPack run_pack{};
    44      MemoryGroup memory_group{};
    45      WorkspaceData<CLTensor> workspace_tensors{};
    46      bool is_prepared{ false };

CLGEMM.cpp
    41  struct CLGEMM::Impl  (struct in arm_compute::CLGEMM)
    43      const ICLTensor *b{ nullptr };
    44      std::unique_ptr<OperatorType> op{ nullptr };
    45      MemoryGroup memory_group{};
    46      IWeightsManager *weights_manager{ nullptr };
    47      ITensorPack run_pack{};
    48      ITensorPack prep_pack{};
    49      MemoryRequirements aux_mem_req{};
    50      WorkspaceData<CLTensor> workspace_tensors{};
    51      bool is_prepared{ false };

CLPixelWiseMultiplication.cpp
    35  struct CLPixelWiseMultiplication::Impl  (struct in arm_compute::CLPixelWiseMultiplication)
    37      const ICLTensor *src_0{ nullptr };
    38      const ICLTensor *src_1{ nullptr };
    39      ICLTensor *dst{ nullptr };
    40      std::unique_ptr<opencl::ClMul> op{ nullptr };
    83  struct CLComplexPixelWiseMultiplication::Impl  (struct in arm_compute::CLComplexPixelWiseMultiplication)
    85      const ICLTensor *src_0{ nullptr };
    86      const ICLTensor *src_1{ nullptr };
    87      ICLTensor *dst{ nullptr };
    88      std::unique_ptr<opencl::ClComplexMul> op{ nullptr };

CLFullyConnectedLayer.cpp
    35  struct CLFullyConnectedLayer::Impl  (struct in arm_compute::CLFullyConnectedLayer)
    37      MemoryGroup memory_group{};
    38      IWeightsManager *weights_manager{ nullptr };
    40      std::unique_ptr<opencl::ClFullyConnected> op{ nullptr };
    42      const ITensor *original_weights{ nullptr };
    44      ITensorPack run_pack{};
    45      WorkspaceData<CLTensor> workspace{};
    46      experimental::MemoryRequirements aux_mem_req{};
    48      bool is_prepared{ false };

CLGEMMConvolutionLayer.cpp
    49  struct CLGEMMConvolutionLayer::Impl  (struct in arm_compute::CLGEMMConvolutionLayer)
    51      const ITensor *weights{ nullptr };
    52      std::unique_ptr<opencl::ClGemmConv2d> op{ nullptr };
    53      ITensorPack run_pack{};
    54      ITensorPack prep_pack{};
    55      MemoryGroup memory_group{};
    56      IWeightsManager *weights_manager{ nullptr };
    57      MemoryRequirements aux_mem_req{};
    58      WorkspaceData<CLTensor> workspace_tensors{};
    59      bool is_prepared{ false };

/aosp_15_r20/frameworks/av/media/codec2/vndk/util/
C2InterfaceUtils.cpp
    326 struct C2FieldSupportedValuesHelper<T>::Impl  (struct in C2FieldSupportedValuesHelper)
    327     Impl(const C2FieldSupportedValues &values)
    336     typedef typename _C2FieldValueHelper<T>::ValueType ValueType;
    337     C2FieldSupportedValues::type_t _mType;
    338     C2SupportedRange<ValueType> _mRange;
    339     C2SupportedValueSet<ValueType> _mValues;
    340     C2SupportedValueSet<ValueType> _mFlags;
    382 struct C2ParamFieldValuesBuilder<T>::Impl  (struct in C2ParamFieldValuesBuilder)
    383     Impl(const C2ParamField &field)
    394     operator C2ParamFieldValues() const {
    [all …]

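The Impl here is effectively a tagged union: _mType records whether the supported values are described as a range, an explicit value set, or a flag set, and queries dispatch on that tag. An illustrative, self-contained sketch of that idea; the names (SimpleSupportedValues, supports) are hypothetical and this is not the Codec2 API:

    // Illustrative only: a simplified supported-values helper mirroring the shape of the
    // Impl above (type tag + range + value set + flag set).
    #include <algorithm>
    #include <vector>

    template <typename T>
    struct SimpleSupportedValues {
        enum class Kind { EMPTY, RANGE, VALUES, FLAGS } kind{Kind::EMPTY};
        T min{}, max{};             // used when kind == RANGE (step/num/denom handling omitted)
        std::vector<T> values;      // used when kind == VALUES
        std::vector<T> flags;       // used when kind == FLAGS (allowed bit masks; integral T)

        bool supports(T v) const {
            switch (kind) {
                case Kind::RANGE:  return v >= min && v <= max;
                case Kind::VALUES: return std::find(values.begin(), values.end(), v) != values.end();
                case Kind::FLAGS: {
                    T covered{};
                    for (T f : flags) covered |= (v & f);   // every set bit of v must be allowed
                    return covered == v;
                }
                default:           return false;
            }
        }
    };

The real helper additionally carries range step/scale handling and the Codec2 flag semantics, which this sketch deliberately leaves out.
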
/aosp_15_r20/packages/modules/Bluetooth/system/bta/le_audio/
codec_interface.cc
    36  struct CodecInterface::Impl  (struct in bluetooth::le_audio::CodecInterface)
    37      Impl(const types::LeAudioCodecId& codec_id) : codec_id_(codec_id) {}
    38      ~Impl() { Cleanup(); }
    40      bool IsReady() { return pcm_config_.has_value(); }
    42      CodecInterface::Status InitEncoder(const LeAudioCodecConfiguration& pcm_config,
    73      CodecInterface::Status InitDecoder(const LeAudioCodecConfiguration& codec_config,
    115     std::vector<int16_t>& GetDecodedSamples() { return output_channel_data_; }
    116     CodecInterface::Status Decode(uint8_t* data, uint16_t size) {
    140     CodecInterface::Status Encode(const uint8_t* data, int stride, uint16_t out_size,
    184     void Cleanup() {
    [all …]

/aosp_15_r20/frameworks/av/media/codec2/hal/hidl/1.0/utils/
InputSurfaceConnection.cpp
    72  struct InputSurfaceConnection::Impl : public ComponentWrapper  (struct in android::hardware::media::c2::V1_0::utils::InputSurfaceConnection)
    74      Impl(const sp<GraphicBufferSource>& source,
    81      Impl(const sp<GraphicBufferSource>& source,
    110     virtual ~Impl() {
    115     bool init() {
    186     virtual status_t submitBuffer(
    238     virtual status_t submitEos(int32_t bufferId) override {
    256     virtual void dispatchDataSpaceChanged(
    268     struct ConfigurableIntf : public ConfigurableC2Intf
    291     c2_status_t queryFromSink(
    [all …]

/aosp_15_r20/external/ComputeLibrary/src/runtime/CPP/
CPPScheduler.cpp
    311 struct CPPScheduler::Impl final  (struct in arm_compute::CPPScheduler)
    313     constexpr static unsigned int m_default_wake_fanout = 4;
    314     enum class Mode
    319     enum class ModeToggle
    325     explicit Impl(unsigned int thread_hint)
    342     void set_num_threads(unsigned int num_threads, unsigned int thread_hint)
    348     void set_num_threads_with_affinity(unsigned int num_threads, unsigned int thread_hint, BindFunc func)
    363     void auto_switch_mode(unsigned int num_threads_to_use)
    377     void set_linear_mode()
    386     void set_fanout_mode(unsigned int wake_fanout, unsigned int num_threads_to_use)
    [all …]

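The scheduler's Impl toggles between two thread wake-up schemes: in linear mode the main thread wakes every worker itself, while in fanout mode each woken thread wakes up to `wake_fanout` more, so wake-up latency grows roughly logarithmically with the thread count. A self-contained, illustrative sketch of the fanout shape (not the actual CPPScheduler code):

    // Illustrative only: which workers each thread would wake in a fanout wake-up tree.
    #include <cstdio>
    #include <vector>

    // Thread 0 is the main thread; workers are numbered 1..num_workers.
    std::vector<unsigned int> workers_to_wake(unsigned int self, unsigned int fanout,
                                              unsigned int num_workers)
    {
        std::vector<unsigned int> out;
        for (unsigned int i = 0; i < fanout; ++i)
        {
            const unsigned int child = self * fanout + i + 1;
            if (child <= num_workers)
            {
                out.push_back(child);
            }
        }
        return out;
    }

    int main()
    {
        const unsigned int fanout = 4, workers = 10;
        for (unsigned int t = 0; t <= workers; ++t)
        {
            std::printf("thread %u wakes:", t);
            for (unsigned int w : workers_to_wake(t, fanout, workers))
            {
                std::printf(" %u", w);
            }
            std::printf("\n");
        }
        return 0;
    }

With a fanout of 4 and 10 workers, thread 0 wakes 1-4, thread 1 wakes 5-8 and thread 2 wakes 9-10, so no single thread pays the whole wake-up cost; auto_switch_mode presumably picks between the two schemes based on how many threads are in use.
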
/aosp_15_r20/external/skia/src/sksl/
SkSLModuleLoader.cpp
    91  struct ModuleLoader::Impl  (struct in SkSL::ModuleLoader)
    98      SkMutex fMutex;
    99      const BuiltinTypes fBuiltinTypes;
    101     std::unique_ptr<const Module> fRootModule;
    103     std::unique_ptr<const Module> fSharedModule;          // [Root] + Public intrinsics
    104     std::unique_ptr<const Module> fGPUModule;             // [Shared] + Non-public intrinsics/
    106     std::unique_ptr<const Module> fVertexModule;          // [GPU] + Vertex stage decls
    107     std::unique_ptr<const Module> fFragmentModule;        // [GPU] + Fragment stage decls
    108     std::unique_ptr<const Module> fComputeModule;         // [GPU] + Compute stage decls
    109     std::unique_ptr<const Module> fGraphiteVertexModule;  // [Vert] + Graphite vertex helpers
    [all …]

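The member comments spell out a layered module hierarchy (Root -> Shared -> GPU -> per-stage modules), and the SkMutex plus unique_ptr members suggest each layer is compiled lazily, at most once, on first request. A sketch of that lazy, lock-guarded loading; impl() and compileModule() are hypothetical stand-ins, not Skia's actual helpers:

    // Sketch only: compile the GPU module on first use, on top of its parent layer.
    const Module* ModuleLoader::loadGPUModule(Compiler* compiler) {
        Impl& m = impl();                     // hypothetical accessor for the shared Impl
        SkAutoMutexExclusive lock(m.fMutex);  // serialise first-time compilation
        if (!m.fGPUModule) {
            // fSharedModule (the parent layer) is assumed to be loaded already; the real
            // loader resolves the whole Root -> Shared -> GPU chain itself.
            m.fGPUModule = compileModule(compiler, "sksl_gpu.sksl", m.fSharedModule.get());
        }
        return m.fGPUModule.get();
    }

Loading lazily means a client that never touches a given stage never pays to compile that stage's module.
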
/aosp_15_r20/external/cronet/base/allocator/dispatcher/
dispatcher.cc
    25  struct Dispatcher::Impl  (struct in base::allocator::dispatcher::Dispatcher)
    26      void Initialize(const internal::DispatchData& dispatch_data) {
    35      void Reset() {
    53      static void ConnectToEmitters(const internal::DispatchData& dispatch_data) {
    72      static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) {
    86      internal::DispatchData dispatch_data_;
    91      std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;
    93      std::atomic_flag is_initialized_check_flag_;

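The two declarations of is_initialized_check_flag_ (source lines 91 and 93) are presumably the two sides of a preprocessor branch, since ATOMIC_FLAG_INIT is only required before C++20; either way, the flag's job is to catch a second Initialize() without taking a lock. A self-contained sketch of that one-shot guard (illustrative, not the Chromium code):

    // Illustrative only: detect double initialization with std::atomic_flag.
    #include <atomic>
    #include <cassert>

    class OneShot {
     public:
      void Initialize() {
        // test_and_set() returns the previous value, so it is false exactly once.
        const bool already_initialized = is_initialized_check_flag_.test_and_set();
        assert(!already_initialized && "Initialize() called twice");
        (void)already_initialized;
        // ... connect allocator hooks / emitters here ...
      }

     private:
      std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;  // pre-C++20 spelling
    };

    int main() {
      OneShot dispatcher;
      dispatcher.Initialize();    // fine
      // dispatcher.Initialize(); // would trip the assert in a debug build
      return 0;
    }
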