xref: /aosp_15_r20/external/executorch/backends/qualcomm/runtime/backends/QnnGraphCommon.h (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
/*
 * Copyright (c) Qualcomm Innovation Center, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once

#include <executorch/backends/qualcomm/aot/wrappers/TensorWrapper.h>
#include <executorch/backends/qualcomm/runtime/Logging.h>
#include <executorch/backends/qualcomm/runtime/backends/QnnContextCommon.h>
#include <executorch/backends/qualcomm/runtime/backends/QnnImplementation.h>
#include <executorch/backends/qualcomm/runtime/backends/QnnProfiler.h>

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "QnnCommon.h"
namespace executorch {
namespace backends {
namespace qnn {
// QnnGraph tracks the QNN graphs created within a context: graph handles and
// per-graph profilers are stored per graph name.
class QnnGraph {
 public:
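  // Keeps a reference to the shared QNN implementation and non-owning
  // pointers to the backend and context used to create and run graphs.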
  explicit QnnGraph(
      const QnnImplementation& implementation,
      QnnBackend* backend,
      QnnContext* context,
      const QnnExecuTorchProfileLevel& profile_level)
      : implementation_(implementation),
        backend_(backend),
        context_(context),
        profile_level_(profile_level) {}

  virtual ~QnnGraph(){};

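  // Creates the QNN graph (and its profiler) registered under `graph_name`;
  // defined in the corresponding .cpp.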
  executorch::runtime::Error Configure(const std::string& graph_name);

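  // Executes the finalized graph named `graph_name` with the given QNN input
  // and output tensor structs.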
  Qnn_ErrorHandle_t GraphExecute(
      const std::string& graph_name,
      const std::vector<Qnn_Tensor_t>& input_tensor_structs,
      std::vector<Qnn_Tensor_t>& output_tensor_structs);

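  // Appends the op described by `op_config` to the named graph through the
  // QNN interface.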
  Qnn_ErrorHandle_t GraphAddNode(
      const std::string& graph_name,
      const Qnn_OpConfig_t& op_config) {
    return implementation_.GetQnnInterface().qnn_graph_add_node(
        handle_[graph_name], op_config);
  };
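  // Creates the wrapped tensor in the named graph if it has not been created
  // there already.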
  executorch::runtime::Error EnsureTensorInQnnGraph(
      const std::string& graph_name,
      const std::shared_ptr<TensorWrapper>& tensor_wrapper);

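  // Finalizes the named graph so it can be executed, attaching the per-graph
  // profile handle.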
  Qnn_ErrorHandle_t GraphFinalize(const std::string& graph_name) {
    return implementation_.GetQnnInterface().qnn_graph_finalize(
        handle_[graph_name],
        profile_[graph_name]->GetHandle(),
        nullptr /* signal_handle */);
  };
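  // Forwards the named graph's profiling data to the ExecuTorch event tracer.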
  Qnn_ErrorHandle_t ProfileExecuteData(
      const std::string& graph_name,
      executorch::runtime::EventTracer* event_tracer) {
    return profile_[graph_name]->ProfileData(event_tracer);
  };
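  // Returns the raw QNN graph handle stored for `graph_name`.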
  Qnn_GraphHandle_t GetHandle(const std::string& graph_name) {
    return handle_[graph_name];
  }

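  // Returns the profiler associated with `graph_name`.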
  QnnProfile* GetProfile(const std::string& graph_name) {
    return profile_[graph_name].get();
  }

 protected:
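  // Hook for derived backends to supply graph configurations; the base
  // implementation adds none.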
  virtual executorch::runtime::Error MakeConfig(
      std::vector<const QnnGraph_Config_t*>& config) {
    return executorch::runtime::Error::Ok;
  };

 private:
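  // QNN graph handles, keyed by graph name.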
  std::unordered_map<std::string, Qnn_GraphHandle_t> handle_;
  const QnnImplementation& implementation_;
  QnnBackend* backend_;
  QnnContext* context_;
  QnnExecuTorchProfileLevel profile_level_;
  std::unordered_map<std::string, std::unique_ptr<QnnProfile>> profile_;
};
} // namespace qnn
} // namespace backends
} // namespace executorch