xref: /aosp_15_r20/external/executorch/backends/qualcomm/builders/op_relu.py (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper

import torch

from .node_visitor import NodeVisitor, register_node_visitor
from .qnn_constants import OpRelu, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class Relu(NodeVisitor):
    target = ["aten.relu.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        # Wrap the single input of aten.relu.default as a native QNN tensor.
        input_node = node.args[0]
        input_tensor = self.get_tensor(input_node, node)
        relu_inp_tensor_wrapper = self.define_tensor(
            input_node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=True,
        )
        relu_input_tensors = [relu_inp_tensor_wrapper]

        # Wrap the tensor produced by this node as the op's output.
        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=False,
        )
        relu_output_tensors = [output_tensor_wrapper]

        # Build the QNN Relu op and attach its input/output tensor lists.
        relu_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpRelu.op_name,
        )
        relu_op.AddInputTensors(relu_input_tensors)
        relu_op.AddOutputTensors(relu_output_tensors)

        return relu_op
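
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the visitor above is
# the standard template for a single-input, single-output op.  The class
# below shows how the same steps (wrap the input, wrap the output, build a
# PyQnnOpWrapper, attach both tensor lists) would look for another unary op
# such as aten.gelu.default.  It reuses the imports at the top of this file;
# `OpGelu` is an assumed constant here, so substitute whatever entry your
# qnn_constants module actually defines for the op you are lowering.
from .qnn_constants import OpGelu  # assumption: a matching constant exists


@register_node_visitor
class Gelu(NodeVisitor):
    target = ["aten.gelu.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        # Same recipe as Relu: one native input tensor, one native output tensor.
        input_node = node.args[0]
        input_tensor = self.get_tensor(input_node, node)
        gelu_input_tensors = [
            self.define_tensor(
                input_node,
                input_tensor,
                PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
                nodes_to_wrappers,
                is_input_tensor=True,
            )
        ]

        output_tensor = self.get_tensor(node, node)
        gelu_output_tensors = [
            self.define_tensor(
                node,
                output_tensor,
                PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
                nodes_to_wrappers,
                is_input_tensor=False,
            )
        ]

        gelu_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name, QNN_OP_PACKAGE_NAME_QTI_AISW, OpGelu.op_name
        )
        gelu_op.AddInputTensors(gelu_input_tensors)
        gelu_op.AddOutputTensors(gelu_output_tensors)
        return gelu_op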