# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
import numpy as np

import torch
from executorch.backends.qualcomm.utils.constants import QCOM_DATA

from .node_visitor import NodeVisitor, register_node_visitor
from .qnn_constants import OpElementWiseNeuron, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class HardSigmoidVisitor(NodeVisitor):
    # Lowers aten.hardsigmoid.default to QNN's ElementWiseNeuron op.
    target = ["aten.hardsigmoid.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        # Wrap the single input tensor consumed by the hardsigmoid node.
        input_node = node.args[0]
        input_tensor = self.get_tensor(input_node, node)
        input_tensor_wrapper = self.define_tensor(
            input_node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=True,
        )

        # Wrap the output tensor produced by the node itself.
        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=False,
        )

        hardsigmoid_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseNeuron.op_name,
        )
        hardsigmoid_op.AddInputTensors([input_tensor_wrapper])
        hardsigmoid_op.AddOutputTensors([output_tensor_wrapper])

        # The operation enum selecting hardsigmoid within QNN's ElementWiseNeuron op
        hardsigmoid_op.AddScalarParam(
            OpElementWiseNeuron.param_operation,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32,
            {QCOM_DATA: np.uint32(2)},
        )

        # alpha/beta from the PyTorch definition of hardsigmoid:
        # hardsigmoid(x) = clamp(x / 6 + 1 / 2, 0, 1)
        hardsigmoid_op.AddScalarParam(
            OpElementWiseNeuron.param_alpha,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_FLOAT_32,
            {QCOM_DATA: np.float32(1 / 6)},
        )
        hardsigmoid_op.AddScalarParam(
            OpElementWiseNeuron.param_beta,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_FLOAT_32,
            {QCOM_DATA: np.float32(1 / 2)},
        )

        return hardsigmoid_op
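

# Illustrative sanity check (a sketch, not part of the visitor): verifies that
# the alpha/beta values passed to QNN above (1/6 and 1/2) reproduce PyTorch's
# aten.hardsigmoid semantics, i.e. hardsigmoid(x) = clamp(x / 6 + 1 / 2, 0, 1).
# It assumes only that torch is installed and does not touch the QNN runtime.
if __name__ == "__main__":
    x = torch.linspace(-6.0, 6.0, steps=25)
    expected = torch.nn.functional.hardsigmoid(x)
    affine_clamp = torch.clamp(x * (1.0 / 6.0) + 0.5, min=0.0, max=1.0)
    assert torch.allclose(expected, affine_clamp), "alpha/beta mapping mismatch"
    print("hardsigmoid(x) matches clamp(x / 6 + 1 / 2, 0, 1) on the sampled range")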