# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper

import torch

from .node_visitor import NodeVisitor, register_node_visitor
from .qnn_constants import OpReshape, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class Squeeze(NodeVisitor):
    target = ["aten.squeeze_copy.dims", "aten.squeeze.dims"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        input_node = node.args[0]
        input_tensor = self.get_tensor(input_node, node)

        input_tensor_wrapper = self.define_tensor(
            input_node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=True,
        )

        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=False,
        )

        squeeze_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpReshape.op_name,
        )
        squeeze_op.AddInputTensors([input_tensor_wrapper])
        squeeze_op.AddOutputTensors([output_tensor_wrapper])

        return squeeze_op