# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from executorch.backends.xnnpack.test.tester import Tester


class TestLeakyRelu(unittest.TestCase):
    """Tests that aten.leaky_relu is lowered to the XNNPACK delegate.

    Each test exports a small module, lowers it, verifies the leaky_relu op
    was absorbed into the delegate (no edge-dialect leaky_relu remains), and
    compares runtime outputs against eager mode.
    """

    class LeakyReLU(torch.nn.Module):
        """Module form: an add feeding torch.nn.LeakyReLU (kwargs forwarded)."""

        def __init__(self, **kwargs):
            super().__init__()
            self.relu = torch.nn.LeakyReLU(**kwargs)

        def forward(self, x):
            # The add gives the graph an op besides the activation, so the
            # delegate partition contains more than a single node.
            y = x + x
            z = self.relu(y)
            return z

    class LeakyReLUFunctional(torch.nn.Module):
        """Functional form: F.leaky_relu with its default negative slope."""

        def forward(self, x):
            return torch.nn.functional.leaky_relu(x)

    def _test_leaky_relu(self, module, inputs):
        """Run the shared export/lower/serialize/compare pipeline.

        Args:
            module: the torch.nn.Module under test.
            inputs: tuple of example inputs for export and runtime comparison.
        """
        (
            Tester(module, inputs)
            .export()
            .check_count({"torch.ops.aten.leaky_relu.default": 1})
            .to_edge_transform_and_lower()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .check_not(
                [
                    "executorch_exir_dialects_edge__ops_aten_leaky_relu_default",
                ]
            )
            .to_executorch()
            .serialize()
            .run_method_and_compare_outputs()
        )

    def test_fp16_leaky_relu(self):
        inputs = (torch.randn(1, 3, 3).to(torch.float16),)
        module = self.LeakyReLUFunctional()
        self._test_leaky_relu(module, inputs)

    def test_fp32_leaky_relu(self):
        inputs = (torch.randn(1, 3, 3),)
        module = self.LeakyReLU(negative_slope=0.2)
        self._test_leaky_relu(module, inputs)

    def test_fp32_leaky_relu_functional(self):
        # Previously duplicated the helper's entire pipeline inline; reuse
        # the shared helper so the lowering checks stay in one place.
        inputs = (torch.randn(1, 3, 3),)
        self._test_leaky_relu(self.LeakyReLUFunctional(), inputs)

    @unittest.skip("T172863987 - Missing quantizer support.")
    def _test_qs8_leaky_relu(self):
        """QS8 path with an explicit negative slope (disabled: no quantizer support)."""
        inputs = (torch.randn(1, 3, 3),)
        (
            Tester(self.LeakyReLU(negative_slope=0.2), inputs)
            .quantize()
            .export()
            .check_node_count(
                {
                    "leaky_relu::default": 1,
                    "quantized_decomposed::quantize_per_tensor": 3,
                }
            )
            .to_edge_transform_and_lower()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .check_not(
                [
                    "executorch_exir_dialects_edge__ops_aten_leaky_relu_default",
                    "torch.ops.quantized_decomposed",
                ]
            )
            .to_executorch()
            .serialize()
            .run_method_and_compare_outputs()
        )

    @unittest.skip("T172863987 - Missing quantizer support.")
    def _test_qs8_leaky_relu_default_slope(self):
        """
        The leaky_relu visitor has logic to handle the default slope, since it's apparently not
        passed through on export. This test ensures that this matches the eager mode behavior.
        """

        inputs = (torch.randn(1, 3, 3),)
        (
            Tester(self.LeakyReLU(), inputs)
            .quantize()
            .export()
            .check_node_count(
                {
                    "leaky_relu::default": 1,
                    "quantized_decomposed::quantize_per_tensor": 3,
                }
            )
            .to_edge_transform_and_lower()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .check_not(
                [
                    "executorch_exir_dialects_edge__ops_aten_leaky_relu_default",
                    "torch.ops.quantized_decomposed",
                ]
            )
            .to_executorch()
            .serialize()
            .run_method_and_compare_outputs()
        )