
Searched full:hardtanh (Results 1 – 25 of 162) sorted by relevance


/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_hardtanh.py
36 """Tests HardTanh Operator."""
38 class HardTanh(torch.nn.Module): class in TestHardTanh
43 self.hardTanh = torch.nn.Hardtanh()
46 return self.hardTanh(x)
58 .check(["torch.ops.aten.hardtanh.default"])
80 .check_count({"torch.ops.aten.hardtanh.default": 1})
102 .check_count({"torch.ops.aten.hardtanh.default": 1})
117 self._test_hardtanh_tosa_MI_pipeline(self.HardTanh(), (test_data,))
121 self._test_hardtanh_tosa_BI_pipeline(self.HardTanh(), (test_data,))
125 self._test_hardtanh_tosa_u55_BI_pipeline(self.HardTanh(), (test_data,))
/aosp_15_r20/external/executorch/backends/xnnpack/test/ops/
hardtanh.py
14 class HardTanh(torch.nn.Module): class in TestHardTanh
22 z = torch.nn.Hardtanh(self.min_val, self.max_val)(y)
29 Tester(self.HardTanh(), (input,))
31 .check_count({"torch.ops.aten.hardtanh.default": 1})
44 Tester(self.HardTanh(-2.0, 2.0), (input,))
46 .check_count({"torch.ops.aten.hardtanh.default": 1})
59 Tester(self.HardTanh(), (input,))
64 # Expect three quantize ops - one for input, hardtanh, and add.
66 torch.ops.aten.hardtanh.default: 1,
conv2d.py
105 self.hardtanh = torch.nn.Hardtanh()
118 y = self.hardtanh(y)
121 y = self.hardtanh(y)
264 This test makes sure that we can fuse batchnorm and hardtanh
281 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
286 x = self.hardtanh(x)
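The conv2d.py excerpt above covers a test that checks batchnorm and a ReLU6-style Hardtanh(0, 6) can be fused with the preceding convolution. As a rough illustration only (the class and layer names below are invented, not the test's code), a module with that structure looks like:

```python
import torch

class ConvBnHardtanh(torch.nn.Module):
    """Conv2d -> BatchNorm2d -> Hardtanh(0, 6): the pattern the fusion test exercises."""

    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.bn = torch.nn.BatchNorm2d(16)
        self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)

    def forward(self, x):
        return self.hardtanh(self.bn(self.conv(x)))

# In eval mode the batchnorm can be folded into the conv weights, and the
# Hardtanh(0, 6) clamp can then become the fused conv's output min/max.
y = ConvBnHardtanh().eval()(torch.randn(1, 3, 8, 8))
```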
/aosp_15_r20/external/executorch/backends/qualcomm/tests/
models.py
287 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
293 x3 = self.hardtanh(x2)
540 class HardTanh(torch.nn.Module): class
543 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
546 return self.hardtanh(x)
800 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6.0)
808 x5 = self.hardtanh(x4)
900 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
926 z5 = self.hardtanh(z4)
/aosp_15_r20/external/pytorch/test/quantization/pt2e/
test_metadata_porting.py
24 self.hardtanh = torch.nn.Hardtanh()
31 x = self.hardtanh(x)
145 conv2d -> avgpool -> hardtanh -> linear
211 conv2d -> avgpool -> hardtanh -> linear
254 conv2d -> avgpool -> hardtanh -> linear
325 conv2d -> avgpool -> hardtanh -> linear
388 conv2d -> avgpool -> hardtanh -> linear
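Several docstrings above describe the same toy graph, conv2d -> avgpool -> hardtanh -> linear. A minimal sketch of such a model (shapes and names are assumptions, not the tests' actual code):

```python
import torch

class SmallModel(torch.nn.Module):
    """conv2d -> avgpool -> hardtanh -> linear, the graph named in the docstrings above."""

    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
        self.pool = torch.nn.AdaptiveAvgPool2d((1, 1))
        self.hardtanh = torch.nn.Hardtanh()
        self.linear = torch.nn.Linear(8, 10)

    def forward(self, x):
        x = self.hardtanh(self.pool(self.conv(x)))
        return self.linear(x.flatten(1))

out = SmallModel()(torch.randn(2, 3, 16, 16))  # -> shape (2, 10)
```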
test_duplicate_dq.py
36 self.hardtanh = torch.nn.Hardtanh()
43 x = self.hardtanh(x)
124 conv2d -> avgpool -> hardtanh -> linear
test_x86inductor_quantizer.py
618 …Test pattern of conv2d with unary post ops (such as relu, hardtanh, hardswish, relu6) with X86Indu…
623 "hardtanh": [
624 torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=False),
625 torch.ops.aten.hardtanh.default,
628 torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=True),
631 "relu6": [torch.nn.ReLU6(inplace=False), torch.ops.aten.hardtanh.default],
1756 "hardtanh": [
1757 torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=False),
1758 torch.ops.aten.hardtanh.default,
1761 torch.nn.Hardtanh(min_val=0.0, max_val=6.0, inplace=True),
[all …]
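The entries above pair an eager-mode post op with the ATen op it lowers to; note that both nn.Hardtanh(0, 6) and nn.ReLU6 end up as torch.ops.aten.hardtanh.default. A minimal sketch of the conv-plus-unary-post-op shape these patterns match (the wrapper module below is illustrative, not the test's code):

```python
import torch

class ConvWithPostOp(torch.nn.Module):
    """Conv2d followed by a clamp-style unary post op (hardtanh / relu6)."""

    def __init__(self, post_op: torch.nn.Module):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 16, kernel_size=3)
        self.post_op = post_op

    def forward(self, x):
        return self.post_op(self.conv(x))

# Both variants trace to aten.hardtanh.default after the conv:
m_hardtanh = ConvWithPostOp(torch.nn.Hardtanh(min_val=0.0, max_val=6.0))
m_relu6 = ConvWithPostOp(torch.nn.ReLU6(inplace=False))
```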
/aosp_15_r20/external/pytorch/test/
test_xnnpack_integration.py
1062 # Not inplace hardtanh fusion test.
1064 "aten::hardtanh": 2,
1071 M(F.hardtanh), pattern_count_map, data_shape, prepack_removal=True
1075 pattern_count_map["aten::hardtanh"] = -1
1077 M(F.hardtanh),
1084 # Inplace hardtanh fusion test.
1123 o = F.hardtanh(o)
1126 # Unfusable hardtanh.
1128 "aten::hardtanh": 1, # hardtanh cannot be.
1159 o = F.hardtanh(o, min, max)
[all …]
test_metal.py
137 o = F.hardtanh(o)
145 pattern_count_map["aten::hardtanh"] = 1
152 pattern_count_map["aten::hardtanh"] = -1
test_vulkan.py
140 o = F.hardtanh(o)
148 pattern_count_map["aten::hardtanh"] = 1
155 pattern_count_map["aten::hardtanh"] = -1
/aosp_15_r20/external/executorch/backends/xnnpack/test/passes/
test_activation_fusion.py
145 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
152 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
162 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
173 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
184 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
195 activation=torch.nn.Hardtanh(min_val=-1.0, max_val=1.0),
test_channels_last_tagged_reshape.py
126 self.hardtanh = torch.nn.Hardtanh(min_val=0, max_val=6)
132 x = self.hardtanh(x)
156 …users=1] = call_function[target=executorch.exir.dialects.edge._ops.aten.hardtanh.default](args = (…
/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/ops/
MetalClamp.mm
30 static Tensor hardtanh(
58 return hardtanh(input, min.value(), max.value());
63 m.impl(TORCH_SELECTIVE_NAME("aten::hardtanh"), TORCH_FN(hardtanh));
/aosp_15_r20/external/executorch/backends/xnnpack/partition/
configs.py
47 exir_ops.edge.aten.hardtanh.default,
57 exir_ops.edge.aten.hardtanh.default,
89 exir_ops.edge.aten.hardtanh.default,
92 exir_ops.edge.aten.hardtanh.default,
/aosp_15_r20/external/pytorch/test/jit/
test_optimize_for_mobile_preserve_debug_info.py
236 linear_activation=F.hardtanh,
237 linear_activation_kind="aten::hardtanh",
247 conv2d_activation=F.hardtanh,
248 conv2d_activation_kind="aten::hardtanh",
/aosp_15_r20/external/pytorch/torch/ao/ns/fx/
mappings.py
248 # F.hardtanh
250 nn.Hardtanh,
251 F.hardtanh,
547 F.hardtanh,
687 nn.Hardtanh,
/aosp_15_r20/external/executorch/backends/transforms/
fuse_conv_with_clamp.py
19 …Some activations like ReLU and hardtanh can be fused with certain operators (e.g. convolution) pre…
27 exir_ops.edge.aten.hardtanh.default,
34 elif activation_node.target == exir_ops.edge.aten.hardtanh.default:
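The pass excerpted above folds a trailing activation into the preceding convolution's clamp bounds. A hypothetical helper sketching that idea follows; it is not the pass's actual code, and the argument handling is assumed from the aten.hardtanh(self, min_val=-1, max_val=1) schema:

```python
from executorch.exir.dialects._ops import ops as exir_ops

def clamp_bounds_from_activation(activation_node):
    """Hypothetical: map a fusable activation node to (min, max) clamp bounds."""
    if activation_node.target == exir_ops.edge.aten.relu.default:
        return 0.0, float("inf")  # ReLU only clamps from below
    if activation_node.target == exir_ops.edge.aten.hardtanh.default:
        args = activation_node.args
        min_val = args[1] if len(args) > 1 else -1.0  # schema defaults
        max_val = args[2] if len(args) > 2 else 1.0
        return min_val, max_val
    raise ValueError("activation cannot be fused into a clamp")
```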
/aosp_15_r20/external/pytorch/torch/_refs/nn/functional/
__init__.py
39 "hardtanh",
1004 @register_decomposition(aten.hardtanh)
1012 def hardtanh( function
1019 Reference implementation of torch.nn.functional.hardtanh
1024 raise RuntimeError("Bool inputs not supported for hardtanh")
1032 "Cannot do hardtanh on an unsigned type with negative limits"
1163 # It may be better to use clamp here, but we use hardtanh to replicate
1165 return torch.nn.functional.hardtanh(a, 0, 6)
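The _refs excerpt above rejects bool inputs and negative limits on unsigned dtypes, and the relu6 reference reuses hardtanh with limits 0 and 6. A minimal functional sketch of the same semantics using public ops (not the _refs code itself):

```python
import torch

def hardtanh_ref(a: torch.Tensor, min_val: float = -1.0, max_val: float = 1.0) -> torch.Tensor:
    """Clamp into [min_val, max_val]; mirrors torch.nn.functional.hardtanh semantics."""
    if a.dtype == torch.bool:
        raise RuntimeError("Bool inputs not supported for hardtanh")
    return torch.clamp(a, min_val, max_val)

def relu6_ref(a: torch.Tensor) -> torch.Tensor:
    """relu6 expressed as hardtanh(a, 0, 6), as the comment above notes."""
    return hardtanh_ref(a, 0.0, 6.0)

x = torch.tensor([-2.0, 0.5, 7.0])
assert torch.equal(hardtanh_ref(x), torch.tensor([-1.0, 0.5, 1.0]))
assert torch.equal(relu6_ref(x), torch.tensor([0.0, 0.5, 6.0]))
```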
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_hardtanh.cpp
49 ET_SWITCH_REAL_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() { in hardtanh_out()
51 ET_SWITCH_SCALAR_OBJ_TYPES(min_type, ctx, "hardtanh.out", CTYPE_MIN, [&]() { in hardtanh_out()
58 ET_SWITCH_SCALAR_OBJ_TYPES(max_type, ctx, "hardtanh.out", CTYPE_MAX, [&]() { in hardtanh_out()
/aosp_15_r20/external/executorch/backends/xnnpack/_passes/
fuse_activation_pass.py
20 Some activations like ReLU and hardtanh can be fused with certain operators preceding it.
36 exir_ops.edge.aten.hardtanh.default,
53 elif activation_node.target == exir_ops.edge.aten.hardtanh.default:
/aosp_15_r20/external/pytorch/docs/source/
mobile_optimizer.rst
17 Hardtanh fusion**: XNNPACK ops support fusion of clamping. That is clamping of output activation i…
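The doc entry above says XNNPACK ops can absorb an output clamp, so a hardtanh (or ReLU) following a conv/linear gets folded into the preceding op during mobile optimization. A rough usage sketch, assuming the standard torch.utils.mobile_optimizer entry point (the module itself is illustrative):

```python
import torch
import torch.nn.functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile

class ConvHardtanh(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)

    def forward(self, x):
        # This clamp is the kind of op the XNNPACK rewrite can fuse into the conv.
        return F.hardtanh(self.conv(x), min_val=0.0, max_val=6.0)

scripted = torch.jit.script(ConvHardtanh().eval())
optimized = optimize_for_mobile(scripted)  # runs prepack and clamp-fusion passes, among others
```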
/aosp_15_r20/external/pytorch/torch/nn/modules/
activation.py
19 "Hardtanh",
200 class Hardtanh(Module): class
201 r"""Applies the HardTanh function element-wise.
203 HardTanh is defined as:
206 \text{HardTanh}(x) = \begin{cases} \text{max\_val} & \text{ if } x > \text{max\_val} \\ \text{min\_val} & \text{ if } x < \text{min\_val} \\ x & \text{ otherwise} \end{cases}
224 .. image:: ../scripts/activation_images/Hardtanh.png
228 >>> m = nn.Hardtanh(-2, 2)
269 return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
276 class ReLU6(Hardtanh):
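For quick reference, nn.Hardtanh is just an element-wise clamp, and ReLU6 (line 276 above) subclasses it with min_val=0, max_val=6. A tiny usage example:

```python
import torch
import torch.nn as nn

m = nn.Hardtanh(-2, 2)              # clamp every element into [-2, 2]
x = torch.tensor([-3.0, 0.5, 3.0])
print(m(x))                         # tensor([-2.0000, 0.5000, 2.0000])
```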
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
UnaryOp.cpp
135 DEFINE_CLAMP_FN(hardtanh);
147 VK_REGISTER_OP(aten.hardtanh.default, hardtanh);
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/modules/
activation.h
119 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardtanh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
121 /// Applies the HardTanh function element-wise.
122 /// See https://pytorch.org/docs/main/nn.html#torch.nn.Hardtanh to learn
130 /// Hardtanh
141 /// Pretty prints the `Hardtanh` module into the given `stream`.
150 /// provides, and examples of how to use `Hardtanh` with
153 TORCH_MODULE(Hardtanh);
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/
functional.py
31 "hardtanh",
592 def hardtanh( function
595 r"""This is the quantized version of :func:`~torch.nn.functional.hardtanh`."""
597 raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
600 return torch._C._nn.hardtanh(input, min_val, max_val)
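The quantized functional above raises unless its input is already quantized. A small usage sketch with per-tensor quantization (the scale and zero point are arbitrary example values):

```python
import torch
import torch.ao.nn.quantized.functional as qF

x = torch.tensor([-2.0, 0.3, 2.0])
qx = torch.quantize_per_tensor(x, scale=0.05, zero_point=128, dtype=torch.quint8)

qy = qF.hardtanh(qx, min_val=-1.0, max_val=1.0)  # clamp applied in the quantized domain
print(qy.dequantize())                           # values limited to roughly [-1, 1]
```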
