
Full-text search for "relu" (results 1 – 25 of 1904), sorted by relevance.


/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/modules/
fused.py
11 ReLU,
43 r"""This is a sequential container which calls the Conv1d and ReLU modules.
46 def __init__(self, conv, relu): argument
49 and type_before_parametrizations(relu) == ReLU
50 …ct types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}"
51 super().__init__(conv, relu)
55 r"""This is a sequential container which calls the Conv2d and ReLU modules.
58 def __init__(self, conv, relu): argument
61 and type_before_parametrizations(relu) == ReLU
62 …ct types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}"
[all …]
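
The fused.py hits above define sequential containers such as ConvReLU1d/ConvReLU2d that simply chain a convolution and a ReLU after validating the module types. A minimal sketch of the same idea, omitting the parametrization-aware type check the real code uses:

    import torch
    from torch import nn

    class ConvReLU2d(nn.Sequential):
        """Sequential container that calls Conv2d then ReLU (simplified sketch)."""
        def __init__(self, conv, relu):
            # The upstream code uses type_before_parametrizations(); plain
            # type checks are used here for brevity.
            assert type(conv) == nn.Conv2d and type(relu) == nn.ReLU, (
                f"Incorrect types for input modules {type(conv)} {type(relu)}"
            )
            super().__init__(conv, relu)

    fused = ConvReLU2d(nn.Conv2d(3, 8, 3), nn.ReLU())
    out = fused(torch.randn(1, 3, 32, 32))  # conv output, negatives clamped to zero
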
/aosp_15_r20/external/XNNPACK/scripts/
generate-f32-vbinary.sh
26 ….in -D OP=ADD -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
27 ….in -D OP=ADD -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
28 ….in -D OP=ADD -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
29 ….in -D OP=ADD -D BATCH_TILE=8 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
30 ….in -D OP=DIV -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
31 ….in -D OP=DIV -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
32 ….in -D OP=DIV -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
33 ….in -D OP=DIV -D BATCH_TILE=8 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
34 ….in -D OP=MUL -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vmul-relu-sc…
35 ….in -D OP=MUL -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vmul-relu-sc…
[all …]
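
Each line above generates an f32-vbinary microkernel for a given OP and BATCH_TILE; ACTIVATION=RELU means the result of the binary op is clamped at zero. A NumPy sketch of what the vadd-relu variants compute (tiling and codegen details are handled by the generator and omitted):

    import numpy as np

    def vadd_relu(a: np.ndarray, b: np.ndarray) -> np.ndarray:
        # f32-vbinary OP=ADD with ACTIVATION=RELU: elementwise add,
        # then clamp negative results to zero.
        return np.maximum(a + b, 0.0)

    print(vadd_relu(np.array([1.0, -2.0]), np.array([2.0, 1.0])))  # [3. 0.]
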
generate-f32-igemm.sh
14 …igemm/scalar.c.in -D MR=1 -D NR=4 -D WASM=0 -D ACTIVATION=RELU -o src/f32-igemm/gen/1x4-relu-sca…
15 …igemm/scalar.c.in -D MR=2 -D NR=4 -D WASM=0 -D ACTIVATION=RELU -o src/f32-igemm/gen/2x4-relu-sca…
16 …igemm/scalar.c.in -D MR=4 -D NR=2 -D WASM=0 -D ACTIVATION=RELU -o src/f32-igemm/gen/4x2-relu-sca…
17 …igemm/scalar.c.in -D MR=4 -D NR=4 -D WASM=0 -D ACTIVATION=RELU -o src/f32-igemm/gen/4x4-relu-sca…
25 …igemm/scalar.c.in -D MR=1 -D NR=4 -D WASM=1 -D ACTIVATION=RELU -o src/f32-igemm/gen/1x4-relu-was…
26 …igemm/scalar.c.in -D MR=2 -D NR=4 -D WASM=1 -D ACTIVATION=RELU -o src/f32-igemm/gen/2x4-relu-was…
27 …igemm/scalar.c.in -D MR=4 -D NR=2 -D WASM=1 -D ACTIVATION=RELU -o src/f32-igemm/gen/4x2-relu-was…
28 …igemm/scalar.c.in -D MR=4 -D NR=4 -D WASM=1 -D ACTIVATION=RELU -o src/f32-igemm/gen/4x4-relu-was…
61 …splat.c.in -D MR=1 -D NR=8 -D FMA=0 -D ACTIVATION=RELU -o src/f32-igemm/gen/1x8-
62 …splat.c.in -D MR=3 -D NR=8 -D FMA=0 -D ACTIVATION=RELU -o src/f32-igemm/gen/3x8-
[all …]
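
The f32-igemm generator lines produce indirect-GEMM microkernels specialized for an MR x NR output tile; with ACTIVATION=RELU the accumulated tile is clamped at zero before being stored. A rough NumPy sketch of that post-accumulation step:

    import numpy as np

    def gemm_relu_tile(a: np.ndarray, b: np.ndarray, bias: np.ndarray) -> np.ndarray:
        # Accumulate an MR x NR tile, then apply the RELU activation
        # (lower bound of zero) before writing the tile out.
        acc = a @ b + bias
        return np.maximum(acc, 0.0)

    tile = gemm_relu_tile(np.random.randn(4, 16), np.random.randn(16, 4), np.zeros(4))
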
/aosp_15_r20/external/ComputeLibrary/examples/
graph_inception_v4.cpp
93 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
105 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
117 …ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu"); in do_setup()
217 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_3a/Branch_1/Conv2d_0a_3x3… in get_mixed_3a()
237 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_0/Conv2d_0a_1x1… in get_mixed_4a()
248 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_0/Conv2d_1a_3x3… in get_mixed_4a()
261 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_0a_1x1… in get_mixed_4a()
272 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_0b_1x7… in get_mixed_4a()
283 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_0c_7x1… in get_mixed_4a()
294 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_1a_3x3… in get_mixed_4a()
[all …]
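
The Compute Library graph examples attach an ActivationLayer with ActivationFunction::RELU after each convolution stage and name the node "<layer>/Relu". A purely illustrative PyTorch equivalent of one such Conv -> BatchNorm -> ReLU stage (not the Compute Library API):

    import torch
    from torch import nn

    # Illustrative Conv -> BatchNorm -> ReLU stage, analogous to the
    # "Conv2d_1a_3x3/Relu" node built in graph_inception_v4.cpp.
    stem = nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=3, stride=2, bias=False),
        nn.BatchNorm2d(32),
        nn.ReLU(),
    )
    out = stem(torch.randn(1, 3, 299, 299))
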
graph_inception_resnet_v2.cpp
104 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
117 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
130 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu") in do_setup()
145 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_3b_1x1/Relu") in do_setup()
158 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4a_3x3/Relu") in do_setup()
182 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_7b_1x1/Relu") in do_setup()
233 …LayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_0/Conv2d_1x1/R… in block_mixed_5b()
248 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_1/Conv2d_0a_1x1… in block_mixed_5b()
260 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_1/Conv2d_0b_5x5… in block_mixed_5b()
275 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_2/Conv2d_0a_1x1… in block_mixed_5b()
[all …]
graph_inception_v3.cpp
92 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
105 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
119 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu") in do_setup()
135 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_3b_1x1/Relu") in do_setup()
149 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4a_3x3/Relu") in do_setup()
258 …rInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_0a_1… in get_inception_node_A()
274 …ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d" + conv_id… in get_inception_node_A()
288 …ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d" + conv_id… in get_inception_node_A()
304 …rInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0a_1… in get_inception_node_A()
318 …rInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0b_3… in get_inception_node_A()
[all …]
graph_inception_resnet_v1.cpp
120 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
133 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
146 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu") in do_setup()
161 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_3b_1x1/Relu") in do_setup()
174 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4a_3x3/Relu") in do_setup()
187 …ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4b_3x3/Relu"); in do_setup()
268 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_0/Conv2d_1x1… in block35_repeat()
283 …erInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_1/Conv2d_0a_1x… in block35_repeat()
295 …erInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_1/Conv2d_0b_3x… in block35_repeat()
310 …erInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_2/Conv2d_0a_1x… in block35_repeat()
[all …]
graph_vgg19.cpp
85 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_1/Relu") in do_setup()
92 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_2/Relu") in do_setup()
101 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_1/Relu") in do_setup()
108 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_2/Relu") in do_setup()
117 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_1/Relu") in do_setup()
124 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_2/Relu") in do_setup()
131 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_3/Relu") in do_setup()
138 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_4/Relu") in do_setup()
147 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_1/Relu") in do_setup()
154 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_2/Relu") in do_setup()
[all …]
graph_vgg16.cpp
87 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_1/Relu") in do_setup()
95 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_2/Relu") in do_setup()
104 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_1/Relu") in do_setup()
112 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_2/Relu") in do_setup()
121 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_1/Relu") in do_setup()
129 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_2/Relu") in do_setup()
137 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_3/Relu") in do_setup()
146 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_1/Relu") in do_setup()
154 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_2/Relu") in do_setup()
162 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_3/Relu") in do_setup()
[all …]
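
The VGG examples repeat the same conv/ReLU pairing throughout, with pooling between groups. An illustrative PyTorch sketch of one such group, mirroring the conv1_1/Relu and conv1_2/Relu nodes above:

    import torch
    from torch import nn

    # One VGG-style group: two conv/ReLU pairs followed by max pooling.
    block1 = nn.Sequential(
        nn.Conv2d(3, 64, kernel_size=3, padding=1), nn.ReLU(),
        nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2),
    )
    out = block1(torch.randn(1, 3, 224, 224))  # -> (1, 64, 112, 112)
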
/aosp_15_r20/external/pytorch/torch/ao/quantization/backend_config/
_common_operator_config_utils.py
153 (op_with_quantized_bop_scalar_variant, nn.ReLU),
154 (op_with_quantized_bop_scalar_variant, F.relu),
155 (op_with_quantized_bop_scalar_variant, torch.relu),
209 # (2) Linear + relu
211 # 2.1 linear module + relu fusion config
212 # linear relu, linear module + relu module
214 BackendPatternConfig((torch.nn.Linear, torch.nn.ReLU))
219 # linear relu, linear module + functional relu
221 BackendPatternConfig((torch.nn.Linear, torch.nn.functional.relu))
227 # 2.2 linear module + relu, fused module configs
[all …]
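
The backend config above registers Linear + ReLU fusion patterns for both the module form (nn.ReLU) and the functional form (F.relu), and lists torch.relu alongside the quantized binary-op variants. A hedged sketch of a model whose forward contains all three spellings of relu:

    import torch
    from torch import nn
    import torch.nn.functional as F

    class LinearReLUVariants(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1, self.fc2, self.fc3 = nn.Linear(8, 8), nn.Linear(8, 8), nn.Linear(8, 8)
            self.relu = nn.ReLU()           # module form, matches (nn.Linear, nn.ReLU)

        def forward(self, x):
            x = self.relu(self.fc1(x))      # linear module + relu module
            x = F.relu(self.fc2(x))         # linear module + functional relu
            return torch.relu(self.fc3(x))  # torch.relu, the tensor-level spelling listed above

    m = LinearReLUVariants()
    y = m(torch.randn(2, 8))
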
executorch.py
179 # (2) Conv + relu
181 # conv module + relu module
183 BackendPatternConfig((convs.root, nn.ReLU))
188 # conv module + functional relu
190 BackendPatternConfig((convs.root, F.relu))
195 # fused conv relu module
204 # conv relu, qat fused module
212 # functional conv + relu module
214 BackendPatternConfig((convs.func, nn.ReLU))
218 # functional conv + functional relu
[all …]
onednn.py
309 # (2) Conv2d + Add + Relu
315 # relu
318 def _fuse_conv_add_relu_left(is_qat, relu, add_pattern): argument
320 return nni.ConvAddReLU2d(conv, add, relu)
324 relu, add_pattern = pattern
333 relu, add_pattern = pattern
344 # relu
347 def _fuse_conv_bn_add_relu_left(is_qat, relu, add_pattern): argument
351 raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}")
354 return nni.ConvAddReLU2d(fused_conv, add, relu)
[all …]
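
The onednn backend fuses a Conv2d + Add + ReLU chain into nni.ConvAddReLU2d, with separate handlers depending on which side of the add the conv output sits. A minimal sketch of a forward that exhibits the pattern (module names are illustrative):

    import torch
    from torch import nn

    class ConvAddRelu(nn.Module):
        # Conv2d + Add + ReLU, the shape targeted by _fuse_conv_add_relu_left()
        # when the conv output is the left operand of the add.
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, 3, padding=1)
            self.relu = nn.ReLU()

        def forward(self, x, y):
            return self.relu(self.conv(x) + y)

    m = ConvAddRelu()
    out = m(torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8))
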
/aosp_15_r20/external/pytorch/test/quantization/eager/
test_fuse_eager.py
51 msg="Fused Conv + BN + Relu first layer")
53 msg="Fused Conv + BN + Relu (skipped BN)")
55 msg="Fused Conv + BN + Relu (skipped Relu)")
63 self.assertEqual(type(model.sub2.relu), torch.nn.ReLU,
64 msg="Non-fused submodule ReLU")
75 self.assertEqual(type(model.sub2.relu), nn.ReLU)
88 self.assertEqual(type(model.sub2.relu), nn.ReLU)
116 msg="Fused Conv + BN + Relu first layer (BN is folded)")
118 msg="Fused Conv + BN + Relu (Conv + folded BN only)")
119 self.assertEqual(type(model.conv1[1]), nn.ReLU,
[all …]
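
This eager-mode test checks that Conv + BN + ReLU triples are collapsed into a single fused module while untouched submodules keep their plain nn.ReLU. A minimal sketch of the fuse_modules API it exercises, with hypothetical module names:

    import torch
    from torch import nn
    from torch.ao.quantization import fuse_modules

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 8, 3)
            self.bn1 = nn.BatchNorm2d(8)
            self.relu1 = nn.ReLU()

        def forward(self, x):
            return self.relu1(self.bn1(self.conv1(x)))

    m = Net().eval()
    # Fold conv1/bn1/relu1 into one fused module; bn1 and relu1 become Identity.
    fused = fuse_modules(m, [["conv1", "bn1", "relu1"]])
    print(type(fused.conv1))  # fused ConvReLU2d-style module after BN folding
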
/aosp_15_r20/external/pytorch/test/fx/
test_source_matcher_utils.py
32 self.relu = torch.nn.ReLU()
38 x = self.relu(x)
47 gm.graph, [torch.nn.Linear, torch.nn.ReLU]
52 self.assertEqual(len(module_partitions[torch.nn.ReLU]), 1)
57 module_partitions[torch.nn.ReLU][0],
63 module_partitions[torch.nn.ReLU][0],
69 module_partitions[torch.nn.ReLU][0],
88 self.relu = torch.nn.ReLU()
96 return self.maxpool(self.relu(z))
105 gm.graph, [torch.nn.Conv2d, torch.nn.ReLU, torch.nn.MaxPool2d]
[all …]
/aosp_15_r20/external/pytorch/test/cpp/tensorexpr/
test_memplanning.cpp
107 Compute("relu", {M, N}, [&](const ExprHandle& m, const ExprHandle& n) { in TEST()
123 // Intermediate buffers and their liveness ranges: gemm [0, 1], relu [1, 2], in TEST()
139 // relu[i_3, i_4] = (gemm[i_3, i_4])<0.f ? 0.f : (gemm[i_3, i_4]); in TEST()
144 // E[i_5, i_6] = quint8((relu[i_5, i_6]) + (relu[i_5, i_6])); in TEST()
160 # CHECK: Allocate(relu); // dtype=float, dims=[4, 4] in TEST()
162 # CHECK: Free(relu); in TEST()
189 # CHECK: Allocate(relu); // dtype=float, dims=[4, 4] in TEST()
191 # CHECK: Free(relu); in TEST()
219 Compute("relu", {M, N}, [&](const ExprHandle& m, const ExprHandle& n) { in TEST()
235 // Intermediate buffers and their liveness ranges: gemm [0, 1], relu [1, 2], in TEST()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/
quantize_training_test.cc
82 Relu Identity in TEST_F()
92 Node* relu = test::graph::Relu(g, a); in TEST_F() local
94 Node* m1 = test::graph::Matmul(g, relu, identity, false, false); in TEST_F()
102 Relu Identity in TEST_F()
118 // Quantize_and_dequantize node for relu should have signed_input==false. in TEST_F()
121 FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"), in TEST_F()
133 Relu Relu6 in TEST_F()
143 Node* relu = test::graph::Relu(g, a); in TEST_F() local
145 Node* m1 = test::graph::Matmul(g, relu, relu6, false, false); in TEST_F()
153 Relu Relu6 in TEST_F()
[all …]
/aosp_15_r20/external/pytorch/test/quantization/pt2e/
test_x86inductor_quantizer.py
100 self.relu = nn.ReLU()
112 tmp += self.relu(x)
118 return tmp + self.relu(x)
121 tmp = self.relu(x)
125 return self.relu(x) + self.conv(x)
160 self.relu = nn.ReLU()
163 self.relu2 = nn.ReLU(inplace=inplace_relu)
173 tmp += self.relu(x)
179 return self.relu2(tmp + self.relu(x))
182 tmp = self.relu(x)
[all …]
/aosp_15_r20/external/pytorch/test/
test_fx_passes.py
43 relu = add_6.relu()
45 return add_4, add_6, relu
69 relu_1 = add_2.relu()
72 relu_2 = add_4.relu()
81 relu_1 = add_1.relu() # blocked by this
105 relu = add.relu()
108 return relu, add_1
115 relu = add.relu()
117 relu_1 = add.relu()
118 return relu, relu_1
[all …]
/aosp_15_r20/external/pytorch/test/jit/
test_models.py
53 x = F.relu(F.max_pool2d(self.conv1(x), 2))
54 x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
56 x = F.relu(self.fc1(x))
72 nn.ReLU(True),
76 nn.ReLU(True),
80 nn.ReLU(True),
84 nn.ReLU(True),
177 self.relu = torch.nn.ReLU()
180 y = self.relu(self.in1(self.conv1(X)))
181 y = self.relu(self.in2(self.conv2(y)))
[all …]
test_autodiff_subgraph_slicing.py
185 return torch.nn.functional.linear(x, weight, bias).relu() + 2
243 o1 = torch.relu(o)
245 o2 = torch.relu(o)
250 oo1 = torch.relu(o)
252 oo2 = torch.relu(o)
256 oo1 = torch.relu(o)
258 oo2 = torch.relu(o)
290 return torch.nn.functional.relu(input + bias)
460 # Case 1: aliasing between relu and t
462 # to merge both split_with_sizes in relu in one graph
[all …]
/aosp_15_r20/external/pytorch/test/quantization/jit/
test_quantize_jit.py
277 self.relu = torch.nn.ReLU()
282 x = self.relu(x)
285 x = self.relu(x)
574 return F.relu(self.conv(x))
580 self.relu = torch.nn.ReLU()
583 return self.relu(self.conv(x))
588 self.relu = torch.nn.ReLU()
594 return self.relu(out)
604 return F.relu(out)
614 # observer for input of conv and output of relu
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/
insert_observers.cpp
444 // Find and mark known patterns such as conv-relu (and others) where
462 // the output value of conv in the conv - relu pattern
464 // the value is the value we want to observe, e.g. output of relu
466 // example, assuming we want to delay conv-relu:
468 // %x2 = relu(%x1)
538 // nn.Linear + nn.ReLU
541 graph(%input, %linear, %relu):
543 %second_output = prim::CallMethod[name="forward\\d*"](%relu, %first_output)
547 // nn.Linear + F.relu
550 graph(%input, %linear, %relu, %inplace):
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
gpu_fusion.mlir
10 %relu = "tf.Relu"(%y#0) : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
11 func.return %relu : tensor<8x8x8x8xf32>
20 %relu = "tf.Relu"(%add) : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
21 func.return %relu : tensor<8x8x8x8xf32>
27 // Relu activation and we only fuse the add.
29 // CHECK-NEXT: %[[relu:[a-z0-9]*]] ={{.*}}Relu"(%[[Y]]
30 // CHECK-NEXT: return %[[relu]]
33 %relu = "tf.Relu"(%add) : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
34 func.return %relu, %add : tensor<8x8x8x8xf32>, tensor<8x8x8x8xf32>
41 // CHECK-NEXT: %[[relu:[a-z0-9]*]] ={{.*}}Relu"(%[[Y]]
[all …]
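
The gpu_fusion.mlir cases check that tf.Relu following a batch-norm (or add) is folded into a fused GPU kernel, but only when the intermediate value has no other users, which is why returning both %relu and %add blocks the fusion. An illustrative TensorFlow-level sketch of the unfused graph shape the pass looks for; it uses the decomposed batch_normalization op rather than the FusedBatchNormV3 op that appears in the test:

    import tensorflow as tf

    @tf.function
    def bn_relu(x, scale, offset, mean, variance):
        # BatchNorm followed by Relu: the shape the gpu_fusion pass rewrites
        # into a single fused kernel when the BN output has no other consumers.
        y = tf.nn.batch_normalization(x, mean, variance, offset, scale, 1e-3)
        return tf.nn.relu(y)

    x = tf.random.normal([8, 8, 8, 8])
    ones, zeros = tf.ones([8]), tf.zeros([8])
    out = bn_relu(x, ones, zeros, zeros, ones)
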
/aosp_15_r20/external/pytorch/test/quantization/fx/
test_quantize_fx.py
251 if relu_callable is torch.nn.ReLU:
252 self.relu = torch.nn.ReLU()
254 self.relu = relu_callable
260 x = self.relu(x)
262 x = self.relu(x)
286 self.relu = nn.ReLU()
297 x = self.relu(x)
300 x = self.relu(x)
303 x = self.relu(x)
322 ns.call_module(nn.ReLU): 0
[all …]
/aosp_15_r20/external/pytorch/torch/ao/quantization/
fuser_method_mappings.py
64 def fuse_conv_bn_relu(is_qat, conv, bn, relu): argument
79 >>> r1 = nn.ReLU(inplace=False)
84 conv.training == bn.training == relu.training
102 return fused_module(conv, bn, relu)
104 raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, relu)}")
114 return fused_module(fused_conv, relu)
116 raise NotImplementedError(f"Cannot fuse eval modules: {(conv, bn, relu)}")
196 (nn.Conv1d, nn.BatchNorm1d, nn.ReLU): fuse_conv_bn_relu,
198 (nn.Conv2d, nn.BatchNorm2d, nn.ReLU): fuse_conv_bn_relu,
200 (nn.Conv3d, nn.BatchNorm3d, nn.ReLU): fuse_conv_bn_relu,
[all …]
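
fuse_conv_bn_relu folds the batch norm into the convolution (eval mode) and then pairs the result with the ReLU in an intrinsic fused module, registered for the 1d/2d/3d variants at the bottom of the file. A hedged sketch of the eval-mode math using torch.nn.utils.fusion.fuse_conv_bn_eval; the real code wraps the result in nni.ConvReLU2d rather than nn.Sequential:

    import torch
    from torch import nn
    from torch.nn.utils.fusion import fuse_conv_bn_eval

    conv = nn.Conv2d(3, 8, 3).eval()
    bn = nn.BatchNorm2d(8).eval()
    relu = nn.ReLU(inplace=False)

    # Eval-mode path: fold the BN statistics into the conv weights/bias,
    # then chain the ReLU behind the folded conv.
    fused_conv = fuse_conv_bn_eval(conv, bn)
    fused = nn.Sequential(fused_conv, relu)

    x = torch.randn(1, 3, 16, 16)
    assert torch.allclose(fused(x), relu(bn(conv(x))), atol=1e-5)
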
