Searched full:convolution (Results 1 – 25 of 1424) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
convolution.c
71 pytorch_qnnp_operator_t convolution = NULL; in pytorch_qnnp_create_convolution_ndhwc_q8() local
84 "failed to create convolution with %" PRIu32 "x%" PRIu32 in pytorch_qnnp_create_convolution_ndhwc_q8()
93 "failed to create convolution with %" PRIu32 "x%" PRIu32 in pytorch_qnnp_create_convolution_ndhwc_q8()
103 "failed to create convolution with %" PRIu32 "x%" PRIu32 in pytorch_qnnp_create_convolution_ndhwc_q8()
115 "inefficiency in convolution with %" PRIu32 "x%" PRIu32 in pytorch_qnnp_create_convolution_ndhwc_q8()
118 …subsampling is greater than kernel height; subsampling should be performed before the convolution", in pytorch_qnnp_create_convolution_ndhwc_q8()
127 "inefficiency in convolution with %" PRIu32 "x%" PRIu32 in pytorch_qnnp_create_convolution_ndhwc_q8()
130 … subsampling is greater than kernel width; subsampling should be performed before the convolution", in pytorch_qnnp_create_convolution_ndhwc_q8()
139 "inefficiency in convolution with %" PRIu32 "x%" PRIu32 "x%" PRIu32 in pytorch_qnnp_create_convolution_ndhwc_q8()
152 "inefficiency in convolution with %" PRIu32 "x%" PRIu32 "x%" PRIu32 in pytorch_qnnp_create_convolution_ndhwc_q8()
[all …]
conv-run.cc
333 const pytorch_qnnp_operator_t convolution, in qnnpackConv() argument
348 const size_t groups = convolution->groups; in qnnpackConv()
349 const size_t input_pixel_stride = convolution->group_input_channels * groups; in qnnpackConv()
351 convolution->group_output_channels * groups; in qnnpackConv()
352 const size_t kernel_width = convolution->kernel_width; in qnnpackConv()
353 const size_t kernel_height = convolution->kernel_height; in qnnpackConv()
354 const size_t kernel_depth = convolution->kernel_depth; in qnnpackConv()
364 if (convolution->ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) { in qnnpackConv()
381 // Convolution op caches a few things. in qnnpackConv()
385 if (convolution->input != input || convolution->batch_size != batch_size || in qnnpackConv()
[all …]
conv-prepack.cc
11 const pytorch_qnnp_operator_t convolution, in PrePackConvWeights() argument
15 enum pytorch_qnnp_ukernel_type ukernel_type = convolution->ukernel_type; in PrePackConvWeights()
16 const uint32_t kernel_width = convolution->kernel_width; in PrePackConvWeights()
17 const uint32_t kernel_height = convolution->kernel_height; in PrePackConvWeights()
20 convolution->kernel_depth ? convolution->kernel_depth : 1; in PrePackConvWeights()
21 const uint32_t groups = convolution->groups; in PrePackConvWeights()
23 if (convolution->transpose && in PrePackConvWeights()
165 (convolution->group_output_channels + (nr - 1)) & -nr; in PrePackConvWeights()
167 (convolution->group_input_channels + (kr - 1)) & -kr; in PrePackConvWeights()
184 convolution->group_output_channels, in PrePackConvWeights()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/legacy_tf_layers/
convolutional.py
29 """1D convolution layer (e.g. temporal convolution).
31 This layer creates a convolution kernel that is convolved
39 of filters in the convolution).
41 length of the 1D convolution window.
43 specifying the stride length of the convolution.
56 the dilation rate to use for dilated convolution.
62 kernel_initializer: An initializer for the convolution kernel.
65 kernel_regularizer: Optional regularizer for the convolution kernel.
140 """Functional interface for 1D convolution layer (e.g. temporal convolution).
142 This layer creates a convolution kernel that is convolved
[all …]
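The docstring matched above documents the legacy tf.layers wrapper for a 1D (temporal) convolution and its arguments (filters, kernel_size, strides, dilation_rate, initializers, regularizers). A minimal usage sketch of the corresponding public Keras layer, tf.keras.layers.Conv1D, follows; the concrete shapes and hyperparameters are illustrative assumptions, not values taken from the matched file.

```python
# Sketch of a temporal (1D) convolution layer using the public tf.keras API,
# rather than the legacy tf.python.keras.legacy_tf_layers wrapper matched above.
import tensorflow as tf

# filters: number of output channels; kernel_size: length of the 1D window;
# strides and dilation_rate mirror the arguments described in the docstring.
conv1d = tf.keras.layers.Conv1D(
    filters=32,
    kernel_size=5,
    strides=1,
    dilation_rate=1,
    padding="same",
    kernel_initializer="glorot_uniform",
    kernel_regularizer=tf.keras.regularizers.l2(1e-4),
)

x = tf.random.normal([8, 100, 16])   # (batch, time steps, input channels)
y = conv1d(x)                        # -> (8, 100, 32) with padding="same"
```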
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/
convolution_group_converter.cc
42 // ConvolutionVisitor traverses the HLO computation and rewrites Convolution
52 Status HandleConvolution(HloInstruction* convolution) override;
54 Status HandleBatchGroupCount(HloInstruction* convolution);
62 // Returns whether any convolution ops were rewritten.
129 // Create a mask for grouped convolution that will make a normal convolution
130 // produce the same results as a grouped convolution. For a [2, 1, 6]
204 Status ConvolutionVisitor::HandleBatchGroupCount(HloInstruction* convolution) { in HandleBatchGroupCount() argument
205 auto dim_numbers = convolution->convolution_dimension_numbers(); in HandleBatchGroupCount()
206 auto activation = convolution->mutable_operand(0); in HandleBatchGroupCount()
207 auto filter = convolution->mutable_operand(1); in HandleBatchGroupCount()
[all …]
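The ConvolutionVisitor comments above describe rewriting a grouped convolution as a normal convolution whose effect is restricted by a mask. One way to see the equivalence, sketched in PyTorch (my own illustration of the algebra, not the XLA pass): a grouped convolution equals an ungrouped one whose weight tensor is zero outside the block-diagonal blocks.

```python
# Hypothetical sketch (PyTorch, not XLA): a grouped convolution is equivalent
# to a normal convolution with a block-diagonal ("masked") weight tensor.
import torch
import torch.nn.functional as F

groups, cin, cout, k = 2, 4, 6, 3
x = torch.randn(1, cin, 8, 8)
w_grouped = torch.randn(cout, cin // groups, k, k)   # grouped weight layout

y_grouped = F.conv2d(x, w_grouped, groups=groups)

# Build the equivalent full (ungrouped) weight: copy each group's block onto
# the block diagonal and leave everything else zero.
w_full = torch.zeros(cout, cin, k, k)
out_per_group, in_per_group = cout // groups, cin // groups
for g in range(groups):
    rows = slice(g * out_per_group, (g + 1) * out_per_group)
    cols = slice(g * in_per_group, (g + 1) * in_per_group)
    w_full[rows, cols] = w_grouped[rows]

y_full = F.conv2d(x, w_full, groups=1)
assert torch.allclose(y_grouped, y_full, atol=1e-4)
```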
space_to_batch_converter.cc
62 // ConvolutionVisitor traverses the HLO computation and rewrites Convolution
68 Status PerformSpaceToBatchOnConvolution(HloInstruction* convolution);
70 // Struct containing details about a convolution.
79 // performing space-to-batch on a convolution.
80 ConvDetails GetConvolutionDetails(HloInstruction* convolution,
87 // Returns if the convolution is a forward window dilated convolution.
88 bool IsForwardWindowDilatedConv(HloInstruction* convolution,
162 // Perform space-to-batch propagation on the convolution. Assumes the
164 Status PropagateOnConv(HloInstruction* convolution);
175 // Perform space-to-batch propagation on the backprop filter convolution.
[all …]
transpose_folding.cc
42 const HloInstruction& convolution, in CanFoldOperandsIntoConvolution() argument
45 if (HloOpcode::kConvolution != convolution.opcode()) { in CanFoldOperandsIntoConvolution()
50 for (int64_t i = 0; i < convolution.operand_count(); ++i) { in CanFoldOperandsIntoConvolution()
51 auto& operand = *convolution.operand(i); in CanFoldOperandsIntoConvolution()
57 return transposable_conv_operands(convolution, operand_set); in CanFoldOperandsIntoConvolution()
111 // Folds the operands of `convolution` that are foldable transposes.
112 // `computation` is the parent HLO computation of `convolution`.
116 auto& convolution = *pair.first; in FoldTransposeIntoConvolution() local
124 convolution.convolution_dimension_numbers(); in FoldTransposeIntoConvolution()
130 HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx); in FoldTransposeIntoConvolution()
[all …]
space_to_batch_converter_test.cc
42 ROOT %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3}, in TEST_F()
61 EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution()); in TEST_F()
77 %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3}, in TEST_F()
79 ROOT tr = bf16[1,256,256,32] transpose(%convolution), dimensions={0,2,1,3} in TEST_F()
100 EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution()); in TEST_F()
115 %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3}, in TEST_F()
118 %tuple = (bf16[1,256,256,32], bf16[3])tuple(%convolution, %constant) in TEST_F()
143 ROOT %convolution = bf16[2,256,256,32] convolution(%p0, %p1), window={size=3x3}, in TEST_F()
163 %convolution.1 = bf16[64,76,76,1]{0,2,1,3} convolution( in TEST_F()
166 ROOT custom-call.5079 = bf16[64,152,152,1]{0,2,1,3} custom-call(%convolution.1), in TEST_F()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_conv_rewriter.cc
56 // or convolution (all dimensions reversed). in CanImplementAsGpuForwardConv()
69 VLOG(2) << "Trying to match convolution backward filter."; in MatchBackwardFilter()
75 << " is a forward convolution. All grouped backward filters are " in MatchBackwardFilter()
87 // Backward filter convolution is implemented in XLA as the forward in MatchBackwardFilter()
88 // convolution of padded activations and dilated gradients. Padding on in MatchBackwardFilter()
90 // of the forward convolution. in MatchBackwardFilter()
95 // Convolution in MatchBackwardFilter()
99 // Step 2: match paddings and dimension numbers of the forward convolution. in MatchBackwardFilter()
113 VLOG(1) << "Forward convolution's window " in MatchBackwardFilter()
119 VLOG(1) << "Forward convolution's window " in MatchBackwardFilter()
[all …]
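The MatchBackwardFilter comments above rest on the identity that the filter gradient of a convolution is itself a (forward) convolution: a correlation of the activations with the output gradients, with the gradients additionally dilated and the activations padded when the stride exceeds 1. A 1D, stride-1 NumPy sketch of that identity, as my own illustration rather than the XLA rewrite:

```python
# Sketch (NumPy, 1D, stride 1, no padding): the gradient of the loss w.r.t. the
# filter equals the "valid" correlation of the input with the output gradient,
# i.e. the backward-filter pass is itself a forward convolution.
import numpy as np

x = np.random.randn(10)                      # activations
w = np.random.randn(3)                       # filter
g = np.random.randn(len(x) - len(w) + 1)     # upstream gradient dL/dy

# Forward pass (cross-correlation, as ML frameworks define "convolution"):
# y[i] = sum_k x[i + k] * w[k]
y = np.correlate(x, w, mode="valid")

# dL/dw[k] = sum_i g[i] * x[i + k]  ==  valid correlation of x with g
dw = np.correlate(x, g, mode="valid")

# Numerical check against finite differences (L = y . g is linear in w).
eps = 1e-6
dw_fd = np.zeros_like(w)
for k in range(len(w)):
    w_pert = w.copy()
    w_pert[k] += eps
    dw_fd[k] = (np.correlate(x, w_pert, mode="valid") @ g - y @ g) / eps
assert np.allclose(dw, dw_fd, atol=1e-4)
```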
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/options/
conv.h
25 /// Options for a `D`-dimensional convolution or convolution transpose module.
41 /// The number of output channels the convolution should produce.
46 /// For a `D`-dim convolution, must be a single number or a list of `D`
51 /// The stride of the convolution.
52 /// For a `D`-dim convolution, must be a single number or a list of `D`
58 /// For a `D`-dim convolution, must be a single number or a list of `D`
69 /// For a `D`-dim convolution, must be a single number or a list of `D`
80 /// For a `D`-dim convolution, must be a single number or a list of `D`
85 /// The number of convolution groups.
102 /// Options for a `D`-dimensional convolution module.
[all …]
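The header above documents the options struct used by the C++ frontend's D-dimensional convolution modules (output channels, kernel size, stride, padding, dilation, groups). A hedged sketch of how the same options surface in the Python API, torch.nn.Conv2d; the concrete values are illustrative assumptions rather than defaults from the header.

```python
# Sketch of the corresponding Python-side options (torch.nn.Conv2d); the fields
# of the C++ ConvOptions documented above map onto these keyword arguments.
import torch
import torch.nn as nn

conv = nn.Conv2d(
    in_channels=16,
    out_channels=32,   # number of output channels the convolution produces
    kernel_size=3,     # a single number or a list of D numbers for a D-dim conv
    stride=1,
    padding=1,
    dilation=1,
    groups=4,          # number of convolution groups
    bias=True,
)

x = torch.randn(2, 16, 28, 28)
print(conv(x).shape)   # torch.Size([2, 32, 28, 28])
```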
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/
convolutional.py
15 """Keras convolution layers and image transformation layers."""
47 """Abstract N-D convolution layer (private, used as implementation base).
49 This layer creates a convolution kernel that is convolved
59 rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
61 of filters in the convolution). Could be "None", eg in the case of
62 depth wise convolution.
64 length of the convolution window.
66 specifying the stride length of the convolution.
80 the dilation rate to use for dilated convolution.
91 kernel_initializer: An initializer for the convolution kernel. If None, the
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tensorflow/g3doc/
space_to_depth.md
9 convolution in the new MLIR bridge to improve MXU efficiency of low batch size
21 transposed to the shape that the convolution emitter expects. The input also
23 convolution efficient. Although a 2x2 space-to-depth transform works only when
24 the first convolution has a stride of 2, many image models, ResNet-like in
25 particular, have a stride-2 convolution in the first layer.
28 speedup and reduce memory usage in the first convolution.
30 The first convolution in many image models, including ResNet or ResNet-like, is
31 a (kernel=7, stride=2) 2D convolution. The input of the convolution is images,
32 which usually has RGB channels. The input of this first convolution is of shape
35 convolution's input to [batch\_size, height // stride, width // stride, 3 \*
[all …]
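The doc above describes applying a 2x2 space-to-depth transform to the input of the first stride-2 convolution, turning a [batch, height, width, 3] image into [batch, height // 2, width // 2, 3 * 4]. A small NumPy sketch of that reshape, as an illustration of the transform rather than the MLIR bridge code:

```python
# Sketch of a 2x2 space-to-depth transform in NumPy: each 2x2 spatial block is
# folded into the channel dimension, so an NHWC tensor of shape
# [batch, H, W, C] becomes [batch, H // 2, W // 2, C * 4].
import numpy as np

def space_to_depth(x, block=2):
    n, h, w, c = x.shape
    x = x.reshape(n, h // block, block, w // block, block, c)
    x = x.transpose(0, 1, 3, 2, 4, 5)          # move the block dims next to C
    return x.reshape(n, h // block, w // block, block * block * c)

images = np.random.randn(8, 224, 224, 3)       # [batch, height, width, RGB]
out = space_to_depth(images)
print(out.shape)                               # (8, 112, 112, 12)
```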
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
QnnpackUtils.h
200 pytorch_qnnp_operator_t convolution{nullptr}; in PackedConvWeightsQnnp()
202 convolution = static_cast<pytorch_qnnp_operator_t>( in PackedConvWeightsQnnp()
204 if (convolution == nullptr) { in PackedConvWeightsQnnp()
212 convolution); in PackedConvWeightsQnnp()
215 convolution->ukernel_type = ukernel_type; in PackedConvWeightsQnnp()
216 convolution->groups = groups; in PackedConvWeightsQnnp()
217 convolution->group_input_channels = group_input_channels; in PackedConvWeightsQnnp()
218 convolution->group_output_channels = group_output_channels; in PackedConvWeightsQnnp()
219 convolution->kernel_depth = kernel_depth; in PackedConvWeightsQnnp()
220 convolution->kernel_height = kernel_height; in PackedConvWeightsQnnp()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/
verifier_conv_op.mlir
5 // Valid: Generic convolution
9 %result = "mhlo.convolution"(%arg0, %arg1) {
32 // Valid: Test convolution i8xi8 -> i32.
36 %result = "mhlo.convolution"(%arg0, %arg1) {
61 // CHECK: mhlo.convolution
67 %0 = mhlo.convolution(%arg0, %arg1)
86 %1 = "mhlo.convolution"(%arg0, %arg1) {
104 …// expected-error@+1 {{expects convolution arguments to have same number of dimensions. Got: 'tens…
105 %0 = mhlo.convolution(%arg0, %arg1)
122 …// expected-error@+1 {{expects convolution arguments to have >= 2 dimensions. Got: 'tensor<1xf32>'…
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/stablehlo/tests/
verify_conv.mlir
5 // Valid: Generic convolution
9 %result = "stablehlo.convolution"(%arg0, %arg1) {
32 // Valid: Test convolution i8xi8 -> i32.
36 %result = "stablehlo.convolution"(%arg0, %arg1) {
61 // CHECK: stablehlo.convolution
67 %0 = stablehlo.convolution(%arg0, %arg1)
86 %1 = "stablehlo.convolution"(%arg0, %arg1) {
104 …// expected-error@+1 {{expects convolution arguments to have same number of dimensions. Got: 'tens…
105 %0 = stablehlo.convolution(%arg0, %arg1)
122 …// expected-error@+1 {{expects convolution arguments to have >= 2 dimensions. Got: 'tensor<1xf32>'…
[all …]
/aosp_15_r20/external/ComputeLibrary/examples/
graph_inception_v3.cpp
83 .set_name("Conv2d_1a_3x3/convolution") in do_setup()
96 .set_name("Conv2d_2a_3x3/convolution") in do_setup()
110 .set_name("Conv2d_2b_3x3/convolution") in do_setup()
126 .set_name("Conv2d_3b_1x1/convolution") in do_setup()
140 .set_name("Conv2d_4a_3x3/convolution") in do_setup()
195 .set_name("Logits/Conv2d_1c_1x1/convolution") in do_setup()
250 .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/convolution") in get_inception_node_A()
266 .set_name(param_path + "/Branch_1/Conv2d" + conv_id0 + "1x1/convolution") in get_inception_node_A()
280 .set_name(param_path + "/Branch_1/Conv2d" + conv_id1 + "5x5/convolution") in get_inception_node_A()
296 .set_name(param_path + "/Branch_2/Conv2d_0a_1x1/convolution") in get_inception_node_A()
[all …]
graph_inception_resnet_v2.cpp
97 .set_name("Conv2d_1a_3x3/convolution") in do_setup()
110 .set_name("Conv2d_2a_3x3/convolution") in do_setup()
123 .set_name("Conv2d_2b_3x3/convolution") in do_setup()
138 .set_name("Conv2d_3b_1x1/convolution") in do_setup()
151 .set_name("Conv2d_4a_3x3/convolution") in do_setup()
175 .set_name("Conv2d_7b_1x1/convolution") in do_setup()
226 .set_name("Mixed_5b/Branch_0/Conv2d_1x1/convolution") in block_mixed_5b()
241 .set_name("Mixed_5b/Branch_1/Conv2d_0a_1x1/convolution") in block_mixed_5b()
253 .set_name("Mixed_5b/Branch_1/Conv2d_0b_5x5/convolution") in block_mixed_5b()
268 .set_name("Mixed_5b/Branch_2/Conv2d_0a_1x1/convolution") in block_mixed_5b()
[all …]
/aosp_15_r20/external/ComputeLibrary/arm_compute/graph/nodes/
FusedConvolutionWithPostOpNode.h
35 /** Convolution node */
41 * @param[in] info Convolution layer attributes
43 * @param[in] method (Optional) Convolution method to use
52 /** Sets the convolution layer method to use
54 * @param[in] method Method to use for convolution
57 /** Convolution layer method accessor
59 * @note This is an indication on which convolution layer implementation to use,
62 * @return Convolution layer method to be used by the node
67 * @param[in] hint Hint to use for convolution
75 /** Convolution metadata accessor
[all …]
ConvolutionLayerNode.h
33 /** Convolution Layer node */
39 * @param[in] info Convolution layer attributes
41 * @param[in] method (Optional) Convolution method to use
50 /** Sets the convolution layer method to use
52 * @param[in] method Method to use for convolution
55 /** Convolution layer method accessor
57 * @note This is an indication on which convolution layer implementation to use,
60 * @return Convolution layer method to be used by the node
65 * @param[in] hint Hint to use for convolution
73 /** Convolution metadata accessor
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/common/transformations/
fuse_mul_to_conv.h
27 // Fuse Multiply Scalar or Multiply Broadcast after Convolution(Convolution2D,
29 // convolution.
32 // Fuse Multiply Scalar or Multiply Broadcast before Convolution(Convolution2D,
34 // convolution.
37 // Modify Convolution2DAttributes so that after making convolution with
38 // modified attributes we will have the same result as convolution
44 // convolution with modified attributes we will have the same result as depth
45 // wise convolution with old attributes and following multiply operation.
50 // Modify ConvolutionTransposedAttributes so that after making convolution
52 // convolution transposed with old attributes and following multiply operation.
[all …]
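The header above describes folding an elementwise multiply that follows (or precedes) a convolution into the convolution's own attributes. A sketch of the "multiply after convolution" case in PyTorch, illustrating the algebra rather than the TFLite GPU delegate implementation: scaling each output channel's weights and bias by the multiplier reproduces the convolution followed by a per-channel multiply.

```python
# Sketch (PyTorch) of fusing a per-channel multiply that follows a convolution
# into the convolution itself: scale each output channel's weights and bias.
import torch
import torch.nn as nn

conv = nn.Conv2d(8, 16, kernel_size=3, padding=1, bias=True)
scale = torch.randn(16)                         # per-output-channel multiplier

x = torch.randn(1, 8, 10, 10)
reference = conv(x) * scale.view(1, -1, 1, 1)   # convolution followed by multiply

fused = nn.Conv2d(8, 16, kernel_size=3, padding=1, bias=True)
with torch.no_grad():
    fused.weight.copy_(conv.weight * scale.view(-1, 1, 1, 1))
    fused.bias.copy_(conv.bias * scale)

assert torch.allclose(fused(x), reference, atol=1e-4)
```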
/aosp_15_r20/external/eigen/bench/tensors/
tensor_benchmarks_cpu.cc
146 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 4);
147 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 8);
148 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 12);
150 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 4);
151 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 8);
152 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 12);
154 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 4);
155 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 8);
156 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 12);
158 BM_FuncWithKernelDimsCPU(convolution, 4, 7, 4);
[all …]
/aosp_15_r20/external/pytorch/torch/nn/utils/
memory_format.py
19 convolution in cuDNN, as it is beneficial to run convolution in NHWC,
22 Hence our strategy here is to convert only the weight of convolution to
24 1. Fast convolution kernels will be used, the benefit of which could
29 The optimal case is that, layers between convolution layers are channels
31 encounters the first convolution layer and stay in that memory format.
34 In case where a channels last incompatible layer is between convolution
38 another convolution layer. There's no point in propagating that
44 immediately before a convolution.
93 convolution in cuDNN, as it is beneficial to run convolution in NDHWC,
96 Hence our strategy here is to convert only the weight of convolution to
[all …]
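The utility docstring above explains converting only the convolution weights to the channels-last memory format so cuDNN can select NHWC kernels while letting activations follow. A hedged sketch of the underlying call (torch.channels_last is a real memory format; the module traversal below is my own minimal illustration, not the torch.nn.utils helper itself):

```python
# Minimal sketch of converting convolution weights to channels-last memory
# format, as the docstring above describes. Illustration only; not a copy of
# the torch.nn.utils.memory_format implementation.
import torch
import torch.nn as nn

def convert_conv_weights_to_channels_last(model: nn.Module) -> nn.Module:
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            # Only the weight's memory layout changes; shape and values do not.
            module.weight.data = module.weight.data.to(
                memory_format=torch.channels_last)
    return model

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
convert_conv_weights_to_channels_last(model)
print(model[0].weight.is_contiguous(memory_format=torch.channels_last))  # True
```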
/aosp_15_r20/external/ComputeLibrary/src/core/CL/kernels/
CLFuseBatchNormalizationKernel.h
34 /** OpenCL kernel to fuse the batch normalization node to a preceding convolution node */
52 …* @param[in] input_weights Input weights tensor for convolution or depthwise convolution layer. D…
57 …param[in] input_bias (Optional) Input bias tensor for convolution or depthwise convolution lay…
63 * @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to CONVOLUTION.
67 …t epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
71 …* @param[in] input_weights Input weights tensor for convolution or depthwise convolution layer.…
76 …ram[in] input_bias (Optional) Input bias tensor for convolution or depthwise convolution lay…
82 … * @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to CONVOLUTION.
86 …t epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
89 …* @param[in] input_weights Input weights tensor info for convolution or depthwise convolution laye…
[all …]
/aosp_15_r20/external/ComputeLibrary/arm_compute/runtime/CL/functions/
CLFuseBatchNormalization.h
40 /** Basic function to fuse the batch normalization node to a preceding convolution node */
68 …* @param[in] input_weights Input weights tensor for convolution or depthwise convolution layer. D…
73 …param[in] input_bias (Optional) Input bias tensor for convolution or depthwise convolution lay…
79 * @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to Convolution.
83 …t epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
87 …* @param[in] input_weights Input weights tensor for convolution or depthwise convolution layer.…
92 …ram[in] input_bias (Optional) Input bias tensor for convolution or depthwise convolution lay…
98 … * @param[in] fbn_type (Optional) Fused batch normalization type. Defaults to Convolution.
102 …t epsilon = 0.001f, FuseBatchNormalizationType fbn_type = FuseBatchNormalizationType::CONVOLUTION);
105 …* @param[in] input_weights Input weights tensor info for convolution or depthwise convolution laye…
[all …]
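Both headers above describe fusing a batch normalization node into the weights and bias of the preceding convolution. The standard algebra is W' = W * gamma / sqrt(var + eps) and b' = (b - mean) * gamma / sqrt(var + eps) + beta; the PyTorch sketch below illustrates that formula and is not the Compute Library's OpenCL implementation.

```python
# Sketch (PyTorch) of fusing batch normalization into a preceding convolution:
#   W' = W * gamma / sqrt(var + eps)
#   b' = (b - mean) * gamma / sqrt(var + eps) + beta
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
bn = nn.BatchNorm2d(8, eps=1e-3).eval()      # eval(): use running statistics

with torch.no_grad():                        # give BN non-trivial statistics
    bn.weight.uniform_(0.5, 1.5)
    bn.bias.uniform_(-0.5, 0.5)
    bn.running_mean.uniform_(-1.0, 1.0)
    bn.running_var.uniform_(0.5, 1.5)

x = torch.randn(1, 3, 16, 16)
reference = bn(conv(x))

inv_std = (bn.running_var + bn.eps).rsqrt()
fused = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
with torch.no_grad():
    fused.weight.copy_(conv.weight * (bn.weight * inv_std).view(-1, 1, 1, 1))
    fused.bias.copy_((conv.bias - bn.running_mean) * bn.weight * inv_std + bn.bias)

assert torch.allclose(fused(x), reference, atol=1e-4)
```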
/aosp_15_r20/external/ComputeLibrary/tests/validate_examples/
graph_depthwiseconvolution.cpp
50 /** Depthwise Convolution command line options used to configure the graph examples
133 conv_mode->set_help("Set convolution method"); in DepthConvolutionOptions()
180 common_params.convolution.padding_mode = padding_mode->value(); in consume_parameters()
181 common_params.convolution.padding_top = padding_top->value(); in consume_parameters()
182 common_params.convolution.padding_bottom = padding_bottom->value(); in consume_parameters()
183 common_params.convolution.padding_left = padding_left->value(); in consume_parameters()
184 common_params.convolution.padding_right = padding_right->value(); in consume_parameters()
185 common_params.convolution.padding_stride_x = stride_x->value(); in consume_parameters()
186 common_params.convolution.padding_stride_y = stride_y->value(); in consume_parameters()
187 common_params.convolution.depth_multiplier = depth_multiplier->value(); in consume_parameters()
[all …]
