/aosp_15_r20/external/pytorch/test/quantization/pt2e/

test_metadata_porting.py
     25  self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
     30  x = self.adaptive_avg_pool2d(x)
    161  annotated_partitions = OP_TO_ANNOTATOR["adaptive_avg_pool2d"](
    165  backend_string, "adaptive_avg_pool2d", annotated_partitions
    201  torch.ops.aten.adaptive_avg_pool2d.default: "BackendA_adaptive_avg_pool2d_0",
    269  annotated_partitions = OP_TO_ANNOTATOR["adaptive_avg_pool2d"](
    273  backend_string, "adaptive_avg_pool2d", annotated_partitions
    438  OP_TO_ANNOTATOR["adaptive_avg_pool2d"](gm, quantization_config)

test_duplicate_dq.py
     37  self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
     42  x = self.adaptive_avg_pool2d(x)
     71  self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
     75  w = self.adaptive_avg_pool2d(x)
    136  OP_TO_ANNOTATOR["adaptive_avg_pool2d"](gm, quantization_config)
/aosp_15_r20/external/executorch/backends/arm/quantizer/quantization_annotation/

adaptive_ang_pool2d_annotator.py
     25  @register_annotator("adaptive_avg_pool2d")
     31  """Always annotate adaptive_avg_pool2d op"""
     33  gm.graph, [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d], filter_fn
     41  or pool_node.target != torch.ops.aten.adaptive_avg_pool2d.default
     43  raise ValueError(f"{pool_node} is not an aten adaptive_avg_pool2d operator")
/aosp_15_r20/external/pytorch/aten/src/ATen/native/

AdaptiveAveragePooling.cpp
     29  TORCH_CHECK(output_size.size() == 2, "adaptive_avg_pool2d: output_size must be 2"); in adaptive_avg_pool2d_out_cpu_template()
     32  "adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got ", input.sizes()); in adaptive_avg_pool2d_out_cpu_template()
     35  "adaptive_avg_pool2d(): Expected input to have non-zero size for non-batch dimensions, " in adaptive_avg_pool2d_out_cpu_template()
    110  TORCH_CHECK(output_size.size() == 2, "adaptive_avg_pool2d: output_size must be 2"); in adaptive_avg_pool2d_symint()
    113  "adaptive_avg_pool2d: elements of output_size must be greater than or equal to 0 ", in adaptive_avg_pool2d_symint()
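The checks above spell out the operator's input contract: output_size must have exactly two elements, each greater than or equal to 0, and the input must be a 3D (C, H, W) or 4D (N, C, H, W) tensor with non-zero non-batch dimensions. A minimal sketch of what that contract allows, using only the public PyTorch API (shapes below are illustrative):

    import torch
    import torch.nn.functional as F

    x3 = torch.randn(3, 32, 32)        # 3D input: (C, H, W)
    x4 = torch.randn(2, 3, 32, 32)     # 4D input: (N, C, H, W)

    # output_size must be a pair; each element must be >= 0
    y3 = F.adaptive_avg_pool2d(x3, (7, 7))    # -> (3, 7, 7)
    y4 = F.adaptive_avg_pool2d(x4, (1, 1))    # -> (2, 3, 1, 1)

    # A 2D input trips the "Expected 3D or 4D tensor" check:
    # F.adaptive_avg_pool2d(torch.randn(32, 32), (7, 7))  # raises RuntimeError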
Pooling.cpp
     13  #include <ATen/ops/adaptive_avg_pool2d.h>
     47  auto output = at::adaptive_avg_pool2d( in adaptive_avg_pool1d()
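As the hit inside adaptive_avg_pool1d() shows, Pooling.cpp implements the 1D variant on top of the 2D kernel by adding a dummy spatial dimension. A rough Python equivalent of that reduction (shapes chosen for illustration, not taken from the file):

    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 4, 50)          # (N, C, L)

    # 1D pooling expressed via the 2D kernel: add a size-1 height dimension,
    # pool to (1, out), then squeeze the dummy dimension away again.
    via_2d = F.adaptive_avg_pool2d(x.unsqueeze(-2), (1, 10)).squeeze(-2)
    direct = F.adaptive_avg_pool1d(x, 10)

    assert torch.allclose(via_2d, direct)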
/aosp_15_r20/external/executorch/backends/arm/test/ops/

test_mean_dim.py
     45  self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))
     48  return self.adaptive_avg_pool2d(x)
     87  .check(["torch.ops.aten.adaptive_avg_pool2d.default"])
    108  .check_count({"torch.ops.aten.adaptive_avg_pool2d.default": 1})
    132  .check(["torch.ops.aten.adaptive_avg_pool2d.default"])
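test_mean_dim.py exercises AdaptiveAvgPool2d((1, 1)), which is simply a mean over the two spatial dimensions; a quick check of that equivalence in plain PyTorch:

    import torch

    x = torch.randn(2, 3, 8, 8)

    pooled = torch.nn.AdaptiveAvgPool2d((1, 1))(x)     # (2, 3, 1, 1)
    as_mean = x.mean(dim=(-2, -1), keepdim=True)       # same values

    assert torch.allclose(pooled, as_mean)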
test_conv_combos.py
     98  self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
    105  return self.adaptive_avg_pool2d(x)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/

Pool.cpp
     13  Tensor adaptive_avg_pool2d( in adaptive_avg_pool2d() function
     18  "Vulkan adaptive_avg_pool2d expects 4-dimensional input!"); in adaptive_avg_pool2d()
     66  VK_KERNEL(adaptive_avg_pool2d), in adaptive_avg_pool2d()
    288  TORCH_FN(adaptive_avg_pool2d)); in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/torch/ao/quantization/quantizer/

xnnpack_quantizer_utils.py
    636  @register_annotator("adaptive_avg_pool2d")
    642  """Always annotate adaptive_avg_pool2d op"""
    644  gm.graph, [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d], filter_fn
    652  or pool_node.target != torch.ops.aten.adaptive_avg_pool2d.default
    654  raise ValueError(f"{pool_node} is not an aten adaptive_avg_pool2d operator")
   1012  torch.ops.aten.adaptive_avg_pool2d.default,

xnnpack_quantizer.py
     79  "adaptive_avg_pool2d": [
     81  [F.adaptive_avg_pool2d],
    255  "adaptive_avg_pool2d",
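xnnpack_quantizer.py lists "adaptive_avg_pool2d" among the operator patterns its annotators handle, so the op picks up quantize/dequantize pairs in the PT2E export flow. A sketch of that flow; the toy module M is an illustration, and the export entry point is an assumption since its name has changed across PyTorch releases:

    import torch
    from torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e
    from torch.ao.quantization.quantizer.xnnpack_quantizer import (
        XNNPACKQuantizer,
        get_symmetric_quantization_config,
    )

    class M(torch.nn.Module):                  # hypothetical toy model
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 8, 3)
            self.pool = torch.nn.AdaptiveAvgPool2d((1, 1))

        def forward(self, x):
            return self.pool(self.conv(x))

    example_inputs = (torch.randn(1, 3, 32, 32),)
    # Export entry point varies by release (older versions used capture_pre_autograd_graph).
    gm = torch.export.export_for_training(M(), example_inputs).module()

    quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())
    prepared = prepare_pt2e(gm, quantizer)
    prepared(*example_inputs)                  # calibration pass
    converted = convert_pt2e(prepared)         # adaptive_avg_pool2d ends up annotated/quantized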
x86_inductor_quantizer.py
     88  torch.ops.aten.adaptive_avg_pool2d.default,
    211  [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d],
    212  torch.ops.aten.adaptive_avg_pool2d.default,
/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/ops/

MetalPooling.mm
     72  static Tensor adaptive_avg_pool2d(const Tensor& input, IntArrayRef output_size) {
    106  m.impl(TORCH_SELECTIVE_NAME("aten::adaptive_avg_pool2d"), TORCH_FN(adaptive_avg_pool2d));
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/

pooling.h
    568  inline Tensor adaptive_avg_pool2d( in adaptive_avg_pool2d() function
    573  return torch::adaptive_avg_pool2d(input, output_size_); in adaptive_avg_pool2d()
    579  /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d
    589  /// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
    591  inline Tensor adaptive_avg_pool2d( in adaptive_avg_pool2d() function
    594  return detail::adaptive_avg_pool2d(input, options.output_size()); in adaptive_avg_pool2d()
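The C++ usage example in pooling.h passes a single integer (AdaptiveAvgPool2dFuncOptions(3)), which is broadcast to a square target; the Python functional behaves the same way. A small check of that behaviour, assuming nothing beyond the public torch.nn.functional API:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 28, 28)

    # A scalar output_size is shorthand for a square target, i.e. 3 -> (3, 3)
    y = F.adaptive_avg_pool2d(x, 3)
    assert y.shape == (1, 3, 3, 3)
    assert torch.equal(y, F.adaptive_avg_pool2d(x, (3, 3)))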
/aosp_15_r20/external/executorch/backends/arm/quantizer/

arm_quantizer.py
     74  "adaptive_avg_pool2d": [
     76  [F.adaptive_avg_pool2d],
    265  "adaptive_avg_pool2d",
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/

functional.py
     20  "adaptive_avg_pool2d",
    133  def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor: function
    148  "Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!"
    150  return torch.nn.functional.adaptive_avg_pool2d(input, output_size)
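The quantized functional wrapper shown above only forwards to torch.nn.functional.adaptive_avg_pool2d, but first insists the input already be a quantized tensor. A short sketch of a valid call; the scale and zero_point are arbitrary illustration values:

    import torch
    from torch.ao.nn.quantized import functional as qF

    x = torch.randn(1, 3, 8, 8)
    qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)

    # Works on a quantized tensor; passing the float `x` instead raises
    # "Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!"
    qy = qF.adaptive_avg_pool2d(qx, (1, 1))
    print(qy.shape, qy.dtype)   # torch.Size([1, 3, 1, 1]) torch.quint8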
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

AdaptiveAveragePooling.cu
    449  TORCH_CHECK(output_size.size() == 2, "adaptive_avg_pool2d: output_size must be 2"); in adaptive_avg_pool2d_out_cuda_template()
    452  "adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got ", input.sizes()); in adaptive_avg_pool2d_out_cuda_template()
    455  "adaptive_avg_pool2d(): Expected input to have non-zero size for non-batch dimensions, " in adaptive_avg_pool2d_out_cuda_template()
    465  "adaptive_avg_pool2d(): Expected 4D tensor, but got ", in adaptive_avg_pool2d_out_cuda_template()
/aosp_15_r20/external/pytorch/test/

test_mkldnn.py
    929  adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
    932  y1 = adaptive_avg_pool2d(x1)
    933  y2 = adaptive_avg_pool2d(x2).to_dense()
    950  adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
    953  y = adaptive_avg_pool2d(x.to_mkldnn()).to_dense()
    954  y_bf16 = adaptive_avg_pool2d(x.to_mkldnn()).to_dense(torch.float32)
    960  lambda: adaptive_avg_pool2d(x_bf16.to_mkldnn()))
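test_mkldnn.py runs the pooling module on tensors converted to the MKLDNN (oneDNN) layout and compares the result against the dense path. A condensed sketch of the same round trip, assuming a CPU build with MKLDNN support:

    import torch

    pool = torch.nn.AdaptiveAvgPool2d(7)
    x = torch.randn(1, 3, 24, 24)

    y_dense = pool(x)
    y_mkldnn = pool(x.to_mkldnn()).to_dense()   # pool in MKLDNN layout, convert back

    assert torch.allclose(y_dense, y_mkldnn, atol=1e-6)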
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/

AdaptivePooling.mm
     12  #include <ATen/ops/adaptive_avg_pool2d.h>
     66  … "adaptive_avg_pool2d(): Expected input to have non-zero size for non-batch dimensions, "
/aosp_15_r20/external/pytorch/test/jit/

test_dtype_analysis.py
     40  "nn.functional.adaptive_avg_pool2d",
    270  return torch._C._nn.adaptive_avg_pool2d(input, output_size)
/aosp_15_r20/external/pytorch/torch/ao/ns/fx/

mappings.py
     76  F.adaptive_avg_pool2d,
    544  F.adaptive_avg_pool2d,
/aosp_15_r20/external/pytorch/torch/csrc/lazy/core/

shape_inference.cpp
    901  output_size.size() == 2, "adaptive_avg_pool2d: output_size must be 2"); in compute_shape__adaptive_avg_pool2d()
    904  "adaptive_avg_pool2d: elements of output_size must be greater than or equal to 0 ", in compute_shape__adaptive_avg_pool2d()
    914  "adaptive_avg_pool2d(): Expected self to have non-zero size for non-batch dimensions, " in compute_shape__adaptive_avg_pool2d()
    924  "adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got ", in compute_shape__adaptive_avg_pool2d()
/aosp_15_r20/external/executorch/backends/example/example_operators/

TARGETS
      8  "adaptive_avg_pool2d.py",

ops.py
      9  from executorch.backends.example.example_operators.adaptive_avg_pool2d import (
/aosp_15_r20/external/pytorch/torch/_functorch/

top_operators_github_usage.py
    288  ("nn.functional.adaptive_avg_pool2d", 633),
    419  ("nn.AdaptiveAvgPool2d", 59071, "nn.functional.adaptive_avg_pool2d"),
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/options/

pooling.h
    315  /// Options for `torch::nn::functional::adaptive_avg_pool2d`.
    323  /// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));