// aten/src/ATen/native/Pooling.cpp
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/TensorUtils.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/xnnpack/Engine.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/adaptive_avg_pool1d_native.h>
#include <ATen/ops/adaptive_avg_pool2d.h>
#include <ATen/ops/adaptive_max_pool1d_native.h>
#include <ATen/ops/adaptive_max_pool2d.h>
#include <ATen/ops/avg_pool1d_native.h>
#include <ATen/ops/avg_pool2d.h>
#include <ATen/ops/max_pool1d_with_indices_native.h>
#include <ATen/ops/max_pool2d_native.h>
#include <ATen/ops/max_pool2d_with_indices.h>
#include <ATen/ops/max_pool3d_native.h>
#include <ATen/ops/max_pool3d_with_indices.h>
#include <ATen/ops/mkldnn_max_pool2d.h>
#include <ATen/ops/mkldnn_max_pool3d.h>
#include <ATen/ops/quantized_max_pool2d.h>
#include <ATen/ops/quantized_max_pool3d.h>
#endif

#include <tuple>

namespace at::native {

// Shared helper: a 1-d pooling argument (kernel_size, stride, padding,
// dilation, output_size) must contain exactly one int.
static void check1d(
    const char* function_name,
    const char* argument_name,
    IntArrayRef x) {
  TORCH_CHECK(
      x.size() == 1,
      function_name, "() argument '", argument_name,
      "' should contain one int (got ", x.size(), ")");
}

// adaptive_avg_pool1d: implemented by unsqueezing a height dimension of size
// 1, dispatching to adaptive_avg_pool2d, and squeezing that dimension back out.
Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size) {
  checkDimRange("adaptive_avg_pool1d", TensorArg(self, "self", 1), 2, 4 /* exclusive */);
  check1d("adaptive_avg_pool1d", "output_size", output_size);

  auto output = at::adaptive_avg_pool2d(
      self.unsqueeze(-2),
      {1, output_size[0]});

  return output.squeeze(-2);
}
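// Illustrative usage (a sketch, not part of the upstream source); assumes a
// float CPU tensor with layout (N, C, L):
//   at::Tensor x = at::rand({2, 3, 10});
//   at::Tensor y = at::adaptive_avg_pool1d(x, {5});   // y: (2, 3, 5)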

// adaptive_max_pool1d: validates that all non-batch dimensions are non-empty,
// then dispatches to adaptive_max_pool2d on the unsqueezed input and squeezes
// both the values and the indices back to 1-d.
std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size) {
  checkDimRange("adaptive_max_pool1d", TensorArg(self, "self", 1), 2, 4 /* exclusive */);
  check1d("adaptive_max_pool1d", "output_size", output_size);

  int ndim = self.ndimension();
  for (const auto i : c10::irange(1, ndim)) {
    TORCH_CHECK(
        self.sym_size(i) > 0,
        "adaptive_max_pool1d(): ",
        "Expected input to have non-zero size for non-batch dimensions, "
        "but input has sizes ",
        self.sym_sizes(),
        " with dimension ",
        i,
        " being empty");
  }

  auto [output, indices] = at::adaptive_max_pool2d(
      self.unsqueeze(-2),
      {1, output_size[0]});

  return std::make_tuple(output.squeeze(-2), indices.squeeze(-2));
}
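// Illustrative usage (a sketch, not part of the upstream source); both outputs
// keep the input's batch and channel dimensions:
//   at::Tensor x = at::rand({2, 3, 10});
//   auto [values, indices] = at::adaptive_max_pool1d(x, {4});   // both: (2, 3, 4)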

// max_pool1d_with_indices: dispatches to max_pool2d_with_indices on the
// unsqueezed input. An empty stride defaults to kernel_size. Dimension names
// are suppressed for the 2-d call and propagated to both outputs afterwards.
std::tuple<Tensor, Tensor> max_pool1d_with_indices(
    const Tensor& self,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation,
    bool ceil_mode) {
  if (stride.empty()) {
    stride = kernel_size;
  }
  checkDimRange("max_pool1d", TensorArg(self, "self", 1), 2, 4 /* exclusive */);
  check1d("max_pool1d", "kernel_size", kernel_size);
  check1d("max_pool1d", "stride", stride);
  check1d("max_pool1d", "padding", padding);
  check1d("max_pool1d", "dilation", dilation);

  NoNamesGuard guard;

  auto [output, indices] = at::max_pool2d_with_indices(
      self.unsqueeze(-2),
      {1, kernel_size[0]},
      {1, stride[0]},
      {0, padding[0]},
      {1, dilation[0]},
      ceil_mode);

  output  = output.squeeze(-2);
  indices = indices.squeeze(-2);

  guard.reset();
  namedinference::propagate_names(output, self);
  namedinference::propagate_names(indices, self);

  return std::make_tuple(output, indices);
}
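// Illustrative usage (a sketch, not part of the upstream source); with L = 10,
// kernel 3, stride 2, padding 1, dilation 1 the output length is
// (10 + 2 - 2 - 1) / 2 + 1 = 5:
//   at::Tensor x = at::rand({2, 3, 10});
//   auto [out, idx] = at::max_pool1d_with_indices(
//       x, /*kernel_size=*/{3}, /*stride=*/{2}, /*padding=*/{1},
//       /*dilation=*/{1}, /*ceil_mode=*/false);   // out, idx: (2, 3, 5)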

// avg_pool1d: dispatches to avg_pool2d on the unsqueezed input and squeezes
// the result back to 1-d. An empty stride defaults to kernel_size.
Tensor avg_pool1d(
    const Tensor& self,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    bool ceil_mode,
    bool count_include_pad) {
  if (stride.empty()) {
    stride = kernel_size;
  }
  checkDimRange("avg_pool1d", TensorArg(self, "self", 1), 2, 4 /* exclusive */);
  check1d("avg_pool1d", "kernel_size", kernel_size);
  check1d("avg_pool1d", "stride", stride);
  check1d("avg_pool1d", "padding", padding);

  auto output = at::avg_pool2d(
      self.unsqueeze(-2),
      {1, kernel_size[0]},
      {1, stride[0]},
      {0, padding[0]},
      ceil_mode,
      count_include_pad);

  return output.squeeze(-2);
}
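// Illustrative usage (a sketch, not part of the upstream source); same
// output-length arithmetic as max_pool1d: (10 + 2 - 3) / 2 + 1 = 5:
//   at::Tensor x = at::rand({2, 3, 10});
//   at::Tensor y = at::avg_pool1d(x, /*kernel_size=*/{3}, /*stride=*/{2},
//                                 /*padding=*/{1}, /*ceil_mode=*/false,
//                                 /*count_include_pad=*/true);   // y: (2, 3, 5)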

// max_pool2d: routes quantized and MKL-DNN tensors to their dedicated kernels,
// uses the XNNPACK path on mobile builds when eligible, and otherwise falls
// back to max_pool2d_with_indices, discarding the indices.
Tensor max_pool2d(
    const Tensor& self,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation,
    bool ceil_mode) {
  if (self.is_quantized()) {
    return at::quantized_max_pool2d(self, kernel_size, stride, padding,
                                    dilation, ceil_mode);
  }
  if (self.is_mkldnn()) {
    return at::mkldnn_max_pool2d(
        self, kernel_size, stride, padding, dilation, ceil_mode);
  }
#if defined(C10_MOBILE)
  if (xnnpack::use_max_pool2d(self, kernel_size, padding, stride,
                              dilation, ceil_mode)) {
    return xnnpack::max_pool2d(
        self, kernel_size, padding, stride, dilation, ceil_mode);
  }
#endif
  auto output_and_indices = at::max_pool2d_with_indices(
      self, kernel_size, stride, padding, dilation, ceil_mode);
  return std::get<0>(output_and_indices);
}
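// Illustrative usage (a sketch, not part of the upstream source); an empty
// stride defaults to kernel_size inside max_pool2d_with_indices:
//   at::Tensor img = at::rand({1, 3, 8, 8});          // (N, C, H, W)
//   at::Tensor y = at::max_pool2d(img, {2, 2});       // y: (1, 3, 4, 4)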

// max_pool3d: same dispatch structure as max_pool2d (quantized and MKL-DNN
// paths, then the generic max_pool3d_with_indices fallback), without an
// XNNPACK path.
Tensor max_pool3d(
    const Tensor& self,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation,
    bool ceil_mode) {
  if (self.is_quantized()) {
    return at::quantized_max_pool3d(self, kernel_size, stride, padding,
                                    dilation, ceil_mode);
  }
  if (self.is_mkldnn()) {
    return at::mkldnn_max_pool3d(
        self, kernel_size, stride, padding, dilation, ceil_mode);
  }
  auto output_and_indices = at::max_pool3d_with_indices(
      self, kernel_size, stride, padding, dilation, ceil_mode);
  return std::get<0>(output_and_indices);
}
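// Illustrative usage (a sketch, not part of the upstream source):
//   at::Tensor vol = at::rand({1, 2, 4, 4, 4});       // (N, C, D, H, W)
//   at::Tensor y = at::max_pool3d(vol, {2, 2, 2});    // y: (1, 2, 2, 2, 2)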

} // namespace at::native