// aten/src/ATen/native/AdaptivePooling.h
#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/native/DispatchStub.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/irange.h>
#include <cmath>

namespace at::native {

using adaptive_avg_pooling2d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
using adaptive_avg_pooling2d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
DECLARE_DISPATCH(adaptive_avg_pooling2d_fn, adaptive_avg_pool2d_kernel);
DECLARE_DISPATCH(adaptive_avg_pooling2d_backward_fn, adaptive_avg_pool2d_backward_kernel);

using adaptive_max_pooling2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
using adaptive_max_pooling2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
DECLARE_DISPATCH(adaptive_max_pooling2d_fn, adaptive_max_pool2d_kernel);
DECLARE_DISPATCH(adaptive_max_pooling2d_backward_fn, adaptive_max_pool2d_backward_kernel);

using adaptive_avg_pooling3d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
using adaptive_avg_pooling3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
DECLARE_DISPATCH(adaptive_avg_pooling3d_fn, adaptive_avg_pool3d_kernel);
DECLARE_DISPATCH(adaptive_avg_pooling3d_backward_fn, adaptive_avg_pool3d_backward_kernel);

using adaptive_max_pooling3d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
using adaptive_max_pooling3d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
DECLARE_DISPATCH(adaptive_max_pooling3d_fn, adaptive_max_pool3d_kernel);
DECLARE_DISPATCH(adaptive_max_pooling3d_backward_fn, adaptive_max_pool3d_backward_kernel);

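// Each stub above is defined once with DEFINE_DISPATCH in a .cpp translation
// unit and filled in per backend via REGISTER_DISPATCH. A minimal sketch,
// assuming a hypothetical CPU implementation named adaptive_avg_pool2d_impl:
//
//   static void adaptive_avg_pool2d_impl(
//       Tensor& output, const Tensor& input, IntArrayRef output_size) {
//     // ... compute the pooled result into `output` ...
//   }
//   REGISTER_DISPATCH(adaptive_avg_pool2d_kernel, &adaptive_avg_pool2d_impl);
//
// Callers then invoke the stub as adaptive_avg_pool2d_kernel(device_type,
// output, input, output_size), and dispatch resolves to the implementation
// registered for that device.
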
// First input index of the pooling window for output index `a`, for an output
// of length `b` over an input of length `c`; equivalent to the integer
// division (a * c) / b, rearranged to avoid the large intermediate product.
inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
  return (a / b) * c + ((a % b) * c) / b;
}

// One past the last input index of the window for output index `a`; computes
// ceil((a + 1) * c / b) in integer arithmetic.
inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
  return 1 + ((a + 1) * c - 1) / b;
}

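// Worked example (computed from the formulas above): pooling an input of
// length c = 10 down to an output of length b = 3 yields the overlapping
// windows [0, 4), [3, 7), [6, 10). An illustrative, non-authoritative sketch
// of how a kernel would walk one dimension with these helpers:
//
//   for (int64_t ow = 0; ow < output_size; ow++) {
//     int64_t istart = start_index(ow, output_size, input_size);
//     int64_t iend = end_index(ow, output_size, input_size);
//     // reduce input[istart, iend) (average or max) into output[ow]
//   }
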
// Validates that `gradOutput_` has a non-zero size in every non-batch
// dimension; `arg_name` is the operator name used in the error message.
inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, const char* arg_name) {
  int64_t ndim = gradOutput_.ndimension();
  for (const auto i : c10::irange(1, ndim)) {
    TORCH_CHECK(gradOutput_.size(i) > 0,
      arg_name, "(): Expected grad_output to have non-zero size for non-batch dimensions, "
      "but grad_output has sizes ", gradOutput_.sizes(), " with dimension ", i,
      " being empty");
  }
}
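
// A sketch of the intended call site (the entry-point name here is
// illustrative, not taken from this header):
//
//   adaptive_pool_empty_output_check(grad_output, "adaptive_avg_pool2d_backward");
//
// so that an empty grad_output fails fast with a readable error instead of
// reaching the kernel.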

} // namespace at::native