#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/core/DimVector.h>
#include <ATen/TensorOperators.h>
#include <ATen/WrapDimUtils.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <c10/core/ScalarType.h>
#include <c10/core/Scalar.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/cumulative_trapezoid_native.h>
#include <ATen/ops/trapezoid_native.h>
#include <ATen/ops/trapz_native.h>
#include <ATen/ops/zeros.h>
#endif

namespace at::native {
namespace {

// The estimated integral of a function y of x,
// sampled at points (y_1, ..., y_n) that are separated by distances (dx_1, ..., dx_{n-1}),
// is given by the trapezoid rule:
//
// \sum_{i=1}^{n-1}  dx_i * (y_i + y_{i+1}) / 2
//
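// For example (an illustrative check, not part of this file): sampling
// y = (1, 2, 3) with dx = (1, 1) gives (1+2)/2 * 1 + (2+3)/2 * 1 = 4.0.
//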
// TODO: if we extend TensorIterator to accept 3 inputs,
// we can probably make this a bit more performant.
Tensor do_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) {
    Tensor left = y.slice(dim, 0, -1);
    Tensor right = y.slice(dim, 1);
    // If the dimensions of 'dx' and '(left + right)' do not match,
    // broadcasting is attempted here.
    return ((left + right) * dx).sum(dim) / 2.;
}

// When dx is constant, the above formula simplifies
// to dx * [(\sum_{i=1}^n y_i) - (y_1 + y_n)/2]
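// Sanity check (illustrative): for y = (1, 2, 3) and dx = 1, this gives
// (1 + 2 + 3 - (1 + 3)/2) * 1 = 4.0, matching the example above.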
Tensor do_trapezoid(const Tensor& y, double dx, int64_t dim) {
    return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * (0.5)) * dx;
}

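// Returns a zero tensor whose shape is y's shape with 'dim' removed; e.g.
// (illustrative) y of shape (2, 0, 3) with dim = 1 gives a result of shape (2, 3).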
Tensor zeros_like_except(const Tensor& y, int64_t dim) {
    auto sizes = y.sym_sizes().vec();
    dim = maybe_wrap_dim(dim, y.dim());
    sizes.erase(sizes.begin() + dim);
    return at::zeros_symint(sizes, y.options());
}

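// Running-sum variant of do_trapezoid above; e.g. (illustrative) y = (1, 2, 3)
// with dx = (1, 1) yields ((1+2)/2, (1+2)/2 + (2+3)/2) = (1.5, 4.0).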
Tensor do_cumulative_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) {
    Tensor left = y.slice(dim, 0, -1);
    Tensor right = y.slice(dim, 1);

    return ((left + right) * dx).cumsum(dim) / 2.;
}

Tensor do_cumulative_trapezoid(const Tensor& y, double dx, int64_t dim) {
    Tensor left = y.slice(dim, 0, -1);
    Tensor right = y.slice(dim, 1);

    return (dx / 2. * (left + right)).cumsum(dim);
}

// Given the current shape of a Tensor and a target number of dimensions,
// returns a new shape with the same values as the original shape,
// but with '1's padded at the beginning to match the target number of dimensions.
// For example, curr_shape = (5,5,5) and target_n_dim = 6 ==> (1,1,1,5,5,5)
// Note that no padding is added if the current shape has at least as many
// dimensions as the target number of dimensions.
SymDimVector add_padding_to_shape(SymIntArrayRef curr_shape, int64_t target_n_dim) {
    const auto curr_size = static_cast<int64_t>(curr_shape.size());
    if (curr_size >= target_n_dim) {
        target_n_dim = curr_size;
    }
    SymDimVector new_shape(target_n_dim, 1);
    for (const auto i : c10::irange(curr_size)) {
        new_shape[target_n_dim - i - 1] = curr_shape[curr_size - i - 1];
    }
    return new_shape;
}
} // namespace

Tensor trapezoid(const Tensor& y, const Tensor& x, int64_t dim) {
    dim = maybe_wrap_dim(dim, y);
    // Asking for the integral with zero samples is a bit nonsensical,
    // but we'll return "0" to match NumPy's behavior.
    if (y.sym_size(dim) == 0) {
        return zeros_like_except(y, dim);
    }
    TORCH_CHECK(y.scalar_type() != kBool && x.scalar_type() != kBool, "trapezoid: received a bool input for `x` or `y`, but bool is not supported");
    Tensor x_viewed;
    // Note that we explicitly choose not to broadcast 'x' to match the shape of 'y' here because
    // we want to follow NumPy's behavior of broadcasting 'dx' and 'dy' together after the differences are taken.
    if (x.dim() == 1) {
        // This step takes 'x' with dimension (n,), and returns 'x_viewed' with
        // dimension (1,1,...,n,...,1,1) based on dim and y.dim() so that, later on, 'dx'
        // can be broadcast to match 'dy' at the correct dimensions.
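        // For example (illustrative): y of shape (2, 5, 3) with dim = 1 and x of
        // shape (5,) gives x_viewed of shape (1, 5, 1); 'dx' then has shape
        // (1, 4, 1) and broadcasts against 'left + right' of shape (2, 4, 3).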
        TORCH_CHECK(x.sym_size(0) == y.sym_size(dim), "trapezoid: There must be one `x` value for each sample point");
        SymDimVector new_sizes(y.dim(), 1); // shape = [1] * y.dim()
        new_sizes[dim] = x.sym_size(0); // shape[dim] = x.shape[0]
        x_viewed = x.view_symint(new_sizes);
    } else if (x.dim() < y.dim()) {
        // When 'y' has more dimensions than 'x', this step takes 'x' with dimension (n_1, n_2, ...)
        // and adds '1's as dimensions in front to become (1, 1, ..., n_1, n_2, ...), matching the dimension of 'y'.
        // This allows the subsequent slicing operations to proceed with any 'dim' without going out of bounds.
        SymDimVector new_sizes = add_padding_to_shape(x.sym_sizes(), y.dim());
        x_viewed = x.view_symint(new_sizes);
    } else {
        x_viewed = x;
    }
    // Note the .slice operation reduces the size along 'dim' by 1,
    // while the sizes of the other dimensions are untouched.
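    // E.g. (illustrative) x_viewed of shape (1, 5, 1) with dim = 1 gives
    // x_left and x_right of shape (1, 4, 1).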
    Tensor x_left = x_viewed.slice(dim, 0, -1);
    Tensor x_right = x_viewed.slice(dim, 1);

    Tensor dx = x_right - x_left;
    return do_trapezoid(y, dx, dim);
}

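// A minimal usage sketch of the two overloads (assuming the generated at::
// wrappers; illustrative only, not part of this file):
//   auto y = at::tensor({1., 2., 3.});
//   at::trapezoid(y, at::tensor({0., 1., 2.}), /*dim=*/0); // tensor `x`  -> 4.0
//   at::trapezoid(y, /*dx=*/2.0, /*dim=*/0);               // scalar `dx` -> 8.0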
Tensor trapezoid(const Tensor& y, const Scalar& dx, int64_t dim) {
    // see above
    if (y.sym_size(dim) == 0) {
        return zeros_like_except(y, dim);
    }
    TORCH_CHECK(y.scalar_type() != kBool, "trapezoid: received a bool input for `y`, but bool is not supported");
    TORCH_CHECK(!(dx.isComplex() || dx.isBoolean()), "trapezoid: Currently, we only support dx as a real number.");
    return do_trapezoid(y, dx.toDouble(), dim);
}

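// Note: trapz is an older alias of trapezoid, kept for backward compatibility.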
Tensor trapz(const Tensor& y, const Tensor& x, int64_t dim) {
    return at::native::trapezoid(y, x, dim);
}

Tensor trapz(const Tensor& y, double dx, int64_t dim) {
    return at::native::trapezoid(y, dx, dim);
}

Tensor cumulative_trapezoid(const Tensor& y, const Tensor& x, int64_t dim) {
    dim = maybe_wrap_dim(dim, y);
    TORCH_CHECK(y.scalar_type() != kBool && x.scalar_type() != kBool, "cumulative_trapezoid: received a bool input for `x` or `y`, but bool is not supported");
    Tensor x_viewed;
    if (x.dim() == 1) {
        // See trapezoid for implementation notes
        TORCH_CHECK(x.sym_size(0) == y.sym_size(dim), "cumulative_trapezoid: There must be one `x` value for each sample point");
        SymDimVector new_sizes(y.dim(), 1); // shape = [1] * y.dim()
        new_sizes[dim] = x.sym_size(0); // shape[dim] = x.shape[0]
        x_viewed = x.view_symint(new_sizes);
    } else if (x.dim() < y.dim()) {
        // See trapezoid for implementation notes
        SymDimVector new_sizes = add_padding_to_shape(x.sym_sizes(), y.dim());
        x_viewed = x.view_symint(new_sizes);
    } else {
        x_viewed = x;
    }
    Tensor x_left = x_viewed.slice(dim, 0, -1);
    Tensor x_right = x_viewed.slice(dim, 1);
    Tensor dx = x_right - x_left;

    return do_cumulative_trapezoid(y, dx, dim);
}
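
// Note (for reference): the result has one fewer element along 'dim' than 'y',
// and its last entry along 'dim' equals the full trapezoid(y, x, dim) value.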

Tensor cumulative_trapezoid(const Tensor& y, const Scalar& dx, int64_t dim) {
    TORCH_CHECK(y.scalar_type() != kBool, "cumulative_trapezoid: received a bool input for `y`, but bool is not supported");
    TORCH_CHECK(!(dx.isComplex() || dx.isBoolean()), "cumulative_trapezoid: Currently, we only support dx as a real number.");

    return do_cumulative_trapezoid(y, dx.toDouble(), dim);
}

} // namespace at::native