#include <gtest/gtest.h>

#include <ATen/ATen.h>
#include <c10/util/irange.h>

using namespace at;

#define ASSERT_EQUAL(t1, t2) ASSERT_TRUE(t1.equal(t2));

#define ASSERT_ALLCLOSE(t1, t2)     \
  ASSERT_TRUE(t1.is_same_size(t2)); \
  ASSERT_TRUE(t1.allclose(t2));

#define ASSERT_ALLCLOSE_TOLERANCES(t1, t2, atol, rtol) \
  ASSERT_TRUE(t1.is_same_size(t2));                    \
  ASSERT_TRUE(t1.allclose(t2, atol, rtol));
void requireEqualTensorList(TensorList t1, TensorList t2) {
  ASSERT_EQ(t1.size(), t2.size());
  for (const auto i : c10::irange(t1.size())) {
    ASSERT_EQUAL(t1[i], t2[i]);
  }
}

// split: the Tensor method and the at:: namespace function should give the
// same result
void TestSplit(TensorOptions T, Tensor& t) {
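  // split(1, 0) yields one slice of size 1 per step along dim 0; for the 3x3
  // input built in test() below, that is three 1x3 tensors.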
  auto splitMethod = t.split(1, 0);
  auto splitNs = at::split(t, 1, 0);
  requireEqualTensorList(splitMethod, splitNs);

  // test rebuilding with cat
  ASSERT_EQUAL(at::cat(splitMethod, 0), t);
}

// chunk: the Tensor method and the at:: namespace function should give the
// same result
void TestChunk(TensorOptions T, Tensor& t) {
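  // chunk(3, 0) splits the tensor into three pieces along dim 0; for the 3x3
  // input built in test() below, each piece is a 1x3 tensor.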
  auto chunkMethod = t.chunk(3, 0);
  auto chunkNs = at::chunk(t, 3, 0);
  requireEqualTensorList(chunkMethod, chunkNs);

  // test rebuilding with cat
  ASSERT_EQUAL(at::cat(chunkMethod, 0), t);
}

typedef Tensor StackFunc (TensorList, int64_t);

// helper function for TestStack
void _test_stack(TensorList inputs, int64_t dim, StackFunc stack_func) {
  auto const &x = inputs[0];

  auto res = stack_func(inputs, dim);
  auto res_neg = stack_func(inputs, dim - x.dim() - 1);
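  // The stacked result has x.dim() + 1 dimensions, so dim - x.dim() - 1 is
  // the same insertion point expressed as a negative index. expected_size is
  // the input shape with a new dimension of length inputs.size() spliced in
  // at position dim.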
  std::vector<int64_t> expected_size;
  expected_size.insert(
      expected_size.end(), x.sizes().begin(), x.sizes().begin() + dim);
  expected_size.insert(expected_size.end(), inputs.size());
  expected_size.insert(
      expected_size.end(), x.sizes().begin() + dim, x.sizes().end());

  ASSERT_EQUAL(res, res_neg);
  ASSERT_TRUE(res.sizes().equals(expected_size));

  int d = 0;
  for (auto& t : inputs) {
    ASSERT_EQUAL(res.select(dim, d), t);
    d++;
  }
}

void TestStack(TensorOptions T, Tensor& t) {
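  // Exercise the public at::stack op plus the internal at::native::_stack and
  // at::native::_stack_cpu entry points; all three should produce identical
  // results for every valid stacking dimension.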
  { // at::stack
    auto x = rand({2, 3, 4});
    auto y = rand({2, 3, 4});
    auto z = rand({2, 3, 4});

    auto inputs = {x, y, z};
    for (const auto dim : c10::irange(4)) {
      _test_stack(inputs, dim, at::stack);
    }
  }

  { // at::native::_stack
    auto x = rand({2, 3, 4});
    auto y = rand({2, 3, 4});
    auto z = rand({2, 3, 4});

    auto inputs = {x, y, z};
    for (const auto dim : c10::irange(4)) {
      _test_stack(inputs, dim, at::native::_stack);
    }
  }

  { // at::native::_stack_cpu
    auto x = rand({2, 3, 4});
    auto y = rand({2, 3, 4});
    auto z = rand({2, 3, 4});

    auto inputs = {x, y, z};
    for (const auto dim : c10::irange(4)) {
      _test_stack(inputs, dim, at::native::_stack_cpu);
    }
  }
}

// size / stride accessors on zero-dim and zero-size tensors
void TestSize(TensorOptions T, Tensor& t) {
  auto scalar = randn({}, T);
  // Throw StartsWith("dimension specified as 0 but tensor has no dimensions")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(scalar.size(0));
  // Throw StartsWith("dimension specified as -1 but tensor has no dimensions")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(scalar.size(-1));
  // Throw StartsWith("dimension specified as 0 but tensor has no dimensions")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(scalar.stride(0));
  // Throw StartsWith("dimension specified as -1 but tensor has no dimensions")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(scalar.stride(-1));

  auto empty = randn({0}, T);
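  // A {0}-shaped tensor still has one dimension: its length is 0 and its
  // contiguous stride is 1.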
  ASSERT_EQ(empty.size(0), 0);
  ASSERT_EQ(empty.size(-1), 0);
  ASSERT_EQ(empty.stride(0), 1);
  ASSERT_EQ(empty.stride(-1), 1);
}

void TestMatmul(TensorOptions T, Tensor& t, TensorOptions AccT) {
  auto scalar = randn({}, T);
  auto d1 = randn({3}, T);
  auto d2 = randn({2, 3}, T);

  // 0-d
  // Throw StartsWith("both arguments to matmul need to be at least 1D")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(scalar.matmul(d2));
  // Throw StartsWith("both arguments to matmul need to be at least 1D")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(d2.matmul(scalar));

  // 1-d
  ASSERT_ALLCLOSE(d1.matmul(d1), d1.dot(d1));
  ASSERT_ALLCLOSE(d2.matmul(d1), d2.mv(d1));
  auto d1o = randn({2}, T);
  ASSERT_ALLCLOSE(d1o.matmul(d2), d1o.unsqueeze(0).mm(d2).squeeze(0));

  // 2-d
  auto d2o = randn({3, 5}, T);
  ASSERT_ALLCLOSE(d2.matmul(d2o), d2.mm(d2o));

  // > 2-d, 1-d
  auto d3 = randn({5, 2, 3}, T);
  ASSERT_ALLCLOSE(
      d3.matmul(d1), d3.bmm(d1.view({1, 3, 1}).expand({5, 3, 1})).view({5, 2}));
  ASSERT_ALLCLOSE(d1o.matmul(d3), d1o.expand({5, 1, 2}).bmm(d3).view({5, 3}));

  auto d5 = randn({3, 2, 4, 2, 3}, T);
  ASSERT_ALLCLOSE(
      d5.matmul(d1),
      d5.view({24, 2, 3})
          .bmm(d1.view({1, 3, 1}).expand({24, 3, 1}))
          .view({3, 2, 4, 2}));
  ASSERT_ALLCLOSE(
      d1o.matmul(d5),
      d1o.expand({24, 1, 2}).bmm(d5.view({24, 2, 3})).view({3, 2, 4, 3}));

  // > 2-d, 2-d
  // matmul uses a "folding" algorithm in this case, so a direct comparison to
  // bmm doesn't work; instead, compare against the same computation done in a
  // higher-precision dtype (technically, we should always do this). The
  // tolerances are selected empirically.
  double atol = 1e-04;
  double rtol = 1e-06;
  d2 = randn({3, 4}, T);
  d2o = randn({4, 2}, T);
  auto result = d5.matmul(d2).to(AccT);

  auto d5Acc = d5.to(AccT);
  auto d2Acc = d2.to(AccT);
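  // Reference result: fold the 3 * 2 * 4 = 24 batch matrices into one bmm
  // against the broadcast 3x4 operand, then reshape back to {3, 2, 4, 2, 4}.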
  auto acc_result = d5Acc.view({24, 2, 3})
                        .bmm(d2Acc.expand({24, 3, 4}))
                        .view({3, 2, 4, 2, 4});
  ASSERT_ALLCLOSE_TOLERANCES(result, acc_result, atol, rtol);
  ASSERT_ALLCLOSE(
      d2o.matmul(d5),
      d2o.expand({24, 4, 2}).bmm(d5.view({24, 2, 3})).view({3, 2, 4, 4, 3}));

  // > 2-d, > 2-d
  auto d5o = randn({2, 1, 2, 4, 3, 2}, T);
  auto d5_bmm_view =
      d5.expand({2, 3, 2, 4, 2, 3}).contiguous().view({48, 2, 3});
  auto d5o_bmm_view =
      d5o.expand({2, 3, 2, 4, 3, 2}).contiguous().view({48, 3, 2});
  ASSERT_ALLCLOSE(
      d5.matmul(d5o), d5_bmm_view.bmm(d5o_bmm_view).view({2, 3, 2, 4, 2, 2}));

  // non-expandable case
  auto d5wrong = randn({2, 4, 2, 4, 3, 2}, T);
  // Throw Contains("must match the size")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(d5.matmul(d5wrong));
}

void TestStandardGammaGrad(TensorOptions T, Tensor& t) {
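  // _standard_gamma_grad computes the derivative of a standard Gamma sample
  // with respect to the shape (concentration) parameter; this test only
  // checks shape handling and dtype validation, not the numerical values.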
  // check empty
  auto empty = ones({0}, T);
  ASSERT_EQUAL(empty, at::_standard_gamma_grad(empty, empty));

  // check that a scalar matches the corresponding one-element tensor
  auto one_scalar = ones({}, T).mul(5);
  auto one_with_dim = ones({1}, T).mul(5);
  ASSERT_ALLCLOSE(
      at::_standard_gamma_grad(one_scalar, one_scalar),
      at::_standard_gamma_grad(one_with_dim, one_with_dim).sum());

  // check mixing types
  auto t1 = randn({3, 4}, T);
  auto t2 = randn({3, 4}, T).toType(kDouble);
  // Throw StartsWith("expected scalar type")
  // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
  ASSERT_ANY_THROW(at::_standard_gamma_grad(t1, t2));
}

void TestWhere(TensorOptions T, Tensor& t) {
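  // where(cond, x, y) selects from x where cond is nonzero and from y
  // elsewhere; the condition tensor here uses the kByte dtype.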
  // empty
  auto empty = ones({0}, T);
  auto bT = T.dtype(kByte);
  auto empty_byte = ones({0}, bT);
  ASSERT_EQUAL(empty, at::where(empty_byte, empty, empty));

  // check that a scalar matches the corresponding one-element tensor
  auto x_scalar = ones({}, T).mul(5);
  auto y_scalar = ones({}, T).mul(7);
  auto cond_scalar = zeros({}, bT);
  auto x_1d = x_scalar.unsqueeze(0);
  auto y_1d = y_scalar.unsqueeze(0);
  auto cond_1d = cond_scalar.unsqueeze(0);
  ASSERT_ALLCLOSE(
      at::where(cond_scalar, x_scalar, y_scalar).unsqueeze(0),
      at::where(cond_1d, x_1d, y_1d));
}

void test(TensorOptions T, TensorOptions AccT) {
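  // Run every sub-test on a fresh 3x3 tensor of the requested options; AccT
  // is a higher-precision dtype that TestMatmul uses for reference results.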
  auto t = randn({3, 3}, T);
  TestSplit(T, t);
  TestChunk(T, t);
  TestStack(T, t);
  TestSize(T, t);
  TestMatmul(T, t, AccT);
  TestStandardGammaGrad(T, t);
  TestWhere(T, t);
}

TEST(TestNative, NativeTestCPU) {
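  // Fix the RNG seed so the randomly generated inputs are reproducible.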
  manual_seed(123);

  test(at::device(kCPU).dtype(kFloat),
       at::device(kCPU).dtype(kDouble));
}

TEST(TestNative, NativeTestGPU) {
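  // The body is guarded by at::hasCUDA(), so this test silently passes on
  // machines without a CUDA device.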
  manual_seed(123);

  if (at::hasCUDA()) {
    test(at::device(kCUDA).dtype(kFloat),
         at::device(kCUDA).dtype(kDouble));
  }
}