// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016
// Mehdi Goli    Codeplay Software Ltd.
// Ralph Potter  Codeplay Software Ltd.
// Luke Iwanski  Codeplay Software Ltd.
// Contact: <[email protected]>
// Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX

#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL


#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>

using Eigen::array;
using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;

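// Reshape a rank-5 tensor into rank-3 and rank-2 shapes on the SYCL device,
// copy the results back, and compare them element-wise with the host tensor.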
template <typename DataType, int DataLayout, typename IndexType>
static void test_simple_reshape(const Eigen::SyclDevice& sycl_device)
{
  typename Tensor<DataType, 5, DataLayout, IndexType>::Dimensions dim1(2,3,1,7,1);
  typename Tensor<DataType, 3, DataLayout, IndexType>::Dimensions dim2(2,3,7);
  typename Tensor<DataType, 2, DataLayout, IndexType>::Dimensions dim3(6,7);
  typename Tensor<DataType, 2, DataLayout, IndexType>::Dimensions dim4(2,21);

  Tensor<DataType, 5, DataLayout, IndexType> tensor1(dim1);
  Tensor<DataType, 3, DataLayout, IndexType> tensor2(dim2);
  Tensor<DataType, 2, DataLayout, IndexType> tensor3(dim3);
  Tensor<DataType, 2, DataLayout, IndexType> tensor4(dim4);

  tensor1.setRandom();

  DataType* gpu_data1  = static_cast<DataType*>(sycl_device.allocate(tensor1.size()*sizeof(DataType)));
  DataType* gpu_data2  = static_cast<DataType*>(sycl_device.allocate(tensor2.size()*sizeof(DataType)));
  DataType* gpu_data3  = static_cast<DataType*>(sycl_device.allocate(tensor3.size()*sizeof(DataType)));
  DataType* gpu_data4  = static_cast<DataType*>(sycl_device.allocate(tensor4.size()*sizeof(DataType)));

  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu1(gpu_data1, dim1);
  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu2(gpu_data2, dim2);
  TensorMap<Tensor<DataType, 2, DataLayout, IndexType>> gpu3(gpu_data3, dim3);
  TensorMap<Tensor<DataType, 2, DataLayout, IndexType>> gpu4(gpu_data4, dim4);

  sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType));

  gpu2.device(sycl_device)=gpu1.reshape(dim2);
  sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType));

  gpu3.device(sycl_device)=gpu1.reshape(dim3);
  sycl_device.memcpyDeviceToHost(tensor3.data(), gpu_data3,(tensor3.size())*sizeof(DataType));

  gpu4.device(sycl_device)=gpu1.reshape(dim2).reshape(dim4);
  sycl_device.memcpyDeviceToHost(tensor4.data(), gpu_data4,(tensor4.size())*sizeof(DataType));
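  // Reshaping preserves the underlying linear order, which depends on the
  // layout: for ColMajor the leftmost index varies fastest, so collapsing the
  // leading 2x3 block of dim1 gives i + 2*j (and the trailing 3x7 block gives
  // j + 3*k); for RowMajor the rightmost index varies fastest, giving
  // i*3 + j and j*7 + k respectively.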
  for (IndexType i = 0; i < 2; ++i){
    for (IndexType j = 0; j < 3; ++j){
      for (IndexType k = 0; k < 7; ++k){
        VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor2(i,j,k));      // same for both layouts
        if (static_cast<int>(DataLayout) == static_cast<int>(ColMajor)) {
          VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor3(i+2*j,k));   // ColMajor
          VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor4(i,j+3*k));   // ColMajor
        }
        else{
          VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor4(i,j*7+k));   // RowMajor
          VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor3(i*3+j,k));   // RowMajor
        }
      }
    }
  }
  sycl_device.deallocate(gpu_data1);
  sycl_device.deallocate(gpu_data2);
  sycl_device.deallocate(gpu_data3);
  sycl_device.deallocate(gpu_data4);
}


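// Use reshape() on the left-hand side of a device assignment: a rank-3 tensor
// is written through rank-2 and rank-5 views, then verified on the host.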
template<typename DataType, int DataLayout, typename IndexType>
static void test_reshape_as_lvalue(const Eigen::SyclDevice& sycl_device)
{
  typename Tensor<DataType, 3, DataLayout, IndexType>::Dimensions dim1(2,3,7);
  typename Tensor<DataType, 2, DataLayout, IndexType>::Dimensions dim2(6,7);
  typename Tensor<DataType, 5, DataLayout, IndexType>::Dimensions dim3(2,3,1,7,1);
  Tensor<DataType, 3, DataLayout, IndexType> tensor(dim1);
  Tensor<DataType, 2, DataLayout, IndexType> tensor2d(dim2);
  Tensor<DataType, 5, DataLayout, IndexType> tensor5d(dim3);

  tensor.setRandom();

  DataType* gpu_data1  = static_cast<DataType*>(sycl_device.allocate(tensor.size()*sizeof(DataType)));
  DataType* gpu_data2  = static_cast<DataType*>(sycl_device.allocate(tensor2d.size()*sizeof(DataType)));
  DataType* gpu_data3  = static_cast<DataType*>(sycl_device.allocate(tensor5d.size()*sizeof(DataType)));

  TensorMap< Tensor<DataType, 3, DataLayout, IndexType> > gpu1(gpu_data1, dim1);
  TensorMap< Tensor<DataType, 2, DataLayout, IndexType> > gpu2(gpu_data2, dim2);
  TensorMap< Tensor<DataType, 5, DataLayout, IndexType> > gpu3(gpu_data3, dim3);

  sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType));

  gpu2.reshape(dim1).device(sycl_device)=gpu1;
  sycl_device.memcpyDeviceToHost(tensor2d.data(), gpu_data2,(tensor2d.size())*sizeof(DataType));

  gpu3.reshape(dim1).device(sycl_device)=gpu1;
  sycl_device.memcpyDeviceToHost(tensor5d.data(), gpu_data3,(tensor5d.size())*sizeof(DataType));


  for (IndexType i = 0; i < 2; ++i){
    for (IndexType j = 0; j < 3; ++j){
      for (IndexType k = 0; k < 7; ++k){
        VERIFY_IS_EQUAL(tensor5d(i,j,0,k,0), tensor(i,j,k));
        if (static_cast<int>(DataLayout) == static_cast<int>(ColMajor)) {
          VERIFY_IS_EQUAL(tensor2d(i+2*j,k), tensor(i,j,k));   // ColMajor
        }
        else{
          VERIFY_IS_EQUAL(tensor2d(i*3+j,k), tensor(i,j,k));   // RowMajor
        }
      }
    }
  }
  sycl_device.deallocate(gpu_data1);
  sycl_device.deallocate(gpu_data2);
  sycl_device.deallocate(gpu_data3);
}


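// Slice a rank-5 tensor on the device, first a single element and then a
// 1x1x2x2x3 block, and compare every copied coefficient with the source.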
template <typename DataType, int DataLayout, typename IndexType>
static void test_simple_slice(const Eigen::SyclDevice& sycl_device)
{
  IndexType sizeDim1 = 2;
  IndexType sizeDim2 = 3;
  IndexType sizeDim3 = 5;
  IndexType sizeDim4 = 7;
  IndexType sizeDim5 = 11;
  array<IndexType, 5> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}};
  Tensor<DataType, 5, DataLayout, IndexType> tensor(tensorRange);
  tensor.setRandom();
  array<IndexType, 5> slice1_range = {{1, 1, 1, 1, 1}};
  Tensor<DataType, 5, DataLayout, IndexType> slice1(slice1_range);

  DataType* gpu_data1  = static_cast<DataType*>(sycl_device.allocate(tensor.size()*sizeof(DataType)));
  DataType* gpu_data2  = static_cast<DataType*>(sycl_device.allocate(slice1.size()*sizeof(DataType)));
  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu2(gpu_data2, slice1_range);
  Eigen::DSizes<IndexType, 5> indices(1,2,3,4,5);
  Eigen::DSizes<IndexType, 5> sizes(1,1,1,1,1);
  sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType));
  gpu2.device(sycl_device)=gpu1.slice(indices, sizes);
  sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(DataType));
  VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));


  array<IndexType, 5> slice2_range = {{1,1,2,2,3}};
  Tensor<DataType, 5, DataLayout, IndexType> slice2(slice2_range);
  DataType* gpu_data3  = static_cast<DataType*>(sycl_device.allocate(slice2.size()*sizeof(DataType)));
  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu3(gpu_data3, slice2_range);
  Eigen::DSizes<IndexType, 5> indices2(1,1,3,4,5);
  Eigen::DSizes<IndexType, 5> sizes2(1,1,2,2,3);
  gpu3.device(sycl_device)=gpu1.slice(indices2, sizes2);
  sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(DataType));
  for (IndexType i = 0; i < 2; ++i) {
    for (IndexType j = 0; j < 2; ++j) {
      for (IndexType k = 0; k < 3; ++k) {
        VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
      }
    }
  }
  sycl_device.deallocate(gpu_data1);
  sycl_device.deallocate(gpu_data2);
  sycl_device.deallocate(gpu_data3);
}


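// With unit strides and indicesStop - indicesStart equal to the slice sizes,
// stridedSlice(start, stop, strides) selects exactly the same elements as
// slice(start, sizes); both forms are evaluated on the device and checked
// against the host tensor.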
template <typename DataType, int DataLayout, typename IndexType>
static void test_strided_slice_as_rhs_sycl(const Eigen::SyclDevice& sycl_device)
{
  IndexType sizeDim1 = 2;
  IndexType sizeDim2 = 3;
  IndexType sizeDim3 = 5;
  IndexType sizeDim4 = 7;
  IndexType sizeDim5 = 11;
  typedef Eigen::DSizes<IndexType, 5> Index5;
  Index5 strides(1L,1L,1L,1L,1L);
  Index5 indicesStart(1L,2L,3L,4L,5L);
  Index5 indicesStop(2L,3L,4L,5L,6L);

  array<IndexType, 5> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}};
  Tensor<DataType, 5, DataLayout, IndexType> tensor(tensorRange);
  tensor.setRandom();

  array<IndexType, 5> slice1_range = {{1, 1, 1, 1, 1}};
  Tensor<DataType, 5, DataLayout, IndexType> slice1(slice1_range);
  Tensor<DataType, 5, DataLayout, IndexType> slice_stride1(slice1_range);

  DataType* gpu_data1  = static_cast<DataType*>(sycl_device.allocate(tensor.size()*sizeof(DataType)));
  DataType* gpu_data2  = static_cast<DataType*>(sycl_device.allocate(slice1.size()*sizeof(DataType)));
  DataType* gpu_data_stride2  = static_cast<DataType*>(sycl_device.allocate(slice_stride1.size()*sizeof(DataType)));

  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu2(gpu_data2, slice1_range);
  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu_stride2(gpu_data_stride2, slice1_range);

  Eigen::DSizes<IndexType, 5> indices(1,2,3,4,5);
  Eigen::DSizes<IndexType, 5> sizes(1,1,1,1,1);
  sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType));
  gpu2.device(sycl_device)=gpu1.slice(indices, sizes);
  sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(DataType));

  gpu_stride2.device(sycl_device)=gpu1.stridedSlice(indicesStart,indicesStop,strides);
  sycl_device.memcpyDeviceToHost(slice_stride1.data(), gpu_data_stride2,(slice_stride1.size())*sizeof(DataType));

  VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
  VERIFY_IS_EQUAL(slice_stride1(0,0,0,0,0), tensor(1,2,3,4,5));

  array<IndexType, 5> slice2_range = {{1,1,2,2,3}};
  Tensor<DataType, 5, DataLayout, IndexType> slice2(slice2_range);
  Tensor<DataType, 5, DataLayout, IndexType> strideSlice2(slice2_range);

  DataType* gpu_data3  = static_cast<DataType*>(sycl_device.allocate(slice2.size()*sizeof(DataType)));
  DataType* gpu_data_stride3  = static_cast<DataType*>(sycl_device.allocate(strideSlice2.size()*sizeof(DataType)));
  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu3(gpu_data3, slice2_range);
  TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu_stride3(gpu_data_stride3, slice2_range);
  Eigen::DSizes<IndexType, 5> indices2(1,1,3,4,5);
  Eigen::DSizes<IndexType, 5> sizes2(1,1,2,2,3);
  Index5 strides2(1L,1L,1L,1L,1L);
  Index5 indicesStart2(1L,1L,3L,4L,5L);
  Index5 indicesStop2(2L,2L,5L,6L,8L);

  gpu3.device(sycl_device)=gpu1.slice(indices2, sizes2);
  sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(DataType));

  gpu_stride3.device(sycl_device)=gpu1.stridedSlice(indicesStart2,indicesStop2,strides2);
  sycl_device.memcpyDeviceToHost(strideSlice2.data(), gpu_data_stride3,(strideSlice2.size())*sizeof(DataType));

  for (IndexType i = 0; i < 2; ++i) {
    for (IndexType j = 0; j < 2; ++j) {
      for (IndexType k = 0; k < 3; ++k) {
        VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
        VERIFY_IS_EQUAL(strideSlice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
      }
    }
  }
  sycl_device.deallocate(gpu_data1);
  sycl_device.deallocate(gpu_data2);
  sycl_device.deallocate(gpu_data_stride2);
  sycl_device.deallocate(gpu_data3);
  sycl_device.deallocate(gpu_data_stride3);
}

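// Write through slice() and stridedSlice() as lvalues: with unit strides and
// matching extents both assignments update the same 2x3 region, so the two
// output tensors must agree everywhere.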
template<typename DataType, int DataLayout, typename IndexType>
static void test_strided_slice_write_sycl(const Eigen::SyclDevice& sycl_device)
{
  typedef Tensor<DataType, 2, DataLayout, IndexType> Tensor2f;
  typedef Eigen::DSizes<IndexType, 2> Index2;
  IndexType sizeDim1 = 7L;
  IndexType sizeDim2 = 11L;
  array<IndexType, 2> tensorRange = {{sizeDim1, sizeDim2}};
  Tensor<DataType, 2, DataLayout, IndexType> tensor(tensorRange), tensor2(tensorRange);
  IndexType sliceDim1 = 2;
  IndexType sliceDim2 = 3;
  array<IndexType, 2> sliceRange = {{sliceDim1, sliceDim2}};
  Tensor2f slice(sliceRange);
  Index2 strides(1L,1L);
  Index2 indicesStart(3L,4L);
  Index2 indicesStop(5L,7L);
  Index2 lengths(2L,3L);

  DataType* gpu_data1  = static_cast<DataType*>(sycl_device.allocate(tensor.size()*sizeof(DataType)));
  DataType* gpu_data2  = static_cast<DataType*>(sycl_device.allocate(tensor2.size()*sizeof(DataType)));
  DataType* gpu_data3  = static_cast<DataType*>(sycl_device.allocate(slice.size()*sizeof(DataType)));
  TensorMap<Tensor<DataType, 2, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
  TensorMap<Tensor<DataType, 2, DataLayout, IndexType>> gpu2(gpu_data2, tensorRange);
  TensorMap<Tensor<DataType, 2, DataLayout, IndexType>> gpu3(gpu_data3, sliceRange);


  tensor.setRandom();
  sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType));
  gpu2.device(sycl_device)=gpu1;

  slice.setRandom();
  sycl_device.memcpyHostToDevice(gpu_data3, slice.data(),(slice.size())*sizeof(DataType));


  gpu1.slice(indicesStart,lengths).device(sycl_device)=gpu3;
  gpu2.stridedSlice(indicesStart,indicesStop,strides).device(sycl_device)=gpu3;
  sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data1,(tensor.size())*sizeof(DataType));
  sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType));

  for (IndexType i = 0; i < sizeDim1; i++) {
    for (IndexType j = 0; j < sizeDim2; j++) {
      VERIFY_IS_EQUAL(tensor(i,j), tensor2(i,j));
    }
  }
  sycl_device.deallocate(gpu_data1);
  sycl_device.deallocate(gpu_data2);
  sycl_device.deallocate(gpu_data3);
}

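// Copy a DSizes<IndexType, N> into an array of a narrower index type
// (e.g. int64_t -> int); the caller is responsible for the extents fitting.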
template <typename OutIndex, typename DSizes>
Eigen::array<OutIndex, DSizes::count> To32BitDims(const DSizes& in) {
  Eigen::array<OutIndex, DSizes::count> out;
  for (int i = 0; i < DSizes::count; ++i) {
    out[i] = in[i];
  }
  return out;
}

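// Re-map 64-bit-indexed tensors with 32-bit indices, run the same slice
// assignment on the device and on the host, and return 1 on any mismatch
// (0 otherwise).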
template <class DataType, int DataLayout, typename IndexType, typename ConvertedIndexType>
int run_eigen(const SyclDevice& sycl_device) {
  using TensorI64 = Tensor<DataType, 5, DataLayout, IndexType>;
  using TensorI32 = Tensor<DataType, 5, DataLayout, ConvertedIndexType>;
  using TensorMI64 = TensorMap<TensorI64>;
  using TensorMI32 = TensorMap<TensorI32>;
  Eigen::array<IndexType, 5> tensor_range{{4, 1, 1, 1, 6}};
  Eigen::array<IndexType, 5> slice_range{{4, 1, 1, 1, 3}};

  TensorI64 out_tensor_gpu(tensor_range);
  TensorI64 out_tensor_cpu(tensor_range);
  out_tensor_cpu.setRandom();

  TensorI64 sub_tensor(slice_range);
  sub_tensor.setRandom();

  DataType* out_gpu_data = static_cast<DataType*>(sycl_device.allocate(out_tensor_cpu.size() * sizeof(DataType)));
  DataType* sub_gpu_data = static_cast<DataType*>(sycl_device.allocate(sub_tensor.size() * sizeof(DataType)));
  TensorMI64 out_gpu(out_gpu_data, tensor_range);
  TensorMI64 sub_gpu(sub_gpu_data, slice_range);

  sycl_device.memcpyHostToDevice(out_gpu_data, out_tensor_cpu.data(), out_tensor_cpu.size() * sizeof(DataType));
  sycl_device.memcpyHostToDevice(sub_gpu_data, sub_tensor.data(), sub_tensor.size() * sizeof(DataType));

  Eigen::array<ConvertedIndexType, 5> slice_offset_32{{0, 0, 0, 0, 3}};
  Eigen::array<ConvertedIndexType, 5> slice_range_32{{4, 1, 1, 1, 3}};
  TensorMI32 out_cpu_32(out_tensor_cpu.data(), To32BitDims<ConvertedIndexType>(out_tensor_cpu.dimensions()));
  TensorMI32 sub_cpu_32(sub_tensor.data(), To32BitDims<ConvertedIndexType>(sub_tensor.dimensions()));
  TensorMI32 out_gpu_32(out_gpu.data(), To32BitDims<ConvertedIndexType>(out_gpu.dimensions()));
  TensorMI32 sub_gpu_32(sub_gpu.data(), To32BitDims<ConvertedIndexType>(sub_gpu.dimensions()));

  out_gpu_32.slice(slice_offset_32, slice_range_32).device(sycl_device) = sub_gpu_32;

  out_cpu_32.slice(slice_offset_32, slice_range_32) = sub_cpu_32;

  sycl_device.memcpyDeviceToHost(out_tensor_gpu.data(), out_gpu_data, out_tensor_cpu.size() * sizeof(DataType));
  int has_err = 0;
  for (IndexType i = 0; i < out_tensor_cpu.size(); ++i) {
    auto exp = out_tensor_cpu(i);
    auto val = out_tensor_gpu(i);
    if (val != exp) {
      std::cout << "#" << i << " got " << val << " but expected " << exp << std::endl;
      has_err = 1;
    }
  }
  sycl_device.deallocate(out_gpu_data);
  sycl_device.deallocate(sub_gpu_data);
  return has_err;
}

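// Instantiate every morphing test for both layouts on the given device,
// followed by the 32-bit index slicing check in run_eigen.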
template<typename DataType, typename dev_Selector> void sycl_morphing_test_per_device(dev_Selector s){
  QueueInterface queueInterface(s);
  auto sycl_device = Eigen::SyclDevice(&queueInterface);
  test_simple_slice<DataType, RowMajor, int64_t>(sycl_device);
  test_simple_slice<DataType, ColMajor, int64_t>(sycl_device);
  test_simple_reshape<DataType, RowMajor, int64_t>(sycl_device);
  test_simple_reshape<DataType, ColMajor, int64_t>(sycl_device);
  test_reshape_as_lvalue<DataType, RowMajor, int64_t>(sycl_device);
  test_reshape_as_lvalue<DataType, ColMajor, int64_t>(sycl_device);
  test_strided_slice_write_sycl<DataType, ColMajor, int64_t>(sycl_device);
  test_strided_slice_write_sycl<DataType, RowMajor, int64_t>(sycl_device);
  test_strided_slice_as_rhs_sycl<DataType, ColMajor, int64_t>(sycl_device);
  test_strided_slice_as_rhs_sycl<DataType, RowMajor, int64_t>(sycl_device);
  // run_eigen signals mismatches through its return value, so verify it
  // rather than discarding the result.
  VERIFY_IS_EQUAL((run_eigen<float, RowMajor, long, int>(sycl_device)), 0);
}

EIGEN_DECLARE_TEST(cxx11_tensor_morphing_sycl)
{
  for (const auto& device : Eigen::get_sycl_supported_devices()) {
    CALL_SUBTEST(sycl_morphing_test_per_device<float>(device));
  }
}