#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Context.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/TensorProperties.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_nested_tensor_size_native.h>
#include <ATen/ops/contiguous_native.h>
#include <ATen/ops/cudnn_is_acceptable_native.h>
#include <ATen/ops/detach_native.h>
#include <ATen/ops/equal.h>
#include <ATen/ops/is_same_size_native.h>
#include <ATen/ops/is_set_to_native.h>
#include <ATen/ops/size_native.h>
#include <ATen/ops/stride_native.h>
#include <ATen/ops/sym_numel_native.h>
#include <ATen/ops/sym_size_native.h>
#include <ATen/ops/sym_storage_offset_native.h>
#include <ATen/ops/sym_stride_native.h>
#endif

#include <c10/util/irange.h>

namespace at::native {

bool is_same_size(const Tensor& self, const Tensor& other) {
  return self.sym_sizes().equals(other.sym_sizes());
}

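// Both arguments must be nested tensors; their nested size tensors (one row
// of per-component sizes, as produced by _nested_tensor_size) are then
// compared elementwise with at::equal.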
bool nested_is_same_size(const Tensor& self, const Tensor& other) {
  TORCH_CHECK(
      self.is_nested() && other.is_nested(),
      "Expected both self and other to be nested tensors. ",
      "Self ", self.is_nested() ? "is " : "is not ", "nested, ",
      "while other ", other.is_nested() ? "is " : "is not ", "nested.");
  const auto self_nt_size = _nested_tensor_size(self);
  const auto other_nt_size = _nested_tensor_size(other);
  return at::equal(self_nt_size, other_nt_size);
}

int64_t size(const Tensor& self, int64_t dim) {
  return self.size(dim);
}

int64_t stride(const Tensor& self, int64_t dim) {
  return self.stride(dim);
}

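// The sym_* variants below return c10::SymInt instead of int64_t so the
// result can stay symbolic when sizes/strides are not concrete integers
// (e.g. under symbolic-shape tracing); for ordinary tensors they behave like
// the plain integer overloads. Illustrative (hypothetical) usage:
//
//   c10::SymInt n = at::native::sym_numel(t);
//   if (n == 0) { /* handle the empty case without forcing a concrete int */ }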
c10::SymInt sym_size(const Tensor& self, int64_t dim) {
  return self.sym_size(dim);
}

c10::SymInt sym_stride(const Tensor& self, int64_t dim) {
  return self.sym_stride(dim);
}

c10::SymInt sym_numel(const Tensor& self) {
  return self.sym_numel();
}

c10::SymInt sym_storage_offset(const Tensor& self) {
  return self.sym_storage_offset();
}

int64_t size(const Tensor& self, Dimname dim) {
  size_t pos_dim = dimname_to_position(self, dim);
  return self.sizes()[pos_dim];
}

int64_t stride(const Tensor& self, Dimname dim) {
  size_t pos_dim = dimname_to_position(self, dim);
  return self.strides()[pos_dim];
}

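// Coarse check of whether cuDNN can be used for `self`: cuDNN must be enabled
// in the global context and compiled in, and `self` must be a non-empty CUDA
// tensor with a double/float/half dtype. Per-operator restrictions are not
// checked here.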
bool cudnn_is_acceptable(const TensorBase& self) {
  if (!globalContext().userEnabledCuDNN()) return false;
  if (!self.is_cuda()) return false;
  auto st = self.scalar_type();
  if (!(st == kDouble || st == kFloat || st == kHalf)) return false;
  if (!detail::getCUDAHooks().compiledWithCuDNN()) return false;
  // cuDNN functions like grid_sampler return CUDNN_STATUS_BAD_PARAM on empty
  // tensors. Some cuDNN functions may actually support empty tensors, but the
  // native/THNN kernels shouldn't be much slower either way, because the
  // output is also likely empty.
  if (self.sym_numel() == 0) return false;
  // NB: In the old Python code, there was also a test to see if the
  // cuDNN library was actually dynamically linked or not. It's not clear
  // we can test that from here.
  return true;
}

bool cudnn_is_acceptable(const Tensor& self) {
  return cudnn_is_acceptable(static_cast<const TensorBase&>(self));
}

Tensor& detach_(Tensor& self) {
  // This just exists to give us a hook in VariableType and an entry in
  // Declarations.yaml.
  // AT_ERROR("detach_ is not implemented for Tensor");
  return self;
}

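// Returns `self` unchanged when it is already contiguous in the requested
// memory format; otherwise materializes a copy via clone(). Note that
// MemoryFormat::Preserve is rejected here. Illustrative usage (tensor names
// are just examples):
//
//   auto nchw = at::rand({1, 3, 8, 8});
//   auto nhwc = nchw.contiguous(at::MemoryFormat::ChannelsLast);  // copies
//   auto same = nchw.contiguous();  // no copy, returns `nchw` itself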
Tensor contiguous(const Tensor& self, MemoryFormat memory_format) {
  if (self.is_contiguous(memory_format)) {
    return self;
  }
  TORCH_CHECK(
      memory_format != MemoryFormat::Preserve,
      "preserve memory format is unsupported by the contiguous operator");

  return self.clone(memory_format);
}

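// True iff `self` and `src` share the same StorageImpl, the same storage
// offset, and identical sizes and strides, i.e. they view the exact same
// memory with the exact same geometry. Illustrative usage (names are just
// examples):
//
//   auto a = at::rand({2, 3});
//   auto b = at::empty({0}).set_(a);     // b now aliases a's storage/geometry
//   bool same = b.is_set_to(a);          // true
//   bool diff = a.is_set_to(a.clone());  // false: different storage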
bool is_set_to(const Tensor& self, const Tensor& src) {
  if (self.storage().unsafeGetStorageImpl() == src.storage().unsafeGetStorageImpl() &&
      self.storage_offset() == src.storage_offset() &&
      self.dim() == src.dim()) {
    for (const auto d : c10::irange(self.dim())) {
      if (self.size(d) != src.size(d) || self.stride(d) != src.stride(d)) {
        return false;
      }
    }
    return true;
  }
  return false;
}

} // namespace at::native