#pragma once

// ${generated_comment}

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml,            \
  meaning the file will need to be re-compiled every time an operator     \
  is changed or added. Consider if your change would be better placed in  \
  another file, or if a more specific header might achieve the same goal. \
  See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all PyTorch operators, meaning the     \
  file will need to be re-compiled every time an operator is changed or added. \
  Consider including a specific operator from <ATen/ops/{my_operator}.h> and   \
  see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated header files include the definitions of
// every single operator in PyTorch. Unfortunately this means every
// time an operator signature is updated or changed in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies and improve incremental
// build times for all PyTorch developers, these headers are split
// into per-operator headers in the `ATen/ops` folder. This limits
// incremental builds to only changes to methods of `Tensor`, or files
// that use the specific operator being changed. With `at::sum` as an
// example, you should include
//
//   <ATen/ops/sum.h>               // instead of ATen/Functions.h
//   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
//   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
//
// However, even if you're careful to use this in your own code,
// `Functions.h` might be included indirectly through another header
// without you realising. To avoid this, you can add
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way, any time one of the
// non-specific headers is included, the compiler will error out.
//
// Also, be aware that the `ops` headers are not available in all build
// configurations (namely fb-internal), so you must guard these
// includes with `#ifdef AT_PER_OPERATOR_HEADERS`, e.g.
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
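//
// Putting these pieces together, a translation unit that only needs
// `at::sum` might look like the following sketch (the file name and
// surrounding code are illustrative only):
//
//   // my_op.cpp (hypothetical)
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//   #include <ATen/core/Tensor.h>
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
//
//   at::Tensor total(const at::Tensor& t) {
//     return at::sum(t);
//   }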

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <c10/core/SymInt.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>
#include <c10/util/OptionalArrayRef.h>

#include <ATen/ops/from_blob.h>
#include <ATen/ops/tensor.h>

${Functions_includes}

namespace at {

${Functions_declarations}

// Special C++ only overloads for std()-like functions (see gh-40287).
// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef,
// so, for example, std(0) would select the std(unbiased=false) overload.
TORCH_API inline Tensor var(const Tensor& self, int dim) {
  return at::var(self, IntArrayRef{dim});
}
TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
  return at::var_mean(self, IntArrayRef{dim});
}
TORCH_API inline Tensor std(const Tensor& self, int dim) {
  return at::std(self, IntArrayRef{dim});
}
TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
  return at::std_mean(self, IntArrayRef{dim});
}
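
// For example (a sketch; `t` is illustrative): on a 2-D tensor,
// `at::std(t, 0)` now reduces over dim 0 instead of selecting the
// `std(bool unbiased)` overload:
//
//   Tensor t = at::rand({3, 4});
//   Tensor s = at::std(t, 0);  // std along dim 0, result shape {4}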

inline int64_t numel(const Tensor& tensor) {
  return tensor.numel();
}

inline int64_t size(const Tensor& tensor, int64_t dim) {
  return tensor.size(dim);
}

inline int64_t stride(const Tensor& tensor, int64_t dim) {
  return tensor.stride(dim);
}

inline bool is_complex(const Tensor& tensor) {
  return tensor.is_complex();
}

inline bool is_floating_point(const Tensor& tensor) {
  return tensor.is_floating_point();
}

inline bool is_signed(const Tensor& tensor) {
  return tensor.is_signed();
}

inline bool is_inference(const Tensor& tensor) {
  return tensor.is_inference();
}

inline bool _is_zerotensor(const Tensor& tensor) {
  return tensor._is_zerotensor();
}

inline bool is_conj(const Tensor& tensor) {
  return tensor.is_conj();
}

inline Tensor conj(const Tensor& tensor) {
  return tensor.conj();
}

inline bool is_neg(const Tensor& tensor) {
  return tensor.is_neg();
}
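
// The functions above are free-function forms of the corresponding
// Tensor methods. A minimal usage sketch (values assume the shape
// shown):
//
//   Tensor t = at::rand({2, 3});
//   int64_t n = at::numel(t);           // 6, same as t.numel()
//   int64_t d = at::size(t, 1);         // 3, same as t.size(1)
//   bool f = at::is_floating_point(t);  // true for a float tensor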

} // namespace at