1 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
2 #include <ATen/core/Tensor.h>
3
4 #ifndef AT_PER_OPERATOR_HEADERS
5 #include <ATen/Functions.h>
6 #include <ATen/NativeFunctions.h>
7 #else
8 #include <ATen/ops/_backward_native.h>
9 #include <ATen/ops/_fw_primal_native.h>
10 #include <ATen/ops/_version_native.h>
11 #include <ATen/ops/alias.h>
12 #include <ATen/ops/data_native.h>
13 #include <ATen/ops/is_leaf_native.h>
14 #include <ATen/ops/output_nr_native.h>
15 #include <ATen/ops/requires_grad_native.h>
16 #include <ATen/ops/retain_grad_native.h>
17 #include <ATen/ops/retains_grad_native.h>
18 #include <ATen/ops/set_data_native.h>
19 #include <ATen/ops/zeros_like_ops.h>
20 #endif
21
// The stubs in here are used by dynamic dispatch. They simply redirect
// everything to the Tensor methods we manually bind in TensorBody.h.
24
25 namespace at::native {
26
_backward(const Tensor & self,TensorList inputs,const std::optional<Tensor> & gradient_opt,std::optional<bool> keep_graph,bool create_graph)27 void _backward(const Tensor& self, TensorList inputs, const std::optional<Tensor>& gradient_opt, std::optional<bool> keep_graph, bool create_graph) {
28 return self._backward(inputs, gradient_opt, keep_graph, create_graph);
29 }
30
set_data(Tensor & self,const Tensor & new_data)31 void set_data(Tensor& self, const Tensor& new_data) {
32 return self.set_data(new_data);
33 }
34
// Dispatch stub: returns whatever the manually-bound Tensor::data yields.
Tensor data(const Tensor& self) {
  Tensor result = self.data();
  return result;
}
38
is_leaf(const Tensor & self)39 bool is_leaf(const Tensor& self) {
40 return self.is_leaf();
41 }
42
output_nr(const Tensor & self)43 int64_t output_nr(const Tensor& self) {
44 return self.output_nr();
45 }
46
_version(const Tensor & self)47 int64_t _version(const Tensor& self) {
48 return self._version();
49 }
50
requires_grad_(Tensor & self,bool _requires_grad)51 Tensor& requires_grad_(Tensor& self, bool _requires_grad) {
52 self.requires_grad_(_requires_grad);
53 return self;
54 }
55
retain_grad(Tensor & self)56 void retain_grad(Tensor& self) {
57 return self.retain_grad();
58 }
59
retains_grad(const Tensor & self)60 bool retains_grad(const Tensor& self) {
61 return self.retains_grad();
62 }
63
// We expect this code to only be reached in inference mode and when all inputs are inference tensors.
// In that regime there is no forward-AD tangent to strip, so the primal of `self`
// is simply an alias of `self` itself; `level` is accepted for schema
// compatibility but not consulted here.
// NOTE: the condition is written inline in TORCH_INTERNAL_ASSERT on purpose —
// the macro stringifies it into the error message, so do not hoist it.
Tensor _fw_primal(const Tensor& self, int64_t level) {
  TORCH_INTERNAL_ASSERT(
      InferenceMode::is_enabled() && self.is_inference(),
      "Expected this method to only be reached in inference mode and when all the "
      "inputs are inference tensors. You should NOT call this method directly as "
      "native::_fw_primal. Please use the dispatcher, i.e., at::_fw_primal. Please "
      "file an issue if you come across this error otherwise.");
  return at::alias(self);
}
74
75 } // namespace at::native
76