#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}

// Python bindings for torch.* functions implemented through ATen.
//
// The functions are bound as static methods on a class
// torch._C._VariableFunctions which is also aliased as Variable._torch
// and also copied into 'torch' module.
//
// NOTE(review): this file is a code-generation template — the `${...}`
// placeholders below are substituted by the autograd codegen before
// compilation (presumably via Python string.Template, which treats
// `$name` and `${name}` identically — TODO confirm against the generator).

#include <Python.h>

// Undefine the copysign macro so that at::copysign works as intended with MSVC
// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196
#ifdef _MSC_VER
#undef copysign
#endif // _MSC_VER

#include "torch/csrc/autograd/python_torch_functions.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/Dtype.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pybind.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/tensor_layouts.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/jit/frontend/tracer.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/device_lazy_init.h"
#include "torch/csrc/autograd/generated/python_return_types.h"

#include <ATen/core/Tensor.h>

// When building with per-operator headers, the codegen substitutes the exact
// set of <ATen/ops/*.h> includes this shard needs; otherwise pull in the
// monolithic ATen functions header.
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif

#include <functional>
#include <initializer_list>
#include <stdexcept>
#include <utility>

using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using at::ArrayRef;

using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;

// NOTE: See [Sharded File] comment in VariableType

namespace torch::autograd {

// generated forward declarations start here

${py_forwards}

// Method table for the torch.* functions generated into THIS shard only; the
// full table is assembled at runtime by the gatherTorchFunctions* calls below.
static PyMethodDef torch_functions_shard[] = {
  ${py_method_defs}
};

// Appends this shard's PyMethodDef entries to the caller-owned aggregate
// vector. `${shard_id}` is replaced by the codegen so each generated shard
// exports a uniquely named gather function.
// NOTE(review): entries are appended by value, so `torch_functions` does not
// alias the static array after this call.
void gatherTorchFunctions${shard_id}(std::vector<PyMethodDef> &torch_functions) {
  // Element count of the static table, computed at compile time.
  constexpr size_t num_functions = sizeof(torch_functions_shard) / sizeof(torch_functions_shard[0]);
  torch_functions.insert(
    torch_functions.end(),
    torch_functions_shard,
    torch_functions_shard + num_functions);
}

// generated methods start here

${py_methods}

} // namespace torch::autograd