#pragma once

// ${generated_comment}

#include <ATen/core/Tensor.h>
#include <ATen/Context.h>

#include <c10/util/intrusive_ptr.h>

#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>

#include <cstddef> // for size_t
#include <functional> // for function
#include <memory> // for unique_ptr
#include <optional> // for std::optional
#include <string>
#include <vector>

namespace at {
struct Quantizer;
};

namespace torch { namespace autograd {

using Variable = at::Tensor;
using at::Context;
using at::Device;
using at::Dimname;
using at::DimnameList;
using at::Generator;
using at::IntArrayRef;
using at::MemoryFormat;
using at::QScheme;
using at::Scalar;
using at::ScalarType;
using at::Storage;
using at::Tensor;
using at::TensorList;
using at::TensorOptions;
using at::Quantizer;
// This is a temporary typedef to enable Quantizer in the aten native function
// API; we'll remove it once the Quantizer class is actually exposed to the
// frontend.
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
using std::optional;

namespace VariableType {
// Return the DeprecatedTypeProperties for every scalar type on the given
// backend.
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allXPUTypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allPrivateUser1Types();

// Check that a required tensor argument is defined, reporting the argument's
// name and position in the error message otherwise.
at::Tensor & unpack(Tensor & t, const char * name, int pos);
const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
// Variant for optional tensor arguments, where an undefined tensor is
// permitted.
at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
// Unpack a list of tensor arguments.
std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
};

}} // namespace torch::autograd
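
// ---------------------------------------------------------------------------
// Illustrative sketch only (kept as a comment so it does not become part of
// the header): how a generated VariableType kernel might call the unpack
// helpers declared above. The operator name `example_generated_add` and its
// body are hypothetical; the real bodies are emitted by the autograd code
// generator.
//
//   at::Tensor example_generated_add(const at::Tensor& self, const at::Tensor& other) {
//     // unpack validates that each required tensor argument is defined and
//     // reports the argument name and position on failure.
//     const at::Tensor& self_ = torch::autograd::VariableType::unpack(self, "self", 0);
//     const at::Tensor& other_ = torch::autograd::VariableType::unpack(other, "other", 1);
//     return self_ + other_;
//   }
// ---------------------------------------------------------------------------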