// xref: /aosp_15_r20/external/pytorch/torch/csrc/cuda/MemPool.cpp (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
#include <torch/csrc/python_headers.h>

#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/utils/pybind.h>

#include <c10/cuda/CUDACachingAllocator.h>

template <typename T>
using shared_ptr_class_ = py::class_<T, std::shared_ptr<T>>;

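// Registers the Python bindings for c10::cuda::MemPool and
// c10::cuda::MemPoolContext on the module handle passed in (presumably
// torch._C, given the torch_C_m name below), exposing them as the private
// classes _MemPool and _MemPoolContext.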
void THCPMemPool_init(PyObject* module) {
  auto torch_C_m = py::handle(module).cast<py::module>();
  shared_ptr_class_<::c10::cuda::MemPool>(torch_C_m, "_MemPool")
      .def(py::init<c10::cuda::CUDACachingAllocator::CUDAAllocator*, bool>())
      .def_property_readonly("id", &::c10::cuda::MemPool::id)
      .def_property_readonly("allocator", &::c10::cuda::MemPool::allocator);
  shared_ptr_class_<::c10::cuda::MemPoolContext>(torch_C_m, "_MemPoolContext")
      .def(py::init<c10::cuda::MemPool*>())
      .def_static(
          "active_pool", &::c10::cuda::MemPoolContext::getActiveMemPool);
}
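
// Illustrative sketch (not part of this file's build) of the C++ API these
// bindings wrap. The argument meanings (allocator, is_user_created) and the
// "active pool" behavior are assumptions based on the declarations pulled in
// from c10/cuda/CUDACachingAllocator.h; only the constructor and static
// signatures are confirmed by the py::init/def_static lines above.
//
//   c10::cuda::MemPool pool(/*allocator=*/nullptr, /*is_user_created=*/true);
//   c10::cuda::MemPoolContext ctx(&pool);  // presumably makes `pool` active
//   auto* active = c10::cuda::MemPoolContext::getActiveMemPool();
//
// The Python side sees these types as torch._C._MemPool and
// torch._C._MemPoolContext, with read-only `id` and `allocator` properties
// and a static `active_pool()` accessor, as bound above.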