#pragma once

#include <torch/csrc/Export.h>
#include <cstdint>
#include <optional>
#include <string>

namespace torch::cuda {

// C++-only versions of these; for Python, use the versions defined in
// cuda/Module.cpp, which also record Python state.
TORCH_CUDA_CU_API void _record_memory_history(
    bool enabled,
    bool record_context = true,
    int64_t trace_alloc_max_entries = 1,
    bool trace_alloc_record_context = false,
    bool record_cpp_context = false);

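// Example of the boolean form above (a sketch; the 10000-entry limit and the
// workload in between are illustrative, not prescribed by this header):
//
//   // Record up to 10000 allocation events, capturing allocation contexts
//   // and C++ stack frames.
//   torch::cuda::_record_memory_history(
//       /*enabled=*/true,
//       /*record_context=*/true,
//       /*trace_alloc_max_entries=*/10000,
//       /*trace_alloc_record_context=*/true,
//       /*record_cpp_context=*/true);
//
//   // ... run CUDA allocations ...
//
//   // Stop recording.
//   torch::cuda::_record_memory_history(/*enabled=*/false);
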
TORCH_CUDA_CU_API void _record_memory_history(
    std::optional<std::string> enabled = "all",
    std::optional<std::string> context = "all",
    const std::string& stacks = "all",
    size_t max_entries = SIZE_MAX);

TORCH_CUDA_CU_API std::string _memory_snapshot_pickled();

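// Example combining the string-based form with _memory_snapshot_pickled()
// (a minimal sketch: dump_cuda_memory_snapshot and the output path are
// hypothetical, and disabling via std::nullopt is an assumption that mirrors
// the Python torch.cuda.memory._record_memory_history(None) behavior):
//
//   #include <torch/csrc/cuda/memory_snapshot.h>
//   #include <fstream>
//
//   void dump_cuda_memory_snapshot(const std::string& path) {
//     // Record allocation history with stack traces, keeping at most
//     // 100000 trace entries. Explicit std::string arguments keep overload
//     // resolution away from the boolean form.
//     torch::cuda::_record_memory_history(
//         /*enabled=*/std::string("all"),
//         /*context=*/std::string("all"),
//         /*stacks=*/std::string("all"),
//         /*max_entries=*/100000);
//
//     // ... run the CUDA workload of interest ...
//
//     // Serialize the current snapshot (a pickled byte string, per the
//     // function name) and write it to disk.
//     std::ofstream out(path, std::ios::binary);
//     out << torch::cuda::_memory_snapshot_pickled();
//
//     // Assumption: std::nullopt disables recording.
//     torch::cuda::_record_memory_history(std::nullopt);
//   }
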
} // namespace torch::cuda