// torch/csrc/jit/backends/backend_debug_handler.cpp
#include <torch/csrc/jit/backends/backend_debug_handler.h>

#include <stack>

namespace torch {
namespace jit {

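// Process-wide counter used to hand out unique debug handles; it is a static
// member, so handles are unique across all recorder instances.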
std::atomic<DebugHandleType> BackendDebugInfoRecorder::unique_debug_handle_{0};

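// Records the node's source range, kind, and inlined call stack (when present)
// under a fresh debug handle, and returns that handle.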
int64_t BackendDebugInfoRecorder::getNextDebugHandle(const Node* node) {
  InlinedCallStackPtr cs_ptr;
  if (node->callstack().has_value()) {
    cs_ptr = node->callstack().value();
  } else {
    cs_ptr = c10::intrusive_ptr<InlinedCallStack>();
  }
  DebugHandleType debug_handle = unique_debug_handle_;
  const SourceRange& range = node->sourceRange();
  handles_to_inlined_callstack_ptrs_[debug_handle] =
      std::make_tuple(range, node->kind().toQualString(), cs_ptr);
  // The increment uses sequentially consistent memory order.
  // Not trying to optimize this for performance for now.
  unique_debug_handle_++;
  return debug_handle;
}

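// Returns the accumulated map from debug handle to
// (source range, node kind, inlined call stack) once recording is finished.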
BackendDebugInfoMapType BackendDebugInfoRecorder::stopRecording() {
  // Note that this returns by copy. Since InlinedCallStackPtrs are
  // intrusive pointers, the copy bumps their refcounts. Not performant,
  // but this is not intended to be used on a perf-critical path.
  // An alternative would be to move, but that would be destructive.
  return handles_to_inlined_callstack_ptrs_;
}

} // namespace jit
} // namespace torch