1 /*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under the BSD-style license found in the
6 * LICENSE file in the root directory of this source tree.
7 */
8
#include <executorch/devtools/etdump/etdump_flatcc.h>

#include <cstdint>
#include <cstdlib>
#include <cstring>

#include <executorch/devtools/etdump/emitter.h>
#include <executorch/devtools/etdump/etdump_schema_flatcc_builder.h>
#include <executorch/devtools/etdump/etdump_schema_flatcc_reader.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/runtime/platform/assert.h>

#include <flatcc/flatcc_types.h>
21
22 using ::exec_aten::Tensor;
23 using ::executorch::runtime::AllocatorID;
24 using ::executorch::runtime::ArrayRef;
25 using ::executorch::runtime::ChainID;
26 using ::executorch::runtime::DebugHandle;
27 using ::executorch::runtime::DelegateDebugIdType;
28 using ::executorch::runtime::EValue;
29 using ::executorch::runtime::EventTracerEntry;
30 using ::executorch::runtime::LoggedEValueType;
31 using ::executorch::runtime::Span;
32 using ::executorch::runtime::Tag;
33
34 namespace executorch {
35 namespace etdump {
36
37 namespace {
38
// Maps a runtime ScalarType onto the matching etdump flatbuffer ScalarType
// enum value. Aborts via ET_CHECK_MSG for scalar types the ETDump schema
// does not cover yet.
executorch_flatbuffer_ScalarType_enum_t get_flatbuffer_scalar_type(
    exec_aten::ScalarType tensor_scalar_type) {
  switch (tensor_scalar_type) {
    case exec_aten::ScalarType::Byte:
      return executorch_flatbuffer_ScalarType_BYTE;
    case exec_aten::ScalarType::Char:
      return executorch_flatbuffer_ScalarType_CHAR;
    case exec_aten::ScalarType::Short:
      return executorch_flatbuffer_ScalarType_SHORT;
    case exec_aten::ScalarType::Float:
      return executorch_flatbuffer_ScalarType_FLOAT;
    case exec_aten::ScalarType::Int:
      return executorch_flatbuffer_ScalarType_INT;
    case exec_aten::ScalarType::Long:
      return executorch_flatbuffer_ScalarType_LONG;
    case exec_aten::ScalarType::Double:
      return executorch_flatbuffer_ScalarType_DOUBLE;
    case exec_aten::ScalarType::Bool:
      return executorch_flatbuffer_ScalarType_BOOL;
    case exec_aten::ScalarType::Bits16:
      return executorch_flatbuffer_ScalarType_BITS16;
    case exec_aten::ScalarType::UInt16:
      return executorch_flatbuffer_ScalarType_UINT16;
    default:
      // %hhd matches the cast-to-char value of the enum.
      ET_CHECK_MSG(
          0,
          "This ScalarType = %hhd is not yet supported in ETDump",
          static_cast<char>(tensor_scalar_type));
  }
}
69
add_tensor_entry(flatcc_builder_t * builder_,const exec_aten::Tensor & tensor,long offset)70 etdump_Tensor_ref_t add_tensor_entry(
71 flatcc_builder_t* builder_,
72 const exec_aten::Tensor& tensor,
73 long offset) {
74 etdump_Tensor_start(builder_);
75
76 etdump_Tensor_scalar_type_add(
77 builder_, get_flatbuffer_scalar_type(tensor.scalar_type()));
78 etdump_Tensor_sizes_start(builder_);
79
80 for (auto dim : tensor.sizes()) {
81 int64_t cast_dim = static_cast<int64_t>(dim);
82 etdump_Tensor_sizes_push(builder_, &cast_dim);
83 }
84 etdump_Tensor_sizes_end(builder_);
85
86 etdump_Tensor_strides_start(builder_);
87 for (auto dim : tensor.strides()) {
88 int64_t cast_dim = static_cast<int64_t>(dim);
89 etdump_Tensor_strides_push(builder_, &cast_dim);
90 }
91 etdump_Tensor_strides_end(builder_);
92 etdump_Tensor_offset_add(builder_, offset);
93
94 return etdump_Tensor_end(builder_);
95 }
96
// Rounds `ptr` up to the next multiple of `alignment` and returns it as a
// uint8_t*. `alignment` must be a power of two; an already-aligned pointer
// is returned unchanged.
static uint8_t* alignPointer(void* ptr, size_t alignment) {
  // Do the bit math on an unsigned type: (addr | (alignment - 1)) + 1 on a
  // signed intptr_t relies on implementation-defined representation for
  // addresses with the top bit set.
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  if ((addr & (alignment - 1)) == 0) {
    // Already aligned.
    return reinterpret_cast<uint8_t*>(addr);
  }
  addr = (addr | (alignment - 1)) + 1;
  return reinterpret_cast<uint8_t*>(addr);
}
106
107 } // namespace
108
109 // Constructor implementation
ETDumpGen(Span<uint8_t> buffer)110 ETDumpGen::ETDumpGen(Span<uint8_t> buffer) {
111 constexpr size_t max_alloc_buf_size = 128 * 1024;
112
113 // Initialize the flatcc builder_ using the buffer and buffer size.
114
115 if (buffer.data() != nullptr) {
116 builder_ = (struct flatcc_builder*)alignPointer(buffer.data(), 64);
117 uintptr_t buffer_with_builder =
118 (uintptr_t)alignPointer(builder_ + sizeof(struct flatcc_builder), 64);
119 size_t buffer_size = buffer.size() -
120 (size_t)(buffer_with_builder - (uintptr_t)buffer.data());
121 alloc_.set_buffer(
122 (uint8_t*)buffer_with_builder,
123 buffer_size,
124 (size_t)((buffer_size / 4 > max_alloc_buf_size) ? max_alloc_buf_size
125 : buffer_size / 4));
126 internal::etdump_flatcc_custom_init(builder_, &alloc_);
127 } else {
128 builder_ = (struct flatcc_builder*)malloc(sizeof(struct flatcc_builder));
129 ET_CHECK_MSG(
130 builder_ != nullptr, "Failed to allocate memory for flatcc builder_.");
131 flatcc_builder_init(builder_);
132 }
133 reset();
134 }
135
~ETDumpGen()136 ETDumpGen::~ETDumpGen() {
137 flatcc_builder_clear(builder_);
138 if (!is_static_etdump()) {
139 free(builder_);
140 }
141 }
142
// Resets the generator to a fresh state: clears the builder and opens a new
// ETDump root with its run_data vector started, ready for the first block.
void ETDumpGen::reset() {
  state_ = State::Init;
  num_blocks_ = 0;
  flatcc_builder_reset(builder_);
  // The start/push call sequence below follows the flatcc builder protocol
  // and must not be reordered.
  flatbuffers_buffer_start(builder_, etdump_ETDump_file_identifier);
  etdump_ETDump_start_as_root_with_size(builder_);
  etdump_ETDump_version_add(builder_, ETDUMP_VERSION);
  etdump_ETDump_run_data_start(builder_);
  etdump_ETDump_run_data_push_start(builder_);
}
153
// Opens a new named RunData block. Closes an in-progress events table
// (State::AddingEvents) or restarts the whole dump (State::Done) first, and
// rolls over to a fresh run_data entry when a previous block exists.
void ETDumpGen::create_event_block(const char* name) {
  if (state_ == State::AddingEvents) {
    // The previous block's events table is still open; close it.
    etdump_RunData_events_end(builder_);
  } else if (state_ == State::Done) {
    // A finished dump must be fully reset before new blocks can be added.
    reset();
  }
  if (num_blocks_ > 0) {
    // Finish the previous run_data entry and start the next one.
    etdump_ETDump_run_data_push_end(builder_);
    etdump_ETDump_run_data_push_start(builder_);
  }
  ++num_blocks_;
  etdump_RunData_name_create_strn(builder_, name, strlen(name));
  // -1 means "no bundled input associated with this block".
  if (bundled_input_index_ != -1) {
    etdump_RunData_bundled_input_index_add(builder_, bundled_input_index_);
  }
  state_ = State::BlockCreated;
}
171
// Interns `name` in the flatbuffer under construction and returns its
// string reference (usable wherever a flatbuffers string ref is expected).
int64_t ETDumpGen::create_string_entry(const char* name) {
  return flatbuffers_string_create_str(builder_, name);
}
175
// ETDumpGen moves through the following states: State::Init,
// State::BlockCreated, State::AddingAllocators, State::AddingEvents and
// State::Done. Right after boot-up ETDumpGen is in State::Init. At this
// point we have the option of adding allocators that we want to track. Once
// we've completed adding the allocators to track, the allocators table is
// closed and ETDumpGen moves to State::AddingEvents, after which events can
// be added freely.
// The reason this state machine is needed inside ETDumpGen is that once a
// flatcc table of one type has been closed and a table of a different type
// has been opened after it, a table of the first type can never be opened
// again. Concretely: once the allocators table is closed and entries are
// being pushed to the events table, nothing more can be pushed to the
// allocators table.
// Transitions the state machine into State::AddingEvents if it is not there
// already, closing the allocators table when one is still open. Aborts when
// events may not legally be added from the current state (see the
// state-machine comment above).
void ETDumpGen::check_ready_to_add_events() {
  if (state_ != State::AddingEvents) {
    ET_CHECK_MSG(
        (state_ == State::AddingAllocators || state_ == State::BlockCreated),
        "ETDumpGen in an invalid state. Cannot add new events now.");
    if (state_ == State::AddingAllocators) {
      etdump_RunData_allocators_end(builder_);
    }
    etdump_RunData_events_start(builder_);
    state_ = State::AddingEvents;
  }
}
201
start_profiling(const char * name,ChainID chain_id,DebugHandle debug_handle)202 EventTracerEntry ETDumpGen::start_profiling(
203 const char* name,
204 ChainID chain_id,
205 DebugHandle debug_handle) {
206 EventTracerEntry prof_entry;
207 prof_entry.event_id = name != nullptr ? create_string_entry(name) : -1;
208 prof_entry.delegate_event_id_type = DelegateDebugIdType::kNone;
209
210 if (chain_id == -1) {
211 prof_entry.chain_id = chain_id_;
212 prof_entry.debug_handle = debug_handle_;
213 } else {
214 prof_entry.chain_id = chain_id;
215 prof_entry.debug_handle = debug_handle;
216 }
217 prof_entry.start_time = et_pal_current_ticks();
218 return prof_entry;
219 }
220
221 // TODO: Update all occurrences of the ProfileEvent calls once the
222 // EventTracerEntry struct is updated.
// Begins a delegate profiling span identified either by a string `name` or
// by an integer `delegate_debug_index` — never both.
EventTracerEntry ETDumpGen::start_profiling_delegate(
    const char* name,
    DebugHandle delegate_debug_index) {
  // Exactly one of the two identifiers must be valid (boolean XOR).
  ET_CHECK_MSG(
      (name == nullptr) ^ (delegate_debug_index == -1),
      "Only name or delegate_debug_index can be valid. Check DelegateMappingBuilder documentation for more details.");
  check_ready_to_add_events();
  EventTracerEntry prof_entry;
  // Records which form event_id takes when the event is serialized in
  // end_profiling_delegate().
  DelegateDebugIdType delegate_event_id_type =
      name == nullptr ? DelegateDebugIdType::kInt : DelegateDebugIdType::kStr;
  prof_entry.delegate_event_id_type = delegate_event_id_type;
  prof_entry.chain_id = chain_id_;
  prof_entry.debug_handle = debug_handle_;
  // event_id holds either the interned string ref (string-identified) or
  // the raw integer index (integer-identified).
  prof_entry.event_id = delegate_debug_index == static_cast<unsigned int>(-1)
      ? create_string_entry(name)
      : delegate_debug_index;
  prof_entry.start_time = et_pal_current_ticks();
  return prof_entry;
}
242
// Ends a delegate profiling span opened by start_profiling_delegate() and
// serializes it as a ProfileEvent, optionally attaching opaque `metadata`
// bytes.
void ETDumpGen::end_profiling_delegate(
    EventTracerEntry event_tracer_entry,
    const void* metadata,
    size_t metadata_len) {
  // Capture the end timestamp first so event-logging overhead is excluded.
  et_timestamp_t end_time = et_pal_current_ticks();
  check_ready_to_add_events();

  // Start building the ProfileEvent entry.
  etdump_ProfileEvent_start(builder_);
  etdump_ProfileEvent_start_time_add(builder_, event_tracer_entry.start_time);
  etdump_ProfileEvent_end_time_add(builder_, end_time);
  // NOTE(review): this uses the tracker's current chain_id_/debug_handle_
  // members, not the values captured in event_tracer_entry — confirm this
  // is intentional (end_profiling() uses the entry's values).
  etdump_ProfileEvent_chain_index_add(builder_, chain_id_);
  etdump_ProfileEvent_instruction_id_add(builder_, debug_handle_);
  // Delegate debug identifier can either be of a string type or an integer
  // type. If it's a string type then it's a value of type
  // flatbuffers_string_ref_t type, whereas if it's an integer type then we
  // write the integer value directly.
  if (event_tracer_entry.delegate_event_id_type == DelegateDebugIdType::kInt) {
    etdump_ProfileEvent_delegate_debug_id_int_add(
        builder_, event_tracer_entry.event_id);
  } else {
    etdump_ProfileEvent_delegate_debug_id_str_add(
        builder_, event_tracer_entry.event_id);
  }
  // Metadata is copied into the buffer as a uint8 vector.
  flatbuffers_uint8_vec_ref_t vec_ref = flatbuffers_uint8_vec_create_pe(
      builder_, (const uint8_t*)metadata, metadata_len);
  etdump_ProfileEvent_delegate_debug_metadata_add(builder_, vec_ref);
  etdump_ProfileEvent_ref_t id = etdump_ProfileEvent_end(builder_);
  etdump_RunData_events_push_start(builder_);
  etdump_Event_profile_event_add(builder_, id);
  etdump_RunData_events_push_end(builder_);
}
275
// Logs a complete delegate profiling event in one call, with start/end
// timestamps supplied by the caller (for events timed inside the delegate
// itself). Identified either by `name` or by `delegate_debug_index`.
void ETDumpGen::log_profiling_delegate(
    const char* name,
    DebugHandle delegate_debug_index,
    et_timestamp_t start_time,
    et_timestamp_t end_time,
    const void* metadata,
    size_t metadata_len) {
  // Exactly one of the two identifiers must be valid (boolean XOR).
  ET_CHECK_MSG(
      (name == nullptr) ^ (delegate_debug_index == -1),
      "Only name or delegate_debug_index can be valid. Check DelegateMappingBuilder documentation for more details.");
  check_ready_to_add_events();
  // string_id == -1 signals the integer-identified case below.
  int64_t string_id = name != nullptr ? create_string_entry(name) : -1;
  etdump_ProfileEvent_start(builder_);
  etdump_ProfileEvent_start_time_add(builder_, start_time);
  etdump_ProfileEvent_end_time_add(builder_, end_time);
  etdump_ProfileEvent_chain_index_add(builder_, chain_id_);
  etdump_ProfileEvent_instruction_id_add(builder_, debug_handle_);
  if (string_id == -1) {
    etdump_ProfileEvent_delegate_debug_id_int_add(
        builder_, delegate_debug_index);
  } else {
    etdump_ProfileEvent_delegate_debug_id_str_add(builder_, string_id);
  }
  // Metadata is copied into the buffer as a uint8 vector.
  flatbuffers_uint8_vec_ref_t vec_ref = flatbuffers_uint8_vec_create_pe(
      builder_, (const uint8_t*)metadata, metadata_len);
  etdump_ProfileEvent_delegate_debug_metadata_add(builder_, vec_ref);
  etdump_ProfileEvent_ref_t id = etdump_ProfileEvent_end(builder_);
  etdump_RunData_events_push_start(builder_);
  etdump_Event_profile_event_add(builder_, id);
  etdump_RunData_events_push_end(builder_);
}
307
// Logs a single intermediate Tensor output produced by a delegate.
// Forwards to the templated helper.
void ETDumpGen::log_intermediate_output_delegate(
    const char* name,
    DebugHandle delegate_debug_index,
    const Tensor& output) {
  log_intermediate_output_delegate_helper(name, delegate_debug_index, output);
}
314
// Logs a list of intermediate Tensor outputs produced by a delegate.
// Forwards to the templated helper.
void ETDumpGen::log_intermediate_output_delegate(
    const char* name,
    DebugHandle delegate_debug_index,
    const ArrayRef<Tensor> output) {
  log_intermediate_output_delegate_helper(name, delegate_debug_index, output);
}
321
// Logs an intermediate int output produced by a delegate. Forwards to the
// templated helper.
void ETDumpGen::log_intermediate_output_delegate(
    const char* name,
    DebugHandle delegate_debug_index,
    const int& output) {
  log_intermediate_output_delegate_helper(name, delegate_debug_index, output);
}
328
// Logs an intermediate bool output produced by a delegate. Forwards to the
// templated helper.
void ETDumpGen::log_intermediate_output_delegate(
    const char* name,
    DebugHandle delegate_debug_index,
    const bool& output) {
  log_intermediate_output_delegate_helper(name, delegate_debug_index, output);
}
335
// Logs an intermediate double output produced by a delegate. Forwards to
// the templated helper.
void ETDumpGen::log_intermediate_output_delegate(
    const char* name,
    DebugHandle delegate_debug_index,
    const double& output) {
  log_intermediate_output_delegate_helper(name, delegate_debug_index, output);
}
342
// Shared implementation behind the log_intermediate_output_delegate()
// overloads. Serializes one intermediate delegate output (Tensor,
// ArrayRef<Tensor>, int, double or bool) as a DebugEvent. A debug buffer
// must have been installed via set_debug_buffer(); tensor payloads are
// copied there and referenced by offset in the flatbuffer.
template <typename T>
void ETDumpGen::log_intermediate_output_delegate_helper(
    const char* name,
    DebugHandle delegate_debug_index,
    const T& output) {
  // Exactly one of the two identifiers must be valid (boolean XOR).
  ET_CHECK_MSG(
      (name == nullptr) ^ (delegate_debug_index == -1),
      "Only name or delegate_debug_index can be valid. Check DelegateMappingBuilder documentation for more details.");
  if (debug_buffer_.empty()) {
    // ET_CHECK_MSG(0, ...) aborts, so the return below is normally
    // unreachable; it is kept as a safeguard.
    ET_CHECK_MSG(0, "Must pre-set debug buffer with set_debug_buffer()\n");
    return;
  }

  check_ready_to_add_events();
  // string_id == -1 signals the integer-identified case below.
  int64_t string_id = name != nullptr ? create_string_entry(name) : -1;

  etdump_DebugEvent_start(builder_);

  etdump_DebugEvent_chain_index_add(builder_, chain_id_);
  etdump_DebugEvent_instruction_id_add(builder_, debug_handle_);
  if (string_id == -1) {
    etdump_DebugEvent_delegate_debug_id_int_add(builder_, delegate_debug_index);
  } else {
    etdump_DebugEvent_delegate_debug_id_str_add(builder_, string_id);
  }

  // Check the type of `output` then call the corresponding logging functions
  if constexpr (std::is_same<T, Tensor>::value) {
    // Single tensor: copy payload to the debug buffer, store metadata +
    // offset in a Tensor table.
    long offset = copy_tensor_to_debug_buffer(output);
    etdump_Tensor_ref_t tensor_ref = add_tensor_entry(builder_, output, offset);

    etdump_Value_start(builder_);
    etdump_Value_val_add(builder_, etdump_ValueType_Tensor);
    etdump_Value_tensor_add(builder_, tensor_ref);

  } else if constexpr (std::is_same<T, ArrayRef<Tensor>>::value) {
    // Tensor list: one Tensor table per element, wrapped in a TensorList.
    etdump_Tensor_vec_start(builder_);
    for (size_t i = 0; i < output.size(); ++i) {
      long offset = copy_tensor_to_debug_buffer(output[i]);
      etdump_Tensor_vec_push(
          builder_, add_tensor_entry(builder_, output[i], offset));
    }
    etdump_Tensor_vec_ref_t tensor_vec_ref = etdump_Tensor_vec_end(builder_);
    etdump_TensorList_ref_t tensor_list_ref =
        etdump_TensorList_create(builder_, tensor_vec_ref);

    etdump_Value_start(builder_);
    etdump_Value_val_add(builder_, etdump_ValueType_TensorList);
    etdump_Value_tensor_list_add(builder_, tensor_list_ref);
  } else if constexpr (std::is_same<T, int>::value) {
    auto int_ref = etdump_Int_create(builder_, output);

    etdump_Value_start(builder_);
    etdump_Value_val_add(builder_, etdump_ValueType_Int);
    etdump_Value_int_value_add(builder_, int_ref);
  } else if constexpr (std::is_same<T, double>::value) {
    auto double_ref = etdump_Double_create(builder_, output);

    etdump_Value_start(builder_);
    etdump_Value_double_value_add(builder_, double_ref);
    etdump_Value_val_add(builder_, etdump_ValueType_Double);
  } else if constexpr (std::is_same<T, bool>::value) {
    // bool is converted to the flatbuffers boolean representation first.
    flatbuffers_bool_t flatbuffer_bool_val =
        output ? FLATBUFFERS_TRUE : FLATBUFFERS_FALSE;
    auto bool_ref = etdump_Bool_create(builder_, flatbuffer_bool_val);

    etdump_Value_start(builder_);
    etdump_Value_bool_value_add(builder_, bool_ref);
    etdump_Value_val_add(builder_, etdump_ValueType_Bool);
  } else {
    ET_CHECK_MSG(0, "Unsupported output type for intermediate logging\n");
  }

  // Close the Value union, attach it to the DebugEvent, and push the event.
  auto value_ref = etdump_Value_end(builder_);
  etdump_DebugEvent_debug_entry_add(builder_, value_ref);

  etdump_DebugEvent_ref_t debug_event = etdump_DebugEvent_end(builder_);

  etdump_RunData_events_push_start(builder_);
  etdump_Event_debug_event_add(builder_, debug_event);
  etdump_RunData_events_push_end(builder_);
}
425
// Ends a non-delegate profiling span opened by start_profiling() and
// serializes it as a ProfileEvent. Delegate spans must be ended with
// end_profiling_delegate() instead.
void ETDumpGen::end_profiling(EventTracerEntry prof_entry) {
  // Capture the end timestamp first so event-logging overhead is excluded.
  et_timestamp_t end_time = et_pal_current_ticks();
  ET_CHECK_MSG(
      prof_entry.delegate_event_id_type == DelegateDebugIdType::kNone,
      "Delegate events must use end_profiling_delegate to mark the end of a delegate profiling event.");
  check_ready_to_add_events();

  etdump_ProfileEvent_start(builder_);
  etdump_ProfileEvent_start_time_add(builder_, prof_entry.start_time);
  etdump_ProfileEvent_end_time_add(builder_, end_time);
  etdump_ProfileEvent_chain_index_add(builder_, prof_entry.chain_id);
  etdump_ProfileEvent_instruction_id_add(builder_, prof_entry.debug_handle);
  // -1 means no name was interned at start_profiling() time.
  if (prof_entry.event_id != -1) {
    etdump_ProfileEvent_name_add(builder_, prof_entry.event_id);
  }
  etdump_ProfileEvent_ref_t id = etdump_ProfileEvent_end(builder_);
  etdump_RunData_events_push_start(builder_);
  etdump_Event_profile_event_add(builder_, id);
  etdump_RunData_events_push_end(builder_);
}
446
// Registers a named allocator to track and returns its AllocatorID. Only
// legal immediately after create_event_block() (or while already adding
// allocators), before any events have been pushed — see the state-machine
// comment above.
AllocatorID ETDumpGen::track_allocator(const char* name) {
  ET_CHECK_MSG(
      (state_ == State::BlockCreated || state_ == State::AddingAllocators),
      "Allocators can only be added immediately after a new block is created and before any events are added.");
  if (state_ != State::AddingAllocators) {
    etdump_RunData_allocators_start(builder_);
    state_ = State::AddingAllocators;
  }
  flatbuffers_string_ref_t ref = create_string_entry(name);
  etdump_RunData_allocators_push_create(builder_, ref);
  // The ID is the allocators vector length after the push (presumably the
  // new entry's 1-based index — confirm against the flatcc vector API).
  return etdump_RunData_allocators_reserved_len(builder_);
}
459
// Records a single allocation of `allocation_size` bytes against the
// allocator previously registered via track_allocator().
void ETDumpGen::track_allocation(
    AllocatorID allocator_id,
    size_t allocation_size) {
  check_ready_to_add_events();

  etdump_RunData_events_push_start(builder_);
  etdump_Event_allocation_event_create(builder_, allocator_id, allocation_size);
  etdump_RunData_events_push_end(builder_);
}
469
// Finalizes the flatbuffer and returns a pointer/size pair for the
// serialized ETDump. Returns {nullptr, 0} when nothing was recorded
// (State::Init or zero blocks). Leaves the generator in State::Done;
// create_event_block() will reset() it for reuse.
ETDumpResult ETDumpGen::get_etdump_data() {
  ETDumpResult result;
  // Close whichever table is still open for the current block.
  if (state_ == State::AddingEvents) {
    etdump_RunData_events_end(builder_);
  } else if (state_ == State::AddingAllocators) {
    etdump_RunData_allocators_end(builder_);
  } else if (state_ == State::Init) {
    result.buf = nullptr;
    result.size = 0;
    return result;
  }
  etdump_ETDump_run_data_push_end(builder_);
  etdump_ETDump_run_data_end(builder_);
  etdump_ETDump_ref_t root = etdump_ETDump_end(builder_);
  flatbuffers_buffer_end(builder_, root);
  if (num_blocks_ == 0) {
    result = {nullptr, 0};
  } else {
    if (alloc_.data) {
      // Static-buffer mode: the finished buffer lives in the caller's
      // memory between the allocator's front cursor and its write limit.
      result.buf = alloc_.front_cursor;
      result.size = alloc_.out_size - alloc_.front_left;
    } else {
      // Heap mode: flatcc allocates the final aligned buffer; the caller
      // takes ownership of it (presumably released with the matching
      // flatcc free routine — confirm against ETDumpResult docs).
      result.buf =
          flatcc_builder_finalize_aligned_buffer(builder_, &result.size);
    }
  }
  state_ = State::Done;
  return result;
}
499
// Installs the buffer used to stash tensor payloads for debug/intermediate
// output logging. Must be set before log_evalue() or the intermediate
// delegate-logging calls can record tensor data.
void ETDumpGen::set_debug_buffer(Span<uint8_t> buffer) {
  debug_buffer_ = buffer;
}
503
// Copies `tensor`'s payload into the debug buffer at the next 64-byte
// aligned position and returns that offset. Returns
// static_cast<size_t>(-1) for zero-sized tensors (nothing is copied).
// Aborts when the debug buffer has no room left.
size_t ETDumpGen::copy_tensor_to_debug_buffer(exec_aten::Tensor tensor) {
  if (tensor.nbytes() == 0) {
    return static_cast<size_t>(-1);
  }
  uint8_t* offset_ptr =
      alignPointer(debug_buffer_.data() + debug_buffer_offset_, 64);
  // Advance the cursor past this copy before checking for overflow.
  debug_buffer_offset_ = (offset_ptr - debug_buffer_.data()) + tensor.nbytes();
  ET_CHECK_MSG(
      debug_buffer_offset_ <= debug_buffer_.size(),
      "Ran out of space to store intermediate outputs.");
  memcpy(offset_ptr, tensor.const_data_ptr(), tensor.nbytes());
  return (size_t)(offset_ptr - debug_buffer_.data());
}
517
// Serializes an EValue (Tensor, tensor list, Int, Double or Bool) as a
// DebugEvent. Tensor payloads are copied into the debug buffer and
// referenced by offset; program outputs are flagged with Value.output.
// Silently does nothing when no debug buffer has been set.
void ETDumpGen::log_evalue(const EValue& evalue, LoggedEValueType evalue_type) {
  if (debug_buffer_.empty()) {
    return;
  }

  check_ready_to_add_events();

  etdump_DebugEvent_start(builder_);

  etdump_DebugEvent_chain_index_add(builder_, chain_id_);
  etdump_DebugEvent_instruction_id_add(builder_, debug_handle_);

  switch (evalue.tag) {
    case Tag::Tensor: {
      // Copy the tensor payload out and record metadata + offset.
      exec_aten::Tensor tensor = evalue.toTensor();
      long offset = copy_tensor_to_debug_buffer(tensor);
      etdump_Tensor_ref_t tensor_ref =
          add_tensor_entry(builder_, tensor, offset);

      etdump_Value_start(builder_);
      etdump_Value_val_add(builder_, etdump_ValueType_Tensor);
      etdump_Value_tensor_add(builder_, tensor_ref);
      // Mark values that are program outputs so tooling can tell them
      // apart from intermediates.
      if (evalue_type == LoggedEValueType::kProgramOutput) {
        auto bool_ref = etdump_Bool_create(builder_, FLATBUFFERS_TRUE);
        etdump_Value_output_add(builder_, bool_ref);
      }
      auto value_ref = etdump_Value_end(builder_);

      etdump_DebugEvent_debug_entry_add(builder_, value_ref);
      break;
    }

    case Tag::ListTensor: {
      // One Tensor table per element, wrapped in a TensorList.
      exec_aten::ArrayRef<exec_aten::Tensor> tensors = evalue.toTensorList();
      etdump_Tensor_vec_start(builder_);
      for (size_t i = 0; i < tensors.size(); ++i) {
        long offset = copy_tensor_to_debug_buffer(tensors[i]);
        etdump_Tensor_vec_push(
            builder_, add_tensor_entry(builder_, tensors[i], offset));
      }
      etdump_Tensor_vec_ref_t tensor_vec_ref = etdump_Tensor_vec_end(builder_);
      etdump_TensorList_ref_t tensor_list_ref =
          etdump_TensorList_create(builder_, tensor_vec_ref);

      etdump_Value_start(builder_);
      etdump_Value_val_add(builder_, etdump_ValueType_TensorList);
      etdump_Value_tensor_list_add(builder_, tensor_list_ref);
      if (evalue_type == LoggedEValueType::kProgramOutput) {
        auto bool_ref = etdump_Bool_create(builder_, FLATBUFFERS_TRUE);
        etdump_Value_output_add(builder_, bool_ref);
      }
      auto value_ref = etdump_Value_end(builder_);

      etdump_DebugEvent_debug_entry_add(builder_, value_ref);
      break;
    }

    case Tag::Int: {
      int64_t val = evalue.toInt();
      auto int_ref = etdump_Int_create(builder_, val);

      etdump_Value_start(builder_);
      etdump_Value_val_add(builder_, etdump_ValueType_Int);
      etdump_Value_int_value_add(builder_, int_ref);
      auto value_ref = etdump_Value_end(builder_);
      etdump_DebugEvent_debug_entry_add(builder_, value_ref);

      break;
    }

    case Tag::Double: {
      double val = evalue.toDouble();
      auto double_ref = etdump_Double_create(builder_, val);

      etdump_Value_start(builder_);
      etdump_Value_double_value_add(builder_, double_ref);
      etdump_Value_val_add(builder_, etdump_ValueType_Double);
      auto value_ref = etdump_Value_end(builder_);
      etdump_DebugEvent_debug_entry_add(builder_, value_ref);

      break;
    }

    case Tag::Bool: {
      // Convert to the flatbuffers boolean representation first.
      flatbuffers_bool_t flatbuffer_bool_val =
          evalue.toBool() ? FLATBUFFERS_TRUE : FLATBUFFERS_FALSE;
      auto bool_ref = etdump_Bool_create(builder_, flatbuffer_bool_val);

      etdump_Value_start(builder_);
      etdump_Value_bool_value_add(builder_, bool_ref);
      etdump_Value_val_add(builder_, etdump_ValueType_Bool);
      auto value_ref = etdump_Value_end(builder_);
      etdump_DebugEvent_debug_entry_add(builder_, value_ref);

      break;
    }

    default:
      ET_CHECK_MSG(
          0,
          "This EValue type = %d is not yet supported for logging\n",
          static_cast<int>(evalue.tag));
      break;
  }

  etdump_DebugEvent_ref_t debug_event = etdump_DebugEvent_end(builder_);

  etdump_RunData_events_push_start(builder_);
  etdump_Event_debug_event_add(builder_, debug_event);
  etdump_RunData_events_push_end(builder_);
}
629
// Returns the number of event blocks created since the last reset().
size_t ETDumpGen::get_num_blocks() {
  return num_blocks_;
}
633
// True when the generator serializes into a caller-provided ("static")
// buffer rather than heap memory managed by flatcc.
bool ETDumpGen::is_static_etdump() {
  return alloc_.data != nullptr;
}
637
638 } // namespace etdump
639 } // namespace executorch
640