/*
 * Copyright © 2024 Igalia S.L.
 * SPDX-License-Identifier: MIT
 */

#include "tu_rmv.h"

#include "tu_buffer.h"
#include "tu_cmd_buffer.h"
#include "tu_cs.h"
#include "tu_device.h"
#include "tu_event.h"
#include "tu_image.h"
#include "tu_query_pool.h"

#include <cstdio>

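/* Installed as the common capture_trace callback; dumps all memory-trace
 * tokens recorded so far through vk_dump_rmv_capture().
 */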
static VkResult
capture_trace(VkQueue _queue)
{
   VK_FROM_HANDLE(tu_queue, queue, _queue);
   struct tu_device *device = queue->device;
   assert(device->vk.memory_trace_data.is_enabled);

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   vk_dump_rmv_capture(&queue->device->vk.memory_trace_data);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
   return VK_SUCCESS;
}

static void
tu_rmv_fill_device_info(struct tu_device *device,
                        struct vk_rmv_device_info *info)
{
   struct tu_physical_device *physical_device = device->physical_device;

   /* Turnip backends only set up a single device-local heap. When available,
    * the kernel-provided VA range is used, otherwise we fall back to that
    * heap's calculated size.
    */
   struct vk_rmv_memory_info *device_local_memory_info =
      &info->memory_infos[VK_RMV_MEMORY_LOCATION_DEVICE];
   if (physical_device->has_set_iova) {
      *device_local_memory_info = {
         .size = physical_device->va_size,
         .physical_base_address = physical_device->va_start,
      };
   } else {
      *device_local_memory_info = {
         .size = physical_device->heap.size, .physical_base_address = 0,
      };
   }

   info->memory_infos[VK_RMV_MEMORY_LOCATION_DEVICE_INVISIBLE] = {
      .size = 0, .physical_base_address = 0,
   };
   info->memory_infos[VK_RMV_MEMORY_LOCATION_HOST] = {
      .size = 0, .physical_base_address = 0,
   };

   /* No PCI-e information to provide. Instead, we can include the device's
    * chip ID in the device name string.
    */
   snprintf(info->device_name, sizeof(info->device_name), "%s (0x%" PRIx64 ")",
      physical_device->name, physical_device->dev_id.chip_id);
   info->pcie_family_id = info->pcie_revision_id = info->pcie_device_id = 0;

   /* TODO: provide relevant information here. */
   info->vram_type = VK_RMV_MEMORY_TYPE_LPDDR5;
   info->vram_operations_per_clock = info->vram_bus_width = info->vram_bandwidth = 1;
   info->minimum_shader_clock = info->minimum_memory_clock = 0;
   info->maximum_shader_clock = info->maximum_memory_clock = 1;
}

void
tu_memory_trace_init(struct tu_device *device)
{
   struct vk_rmv_device_info info;
   memset(&info, 0, sizeof(info));
   tu_rmv_fill_device_info(device, &info);

   vk_memory_trace_init(&device->vk, &info);
   if (!device->vk.memory_trace_data.is_enabled)
      return;

   device->vk.capture_trace = capture_trace;
}

void
tu_memory_trace_finish(struct tu_device *device)
{
   vk_memory_trace_finish(&device->vk);
}

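/* The _locked helpers below expect the caller to already hold
 * memory_trace_data.token_mtx; resource IDs are keyed on the object's
 * CPU pointer.
 */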
static inline uint32_t
tu_rmv_get_resource_id_locked(struct tu_device *device, const void *resource)
{
   return vk_rmv_get_resource_id_locked(&device->vk, (uint64_t) resource);
}

static inline void
tu_rmv_destroy_resource_id_locked(struct tu_device *device,
                                  const void *resource)
{
   vk_rmv_destroy_resource_id_locked(&device->vk, (uint64_t) resource);
}

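/* Emit a RESOURCE_BIND token associating a resource ID with the GPU VA
 * range backing it.
 */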
static inline void
tu_rmv_emit_resource_bind_locked(struct tu_device *device, uint32_t resource_id,
                                 uint64_t address, uint64_t size)
{
   struct vk_rmv_resource_bind_token token = {
      .address = address,
      .size = size,
      .is_system_memory = false,
      .resource_id = resource_id,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_BIND, &token);
}

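/* Emit a CPU_MAP token for a map (or, with unmapped set, an unmap) of the
 * given GPU address.
 */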
static inline void
tu_rmv_emit_cpu_map_locked(struct tu_device *device, uint64_t address,
                           bool unmapped)
{
   struct vk_rmv_cpu_map_token token = {
      .address = address,
      .unmapped = unmapped,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_CPU_MAP, &token);
}

static inline void
tu_rmv_emit_page_table_update_locked(struct tu_device *device, struct tu_bo *bo,
                                     bool is_unmap)
{
   /* These tokens are mainly useful for RMV to properly associate buffer
    * allocations and deallocations to a specific memory domain.
    */
   struct vk_rmv_page_table_update_token token = {
      .virtual_address = bo->iova,
      .physical_address = bo->iova,
      .page_count = DIV_ROUND_UP(bo->size, 4096),
      .page_size = 4096,
      .pid = 0,
      .is_unmap = is_unmap,
      .type = VK_RMV_PAGE_TABLE_UPDATE_TYPE_UPDATE,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE, &token);
}

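/* VkDeviceMemory allocations are reported as heap resources bound to the
 * VA range of their backing BO.
 */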
void
tu_rmv_log_heap_create(struct tu_device *device,
                       const VkMemoryAllocateInfo *allocate_info,
                       struct tu_device_memory *device_memory)
{
   const VkMemoryAllocateFlagsInfo *flags_info = vk_find_struct_const(
      allocate_info->pNext, MEMORY_ALLOCATE_FLAGS_INFO);

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, device_memory),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_HEAP,
      .heap = {
         .alloc_flags = flags_info ? flags_info->flags : 0,
         .size = device_memory->bo->size,
         .alignment = 4096,
         .heap_index = VK_RMV_MEMORY_LOCATION_DEVICE,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                    device_memory->bo->iova,
                                    device_memory->bo->size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

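/* BO allocation is reported as a page table update followed by a virtual
 * allocation in the VRAM domain; destruction emits the inverse tokens.
 */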
void
tu_rmv_log_bo_allocate(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_page_table_update_locked(device, bo, false);

   struct vk_rmv_virtual_allocate_token virtual_allocate_token = {
      .page_count = DIV_ROUND_UP(bo->size, 4096),
      .is_driver_internal = false,
      .is_in_invisible_vram = false,
      .address = bo->iova,
      .preferred_domains = VK_RMV_KERNEL_MEMORY_DOMAIN_VRAM,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_VIRTUAL_ALLOCATE,
                     &virtual_allocate_token);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_bo_destroy(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_virtual_free_token virtual_free_token = {
      .address = bo->iova,
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_VIRTUAL_FREE, &virtual_free_token);

   tu_rmv_emit_page_table_update_locked(device, bo, true);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_bo_map(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_cpu_map_locked(device, bo->iova, false);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_bo_unmap(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_cpu_map_locked(device, bo->iova, true);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

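/* Buffer creation only describes the resource; the actual VA binding is
 * reported separately through tu_rmv_log_buffer_bind().
 */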
void
tu_rmv_log_buffer_create(struct tu_device *device, struct tu_buffer *buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, buffer),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_BUFFER,
      .buffer = {
         .create_flags = buffer->vk.create_flags,
         .usage_flags = buffer->vk.usage,
         .size = buffer->vk.size,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   /* Any sparse data would also be reported here, if supported. */

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_buffer_destroy(struct tu_device *device, struct tu_buffer *buffer)
{
   /* Any sparse data would also be reported here, if supported. */
   tu_rmv_log_resource_destroy(device, buffer);
}

void
tu_rmv_log_buffer_bind(struct tu_device *device, struct tu_buffer *buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   tu_rmv_emit_resource_bind_locked(device,
                                    tu_rmv_get_resource_id_locked(device, buffer),
                                    buffer->bo ? buffer->iova : 0,
                                    buffer->vk.size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

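/* Image creation records the resource description and layout-derived
 * sizes; the VA binding is reported through tu_rmv_log_image_bind().
 */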
void
tu_rmv_log_image_create(struct tu_device *device, struct tu_image *image)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   /* TODO: provide the image metadata information */
   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, image),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_IMAGE,
      .image = {
         .create_flags = image->vk.create_flags,
         .usage_flags = image->vk.usage,
         .type = image->vk.image_type,
         .extent = image->vk.extent,
         .format = image->vk.format,
         .num_mips = image->vk.mip_levels,
         .num_slices = image->vk.array_layers,
         .tiling = image->vk.tiling,
         .log2_samples = util_logbase2(image->vk.samples),
         .log2_storage_samples = util_logbase2(image->vk.samples),
         /* any bound memory should have alignment of 4096 */
         .alignment_log2 = util_logbase2(4096),
         .metadata_alignment_log2 = 0,
         .image_alignment_log2 = util_logbase2(image->layout[0].base_align),
         .size = image->total_size,
         .metadata_size = 0,
         .metadata_header_size = 0,
         .metadata_offset = 0,
         .metadata_header_offset = 0,
         /* TODO: find a better way to determine if an image is presentable */
         .presentable = image->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   /* Any sparse data would also be reported here, if supported. */

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_image_destroy(struct tu_device *device, struct tu_image *image)
{
   /* Any sparse data would also be reported here, if supported. */
   tu_rmv_log_resource_destroy(device, image);
}

void
tu_rmv_log_image_bind(struct tu_device *device, struct tu_image *image)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   uint64_t address = image->bo ? image->iova : 0;
   uint64_t size = image->bo ? image->total_size : 0;
   tu_rmv_emit_resource_bind_locked(device,
                                    tu_rmv_get_resource_id_locked(device, image),
                                    address, size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

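/* Command buffer backing storage, whether a whole BO or a suballocation,
 * is reported as a driver-internal command allocator resource.
 */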
static inline void
tu_rmv_log_command_allocator_create(struct tu_device *device, void *bo,
                                    uint64_t address, uint64_t size)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, bo),
      .is_driver_internal = true,
      .type = VK_RMV_RESOURCE_TYPE_COMMAND_ALLOCATOR,
      .command_buffer = {
         .preferred_domain = VK_RMV_KERNEL_MEMORY_DOMAIN_VRAM,
         .executable_size = size,
         .app_available_executable_size = size,
         .embedded_data_size = 0,
         .app_available_embedded_data_size = 0,
         .scratch_size = 0,
         .app_available_scratch_size = 0,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id, address, size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_cmd_buffer_bo_create(struct tu_device *device,
                                struct tu_bo *bo)
{
   tu_rmv_log_command_allocator_create(device, bo, bo->iova, bo->size);
}

void
tu_rmv_log_cmd_buffer_suballoc_bo_create(struct tu_device *device,
                                         struct tu_suballoc_bo *suballoc_bo)
{
   tu_rmv_log_command_allocator_create(device, suballoc_bo,
                                       suballoc_bo->iova, suballoc_bo->size);
}

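/* Query pools are reported as query heaps bound to their backing BO. */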
void
tu_rmv_log_query_pool_create(struct tu_device *device,
                             struct tu_query_pool *query_pool)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, query_pool),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_QUERY_HEAP,
      .query_pool = {
         .type = query_pool->vk.query_type,
         .has_cpu_access = true,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                    query_pool->bo->iova, query_pool->bo->size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_descriptor_pool_create(struct tu_device *device,
                                  const VkDescriptorPoolCreateInfo *create_info,
                                  struct tu_descriptor_pool *descriptor_pool)
{
   size_t pool_sizes_size =
      create_info->poolSizeCount * sizeof(VkDescriptorPoolSize);
   VkDescriptorPoolSize *pool_sizes =
      (VkDescriptorPoolSize *) malloc(pool_sizes_size);
   if (!pool_sizes)
      return;

   memcpy(pool_sizes, create_info->pPoolSizes, pool_sizes_size);

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, descriptor_pool),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_DESCRIPTOR_POOL,
      .descriptor_pool = {
         .max_sets = create_info->maxSets,
         .pool_size_count = create_info->poolSizeCount,
         .pool_sizes = pool_sizes,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   if (descriptor_pool->bo) {
      tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                       descriptor_pool->bo->iova,
                                       descriptor_pool->bo->size);
   }

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

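/* Common helper for graphics and compute pipelines; the pipeline resource
 * is bound to its backing BO when one is present.
 */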
static inline void
tu_rmv_log_pipeline_create(struct tu_device *device,
                           struct tu_pipeline *pipeline)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, pipeline),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_PIPELINE,
      .pipeline = {
         .is_internal = false,
         /* TODO: provide pipeline hash data when available. */
         .hash_lo = 0, .hash_hi = 0,
         .shader_stages = pipeline->active_stages,
         .is_ngg = false,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   if (pipeline->bo.bo) {
      tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                       pipeline->bo.iova, pipeline->bo.size);
   }

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
tu_rmv_log_graphics_pipeline_create(struct tu_device *device,
                                    struct tu_graphics_pipeline *graphics_pipeline)
{
   tu_rmv_log_pipeline_create(device, &graphics_pipeline->base);
}

void
tu_rmv_log_compute_pipeline_create(struct tu_device *device,
                                   struct tu_compute_pipeline *compute_pipeline)
{
   tu_rmv_log_pipeline_create(device, &compute_pipeline->base);
}

void
tu_rmv_log_event_create(struct tu_device *device,
                        const VkEventCreateInfo *create_info,
                        struct tu_event *event)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, event),
      .is_driver_internal = false,
      .type = VK_RMV_RESOURCE_TYPE_GPU_EVENT,
      .event = {
         .flags = create_info->flags,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   if (event->bo) {
      tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                       event->bo->iova, event->bo->size);
   }

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

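/* BOs allocated internally by the driver are reported as miscellaneous
 * internal resources.
 */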
void
tu_rmv_log_internal_resource_create(struct tu_device *device, struct tu_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, bo),
      .is_driver_internal = true,
      .type = VK_RMV_RESOURCE_TYPE_MISC_INTERNAL,
      .misc_internal = {
         .type = VK_RMV_MISC_INTERNAL_TYPE_PADDING,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);

   tu_rmv_emit_resource_bind_locked(device, token.resource_id,
                                    bo->iova, bo->size);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

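/* Attach a debug name to a resource through a USERDATA token; the name is
 * copied and truncated to at most 127 characters plus the terminating NUL.
 */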
void
tu_rmv_log_resource_name(struct tu_device *device, const void *resource,
                         const char *resource_name)
{
   size_t name_len = MIN2(strlen(resource_name) + 1, 128);
   char *name_buf = (char *) malloc(name_len);
   if (!name_buf)
      return;

   strncpy(name_buf, resource_name, name_len);
   name_buf[name_len - 1] = '\0';

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_userdata_token token = {
      .name = name_buf,
      .resource_id = tu_rmv_get_resource_id_locked(device, resource)
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_USERDATA, &token);

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

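/* Emit a RESOURCE_DESTROY token and drop the resource ID mapping. */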
void
tu_rmv_log_resource_destroy(struct tu_device *device, const void *resource)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_destroy_token token = {
      .resource_id = tu_rmv_get_resource_id_locked(device, resource),
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY, &token);

   tu_rmv_destroy_resource_id_locked(device, resource);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}
585