/*
 * Copyright © 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "tu_cs.h"

#include "tu_device.h"
#include "tu_rmv.h"
#include "tu_suballoc.h"

/**
 * Initialize a command stream.
 */
void
tu_cs_init(struct tu_cs *cs,
           struct tu_device *device,
           enum tu_cs_mode mode,
           uint32_t initial_size, const char *name)
{
   assert(mode != TU_CS_MODE_EXTERNAL);

   memset(cs, 0, sizeof(*cs));

   cs->device = device;
   cs->mode = mode;
   cs->next_bo_size = initial_size;
   cs->name = name;
}
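
/* A minimal sketch of the typical lifecycle, assuming a growable stream
 * (the initial size, packet, and payload below are placeholders):
 *
 *    struct tu_cs cs;
 *    tu_cs_init(&cs, device, TU_CS_MODE_GROW, 4096, "example");
 *    tu_cs_begin(&cs);
 *    if (tu_cs_reserve_space(&cs, 2) == VK_SUCCESS) {
 *       tu_cs_emit_pkt7(&cs, CP_NOP, 1);
 *       tu_cs_emit(&cs, 0);
 *    }
 *    tu_cs_end(&cs);
 *    ...
 *    tu_cs_finish(&cs);
 *
 * tu_cs_end() flushes the pending packets into an IB entry that submission
 * code can then execute.
 */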

/**
 * Initialize a command stream as a wrapper to an external buffer.
 */
void
tu_cs_init_external(struct tu_cs *cs, struct tu_device *device,
                    uint32_t *start, uint32_t *end, uint64_t iova,
                    bool writeable)
{
   memset(cs, 0, sizeof(*cs));

   cs->device = device;
   cs->mode = TU_CS_MODE_EXTERNAL;
   cs->start = cs->reserved_end = cs->cur = start;
   cs->end = end;
   cs->external_iova = iova;
   cs->writeable = writeable;
}

/**
 * Initialize a sub-command stream as a wrapper to an externally sub-allocated
 * buffer.
 */
void
tu_cs_init_suballoc(struct tu_cs *cs, struct tu_device *device,
                    struct tu_suballoc_bo *suballoc_bo)
{
   uint32_t *start = (uint32_t *) tu_suballoc_bo_map(suballoc_bo);
   uint32_t *end = start + (suballoc_bo->size >> 2);

   memset(cs, 0, sizeof(*cs));
   cs->device = device;
   cs->mode = TU_CS_MODE_SUB_STREAM;
   cs->start = cs->reserved_end = cs->cur = start;
   cs->end = end;
   cs->refcount_bo = tu_bo_get_ref(suballoc_bo->bo);
}

/**
 * Finish and release all resources owned by a command stream.
 */
void
tu_cs_finish(struct tu_cs *cs)
{
   for (uint32_t i = 0; i < cs->read_only.bo_count; ++i) {
      TU_RMV(resource_destroy, cs->device, cs->read_only.bos[i]);
      tu_bo_finish(cs->device, cs->read_only.bos[i]);
   }

   for (uint32_t i = 0; i < cs->read_write.bo_count; ++i) {
      TU_RMV(resource_destroy, cs->device, cs->read_write.bos[i]);
      tu_bo_finish(cs->device, cs->read_write.bos[i]);
   }

   if (cs->refcount_bo)
      tu_bo_finish(cs->device, cs->refcount_bo);

   free(cs->entries);
   free(cs->read_only.bos);
   free(cs->read_write.bos);
}

static struct tu_bo *
tu_cs_current_bo(const struct tu_cs *cs)
{
   if (cs->refcount_bo) {
      return cs->refcount_bo;
   } else {
      const struct tu_bo_array *bos = cs->writeable ? &cs->read_write : &cs->read_only;
      assert(bos->bo_count);
      return bos->bos[bos->bo_count - 1];
   }
}

/**
 * Get the offset of the command packets emitted since the last call to
 * tu_cs_add_entry.
 */
static uint32_t
tu_cs_get_offset(const struct tu_cs *cs)
{
   const struct tu_bo_array *bos = cs->writeable ? &cs->read_write : &cs->read_only;
   return (cs->refcount_bo || bos->bo_count != 0) ? cs->start - (uint32_t *) tu_cs_current_bo(cs)->map : 0;
}

/* Get the iova for the next dword to be emitted. Useful after
 * tu_cs_reserve_space() to create a patch point that can be overwritten on
 * the GPU.
 */
uint64_t
tu_cs_get_cur_iova(const struct tu_cs *cs)
{
   if (cs->mode == TU_CS_MODE_EXTERNAL)
      return cs->external_iova + ((char *) cs->cur - (char *) cs->start);
   return tu_cs_current_bo(cs)->iova + ((char *) cs->cur - (char *) tu_cs_current_bo(cs)->map);
}
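
/* A hedged sketch of the patch-point pattern mentioned above: reserving
 * space first guarantees the dword cannot move to another BO, so the
 * recorded iova stays valid when something on the GPU later overwrites it:
 *
 *    tu_cs_reserve_space(cs, 1);
 *    uint64_t patch_iova = tu_cs_get_cur_iova(cs);
 *    tu_cs_emit(cs, 0);
 *
 * The emitted dword is a placeholder; "patch_iova" is a hypothetical name.
 */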

/*
 * Allocate and add a BO to a command stream.  Following command packets will
 * be emitted to the new BO.
 */
static VkResult
tu_cs_add_bo(struct tu_cs *cs, uint32_t size)
{
   /* no BO for TU_CS_MODE_EXTERNAL */
   assert(cs->mode != TU_CS_MODE_EXTERNAL);
   /* No adding more BOs if suballocating from a suballoc_bo. */
   assert(!cs->refcount_bo);

   /* no dangling command packet */
   assert(tu_cs_is_empty(cs));

   struct tu_bo_array *bos = cs->writeable ? &cs->read_write : &cs->read_only;

   /* grow cs->bos if needed */
   if (bos->bo_count == bos->bo_capacity) {
      uint32_t new_capacity = MAX2(4, 2 * bos->bo_capacity);
      struct tu_bo **new_bos = (struct tu_bo **)
         realloc(bos->bos, new_capacity * sizeof(struct tu_bo *));
      if (!new_bos)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      bos->bo_capacity = new_capacity;
      bos->bos = new_bos;
   }

   struct tu_bo *new_bo;

   VkResult result =
      tu_bo_init_new(cs->device, NULL, &new_bo, size * sizeof(uint32_t),
                     (enum tu_bo_alloc_flags)(COND(!cs->writeable,
                                                   TU_BO_ALLOC_GPU_READ_ONLY) |
                                              TU_BO_ALLOC_ALLOW_DUMP),
                     cs->name);
   if (result != VK_SUCCESS) {
      return result;
   }

   result = tu_bo_map(cs->device, new_bo, NULL);
   if (result != VK_SUCCESS) {
      tu_bo_finish(cs->device, new_bo);
      return result;
   }

   TU_RMV(cmd_buffer_bo_create, cs->device, new_bo);

   bos->bos[bos->bo_count++] = new_bo;

   cs->start = cs->cur = cs->reserved_end = (uint32_t *) new_bo->map;
   cs->end = cs->start + new_bo->size / sizeof(uint32_t);

   return VK_SUCCESS;
}

/**
 * Reserve an IB entry.
 */
static VkResult
tu_cs_reserve_entry(struct tu_cs *cs)
{
   /* entries are only for TU_CS_MODE_GROW */
   assert(cs->mode == TU_CS_MODE_GROW);

   /* grow cs->entries if needed */
   if (cs->entry_count == cs->entry_capacity) {
      uint32_t new_capacity = MAX2(4, cs->entry_capacity * 2);
      struct tu_cs_entry *new_entries = (struct tu_cs_entry *)
         realloc(cs->entries, new_capacity * sizeof(struct tu_cs_entry));
      if (!new_entries)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      cs->entry_capacity = new_capacity;
      cs->entries = new_entries;
   }

   return VK_SUCCESS;
}

/**
 * Add an IB entry for the command packets emitted since the last call to this
 * function.
 */
static void
tu_cs_add_entry(struct tu_cs *cs)
{
   /* entries are only for TU_CS_MODE_GROW */
   assert(cs->mode == TU_CS_MODE_GROW);

   /* disallow empty entry */
   assert(!tu_cs_is_empty(cs));

   /*
    * because we disallow empty entry, tu_cs_add_bo and tu_cs_reserve_entry
    * must both have been called
    */
   assert(cs->writeable ? cs->read_write.bo_count : cs->read_only.bo_count);
   assert(cs->entry_count < cs->entry_capacity);

   /* add an entry for [cs->start, cs->cur] */
   cs->entries[cs->entry_count++] = (struct tu_cs_entry) {
      .bo = tu_cs_current_bo(cs),
      .size = tu_cs_get_size(cs) * sizeof(uint32_t),
      .offset = tu_cs_get_offset(cs) * sizeof(uint32_t),
   };

   cs->start = cs->cur;
}

/**
 * Same behavior as tu_cs_emit_call, but instead of emitting an indirect
 * call, the IB entries of \a target are appended directly to \a cs.
 */
VkResult
tu_cs_add_entries(struct tu_cs *cs, struct tu_cs *target)
{
   VkResult result;

   assert(cs->mode == TU_CS_MODE_GROW);
   assert(target->mode == TU_CS_MODE_GROW);

   if (!tu_cs_is_empty(cs))
      tu_cs_add_entry(cs);

   for (unsigned i = 0; i < target->entry_count; i++) {
      result = tu_cs_reserve_entry(cs);
      if (result != VK_SUCCESS)
         return result;
      cs->entries[cs->entry_count++] = target->entries[i];
   }

   return VK_SUCCESS;
}

/**
 * Begin (or continue) command packet emission.  Currently this does nothing
 * but sanity checks.  \a cs must not be in TU_CS_MODE_SUB_STREAM mode.
 */
void
tu_cs_begin(struct tu_cs *cs)
{
   assert(cs->mode != TU_CS_MODE_SUB_STREAM);
   assert(tu_cs_is_empty(cs));
}

/**
 * End command packet emission.  This adds an IB entry when \a cs is in
 * TU_CS_MODE_GROW mode.
 */
void
tu_cs_end(struct tu_cs *cs)
{
   assert(cs->mode != TU_CS_MODE_SUB_STREAM);

   if (cs->mode == TU_CS_MODE_GROW && !tu_cs_is_empty(cs))
      tu_cs_add_entry(cs);
}

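/**
 * Switch command packet emission between the read-only and read-write BO
 * sets.  For TU_CS_MODE_GROW, any pending packets are first flushed into an
 * IB entry; emission then resumes at the position previously saved for the
 * target set.
 */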
void
tu_cs_set_writeable(struct tu_cs *cs, bool writeable)
{
   assert(cs->mode == TU_CS_MODE_GROW || cs->mode == TU_CS_MODE_SUB_STREAM);

   if (cs->writeable != writeable) {
      if (cs->mode == TU_CS_MODE_GROW && !tu_cs_is_empty(cs))
         tu_cs_add_entry(cs);
      struct tu_bo_array *old_bos = cs->writeable ? &cs->read_write : &cs->read_only;
      struct tu_bo_array *new_bos = writeable ? &cs->read_write : &cs->read_only;

      old_bos->start = cs->start;
      cs->start = cs->cur = cs->reserved_end = new_bos->start;
      if (new_bos->bo_count) {
         struct tu_bo *bo = new_bos->bos[new_bos->bo_count - 1];
         cs->end = (uint32_t *) bo->map + bo->size / sizeof(uint32_t);
      } else {
         cs->end = NULL;
      }

      cs->writeable = writeable;
   }
}

/**
 * Begin command packet emission to a sub-stream.  \a cs must be in
 * TU_CS_MODE_SUB_STREAM mode.
 *
 * Return \a sub_cs which is in TU_CS_MODE_EXTERNAL mode.  tu_cs_begin and
 * tu_cs_reserve_space are implied and \a sub_cs is ready for command packet
 * emission.
 */
VkResult
tu_cs_begin_sub_stream_aligned(struct tu_cs *cs, uint32_t count,
                               uint32_t size, struct tu_cs *sub_cs)
{
   assert(cs->mode == TU_CS_MODE_SUB_STREAM);
   assert(size);

   VkResult result;
   if (tu_cs_get_space(cs) < count * size) {
      /* When we have to allocate a new BO, assume that the alignment of the
       * BO is sufficient.
       */
      result = tu_cs_reserve_space(cs, count * size);
   } else {
      /* Pad the current offset up to the next multiple of size dwords. */
      result = tu_cs_reserve_space(cs, count * size + (size - tu_cs_get_offset(cs)) % size);
      cs->start += (size - tu_cs_get_offset(cs)) % size;
   }
   if (result != VK_SUCCESS)
      return result;

   cs->cur = cs->start;

   tu_cs_init_external(sub_cs, cs->device, cs->cur, cs->reserved_end,
                       tu_cs_get_cur_iova(cs), cs->writeable);
   tu_cs_begin(sub_cs);
   result = tu_cs_reserve_space(sub_cs, count * size);
   assert(result == VK_SUCCESS);

   return VK_SUCCESS;
}

/**
 * Allocate count*size dwords, aligned to size dwords.
 * \a cs must be in TU_CS_MODE_SUB_STREAM mode.
 */
VkResult
tu_cs_alloc(struct tu_cs *cs,
            uint32_t count,
            uint32_t size,
            struct tu_cs_memory *memory)
{
   assert(cs->mode == TU_CS_MODE_SUB_STREAM);
   assert(size && size <= 1024);

   if (!count) {
      /* If you allocated no memory, you'd better not use the iova for anything
       * (but it's left aligned for sanity).
       */
      memory->map = NULL;
      memory->iova = 0xdead0000;
      return VK_SUCCESS;
   }

   /* TODO: smarter way to deal with alignment? */

   VkResult result = tu_cs_reserve_space(cs, count * size + (size - 1));
   if (result != VK_SUCCESS)
      return result;

   struct tu_bo *bo = tu_cs_current_bo(cs);
   size_t offset = align(tu_cs_get_offset(cs), size);

   memory->map = (uint32_t *) bo->map + offset;
   memory->iova = bo->iova + offset * sizeof(uint32_t);
   memory->writeable = cs->writeable;

   cs->start = cs->cur = (uint32_t *) bo->map + offset + count * size;

   return VK_SUCCESS;
}
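
/* A minimal usage sketch, assuming a TU_CS_MODE_SUB_STREAM stream and four
 * vec4 constants (count = 4, size = 4 dwords, so the iova is 16-byte
 * aligned); "sub_stream_cs" and "consts" are hypothetical names:
 *
 *    struct tu_cs_memory mem;
 *    if (tu_cs_alloc(sub_stream_cs, 4, 4, &mem) == VK_SUCCESS) {
 *       memcpy(mem.map, consts, 4 * 4 * sizeof(uint32_t));
 *    }
 *
 * mem.iova can then be referenced from other command packets.
 */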

/**
 * End command packet emission to a sub-stream.  \a sub_cs becomes invalid
 * after this call.
 *
 * Return an IB entry for the sub-stream.  The entry has the same lifetime as
 * \a cs.
 */
struct tu_cs_entry
tu_cs_end_sub_stream(struct tu_cs *cs, struct tu_cs *sub_cs)
{
   assert(cs->mode == TU_CS_MODE_SUB_STREAM);
   assert(sub_cs->start == cs->cur && sub_cs->end == cs->reserved_end);
   tu_cs_sanity_check(sub_cs);

   tu_cs_end(sub_cs);

   cs->cur = sub_cs->cur;

   struct tu_cs_entry entry = {
      .bo = tu_cs_current_bo(cs),
      .size = tu_cs_get_size(cs) * sizeof(uint32_t),
      .offset = tu_cs_get_offset(cs) * sizeof(uint32_t),
   };

   cs->start = cs->cur;

   return entry;
}
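
/* A hedged sketch of the sub-stream pattern; the dword count is a
 * placeholder and "cs" must be in TU_CS_MODE_SUB_STREAM mode:
 *
 *    struct tu_cs sub;
 *    if (tu_cs_begin_sub_stream_aligned(cs, 8, 1, &sub) == VK_SUCCESS) {
 *       ... emit up to 8 dwords into &sub ...
 *       struct tu_cs_entry entry = tu_cs_end_sub_stream(cs, &sub);
 *       ... the entry can later be executed, e.g. with tu_cs_emit_ib() ...
 *    }
 */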

/**
 * Reserve space from a command stream for \a reserved_size uint32_t values.
 * This never fails when \a cs has mode TU_CS_MODE_EXTERNAL.
 */
VkResult
tu_cs_reserve_space(struct tu_cs *cs, uint32_t reserved_size)
{
   if (tu_cs_get_space(cs) < reserved_size) {
      if (cs->mode == TU_CS_MODE_EXTERNAL) {
         unreachable("cannot grow external buffer");
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      /* add an entry for the existing command packets */
      if (!tu_cs_is_empty(cs)) {
         /* no direct command packet for TU_CS_MODE_SUB_STREAM */
         assert(cs->mode != TU_CS_MODE_SUB_STREAM);

         tu_cs_add_entry(cs);
      }

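      /* An open CP_COND_REG_EXEC cannot span BOs: patch the DWORDS field of
       * each open condition so the conditional range ends here, and account
       * for re-emitting the 3-dword packet in the new BO below.
       */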
      for (uint32_t i = 0; i < cs->cond_stack_depth; i++) {
         /* Subtract one here to account for the DWORD field itself. */
         *cs->cond_dwords[i] = cs->cur - cs->cond_dwords[i] - 1;

         /* space for CP_COND_REG_EXEC in next bo */
         reserved_size += 3;
      }

      /* switch to a new BO */
      uint32_t new_size = MAX2(cs->next_bo_size, reserved_size);
      VkResult result = tu_cs_add_bo(cs, new_size);
      if (result != VK_SUCCESS)
         return result;

      if (cs->cond_stack_depth) {
         cs->reserved_end = cs->cur + reserved_size;
      }

      /* Re-emit CP_COND_REG_EXECs */
      for (uint32_t i = 0; i < cs->cond_stack_depth; i++) {
         tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
         tu_cs_emit(cs, cs->cond_flags[i]);

         cs->cond_dwords[i] = cs->cur;

         /* Emit dummy DWORD field here */
         tu_cs_emit(cs, RENDER_MODE_CP_COND_REG_EXEC_1_DWORDS(0));
      }

      /* double the size for the next bo, also there is an upper
       * bound on IB size, which appears to be 0x0fffff
       */
      new_size = MIN2(new_size << 1, 0x0fffff);
      if (cs->next_bo_size < new_size)
         cs->next_bo_size = new_size;
   }

   assert(tu_cs_get_space(cs) >= reserved_size);
   cs->reserved_end = cs->cur + reserved_size;

   if (cs->mode == TU_CS_MODE_GROW) {
      /* reserve an entry for the next call to this function or tu_cs_end */
      return tu_cs_reserve_entry(cs);
   }

   return VK_SUCCESS;
}

/**
 * Reset a command stream to its initial state.  This discards all command
 * packets in \a cs, but does not necessarily release all resources.
 */
void
tu_cs_reset(struct tu_cs *cs)
{
   if (cs->mode == TU_CS_MODE_EXTERNAL) {
      assert(!cs->read_only.bo_count && !cs->read_write.bo_count &&
             !cs->refcount_bo && !cs->entry_count);
      cs->reserved_end = cs->cur = cs->start;
      return;
   }

   for (uint32_t i = 0; i + 1 < cs->read_only.bo_count; ++i) {
      TU_RMV(resource_destroy, cs->device, cs->read_only.bos[i]);
      tu_bo_finish(cs->device, cs->read_only.bos[i]);
   }

   for (uint32_t i = 0; i + 1 < cs->read_write.bo_count; ++i) {
      TU_RMV(resource_destroy, cs->device, cs->read_write.bos[i]);
      tu_bo_finish(cs->device, cs->read_write.bos[i]);
   }

   cs->writeable = false;

   if (cs->read_only.bo_count) {
      cs->read_only.bos[0] = cs->read_only.bos[cs->read_only.bo_count - 1];
      cs->read_only.bo_count = 1;

      cs->start = cs->cur = cs->reserved_end = (uint32_t *) cs->read_only.bos[0]->map;
      cs->end = cs->start + cs->read_only.bos[0]->size / sizeof(uint32_t);
   }

   if (cs->read_write.bo_count) {
      cs->read_write.bos[0] = cs->read_write.bos[cs->read_write.bo_count - 1];
      cs->read_write.bo_count = 1;
   }

   cs->entry_count = 0;
}

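/**
 * Emit \a size dwords of \a data into the stream, wrapped in a CP_NOP packet
 * so that the CP skips over it, with the payload placed at an iova aligned
 * to \a align_dwords dwords.  Returns the iova of the aligned payload.
 */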
uint64_t
tu_cs_emit_data_nop(struct tu_cs *cs,
                    const uint32_t *data,
                    uint32_t size,
                    uint32_t align_dwords)
{
   uint32_t total_size = size + (align_dwords - 1);
   tu_cs_emit_pkt7(cs, CP_NOP, total_size);

   uint64_t iova = tu_cs_get_cur_iova(cs);
   uint64_t iova_aligned = align64(iova, align_dwords * sizeof(uint32_t));
   size_t offset = (iova_aligned - iova) / sizeof(uint32_t);
   cs->cur += offset;
   memcpy(cs->cur, data, size * sizeof(uint32_t));

   cs->cur += total_size - offset;

   return iova + offset * sizeof(uint32_t);
}

void
tu_cs_emit_debug_string(struct tu_cs *cs, const char *string, int len)
{
   assert(cs->mode == TU_CS_MODE_GROW);

   /* max packet size is 0x3fff dwords */
   len = MIN2(len, 0x3fff * 4);

   tu_cs_emit_pkt7(cs, CP_NOP, align(len, 4) / 4);
   const uint32_t *buf = (const uint32_t *) string;

   tu_cs_emit_array(cs, buf, len / 4);
   buf += len / 4;
   len = len % 4;

   /* copy remainder bytes without reading past end of input string */
   if (len > 0) {
      uint32_t w = 0;
      memcpy(&w, buf, len);
      tu_cs_emit(cs, w);
   }
}

void
tu_cs_emit_debug_magic_strv(struct tu_cs *cs,
                            uint32_t magic,
                            const char *fmt,
                            va_list args)
{
   /* the first vsnprintf consumes args, so keep a copy for the second pass */
   va_list args2;
   va_copy(args2, args);

   int fmt_len = vsnprintf(NULL, 0, fmt, args);
   int len = 4 + fmt_len + 1;
   char *string = (char *) malloc(len);

   /* format: <magic><formatted string>\0 */
   *(uint32_t *) string = magic;
   vsnprintf(string + 4, fmt_len + 1, fmt, args2);
   va_end(args2);

   tu_cs_emit_debug_string(cs, string, len);
   free(string);
}

__attribute__((format(printf, 2, 3))) void
tu_cs_emit_debug_msg(struct tu_cs *cs, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);
   tu_cs_emit_debug_magic_strv(cs, CP_NOP_MESG, fmt, args);
   va_end(args);
}
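
/* A minimal sketch: the formatted message is embedded in the stream as a
 * CP_NOP payload tagged with CP_NOP_MESG, where stream-parsing tools can
 * recover it ("pass_idx" is a hypothetical variable):
 *
 *    tu_cs_emit_debug_msg(cs, "drawing pass %u", pass_idx);
 */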

void
tu_cs_trace_start(struct u_trace_context *utctx,
                  void *cs,
                  const char *fmt,
                  ...)
{
   va_list args;
   va_start(args, fmt);
   tu_cs_emit_debug_magic_strv((struct tu_cs *) cs, CP_NOP_BEGN, fmt, args);
   va_end(args);
}

void
tu_cs_trace_end(struct u_trace_context *utctx, void *cs, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);
   tu_cs_emit_debug_magic_strv((struct tu_cs *) cs, CP_NOP_END, fmt, args);
   va_end(args);
}