/**************************************************************************
 *
 * Copyright 2012 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * u_debug_flush.c Debug flush and map-related issues:
 * - Flush while synchronously mapped.
 * - Command stream reference while synchronously mapped.
 * - Synchronous map while referenced on command stream.
 * - Recursive maps.
 * - Unmap while not mapped.
 *
 * @author Thomas Hellstrom <[email protected]>
 */
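
/*
 * Minimal usage sketch for the buffer side (the surrounding driver
 * objects and functions are hypothetical, not part of this API): create
 * a debug_flush_buf alongside each winsys buffer and mirror every
 * map/unmap into it.
 *
 *    struct my_buffer {
 *       struct debug_flush_buf *dbg_fbuf;
 *       ...
 *    };
 *
 *    buf->dbg_fbuf = debug_flush_buf_create(false, 16);
 *
 *    void *my_transfer_map(struct my_buffer *buf, unsigned flags)
 *    {
 *       debug_flush_map(buf->dbg_fbuf, flags);
 *       return my_os_map(buf);
 *    }
 *
 *    void my_transfer_unmap(struct my_buffer *buf)
 *    {
 *       debug_flush_unmap(buf->dbg_fbuf);
 *       my_os_unmap(buf);
 *    }
 */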

#if MESA_DEBUG
#include "util/compiler.h"
#include "util/simple_mtx.h"
#include "util/u_debug_stack.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "util/u_thread.h"
#include <stdio.h>

/* Future improvement: Use realloc instead? */
#define DEBUG_FLUSH_MAP_DEPTH 64

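/* One outstanding map: the backtrace captured at map time and whether
 * the mapping is persistent (and thus safe to hold across a flush).
 */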
struct debug_map_item {
   struct debug_stack_frame *frame;
   bool persistent;
};

struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   mtx_t mutex;
   /* Immutable */
   bool supports_persistent;
   unsigned bt_depth;
   /* Protected by mutex */
   int map_count;
   bool has_sync_map;
   int last_sync_map;
   struct debug_map_item maps[DEBUG_FLUSH_MAP_DEPTH];
};

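/* One buffer referenced by the command stream being assembled: a
 * reference to its debug_flush_buf plus the backtrace captured at the
 * first reference.
 */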
struct debug_flush_item {
   struct debug_flush_buf *fbuf;
   unsigned bt_depth;
   struct debug_stack_frame *ref_frame;
};

struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time */
   unsigned bt_depth;
   bool catch_map_of_referenced;
   struct hash_table *ref_hash;
   struct list_head head;
};

static simple_mtx_t list_mutex = SIMPLE_MTX_INITIALIZER;
static struct list_head ctx_list = {&ctx_list, &ctx_list};

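/* Capture a backtrace of @depth frames, skipping the @start innermost
 * ones. Returns NULL on allocation failure.
 */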
static struct debug_stack_frame *
debug_flush_capture_frame(int start, int depth)
{
   struct debug_stack_frame *frames;

   frames = CALLOC(depth, sizeof(*frames));
   if (!frames)
      return NULL;

   debug_backtrace_capture(frames, start, depth);
   return frames;
}

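/**
 * Create a debug_flush_buf shadowing one pipe buffer.
 * @supports_persistent: if true, all maps of this buffer are treated as
 * persistent. @bt_depth: number of backtrace frames to capture for map
 * records. Returns NULL on allocation failure.
 */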
struct debug_flush_buf *
debug_flush_buf_create(bool supports_persistent, unsigned bt_depth)
{
   struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);

   if (!fbuf)
      goto out_no_buf;

   fbuf->supports_persistent = supports_persistent;
   fbuf->bt_depth = bt_depth;
   pipe_reference_init(&fbuf->reference, 1);
   (void) mtx_init(&fbuf->mutex, mtx_plain);

   return fbuf;
out_no_buf:
   debug_printf("Debug flush buffer creation failed.\n");
   debug_printf("Debug flush checking for this buffer will be incomplete.\n");
   return NULL;
}

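/**
 * Set @*dst to reference @src with reference counting; when the last
 * reference to the old @*dst is dropped, free its recorded map
 * backtraces and the structure itself. Pass @src = NULL to unreference.
 */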
void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      int i;

      for (i = 0; i < fbuf->map_count; ++i) {
         FREE(fbuf->maps[i].frame);
      }
      FREE(fbuf);
   }

   *dst = src;
}

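/* Destroy a reference record: drop the buffer reference and free the
 * captured backtrace.
 */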
static void
debug_flush_item_destroy(struct debug_flush_item *item)
{
   debug_flush_buf_reference(&item->fbuf, NULL);

   FREE(item->ref_frame);

   FREE(item);
}

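/**
 * Create a per-context tracker for buffers referenced by the command
 * stream being assembled. @catch_reference_of_mapped: also alert when a
 * buffer already referenced by a command stream is mapped
 * non-persistently. Returns NULL on allocation failure.
 */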
struct debug_flush_ctx *
debug_flush_ctx_create(bool catch_reference_of_mapped,
                       unsigned bt_depth)
{
   struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);

   if (!fctx)
      goto out_no_ctx;

   fctx->ref_hash = util_hash_table_create_ptr_keys();

   if (!fctx->ref_hash)
      goto out_no_ref_hash;

   fctx->bt_depth = bt_depth;
   fctx->catch_map_of_referenced = catch_reference_of_mapped;
   simple_mtx_lock(&list_mutex);
   list_addtail(&fctx->head, &ctx_list);
   simple_mtx_unlock(&list_mutex);

   return fctx;

out_no_ref_hash:
   FREE(fctx);
out_no_ctx:
   debug_printf("Debug flush context creation failed.\n");
   debug_printf("Debug flush checking for this context will be incomplete.\n");
   return NULL;
}

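/**
 * Print one alert message. @s: optional headline. @op: name of the
 * operation whose backtrace is printed. If @capture is true, a fresh
 * backtrace is captured, skipping @start frames, and freed afterwards;
 * otherwise the caller-provided @frame is dumped. @continued marks the
 * message as the first part of a multi-part alert.
 */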
static void
debug_flush_alert(const char *s, const char *op,
                  unsigned start, unsigned depth,
                  bool continued,
                  bool capture,
                  const struct debug_stack_frame *frame)
{
   if (capture)
      frame = debug_flush_capture_frame(start, depth);

   if (s)
      debug_printf("%s ", s);
   if (frame) {
      debug_printf("%s backtrace follows:\n", op);
      debug_backtrace_dump(frame, depth);
   } else
      debug_printf("No %s backtrace was captured.\n", op);

   if (continued)
      debug_printf("**********************************\n");
   else
      debug_printf("*********END OF MESSAGE***********\n\n\n");

   if (capture)
      FREE((void *)frame);
}


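/**
 * Record a map of @fbuf and run the map-time checks: alert on a new
 * synchronized map while a non-persistent synchronized map is still
 * outstanding, and on a non-persistent map of a buffer that is already
 * referenced by a command stream (if the context asked for that check).
 */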
void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
   bool map_sync, persistent;

   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   map_sync = !(flags & PIPE_MAP_UNSYNCHRONIZED);
   persistent = !map_sync || fbuf->supports_persistent ||
      !!(flags & PIPE_MAP_PERSISTENT);

   /* Recursive maps are allowed if previous maps are persistent,
    * or if the current map is unsync. In other cases we might flush
    * with unpersistent maps.
    */
   if (fbuf->has_sync_map && map_sync) {
      debug_flush_alert("Recursive sync map detected.", "Map",
                        2, fbuf->bt_depth, true, true, NULL);
      debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, false,
                        false, fbuf->maps[fbuf->last_sync_map].frame);
   }

   fbuf->maps[fbuf->map_count].frame =
      debug_flush_capture_frame(1, fbuf->bt_depth);
   fbuf->maps[fbuf->map_count].persistent = persistent;
   if (!persistent) {
      fbuf->has_sync_map = true;
      fbuf->last_sync_map = fbuf->map_count;
   }

   fbuf->map_count++;
   assert(fbuf->map_count < DEBUG_FLUSH_MAP_DEPTH);

   mtx_unlock(&fbuf->mutex);

   if (!persistent) {
      struct debug_flush_ctx *fctx;

      simple_mtx_lock(&list_mutex);
      LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
         struct debug_flush_item *item =
            util_hash_table_get(fctx->ref_hash, fbuf);

         if (item && fctx->catch_map_of_referenced) {
            debug_flush_alert("Already referenced map detected.",
                              "Map", 2, fbuf->bt_depth, true, true, NULL);
            debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
                              false, false, item->ref_frame);
         }
      }
      simple_mtx_unlock(&list_mutex);
   }
}

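/**
 * Record an unmap of @fbuf: pop the innermost map record, recompute
 * whether a non-persistent synchronized map is still outstanding, and
 * alert on an unmap without a matching map.
 */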
void
debug_flush_unmap(struct debug_flush_buf *fbuf)
{
   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   if (--fbuf->map_count < 0) {
      debug_flush_alert("Unmap not previously mapped detected.", "Map",
                        2, fbuf->bt_depth, false, true, NULL);
      fbuf->map_count = 0;
   } else {
      if (fbuf->has_sync_map && fbuf->last_sync_map == fbuf->map_count) {
         int i = fbuf->map_count;

         /* Find the new innermost non-persistent sync map, if any. */
         fbuf->has_sync_map = false;
         while (i-- && !fbuf->has_sync_map) {
            if (!fbuf->maps[i].persistent) {
               fbuf->has_sync_map = true;
               fbuf->last_sync_map = i;
            }
         }
      }
      FREE(fbuf->maps[fbuf->map_count].frame);
      fbuf->maps[fbuf->map_count].frame = NULL;
   }
   mtx_unlock(&fbuf->mutex);
}

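/*
 * Minimal usage sketch for the context side (the driver-side names are
 * hypothetical, not part of this API): create one debug_flush_ctx per
 * pipe context, report every buffer added to the current command batch,
 * and report flushes.
 *
 *    ctx->fctx = debug_flush_ctx_create(true, 16);
 *    ...
 *    debug_flush_cb_reference(ctx->fctx, buf->dbg_fbuf);
 *    ...
 *    debug_flush_might_flush(ctx->fctx);   // where a flush may happen
 *    debug_flush_flush(ctx->fctx);         // at an actual flush
 */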
/**
 * Add the given buffer to the list of active buffers. Active buffers
 * are those which are referenced by the command buffer currently being
 * constructed.
 */
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                         struct debug_flush_buf *fbuf)
{
   struct debug_flush_item *item;

   if (!fctx || !fbuf)
      return;

   item = util_hash_table_get(fctx->ref_hash, fbuf);

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                        2, fctx->bt_depth, true, true, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, false,
                        false, fbuf->maps[fbuf->last_sync_map].frame);
   }
   mtx_unlock(&fbuf->mutex);

   if (!item) {
      item = CALLOC_STRUCT(debug_flush_item);
      if (item) {
         debug_flush_buf_reference(&item->fbuf, fbuf);
         item->bt_depth = fctx->bt_depth;
         item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
         _mesa_hash_table_insert(fctx->ref_hash, fbuf, item);
         return;
      }
      goto out_no_item;
   }
   return;

out_no_item:
   debug_printf("Debug flush command buffer reference creation failed.\n");
   debug_printf("Debug flush checking will be incomplete "
                "for this command batch.\n");
}

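/* Hash table callback: alert if this active buffer still has a
 * non-persistent synchronized map outstanding at (potential) flush time.
 */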
static int
debug_flush_might_flush_cb(UNUSED void *key, void *value, void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;
   struct debug_flush_buf *fbuf = item->fbuf;

   mtx_lock(&fbuf->mutex);
   if (fbuf->map_count && fbuf->has_sync_map) {
      const char *reason = (const char *) data;
      char message[80];

      snprintf(message, sizeof(message),
               "%s referenced mapped buffer detected.", reason);

      debug_flush_alert(message, reason, 3, item->bt_depth, true, true, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, true, false,
                        fbuf->maps[fbuf->last_sync_map].frame);
      debug_flush_alert(NULL, "First reference", 0, item->bt_depth, false,
                        false, item->ref_frame);
   }
   mtx_unlock(&fbuf->mutex);

   return 0;
}

/**
 * Called when we're about to possibly flush a command buffer.
 * We check if any active buffers are in a mapped state. If so, print an alert.
 */
void
debug_flush_might_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Might flush");
}

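/* Hash table callback: destroy one reference record at flush time. */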
static int
debug_flush_flush_cb(UNUSED void *key, void *value, UNUSED void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;

   debug_flush_item_destroy(item);

   return 0;
}


/**
 * Called when we flush a command buffer. Two things are done:
 * 1. Check if any of the active buffers are currently mapped (alert if so).
 * 2. Discard/unreference all the active buffers.
 */
void
debug_flush_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Flush");
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   _mesa_hash_table_clear(fctx->ref_hash, NULL);
}

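/**
 * Destroy a flush context: unlink it from the global context list,
 * release all outstanding reference records and free the context.
 */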
void
debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   simple_mtx_lock(&list_mutex);
   list_del(&fctx->head);
   simple_mtx_unlock(&list_mutex);

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   _mesa_hash_table_clear(fctx->ref_hash, NULL);
   _mesa_hash_table_destroy(fctx->ref_hash, NULL);
   FREE(fctx);
}
#endif