/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file crocus_query.c
 *
 * ============================= GENXML CODE =============================
 * [This file is compiled once per generation.]
 * =======================================================================
 *
 * Query object support. This allows measuring various simple statistics
 * via counters on the GPU. We use GenX code for MI_MATH calculations.
 */
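
/*
 * Rough lifecycle, as implemented below: create_query() picks a batch,
 * begin_query() allocates snapshot storage and records a starting counter
 * snapshot, end_query() records the ending snapshot and marks the result
 * available, and get_query_result() waits for the snapshots to land and
 * computes end - start on the CPU; get_query_result_resource() can instead
 * compute it on the GPU with MI_MATH on GFX_VERx10 >= 75.
 */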

#include <stdio.h>
#include <errno.h>
#include "perf/intel_perf.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "crocus_context.h"
#include "crocus_defines.h"
#include "crocus_fence.h"
#include "crocus_monitor.h"
#include "crocus_resource.h"
#include "crocus_screen.h"

#include "crocus_genx_macros.h"

#if GFX_VER == 6
// TODO: Add these to genxml?
#define SO_PRIM_STORAGE_NEEDED(n) (0x2280)
#define SO_NUM_PRIMS_WRITTEN(n) (0x2288)

// TODO: remove HS/DS/CS
#define GFX6_IA_VERTICES_COUNT_num 0x2310
#define GFX6_IA_PRIMITIVES_COUNT_num 0x2318
#define GFX6_VS_INVOCATION_COUNT_num 0x2320
#define GFX6_HS_INVOCATION_COUNT_num 0x2300
#define GFX6_DS_INVOCATION_COUNT_num 0x2308
#define GFX6_GS_INVOCATION_COUNT_num 0x2328
#define GFX6_GS_PRIMITIVES_COUNT_num 0x2330
#define GFX6_CL_INVOCATION_COUNT_num 0x2338
#define GFX6_CL_PRIMITIVES_COUNT_num 0x2340
#define GFX6_PS_INVOCATION_COUNT_num 0x2348
#define GFX6_CS_INVOCATION_COUNT_num 0x2290
#define GFX6_PS_DEPTH_COUNT_num 0x2350

#elif GFX_VER >= 7
#define SO_PRIM_STORAGE_NEEDED(n) (GENX(SO_PRIM_STORAGE_NEEDED0_num) + (n) * 8)
#define SO_NUM_PRIMS_WRITTEN(n) (GENX(SO_NUM_PRIMS_WRITTEN0_num) + (n) * 8)
#endif

struct crocus_query {
   struct threaded_query b;

   enum pipe_query_type type;
   int index;

   bool ready;

   bool stalled;

   uint64_t result;

   struct crocus_state_ref query_state_ref;
   struct crocus_query_snapshots *map;
   struct crocus_syncobj *syncobj;

   int batch_idx;

   struct crocus_monitor_object *monitor;

   /* Fence for PIPE_QUERY_GPU_FINISHED. */
   struct pipe_fence_handle *fence;
};

struct crocus_query_snapshots {
   /** crocus_render_condition's saved MI_PREDICATE_RESULT value. */
   uint64_t predicate_result;

   /** Have the start/end snapshots landed? */
   uint64_t snapshots_landed;

   /** Starting and ending counter snapshots */
   uint64_t start;
   uint64_t end;
};

struct crocus_query_so_overflow {
   uint64_t predicate_result;
   uint64_t snapshots_landed;

   struct {
      uint64_t prim_storage_needed[2];
      uint64_t num_prims[2];
   } stream[4];
};

#if GFX_VERx10 >= 75
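/**
 * Build an MI value pointing at the given offset within this query's
 * snapshot buffer, for use with the MI builder.
 */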
static struct mi_value
query_mem64(struct crocus_query *q, uint32_t offset)
{
   return mi_mem64(rw_bo(crocus_resource_bo(q->query_state_ref.res),
                         q->query_state_ref.offset + offset));
}
#endif

/**
 * Is this type of query written by PIPE_CONTROL?
 */
static bool
crocus_is_query_pipelined(struct crocus_query *q)
{
   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_TIME_ELAPSED:
      return true;

   default:
      return false;
   }
}
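
/**
 * Mark a query's result as available by writing a nonzero value to
 * snapshots_landed, ordered after the snapshots themselves have landed.
 */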
static void
mark_available(struct crocus_context *ice, struct crocus_query *q)
{
#if GFX_VERx10 >= 75
   struct crocus_batch *batch = &ice->batches[q->batch_idx];
   struct crocus_screen *screen = batch->screen;
   unsigned flags = PIPE_CONTROL_WRITE_IMMEDIATE;
   unsigned offset = offsetof(struct crocus_query_snapshots, snapshots_landed);
   struct crocus_bo *bo = crocus_resource_bo(q->query_state_ref.res);
   offset += q->query_state_ref.offset;

   if (!crocus_is_query_pipelined(q)) {
      screen->vtbl.store_data_imm64(batch, bo, offset, true);
   } else {
      /* Order available *after* the query results. */
      flags |= PIPE_CONTROL_FLUSH_ENABLE;
      crocus_emit_pipe_control_write(batch, "query: mark available",
                                     flags, bo, offset, true);
   }
#endif
}

/**
 * Write a pipelined snapshot (such as PS_DEPTH_COUNT or a timestamp)
 * to the given offset in the query buffer via a PIPE_CONTROL.
 */
static void
crocus_pipelined_write(struct crocus_batch *batch,
                       struct crocus_query *q,
                       enum pipe_control_flags flags,
                       unsigned offset)
{
   struct crocus_bo *bo = crocus_resource_bo(q->query_state_ref.res);

   crocus_emit_pipe_control_write(batch, "query: pipelined snapshot write",
                                  flags,
                                  bo, offset, 0ull);
}
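
/**
 * Write the counter snapshot appropriate for the query's type to the
 * given offset within the query buffer.
 */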
static void
write_value(struct crocus_context *ice, struct crocus_query *q, unsigned offset)
{
   struct crocus_batch *batch = &ice->batches[q->batch_idx];
#if GFX_VER >= 6
   struct crocus_screen *screen = batch->screen;
   struct crocus_bo *bo = crocus_resource_bo(q->query_state_ref.res);
#endif

   if (!crocus_is_query_pipelined(q)) {
      crocus_emit_pipe_control_flush(batch,
                                     "query: non-pipelined snapshot write",
                                     PIPE_CONTROL_CS_STALL |
                                     PIPE_CONTROL_STALL_AT_SCOREBOARD);
      q->stalled = true;
   }

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      crocus_pipelined_write(&ice->batches[CROCUS_BATCH_RENDER], q,
                             PIPE_CONTROL_WRITE_DEPTH_COUNT |
                             PIPE_CONTROL_DEPTH_STALL,
                             offset);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      crocus_pipelined_write(&ice->batches[CROCUS_BATCH_RENDER], q,
                             PIPE_CONTROL_WRITE_TIMESTAMP,
                             offset);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
#if GFX_VER >= 6
      screen->vtbl.store_register_mem64(batch,
                                        q->index == 0 ?
                                        GENX(CL_INVOCATION_COUNT_num) :
                                        SO_PRIM_STORAGE_NEEDED(q->index),
                                        bo, offset, false);
#endif
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
#if GFX_VER >= 6
      screen->vtbl.store_register_mem64(batch,
                                        SO_NUM_PRIMS_WRITTEN(q->index),
                                        bo, offset, false);
#endif
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE: {
#if GFX_VER >= 6
      static const uint32_t index_to_reg[] = {
         GENX(IA_VERTICES_COUNT_num),
         GENX(IA_PRIMITIVES_COUNT_num),
         GENX(VS_INVOCATION_COUNT_num),
         GENX(GS_INVOCATION_COUNT_num),
         GENX(GS_PRIMITIVES_COUNT_num),
         GENX(CL_INVOCATION_COUNT_num),
         GENX(CL_PRIMITIVES_COUNT_num),
         GENX(PS_INVOCATION_COUNT_num),
         GENX(HS_INVOCATION_COUNT_num),
         GENX(DS_INVOCATION_COUNT_num),
         GENX(CS_INVOCATION_COUNT_num),
      };
      uint32_t reg = index_to_reg[q->index];

#if GFX_VER == 6
      /* Gfx6 GS code counts full primitives, that is, it won't count individual
       * triangles in a triangle strip. Use CL_INVOCATION_COUNT for that.
       */
      if (q->index == PIPE_STAT_QUERY_GS_PRIMITIVES)
         reg = GENX(CL_INVOCATION_COUNT_num);
#endif

      screen->vtbl.store_register_mem64(batch, reg, bo, offset, false);
#endif
      break;
   }
   default:
      assert(false);
   }
}

#if GFX_VER >= 6
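/**
 * Snapshot the SO_NUM_PRIMS_WRITTEN and SO_PRIM_STORAGE_NEEDED registers
 * for each stream covered by an overflow query, into either the begin
 * (end = false) or end (end = true) slots.
 */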
static void
write_overflow_values(struct crocus_context *ice, struct crocus_query *q, bool end)
{
   struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
   struct crocus_screen *screen = batch->screen;
   uint32_t count = q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ? 1 : 4;
   struct crocus_bo *bo = crocus_resource_bo(q->query_state_ref.res);
   uint32_t offset = q->query_state_ref.offset;
   crocus_emit_pipe_control_flush(batch,
                                  "query: write SO overflow snapshots",
                                  PIPE_CONTROL_CS_STALL |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   for (uint32_t i = 0; i < count; i++) {
      int s = q->index + i;
      int g_idx = offset + offsetof(struct crocus_query_so_overflow,
                                    stream[s].num_prims[end]);
      int w_idx = offset + offsetof(struct crocus_query_so_overflow,
                                    stream[s].prim_storage_needed[end]);
      screen->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
                                        bo, g_idx, false);
      screen->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
                                        bo, w_idx, false);
   }
}
#endif
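
/**
 * Compute the delta between two raw timestamp snapshots, accounting for
 * the possibility that the TIMESTAMP_BITS-wide counter wrapped around
 * once between them.
 */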
static uint64_t
crocus_raw_timestamp_delta(uint64_t time0, uint64_t time1)
{
   if (time0 > time1) {
      return (1ULL << TIMESTAMP_BITS) + time1 - time0;
   } else {
      return time1 - time0;
   }
}
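
/**
 * Has stream \p s overflowed? It has if the storage its primitives needed
 * (SO_PRIM_STORAGE_NEEDED delta) differs from the primitives actually
 * written (SO_NUM_PRIMS_WRITTEN delta) over the query's lifetime.
 */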
static bool
stream_overflowed(struct crocus_query_so_overflow *so, int s)
{
   return (so->stream[s].prim_storage_needed[1] -
           so->stream[s].prim_storage_needed[0]) !=
          (so->stream[s].num_prims[1] - so->stream[s].num_prims[0]);
}
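
/**
 * Compute a query's result from the landed snapshots, scaling or
 * normalizing it as the query type requires, and mark it ready.
 */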
static void
calculate_result_on_cpu(const struct intel_device_info *devinfo,
                        struct crocus_query *q)
{
   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      q->result = q->map->end != q->map->start;
      break;
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* The timestamp is the single starting snapshot. */
      q->result = intel_device_info_timebase_scale(devinfo, q->map->start);
      q->result &= (1ull << TIMESTAMP_BITS) - 1;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      q->result = crocus_raw_timestamp_delta(q->map->start, q->map->end);
      q->result = intel_device_info_timebase_scale(devinfo, q->result);
      q->result &= (1ull << TIMESTAMP_BITS) - 1;
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      q->result = stream_overflowed((void *) q->map, q->index);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      q->result = false;
      for (int i = 0; i < PIPE_MAX_VERTEX_STREAMS; i++)
         q->result |= stream_overflowed((void *) q->map, i);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE:
      q->result = q->map->end - q->map->start;

      /* WaDividePSInvocationCountBy4:HSW,BDW */
      if (GFX_VERx10 >= 75 && q->index == PIPE_STAT_QUERY_PS_INVOCATIONS)
         q->result /= 4;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   default:
      q->result = q->map->end - q->map->start;
      break;
   }

   q->ready = true;
}

#if GFX_VERx10 >= 75
/**
 * Calculate the streamout overflow for stream \p idx:
 *
 * (num_prims[1] - num_prims[0]) - (storage_needed[1] - storage_needed[0])
 */
static struct mi_value
calc_overflow_for_stream(struct mi_builder *b,
                         struct crocus_query *q,
                         int idx)
{
#define C(counter, i) query_mem64(q, \
   offsetof(struct crocus_query_so_overflow, stream[idx].counter[i]))

   return mi_isub(b, mi_isub(b, C(num_prims, 1), C(num_prims, 0)),
                     mi_isub(b, C(prim_storage_needed, 1),
                                C(prim_storage_needed, 0)));
#undef C
}

/**
 * Calculate whether any stream has overflowed.
 */
static struct mi_value
calc_overflow_any_stream(struct mi_builder *b, struct crocus_query *q)
{
   struct mi_value stream_result[PIPE_MAX_VERTEX_STREAMS];
   for (int i = 0; i < PIPE_MAX_VERTEX_STREAMS; i++)
      stream_result[i] = calc_overflow_for_stream(b, q, i);

   struct mi_value result = stream_result[0];
   for (int i = 1; i < PIPE_MAX_VERTEX_STREAMS; i++)
      result = mi_ior(b, result, stream_result[i]);

   return result;
}
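
/**
 * Is this query type's result a boolean (predicate) rather than a counter?
 */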
static bool
query_is_boolean(enum pipe_query_type type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      return true;
   default:
      return false;
   }
}

/**
 * Calculate the result using MI_MATH.
 */
static struct mi_value
calculate_result_on_gpu(const struct intel_device_info *devinfo,
                        struct mi_builder *b,
                        struct crocus_query *q)
{
   struct mi_value result;
   struct mi_value start_val =
      query_mem64(q, offsetof(struct crocus_query_snapshots, start));
   struct mi_value end_val =
      query_mem64(q, offsetof(struct crocus_query_snapshots, end));

   switch (q->type) {
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result = calc_overflow_for_stream(b, q, q->index);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      result = calc_overflow_any_stream(b, q);
      break;
   case PIPE_QUERY_TIMESTAMP: {
      /* TODO: This discards any fractional bits of the timebase scale.
       * We would need to do a bit of fixed point math on the CS ALU, or
       * launch an actual shader to calculate this with full precision.
       */
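      /* For example, with the 12.5 MHz timestamp these platforms commonly
       * use (an assumption; devinfo->timestamp_frequency holds the real
       * rate), scale is 80 and each tick is 80 ns. The mask keeps the
       * scaled value within the 36-bit range the counter provides.
       */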
      uint32_t scale = 1000000000ull / devinfo->timestamp_frequency;
      result = mi_iand(b, mi_imm((1ull << 36) - 1),
                       mi_imul_imm(b, start_val, scale));
      break;
   }
   case PIPE_QUERY_TIME_ELAPSED: {
      /* TODO: This discards fractional bits (see above). */
      uint32_t scale = 1000000000ull / devinfo->timestamp_frequency;
      result = mi_imul_imm(b, mi_isub(b, end_val, start_val), scale);
      break;
   }
   default:
      result = mi_isub(b, end_val, start_val);
      break;
   }

   /* WaDividePSInvocationCountBy4:HSW,BDW */
   if (GFX_VERx10 >= 75 &&
       q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE &&
       q->index == PIPE_STAT_QUERY_PS_INVOCATIONS)
      result = mi_ushr32_imm(b, result, 2);

   if (query_is_boolean(q->type))
      result = mi_iand(b, mi_nz(b, result), mi_imm(1));

   return result;
}
#endif

static struct pipe_query *
crocus_create_query(struct pipe_context *ctx,
                    unsigned query_type,
                    unsigned index)
{
   struct crocus_query *q = calloc(1, sizeof(struct crocus_query));
   if (unlikely(!q))
      return NULL;

   q->type = query_type;
   q->index = index;
   q->monitor = NULL;

   if (q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE &&
       q->index == PIPE_STAT_QUERY_CS_INVOCATIONS)
      q->batch_idx = CROCUS_BATCH_COMPUTE;
   else
      q->batch_idx = CROCUS_BATCH_RENDER;
   return (struct pipe_query *) q;
}

static struct pipe_query *
crocus_create_batch_query(struct pipe_context *ctx,
                          unsigned num_queries,
                          unsigned *query_types)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_query *q = calloc(1, sizeof(struct crocus_query));
   if (unlikely(!q))
      return NULL;
   q->type = PIPE_QUERY_DRIVER_SPECIFIC;
   q->index = -1;
   q->monitor = crocus_create_monitor_object(ice, num_queries, query_types);
   if (unlikely(!q->monitor)) {
      free(q);
      return NULL;
   }

   return (struct pipe_query *) q;
}

static void
crocus_destroy_query(struct pipe_context *ctx, struct pipe_query *p_query)
{
   struct crocus_query *query = (void *) p_query;
   struct crocus_screen *screen = (void *) ctx->screen;
   if (query->monitor) {
      crocus_destroy_monitor_object(ctx, query->monitor);
      query->monitor = NULL;
   } else {
      crocus_syncobj_reference(screen, &query->syncobj, NULL);
      screen->base.fence_reference(ctx->screen, &query->fence, NULL);
   }
   pipe_resource_reference(&query->query_state_ref.res, NULL);
   free(query);
}

static bool
crocus_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_query *q = (void *) query;

   if (q->monitor)
      return crocus_begin_monitor(ctx, q->monitor);

   void *ptr = NULL;
   uint32_t size;

   if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
       q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      size = sizeof(struct crocus_query_so_overflow);
   else
      size = sizeof(struct crocus_query_snapshots);
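
   /* Allocate snapshot storage from the query buffer uploader; the
    * resource and offset are remembered in query_state_ref, and the
    * CPU mapping lands in q->map.
    */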
   u_upload_alloc(ice->query_buffer_uploader, 0,
                  size, util_next_power_of_two(size),
                  &q->query_state_ref.offset,
                  &q->query_state_ref.res, &ptr);

   if (!q->query_state_ref.res)
      return false;
   if (!crocus_resource_bo(q->query_state_ref.res))
      return false;

   q->map = ptr;
   if (!q->map)
      return false;

   q->result = 0ull;
   q->ready = false;
   WRITE_ONCE(q->map->snapshots_landed, false);

   if (q->type == PIPE_QUERY_PRIMITIVES_GENERATED && q->index == 0) {
      ice->state.prims_generated_query_active = true;
      ice->state.dirty |= CROCUS_DIRTY_STREAMOUT | CROCUS_DIRTY_CLIP;
   }

#if GFX_VER <= 5
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER ||
       q->type == PIPE_QUERY_OCCLUSION_PREDICATE) {
      ice->state.stats_wm++;
      ice->state.dirty |= CROCUS_DIRTY_WM | CROCUS_DIRTY_COLOR_CALC_STATE;
   }
#endif
#if GFX_VER >= 6
   if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
       q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      write_overflow_values(ice, q, false);
   else
#endif
      write_value(ice, q,
                  q->query_state_ref.offset +
                  offsetof(struct crocus_query_snapshots, start));

   return true;
}

static bool
crocus_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_query *q = (void *) query;

   if (q->monitor)
      return crocus_end_monitor(ctx, q->monitor);

   if (q->type == PIPE_QUERY_GPU_FINISHED) {
      ctx->flush(ctx, &q->fence, PIPE_FLUSH_DEFERRED);
      return true;
   }

   struct crocus_batch *batch = &ice->batches[q->batch_idx];
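
   /* A timestamp query needs only the single "start" snapshot, so record
    * it now via the begin_query path and mark the result available.
    */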
   if (q->type == PIPE_QUERY_TIMESTAMP) {
      crocus_begin_query(ctx, query);
      crocus_batch_reference_signal_syncobj(batch, &q->syncobj);
      mark_available(ice, q);
      return true;
   }

#if GFX_VER <= 5
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER ||
       q->type == PIPE_QUERY_OCCLUSION_PREDICATE) {
      ice->state.stats_wm--;
      ice->state.dirty |= CROCUS_DIRTY_WM | CROCUS_DIRTY_COLOR_CALC_STATE;
   }
#endif
   if (q->type == PIPE_QUERY_PRIMITIVES_GENERATED && q->index == 0) {
      ice->state.prims_generated_query_active = false;
      ice->state.dirty |= CROCUS_DIRTY_STREAMOUT | CROCUS_DIRTY_CLIP;
   }

#if GFX_VER >= 6
   if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
       q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      write_overflow_values(ice, q, true);
   else
#endif
      write_value(ice, q,
                  q->query_state_ref.offset +
                  offsetof(struct crocus_query_snapshots, end));

   crocus_batch_reference_signal_syncobj(batch, &q->syncobj);
   mark_available(ice, q);

   return true;
}

/**
 * See if the snapshots have landed for a query, and if so, compute the
 * result and mark it ready. Does not flush (unlike crocus_get_query_result).
 */
static void
crocus_check_query_no_flush(struct crocus_context *ice, struct crocus_query *q)
{
   struct crocus_screen *screen = (void *) ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   if (!q->ready && READ_ONCE(q->map->snapshots_landed)) {
      calculate_result_on_cpu(devinfo, q);
   }
}

static bool
crocus_get_query_result(struct pipe_context *ctx,
                        struct pipe_query *query,
                        bool wait,
                        union pipe_query_result *result)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_query *q = (void *) query;

   if (q->monitor)
      return crocus_get_monitor_result(ctx, q->monitor, wait, result->batch);

   struct crocus_screen *screen = (void *) ctx->screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   if (unlikely(screen->devinfo.no_hw)) {
      result->u64 = 0;
      return true;
   }

   if (!q->ready) {
      struct crocus_batch *batch = &ice->batches[q->batch_idx];
      if (q->syncobj == crocus_batch_get_signal_syncobj(batch))
         crocus_batch_flush(batch);

#if GFX_VERx10 >= 75
      while (!READ_ONCE(q->map->snapshots_landed)) {
         if (wait)
            crocus_wait_syncobj(ctx->screen, q->syncobj, INT64_MAX);
         else
            return false;
      }
      assert(READ_ONCE(q->map->snapshots_landed));
#else
      if (crocus_wait_syncobj(ctx->screen, q->syncobj, wait ? INT64_MAX : 0)) {
         /* If we waited and still timed out, mark the query as ready to
          * avoid looping forever.
          */
         if (wait)
            q->ready = true;
         return false;
      }
#endif
      calculate_result_on_cpu(devinfo, q);
   }

   assert(q->ready);

   result->u64 = q->result;

   return true;
}

#if GFX_VER >= 7
static void
crocus_get_query_result_resource(struct pipe_context *ctx,
                                 struct pipe_query *query,
                                 enum pipe_query_flags flags,
                                 enum pipe_query_value_type result_type,
                                 int index,
                                 struct pipe_resource *p_res,
                                 unsigned offset)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_query *q = (void *) query;
   struct crocus_batch *batch = &ice->batches[q->batch_idx];
   struct crocus_screen *screen = batch->screen;
   const struct intel_device_info *devinfo = &batch->screen->devinfo;
   struct crocus_resource *res = (void *) p_res;
   struct crocus_bo *query_bo = crocus_resource_bo(q->query_state_ref.res);
   struct crocus_bo *dst_bo = crocus_resource_bo(p_res);
   unsigned snapshots_landed_offset =
      offsetof(struct crocus_query_snapshots, snapshots_landed);

   res->bind_history |= PIPE_BIND_QUERY_BUFFER;

   if (index == -1) {
      /* They're asking for the availability of the result. If we still
       * have commands queued up which produce the result, submit them
       * now so that progress happens. Either way, copy the snapshots
       * landed field to the destination resource.
       */
      if (q->syncobj == crocus_batch_get_signal_syncobj(batch))
         crocus_batch_flush(batch);

      screen->vtbl.copy_mem_mem(batch, dst_bo, offset,
                                query_bo, snapshots_landed_offset,
                                result_type <= PIPE_QUERY_TYPE_U32 ? 4 : 8);
      return;
   }

   if (!q->ready && READ_ONCE(q->map->snapshots_landed)) {
      /* The final snapshots happen to have landed, so let's just compute
       * the result on the CPU now...
       */
      calculate_result_on_cpu(devinfo, q);
   }

   if (q->ready) {
      /* We happen to have the result on the CPU, so just copy it. */
      if (result_type <= PIPE_QUERY_TYPE_U32) {
         screen->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
      } else {
         screen->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
      }

      /* Make sure the result lands before they bind the QBO elsewhere
       * and use the result.
       */
      // XXX: Why? i965 doesn't do this.
      crocus_emit_pipe_control_flush(batch,
                                     "query: unknown QBO flushing hack",
                                     PIPE_CONTROL_CS_STALL);
      return;
   }

#if GFX_VERx10 >= 75
   bool predicated = !(flags & PIPE_QUERY_WAIT) && !q->stalled;

   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);

   struct mi_value result = calculate_result_on_gpu(devinfo, &b, q);
   struct mi_value dst =
      result_type <= PIPE_QUERY_TYPE_U32 ? mi_mem32(rw_bo(dst_bo, offset))
                                         : mi_mem64(rw_bo(dst_bo, offset));
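
   /* If the snapshots may not have landed yet, guard the store with
    * MI_PREDICATE: load snapshots_landed into MI_PREDICATE_RESULT and
    * only store the result if it is set, so stale data is never written.
    */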
   if (predicated) {
      mi_store(&b, mi_reg32(MI_PREDICATE_RESULT),
               mi_mem64(ro_bo(query_bo, snapshots_landed_offset)));
      mi_store_if(&b, dst, result);
   } else {
      mi_store(&b, dst, result);
   }
#endif
}
#endif

static void
crocus_set_active_query_state(struct pipe_context *ctx, bool enable)
{
   struct crocus_context *ice = (void *) ctx;

   if (ice->state.statistics_counters_enabled == enable)
      return;

   // XXX: most packets aren't paying attention to this yet, because it'd
   // have to be done dynamically at draw time, which is a pain
   ice->state.statistics_counters_enabled = enable;
   ice->state.dirty |= CROCUS_DIRTY_CLIP |
                       CROCUS_DIRTY_RASTER |
                       CROCUS_DIRTY_STREAMOUT |
                       CROCUS_DIRTY_WM;
   ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_GS |
                             CROCUS_STAGE_DIRTY_TCS |
                             CROCUS_STAGE_DIRTY_TES |
                             CROCUS_STAGE_DIRTY_VS;
}

static void
set_predicate_enable(struct crocus_context *ice, bool value)
{
   if (value)
      ice->state.predicate = CROCUS_PREDICATE_STATE_RENDER;
   else
      ice->state.predicate = CROCUS_PREDICATE_STATE_DONT_RENDER;
}

#if GFX_VER >= 7
static void
set_predicate_for_result(struct crocus_context *ice,
                         struct crocus_query *q,
                         bool inverted)
{
   struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
   struct crocus_bo *bo = crocus_resource_bo(q->query_state_ref.res);

#if GFX_VERx10 < 75
   /* IVB doesn't have enough MI for this */
   if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
       q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
      ice->state.predicate = CROCUS_PREDICATE_STATE_STALL_FOR_QUERY;
      return;
   }
#endif

   /* The CPU doesn't have the query result yet; use hardware predication */
   ice->state.predicate = CROCUS_PREDICATE_STATE_USE_BIT;

   /* Ensure the memory is coherent for MI_LOAD_REGISTER_* commands. */
   crocus_emit_pipe_control_flush(batch,
                                  "conditional rendering: set predicate",
                                  PIPE_CONTROL_FLUSH_ENABLE);
   q->stalled = true;

#if GFX_VERx10 < 75
   struct crocus_screen *screen = batch->screen;
   screen->vtbl.load_register_mem64(batch, MI_PREDICATE_SRC0, bo,
                                    q->query_state_ref.offset +
                                    offsetof(struct crocus_query_snapshots, start));
   screen->vtbl.load_register_mem64(batch, MI_PREDICATE_SRC1, bo,
                                    q->query_state_ref.offset +
                                    offsetof(struct crocus_query_snapshots, end));
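
   /* COMPAREOP_SRCS_EQUAL makes the predicate test start == end, i.e.
    * "nothing drawn". LOADOP_LOADINV inverts that so rendering proceeds
    * when the counters differ; LOADOP_LOAD keeps the equality sense for
    * the inverted condition.
    */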
   uint32_t mi_predicate = MI_PREDICATE | MI_PREDICATE_COMBINEOP_SET |
                           MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
   if (inverted)
      mi_predicate |= MI_PREDICATE_LOADOP_LOAD;
   else
      mi_predicate |= MI_PREDICATE_LOADOP_LOADINV;
   crocus_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
#else
   struct mi_builder b;
   mi_builder_init(&b, &batch->screen->devinfo, batch);

   struct mi_value result;

   switch (q->type) {
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result = calc_overflow_for_stream(&b, q, q->index);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      result = calc_overflow_any_stream(&b, q);
      break;
   default: {
      /* PIPE_QUERY_OCCLUSION_* */
      struct mi_value start =
         query_mem64(q, offsetof(struct crocus_query_snapshots, start));
      struct mi_value end =
         query_mem64(q, offsetof(struct crocus_query_snapshots, end));
      result = mi_isub(&b, end, start);
      break;
   }
   }

   result = inverted ? mi_z(&b, result) : mi_nz(&b, result);
   result = mi_iand(&b, result, mi_imm(1));

   /* We immediately set the predicate on the render batch, as all the
    * counters come from 3D operations. However, we may need to predicate
    * a compute dispatch, which executes in a different GEM context and has
    * a different MI_PREDICATE_RESULT register. So, we save the result to
    * memory and reload it in crocus_launch_grid.
    */
   mi_value_ref(&b, result);

   mi_store(&b, mi_reg64(MI_PREDICATE_SRC0), result);
   mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));

   unsigned mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
                           MI_PREDICATE_COMBINEOP_SET |
                           MI_PREDICATE_COMPAREOP_SRCS_EQUAL;

   crocus_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
   mi_store(&b, query_mem64(q, offsetof(struct crocus_query_snapshots,
                                        predicate_result)), result);
#endif
   ice->state.compute_predicate = bo;
}
#endif

static void
crocus_render_condition(struct pipe_context *ctx,
                        struct pipe_query *query,
                        bool condition,
                        enum pipe_render_cond_flag mode)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_query *q = (void *) query;

   /* The old condition isn't relevant; we'll update it if necessary */
   ice->state.compute_predicate = NULL;
   ice->condition.query = q;
   ice->condition.condition = condition;
   ice->condition.mode = mode;

   if (!q) {
      ice->state.predicate = CROCUS_PREDICATE_STATE_RENDER;
      return;
   }

   crocus_check_query_no_flush(ice, q);

   if (q->result || q->ready) {
      set_predicate_enable(ice, (q->result != 0) ^ condition);
   } else {
      if (mode == PIPE_RENDER_COND_NO_WAIT ||
          mode == PIPE_RENDER_COND_BY_REGION_NO_WAIT) {
         perf_debug(&ice->dbg, "Conditional rendering demoted from "
                    "\"no wait\" to \"wait\".");
      }
#if GFX_VER >= 7
      set_predicate_for_result(ice, q, condition);
#else
      ice->state.predicate = CROCUS_PREDICATE_STATE_STALL_FOR_QUERY;
#endif
   }
}

static void
crocus_resolve_conditional_render(struct crocus_context *ice)
{
   struct pipe_context *ctx = (void *) ice;
   struct crocus_query *q = ice->condition.query;
   struct pipe_query *query = (void *) q;
   union pipe_query_result result;

   if (ice->state.predicate != CROCUS_PREDICATE_STATE_USE_BIT)
      return;

   assert(q);

   crocus_get_query_result(ctx, query, true, &result);
   set_predicate_enable(ice, (q->result != 0) ^ ice->condition.condition);
}

#if GFX_VER >= 7
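/**
 * Reload the predicate result saved by set_predicate_for_result into the
 * compute batch's own MI_PREDICATE registers (see the comment there).
 */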
static void
crocus_emit_compute_predicate(struct crocus_batch *batch)
{
   struct crocus_context *ice = batch->ice;
   struct crocus_screen *screen = batch->screen;
   screen->vtbl.load_register_mem32(batch, MI_PREDICATE_SRC0,
                                    ice->state.compute_predicate, 0);
   screen->vtbl.load_register_imm32(batch, MI_PREDICATE_SRC1, 0);
   unsigned mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
                           MI_PREDICATE_COMBINEOP_SET |
                           MI_PREDICATE_COMPAREOP_SRCS_EQUAL;

   crocus_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
}
#endif

void
genX(crocus_init_screen_query)(struct crocus_screen *screen)
{
   screen->vtbl.resolve_conditional_render = crocus_resolve_conditional_render;
#if GFX_VER >= 7
   screen->vtbl.emit_compute_predicate = crocus_emit_compute_predicate;
#endif
}

void
genX(crocus_init_query)(struct crocus_context *ice)
{
   struct pipe_context *ctx = &ice->ctx;

   ctx->create_query = crocus_create_query;
   ctx->create_batch_query = crocus_create_batch_query;
   ctx->destroy_query = crocus_destroy_query;
   ctx->begin_query = crocus_begin_query;
   ctx->end_query = crocus_end_query;
   ctx->get_query_result = crocus_get_query_result;
#if GFX_VER >= 7
   ctx->get_query_result_resource = crocus_get_query_result_resource;
#endif
   ctx->set_active_query_state = crocus_set_active_query_state;
   ctx->render_condition = crocus_render_condition;
}