xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/r600/r600_query.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 * Copyright 2014 Marek Olšák <[email protected]>
 * SPDX-License-Identifier: MIT
 */

#include "r600_query.h"
#include "r600_pipe.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "tgsi/tgsi_text.h"

#define R600_MAX_STREAMS 4

struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}

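/* Map a driver-specific query type to the winsys counter it reads. */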
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->begin_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->begin_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = rctx->ws->query_value(rctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->end_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->end_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->end_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = rctx->ws->query_value(rctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? OS_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_CS_THREAD_BUSY:
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_cu;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.max_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}


static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after being written by the
	 * GPU, hence staging is probably a good usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

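/* Occlusion query buffers hold, per result, one 16-byte slot for each
 * render backend: dwords 0-1 are the ZPASS count at query begin and
 * dwords 2-3 the count at query end. The hardware sets bit 31 of the
 * high dword when it writes a value, so pre-setting that bit below for
 * disabled backends makes their slots always read as valid.
 */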
static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(rscreen->ws, buffer->buf, NULL,
						   PIPE_MAP_WRITE |
						   PIPE_MAP_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = rscreen->info.max_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
                                              struct r600_query *rquery,
                                              enum pipe_query_flags flags,
                                              enum pipe_query_value_type result_type,
                                              int index,
                                              struct pipe_resource *resource,
                                              unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

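/* result_size is the room needed for the values sampled at query begin
 * and query end, plus (where used) a fence dword that marks the result
 * as available; see r600_get_hw_query_params for the exact layouts.
 */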
static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		query->result_size = 16 * rscreen->info.max_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32 * R600_MAX_STREAMS;
		query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
		query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->gfx_level >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

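/* Keep a count of active occlusion queries so the DB state atom can be
 * re-emitted when Z-pass counting has to be switched on or off. */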
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			struct r600_context *ctx = (struct r600_context*)rctx;
			r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
		}
	}
}

static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
				  unsigned stream)
{
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		/* Write the timestamp after the last draw is done.
		 * (bottom-of-pipe)
		 */
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP,
					 NULL, va, 0, query->b.type);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE |
			RADEON_PRIO_QUERY);
}

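/* Emit the begin-of-query sample. If the current result buffer is full,
 * it is chained to a fresh one; r600_query_hw_get_result later walks the
 * whole chain and accumulates all partial results. */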
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.max_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += 16;
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		va += 16;
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		FALLTHROUGH;
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
					 0, query->b.type);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE |
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
					 EOP_DATA_SEL_VALUE_32BIT,
					 query->buffer.buf, fence_va, 0x80000000,
					 query->b.type);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

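/* Emit one SET_PREDICATION packet. The low 32 bits of the result VA go
 * in the first dword; the top 8 VA bits share the second dword with the
 * predication operation and hint flags. */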
static void emit_set_predicate(struct r600_common_context *ctx,
			       struct r600_resource *buf, uint64_t va,
			       uint32_t op)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
	radeon_emit(cs, va);
	radeon_emit(cs, op | ((va >> 32) & 0xFF));
	r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ |
			RADEON_PRIO_QUERY);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		invert = !invert;
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
				for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

					/* set CONTINUE bit for all packets except the first */
					op |= PREDICATION_CONTINUE;
				}
			} else {
				emit_set_predicate(ctx, qbuf->buf, va, op);
				op |= PREDICATION_CONTINUE;
			}

			results_base += query->result_size;
		}
	}
}

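/* TIMESTAMP_DISJOINT, GPU_FINISHED and driver-specific queries are
 * implemented in software; everything else samples GPU counters. */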
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}

static bool r600_begin_query(struct pipe_context *ctx,
			     struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rctx->ws, query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	list_addtail(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		list_delinit(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

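/* Describe where the start/end values and the availability fence of one
 * result slot live, for the compute-shader result path below. */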
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.max_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		params->pair_count = R600_MAX_STREAMS;
		params->pair_stride = 32;
		FALLTHROUGH;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 16;

		/* We can reuse the high dword of the last 64-bit value as a
		 * fence: it is initialized as 0, and the high bit is set by
		 * the write of the streamout stats event.
		 */
		params->fence_offset = rquery->result_size - 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

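/* Read one (start, end) pair of 64-bit counters and return end - start.
 * With test_status_bit, bit 63 of both values must be set (the hardware
 * sets it once the value is written); otherwise the pair is treated as
 * not ready and contributes 0. */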
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.max_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
			result->b = result->b ||
				r600_query_read_result(buffer, 2, 6, true) !=
				r600_query_read_result(buffer, 0, 4, true);
			buffer = (char *)buffer + 32;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (rscreen->gfx_level >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static bool r600_get_query_result(struct pipe_context *ctx,
				  struct pipe_query *query, bool wait,
				  union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
                                           struct pipe_query *query,
                                           enum pipe_query_flags flags,
                                           enum pipe_query_value_type result_type,
                                           int index,
                                           struct pipe_resource *resource,
                                           unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, flags, result_type, index,
	                                 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_MAP_READ |
				 (wait ? 0 : PIPE_MAP_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (rquery->b.flushed)
			map = rctx->ws->buffer_map(rctx->ws, qbuf->buf->buf, NULL, usage);
		else
			map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}

1350 /* Create the compute shader that is used to collect the results.
1351  *
1352  * One compute grid with a single thread is launched for every query result
1353  * buffer. The thread (optionally) reads a previous summary buffer, then
1354  * accumulates data from the query result buffer, and writes the result either
1355  * to a summary buffer to be consumed by the next grid invocation or to the
1356  * user-supplied buffer.
1357  *
1358  * Data layout:
1359  *
1360  * CONST
1361  *  0.x = end_offset
1362  *  0.y = result_stride
1363  *  0.z = result_count
1364  *  0.w = bit field:
1365  *          1: read previously accumulated values
1366  *          2: write accumulated values for chaining
1367  *          4: write result available
1368  *          8: convert result to boolean (0/1)
1369  *         16: only read one dword and use that as result
1370  *         32: apply timestamp conversion
1371  *         64: store full 64 bits result
1372  *        128: store signed 32 bits result
1373  *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
1374  *  1.x = fence_offset
1375  *  1.y = pair_stride
1376  *  1.z = pair_count
1377  *  1.w = result_offset
1378  *  2.x = buffer0 offset
1379  *
1380  * BUFFER[0] = query result buffer
1381  * BUFFER[1] = previous summary buffer
1382  * BUFFER[2] = next summary buffer or user-supplied buffer
1383  */
r600_create_query_result_shader(struct r600_common_context * rctx)1384 static void r600_create_query_result_shader(struct r600_common_context *rctx)
1385 {
1386 	/* TEMP[0].xy = accumulated result so far
1387 	 * TEMP[0].z = result not available
1388 	 *
1389 	 * TEMP[1].x = current result index
1390 	 * TEMP[1].y = current pair index
1391 	 */
1392 	static const char text_tmpl[] =
1393 		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..2]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {256, 0, 0, 0}\n"

		"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"UADD TEMP[1].x, CONST[0][1].xxxx, CONST[0][2].xxxx\n"
			"LOAD TEMP[1].x, BUFFER[0], TEMP[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"UADD TEMP[0].x, IMM[0].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[0].xy, BUFFER[0], TEMP[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
				"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

					"U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

					"AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
					"UIF TEMP[5].zzzz\n"
						/* Load second start/end half-pair and
						 * take the difference
						 */
						"UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
						"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
						"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

						"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
						"U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
					"ENDIF\n"

					"U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, CONST[0][1].wwww, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, CONST[0][1].wwww, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, CONST[0][1].wwww, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
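	/* With IMM[3] = {1000000, 0, freq, 0}, the shader computes
	 * ticks * 1000000 / freq. Assuming clock_crystal_freq is reported in
	 * kHz (as in radeon_info), this converts GPU timestamp ticks to
	 * nanoseconds.
	 */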
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

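/* Rebind the compute state, constant buffer 0 and the three shader buffers
 * saved by the context's save_qbo_state hook (not shown here), then drop the
 * references held on the saved SSBOs.
 */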
static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, true, &st->saved_const0);
	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
                                              struct r600_query *rquery,
                                              enum pipe_query_flags flags,
                                              enum pipe_query_value_type result_type,
                                              int index,
                                              struct pipe_resource *resource,
                                              unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
		uint32_t buffer_offset;
		uint32_t buffer0_offset;
	} consts;
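	/* This struct is bound as constant buffer 0 and is read by the result
	 * shader as CONST[0][0] = (end_offset, result_stride, result_count,
	 * config), CONST[0][1] = (fence_offset, pair_stride, pair_count,
	 * buffer_offset) and CONST[0][2].x = buffer0_offset.
	 */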

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(&rctx->allocator_zeroed_memory, 16, 256,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}
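	/* The 16-byte suballocation from zeroed memory serves as the summary
	 * buffer for chaining (BUFFER[1]/BUFFER[2] in the shader): two dwords
	 * of accumulated result plus an availability flag. Starting from
	 * zeroed memory means the first pass reads an accumulated value of 0
	 * that is marked available.
	 */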

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}
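	/* Summary of the config bits as consumed by the shader above:
	 *   1   read previously accumulated values from BUFFER[1]
	 *   2   write accumulated values to BUFFER[2] for chaining
	 *   4   only store result availability
	 *   8   convert the result to a boolean (predicates)
	 *   16  timestamp: read only the last result
	 *   32  apply the timestamp-to-nanosecond conversion
	 *   64  store the full 64-bit result
	 *   128 clamp the result to INT32_MAX
	 *   256 SO overflow: take the difference of two half-pairs
	 */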

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset & ~0xff;
		ssbo[0].buffer_size = qbuf->results_end - ssbo[0].buffer_offset;
		consts.buffer0_offset = (params.start_offset & 0xff);
		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset & ~0xff;
			ssbo[2].buffer_size = offset + 8;
			consts.buffer_offset = (offset & 0xff);
		} else
			consts.buffer_offset = 0;
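		/* The SSBO binding offsets are aligned down to 256 bytes and
		 * the low bits are passed to the shader through the constants
		 * (buffer0_offset/buffer_offset), presumably to satisfy the
		 * hardware's buffer-offset alignment requirement.
		 */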

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, false, &constant_buffer);

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);

		if ((flags & PIPE_QUERY_WAIT) && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, qbuf->buf, va, 0x80000000, 0x80000000);
		}
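		/* The 0x80000000 mask/value above corresponds to the fence
		 * availability bit written by the CP; the shader tests the
		 * same top bit with an ISHR by 31 (IMM[0].y).
		 */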

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  bool condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

		if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
			atom->num_dw *= R600_MAX_STREAMS;
	}
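	/* (Each stored result contributes one SET_PREDICATION packet of five
	 * dwords; the SO_OVERFLOW_ANY case needs one packet per stream.)
	 */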

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context*)rscreen->aux_context;
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs;

	if (ctx->family == CHIP_JUNIPER) {
		/*
		 * Fix for predication lockups - the chip can only ever have
		 * 4 RBs, however it looks like the predication logic assumes
		 * there's 8, trying to read results from query buffers never
		 * written to. By increasing this number we'll write the
		 * status bit for these as per the normal disabled rb logic.
		 */
		ctx->screen->info.max_render_backends = 8;
	}
	max_rbs = ctx->screen->info.max_render_backends;

	assert(rscreen->gfx_level <= CAYMAN);

	/*
	 * Use the backend_map query if it is supported by the kernel.
	 * Note that for a long time the kernel DRM driver never filled in the
	 * associated data on eg/cm, only on r600/r700, hence ignore the valid
	 * bit there if the map is zero.
	 * (Albeit some chips with just one active RB can have a valid map of 0.)
	 */
	if (rscreen->info.r600_gb_backend_map_valid &&
	    (ctx->gfx_level < EVERGREEN || rscreen->info.r600_gb_backend_map != 0)) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->gfx_level >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

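		/* backend_map appears to pack one small field per tile pipe,
		 * each holding the index of the RB wired to that pipe, so
		 * walking the fields recovers the set of enabled RBs.
		 */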
		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE | RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask) {
		if (rscreen->debug_flags & DBG_INFO &&
		    mask != rscreen->info.enabled_rb_mask) {
			printf("enabled_rb_mask (fixed) = 0x%x\n", mask);
		}
		rscreen->info.enabled_rb_mask = mask;
	}
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static const struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations",		NUM_COMPILATIONS,	UINT64, CUMULATIVE),
	X("num-shaders-created",	NUM_SHADERS_CREATED,	UINT64, CUMULATIVE),
	X("num-shader-cache-hits",	NUM_SHADER_CACHE_HITS,	UINT64, CUMULATIVE),
	X("draw-calls",			DRAW_CALLS,		UINT64, AVERAGE),
	X("decompress-calls",		DECOMPRESS_CALLS,	UINT64, AVERAGE),
	X("MRT-draw-calls",		MRT_DRAW_CALLS,		UINT64, AVERAGE),
	X("prim-restart-calls",		PRIM_RESTART_CALLS,	UINT64, AVERAGE),
	X("spill-draw-calls",		SPILL_DRAW_CALLS,	UINT64, AVERAGE),
	X("compute-calls",		COMPUTE_CALLS,		UINT64, AVERAGE),
	X("spill-compute-calls",	SPILL_COMPUTE_CALLS,	UINT64, AVERAGE),
	X("dma-calls",			DMA_CALLS,		UINT64, AVERAGE),
	X("cp-dma-calls",		CP_DMA_CALLS,		UINT64, AVERAGE),
	X("num-vs-flushes",		NUM_VS_FLUSHES,		UINT64, AVERAGE),
	X("num-ps-flushes",		NUM_PS_FLUSHES,		UINT64, AVERAGE),
	X("num-cs-flushes",		NUM_CS_FLUSHES,		UINT64, AVERAGE),
	X("num-CB-cache-flushes",	NUM_CB_CACHE_FLUSHES,	UINT64, AVERAGE),
	X("num-DB-cache-flushes",	NUM_DB_CACHE_FLUSHES,	UINT64, AVERAGE),
	X("num-resident-handles",	NUM_RESIDENT_HANDLES,	UINT64, AVERAGE),
	X("tc-offloaded-slots",		TC_OFFLOADED_SLOTS,	UINT64, AVERAGE),
	X("tc-direct-slots",		TC_DIRECT_SLOTS,	UINT64, AVERAGE),
	X("tc-num-syncs",		TC_NUM_SYNCS,		UINT64, AVERAGE),
	X("CS-thread-busy",		CS_THREAD_BUSY,		UINT64, AVERAGE),
	X("gallium-thread-busy",	GALLIUM_THREAD_BUSY,	UINT64, AVERAGE),
	X("requested-VRAM",		REQUESTED_VRAM,		BYTES, AVERAGE),
	X("requested-GTT",		REQUESTED_GTT,		BYTES, AVERAGE),
	X("mapped-VRAM",		MAPPED_VRAM,		BYTES, AVERAGE),
	X("mapped-GTT",			MAPPED_GTT,		BYTES, AVERAGE),
	X("buffer-wait-time",		BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers",		NUM_MAPPED_BUFFERS,	UINT64, AVERAGE),
	X("num-GFX-IBs",		NUM_GFX_IBS,		UINT64, AVERAGE),
	X("num-SDMA-IBs",		NUM_SDMA_IBS,		UINT64, AVERAGE),
	X("GFX-BO-list-size",		GFX_BO_LIST_SIZE,	UINT64, AVERAGE),
	X("num-bytes-moved",		NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
	X("num-evictions",		NUM_EVICTIONS,		UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults",	NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage",			VRAM_USAGE,		BYTES, AVERAGE),
	X("VRAM-vis-usage",		VRAM_VIS_USAGE,		BYTES, AVERAGE),
	X("GTT-usage",			GTT_USAGE,		BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000",		GPIN_ASIC_ID,		UINT, AVERAGE),
	XG(GPIN, "GPIN_001",		GPIN_NUM_SIMD,		UINT, AVERAGE),
	XG(GPIN, "GPIN_002",		GPIN_NUM_RB,		UINT, AVERAGE),
	XG(GPIN, "GPIN_003",		GPIN_NUM_SPI,		UINT, AVERAGE),
	XG(GPIN, "GPIN_004",		GPIN_NUM_SE,		UINT, AVERAGE),

	X("temperature",		GPU_TEMPERATURE,	UINT64, AVERAGE),
	X("shader-clock",		CURRENT_GPU_SCLK,	HZ, AVERAGE),
	X("memory-clock",		CURRENT_GPU_MCLK,	HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load",			GPU_LOAD,		UINT64, AVERAGE),
	X("GPU-shaders-busy",		GPU_SHADERS_BUSY,	UINT64, AVERAGE),
	X("GPU-ta-busy",		GPU_TA_BUSY,		UINT64, AVERAGE),
	X("GPU-gds-busy",		GPU_GDS_BUSY,		UINT64, AVERAGE),
	X("GPU-vgt-busy",		GPU_VGT_BUSY,		UINT64, AVERAGE),
	X("GPU-ia-busy",		GPU_IA_BUSY,		UINT64, AVERAGE),
	X("GPU-sx-busy",		GPU_SX_BUSY,		UINT64, AVERAGE),
	X("GPU-wd-busy",		GPU_WD_BUSY,		UINT64, AVERAGE),
	X("GPU-bci-busy",		GPU_BCI_BUSY,		UINT64, AVERAGE),
	X("GPU-sc-busy",		GPU_SC_BUSY,		UINT64, AVERAGE),
	X("GPU-pa-busy",		GPU_PA_BUSY,		UINT64, AVERAGE),
	X("GPU-db-busy",		GPU_DB_BUSY,		UINT64, AVERAGE),
	X("GPU-cp-busy",		GPU_CP_BUSY,		UINT64, AVERAGE),
	X("GPU-cb-busy",		GPU_CB_BUSY,		UINT64, AVERAGE),
	X("GPU-sdma-busy",		GPU_SDMA_BUSY,		UINT64, AVERAGE),
	X("GPU-pfp-busy",		GPU_PFP_BUSY,		UINT64, AVERAGE),
	X("GPU-meq-busy",		GPU_MEQ_BUSY,		UINT64, AVERAGE),
	X("GPU-me-busy",		GPU_ME_BUSY,		UINT64, AVERAGE),
	X("GPU-surf-sync-busy",		GPU_SURF_SYNC_BUSY,	UINT64, AVERAGE),
	X("GPU-cp-dma-busy",		GPU_CP_DMA_BUSY,	UINT64, AVERAGE),
	X("GPU-scratch-ram-busy",	GPU_SCRATCH_RAM_BUSY,	UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	return ARRAY_SIZE(r600_driver_query_list);
}

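/* Standard gallium two-call protocol: when info is NULL, only the total
 * number of available queries (driver queries plus perf counters) is
 * returned; otherwise the entry at the given index is filled in.
 */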
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = (uint64_t)rscreen->info.vram_size_kb * 1024;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = (uint64_t)rscreen->info.gart_size_kb * 1024;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = (uint64_t)rscreen->info.vram_vis_size_kb * 1024;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.max_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	list_inithead(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}
2113