/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * Authors: Marek Olšák <[email protected]>
 * SPDX-License-Identifier: MIT
 */

#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "evergreen_compute.h"
#include "util/list.h"
#include "util/u_draw_quad.h"
#include "util/u_memory.h"
#include "util/format/u_format_s3tc.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "util/hex.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon_video.h"
#include "git_sha1.h"

#include <inttypes.h>
#include <sys/utsname.h>
#include <stdlib.h>

#if AMD_LLVM_AVAILABLE
#include <llvm-c/TargetMachine.h>
#endif

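/* A fence that may cover both the GFX and SDMA rings; either handle may be
 * NULL, and if both are NULL the fence counts as already signalled. */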
struct r600_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct r600_common_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;
};

/*
 * pipe_context
 */

/**
 * Write an EOP event.
 *
 * \param event		EVENT_TYPE_*
 * \param event_flags	Optional cache flush flags (TC)
 * \param data_sel	1 = fence, 3 = timestamp
 * \param buf		Buffer
 * \param va		GPU address
 * \param new_fence	Fence value to write for this event
 * \param query_type	Query type (currently unused here)
 */
void r600_gfx_write_event_eop(struct r600_common_context *ctx,
			      unsigned event, unsigned event_flags,
			      unsigned data_sel,
			      struct r600_resource *buf, uint64_t va,
			      uint32_t new_fence, unsigned query_type)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
	radeon_emit(cs, op);
	radeon_emit(cs, va);
	radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
	radeon_emit(cs, new_fence); /* immediate data */
	radeon_emit(cs, 0); /* unused */

	if (buf)
		r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE |
				RADEON_PRIO_QUERY);
}

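/*
 * Number of dwords r600_gfx_write_event_eop emits: 6 for the
 * EVENT_WRITE_EOP packet itself, plus 2 for the relocation when the
 * kernel provides no virtual memory. A caller would reserve CS space
 * roughly like this (a sketch only; the event type and fence values
 * are illustrative, not taken from a real call site):
 *
 *   ctx->ws->cs_check_space(&ctx->gfx.cs,
 *                           r600_gfx_write_fence_dwords(ctx->screen));
 *   r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
 *                            1, fence_buf, fence_va, 0x1,
 *                            PIPE_QUERY_GPU_FINISHED);
 */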
unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen)
{
	unsigned dwords = 6;

	if (!screen->info.r600_has_virtual_memory)
		dwords += 2;

	return dwords;
}

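/* Emit a WAIT_REG_MEM packet that stalls the CP until the 32-bit value in
 * memory at \p va, masked with \p mask, equals \p ref. */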
void r600_gfx_wait_fence(struct r600_common_context *ctx,
			 struct r600_resource *buf,
			 uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */

	if (buf)
		r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ |
				RADEON_PRIO_QUERY);
}

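/* Draw a screen-space rectangle as a 3-vertex RECTLIST. Each vertex takes
 * 8 floats: the position in slots 0-3 and an optional color/texcoord
 * attribute in slots 4-7; the hardware derives the 4th corner from the
 * first three vertices. */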
void r600_draw_rectangle(struct blitter_context *blitter,
			 void *vertex_elements_cso,
			 blitter_get_vs_func get_vs,
			 int x1, int y1, int x2, int y2,
			 float depth, unsigned num_instances,
			 enum blitter_attrib_type type,
			 const union blitter_attrib *attrib)
{
	struct r600_common_context *rctx =
		(struct r600_common_context*)util_blitter_get_pipe(blitter);
	struct pipe_viewport_state viewport;
	struct pipe_resource *buf = NULL;
	unsigned offset = 0;
	float *vb;

	rctx->b.bind_vertex_elements_state(&rctx->b, vertex_elements_cso);
	rctx->b.bind_vs_state(&rctx->b, get_vs(blitter));

	/* Some operations (like color resolve on r6xx) don't work
	 * with the conventional primitive types.
	 * One that works is PT_RECTLIST, which we use here. */

	/* setup viewport */
	viewport.scale[0] = 1.0f;
	viewport.scale[1] = 1.0f;
	viewport.scale[2] = 1.0f;
	viewport.translate[0] = 0.0f;
	viewport.translate[1] = 0.0f;
	viewport.translate[2] = 0.0f;
	rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);

	/* Upload vertices. The hw rectangle has only 3 vertices;
	 * the 4th one is derived from the first 3.
	 * The vertex specification should match u_blitter's vertex element state. */
	u_upload_alloc(rctx->b.stream_uploader, 0, sizeof(float) * 24,
		       rctx->screen->info.tcc_cache_line_size,
		       &offset, &buf, (void**)&vb);
	if (!buf)
		return;

	vb[0] = x1;
	vb[1] = y1;
	vb[2] = depth;
	vb[3] = 1;

	vb[8] = x1;
	vb[9] = y2;
	vb[10] = depth;
	vb[11] = 1;

	vb[16] = x2;
	vb[17] = y1;
	vb[18] = depth;
	vb[19] = 1;

	switch (type) {
	case UTIL_BLITTER_ATTRIB_COLOR:
		memcpy(vb+4, attrib->color, sizeof(float)*4);
		memcpy(vb+12, attrib->color, sizeof(float)*4);
		memcpy(vb+20, attrib->color, sizeof(float)*4);
		break;
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
		vb[6] = vb[14] = vb[22] = attrib->texcoord.z;
		vb[7] = vb[15] = vb[23] = attrib->texcoord.w;
		/* fall through */
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
		vb[4] = attrib->texcoord.x1;
		vb[5] = attrib->texcoord.y1;
		vb[12] = attrib->texcoord.x1;
		vb[13] = attrib->texcoord.y2;
		vb[20] = attrib->texcoord.x2;
		vb[21] = attrib->texcoord.y1;
		break;
	default:; /* Nothing to do. */
	}

	/* draw */
	struct pipe_vertex_buffer vbuffer = {};
	vbuffer.buffer.resource = buf;
	vbuffer.buffer_offset = offset;

	util_set_vertex_buffers(&rctx->b, 1, false, &vbuffer);
	util_draw_arrays_instanced(&rctx->b, R600_PRIM_RECTANGLE_LIST, 0, 3,
				   0, num_instances);
	pipe_resource_reference(&buf, NULL);
}

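/* Emit a wait-for-idle on the SDMA ring. On Evergreen and newer an SDMA
 * NOP packet waits for idle; older chips have no equivalent yet. */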
static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
{
	struct radeon_cmdbuf *cs = &rctx->dma.cs;

	if (rctx->gfx_level >= EVERGREEN)
		radeon_emit(cs, 0xf0000000); /* NOP */
	else {
		/* TODO: R600-R700 should use the FENCE packet.
		 * CS checker support is required. */
	}
}

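/* Make sure the DMA CS can take \p num_dw more dwords for a transfer between
 * \p dst and \p src, flushing either ring and/or waiting for idle as needed
 * to preserve ordering and to bound per-IB memory usage. */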
void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
                         struct r600_resource *dst, struct r600_resource *src)
{
	uint64_t vram = (uint64_t)ctx->dma.cs.used_vram_kb * 1024;
	uint64_t gtt = (uint64_t)ctx->dma.cs.used_gart_kb * 1024;

	if (dst) {
		vram += dst->vram_usage;
		gtt += dst->gart_usage;
	}
	if (src) {
		vram += src->vram_usage;
		gtt += src->gart_usage;
	}

	/* Flush the GFX IB if DMA depends on it. */
	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ((dst &&
	      ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, dst->buf,
					       RADEON_USAGE_READWRITE)) ||
	     (src &&
	      ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, src->buf,
					       RADEON_USAGE_WRITE))))
		ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);

	/* Flush if there's not enough space, or if the memory usage per IB
	 * is too large.
	 *
	 * IBs using too little memory are limited by the IB submission overhead.
	 * IBs using too much memory are limited by the kernel/TTM overhead.
	 * Too long IBs create CPU-GPU pipeline bubbles and add latency.
	 *
	 * This heuristic makes sure that DMA requests are executed
	 * very soon after the call is made and lowers memory usage.
	 * It improves texture upload performance by keeping the DMA
	 * engine busy while uploads are being submitted.
	 */
	num_dw++; /* for emit_wait_idle below */
	if (!ctx->ws->cs_check_space(&ctx->dma.cs, num_dw) ||
	    ctx->dma.cs.used_vram_kb + ctx->dma.cs.used_gart_kb > 64 * 1024 ||
	    !radeon_cs_memory_below_limit(ctx->screen, &ctx->dma.cs, vram, gtt)) {
		ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
		assert((num_dw + ctx->dma.cs.current.cdw) <= ctx->dma.cs.current.max_dw);
	}

	/* Wait for idle if either buffer has been used in the IB before to
	 * prevent read-after-write hazards.
	 */
	if ((dst &&
	     ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, dst->buf,
					      RADEON_USAGE_READWRITE)) ||
	    (src &&
	     ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, src->buf,
					      RADEON_USAGE_WRITE)))
		r600_dma_emit_wait_idle(ctx);

	/* With GPUVM, adding the buffers to the buffer list here is
	 * sufficient. If GPUVM is not supported, the CS checker needs
	 * 2 entries in the buffer list per packet, which has to be done
	 * manually by each packet emitter.
	 */
	if (ctx->screen->info.r600_has_virtual_memory) {
		if (dst)
			radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
						  RADEON_USAGE_WRITE);
		if (src)
			radeon_add_to_buffer_list(ctx, &ctx->dma, src,
						  RADEON_USAGE_READ);
	}

	/* This function is called before all DMA calls, so increment this. */
	ctx->num_dma_calls++;
}

void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
	/* suspend queries */
	if (!list_is_empty(&ctx->active_queries))
		r600_suspend_queries(ctx);

	ctx->streamout.suspended = false;
	if (ctx->streamout.begin_emitted) {
		r600_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}
}

void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (!list_is_empty(&ctx->active_queries))
		r600_resume_queries(ctx);
}

static void r600_fence_server_sync(struct pipe_context *ctx,
				   struct pipe_fence_handle *fence)
{
	/* radeon synchronizes all rings by default and will not implement
	 * fence imports.
	 */
}

static void r600_flush_from_st(struct pipe_context *ctx,
			       struct pipe_fence_handle **fence,
			       unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys *ws = rctx->ws;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;
	unsigned rflags = PIPE_FLUSH_ASYNC;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= PIPE_FLUSH_END_OF_FRAME;

	/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
	if (rctx->dma.cs.priv)
		rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(&rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(ws, &gfx_fence, rctx->last_gfx_fence);
		if (!(flags & PIPE_FLUSH_DEFERRED))
			ws->cs_sync_flush(&rctx->gfx.cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - the gallium frontend must allow a deferred flush.
		 * - the gallium frontend must request a fence.
		 * Thread safety in fence_finish must be ensured by the gallium frontend.
		 */
		if (flags & PIPE_FLUSH_DEFERRED && fence) {
			gfx_fence = rctx->ws->cs_get_next_fence(&rctx->gfx.cs);
			deferred_fence = true;
		} else {
			rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct r600_multi_fence *multi_fence =
			CALLOC_STRUCT(r600_multi_fence);
		if (!multi_fence) {
			ws->fence_reference(ws, &sdma_fence, NULL);
			ws->fence_reference(ws, &gfx_fence, NULL);
			goto finish;
		}

		multi_fence->reference.count = 1;
		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = rctx;
			multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
		}

		screen->fence_reference(screen, fence, NULL);
		*fence = (struct pipe_fence_handle*)multi_fence;
	}
finish:
	if (!(flags & PIPE_FLUSH_DEFERRED)) {
		if (rctx->dma.cs.priv)
			ws->cs_sync_flush(&rctx->dma.cs);
		ws->cs_sync_flush(&rctx->gfx.cs);
	}
}

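/* Flush callback for the SDMA ring. With R600_DEBUG=check_vm the IB is
 * saved before submission so VM faults can be reported against it. */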
static void r600_flush_dma_ring(void *ctx, unsigned flags,
				struct pipe_fence_handle **fence)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_cmdbuf *cs = &rctx->dma.cs;
	struct radeon_saved_cs saved;
	bool check_vm =
		(rctx->screen->debug_flags & DBG_CHECK_VM) &&
		rctx->check_vm_faults;

	if (!radeon_emitted(cs, 0)) {
		if (fence)
			rctx->ws->fence_reference(rctx->ws, fence, rctx->last_sdma_fence);
		return;
	}

	if (check_vm)
		radeon_save_cs(rctx->ws, cs, &saved, true);

	rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
	if (fence)
		rctx->ws->fence_reference(rctx->ws, fence, rctx->last_sdma_fence);

	if (check_vm) {
		/* Use a conservative timeout of 800 ms, after which we won't
		 * wait any longer and assume the GPU is hung.
		 */
		rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);

		rctx->check_vm_faults(rctx, &saved, AMD_IP_SDMA);
		radeon_clear_saved_cs(&saved);
	}
}

/**
 * Store a linearized copy of all chunks of \p cs together with the buffer
 * list in \p saved.
 */
void radeon_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
		    struct radeon_saved_cs *saved, bool get_buffer_list)
{
	uint32_t *buf;
	unsigned i;

	/* Save the IB chunks. */
	saved->num_dw = cs->prev_dw + cs->current.cdw;
	saved->ib = MALLOC(4 * saved->num_dw);
	if (!saved->ib)
		goto oom;

	buf = saved->ib;
	for (i = 0; i < cs->num_prev; ++i) {
		memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
		buf += cs->prev[i].cdw;
	}
	memcpy(buf, cs->current.buf, cs->current.cdw * 4);

	if (!get_buffer_list)
		return;

	/* Save the buffer list. */
	saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
	saved->bo_list = CALLOC(saved->bo_count,
				sizeof(saved->bo_list[0]));
	if (!saved->bo_list) {
		FREE(saved->ib);
		goto oom;
	}
	ws->cs_get_buffer_list(cs, saved->bo_list);

	return;

oom:
	fprintf(stderr, "%s: out of memory\n", __func__);
	memset(saved, 0, sizeof(*saved));
}

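/* Free the IB copy and buffer list captured by radeon_save_cs. */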
void radeon_clear_saved_cs(struct radeon_saved_cs *saved)
{
	FREE(saved->ib);
	FREE(saved->bo_list);

	memset(saved, 0, sizeof(*saved));
}

static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	return rctx->ws->ctx_query_reset_status(rctx->ctx, false, NULL, NULL);
}

static void r600_set_debug_callback(struct pipe_context *ctx,
				    const struct util_debug_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->debug = *cb;
	else
		memset(&rctx->debug, 0, sizeof(rctx->debug));
}

static void r600_set_device_reset_callback(struct pipe_context *ctx,
					   const struct pipe_device_reset_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->device_reset_callback = *cb;
	else
		memset(&rctx->device_reset_callback, 0,
		       sizeof(rctx->device_reset_callback));
}

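/* Notify the frontend's reset callback if the device was lost; returns
 * true if a reset was signalled. */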
bool r600_check_device_reset(struct r600_common_context *rctx)
{
	enum pipe_reset_status status;

	if (!rctx->device_reset_callback.reset)
		return false;

	if (!rctx->b.get_device_reset_status)
		return false;

	status = rctx->b.get_device_reset_status(&rctx->b);
	if (status == PIPE_NO_RESET)
		return false;

	rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
	return true;
}

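/* dma_clear_buffer fallback for contexts without an SDMA-specific clear:
 * route the clear through the generic clear_buffer path, with no cache
 * coherency requested. */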
static void r600_dma_clear_buffer_fallback(struct pipe_context *ctx,
					   struct pipe_resource *dst,
					   uint64_t offset, uint64_t size,
					   unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	rctx->clear_buffer(ctx, dst, offset, size, value, R600_COHERENCY_NONE);
}

static bool r600_resource_commit(struct pipe_context *pctx,
				 struct pipe_resource *resource,
				 unsigned level, struct pipe_box *box,
				 bool commit)
{
	struct r600_common_context *ctx = (struct r600_common_context *)pctx;
	struct r600_resource *res = r600_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(&ctx->dma.cs);
	ctx->ws->cs_sync_flush(&ctx->gfx.cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(ctx->ws, res->buf, box->x, box->width, commit);
}

bool r600_common_context_init(struct r600_common_context *rctx,
			      struct r600_common_screen *rscreen,
			      unsigned context_flags)
{
	slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);
	slab_create_child(&rctx->pool_transfers_unsync, &rscreen->pool_transfers);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->gfx_level = rscreen->gfx_level;

	rctx->b.clear_buffer = u_default_clear_buffer;
	rctx->b.invalidate_resource = r600_invalidate_resource;
	rctx->b.resource_commit = r600_resource_commit;
	rctx->b.buffer_map = r600_buffer_transfer_map;
	rctx->b.texture_map = r600_texture_transfer_map;
	rctx->b.transfer_flush_region = r600_buffer_flush_region;
	rctx->b.buffer_unmap = r600_buffer_transfer_unmap;
	rctx->b.texture_unmap = r600_texture_transfer_unmap;
	rctx->b.texture_subdata = u_default_texture_subdata;
	rctx->b.flush = r600_flush_from_st;
	rctx->b.set_debug_callback = r600_set_debug_callback;
	rctx->b.fence_server_sync = r600_fence_server_sync;
	rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;

	/* evergreen_compute.c has a special codepath for global buffers.
	 * Everything else can use the direct path.
	 */
	if ((rscreen->gfx_level == EVERGREEN || rscreen->gfx_level == CAYMAN) &&
	    (context_flags & PIPE_CONTEXT_COMPUTE_ONLY))
		rctx->b.buffer_subdata = u_default_buffer_subdata;
	else
		rctx->b.buffer_subdata = r600_buffer_subdata;

	rctx->b.get_device_reset_status = r600_get_reset_status;
	rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

	r600_init_context_texture_functions(rctx);
	r600_init_viewport_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

	u_suballocator_init(&rctx->allocator_zeroed_memory, &rctx->b, rscreen->info.gart_page_size,
			    0, PIPE_USAGE_DEFAULT, 0, true);

	rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
						  0, PIPE_USAGE_STREAM, 0);
	if (!rctx->b.stream_uploader)
		return false;

	rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
						 0, PIPE_USAGE_DEFAULT, 0);
	if (!rctx->b.const_uploader)
		return false;

	rctx->ctx = rctx->ws->ctx_create(rctx->ws, RADEON_CTX_PRIORITY_MEDIUM, false);
	if (!rctx->ctx)
		return false;

	if (rscreen->info.ip[AMD_IP_SDMA].num_queues && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->ws->cs_create(&rctx->dma.cs, rctx->ctx, AMD_IP_SDMA,
				    r600_flush_dma_ring, rctx);
		rctx->dma.flush = r600_flush_dma_ring;
	}

	return true;
}

void r600_common_context_cleanup(struct r600_common_context *rctx)
{
	if (rctx->query_result_shader)
		rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);

	rctx->ws->cs_destroy(&rctx->gfx.cs);
	rctx->ws->cs_destroy(&rctx->dma.cs);
	if (rctx->ctx)
		rctx->ws->ctx_destroy(rctx->ctx);

	if (rctx->b.stream_uploader)
		u_upload_destroy(rctx->b.stream_uploader);
	if (rctx->b.const_uploader)
		u_upload_destroy(rctx->b.const_uploader);

	slab_destroy_child(&rctx->pool_transfers);
	slab_destroy_child(&rctx->pool_transfers_unsync);

	u_suballocator_destroy(&rctx->allocator_zeroed_memory);
	rctx->ws->fence_reference(rctx->ws, &rctx->last_gfx_fence, NULL);
	rctx->ws->fence_reference(rctx->ws, &rctx->last_sdma_fence, NULL);
	r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}

/*
 * pipe_screen
 */

static const struct debug_named_value common_debug_options[] = {
	/* logging */
	{ "tex", DBG_TEX, "Print texture info" },
	{ "nir", DBG_NIR, "Enable experimental NIR shaders" },
	{ "compute", DBG_COMPUTE, "Print compute info" },
	{ "vm", DBG_VM, "Print virtual addresses when creating resources" },
	{ "info", DBG_INFO, "Print driver information" },

	/* shaders */
	{ "fs", DBG_FS, "Print fetch shaders" },
	{ "vs", DBG_VS, "Print vertex shaders" },
	{ "gs", DBG_GS, "Print geometry shaders" },
	{ "ps", DBG_PS, "Print pixel shaders" },
	{ "cs", DBG_CS, "Print compute shaders" },
	{ "tcs", DBG_TCS, "Print tessellation control shaders" },
	{ "tes", DBG_TES, "Print tessellation evaluation shaders" },
	{ "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
	{ "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },

	{ "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },
	{ "testvmfaultcp", DBG_TEST_VMFAULT_CP, "Invoke a CP VM fault test and exit." },
	{ "testvmfaultsdma", DBG_TEST_VMFAULT_SDMA, "Invoke an SDMA VM fault test and exit." },
	{ "testvmfaultshader", DBG_TEST_VMFAULT_SHADER, "Invoke a shader VM fault test and exit." },

	/* features */
	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
	{ "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
	{ "notiling", DBG_NO_TILING, "Disable tiling" },
	{ "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
	{ "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
	{ "nowc", DBG_NO_WC, "Disable GTT write combining" },
	{ "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },

	DEBUG_NAMED_VALUE_END /* must be last */
};

static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	return "Mesa";
}

static const char* r600_get_device_vendor(struct pipe_screen* pscreen)
{
	return "AMD";
}

static const char *r600_get_family_name(const struct r600_common_screen *rscreen)
{
	switch (rscreen->info.family) {
	case CHIP_R600: return "AMD R600";
	case CHIP_RV610: return "AMD RV610";
	case CHIP_RV630: return "AMD RV630";
	case CHIP_RV670: return "AMD RV670";
	case CHIP_RV620: return "AMD RV620";
	case CHIP_RV635: return "AMD RV635";
	case CHIP_RS780: return "AMD RS780";
	case CHIP_RS880: return "AMD RS880";
	case CHIP_RV770: return "AMD RV770";
	case CHIP_RV730: return "AMD RV730";
	case CHIP_RV710: return "AMD RV710";
	case CHIP_RV740: return "AMD RV740";
	case CHIP_CEDAR: return "AMD CEDAR";
	case CHIP_REDWOOD: return "AMD REDWOOD";
	case CHIP_JUNIPER: return "AMD JUNIPER";
	case CHIP_CYPRESS: return "AMD CYPRESS";
	case CHIP_HEMLOCK: return "AMD HEMLOCK";
	case CHIP_PALM: return "AMD PALM";
	case CHIP_SUMO: return "AMD SUMO";
	case CHIP_SUMO2: return "AMD SUMO2";
	case CHIP_BARTS: return "AMD BARTS";
	case CHIP_TURKS: return "AMD TURKS";
	case CHIP_CAICOS: return "AMD CAICOS";
	case CHIP_CAYMAN: return "AMD CAYMAN";
	case CHIP_ARUBA: return "AMD ARUBA";
	default: return "AMD unknown";
	}
}

static void r600_disk_cache_create(struct r600_common_screen *rscreen)
{
	/* Don't use the cache if shader dumping is enabled. */
	if (rscreen->debug_flags & DBG_ALL_SHADERS)
		return;

	struct mesa_sha1 ctx;
	unsigned char sha1[20];
	char cache_id[20 * 2 + 1];

	_mesa_sha1_init(&ctx);
	if (!disk_cache_get_function_identifier(r600_disk_cache_create,
						&ctx))
		return;

	_mesa_sha1_final(&ctx, sha1);
	mesa_bytes_to_hex(cache_id, sha1, 20);

	/* These flags affect shader compilation. */
	rscreen->disk_shader_cache =
		disk_cache_create(r600_get_family_name(rscreen),
				  cache_id, 0);
}

static struct disk_cache *r600_get_disk_shader_cache(struct pipe_screen *pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;
	return rscreen->disk_shader_cache;
}

static const char* r600_get_name(struct pipe_screen* pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;

	return rscreen->renderer_string;
}

static float r600_get_paramf(struct pipe_screen* pscreen,
			     enum pipe_capf param)
{
	switch (param) {
	case PIPE_CAPF_MIN_LINE_WIDTH:
	case PIPE_CAPF_MIN_LINE_WIDTH_AA:
	case PIPE_CAPF_MIN_POINT_SIZE:
	case PIPE_CAPF_MIN_POINT_SIZE_AA:
		return 1;

	case PIPE_CAPF_POINT_SIZE_GRANULARITY:
	case PIPE_CAPF_LINE_WIDTH_GRANULARITY:
		return 0.1;

	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
	case PIPE_CAPF_MAX_POINT_SIZE:
	case PIPE_CAPF_MAX_POINT_SIZE_AA:
		return 8191.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 16.0f;
	case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
		return 0.0f;
	}
	return 0.0f;
}

static int r600_get_video_param(struct pipe_screen *screen,
				enum pipe_video_profile profile,
				enum pipe_video_entrypoint entrypoint,
				enum pipe_video_cap param)
{
	switch (param) {
	case PIPE_VIDEO_CAP_SUPPORTED:
		return vl_profile_supported(screen, profile, entrypoint);
	case PIPE_VIDEO_CAP_NPOT_TEXTURES:
		return 1;
	case PIPE_VIDEO_CAP_MAX_WIDTH:
	case PIPE_VIDEO_CAP_MAX_HEIGHT:
		return vl_video_buffer_max_size(screen);
	case PIPE_VIDEO_CAP_PREFERED_FORMAT:
		return PIPE_FORMAT_NV12;
	case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
		return true;
	case PIPE_VIDEO_CAP_MAX_LEVEL:
		return vl_level_supported(screen, profile);
	default:
		return 0;
	}
}

const char *r600_get_llvm_processor_name(enum radeon_family family)
{
	switch (family) {
	case CHIP_R600:
	case CHIP_RV630:
	case CHIP_RV635:
	case CHIP_RV670:
		return "r600";
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		return "rs880";
	case CHIP_RV710:
		return "rv710";
	case CHIP_RV730:
		return "rv730";
	case CHIP_RV740:
	case CHIP_RV770:
		return "rv770";
	case CHIP_PALM:
	case CHIP_CEDAR:
		return "cedar";
	case CHIP_SUMO:
	case CHIP_SUMO2:
		return "sumo";
	case CHIP_REDWOOD:
		return "redwood";
	case CHIP_JUNIPER:
		return "juniper";
	case CHIP_HEMLOCK:
	case CHIP_CYPRESS:
		return "cypress";
	case CHIP_BARTS:
		return "barts";
	case CHIP_TURKS:
		return "turks";
	case CHIP_CAICOS:
		return "caicos";
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
		return "cayman";

	default:
		return "";
	}
}

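/* TGSI and NIR compute shaders may use up to 1024 threads per block on
 * Evergreen and newer; everything else is capped at 256. */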
static unsigned get_max_threads_per_block(struct r600_common_screen *screen,
					  enum pipe_shader_ir ir_type)
{
	if (ir_type != PIPE_SHADER_IR_TGSI &&
	    ir_type != PIPE_SHADER_IR_NIR)
		return 256;
	if (screen->gfx_level >= EVERGREEN)
		return 1024;
	return 256;
}

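/* Gallium compute caps: each case returns the size of the result in bytes
 * and, when \p ret is non-NULL, also writes the value(s). */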
static int r600_get_compute_param(struct pipe_screen *screen,
				  enum pipe_shader_ir ir_type,
				  enum pipe_compute_cap param,
				  void *ret)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	//TODO: select these params by asic
	switch (param) {
	case PIPE_COMPUTE_CAP_IR_TARGET: {
		const char *gpu;
		const char *triple = "r600--";
		gpu = r600_get_llvm_processor_name(rscreen->family);
		if (ret) {
			sprintf(ret, "%s-%s", gpu, triple);
		}
		/* +2 for dash and terminating NUL byte */
		return (strlen(triple) + strlen(gpu) + 2) * sizeof(char);
	}
	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		if (ret) {
			uint64_t *grid_dimension = ret;
			grid_dimension[0] = 3;
		}
		return 1 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		if (ret) {
			uint64_t *grid_size = ret;
			grid_size[0] = 65535;
			grid_size[1] = 65535;
			grid_size[2] = 65535;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		if (ret) {
			uint64_t *block_size = ret;
			unsigned threads_per_block = get_max_threads_per_block(rscreen, ir_type);
			block_size[0] = threads_per_block;
			block_size[1] = threads_per_block;
			block_size[2] = threads_per_block;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_threads_per_block = ret;
			*max_threads_per_block = get_max_threads_per_block(rscreen, ir_type);
		}
		return sizeof(uint64_t);
	case PIPE_COMPUTE_CAP_ADDRESS_BITS:
		if (ret) {
			uint32_t *address_bits = ret;
			address_bits[0] = 32;
		}
		return 1 * sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		if (ret) {
			uint64_t *max_global_size = ret;
			uint64_t max_mem_alloc_size;

			r600_get_compute_param(screen, ir_type,
				PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
				&max_mem_alloc_size);

			/* In OpenCL, the MAX_MEM_ALLOC_SIZE must be at least
			 * 1/4 of the MAX_GLOBAL_SIZE.  Since the
			 * MAX_MEM_ALLOC_SIZE is fixed for older kernels,
			 * make sure we never report more than
			 * 4 * MAX_MEM_ALLOC_SIZE.
			 */
			*max_global_size = MIN2(4 * max_mem_alloc_size,
						rscreen->info.max_heap_size_kb * 1024ull);
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		if (ret) {
			uint64_t *max_local_size = ret;
			/* Value reported by the closed source driver. */
			*max_local_size = 32768;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		if (ret) {
			uint64_t *max_input_size = ret;
			/* Value reported by the closed source driver. */
			*max_input_size = 1024;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		if (ret) {
			uint64_t *max_mem_alloc_size = ret;

			*max_mem_alloc_size = (rscreen->info.max_heap_size_kb / 4) * 1024ull;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
		if (ret) {
			uint32_t *max_clock_frequency = ret;
			*max_clock_frequency = rscreen->info.max_gpu_freq_mhz;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
		if (ret) {
			uint32_t *max_compute_units = ret;
			*max_compute_units = rscreen->info.num_cu;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
		if (ret) {
			uint32_t *images_supported = ret;
			*images_supported = 0;
		}
		return sizeof(uint32_t);
	case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
		break; /* unused */
	case PIPE_COMPUTE_CAP_SUBGROUP_SIZES:
		if (ret) {
			uint32_t *subgroup_size = ret;
			*subgroup_size = r600_wavefront_size(rscreen->family);
		}
		return sizeof(uint32_t);
	case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_variable_threads_per_block = ret;
			*max_variable_threads_per_block = 0;
		}
		return sizeof(uint64_t);
	case PIPE_COMPUTE_CAP_MAX_SUBGROUPS:
		return 0;
	}

	fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
	return 0;
}

static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

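	/* RADEON_TIMESTAMP ticks at the crystal-clock rate; clock_crystal_freq
	 * is in kHz, so scaling by 10^6 converts ticks to nanoseconds. */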
	return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) /
			rscreen->info.clock_crystal_freq;
}

static void r600_fence_reference(struct pipe_screen *screen,
				 struct pipe_fence_handle **dst,
				 struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence **rdst = (struct r600_multi_fence **)dst;
	struct r600_multi_fence *rsrc = (struct r600_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(ws, &(*rdst)->gfx, NULL);
		ws->fence_reference(ws, &(*rdst)->sdma, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}

static bool r600_fence_finish(struct pipe_screen *screen,
			      struct pipe_context *ctx,
			      struct pipe_fence_handle *fence,
			      uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
	struct r600_common_context *rctx;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	ctx = threaded_context_unwrap_sync(ctx);
	rctx = ctx ? (struct r600_common_context*)ctx : NULL;

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != OS_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (rctx &&
	    rfence->gfx_unflushed.ctx == rctx &&
	    rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
		rctx->gfx.flush(rctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
		rfence->gfx_unflushed.ctx = NULL;

		if (!timeout)
			return false;

		/* Recompute the timeout after all that. */
		if (timeout && timeout != OS_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	return rws->fence_wait(rws, rfence->gfx, timeout);
}

static void r600_query_memory_info(struct pipe_screen *screen,
				   struct pipe_memory_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	unsigned vram_usage, gtt_usage;

	info->total_device_memory = rscreen->info.vram_size_kb;
	info->total_staging_memory = rscreen->info.gart_size_kb;

	/* The real TTM memory usage is somewhat random, because:
	 *
	 * 1) TTM delays freeing memory, because it can only free it after
	 *    fences expire.
	 *
	 * 2) The memory usage can be really low if big VRAM evictions are
	 *    taking place, but the real usage is well above the size of VRAM.
	 *
	 * Instead, return statistics of this process.
	 */
	vram_usage = ws->query_value(ws, RADEON_REQUESTED_VRAM_MEMORY) / 1024;
	gtt_usage = ws->query_value(ws, RADEON_REQUESTED_GTT_MEMORY) / 1024;

	info->avail_device_memory =
		vram_usage <= info->total_device_memory ?
				info->total_device_memory - vram_usage : 0;
	info->avail_staging_memory =
		gtt_usage <= info->total_staging_memory ?
				info->total_staging_memory - gtt_usage : 0;

	info->device_memory_evicted =
		ws->query_value(ws, RADEON_NUM_BYTES_MOVED) / 1024;

	/* Just return the number of evicted 64KB pages. */
	info->nr_device_memory_evictions = info->device_memory_evicted / 64;
}

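/* Common resource creation: buffers are created with 256-byte alignment;
 * everything else goes through the texture path. */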
struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
						  const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return r600_buffer_create(screen, templ, 256);
	} else {
		return r600_texture_create(screen, templ);
	}
}

static const void *
r600_get_compiler_options(struct pipe_screen *screen,
			  enum pipe_shader_ir ir,
			  enum pipe_shader_type shader)
{
	assert(ir == PIPE_SHADER_IR_NIR);

	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	if (shader != PIPE_SHADER_FRAGMENT)
		return &rscreen->nir_options;
	else
		return &rscreen->nir_options_fs;
}

extern bool r600_lower_to_scalar_instr_filter(const nir_instr *instr, const void *);

static void r600_resource_destroy(struct pipe_screen *screen,
				  struct pipe_resource *res)
{
	if (res->target == PIPE_BUFFER) {
		if (r600_resource(res)->compute_global_bo)
			r600_compute_global_buffer_destroy(screen, res);
		else
			r600_buffer_destroy(screen, res);
	} else {
		r600_texture_destroy(screen, res);
	}
}

static int r600_get_screen_fd(struct pipe_screen *screen)
{
	struct radeon_winsys *ws = ((struct r600_common_screen*)screen)->ws;

	return ws->get_fd(ws);
}

static void r600_get_driver_uuid(UNUSED struct pipe_screen *screen, char *uuid)
{
	const char *driver_id = PACKAGE_VERSION MESA_GIT_SHA1 "r600";

	/* The driver UUID is used for determining sharability of images and
	 * memory between two Vulkan instances in separate processes, but also
	 * for determining memory objects and sharability between the Vulkan
	 * and OpenGL drivers. People who want to share memory need to also
	 * check the device UUID.
	 */
	struct mesa_sha1 sha1_ctx;
	_mesa_sha1_init(&sha1_ctx);

	_mesa_sha1_update(&sha1_ctx, driver_id, strlen(driver_id));

	uint8_t sha1[SHA1_DIGEST_LENGTH];
	_mesa_sha1_final(&sha1_ctx, sha1);

	assert(SHA1_DIGEST_LENGTH >= PIPE_UUID_SIZE);
	memcpy(uuid, sha1, PIPE_UUID_SIZE);
}

static void r600_get_device_uuid(struct pipe_screen *screen, char *uuid)
{
	uint32_t *uint_uuid = (uint32_t *)uuid;
	struct r600_common_screen* rs = (struct r600_common_screen*)screen;

	assert(PIPE_UUID_SIZE >= sizeof(uint32_t) * 4);

	/* Copied from ac_device_info.
	 * Use the device info directly instead of using a sha1. GL/VK UUIDs
	 * are 16 bytes vs 20 bytes for sha1, and the truncation that would be
	 * required would get rid of part of the little entropy we have.
	 */
	memset(uuid, 0, PIPE_UUID_SIZE);
	if (!rs->info.pci.valid)
		fprintf(stderr,
			"r600 device_uuid output is based on invalid pci bus info.\n");
	uint_uuid[0] = rs->info.pci.domain;
	uint_uuid[1] = rs->info.pci.bus;
	uint_uuid[2] = rs->info.pci.dev;
	uint_uuid[3] = rs->info.pci.func;
}

bool r600_common_screen_init(struct r600_common_screen *rscreen,
			     struct radeon_winsys *ws)
{
	char family_name[32] = {}, kernel_version[128] = {};
	struct utsname uname_data;
	const char *chip_name;

	ws->query_info(ws, &rscreen->info);
	rscreen->ws = ws;

	chip_name = r600_get_family_name(rscreen);

	if (uname(&uname_data) == 0)
		snprintf(kernel_version, sizeof(kernel_version),
			 " / %s", uname_data.release);

	snprintf(rscreen->renderer_string, sizeof(rscreen->renderer_string),
		 "%s (%sDRM %i.%i.%i%s"
#if AMD_LLVM_AVAILABLE
		 ", LLVM " MESA_LLVM_VERSION_STRING
#endif
		 ")",
		 chip_name, family_name, rscreen->info.drm_major,
		 rscreen->info.drm_minor, rscreen->info.drm_patchlevel,
		 kernel_version);

	rscreen->b.get_name = r600_get_name;
	rscreen->b.get_vendor = r600_get_vendor;
	rscreen->b.get_device_vendor = r600_get_device_vendor;
	rscreen->b.get_disk_shader_cache = r600_get_disk_shader_cache;
	rscreen->b.get_compute_param = r600_get_compute_param;
	rscreen->b.get_screen_fd = r600_get_screen_fd;
	rscreen->b.get_paramf = r600_get_paramf;
	rscreen->b.get_timestamp = r600_get_timestamp;
	rscreen->b.get_compiler_options = r600_get_compiler_options;
	rscreen->b.fence_finish = r600_fence_finish;
	rscreen->b.fence_reference = r600_fence_reference;
	rscreen->b.resource_destroy = r600_resource_destroy;
	rscreen->b.resource_from_user_memory = r600_buffer_from_user_memory;
	rscreen->b.query_memory_info = r600_query_memory_info;
	rscreen->b.get_device_uuid = r600_get_device_uuid;
	rscreen->b.get_driver_uuid = r600_get_driver_uuid;

	if (rscreen->info.ip[AMD_IP_UVD].num_queues) {
		rscreen->b.get_video_param = rvid_get_video_param;
		rscreen->b.is_video_format_supported = rvid_is_format_supported;
	} else {
		rscreen->b.get_video_param = r600_get_video_param;
		rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported;
	}

	r600_init_screen_texture_functions(rscreen);
	r600_init_screen_query_functions(rscreen);

	rscreen->family = rscreen->info.family;
	rscreen->gfx_level = rscreen->info.gfx_level;
	rscreen->debug_flags |= debug_get_flags_option("R600_DEBUG", common_debug_options, 0);

	r600_disk_cache_create(rscreen);

	slab_create_parent(&rscreen->pool_transfers, sizeof(struct r600_transfer), 64);

	rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
	if (rscreen->force_aniso >= 0) {
		printf("radeon: Forcing anisotropy filter to %ix\n",
		       /* round down to a power of two */
		       1 << util_logbase2(rscreen->force_aniso));
	}

	(void) mtx_init(&rscreen->aux_context_lock, mtx_plain);
	(void) mtx_init(&rscreen->gpu_load_mutex, mtx_plain);

	if (rscreen->debug_flags & DBG_INFO) {
		printf("pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
		       rscreen->info.pci.domain, rscreen->info.pci.bus,
		       rscreen->info.pci.dev, rscreen->info.pci.func);
		printf("pci_id = 0x%x\n", rscreen->info.pci_id);
		printf("family = %i (%s)\n", rscreen->info.family,
		       r600_get_family_name(rscreen));
		printf("gfx_level = %i\n", rscreen->info.gfx_level);
		printf("pte_fragment_size = %u\n", rscreen->info.pte_fragment_size);
		printf("gart_page_size = %u\n", rscreen->info.gart_page_size);
		printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size_kb, 1024));
		printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size_kb, 1024));
		printf("vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_vis_size_kb, 1024));
		printf("max_heap_size = %i MB\n",
		       (int)DIV_ROUND_UP(rscreen->info.max_heap_size_kb, 1024));
		printf("min_alloc_size = %u\n", rscreen->info.min_alloc_size);
		printf("has_dedicated_vram = %u\n", rscreen->info.has_dedicated_vram);
		printf("r600_has_virtual_memory = %i\n", rscreen->info.r600_has_virtual_memory);
		printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
		printf("ip[AMD_IP_UVD] = %u\n", rscreen->info.ip[AMD_IP_UVD].num_queues);
		printf("ip[AMD_IP_SDMA] = %i\n", rscreen->info.ip[AMD_IP_SDMA].num_queues);
		printf("ip[AMD_IP_COMPUTE] = %u\n", rscreen->info.ip[AMD_IP_COMPUTE].num_queues);
		printf("uvd_fw_version = %u\n", rscreen->info.uvd_fw_version);
		printf("vce_fw_version = %u\n", rscreen->info.vce_fw_version);
		printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
		printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
		printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
		printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
		printf("tcc_cache_line_size = %u\n", rscreen->info.tcc_cache_line_size);
		printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
		       rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
		printf("has_userptr = %i\n", rscreen->info.has_userptr);
		printf("has_syncobj = %u\n", rscreen->info.has_syncobj);

		printf("r600_max_quad_pipes = %i\n", rscreen->info.r600_max_quad_pipes);
		printf("max_gpu_freq_mhz = %i\n", rscreen->info.max_gpu_freq_mhz);
		printf("num_cu = %i\n", rscreen->info.num_cu);
		printf("max_se = %i\n", rscreen->info.max_se);
		printf("max_sh_per_se = %i\n", rscreen->info.max_sa_per_se);

		printf("r600_gb_backend_map = %i\n", rscreen->info.r600_gb_backend_map);
		printf("r600_gb_backend_map_valid = %i\n", rscreen->info.r600_gb_backend_map_valid);
		printf("r600_num_banks = %i\n", rscreen->info.r600_num_banks);
		printf("num_render_backends = %i\n", rscreen->info.max_render_backends);
		printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
		printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
		printf("enabled_rb_mask = 0x%" PRIx64 "\n", rscreen->info.enabled_rb_mask);
		printf("max_alignment = %u\n", (unsigned)rscreen->info.max_alignment);
	}

	const struct nir_shader_compiler_options nir_options = {
		.fuse_ffma16 = true,
		.fuse_ffma32 = true,
		.fuse_ffma64 = true,
		.lower_flrp32 = true,
		.lower_flrp64 = true,
		.lower_fdiv = true,
		.lower_isign = true,
		.lower_fsign = true,
		.lower_fmod = true,
		.lower_uadd_carry = true,
		.lower_usub_borrow = true,
		.lower_bitfield_extract = true,
		.lower_bitfield_insert = true,
		.lower_extract_byte = true,
		.lower_extract_word = true,
		.lower_insert_byte = true,
		.lower_insert_word = true,
		.lower_ldexp = true,
		/* due to a bug in the shader compiler, some loops hang
		 * if they are not unrolled, see:
		 *    https://bugs.freedesktop.org/show_bug.cgi?id=86720
		 */
		.max_unroll_iterations = 255,
		.lower_interpolate_at = true,
		.vectorize_io = true,
		.has_umad24 = true,
		.has_umul24 = true,
		.has_fmulz = true,
		.use_interpolated_input_intrinsics = true,
		.has_fsub = true,
		.has_isub = true,
		.has_find_msb_rev = true,
		.lower_iabs = true,
		.lower_uadd_sat = true,
		.lower_usub_sat = true,
		.has_fused_comp_and_csel = true,
		.lower_ifind_msb = true,
		.lower_ufind_msb = true,
		.lower_to_scalar = true,
		.lower_to_scalar_filter = r600_lower_to_scalar_instr_filter,
		.linker_ignore_precision = true,
		.lower_fpow = true,
		.lower_int64_options = ~0,
		.lower_cs_local_index_to_id = true,
		.lower_uniforms_to_ubo = true,
		.lower_image_offset_to_range_base = 1,
		.vectorize_tess_levels = 1,
		.has_ddx_intrinsics = true,
	};

	rscreen->nir_options = nir_options;

	if (rscreen->info.family < CHIP_CEDAR)
		rscreen->nir_options.force_indirect_unrolling_sampler = true;

	if (rscreen->info.gfx_level >= EVERGREEN) {
		rscreen->nir_options.has_bfe = true;
		rscreen->nir_options.has_bfm = true;
		rscreen->nir_options.has_bitfield_select = true;
	}

	if (rscreen->info.gfx_level < EVERGREEN) {
		/* Pre-EG doesn't have these ALU ops */
		rscreen->nir_options.lower_bit_count = true;
		rscreen->nir_options.lower_bitfield_reverse = true;
	}

	if (rscreen->info.gfx_level < CAYMAN) {
		rscreen->nir_options.lower_doubles_options = nir_lower_fp64_full_software;
		rscreen->nir_options.lower_atomic_offset_to_range_base = true;
	} else {
		rscreen->nir_options.lower_doubles_options =
			nir_lower_ddiv |
			nir_lower_dfloor |
			nir_lower_dceil |
			nir_lower_dmod |
			nir_lower_dsub |
			nir_lower_dtrunc |
			nir_lower_dround_even;
	}

	rscreen->nir_options_fs = rscreen->nir_options;
	rscreen->nir_options_fs.lower_all_io_to_temps = true;

	return true;
}

void r600_destroy_common_screen(struct r600_common_screen *rscreen)
{
	r600_perfcounters_destroy(rscreen);
	r600_gpu_load_kill_thread(rscreen);

	mtx_destroy(&rscreen->gpu_load_mutex);
	mtx_destroy(&rscreen->aux_context_lock);
	rscreen->aux_context->destroy(rscreen->aux_context);

	slab_destroy_parent(&rscreen->pool_transfers);

	disk_cache_destroy(rscreen->disk_shader_cache);
	rscreen->ws->destroy(rscreen->ws);
	FREE(rscreen);
}

bool r600_can_dump_shader(struct r600_common_screen *rscreen,
			  unsigned processor)
{
	return rscreen->debug_flags & (1 << processor);
}

bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
{
	return (rscreen->debug_flags & DBG_CHECK_IR) ||
	       r600_can_dump_shader(rscreen, processor);
}

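/* Clear part of a buffer using the screen's aux context; aux_context_lock
 * serializes access to that context. */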
void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
			      uint64_t offset, uint64_t size, unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;

	mtx_lock(&rscreen->aux_context_lock);
	rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
	rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
	mtx_unlock(&rscreen->aux_context_lock);
}