/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 * Authors:
 *      Jerome Glisse
 * SPDX-License-Identifier: MIT
 */

#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>
#include <unistd.h>


void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			bool count_draw_in, unsigned num_atomics)
{
	/* Flush the DMA IB if it's not empty. */
	if (radeon_emitted(&ctx->b.dma.cs, 0))
		ctx->b.dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);

	if (!radeon_cs_memory_below_limit(ctx->b.screen, &ctx->b.gfx.cs,
					  ctx->b.vram, ctx->b.gtt)) {
		ctx->b.gtt = 0;
		ctx->b.vram = 0;
		ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
		return;
	}
	/* All of this will be accounted for once the relocations are emitted. */
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* Check available space in CS. */
	if (count_draw_in) {
		uint64_t mask;

		/* The number of dwords all the dirty states would take. */
		mask = ctx->dirty_atoms;
		while (mask != 0)
			num_dw += ctx->atoms[u_bit_scan64(&mask)]->num_dw;

		/* The upper bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
	}

	/* Add atomic counters: 8 dwords pre-draw + 8 post-draw per counter,
	 * plus 16 more at the end if any counters are used. */
	num_dw += (num_atomics * 16) + (num_atomics ? 16 : 0);

	/* Count in r600_suspend_queries. */
	num_dw += ctx->b.num_cs_dw_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	if (ctx->b.streamout.begin_emitted) {
		num_dw += ctx->b.streamout.num_dw_for_end;
	}

	/* SX_MISC */
	if (ctx->b.gfx_level == R600) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (!ctx->b.ws->cs_check_space(&ctx->b.gfx.cs, num_dw)) {
		ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
	}
}

void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
	unsigned cp_coher_cntl = 0;
	unsigned wait_until = 0;

	if (!rctx->b.flags) {
		return;
	}

	/* Ensure coherency between streamout and shaders. */
	if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)
		rctx->b.flags |= r600_get_flush_flags(R600_COHERENCY_SHADER);

	if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) {
		wait_until |= S_008040_WAIT_3D_IDLE(1);
	}
	if (rctx->b.flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
		wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family >= CHIP_CAYMAN) {
			/* emit a PS partial flush on Cayman/TN */
			rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}

	/* Wait packets must be executed first, because SURFACE_SYNC doesn't
	 * wait for shaders if it's not flushing CB or DB.
	 */
	if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (rctx->b.flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family < CHIP_CAYMAN) {
			/* wait for things to settle */
			radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
		}
	}

	if (rctx->b.gfx_level >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}

	if (rctx->b.gfx_level >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));

		/* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
		 *
		 * This hack predates use of FLUSH_AND_INV_DB_META, so it's
		 * unclear whether it's still needed or even whether it has
		 * any effect.
		 */
		cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
	}

	if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV ||
	    (rctx->b.gfx_level == R600 && rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
	}

	if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) {
		/* Direct constant addressing uses the shader cache.
		 * Indirect constant addressing uses the vertex cache. */
		cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1) |
				 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
							 : S_0085F0_TC_ACTION_ENA(1));
	}
	if (rctx->b.flags & R600_CONTEXT_INV_VERTEX_CACHE) {
		cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
							: S_0085F0_TC_ACTION_ENA(1);
	}
	if (rctx->b.flags & R600_CONTEXT_INV_TEX_CACHE) {
		/* Textures use the texture cache.
		 * Texture buffer objects use the vertex cache. */
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1) : 0);
	}

	/* Don't use the DB CP COHER logic on r6xx.
	 * There are hw bugs.
	 */
	if (rctx->b.gfx_level >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB)) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				S_0085F0_DB_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
	}

	/* Don't use the CB CP COHER logic on r6xx.
	 * There are hw bugs.
	 */
	if (rctx->b.gfx_level >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB)) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				S_0085F0_CB0_DEST_BASE_ENA(1) |
				S_0085F0_CB1_DEST_BASE_ENA(1) |
				S_0085F0_CB2_DEST_BASE_ENA(1) |
				S_0085F0_CB3_DEST_BASE_ENA(1) |
				S_0085F0_CB4_DEST_BASE_ENA(1) |
				S_0085F0_CB5_DEST_BASE_ENA(1) |
				S_0085F0_CB6_DEST_BASE_ENA(1) |
				S_0085F0_CB7_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
		if (rctx->b.gfx_level >= EVERGREEN)
			cp_coher_cntl |= S_0085F0_CB8_DEST_BASE_ENA(1) |
					S_0085F0_CB9_DEST_BASE_ENA(1) |
					S_0085F0_CB10_DEST_BASE_ENA(1) |
					S_0085F0_CB11_DEST_BASE_ENA(1);
	}

	if (rctx->b.gfx_level >= R700 &&
	    rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				S_0085F0_SO1_DEST_BASE_ENA(1) |
				S_0085F0_SO2_DEST_BASE_ENA(1) |
				S_0085F0_SO3_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
	}

	/* Workaround for buggy flushing on some R6xx chipsets. */
	if ((rctx->b.flags & (R600_CONTEXT_FLUSH_AND_INV |
			      R600_CONTEXT_STREAMOUT_FLUSH)) &&
	    (rctx->b.family == CHIP_RV670 ||
	     rctx->b.family == CHIP_RS780 ||
	     rctx->b.family == CHIP_RS880)) {
		cp_coher_cntl |=  S_0085F0_CB1_DEST_BASE_ENA(1) |
				  S_0085F0_DEST_BASE_0_ENA(1);
	}

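	/* Perform the cache actions selected above. The sync covers the full
	 * address range (CP_COHER_SIZE = ~0 with base 0), and the CP polls at
	 * POLL_INTERVAL granularity until the actions have completed.
	 */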
	if (cp_coher_cntl) {
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
		radeon_emit(cs, cp_coher_cntl);   /* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);      /* CP_COHER_SIZE */
		radeon_emit(cs, 0);               /* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);      /* POLL_INTERVAL */
	}

	if (rctx->b.flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) |
			        EVENT_INDEX(0));
	} else if (rctx->b.flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_STOP) |
			        EVENT_INDEX(0));
	}

	/* everything is properly flushed */
	rctx->b.flags = 0;
}

void r600_context_gfx_flush(void *context, unsigned flags,
			    struct pipe_fence_handle **fence)
{
	struct r600_context *ctx = context;
	struct radeon_cmdbuf *cs = &ctx->b.gfx.cs;
	struct radeon_winsys *ws = ctx->b.ws;

	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
		return;

	if (r600_check_device_reset(&ctx->b))
		return;

	r600_preflush_suspend_features(&ctx->b);

	/* Flush the framebuffer caches (CB/DB and their metadata) and wait for idle at the end of the CS. */
	ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
		      R600_CONTEXT_FLUSH_AND_INV_CB |
		      R600_CONTEXT_FLUSH_AND_INV_DB |
		      R600_CONTEXT_FLUSH_AND_INV_CB_META |
		      R600_CONTEXT_FLUSH_AND_INV_DB_META |
		      R600_CONTEXT_WAIT_3D_IDLE |
		      R600_CONTEXT_WAIT_CP_DMA_IDLE;

	r600_flush_emit(ctx);

	if (ctx->trace_buf)
		eg_trace_emit(ctx);
	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->b.gfx_level == R600) {
		radeon_set_context_reg(cs, R_028350_SX_MISC, 0);
	}

	if (ctx->is_debug) {
		/* Save the IB for debug contexts. */
		radeon_clear_saved_cs(&ctx->last_gfx);
		radeon_save_cs(ws, cs, &ctx->last_gfx, true);
		r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
		r600_resource_reference(&ctx->trace_buf, NULL);
	}
	/* Flush the CS. */
	ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
	if (fence)
		ws->fence_reference(ws, fence, ctx->b.last_gfx_fence);
	ctx->b.num_gfx_cs_flushes++;

	if (ctx->is_debug) {
		if (!ws->fence_wait(ws, ctx->b.last_gfx_fence, 10000000)) {
			const char *fname = getenv("R600_TRACE");
			if (!fname)
				exit(-1);
			FILE *fl = fopen(fname, "w+");
			if (fl) {
				eg_dump_debug_state(&ctx->b.b, fl, 0);
				fclose(fl);
			} else
				perror(fname);
			exit(-1);
		}
	}
	r600_begin_new_cs(ctx);
}

void r600_begin_new_cs(struct r600_context *ctx)
{
	unsigned shader;

	if (ctx->is_debug) {
		uint32_t zero = 0;

		/* Create a buffer used for writing trace IDs and initialize it to 0. */
		assert(!ctx->trace_buf);
		ctx->trace_buf = (struct r600_resource*)
			pipe_buffer_create(ctx->b.b.screen, 0,
					   PIPE_USAGE_STAGING, 4);
		if (ctx->trace_buf)
			pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
						    0, sizeof(zero), &zero);
		ctx->trace_id = 0;
	}

	if (ctx->trace_buf)
		eg_trace_emit(ctx);

	ctx->b.flags = 0;
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(&ctx->b.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	r600_mark_atom_dirty(ctx, &ctx->alphatest_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->blend_color.atom);
	r600_mark_atom_dirty(ctx, &ctx->cb_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->clip_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
	if (ctx->b.gfx_level >= EVERGREEN) {
		r600_mark_atom_dirty(ctx, &ctx->fragment_images.atom);
		r600_mark_atom_dirty(ctx, &ctx->fragment_buffers.atom);
		r600_mark_atom_dirty(ctx, &ctx->compute_images.atom);
		r600_mark_atom_dirty(ctx, &ctx->compute_buffers.atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
	r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
	r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
	ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	r600_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
	ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	ctx->b.viewports.depth_range_dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
	r600_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
	if (ctx->b.gfx_level <= EVERGREEN) {
		r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
	r600_mark_atom_dirty(ctx, &ctx->vertex_fetch_shader.atom);
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_ES].atom);
	r600_mark_atom_dirty(ctx, &ctx->shader_stages.atom);
	if (ctx->gs_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_GS].atom);
		r600_mark_atom_dirty(ctx, &ctx->gs_rings.atom);
	}
	if (ctx->tes_shader) {
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_HS].atom);
		r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_LS].atom);
	}
	r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_VS].atom);
	r600_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
	r600_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);

	if (ctx->blend_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->blend_state.atom);
	if (ctx->dsa_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->dsa_state.atom);
	if (ctx->rasterizer_state.cso)
		r600_mark_atom_dirty(ctx, &ctx->rasterizer_state.atom);

	if (ctx->b.gfx_level <= R700) {
		r600_mark_atom_dirty(ctx, &ctx->seamless_cube_map.atom);
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	for (shader = 0; shader < ARRAY_SIZE(ctx->scratch_buffers); shader++) {
		ctx->scratch_buffers[shader].dirty = true;
	}

	r600_postflush_resume_features(&ctx->b);

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;
	ctx->last_rast_prim      = -1;
	ctx->current_rast_prim   = -1;

	assert(!ctx->b.gfx.cs.prev_dw);
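	/* Remember how big the preamble is; r600_context_gfx_flush uses this to
	 * skip submitting a CS that contains nothing but this re-emitted state.
	 */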
	ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs.current.cdw;
}

void r600_emit_pfp_sync_me(struct r600_context *rctx)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;

	if (rctx->b.gfx_level >= EVERGREEN) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	} else {
		/* Emulate PFP_SYNC_ME by writing a value to memory in ME and
		 * waiting for it in PFP.
		 */
		struct r600_resource *buf = NULL;
		unsigned offset, reloc;
		uint64_t va;

		/* 16-byte address alignment is required by WAIT_REG_MEM. */
		u_suballocator_alloc(&rctx->b.allocator_zeroed_memory, 4, 16,
				     &offset, (struct pipe_resource**)&buf);
		if (!buf) {
			/* This is too heavyweight, but will work. */
			rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
			return;
		}

		reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, buf,
						  RADEON_USAGE_READWRITE |
						  RADEON_PRIO_FENCE_TRACE);

		va = buf->gpu_address + offset;
		assert(va % 16 == 0);

		/* Write 1 to memory in ME. */
		radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
		radeon_emit(cs, va);			/* ADDR_LO [31:0] */
		radeon_emit(cs, ((va >> 32) & 0xff) | MEM_WRITE_32_BITS); /* ADDR_HI [7:0] | 32-bit data */
		radeon_emit(cs, 1);			/* DATA_LO */
		radeon_emit(cs, 0);			/* DATA_HI */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);

		/* Wait in PFP (PFP can only do GEQUAL against memory). */
		radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
		radeon_emit(cs, WAIT_REG_MEM_GEQUAL |
			        WAIT_REG_MEM_MEMORY |
			        WAIT_REG_MEM_PFP);
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		radeon_emit(cs, 1); /* reference value */
		radeon_emit(cs, 0xffffffff); /* mask */
		radeon_emit(cs, 4); /* poll interval */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);

		r600_resource_reference(&buf, NULL);
	}
}

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
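/* The BYTE_COUNT field of the CP_DMA packet is 21 bits wide (see the packet
 * emission below), which is where this just-under-2-MiB limit comes from.
 */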

void r600_cp_dma_copy_buffer(struct r600_context *rctx,
			     struct pipe_resource *dst, uint64_t dst_offset,
			     struct pipe_resource *src, uint64_t src_offset,
			     unsigned size)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;

	assert(size);
	assert(rctx->screen->b.has_cp_dma);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(dst, &r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* Flush the caches where the resources are bound. */
	rctx->b.flags |= r600_get_flush_flags(R600_COHERENCY_SHADER) |
			 R600_CONTEXT_WAIT_3D_IDLE;

	/* There are differences between R700 and EG in CP DMA,
	 * but we only use the common bits here. */
	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned src_reloc, dst_reloc;

		r600_need_cs_space(rctx,
				   10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
				   3 + R600_MAX_PFP_SYNC_ME_DWORDS, false, 0);

		/* Flush the caches for the first copy only. */
		if (rctx->b.flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)src,
						  RADEON_USAGE_READ | RADEON_PRIO_CP_DMA);
		dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)dst,
						  RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_offset);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync | ((src_offset >> 32) & 0xff));		/* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
		radeon_emit(cs, dst_offset);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_offset >> 32) & 0xff);		/* DST_ADDR_HI [7:0] */
		radeon_emit(cs, byte_count);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, src_reloc);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, dst_reloc);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	/* CP_DMA_CP_SYNC doesn't wait for idle on R6xx, but this does. */
	if (rctx->b.gfx_level == R600)
		radeon_set_config_reg(cs, R_008040_WAIT_UNTIL,
				      S_008040_WAIT_CP_DMA_IDLE(1));

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	r600_emit_pfp_sync_me(rctx);
}

void r600_dma_copy_buffer(struct r600_context *rctx,
			  struct pipe_resource *dst,
			  struct pipe_resource *src,
			  uint64_t dst_offset,
			  uint64_t src_offset,
			  uint64_t size)
{
	struct radeon_cmdbuf *cs = &rctx->b.dma.cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->b.b, &rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

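	/* The DMA COPY packet below drops the two low address bits and counts
	 * in dwords, so the caller must pass dword-aligned offsets and size.
	 */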
	size >>= 2; /* convert to dwords */
	ncopy = (size / R600_DMA_COPY_MAX_SIZE_DW) + !!(size % R600_DMA_COPY_MAX_SIZE_DW);
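	/* ncopy = number of COPY packets needed: the dword count divided by the
	 * per-packet maximum, rounded up. */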

	r600_need_dma_space(&rctx->b, ncopy * 5, rdst, rsrc);
	for (i = 0; i < ncopy; i++) {
		csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
		/* Emit relocs before writing to the CS so that the CS is always in a consistent state. */
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ);
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE);
		radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize));
		radeon_emit(cs, dst_offset & 0xfffffffc);
		radeon_emit(cs, src_offset & 0xfffffffc);
		radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
		radeon_emit(cs, (src_offset >> 32UL) & 0xff);
		dst_offset += csize << 2;
		src_offset += csize << 2;
		size -= csize;
	}
}