/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file crocus_blorp.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * GenX specific code for working with BLORP (blitting, resolves, clears
 * on the 3D engine).  This provides the driver-specific hooks needed to
 * implement the BLORP API.
 *
 * See crocus_blit.c, crocus_clear.c, and so on.
 */

#include <assert.h>

#include "crocus_batch.h"
#include "crocus_resource.h"
#include "crocus_context.h"

#include "util/u_upload_mgr.h"
#include "intel/common/intel_l3_config.h"

#include "blorp/blorp_genX_exec_elk.h"

#if GFX_VER <= 5
#include "gen4_blorp_exec.h"
#endif

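/**
 * Allocate space in the batch's dynamic state buffer, flushing the batch or
 * growing the buffer if it would overflow.  Returns a CPU pointer to the new
 * space, writes its offset to *out_offset, and optionally returns the
 * backing BO through *out_bo.
 */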
static uint32_t *
stream_state(struct crocus_batch *batch,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset,
             struct crocus_bo **out_bo)
{
   uint32_t offset = ALIGN(batch->state.used, alignment);

   if (offset + size >= STATE_SZ && !batch->no_wrap) {
      crocus_batch_flush(batch);
      offset = ALIGN(batch->state.used, alignment);
   } else if (offset + size >= batch->state.bo->size) {
      const unsigned new_size =
         MIN2(batch->state.bo->size + batch->state.bo->size / 2,
              MAX_STATE_SIZE);
      crocus_grow_buffer(batch, true, batch->state.used, new_size);
      assert(offset + size < batch->state.bo->size);
   }

   crocus_record_state_size(batch->state_sizes, offset, size);

   batch->state.used = offset + size;
   *out_offset = offset;

   /* If the caller has asked for a BO, we leave them the responsibility of
    * adding bo->gtt_offset (say, by handing an address to genxml).  If not,
    * we assume they want the offset from a base address.
    */
   if (out_bo)
      *out_bo = batch->state.bo;

   return (uint32_t *)batch->state.map + (offset >> 2);
}

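/**
 * BLORP hook: allocate space for 'n' command dwords in the batch buffer.
 */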
static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;
   return crocus_get_command_space(batch, n * sizeof(uint32_t));
}

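/**
 * BLORP hook: record a relocation for the address packed at 'location'.
 * On Gen4-5 the location may live in the state buffer; otherwise it is in
 * the command buffer.  Returns the address value to write at that location.
 */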
static uint64_t
blorp_emit_reloc(struct blorp_batch *blorp_batch, UNUSED void *location,
                 struct blorp_address addr, uint32_t delta)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;
   uint32_t offset;

   if (GFX_VER < 6 && crocus_ptr_in_state_buffer(batch, location)) {
      offset = (char *)location - (char *)batch->state.map;
      return crocus_state_reloc(batch, offset,
                                addr.buffer, addr.offset + delta,
                                addr.reloc_flags);
   }

   assert(!crocus_ptr_in_state_buffer(batch, location));

   offset = (char *)location - (char *)batch->command.map;
   return crocus_command_reloc(batch, offset,
                               addr.buffer, addr.offset + delta,
                               addr.reloc_flags);
}

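/**
 * BLORP hook: record a relocation for a surface address and write the
 * resulting value into the surface state at 'ss_offset' in the state buffer.
 */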
static void
blorp_surface_reloc(struct blorp_batch *blorp_batch, uint32_t ss_offset,
                    struct blorp_address addr, uint32_t delta)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;
   struct crocus_bo *bo = addr.buffer;

   uint64_t reloc_val =
      crocus_state_reloc(batch, ss_offset, bo, addr.offset + delta,
                         addr.reloc_flags);

   void *reloc_ptr = (void *)batch->state.map + ss_offset;
   *(uint32_t *)reloc_ptr = reloc_val;
}

static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address addr)
{
   /* We'll let blorp_surface_reloc write the address. */
   return 0ull;
}

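/* BLORP asks what surface state offsets are relative to; crocus keeps its
 * surface states in the batch's state buffer, so return that BO at offset 0.
 */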
#if GFX_VER >= 7
static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *blorp_batch)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;
   return (struct blorp_address) {
      .buffer = batch->state.bo,
      .offset = 0
   };
}
#endif

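/**
 * BLORP hook: allocate dynamic state from the batch's state buffer, returning
 * a CPU pointer and the offset within the buffer.
 */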
static void *
blorp_alloc_dynamic_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;

   return stream_state(batch, size, alignment, offset, NULL);
}

UNUSED static void *
blorp_alloc_general_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   /* Use the dynamic state range for general state on crocus. */
   return blorp_alloc_dynamic_state(blorp_batch, size, alignment, offset);
}

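/**
 * BLORP hook: allocate a binding table and its surface states from the
 * batch's state buffer, filling each binding table entry with the offset of
 * the corresponding surface state.
 */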
static bool
blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
                          unsigned num_entries,
                          unsigned state_size,
                          unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets,
                          void **surface_maps)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;
   uint32_t *bt_map = stream_state(batch, num_entries * sizeof(uint32_t), 32,
                                   bt_offset, NULL);

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = stream_state(batch,
                                     state_size, state_alignment,
                                     &(surface_offsets)[i], NULL);
      bt_map[i] = surface_offsets[i];
   }

   return true;
}

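/* Binding table "pointers" are simply offsets within the state buffer here,
 * so no translation is needed.
 */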
static uint32_t
blorp_binding_table_offset_to_pointer(struct blorp_batch *batch,
                                      uint32_t offset)
{
   return offset;
}

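/**
 * BLORP hook: allocate vertex buffer space from the batch's state buffer,
 * returning a CPU pointer for filling it and a blorp_address for the GPU.
 */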
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          struct blorp_address *addr)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;
   struct crocus_bo *bo;
   uint32_t offset;

   void *map = stream_state(batch, size, 64,
                            &offset, &bo);

   *addr = (struct blorp_address) {
      .buffer = bo,
      .offset = offset,
      .reloc_flags = RELOC_32BIT,
#if GFX_VER >= 7
      .mocs = crocus_mocs(bo, &batch->screen->isl_dev),
#endif
   };

   return map;
}

/**
 * No-op: crocus does not emit the VF cache invalidations that BLORP requests
 * for vertex buffer address transitions.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
                                           const struct blorp_address *addrs,
                                           UNUSED uint32_t *sizes,
                                           unsigned num_vbs)
{
}

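/* BLORP hook: return the small scratch allocation the context keeps around
 * as a target for workaround writes.
 */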
static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;

   return (struct blorp_address) {
      .buffer = batch->ice->workaround_bo,
      .offset = batch->ice->workaround_offset,
   };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
                  UNUSED void *start,
                  UNUSED size_t size)
{
   /* All allocated states come from the batch, which we will flush before
    * submitting it.  There's nothing for us to do here.
    */
}

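/* On Gen7+, BLORP queries the L3 configuration in use; hand back the
 * screen's 3D L3 config.
 */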
#if GFX_VER >= 7
static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
   struct crocus_batch *batch = blorp_batch->driver_batch;
   return batch->screen->l3_config_3d;
}
#endif

static void
blorp_pre_emit_urb_config(struct blorp_batch *blorp_batch,
                          struct intel_urb_config *urb_cfg)
{
   /* Dummy. */
}

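/* Program the URB for the BLORP operation.  Gen4-5 goes through the vtbl's
 * URB fence calculation, Gen6 uses the crocus URB upload helper, and Gen7+
 * emits nothing here.
 */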
static void
blorp_emit_urb_config(struct blorp_batch *blorp_batch,
                      struct intel_urb_config *urb_cfg)
{
#if GFX_VER < 7
   struct crocus_batch *batch = blorp_batch->driver_batch;
#if GFX_VER <= 5
   batch->screen->vtbl.calculate_urb_fence(batch, 0,
                                           urb_cfg->size[MESA_SHADER_VERTEX],
                                           urb_cfg->size[MESA_SHADER_FRAGMENT]);
#else
   genX(crocus_upload_urb)(batch, urb_cfg->size[MESA_SHADER_VERTEX], false,
                           urb_cfg->size[MESA_SHADER_VERTEX]);
#endif
#endif
}

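/**
 * The driver-specific BLORP exec hook: flush caches that may hold stale data
 * for the surfaces involved, reserve command and state buffer space, emit the
 * drawing rectangle and required workarounds, execute the BLORP operation,
 * and then flag nearly all GL-tracked state as dirty, since BLORP clobbers it.
 */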
static void
crocus_blorp_exec(struct blorp_batch *blorp_batch,
                  const struct blorp_params *params)
{
   struct crocus_context *ice = blorp_batch->blorp->driver_ctx;
   struct crocus_batch *batch = blorp_batch->driver_batch;

   /* Flush the sampler and render caches.  We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source.  Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      crocus_cache_flush_for_read(batch, params->src.addr.buffer);
   if (params->dst.enabled) {
      crocus_cache_flush_for_render(batch, params->dst.addr.buffer,
                                    params->dst.view.format,
                                    params->dst.aux_usage);
   }
   if (params->depth.enabled)
      crocus_cache_flush_for_depth(batch, params->depth.addr.buffer);
   if (params->stencil.enabled)
      crocus_cache_flush_for_depth(batch, params->stencil.addr.buffer);

   crocus_require_command_space(batch, 1400);
   crocus_require_statebuffer_space(batch, 600);
   batch->no_wrap = true;

#if GFX_VER == 8
   genX(crocus_update_pma_fix)(ice, batch, false);
#endif

#if GFX_VER == 6
   /* Emit workaround flushes when we switch from drawing to blorping. */
   crocus_emit_post_sync_nonzero_flush(batch);
#endif

#if GFX_VER >= 6
   crocus_emit_depth_stall_flushes(batch);
#endif

   blorp_emit(blorp_batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = MAX2(params->x1, params->x0) - 1;
      rect.ClippedDrawingRectangleYMax = MAX2(params->y1, params->y0) - 1;
   }

   batch->screen->vtbl.update_surface_base_address(batch);
   crocus_handle_always_flush_cache(batch);

   batch->contains_draw = true;
   blorp_exec(blorp_batch, params);

   batch->no_wrap = false;
   crocus_handle_always_flush_cache(batch);

   /* We've smashed all of the state that the normal 3D pipeline tracks for
    * GL rendering, so flag (nearly) everything as dirty again.
    */

   uint64_t skip_bits = (CROCUS_DIRTY_POLYGON_STIPPLE |
                         CROCUS_DIRTY_GEN7_SO_BUFFERS |
                         CROCUS_DIRTY_SO_DECL_LIST |
                         CROCUS_DIRTY_LINE_STIPPLE |
                         CROCUS_ALL_DIRTY_FOR_COMPUTE |
                         CROCUS_DIRTY_GEN6_SCISSOR_RECT |
                         CROCUS_DIRTY_GEN75_VF |
                         CROCUS_DIRTY_SF_CL_VIEWPORT);

   uint64_t skip_stage_bits = (CROCUS_ALL_STAGE_DIRTY_FOR_COMPUTE |
                               CROCUS_STAGE_DIRTY_UNCOMPILED_VS |
                               CROCUS_STAGE_DIRTY_UNCOMPILED_TCS |
                               CROCUS_STAGE_DIRTY_UNCOMPILED_TES |
                               CROCUS_STAGE_DIRTY_UNCOMPILED_GS |
                               CROCUS_STAGE_DIRTY_UNCOMPILED_FS |
                               CROCUS_STAGE_DIRTY_SAMPLER_STATES_VS |
                               CROCUS_STAGE_DIRTY_SAMPLER_STATES_TCS |
                               CROCUS_STAGE_DIRTY_SAMPLER_STATES_TES |
                               CROCUS_STAGE_DIRTY_SAMPLER_STATES_GS);

   if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) {
      /* BLORP disabled tessellation; that's fine for the next draw. */
      skip_stage_bits |= CROCUS_STAGE_DIRTY_TCS |
                         CROCUS_STAGE_DIRTY_TES |
                         CROCUS_STAGE_DIRTY_CONSTANTS_TCS |
                         CROCUS_STAGE_DIRTY_CONSTANTS_TES |
                         CROCUS_STAGE_DIRTY_BINDINGS_TCS |
                         CROCUS_STAGE_DIRTY_BINDINGS_TES;
   }

   if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) {
      /* BLORP disabled geometry shaders; that's fine for the next draw. */
      skip_stage_bits |= CROCUS_STAGE_DIRTY_GS |
                         CROCUS_STAGE_DIRTY_CONSTANTS_GS |
                         CROCUS_STAGE_DIRTY_BINDINGS_GS;
   }

   /* We can skip flagging CROCUS_DIRTY_DEPTH_BUFFER if
    * BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set.
    */
   if (blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL)
      skip_bits |= CROCUS_DIRTY_DEPTH_BUFFER;

   if (!params->wm_prog_data)
      skip_bits |= CROCUS_DIRTY_GEN6_BLEND_STATE;

   ice->state.dirty |= ~skip_bits;
   ice->state.stage_dirty |= ~skip_stage_bits;

   ice->urb.vsize = 0;
   ice->urb.gs_present = false;
   ice->urb.gsize = 0;
   ice->urb.tess_present = false;
   ice->urb.hsize = 0;
   ice->urb.dsize = 0;

   if (params->dst.enabled) {
      crocus_render_cache_add_bo(batch, params->dst.addr.buffer,
                                 params->dst.view.format,
                                 params->dst.aux_usage);
   }
   if (params->depth.enabled)
      crocus_depth_cache_add_bo(batch, params->depth.addr.buffer);
   if (params->stencil.enabled)
      crocus_depth_cache_add_bo(batch, params->stencil.addr.buffer);
}

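/* Performance-measurement hooks; crocus does not implement them. */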
static void
blorp_measure_start(struct blorp_batch *blorp_batch,
                    const struct blorp_params *params)
{
}

static void
blorp_measure_end(struct blorp_batch *blorp_batch,
                  const struct blorp_params *params)
{
}

static void
blorp_emit_pre_draw(struct blorp_batch *batch, const struct blorp_params *params)
{
   /* Not implemented. */
}

static void
blorp_emit_post_draw(struct blorp_batch *batch, const struct blorp_params *params)
{
   /* Not implemented. */
}

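/**
 * Set up the BLORP context for this crocus context: initialize BLORP with
 * the screen's ISL device and elk compiler, then plug in the shader cache
 * lookup/upload hooks and the exec hook above.
 */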
void
genX(crocus_init_blorp)(struct crocus_context *ice)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;

   blorp_init_elk(&ice->blorp, ice, &screen->isl_dev, screen->compiler, NULL);
   ice->blorp.lookup_shader = crocus_blorp_lookup_shader;
   ice->blorp.upload_shader = crocus_blorp_upload_shader;
   ice->blorp.exec = crocus_blorp_exec;
}