/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
void
genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer, bool enable)
{
   if (cmd_buffer->state.pma_fix_enabled == enable)
      return;

   cmd_buffer->state.pma_fix_enabled = enable;

   /* According to the Broadwell PIPE_CONTROL documentation, software should
    * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
    * prior to the LRI. If stencil buffer writes are enabled, then a Render
    * Cache Flush is also necessary.
    *
    * The Skylake docs say to use a depth stall rather than a command
    * streamer stall. However, the hardware seems to violently disagree.
    * A full command streamer stall seems to be needed in both cases.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DepthCacheFlushEnable = true;
      pc.CommandStreamerStallEnable = true;
      pc.RenderTargetCacheFlushEnable = true;
   }

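   /* CACHE_MODE_1 is a masked MMIO register: the high 16 bits are per-bit
    * write enables for the low 16 bits. Packing the two Mask fields below
    * means the LRI updates only the PMA-fix bits and leaves the rest of the
    * register untouched (e.g. writing 0x00010001 sets bit 0, while
    * 0x00010000 clears it).
    */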
   uint32_t cache_mode;
   anv_pack_struct(&cache_mode, GENX(CACHE_MODE_1),
                   .NPPMAFixEnable = enable,
                   .NPEarlyZFailsDisable = enable,
                   .NPPMAFixEnableMask = true,
                   .NPEarlyZFailsDisableMask = true);
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = GENX(CACHE_MODE_1_num);
      lri.DataDWord = cache_mode;
   }

   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits is often necessary. We do it regardless because it's easier.
    * The render cache flush is also necessary if stencil writes are enabled.
    *
    * Again, the Skylake docs give a different set of flushes but the BDW
    * flushes seem to work just as well.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DepthStallEnable = true;
      pc.DepthCacheFlushEnable = true;
      pc.RenderTargetCacheFlushEnable = true;
   }
}

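/* Decide whether the gfx8 NP PMA depth fix should be enabled for the current
 * command buffer state. This is a direct transcription of the Broadwell PRM
 * expression quoted below: roughly, the fix is only wanted when HiZ is in
 * use and the pixel shader can influence the depth or stencil results.
 */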
UNUSED static bool
want_depth_pma_fix(struct anv_cmd_buffer *cmd_buffer,
                   const struct vk_depth_stencil_state *ds)
{
   assert(GFX_VER == 8);

   /* From the Broadwell PRM Vol. 2c CACHE_MODE_1::NP_PMA_FIX_ENABLE:
    *
    *    SW must set this bit in order to enable this fix when following
    *    expression is TRUE.
    *
    *    3DSTATE_WM::ForceThreadDispatch != 1 &&
    *    !(3DSTATE_RASTER::ForceSampleCount != NUMRASTSAMPLES_0) &&
    *    (3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL) &&
    *    (3DSTATE_DEPTH_BUFFER::HIZ Enable) &&
    *    !(3DSTATE_WM::EDSC_Mode == EDSC_PREPS) &&
    *    (3DSTATE_PS_EXTRA::PixelShaderValid) &&
    *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *      3DSTATE_WM_HZ_OP::StencilBufferClear) &&
    *    (3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable) &&
    *    (((3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *       3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *       3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *       3DSTATE_PS_BLEND::AlphaTestEnable ||
    *       3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable) &&
    *      3DSTATE_WM::ForceKillPix != ForceOff &&
    *      ((3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    *        3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE) ||
    *       (3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
    *        3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
    *        3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE))) ||
    *     (3DSTATE_PS_EXTRA:: Pixel Shader Computed Depth mode != PSCDEPTH_OFF))
    */

   /* These are always true:
    *    3DSTATE_WM::ForceThreadDispatch != 1 &&
    *    !(3DSTATE_RASTER::ForceSampleCount != NUMRASTSAMPLES_0)
    */

   /* We only enable the PMA fix if we know for certain that HiZ is enabled.
    * If we don't know whether HiZ is enabled or not, we disable the PMA fix
    * and there is no harm.
    *
    * (3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL) &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable
    */
   if (!cmd_buffer->state.hiz_enabled)
      return false;

   /* 3DSTATE_PS_EXTRA::PixelShaderValid */
   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT))
      return false;

   /* !(3DSTATE_WM::EDSC_Mode == EDSC_PREPS) */
   const struct elk_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
   if (wm_prog_data->early_fragment_tests)
      return false;

   /* We never use anv_pipeline for HiZ ops so this is trivially true:
    *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *      3DSTATE_WM_HZ_OP::StencilBufferClear)
    */

   /* 3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable */
   if (!ds->depth.test_enable)
      return false;

   /* (((3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *    3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *    3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *    3DSTATE_PS_BLEND::AlphaTestEnable ||
    *    3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable) &&
    *   3DSTATE_WM::ForceKillPix != ForceOff &&
    *   ((3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    *     3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE) ||
    *    (3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
    *     3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
    *     3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE))) ||
    *  (3DSTATE_PS_EXTRA:: Pixel Shader Computed Depth mode != PSCDEPTH_OFF))
    */
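   /* pipeline->kill_pixel summarizes the kill-pixel terms above
    * (PixelShaderKillsPixels, oMask, alpha-to-coverage), so the whole
    * expression reduces to the check below.
    */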
   return (pipeline->kill_pixel && (ds->depth.write_enable ||
                                    ds->stencil.write_enable)) ||
          wm_prog_data->computed_depth_mode != PSCDEPTH_OFF;
}

void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   const struct vk_dynamic_graphics_state *dyn =
      &cmd_buffer->vk.dynamic_graphics_state;

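   /* Most packets below use a pack-and-merge pattern: the dynamic fields are
    * packed into a local dword array and then OR-ed, via
    * anv_batch_emit_merge(), with the partial packet the pipeline pre-packed
    * at create time.
    */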
   if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_LINE_WIDTH)) {
      uint32_t sf_dw[GENX(3DSTATE_SF_length)];
      struct GENX(3DSTATE_SF) sf = {
         GENX(3DSTATE_SF_header),
      };
      if (cmd_buffer->device->info->platform == INTEL_PLATFORM_CHV) {
         sf.CHVLineWidth = dyn->rs.line.width;
      } else {
         sf.LineWidth = dyn->rs.line.width;
      }
      GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gfx8.sf);
   }

   if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_IA_PRIMITIVE_TOPOLOGY) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_CULL_MODE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_FRONT_FACE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS)) {
      /* Take the dynamic primitive topology into account with:
       *    3DSTATE_RASTER::APIMode
       *    3DSTATE_RASTER::DXMultisampleRasterizationEnable
       *    3DSTATE_RASTER::AntialiasingEnable
       */
      uint32_t api_mode = 0;
      bool msaa_raster_enable = false;

      VkPolygonMode dynamic_raster_mode =
         genX(raster_polygon_mode)(cmd_buffer->state.gfx.pipeline,
                                   dyn->ia.primitive_topology);

      genX(rasterization_mode)(dynamic_raster_mode,
                               pipeline->line_mode, dyn->rs.line.width,
                               &api_mode, &msaa_raster_enable);

      /* From the Broadwell PRM, Volume 2, documentation for
       * 3DSTATE_RASTER, "Antialiasing Enable":
       *
       *    "This field must be disabled if any of the render targets
       *    have integer (UINT or SINT) surface format."
       *
       * Additionally, internal documentation for Gfx12+ states:
       *
       *    "This bit MUST not be set when NUM_MULTISAMPLES > 1 OR
       *    FORCED_SAMPLE_COUNT > 1."
       */
      bool aa_enable =
         anv_rasterization_aa_mode(dynamic_raster_mode, pipeline->line_mode) &&
         !cmd_buffer->state.gfx.has_uint_rt;

      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .APIMode = api_mode,
         .DXMultisampleRasterizationEnable = msaa_raster_enable,
         .AntialiasingEnable = aa_enable,
         .CullMode = genX(vk_to_intel_cullmode)[dyn->rs.cull_mode],
         .FrontWinding = genX(vk_to_intel_front_face)[dyn->rs.front_face],
         .GlobalDepthOffsetEnableSolid = dyn->rs.depth_bias.enable,
         .GlobalDepthOffsetEnableWireframe = dyn->rs.depth_bias.enable,
         .GlobalDepthOffsetEnablePoint = dyn->rs.depth_bias.enable,
         .GlobalDepthOffsetConstant = dyn->rs.depth_bias.constant,
         .GlobalDepthOffsetScale = dyn->rs.depth_bias.slope,
         .GlobalDepthOffsetClamp = dyn->rs.depth_bias.clamp,
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gfx8.raster);
   }

   /* Stencil reference values moved from COLOR_CALC_STATE in gfx8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gfx9. That means the dirty bits get split
    * across different state packets for gfx8 and gfx9. We handle that by
    * using a big old #if switch here.
    */
   if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = dyn->cb.blend_constants[0],
         .BlendConstantColorGreen = dyn->cb.blend_constants[1],
         .BlendConstantColorBlue = dyn->cb.blend_constants[2],
         .BlendConstantColorAlpha = dyn->cb.blend_constants[3],
         .StencilReferenceValue = dyn->ds.stencil.front.reference & 0xff,
         .BackfaceStencilReferenceValue = dyn->ds.stencil.back.reference & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
         ccp.ColorCalcStatePointer = cc_state.offset;
         ccp.ColorCalcStatePointerValid = true;
      }
   }

   if ((cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                       ANV_CMD_DIRTY_RENDER_TARGETS)) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_OP) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK)) {
      VkImageAspectFlags ds_aspects = 0;
      if (cmd_buffer->state.gfx.depth_att.vk_format != VK_FORMAT_UNDEFINED)
         ds_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (cmd_buffer->state.gfx.stencil_att.vk_format != VK_FORMAT_UNDEFINED)
         ds_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;

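      /* Collapse the dynamic depth/stencil state against the aspects that
       * are actually bound so that both the packet below and the PMA
       * heuristic see the effective state rather than the raw API state.
       */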
      struct vk_depth_stencil_state opt_ds = dyn->ds;
      vk_optimize_depth_stencil_state(&opt_ds, ds_aspects, true);

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_WM_DEPTH_STENCIL), ds) {
         ds.DoubleSidedStencilEnable = true;

         ds.StencilTestMask = opt_ds.stencil.front.compare_mask & 0xff;
         ds.StencilWriteMask = opt_ds.stencil.front.write_mask & 0xff;

         ds.BackfaceStencilTestMask = opt_ds.stencil.back.compare_mask & 0xff;
         ds.BackfaceStencilWriteMask = opt_ds.stencil.back.write_mask & 0xff;

         ds.DepthTestEnable = opt_ds.depth.test_enable;
         ds.DepthBufferWriteEnable = opt_ds.depth.write_enable;
         ds.DepthTestFunction = genX(vk_to_intel_compare_op)[opt_ds.depth.compare_op];
         ds.StencilTestEnable = opt_ds.stencil.test_enable;
         ds.StencilBufferWriteEnable = opt_ds.stencil.write_enable;
         ds.StencilFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.fail];
         ds.StencilPassDepthPassOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.pass];
         ds.StencilPassDepthFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.front.op.depth_fail];
         ds.StencilTestFunction = genX(vk_to_intel_compare_op)[opt_ds.stencil.front.op.compare];
         ds.BackfaceStencilFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.fail];
         ds.BackfaceStencilPassDepthPassOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.pass];
         ds.BackfaceStencilPassDepthFailOp = genX(vk_to_intel_stencil_op)[opt_ds.stencil.back.op.depth_fail];
         ds.BackfaceStencilTestFunction = genX(vk_to_intel_compare_op)[opt_ds.stencil.back.op.compare];
      }

      const bool pma = want_depth_pma_fix(cmd_buffer, &opt_ds);
      genX(cmd_buffer_enable_pma_fix)(cmd_buffer, pma);
   }

   if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_RS_LINE_STIPPLE)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_LINE_STIPPLE), ls) {
         ls.LineStipplePattern = dyn->rs.line.stipple.pattern;
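         /* The packet wants both the reciprocal and the raw repeat count;
          * clamp the divisor so a zero factor can't divide by zero.
          */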
         ls.LineStippleInverseRepeatCount =
            1.0f / MAX2(1, dyn->rs.line.stipple.factor);
         ls.LineStippleRepeatCount = dyn->rs.line.stipple.factor;
      }
   }

   if ((cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                       ANV_CMD_DIRTY_INDEX_BUFFER)) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE)) {
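      /* On gfx8 the cut index lives in 3DSTATE_VF rather than in
       * 3DSTATE_INDEX_BUFFER, so primitive restart and the restart index are
       * programmed together here.
       */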
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
         vf.IndexedDrawCutIndexEnable = dyn->ia.primitive_restart_enable;
         vf.CutIndex = cmd_buffer->state.gfx.restart_index;
      }
   }

   if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_INDEX_BUFFER) {
      struct anv_buffer *buffer = cmd_buffer->state.gfx.index_buffer;
      uint32_t offset = cmd_buffer->state.gfx.index_offset;
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
         ib.IndexFormat = cmd_buffer->state.gfx.index_type;
         ib.MOCS = anv_mocs(cmd_buffer->device,
                            buffer->address.bo,
                            ISL_SURF_USAGE_INDEX_BUFFER_BIT);
         ib.BufferStartingAddress = anv_address_add(buffer->address, offset);
         ib.BufferSize = vk_buffer_range(&buffer->vk, offset,
                                         VK_WHOLE_SIZE);
      }
   }

   if (pipeline->base.device->vk.enabled_extensions.EXT_sample_locations &&
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_MS_SAMPLE_LOCATIONS))
      genX(emit_sample_pattern)(&cmd_buffer->batch, dyn->ms.sample_locations);

   if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES)) {
      /* Re-emit 3DSTATE_WM in the hope that we can avoid spawning fragment
       * shader threads.
       */
      uint32_t wm_dwords[GENX(3DSTATE_WM_length)];
      struct GENX(3DSTATE_WM) wm = {
         GENX(3DSTATE_WM_header),

         .ForceThreadDispatchEnable = anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT) &&
                                      (pipeline->force_fragment_thread_dispatch ||
                                       anv_cmd_buffer_all_color_write_masked(cmd_buffer)) ?
                                      ForceON : 0,
      };
      GENX(3DSTATE_WM_pack)(NULL, wm_dwords, &wm);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_dwords, pipeline->gfx8.wm);
   }

   if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_LOGIC_OP) ||
       BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES)) {
      const uint8_t color_writes = dyn->cb.color_write_enables;
      const struct anv_cmd_graphics_state *state = &cmd_buffer->state.gfx;
      bool has_writeable_rt =
         anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT) &&
         (color_writes & ((1u << state->color_att_count) - 1)) != 0;

      /* Emit 3DSTATE_PS_BLEND to be consistent with the rest of the
       * BLEND_STATE_ENTRY.
       */
      uint32_t ps_blend_dwords[GENX(3DSTATE_PS_BLEND_length)];
      struct GENX(3DSTATE_PS_BLEND) ps_blend = {
         GENX(3DSTATE_PS_BLEND_header),
         .HasWriteableRT = has_writeable_rt,
      };
      GENX(3DSTATE_PS_BLEND_pack)(NULL, ps_blend_dwords, &ps_blend);
      anv_batch_emit_merge(&cmd_buffer->batch, ps_blend_dwords,
                           pipeline->gfx8.ps_blend);

      uint32_t blend_dws[GENX(BLEND_STATE_length) +
                         MAX_RTS * GENX(BLEND_STATE_ENTRY_length)];
      uint32_t *dws = blend_dws;
      memset(blend_dws, 0, sizeof(blend_dws));

      /* Leave the BLEND_STATE header dwords zeroed; the pipeline's copy is
       * merged in by anv_cmd_buffer_merge_dynamic() below.
       */
      dws += GENX(BLEND_STATE_length);

      for (uint32_t i = 0; i < MAX_RTS; i++) {
         /* Disable anything above the current number of color attachments. */
         bool write_disabled = i >= cmd_buffer->state.gfx.color_att_count ||
                               (color_writes & BITFIELD_BIT(i)) == 0;
         struct GENX(BLEND_STATE_ENTRY) entry = {
            .WriteDisableAlpha = write_disabled ||
                                 (pipeline->color_comp_writes[i] &
                                  VK_COLOR_COMPONENT_A_BIT) == 0,
            .WriteDisableRed = write_disabled ||
                               (pipeline->color_comp_writes[i] &
                                VK_COLOR_COMPONENT_R_BIT) == 0,
            .WriteDisableGreen = write_disabled ||
                                 (pipeline->color_comp_writes[i] &
                                  VK_COLOR_COMPONENT_G_BIT) == 0,
            .WriteDisableBlue = write_disabled ||
                                (pipeline->color_comp_writes[i] &
                                 VK_COLOR_COMPONENT_B_BIT) == 0,
            .LogicOpFunction = genX(vk_to_intel_logic_op)[dyn->cb.logic_op],
         };
         GENX(BLEND_STATE_ENTRY_pack)(NULL, dws, &entry);
         dws += GENX(BLEND_STATE_ENTRY_length);
      }

      uint32_t num_dwords = GENX(BLEND_STATE_length) +
                            GENX(BLEND_STATE_ENTRY_length) * MAX_RTS;

      struct anv_state blend_states =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, blend_dws,
                                      pipeline->gfx8.blend_state, num_dwords, 64);
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
         bsp.BlendStatePointer = blend_states.offset;
         bsp.BlendStatePointerValid = true;
      }
   }

   /* When we're done, there is no more dirty gfx state. */
   vk_dynamic_graphics_state_clear_dirty(&cmd_buffer->vk.dynamic_graphics_state);
   cmd_buffer->state.gfx.dirty = 0;
}