/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_math.h"
#include "util/u_memory.h"
#include "pipe/p_shader_tokens.h"
#include "draw/draw_context.h"
#include "draw/draw_vertex.h"
#include "draw/draw_private.h"
#include "lp_context.h"
#include "lp_screen.h"
#include "lp_setup.h"
#include "lp_state.h"

#include "tgsi/tgsi_from_mesa.h"

/**
 * The vertex info describes how to convert the post-transformed vertices
 * (simple float[][4]) used by the 'draw' module into vertices for
 * rasterization.
 *
 * This function (re)computes that vertex layout whenever the bound
 * shaders change.
 */
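/*
 * For illustration only (a hypothetical shader pair, not taken from real
 * state): a fragment shader reading COLOR0 and GENERIC0 would typically
 * end up with attrib 0 = position, attrib 1 = color0 (so color_slot[0]
 * becomes 1), attrib 2 = generic0, each emitted as EMIT_4F.
 */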
static void
compute_vertex_info(struct llvmpipe_context *llvmpipe)
{
   struct vertex_info *vinfo = &llvmpipe->vertex_info;

   draw_prepare_shader_outputs(llvmpipe->draw);

   /*
    * None of these slots can actually end up being 0 in the emitted vertex
    * (position always occupies slot 0), but use signed ints anyway so that
    * -1 unambiguously means "not present" (in the vs outputs themselves
    * they can very well be at index 0).
    */
   llvmpipe->color_slot[0] = -1;
   llvmpipe->color_slot[1] = -1;
   llvmpipe->bcolor_slot[0] = -1;
   llvmpipe->bcolor_slot[1] = -1;
   llvmpipe->viewport_index_slot = -1;
   llvmpipe->layer_slot = -1;
   llvmpipe->face_slot = -1;
   llvmpipe->psize_slot = -1;

   /*
    * Match FS inputs against VS outputs, emitting the necessary
    * attributes.  Could cache these structs and look them up with a
    * combination of fragment shader, vertex shader ids.
    */

   vinfo->num_attribs = 0;

   int vs_index = draw_find_shader_output(llvmpipe->draw,
                                          TGSI_SEMANTIC_POSITION, 0);

   draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);

   struct nir_shader *nir = llvmpipe->fs->base.ir.nir;
   uint64_t slot_emitted = 0;
   nir_foreach_shader_in_variable(var, nir) {
      unsigned tgsi_semantic_name, tgsi_semantic_index;
      unsigned slots = nir_variable_count_slots(var, var->type);
      tgsi_get_gl_varying_semantic(var->data.location,
                                   true,
                                   &tgsi_semantic_name,
                                   &tgsi_semantic_index);

      for (unsigned i = 0; i < slots; i++) {
         vs_index = draw_find_shader_output(llvmpipe->draw,
                                            tgsi_semantic_name,
                                            tgsi_semantic_index);
         if (slot_emitted & BITFIELD64_BIT(vs_index)) {
            tgsi_semantic_index++;
            continue;
         }

         if (tgsi_semantic_name == TGSI_SEMANTIC_COLOR &&
             tgsi_semantic_index < 2) {
            int idx = tgsi_semantic_index;
            llvmpipe->color_slot[idx] = (int)vinfo->num_attribs;
         }
         if (tgsi_semantic_name == TGSI_SEMANTIC_FACE) {
            llvmpipe->face_slot = (int)vinfo->num_attribs;
            draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
         /*
          * For vp index and layer, if the fs requires them but the vs doesn't
          * provide them, draw (vbuf) will give us the required 0 (slot -1).
          * (This means in this case we'll also use those slots in setup, which
          * isn't necessary but they'll contain the correct (0) value.)
          */
         } else if (tgsi_semantic_name == TGSI_SEMANTIC_VIEWPORT_INDEX) {
            llvmpipe->viewport_index_slot = (int)vinfo->num_attribs;
            draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
         } else if (tgsi_semantic_name == TGSI_SEMANTIC_LAYER) {
            llvmpipe->layer_slot = (int)vinfo->num_attribs;
            draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
         } else {
            /*
             * Note that we'd actually want to skip position (as we won't use
             * the attribute in the fs) but can't.  The reason is that we don't
             * actually have an input/output map for setup (even though it looks
             * like we do...).  Could adjust for this though even without a map
             * (in llvmpipe_create_fs_state()).
             */
            draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
         }
         slot_emitted |= BITFIELD64_BIT(vs_index);
         tgsi_semantic_index++;
      }
   }

   /*
    * The new style front face is a system value, hence won't show up as
    * an ordinary fs register above.  But we still need to assign a vs
    * output location so draw can inject face info for unfilled tris.
    */
   if (llvmpipe->face_slot < 0 &&
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FRONT_FACE)) {
      vs_index = draw_find_shader_output(llvmpipe->draw,
                                         TGSI_SEMANTIC_FACE, 0);
      llvmpipe->face_slot = (int)vinfo->num_attribs;
      draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
   }

   /* Figure out if we need bcolor as well.
    */
   for (unsigned i = 0; i < 2; i++) {
      vs_index = draw_find_shader_output(llvmpipe->draw,
                                         TGSI_SEMANTIC_BCOLOR, i);

      if (vs_index >= 0) {
         llvmpipe->bcolor_slot[i] = (int)vinfo->num_attribs;
         draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
      }
   }

   /* Figure out if we need pointsize as well.
    */
   vs_index = draw_find_shader_output(llvmpipe->draw,
                                      TGSI_SEMANTIC_PSIZE, 0);

   if (vs_index >= 0) {
      llvmpipe->psize_slot = (int)vinfo->num_attribs;
      draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
   }

   /* Figure out if we need viewport index (if it wasn't already an fs input). */
   if (llvmpipe->viewport_index_slot < 0) {
      vs_index = draw_find_shader_output(llvmpipe->draw,
                                         TGSI_SEMANTIC_VIEWPORT_INDEX,
                                         0);
      if (vs_index >= 0) {
         llvmpipe->viewport_index_slot = (int)vinfo->num_attribs;
         draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
      }
   }

   /* Figure out if we need layer (if it wasn't already an fs input). */
   if (llvmpipe->layer_slot < 0) {
      vs_index = draw_find_shader_output(llvmpipe->draw,
                                         TGSI_SEMANTIC_LAYER,
                                         0);
      if (vs_index >= 0) {
         llvmpipe->layer_slot = (int)vinfo->num_attribs;
         draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
      }
   }

   draw_compute_vertex_size(vinfo);
   lp_setup_set_vertex_info(llvmpipe->setup, vinfo);
}


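/**
 * Check whether the linear rasterizer can be used for the current state
 * (a single non-multisampled 2D color buffer in a supported 8888 format,
 * no zsbuf, and a single viewport) and inform setup and draw accordingly.
 */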
static void
check_linear_rasterizer(struct llvmpipe_context *lp)
{
   const bool valid_cb_format =
      (lp->framebuffer.nr_cbufs == 1 && lp->framebuffer.cbufs[0] &&
       util_res_sample_count(lp->framebuffer.cbufs[0]->texture) == 1 &&
       lp->framebuffer.cbufs[0]->texture->target == PIPE_TEXTURE_2D &&
       (lp->framebuffer.cbufs[0]->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        lp->framebuffer.cbufs[0]->format == PIPE_FORMAT_B8G8R8X8_UNORM ||
        lp->framebuffer.cbufs[0]->format == PIPE_FORMAT_R8G8B8A8_UNORM ||
        lp->framebuffer.cbufs[0]->format == PIPE_FORMAT_R8G8B8X8_UNORM));

   /* permit_linear means guardband, hence fake scissor, which we can only
    * handle if there's just one vp. */
   const bool single_vp = lp->viewport_index_slot < 0;
   const bool permit_linear = (!lp->framebuffer.zsbuf &&
                               valid_cb_format &&
                               single_vp);

   /* Tell draw that we're happy doing our own x/y clipping.
    */
   bool clipping_changed = false;
   if (lp->permit_linear_rasterizer != permit_linear) {
      lp->permit_linear_rasterizer = permit_linear;
      lp_setup_set_linear_mode(lp->setup, permit_linear);
      clipping_changed = true;
   }

   if (lp->single_vp != single_vp) {
      lp->single_vp = single_vp;
      clipping_changed = true;
   }

   /* Disable xy clipping in linear mode.
    *
    * Use a guard band if we don't have a zsbuf.  Could enable the guard
    * band always - this is just to be conservative.
    *
    * Because we have a layering violation where the draw module emits
    * state changes to the driver while we're already inside a draw
    * call, we need to be careful about when we make calls back to the
    * draw module.  Hence the clipping_changed flag, which is there as
    * much to prevent flush recursion as to short-circuit no-op state
    * changes.
    */
   if (clipping_changed) {
      draw_set_driver_clipping(lp->draw,
                               false,          /* bypass_clip_xy */
                               false,          /* bypass_clip_z */
                               permit_linear,  /* guard_band_xy */
                               single_vp);     /* bypass_clip_points */
   }
}


/**
 * Handle state changes before clears.
 * Called just prior to clearing (pipe::clear()).
 */
void
llvmpipe_update_derived_clear(struct llvmpipe_context *llvmpipe)
{
   if (llvmpipe->dirty & (LP_NEW_FS |
                          LP_NEW_FRAMEBUFFER))
      check_linear_rasterizer(llvmpipe);
}


/**
 * Handle state changes.
 * Called just prior to drawing anything (pipe::draw_arrays(), etc).
 *
 * Hopefully this will remain quite simple; otherwise we'll need to pull in
 * something like the gallium frontend mechanism.
 */
void
llvmpipe_update_derived(struct llvmpipe_context *llvmpipe)
{
   struct llvmpipe_screen *lp_screen = llvmpipe_screen(llvmpipe->pipe.screen);

   /* Check for updated textures.
    */
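   /* (The screen-wide timestamp is presumably bumped whenever texture
    * contents may have changed, possibly from another context; a mismatch
    * here flags the fragment sampler views dirty so they get re-validated
    * below.)
    */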
   if (llvmpipe->tex_timestamp != lp_screen->timestamp) {
      llvmpipe->tex_timestamp = lp_screen->timestamp;
      llvmpipe->dirty |= LP_NEW_SAMPLER_VIEW;
   }

   if (llvmpipe->dirty & (LP_NEW_TASK))
      llvmpipe_update_task_shader(llvmpipe);

   if (llvmpipe->dirty & (LP_NEW_MESH))
      llvmpipe_update_mesh_shader(llvmpipe);

   /* This needs LP_NEW_RASTERIZER because of draw_prepare_shader_outputs(). */
   if (llvmpipe->dirty & (LP_NEW_RASTERIZER |
                          LP_NEW_FS |
                          LP_NEW_GS |
                          LP_NEW_TCS |
                          LP_NEW_TES |
                          LP_NEW_MESH |
                          LP_NEW_VS))
      compute_vertex_info(llvmpipe);

   if (llvmpipe->dirty & (LP_NEW_FS |
                          LP_NEW_FRAMEBUFFER |
                          LP_NEW_BLEND |
                          LP_NEW_SCISSOR |
                          LP_NEW_DEPTH_STENCIL_ALPHA |
                          LP_NEW_RASTERIZER |
                          LP_NEW_SAMPLER |
                          LP_NEW_SAMPLER_VIEW |
                          LP_NEW_OCCLUSION_QUERY))
      llvmpipe_update_fs(llvmpipe);

   if (llvmpipe->dirty & (LP_NEW_FS |
                          LP_NEW_FRAMEBUFFER |
                          LP_NEW_RASTERIZER |
                          LP_NEW_SAMPLE_MASK |
                          LP_NEW_DEPTH_STENCIL_ALPHA)) {
      bool discard =
         llvmpipe->rasterizer ? llvmpipe->rasterizer->rasterizer_discard : false;
      lp_setup_set_rasterizer_discard(llvmpipe->setup, discard);
   }

   if (llvmpipe->dirty & (LP_NEW_FS |
                          LP_NEW_FRAMEBUFFER |
                          LP_NEW_RASTERIZER))
      llvmpipe_update_setup(llvmpipe);

   if (llvmpipe->dirty & LP_NEW_SAMPLE_MASK)
      lp_setup_set_sample_mask(llvmpipe->setup, llvmpipe->sample_mask);

   if (llvmpipe->dirty & LP_NEW_BLEND_COLOR)
      lp_setup_set_blend_color(llvmpipe->setup,
                               &llvmpipe->blend_color);

   if (llvmpipe->dirty & LP_NEW_SCISSOR)
      lp_setup_set_scissors(llvmpipe->setup, llvmpipe->scissors);

   if (llvmpipe->dirty & LP_NEW_DEPTH_STENCIL_ALPHA) {
      lp_setup_set_alpha_ref_value(llvmpipe->setup,
                                   llvmpipe->depth_stencil->alpha_ref_value);
      lp_setup_set_stencil_ref_values(llvmpipe->setup,
                                      llvmpipe->stencil_ref.ref_value);
   }

   if (llvmpipe->dirty & LP_NEW_FS_CONSTANTS)
      lp_setup_set_fs_constants(llvmpipe->setup,
                                ARRAY_SIZE(llvmpipe->constants[PIPE_SHADER_FRAGMENT]),
                                llvmpipe->constants[PIPE_SHADER_FRAGMENT]);

   if (llvmpipe->dirty & LP_NEW_FS_SSBOS)
      lp_setup_set_fs_ssbos(llvmpipe->setup,
                            ARRAY_SIZE(llvmpipe->ssbos[PIPE_SHADER_FRAGMENT]),
                            llvmpipe->ssbos[PIPE_SHADER_FRAGMENT],
                            llvmpipe->fs_ssbo_write_mask);

   if (llvmpipe->dirty & LP_NEW_FS_IMAGES)
      lp_setup_set_fs_images(llvmpipe->setup,
                             ARRAY_SIZE(llvmpipe->images[PIPE_SHADER_FRAGMENT]),
                             llvmpipe->images[PIPE_SHADER_FRAGMENT]);

   if (llvmpipe->dirty & (LP_NEW_SAMPLER_VIEW))
      lp_setup_set_fragment_sampler_views(llvmpipe->setup,
                                          llvmpipe->num_sampler_views[PIPE_SHADER_FRAGMENT],
                                          llvmpipe->sampler_views[PIPE_SHADER_FRAGMENT]);

   if (llvmpipe->dirty & (LP_NEW_SAMPLER))
      lp_setup_set_fragment_sampler_state(llvmpipe->setup,
                                          llvmpipe->num_samplers[PIPE_SHADER_FRAGMENT],
                                          llvmpipe->samplers[PIPE_SHADER_FRAGMENT]);

   if (llvmpipe->dirty & LP_NEW_VIEWPORT) {
      /*
       * Update setup and fragment's view of the active viewport state.
       *
       * XXX TODO: It is possible to only loop over the active viewports
       * instead of all viewports (PIPE_MAX_VIEWPORTS).
       */
      lp_setup_set_viewports(llvmpipe->setup,
                             PIPE_MAX_VIEWPORTS,
                             llvmpipe->viewports);
   }

   llvmpipe_task_update_derived(llvmpipe);
   llvmpipe_mesh_update_derived(llvmpipe);

   llvmpipe_update_derived_clear(llvmpipe);

   llvmpipe->dirty = 0;
}