/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 * Copyright 2008 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Vertices are just an array of floats, with all the attributes
 * packed. We currently assume a layout like:
 *
 * attr[0][0..3] - window position
 * attr[1..n][0..3] - remaining attributes.
 *
 * Attributes are assumed to be 4 floats wide but are packed so that
 * all the enabled attributes run contiguously.
 */
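
/* For example, with the window position and a single color attribute
 * enabled, one vertex is packed as eight floats:
 *
 *    attr[0][0..3] - x, y, z, w window coordinates
 *    attr[1][0..3] - r, g, b, a color
 *
 * so component c of attribute a lives at float index a * 4 + c.
 * (Illustrative sketch only; which attributes are actually present
 * depends on the bound fragment shader inputs.)
 */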

#include "util/u_math.h"
#include "util/u_memory.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"

#include "sp_context.h"
#include "sp_state.h"
#include "sp_quad.h"
#include "sp_quad_pipe.h"


struct quad_shade_stage
{
   struct quad_stage stage;  /**< base class */

   /* no other fields at this time */
};


/**
 * Execute fragment shader for the four fragments in the quad.
 * \return TRUE if quad is alive, FALSE if all four pixels are killed
 */
static inline bool
shade_quad(struct quad_stage *qs, struct quad_header *quad)
{
   struct softpipe_context *softpipe = qs->softpipe;
   struct tgsi_exec_machine *machine = softpipe->fs_machine;

   if (softpipe->active_statistics_queries) {
      softpipe->pipeline_statistics.ps_invocations +=
         util_bitcount(quad->inout.mask);
   }

   /* run shader */
   machine->flatshade_color = softpipe->rasterizer->flatshade ? true : false;
   return softpipe->fs_variant->run( softpipe->fs_variant, machine, quad, softpipe->early_depth );
}


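/**
 * Scale the alpha channel of each color output by the quad's per-fragment
 * coverage value.
 * Note: this stage currently never calls it; see the disabled do_coverage
 * test in shade_quads() below.
 */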
static void
coverage_quad(struct quad_stage *qs, struct quad_header *quad)
{
   struct softpipe_context *softpipe = qs->softpipe;
   uint cbuf;

   /* loop over colorbuffer outputs */
   for (cbuf = 0; cbuf < softpipe->framebuffer.nr_cbufs; cbuf++) {
      float (*quadColor)[4] = quad->output.color[cbuf];
      unsigned j;
      for (j = 0; j < TGSI_QUAD_SIZE; j++) {
         assert(quad->input.coverage[j] >= 0.0);
         assert(quad->input.coverage[j] <= 1.0);
         quadColor[3][j] *= quad->input.coverage[j];
      }
   }
}


/**
 * Shade/write an array of quads
 * Called via quad_stage::run()
 */
static void
shade_quads(struct quad_stage *qs,
            struct quad_header *quads[],
            unsigned nr)
{
   struct softpipe_context *softpipe = qs->softpipe;
   struct tgsi_exec_machine *machine = softpipe->fs_machine;
   unsigned i, nr_quads = 0;

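   /* Point the TGSI machine at the currently mapped fragment shader
    * constant buffers before running the shader.
    */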
   tgsi_exec_set_constant_buffers(machine, PIPE_MAX_CONSTANT_BUFFERS,
                                  softpipe->mapped_constants[PIPE_SHADER_FRAGMENT]);

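   /* Use the first quad's interpolation coefficients for the whole run
    * (the quads in one run are expected to come from the same primitive).
    */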
   machine->InterpCoefs = quads[0]->coef;

   for (i = 0; i < nr; i++) {
      /* Only omit this quad from the output list if all the fragments
       * are killed _AND_ it's not the first quad in the list.
       * The first quad is special in the (optimized) depth-testing code:
       * the quads' Z coordinates are step-wise interpolated with respect
       * to the first quad in the list.
       * For multi-pass algorithms we need to produce exactly the same
       * Z values in each pass. If interpolation starts with different quads
       * we can get different Z values for the same (x,y).
       */
      if (!shade_quad(qs, quads[i]) && i > 0)
         continue; /* quad totally culled/killed */

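      /* Coverage application is currently disabled here; see coverage_quad(). */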
      if (/*do_coverage*/ 0)
         coverage_quad( qs, quads[i] );

      quads[nr_quads++] = quads[i];
   }

   if (nr_quads)
      qs->next->run(qs->next, quads, nr_quads);
}


/**
 * Per-primitive (or per-begin?) setup
 */
static void
shade_begin(struct quad_stage *qs)
{
   qs->next->begin(qs->next);
}


static void
shade_destroy(struct quad_stage *qs)
{
   FREE( qs );
}


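/**
 * Create the quad shading stage, which runs the fragment shader on each
 * quad and passes the surviving quads on to the next stage.
 */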
struct quad_stage *
sp_quad_shade_stage( struct softpipe_context *softpipe )
{
   struct quad_shade_stage *qss = CALLOC_STRUCT(quad_shade_stage);
   if (!qss)
      goto fail;

   qss->stage.softpipe = softpipe;
   qss->stage.begin = shade_begin;
   qss->stage.run = shade_quads;
   qss->stage.destroy = shade_destroy;

   return &qss->stage;

fail:
   FREE(qss);
   return NULL;
}