1 /**************************************************************************
2 *
3 * Copyright 2009-2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Depth/stencil testing to LLVM IR translation.
31 *
32 * To be done accurately/efficiently the depth/stencil test must be done with
33 * the same type/format of the depth/stencil buffer, which implies massaging
34 * the incoming depths to fit into place. Using a more straightforward
35 * type/format for depth/stencil values internally and only convert when
36 * flushing would avoid this, but it would most likely result in depth fighting
37 * artifacts.
38 *
39 * Since we're using linear layout for everything, but we need to deal with
40 * 2x2 quads, we need to load/store multiple values and swizzle them into
41 * place (we could avoid this by doing depth/stencil testing in linear format,
42 * which would be easy for late depth/stencil test as we could do that after
43 * the fragment shader loop just as we do for color buffers, but more tricky
44 * for early depth test as we'd need both masks and interpolated depth in
45 * linear format).
46 *
47 *
48 * @author Jose Fonseca <[email protected]>
49 * @author Brian Paul <[email protected]>
50 */
51
52 #include "pipe/p_state.h"
53 #include "util/format/u_format.h"
54 #include "util/u_cpu_detect.h"
55
56 #include "gallivm/lp_bld_type.h"
57 #include "gallivm/lp_bld_arit.h"
58 #include "gallivm/lp_bld_bitarit.h"
59 #include "gallivm/lp_bld_const.h"
60 #include "gallivm/lp_bld_conv.h"
61 #include "gallivm/lp_bld_logic.h"
62 #include "gallivm/lp_bld_flow.h"
63 #include "gallivm/lp_bld_intr.h"
64 #include "gallivm/lp_bld_debug.h"
65 #include "gallivm/lp_bld_swizzle.h"
66 #include "gallivm/lp_bld_pack.h"
67
68 #include "lp_bld_depth.h"
69 #include "lp_state_fs.h"
70
71
/** Used to select fields from pipe_stencil_state */
enum stencil_op {
   S_FAIL_OP,   /**< stencil test failed -> pipe_stencil_state::fail_op */
   Z_FAIL_OP,   /**< stencil passed, depth failed -> pipe_stencil_state::zfail_op */
   Z_PASS_OP    /**< stencil and depth passed -> pipe_stencil_state::zpass_op */
};
78
79
80
81 /**
82 * Do the stencil test comparison (compare FB stencil values against ref value).
83 * This will be used twice when generating two-sided stencil code.
84 * \param stencil the front/back stencil state
85 * \param stencilRef the stencil reference value, replicated as a vector
86 * \param stencilVals vector of stencil values from framebuffer
87 * \return vector mask of pass/fail values (~0 or 0)
88 */
89 static LLVMValueRef
lp_build_stencil_test_single(struct lp_build_context * bld,const struct pipe_stencil_state * stencil,LLVMValueRef stencilRef,LLVMValueRef stencilVals)90 lp_build_stencil_test_single(struct lp_build_context *bld,
91 const struct pipe_stencil_state *stencil,
92 LLVMValueRef stencilRef,
93 LLVMValueRef stencilVals)
94 {
95 LLVMBuilderRef builder = bld->gallivm->builder;
96 const unsigned stencilMax = 255; /* XXX fix */
97 struct lp_type type = bld->type;
98
99 /*
100 * SSE2 has intrinsics for signed comparisons, but not unsigned ones. Values
101 * are between 0..255 so ensure we generate the fastest comparisons for
102 * wider elements.
103 */
104 if (type.width <= 8) {
105 assert(!type.sign);
106 } else {
107 assert(type.sign);
108 }
109
110 assert(stencil->enabled);
111
112 if (stencil->valuemask != stencilMax) {
113 /* compute stencilRef = stencilRef & valuemask */
114 LLVMValueRef valuemask = lp_build_const_int_vec(bld->gallivm, type, stencil->valuemask);
115 stencilRef = LLVMBuildAnd(builder, stencilRef, valuemask, "");
116 /* compute stencilVals = stencilVals & valuemask */
117 stencilVals = LLVMBuildAnd(builder, stencilVals, valuemask, "");
118 }
119
120 LLVMValueRef res = lp_build_cmp(bld, stencil->func,
121 stencilRef, stencilVals);
122 return res;
123 }
124
125
126 /**
127 * Do the one or two-sided stencil test comparison.
128 * \sa lp_build_stencil_test_single
129 * \param front_facing an integer vector mask, indicating front (~0) or back
130 * (0) facing polygon. If NULL, assume front-facing.
131 */
132 static LLVMValueRef
lp_build_stencil_test(struct lp_build_context * bld,const struct pipe_stencil_state stencil[2],LLVMValueRef stencilRefs[2],LLVMValueRef stencilVals,LLVMValueRef front_facing)133 lp_build_stencil_test(struct lp_build_context *bld,
134 const struct pipe_stencil_state stencil[2],
135 LLVMValueRef stencilRefs[2],
136 LLVMValueRef stencilVals,
137 LLVMValueRef front_facing)
138 {
139 LLVMValueRef res;
140
141 assert(stencil[0].enabled);
142
143 /* do front face test */
144 res = lp_build_stencil_test_single(bld, &stencil[0],
145 stencilRefs[0], stencilVals);
146
147 if (stencil[1].enabled && front_facing != NULL) {
148 /* do back face test */
149 LLVMValueRef back_res;
150
151 back_res = lp_build_stencil_test_single(bld, &stencil[1],
152 stencilRefs[1], stencilVals);
153
154 res = lp_build_select(bld, front_facing, res, back_res);
155 }
156
157 return res;
158 }
159
160
161 /**
162 * Apply the stencil operator (add/sub/keep/etc) to the given vector
163 * of stencil values.
164 * \return new stencil values vector
165 */
166 static LLVMValueRef
lp_build_stencil_op_single(struct lp_build_context * bld,const struct pipe_stencil_state * stencil,enum stencil_op op,LLVMValueRef stencilRef,LLVMValueRef stencilVals)167 lp_build_stencil_op_single(struct lp_build_context *bld,
168 const struct pipe_stencil_state *stencil,
169 enum stencil_op op,
170 LLVMValueRef stencilRef,
171 LLVMValueRef stencilVals)
172
173 {
174 LLVMBuilderRef builder = bld->gallivm->builder;
175 struct lp_type type = bld->type;
176 LLVMValueRef max = lp_build_const_int_vec(bld->gallivm, type, 0xff);
177
178 assert(type.sign);
179
180 unsigned stencil_op;
181 switch (op) {
182 case S_FAIL_OP:
183 stencil_op = stencil->fail_op;
184 break;
185 case Z_FAIL_OP:
186 stencil_op = stencil->zfail_op;
187 break;
188 case Z_PASS_OP:
189 stencil_op = stencil->zpass_op;
190 break;
191 default:
192 assert(0 && "Invalid stencil_op mode");
193 stencil_op = PIPE_STENCIL_OP_KEEP;
194 }
195
196 LLVMValueRef res;
197 switch (stencil_op) {
198 case PIPE_STENCIL_OP_KEEP:
199 res = stencilVals;
200 /* we can return early for this case */
201 return res;
202 case PIPE_STENCIL_OP_ZERO:
203 res = bld->zero;
204 break;
205 case PIPE_STENCIL_OP_REPLACE:
206 res = stencilRef;
207 break;
208 case PIPE_STENCIL_OP_INCR:
209 res = lp_build_add(bld, stencilVals, bld->one);
210 res = lp_build_min(bld, res, max);
211 break;
212 case PIPE_STENCIL_OP_DECR:
213 res = lp_build_sub(bld, stencilVals, bld->one);
214 res = lp_build_max(bld, res, bld->zero);
215 break;
216 case PIPE_STENCIL_OP_INCR_WRAP:
217 res = lp_build_add(bld, stencilVals, bld->one);
218 res = LLVMBuildAnd(builder, res, max, "");
219 break;
220 case PIPE_STENCIL_OP_DECR_WRAP:
221 res = lp_build_sub(bld, stencilVals, bld->one);
222 res = LLVMBuildAnd(builder, res, max, "");
223 break;
224 case PIPE_STENCIL_OP_INVERT:
225 res = LLVMBuildNot(builder, stencilVals, "");
226 res = LLVMBuildAnd(builder, res, max, "");
227 break;
228 default:
229 assert(0 && "bad stencil op mode");
230 res = bld->undef;
231 }
232
233 return res;
234 }
235
236
237 /**
238 * Do the one or two-sided stencil test op/update.
239 */
240 static LLVMValueRef
lp_build_stencil_op(struct lp_build_context * bld,const struct pipe_stencil_state stencil[2],enum stencil_op op,LLVMValueRef stencilRefs[2],LLVMValueRef stencilVals,LLVMValueRef mask,LLVMValueRef front_facing)241 lp_build_stencil_op(struct lp_build_context *bld,
242 const struct pipe_stencil_state stencil[2],
243 enum stencil_op op,
244 LLVMValueRef stencilRefs[2],
245 LLVMValueRef stencilVals,
246 LLVMValueRef mask,
247 LLVMValueRef front_facing)
248
249 {
250 LLVMBuilderRef builder = bld->gallivm->builder;
251 LLVMValueRef res;
252
253 assert(stencil[0].enabled);
254
255 /* do front face op */
256 res = lp_build_stencil_op_single(bld, &stencil[0], op,
257 stencilRefs[0], stencilVals);
258
259 if (stencil[1].enabled && front_facing != NULL) {
260 /* do back face op */
261 LLVMValueRef back_res;
262
263 back_res = lp_build_stencil_op_single(bld, &stencil[1], op,
264 stencilRefs[1], stencilVals);
265
266 res = lp_build_select(bld, front_facing, res, back_res);
267 }
268
269 if (stencil[0].writemask != 0xff ||
270 (stencil[1].enabled && front_facing != NULL &&
271 stencil[1].writemask != 0xff)) {
272 /* mask &= stencil[0].writemask */
273 LLVMValueRef writemask = lp_build_const_int_vec(bld->gallivm, bld->type,
274 stencil[0].writemask);
275 if (stencil[1].enabled &&
276 stencil[1].writemask != stencil[0].writemask &&
277 front_facing != NULL) {
278 LLVMValueRef back_writemask =
279 lp_build_const_int_vec(bld->gallivm, bld->type,
280 stencil[1].writemask);
281 writemask = lp_build_select(bld, front_facing,
282 writemask, back_writemask);
283 }
284
285 mask = LLVMBuildAnd(builder, mask, writemask, "");
286 /* res = (res & mask) | (stencilVals & ~mask) */
287 res = lp_build_select_bitwise(bld, mask, res, stencilVals);
288 } else {
289 /* res = mask ? res : stencilVals */
290 res = lp_build_select(bld, mask, res, stencilVals);
291 }
292
293 return res;
294 }
295
296
297
298 /**
299 * Return a type that matches the depth/stencil format.
300 */
301 struct lp_type
lp_depth_type(const struct util_format_description * format_desc,unsigned length)302 lp_depth_type(const struct util_format_description *format_desc,
303 unsigned length)
304 {
305 struct lp_type type;
306 unsigned z_swizzle;
307
308 assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
309 assert(format_desc->block.width == 1);
310 assert(format_desc->block.height == 1);
311
312 memset(&type, 0, sizeof type);
313 type.width = format_desc->block.bits;
314
315 z_swizzle = format_desc->swizzle[0];
316 if (z_swizzle < 4) {
317 if (format_desc->channel[z_swizzle].type == UTIL_FORMAT_TYPE_FLOAT) {
318 type.floating = true;
319 assert(z_swizzle == 0);
320 assert(format_desc->channel[z_swizzle].size == 32);
321 }
322 else if (format_desc->channel[z_swizzle].type == UTIL_FORMAT_TYPE_UNSIGNED) {
323 assert(format_desc->block.bits <= 32);
324 assert(format_desc->channel[z_swizzle].normalized);
325 if (format_desc->channel[z_swizzle].size < format_desc->block.bits) {
326 /* Prefer signed integers when possible, as SSE has less support
327 * for unsigned comparison;
328 */
329 type.sign = true;
330 }
331 }
332 else
333 assert(0);
334 }
335
336 type.length = length;
337
338 return type;
339 }
340
341
342 /**
343 * Compute bitmask and bit shift to apply to the incoming fragment Z values
344 * and the Z buffer values needed before doing the Z comparison.
345 *
346 * Note that we leave the Z bits in the position that we find them
347 * in the Z buffer (typically 0xffffff00 or 0x00ffffff). That lets us
348 * get by with fewer bit twiddling steps.
349 */
350 static bool
get_z_shift_and_mask(const struct util_format_description * format_desc,unsigned * shift,unsigned * width,unsigned * mask)351 get_z_shift_and_mask(const struct util_format_description *format_desc,
352 unsigned *shift, unsigned *width, unsigned *mask)
353 {
354 unsigned total_bits;
355 unsigned z_swizzle;
356
357 assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
358 assert(format_desc->block.width == 1);
359 assert(format_desc->block.height == 1);
360
361 /* 64bit d/s format is special already extracted 32 bits */
362 total_bits = format_desc->block.bits > 32 ? 32 : format_desc->block.bits;
363
364 z_swizzle = format_desc->swizzle[0];
365
366 if (z_swizzle == PIPE_SWIZZLE_NONE)
367 return false;
368
369 *width = format_desc->channel[z_swizzle].size;
370 /* & 31 is for the same reason as the 32-bit limit above */
371 *shift = format_desc->channel[z_swizzle].shift & 31;
372
373 if (*width == total_bits) {
374 *mask = 0xffffffff;
375 } else {
376 *mask = ((1 << *width) - 1) << *shift;
377 }
378
379 return true;
380 }
381
382
383 /**
384 * Compute bitmask and bit shift to apply to the framebuffer pixel values
385 * to put the stencil bits in the least significant position.
386 * (i.e. 0x000000ff)
387 */
388 static bool
get_s_shift_and_mask(const struct util_format_description * format_desc,unsigned * shift,unsigned * mask)389 get_s_shift_and_mask(const struct util_format_description *format_desc,
390 unsigned *shift, unsigned *mask)
391 {
392 const unsigned s_swizzle = format_desc->swizzle[1];
393
394 if (s_swizzle == PIPE_SWIZZLE_NONE)
395 return false;
396
397 /* just special case 64bit d/s format */
398 if (format_desc->block.bits > 32) {
399 /* XXX big-endian? */
400 assert(format_desc->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT);
401 *shift = 0;
402 *mask = 0xff;
403 return true;
404 }
405
406 *shift = format_desc->channel[s_swizzle].shift;
407 const unsigned sz = format_desc->channel[s_swizzle].size;
408 *mask = (1U << sz) - 1U;
409
410 return true;
411 }
412
413
414 /**
415 * Perform the occlusion test and increase the counter.
416 * Test the depth mask. Add the number of channel which has none zero mask
417 * into the occlusion counter. e.g. maskvalue is {-1, -1, -1, -1}.
418 * The counter will add 4.
419 * TODO: could get that out of the fs loop.
420 *
421 * \param type holds element type of the mask vector.
422 * \param maskvalue is the depth test mask.
423 * \param counter is a pointer of the uint32 counter.
424 */
425 void
lp_build_occlusion_count(struct gallivm_state * gallivm,struct lp_type type,LLVMValueRef maskvalue,LLVMValueRef counter)426 lp_build_occlusion_count(struct gallivm_state *gallivm,
427 struct lp_type type,
428 LLVMValueRef maskvalue,
429 LLVMValueRef counter)
430 {
431 LLVMBuilderRef builder = gallivm->builder;
432 LLVMContextRef context = gallivm->context;
433 LLVMValueRef countmask = lp_build_const_int_vec(gallivm, type, 1);
434 LLVMValueRef count, newcount;
435
436 assert(type.length <= 16);
437 assert(type.floating);
438
439 if (util_get_cpu_caps()->has_sse && type.length == 4) {
440 const char *movmskintr = "llvm.x86.sse.movmsk.ps";
441 const char *popcntintr = "llvm.ctpop.i32";
442 LLVMValueRef bits = LLVMBuildBitCast(builder, maskvalue,
443 lp_build_vec_type(gallivm, type), "");
444 bits = lp_build_intrinsic_unary(builder, movmskintr,
445 LLVMInt32TypeInContext(context), bits);
446 count = lp_build_intrinsic_unary(builder, popcntintr,
447 LLVMInt32TypeInContext(context), bits);
448 count = LLVMBuildZExt(builder, count, LLVMIntTypeInContext(context, 64), "");
449 }
450 else if (util_get_cpu_caps()->has_avx && type.length == 8) {
451 const char *movmskintr = "llvm.x86.avx.movmsk.ps.256";
452 const char *popcntintr = "llvm.ctpop.i32";
453 LLVMValueRef bits = LLVMBuildBitCast(builder, maskvalue,
454 lp_build_vec_type(gallivm, type), "");
455 bits = lp_build_intrinsic_unary(builder, movmskintr,
456 LLVMInt32TypeInContext(context), bits);
457 count = lp_build_intrinsic_unary(builder, popcntintr,
458 LLVMInt32TypeInContext(context), bits);
459 count = LLVMBuildZExt(builder, count, LLVMIntTypeInContext(context, 64), "");
460 } else {
461 LLVMValueRef countv = LLVMBuildAnd(builder, maskvalue, countmask, "countv");
462 LLVMTypeRef counttype = LLVMIntTypeInContext(context, type.length * 8);
463 LLVMTypeRef i8vntype = LLVMVectorType(LLVMInt8TypeInContext(context), type.length * 4);
464 LLVMValueRef shufflev, countd;
465 LLVMValueRef shuffles[16];
466 const char *popcntintr = NULL;
467
468 countv = LLVMBuildBitCast(builder, countv, i8vntype, "");
469
470 for (unsigned i = 0; i < type.length; i++) {
471 #if UTIL_ARCH_LITTLE_ENDIAN
472 shuffles[i] = lp_build_const_int32(gallivm, 4*i);
473 #else
474 shuffles[i] = lp_build_const_int32(gallivm, (4*i) + 3);
475 #endif
476 }
477
478 shufflev = LLVMConstVector(shuffles, type.length);
479 countd = LLVMBuildShuffleVector(builder, countv, LLVMGetUndef(i8vntype), shufflev, "");
480 countd = LLVMBuildBitCast(builder, countd, counttype, "countd");
481
482 /*
483 * XXX FIXME
484 * this is bad on cpus without popcount (on x86 supported by intel
485 * nehalem, amd barcelona, and up - not tied to sse42).
486 * Would be much faster to just sum the 4 elements of the vector with
487 * some horizontal add (shuffle/add/shuffle/add after the initial and).
488 */
489 switch (type.length) {
490 case 4:
491 popcntintr = "llvm.ctpop.i32";
492 break;
493 case 8:
494 popcntintr = "llvm.ctpop.i64";
495 break;
496 case 16:
497 popcntintr = "llvm.ctpop.i128";
498 break;
499 default:
500 assert(0);
501 }
502 count = lp_build_intrinsic_unary(builder, popcntintr, counttype, countd);
503
504 if (type.length > 8) {
505 count = LLVMBuildTrunc(builder, count, LLVMIntTypeInContext(context, 64), "");
506 }
507 else if (type.length < 8) {
508 count = LLVMBuildZExt(builder, count, LLVMIntTypeInContext(context, 64), "");
509 }
510 }
511 newcount = LLVMBuildLoad2(builder, LLVMTypeOf(count), counter, "origcount");
512 newcount = LLVMBuildAdd(builder, newcount, count, "newcount");
513 LLVMBuildStore(builder, newcount, counter);
514 }
515
516
517 /**
518 * Load depth/stencil values.
519 * The stored values are linear, swizzle them.
520 *
521 * \param type the data type of the fragment depth/stencil values
522 * \param format_desc description of the depth/stencil surface
523 * \param is_1d whether this resource has only one dimension
524 * \param loop_counter the current loop iteration
525 * \param depth_ptr pointer to the depth/stencil values of this 4x4 block
526 * \param depth_stride stride of the depth/stencil buffer
527 * \param z_fb contains z values loaded from fb (may include padding)
528 * \param s_fb contains s values loaded from fb (may include padding)
529 */
530 void
lp_build_depth_stencil_load_swizzled(struct gallivm_state * gallivm,struct lp_type z_src_type,const struct util_format_description * format_desc,bool is_1d,LLVMValueRef depth_ptr,LLVMValueRef depth_stride,LLVMValueRef * z_fb,LLVMValueRef * s_fb,LLVMValueRef loop_counter)531 lp_build_depth_stencil_load_swizzled(struct gallivm_state *gallivm,
532 struct lp_type z_src_type,
533 const struct util_format_description *format_desc,
534 bool is_1d,
535 LLVMValueRef depth_ptr,
536 LLVMValueRef depth_stride,
537 LLVMValueRef *z_fb,
538 LLVMValueRef *s_fb,
539 LLVMValueRef loop_counter)
540 {
541 LLVMBuilderRef builder = gallivm->builder;
542 LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH / 4];
543 LLVMValueRef depth_offset1, depth_offset2;
544 const unsigned depth_bytes = format_desc->block.bits / 8;
545 struct lp_type zs_type = lp_depth_type(format_desc, z_src_type.length);
546
547 struct lp_type zs_load_type = zs_type;
548 zs_load_type.length = zs_load_type.length / 2;
549
550 LLVMTypeRef zs_dst_type = lp_build_vec_type(gallivm, zs_load_type);
551
552 if (z_src_type.length == 4) {
553 LLVMValueRef looplsb = LLVMBuildAnd(builder, loop_counter,
554 lp_build_const_int32(gallivm, 1), "");
555 LLVMValueRef loopmsb = LLVMBuildAnd(builder, loop_counter,
556 lp_build_const_int32(gallivm, 2), "");
557 LLVMValueRef offset2 = LLVMBuildMul(builder, loopmsb,
558 depth_stride, "");
559 depth_offset1 = LLVMBuildMul(builder, looplsb,
560 lp_build_const_int32(gallivm, depth_bytes * 2), "");
561 depth_offset1 = LLVMBuildAdd(builder, depth_offset1, offset2, "");
562
563 /* just concatenate the loaded 2x2 values into 4-wide vector */
564 for (unsigned i = 0; i < 4; i++) {
565 shuffles[i] = lp_build_const_int32(gallivm, i);
566 }
567 } else {
568 unsigned i;
569 LLVMValueRef loopx2 = LLVMBuildShl(builder, loop_counter,
570 lp_build_const_int32(gallivm, 1), "");
571 assert(z_src_type.length == 8);
572 depth_offset1 = LLVMBuildMul(builder, loopx2, depth_stride, "");
573 /*
574 * We load 2x4 values, and need to swizzle them (order
575 * 0,1,4,5,2,3,6,7) - not so hot with avx unfortunately.
576 */
577 for (i = 0; i < 8; i++) {
578 shuffles[i] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2);
579 }
580 }
581
582 depth_offset2 = LLVMBuildAdd(builder, depth_offset1, depth_stride, "");
583
584 /* Load current z/stencil values from z/stencil buffer */
585 LLVMTypeRef load_ptr_type = LLVMPointerType(zs_dst_type, 0);
586 LLVMTypeRef int8_type = LLVMInt8TypeInContext(gallivm->context);
587 LLVMValueRef zs_dst_ptr =
588 LLVMBuildGEP2(builder, int8_type, depth_ptr, &depth_offset1, 1, "");
589 zs_dst_ptr = LLVMBuildBitCast(builder, zs_dst_ptr, load_ptr_type, "");
590 LLVMValueRef zs_dst1 = LLVMBuildLoad2(builder, zs_dst_type, zs_dst_ptr, "");
591 LLVMValueRef zs_dst2;
592 if (is_1d) {
593 zs_dst2 = lp_build_undef(gallivm, zs_load_type);
594 } else {
595 zs_dst_ptr = LLVMBuildGEP2(builder, int8_type, depth_ptr, &depth_offset2, 1, "");
596 zs_dst_ptr = LLVMBuildBitCast(builder, zs_dst_ptr, load_ptr_type, "");
597 zs_dst2 = LLVMBuildLoad2(builder, zs_dst_type, zs_dst_ptr, "");
598 }
599
600 *z_fb = LLVMBuildShuffleVector(builder, zs_dst1, zs_dst2,
601 LLVMConstVector(shuffles, zs_type.length), "");
602 *s_fb = *z_fb;
603
604 if (format_desc->block.bits == 8) {
605 /* Extend stencil-only 8 bit values (S8_UINT) */
606 *s_fb = LLVMBuildZExt(builder, *s_fb,
607 lp_build_int_vec_type(gallivm, z_src_type), "");
608 }
609
610 if (format_desc->block.bits < z_src_type.width) {
611 /* Extend destination ZS values (e.g., when reading from Z16_UNORM) */
612 *z_fb = LLVMBuildZExt(builder, *z_fb,
613 lp_build_int_vec_type(gallivm, z_src_type), "");
614 }
615
616 else if (format_desc->block.bits > 32) {
617 /* rely on llvm to handle too wide vector we have here nicely */
618 struct lp_type typex2 = zs_type;
619 struct lp_type s_type = zs_type;
620 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH / 4];
621 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH / 4];
622 LLVMValueRef tmp;
623
624 typex2.width = typex2.width / 2;
625 typex2.length = typex2.length * 2;
626 s_type.width = s_type.width / 2;
627 s_type.floating = 0;
628
629 tmp = LLVMBuildBitCast(builder, *z_fb,
630 lp_build_vec_type(gallivm, typex2), "");
631
632 for (unsigned i = 0; i < zs_type.length; i++) {
633 shuffles1[i] = lp_build_const_int32(gallivm, i * 2);
634 shuffles2[i] = lp_build_const_int32(gallivm, i * 2 + 1);
635 }
636 *z_fb = LLVMBuildShuffleVector(builder, tmp, tmp,
637 LLVMConstVector(shuffles1, zs_type.length), "");
638 *s_fb = LLVMBuildShuffleVector(builder, tmp, tmp,
639 LLVMConstVector(shuffles2, zs_type.length), "");
640 *s_fb = LLVMBuildBitCast(builder, *s_fb,
641 lp_build_vec_type(gallivm, s_type), "");
642 lp_build_name(*s_fb, "s_dst");
643 }
644
645 lp_build_name(*z_fb, "z_dst");
646 lp_build_name(*s_fb, "s_dst");
647 lp_build_name(*z_fb, "z_dst");
648 }
649
650
/**
 * Store depth/stencil values.
 * Incoming values are swizzled (typically n 2x2 quads), stored linear.
 * If there's a mask it will do select/store otherwise just store.
 *
 * \param z_src_type the data type of the fragment depth/stencil values
 * \param format_desc description of the depth/stencil surface
 * \param is_1d whether this resource has only one dimension
 * \param mask_value the alive/dead pixel mask for the quad (vector);
 *                   may be NULL, in which case all values are stored
 * \param z_fb z values read from fb (with padding)
 * \param s_fb s values read from fb (with padding)
 * \param loop_counter the current loop iteration
 * \param depth_ptr pointer to the depth/stencil values of this 4x4 block
 * \param depth_stride stride of the depth/stencil buffer
 * \param z_value the depth values to store (with padding)
 * \param s_value the stencil values to store (with padding)
 */
void
lp_build_depth_stencil_write_swizzled(struct gallivm_state *gallivm,
                                      struct lp_type z_src_type,
                                      const struct util_format_description *format_desc,
                                      bool is_1d,
                                      LLVMValueRef mask_value,
                                      LLVMValueRef z_fb,
                                      LLVMValueRef s_fb,
                                      LLVMValueRef loop_counter,
                                      LLVMValueRef depth_ptr,
                                      LLVMValueRef depth_stride,
                                      LLVMValueRef z_value,
                                      LLVMValueRef s_value)
{
   struct lp_build_context z_bld;
   LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH / 4];
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef zs_dst1, zs_dst2;
   LLVMValueRef zs_dst_ptr1, zs_dst_ptr2;
   LLVMValueRef depth_offset1, depth_offset2;
   LLVMTypeRef load_ptr_type;
   unsigned depth_bytes = format_desc->block.bits / 8;
   struct lp_type zs_type = lp_depth_type(format_desc, z_src_type.length);
   struct lp_type z_type = zs_type;
   struct lp_type zs_load_type = zs_type;

   /* stores go out as two half-length rows, mirroring the load path */
   zs_load_type.length = zs_load_type.length / 2;
   load_ptr_type = LLVMPointerType(lp_build_vec_type(gallivm, zs_load_type), 0);

   /* intermediate type: depth-format layout but at source width */
   z_type.width = z_src_type.width;

   lp_build_context_init(&z_bld, gallivm, z_type);

   /*
    * This is far from ideal, at least for late depth write we should do this
    * outside the fs loop to avoid all the swizzle stuff.
    */
   if (z_src_type.length == 4) {
      /* loop counter selects the 2x2 quad within the 4x4 block:
       * lsb -> horizontal offset, msb -> row pair offset */
      LLVMValueRef looplsb = LLVMBuildAnd(builder, loop_counter,
                                          lp_build_const_int32(gallivm, 1), "");
      LLVMValueRef loopmsb = LLVMBuildAnd(builder, loop_counter,
                                          lp_build_const_int32(gallivm, 2), "");
      LLVMValueRef offset2 = LLVMBuildMul(builder, loopmsb,
                                          depth_stride, "");
      depth_offset1 = LLVMBuildMul(builder, looplsb,
                                   lp_build_const_int32(gallivm, depth_bytes * 2), "");
      depth_offset1 = LLVMBuildAdd(builder, depth_offset1, offset2, "");
   } else {
      LLVMValueRef loopx2 = LLVMBuildShl(builder, loop_counter,
                                         lp_build_const_int32(gallivm, 1), "");
      assert(z_src_type.length == 8);
      depth_offset1 = LLVMBuildMul(builder, loopx2, depth_stride, "");
      /*
       * We load 2x4 values, and need to swizzle them (order
       * 0,1,4,5,2,3,6,7) - not so hot with avx unfortunately.
       */
      for (unsigned i = 0; i < 8; i++) {
         shuffles[i] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2);
      }
   }

   /* second store row is one stride below the first */
   depth_offset2 = LLVMBuildAdd(builder, depth_offset1, depth_stride, "");

   LLVMTypeRef int8_type = LLVMInt8TypeInContext(gallivm->context);
   zs_dst_ptr1 = LLVMBuildGEP2(builder, int8_type, depth_ptr, &depth_offset1, 1, "");
   zs_dst_ptr1 = LLVMBuildBitCast(builder, zs_dst_ptr1, load_ptr_type, "");
   zs_dst_ptr2 = LLVMBuildGEP2(builder, int8_type, depth_ptr, &depth_offset2, 1, "");
   zs_dst_ptr2 = LLVMBuildBitCast(builder, zs_dst_ptr2, load_ptr_type, "");

   if (format_desc->block.bits > 32) {
      /* 64-bit d/s: stencil must share z's vector type for the selects below */
      s_value = LLVMBuildBitCast(builder, s_value, z_bld.vec_type, "");
   }

   if (mask_value) {
      /* keep framebuffer values for dead pixels */
      z_value = lp_build_select(&z_bld, mask_value, z_value, z_fb);
      if (format_desc->block.bits > 32) {
         s_fb = LLVMBuildBitCast(builder, s_fb, z_bld.vec_type, "");
         s_value = lp_build_select(&z_bld, mask_value, s_value, s_fb);
      }
   }

   if (zs_type.width < z_src_type.width) {
      /* Truncate ZS values (e.g., when writing to Z16_UNORM) */
      z_value = LLVMBuildTrunc(builder, z_value,
                               lp_build_int_vec_type(gallivm, zs_type), "");
   }

   if (format_desc->block.bits <= 32) {
      if (z_src_type.length == 4) {
         /* split the 4-wide vector into the two 2-wide store rows */
         zs_dst1 = lp_build_extract_range(gallivm, z_value, 0, 2);
         zs_dst2 = lp_build_extract_range(gallivm, z_value, 2, 2);
      } else {
         /* undo the 0,1,4,5,2,3,6,7 swizzle computed above */
         assert(z_src_type.length == 8);
         zs_dst1 = LLVMBuildShuffleVector(builder, z_value, z_value,
                                          LLVMConstVector(&shuffles[0],
                                                          zs_load_type.length), "");
         zs_dst2 = LLVMBuildShuffleVector(builder, z_value, z_value,
                                          LLVMConstVector(&shuffles[4],
                                                          zs_load_type.length), "");
      }
   } else {
      /* 64-bit d/s: re-interleave z and stencil back into z/s pairs */
      if (z_src_type.length == 4) {
         zs_dst1 = lp_build_interleave2(gallivm, z_type,
                                        z_value, s_value, 0);
         zs_dst2 = lp_build_interleave2(gallivm, z_type,
                                        z_value, s_value, 1);
      } else {
         /* shadows the outer shuffles[] on purpose: interleave indices
          * pick alternately from z_value and s_value */
         LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH / 2];
         assert(z_src_type.length == 8);
         for (unsigned i = 0; i < 8; i++) {
            shuffles[i*2] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2);
            shuffles[i*2+1] = lp_build_const_int32(gallivm, (i&1) + (i&2) * 2 + (i&4) / 2 +
                                                   z_src_type.length);
         }
         zs_dst1 = LLVMBuildShuffleVector(builder, z_value, s_value,
                                          LLVMConstVector(&shuffles[0],
                                                          z_src_type.length), "");
         zs_dst2 = LLVMBuildShuffleVector(builder, z_value, s_value,
                                          LLVMConstVector(&shuffles[8],
                                                          z_src_type.length), "");
      }
      zs_dst1 = LLVMBuildBitCast(builder, zs_dst1,
                                 lp_build_vec_type(gallivm, zs_load_type), "");
      zs_dst2 = LLVMBuildBitCast(builder, zs_dst2,
                                 lp_build_vec_type(gallivm, zs_load_type), "");
   }

   LLVMBuildStore(builder, zs_dst1, zs_dst_ptr1);
   if (!is_1d) {
      /* 1D resources only have the first row */
      LLVMBuildStore(builder, zs_dst2, zs_dst_ptr2);
   }
}
800
801
802 /**
803 * Generate code for performing depth and/or stencil tests.
804 * We operate on a vector of values (typically n 2x2 quads).
805 *
806 * \param depth the depth test state
807 * \param stencil the front/back stencil state
808 * \param type the data type of the fragment depth/stencil values
809 * \param format_desc description of the depth/stencil surface
810 * \param mask the alive/dead pixel mask for the quad (vector)
811 * \param cov_mask coverage mask
812 * \param stencil_refs the front/back stencil ref values (scalar)
813 * \param z_src the incoming depth/stencil values (n 2x2 quad values, float32)
814 * \param zs_dst the depth/stencil values in framebuffer
815 * \param face contains boolean value indicating front/back facing polygon
816 */
void
lp_build_depth_stencil_test(struct gallivm_state *gallivm,
                            const struct lp_depth_state *depth,
                            const struct pipe_stencil_state stencil[2],
                            struct lp_type z_src_type,
                            const struct util_format_description *format_desc,
                            struct lp_build_mask_context *mask,
                            LLVMValueRef *cov_mask,
                            LLVMValueRef stencil_refs[2],
                            LLVMValueRef z_src,
                            LLVMValueRef z_fb,
                            LLVMValueRef s_fb,
                            LLVMValueRef face,
                            LLVMValueRef *z_value,
                            LLVMValueRef *s_value,
                            bool do_branch,
                            bool restrict_depth)
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type z_type;
   struct lp_build_context z_bld;
   struct lp_build_context s_bld;
   struct lp_type s_type;
   unsigned z_shift = 0, z_width = 0, z_mask = 0;
   LLVMValueRef z_dst = NULL;
   LLVMValueRef stencil_vals = NULL;
   LLVMValueRef z_bitmask = NULL, stencil_shift = NULL;
   LLVMValueRef z_pass = NULL, s_pass_mask = NULL;
   /* With a NULL mask context (multisample path, see the end of this
    * function) the live-fragment mask is tracked through *cov_mask instead.
    */
   LLVMValueRef current_mask = mask ? lp_build_mask_value(mask) : *cov_mask;
   LLVMValueRef front_facing = NULL;
   bool have_z, have_s;

   /*
    * Depths are expected to be between 0 and 1, even if they are stored in
    * floats. Setting these bits here will ensure that the lp_build_conv() call
    * below won't try to unnecessarily clamp the incoming values.
    * If depths are expected outside 0..1 don't set these bits.
    */
   if (z_src_type.floating) {
      if (restrict_depth) {
         z_src_type.sign = false;
         z_src_type.norm = true;
      }
   } else {
      assert(!z_src_type.sign);
      assert(z_src_type.norm);
   }

   /* Pick the type matching the depth-stencil format. */
   z_type = lp_depth_type(format_desc, z_src_type.length);

   /* Pick the intermediate type for depth operations. */
   z_type.width = z_src_type.width;
   assert(z_type.length == z_src_type.length);

   /* FIXME: for non-float depth/stencil might generate better code
    * if we'd always split it up to use 128bit operations.
    * For stencil we'd almost certainly want to pack to 8xi16 values,
    * for z just run twice.
    */

   /* Sanity checking */
   {
      ASSERTED const unsigned z_swizzle = format_desc->swizzle[0];
      ASSERTED const unsigned s_swizzle = format_desc->swizzle[1];

      assert(z_swizzle != PIPE_SWIZZLE_NONE ||
             s_swizzle != PIPE_SWIZZLE_NONE);

      assert(depth->enabled || stencil[0].enabled);

      assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
      assert(format_desc->block.width == 1);
      assert(format_desc->block.height == 1);

      if (stencil[0].enabled) {
         assert(s_swizzle < 4);
         assert(format_desc->channel[s_swizzle].type == UTIL_FORMAT_TYPE_UNSIGNED);
         assert(format_desc->channel[s_swizzle].pure_integer);
         assert(!format_desc->channel[s_swizzle].normalized);
         assert(format_desc->channel[s_swizzle].size == 8);
      }

      if (depth->enabled) {
         assert(z_swizzle < 4);
         if (z_type.floating) {
            assert(z_swizzle == 0);
            assert(format_desc->channel[z_swizzle].type ==
                   UTIL_FORMAT_TYPE_FLOAT);
            assert(format_desc->channel[z_swizzle].size == 32);
         } else {
            assert(format_desc->channel[z_swizzle].type ==
                   UTIL_FORMAT_TYPE_UNSIGNED);
            assert(format_desc->channel[z_swizzle].normalized);
            assert(!z_type.fixed);
         }
      }
   }


   /* Setup build context for Z vals */
   lp_build_context_init(&z_bld, gallivm, z_type);

   /* Setup build context for stencil vals */
   s_type = lp_int_type(z_type);
   lp_build_context_init(&s_bld, gallivm, s_type);

   /* Compute and apply the Z/stencil bitmasks and shifts.
    */
   {
      unsigned s_shift, s_mask;

      z_dst = z_fb;
      stencil_vals = s_fb;

      have_z = get_z_shift_and_mask(format_desc, &z_shift, &z_width, &z_mask);
      have_s = get_s_shift_and_mask(format_desc, &s_shift, &s_mask);

      if (have_z) {
         if (z_mask != 0xffffffff) {
            z_bitmask = lp_build_const_int_vec(gallivm, z_type, z_mask);
         }

         /*
          * Align the framebuffer Z 's LSB to the right.
          */
         if (z_shift) {
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, z_type, z_shift);
            z_dst = LLVMBuildLShr(builder, z_dst, shift, "z_dst");
         } else if (z_bitmask) {
            z_dst = LLVMBuildAnd(builder, z_dst, z_bitmask, "z_dst");
         } else {
            lp_build_name(z_dst, "z_dst");
         }
      }

      if (have_s) {
         if (s_shift) {
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, s_type, s_shift);
            stencil_vals = LLVMBuildLShr(builder, stencil_vals, shift, "");
            stencil_shift = shift;  /* used below to shift the result back up */
         }

         if (s_mask != 0xffffffff) {
            LLVMValueRef mask = lp_build_const_int_vec(gallivm, s_type, s_mask);
            stencil_vals = LLVMBuildAnd(builder, stencil_vals, mask, "");
         }

         lp_build_name(stencil_vals, "s_dst");
      }
   }

   if (stencil[0].enabled) {

      /* Build the front/back-facing select mask needed for two-sided
       * stencil; face is a scalar i32, expanded to an all-ones/all-zeros
       * vector in s_bld's type.
       */
      if (face) {
         if (0) {
            /*
             * XXX: the scalar expansion below produces atrocious code
             * (basically producing a 64bit scalar value, then moving the 2
             * 32bit pieces separately to simd, plus 4 shuffles, which is
             * seriously lame). But the scalar-simd transitions are always
             * tricky, so no big surprise there.
             * This here would be way better, however llvm has some serious
             * trouble later using it in the select, probably because it will
             * recognize the expression as constant and move the simd value
             * away (out of the loop) - and then it will suddenly try
             * constructing i1 high-bit masks out of it later...
             * (Try piglit stencil-twoside.)
             * Note this is NOT due to using SExt/Trunc, it fails exactly the
             * same even when using native compare/select.
             * I cannot reproduce this problem when using stand-alone compiler
             * though, suggesting some problem with optimization passes...
             * (With stand-alone compilation, the construction of this mask
             * value, no matter if the easy 3 instruction here or the complex
             * 16+ one below, never gets separated from where it's used.)
             * The scalar code still has the same problem, but the generated
             * code looks a bit better at least for some reason, even if
             * mostly by luck (the fundamental issue clearly is the same).
             */
            front_facing = lp_build_broadcast(gallivm, s_bld.vec_type, face);
            /* front_facing = face != 0 ? ~0 : 0 */
            front_facing = lp_build_compare(gallivm, s_bld.type,
                                            PIPE_FUNC_NOTEQUAL,
                                            front_facing, s_bld.zero);
         } else {
            LLVMValueRef zero = lp_build_const_int32(gallivm, 0);

            /* front_facing = face != 0 ? ~0 : 0 */
            front_facing = LLVMBuildICmp(builder, LLVMIntNE, face, zero, "");
            /* Sign-extend the scalar i1 to a wide integer covering the whole
             * vector, then bitcast it to the simd vector type (all lanes end
             * up all-ones or all-zeros).
             */
            front_facing = LLVMBuildSExt(builder, front_facing,
                                         LLVMIntTypeInContext(gallivm->context,
                                                s_bld.type.length*s_bld.type.width),
                                         "");
            front_facing = LLVMBuildBitCast(builder, front_facing,
                                            s_bld.int_vec_type, "");

         }
      }

      s_pass_mask = lp_build_stencil_test(&s_bld, stencil,
                                          stencil_refs, stencil_vals,
                                          front_facing);

      /* apply stencil-fail operator */
      {
         LLVMValueRef s_fail_mask = lp_build_andnot(&s_bld, current_mask, s_pass_mask);
         stencil_vals = lp_build_stencil_op(&s_bld, stencil, S_FAIL_OP,
                                            stencil_refs, stencil_vals,
                                            s_fail_mask, front_facing);
      }
   }

   if (depth->enabled) {
      /*
       * Convert fragment Z to the desired type, aligning the LSB to the right.
       */

      assert(z_type.width == z_src_type.width);
      assert(z_type.length == z_src_type.length);
      assert(lp_check_value(z_src_type, z_src));
      if (z_src_type.floating) {
         /*
          * Convert from floating point values
          */

         if (!z_type.floating) {
            z_src = lp_build_clamped_float_to_unsigned_norm(gallivm,
                                                            z_src_type,
                                                            z_width,
                                                            z_src);
         }
      } else {
         /*
          * Convert from unsigned normalized values.
          */

         assert(!z_src_type.sign);
         assert(!z_src_type.fixed);
         assert(z_src_type.norm);
         assert(!z_type.floating);
         /* Drop excess fractional bits so the source matches the buffer's
          * depth width before comparing.
          */
         if (z_src_type.width > z_width) {
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, z_src_type,
                                                        z_src_type.width - z_width);
            z_src = LLVMBuildLShr(builder, z_src, shift, "");
         }
      }
      assert(lp_check_value(z_type, z_src));

      lp_build_name(z_src, "z_src");

      /* compare src Z to dst Z, returning 'pass' mask */
      z_pass = lp_build_cmp(&z_bld, depth->func, z_src, z_dst);

      /* mask off bits that failed stencil test */
      if (s_pass_mask) {
         current_mask = LLVMBuildAnd(builder, current_mask, s_pass_mask, "");
      }

      if (!stencil[0].enabled && mask) {
         /* We can potentially skip all remaining operations here, but only
          * if stencil is disabled because we still need to update the stencil
          * buffer values.  Don't need to update Z buffer values.
          */
         lp_build_mask_update(mask, z_pass);

         if (do_branch) {
            lp_build_mask_check(mask);
         }
      }

      if (depth->writemask) {
         LLVMValueRef z_pass_mask;

         /* mask off bits that failed Z test */
         z_pass_mask = LLVMBuildAnd(builder, current_mask, z_pass, "");

         /* Mix the old and new Z buffer values.
          * z_dst[i] = zselectmask[i] ? z_src[i] : z_dst[i]
          */
         z_dst = lp_build_select(&z_bld, z_pass_mask, z_src, z_dst);
      }

      if (stencil[0].enabled) {
         /* update stencil buffer values according to z pass/fail result */
         LLVMValueRef z_fail_mask, z_pass_mask;

         /* apply Z-fail operator */
         z_fail_mask = lp_build_andnot(&s_bld, current_mask, z_pass);
         stencil_vals = lp_build_stencil_op(&s_bld, stencil, Z_FAIL_OP,
                                            stencil_refs, stencil_vals,
                                            z_fail_mask, front_facing);

         /* apply Z-pass operator */
         z_pass_mask = LLVMBuildAnd(builder, current_mask, z_pass, "");
         stencil_vals = lp_build_stencil_op(&s_bld, stencil, Z_PASS_OP,
                                            stencil_refs, stencil_vals,
                                            z_pass_mask, front_facing);
      }
   } else {
      /* No depth test: apply Z-pass operator to stencil buffer values which
       * passed the stencil test.
       * (stencil[0].enabled must be set here - see the sanity-check assert
       * above - so s_pass_mask is non-NULL.)
       */
      s_pass_mask = LLVMBuildAnd(builder, current_mask, s_pass_mask, "");
      stencil_vals = lp_build_stencil_op(&s_bld, stencil, Z_PASS_OP,
                                         stencil_refs, stencil_vals,
                                         s_pass_mask, front_facing);
   }

   /* Put Z and stencil bits in the right place */
   if (have_z && z_shift) {
      LLVMValueRef shift = lp_build_const_int_vec(gallivm, z_type, z_shift);
      z_dst = LLVMBuildShl(builder, z_dst, shift, "");
   }
   if (stencil_vals && stencil_shift)
      stencil_vals = LLVMBuildShl(builder, stencil_vals,
                                  stencil_shift, "");

   /* Finally, merge the z/stencil values */
   if (format_desc->block.bits <= 32) {
      /* z and stencil share a single word; return the combined value through
       * both outputs so the caller does one store.
       */
      if (have_z && have_s)
         *z_value = LLVMBuildOr(builder, z_dst, stencil_vals, "");
      else if (have_z)
         *z_value = z_dst;
      else
         *z_value = stencil_vals;
      *s_value = *z_value;
   } else {
      *z_value = z_dst;
      *s_value = stencil_vals;
   }

   /* Fold the test results into the live-fragment tracking: either the mask
    * context, or the coverage mask for the multisample path.
    */
   if (mask) {
      if (s_pass_mask)
         lp_build_mask_update(mask, s_pass_mask);

      /* z_pass was already folded in above when stencil was disabled */
      if (depth->enabled && stencil[0].enabled)
         lp_build_mask_update(mask, z_pass);
   } else {
      LLVMValueRef tmp_mask = *cov_mask;
      if (s_pass_mask)
         tmp_mask = LLVMBuildAnd(builder, tmp_mask, s_pass_mask, "");

      /* for multisample we don't do the stencil optimisation so update always */
      if (depth->enabled)
         tmp_mask = LLVMBuildAnd(builder, tmp_mask, z_pass, "");
      *cov_mask = tmp_mask;
   }
}
1165
1166