/*
 * Copyright © Microsoft Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "dxil_nir_lower_int_samplers.h"
#include "nir_builder.h"
#include "nir_builtin_builder.h"

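/* Lowering options handed to nir_shader_lower_instructions(): one wrap state
 * and one swizzle state per sampler slot, plus the implementation's maximum
 * LOD bias (log2 of the maximum texture size). */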
typedef struct {
   unsigned n_texture_states;
   dxil_wrap_sampler_state* wrap_states;
   dxil_texture_swizzle_state* tex_swizzles;
   float max_bias;
} sampler_states;

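/* Filter callback: only sample-style ops (tex, txb, txl, txd) that return
 * integer data need to be rewritten into texel fetches. */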
static bool
lower_sample_to_txf_for_integer_tex_filter(const nir_instr *instr,
                                           const void *_options)
{
   if (instr->type != nir_instr_type_tex)
      return false;

   nir_tex_instr *tex = nir_instr_as_tex(instr);
   if (tex->op != nir_texop_tex &&
       tex->op != nir_texop_txb &&
       tex->op != nir_texop_txl &&
       tex->op != nir_texop_txd)
      return false;

   return (tex->dest_type & (nir_type_int | nir_type_uint));
}

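/* Query the LOD that would be used for the given sample instruction by
 * emitting a nir_texop_lod with the same texture/sampler sources and the
 * coordinate stripped of its array index; only the x channel of the result
 * is used. */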
static nir_def *
dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
{
   nir_tex_instr *tql;

   unsigned num_srcs = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_coord ||
          tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle ||
          tex->src[i].src_type == nir_tex_src_sampler_handle)
         num_srcs++;
   }

   tql = nir_tex_instr_create(b->shader, num_srcs);
   tql->op = nir_texop_lod;
   unsigned coord_components = tex->coord_components;
   if (tex->is_array)
      --coord_components;

   tql->coord_components = coord_components;
   tql->sampler_dim = tex->sampler_dim;
   tql->is_shadow = tex->is_shadow;
   tql->is_new_style_shadow = tex->is_new_style_shadow;
   tql->texture_index = tex->texture_index;
   tql->sampler_index = tex->sampler_index;
   tql->dest_type = nir_type_float32;

   /* The coordinate needs special handling because we might have
    * to strip the array index. Don't clutter the code with an additional
    * check for is_array though; in the worst case we create an additional
    * move that the optimization will remove later again. */
   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   nir_def *ssa_src = nir_trim_vector(b, tex->src[coord_index].src.ssa,
                                      coord_components);
   tql->src[0].src = nir_src_for_ssa(ssa_src);
   tql->src[0].src_type = nir_tex_src_coord;

   unsigned idx = 1;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle ||
          tex->src[i].src_type == nir_tex_src_sampler_handle) {
         tql->src[idx].src = nir_src_for_ssa(tex->src[i].src.ssa);
         tql->src[idx].src_type = tex->src[i].src_type;
         idx++;
      }
   }

   nir_def_init(&tql->instr, &tql->def, 2, 32);
   nir_builder_instr_insert(b, &tql->instr);

   /* DirectX LOD only has a value in the x channel */
   return nir_channel(b, &tql->def, 0);
}

typedef struct {
   nir_def *coords;
   nir_def *use_border_color;
} wrap_result_t;

typedef struct {
   nir_def *lod;
   nir_def *size;
   int ncoord_comp;
   wrap_result_t wrap[3];
} wrap_lower_param_t;

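/* The wrap_* helpers below operate on unnormalized, floating-point texel
 * coordinates (the caller has already scaled normalized coordinates by the
 * size of the selected mip level). Each one rewrites the coordinate for its
 * wrap mode and, for the border-clamping modes, flags when the border color
 * must be returned instead of a fetched texel. */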
static void
wrap_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
{
   /* clamp(coord, 0, size - 1) */
   wrap_params->coords = nir_fmin(b, nir_fadd_imm(b, size, -1.0f),
                                  nir_fmax(b, wrap_params->coords, nir_imm_float(b, 0.0f)));
}

static void
wrap_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
{
   /* mod(coord, size)
    * This instruction must be exact, otherwise certain sizes result in
    * incorrect sampling */
   wrap_params->coords = nir_fmod(b, wrap_params->coords, size);
   nir_instr_as_alu(wrap_params->coords->parent_instr)->exact = true;
}

static nir_def *
mirror(nir_builder *b, nir_def *coord)
{
   /* coord if >= 0, otherwise -(1 + coord) */
   return nir_bcsel(b, nir_fge_imm(b, coord, 0.0f), coord,
                    nir_fneg(b, nir_fadd_imm(b, coord, 1.0f)));
}

static void
wrap_mirror_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
{
   /* (size − 1) − mirror(mod(coord, 2 * size) − size) */
   nir_def *coord_mod2size = nir_fmod(b, wrap_params->coords, nir_fmul_imm(b, size, 2.0f));
   nir_instr_as_alu(coord_mod2size->parent_instr)->exact = true;
   nir_def *a = nir_fsub(b, coord_mod2size, size);
   wrap_params->coords = nir_fsub(b, nir_fadd_imm(b, size, -1.0f), mirror(b, a));
}

static void
wrap_mirror_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
{
   /* clamp(mirror(coord), 0, size - 1) */
   wrap_params->coords = nir_fmin(b, nir_fadd_imm(b, size, -1.0f),
                                  nir_fmax(b, mirror(b, wrap_params->coords), nir_imm_float(b, 0.0f)));
}

static void
wrap_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
{
   nir_def *is_low = nir_flt_imm(b, wrap_params->coords, 0.0);
   nir_def *is_high = nir_fge(b, wrap_params->coords, size);
   wrap_params->use_border_color = nir_ior(b, is_low, is_high);
}

static void
wrap_mirror_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
{
   /* We have to take care of the boundaries */
   nir_def *is_low = nir_flt(b, wrap_params->coords, nir_fmul_imm(b, size, -1.0));
   nir_def *is_high = nir_flt(b, nir_fmul_imm(b, size, 2.0), wrap_params->coords);
   wrap_params->use_border_color = nir_ior(b, is_low, is_high);

   /* Within the boundaries this acts like mirror_repeat */
   wrap_mirror_repeat(b, wrap_params, size);
}

static wrap_result_t
wrap_coords(nir_builder *b, nir_def *coords, enum dxil_tex_wrap wrap,
            nir_def *size)
{
   wrap_result_t result = {coords, nir_imm_false(b)};

   switch (wrap) {
   case DXIL_TEX_WRAP_CLAMP_TO_EDGE:
      wrap_clamp_to_edge(b, &result, size);
      break;
   case DXIL_TEX_WRAP_REPEAT:
      wrap_repeat(b, &result, size);
      break;
   case DXIL_TEX_WRAP_MIRROR_REPEAT:
      wrap_mirror_repeat(b, &result, size);
      break;
   case DXIL_TEX_WRAP_MIRROR_CLAMP:
   case DXIL_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      wrap_mirror_clamp_to_edge(b, &result, size);
      break;
   case DXIL_TEX_WRAP_CLAMP:
   case DXIL_TEX_WRAP_CLAMP_TO_BORDER:
      wrap_clamp(b, &result, size);
      break;
   case DXIL_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      wrap_mirror_clamp(b, &result, size);
      break;
   }
   return result;
}

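/* Materialize the sampler's static border color as an immediate vector,
 * applying the texture swizzle: PIPE_SWIZZLE_0/1 components become the
 * constants 0 and 1, the remaining channels pick from the stored color. */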
static nir_def *
load_bordercolor(nir_builder *b, nir_tex_instr *tex, const dxil_wrap_sampler_state *active_state,
                 const dxil_texture_swizzle_state *tex_swizzle)
{
   int ndest_comp = tex->def.num_components;

   unsigned swizzle[4] = {
      tex_swizzle->swizzle_r,
      tex_swizzle->swizzle_g,
      tex_swizzle->swizzle_b,
      tex_swizzle->swizzle_a
   };

   /* Avoid any possible float conversion issues */
   uint32_t border_color[4];
   memcpy(border_color, active_state->border_color, sizeof(border_color));
   STATIC_ASSERT(sizeof(border_color) == sizeof(active_state->border_color));

   nir_const_value const_value[4];
   for (int i = 0; i < ndest_comp; ++i) {
      switch (swizzle[i]) {
      case PIPE_SWIZZLE_0:
         const_value[i] = nir_const_value_for_uint(0, 32);
         break;
      case PIPE_SWIZZLE_1:
         const_value[i] = nir_const_value_for_uint(1, 32);
         break;
      case PIPE_SWIZZLE_X:
      case PIPE_SWIZZLE_Y:
      case PIPE_SWIZZLE_Z:
      case PIPE_SWIZZLE_W:
         const_value[i] = nir_const_value_for_uint(border_color[swizzle[i]], 32);
         break;
      default:
         unreachable("Unexpected swizzle value");
      }
   }

   return nir_build_imm(b, ndest_comp, 32, const_value);
}

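/* Clone the texture-related sources of the original sample instruction into a
 * fresh nir_texop_txf; the caller adds the integer coordinate and the LOD
 * sources afterwards. */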
static nir_tex_instr *
create_txf_from_tex(nir_builder *b, nir_tex_instr *tex)
{
   nir_tex_instr *txf;

   unsigned num_srcs = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle)
         num_srcs++;
   }

   txf = nir_tex_instr_create(b->shader, num_srcs);
   txf->op = nir_texop_txf;
   txf->coord_components = tex->coord_components;
   txf->sampler_dim = tex->sampler_dim;
   txf->is_array = tex->is_array;
   txf->is_shadow = tex->is_shadow;
   txf->is_new_style_shadow = tex->is_new_style_shadow;
   txf->texture_index = tex->texture_index;
   txf->sampler_index = tex->sampler_index;
   txf->dest_type = tex->dest_type;

   unsigned idx = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle) {
         txf->src[idx].src = nir_src_for_ssa(tex->src[i].src.ssa);
         txf->src[idx].src_type = tex->src[i].src_type;
         idx++;
      }
   }

   nir_def_init(&txf->instr, &txf->def, nir_tex_instr_dest_size(txf), 32);
   nir_builder_instr_insert(b, &txf->instr);

   return txf;
}

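/* Assemble the wrapped per-component coordinates back into a vector, convert
 * them to integers, and emit the texel fetch at the previously evaluated LOD. */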
static nir_def *
load_texel(nir_builder *b, nir_tex_instr *tex, wrap_lower_param_t *params)
{
   nir_def *texcoord = NULL;

   /* Put coordinates back together */
   switch (tex->coord_components) {
   case 1:
      texcoord = params->wrap[0].coords;
      break;
   case 2:
      texcoord = nir_vec2(b, params->wrap[0].coords, params->wrap[1].coords);
      break;
   case 3:
      texcoord = nir_vec3(b, params->wrap[0].coords, params->wrap[1].coords, params->wrap[2].coords);
      break;
   default:
      ;
   }

   texcoord = nir_f2i32(b, texcoord);

   nir_tex_instr *load = create_txf_from_tex(b, tex);
   nir_tex_instr_add_src(load, nir_tex_src_lod, params->lod);
   nir_tex_instr_add_src(load, nir_tex_src_coord, texcoord);
   b->cursor = nir_after_instr(&load->instr);
   return &load->def;
}

typedef struct {
   const dxil_wrap_sampler_state *aws;
   float max_bias;
   nir_def *size;
   int ncoord_comp;
} lod_params;

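/* Compute the mip level the texel fetch should read from: take the explicit
 * LOD (txl), derive it from the gradients (txd), or query it via
 * nir_texop_lod in fragment shaders; then add the clamped bias, clamp the
 * result to the sampler's LOD range, and round to the nearest available
 * level. */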
static nir_def *
evaluate_active_lod(nir_builder *b, nir_tex_instr *tex, lod_params *params)
{
   nir_def *lod = NULL;

   /* Later we use min_lod for clamping the LOD to a legal value */
   float min_lod = MAX2(params->aws->min_lod, 0.0f);

   /* Evaluate the LOD to be used for the texel fetch */
   if (unlikely(tex->op == nir_texop_txl)) {
      int lod_index = nir_tex_instr_src_index(tex, nir_tex_src_lod);
      /* If we have an explicit LOD, take it */
      lod = tex->src[lod_index].src.ssa;
   } else if (unlikely(tex->op == nir_texop_txd)) {
      int ddx_index = nir_tex_instr_src_index(tex, nir_tex_src_ddx);
      int ddy_index = nir_tex_instr_src_index(tex, nir_tex_src_ddy);
      assert(ddx_index >= 0 && ddy_index >= 0);

      nir_def *grad = nir_fmax(b,
                               tex->src[ddx_index].src.ssa,
                               tex->src[ddy_index].src.ssa);

      nir_def *r = nir_fmul(b, grad, nir_i2f32(b, params->size));
      nir_def *rho = nir_channel(b, r, 0);
      for (int i = 1; i < params->ncoord_comp; ++i)
         rho = nir_fmax(b, rho, nir_channel(b, r, i));
      lod = nir_flog2(b, rho);
   } else if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
      lod = dx_get_texture_lod(b, tex);
   } else {
      /* Only fragment shaders provide the gradient information to evaluate a LOD,
       * so force 0 otherwise */
      lod = nir_imm_float(b, 0.0);
   }

   /* Evaluate the bias according to OpenGL (4.6 (Compatibility Profile), October 22, 2019),
    * sec. 8.14.1, eq. (8.9)
    *
    *    lod' = lambda + CLAMP(bias_texobj + bias_texunit + bias_shader)
    *
    * bias_texobj is the value of TEXTURE_LOD_BIAS for the bound texture object. ...
    * bias_texunit is the value of TEXTURE_LOD_BIAS for the current texture unit, ...
    * bias_shader is the value of the optional bias parameter in the texture
    * lookup functions available to fragment shaders. ... The sum of these values
    * is clamped to the range [−bias_max, bias_max] where bias_max is the value
    * of the implementation-defined constant MAX_TEXTURE_LOD_BIAS.
    * In core contexts the value bias_texunit is dropped from the above equation.
    *
    * Gallium provides the value lod_bias as the sum of bias_texobj and bias_texunit
    * in compatibility contexts and as bias_texobj in core contexts, hence the
    * implementation here is the same in both cases.
    */
   nir_def *lod_bias = nir_imm_float(b, params->aws->lod_bias);

   if (unlikely(tex->op == nir_texop_txb)) {
      int bias_index = nir_tex_instr_src_index(tex, nir_tex_src_bias);
      lod_bias = nir_fadd(b, lod_bias, tex->src[bias_index].src.ssa);
   }

   lod = nir_fadd(b, lod, nir_fclamp(b, lod_bias,
                                     nir_imm_float(b, -params->max_bias),
                                     nir_imm_float(b, params->max_bias)));

   /* Clamp lod according to ibid. eq. (8.10) */
   lod = nir_fmax(b, lod, nir_imm_float(b, min_lod));

   /* If the max lod is > max_bias = log2(max_texture_size), the lod will be clamped
    * by the number of levels, so there is no need to clamp it against max_lod first. */
   if (params->aws->max_lod <= params->max_bias)
      lod = nir_fmin(b, lod, nir_imm_float(b, params->aws->max_lod));

   /* Pick the nearest LOD */
   lod = nir_f2i32(b, nir_fround_even(b, lod));

   /* Cap the actual lod by the number of available levels */
   return nir_imin(b, lod, nir_imm_int(b, params->aws->last_level));
}


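/* Instruction rewrite callback: replace an integer-texture sample with a texel
 * fetch. The steps are: look up the per-sampler wrap/swizzle state, evaluate
 * the LOD and the size of the selected level, scale normalized coordinates to
 * texel space, apply offsets and the per-component wrap modes, and finally
 * select between the fetched texel and the border color. */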
static nir_def *
lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
                                         void *options)
{
   sampler_states *states = (sampler_states *)options;
   wrap_lower_param_t params = {0};

   nir_tex_instr *tex = nir_instr_as_tex(instr);

   const static dxil_wrap_sampler_state default_wrap_state = {
      { 0, 0, 0, 1 },
      0,
      FLT_MIN, FLT_MAX,
      0,
      { 0, 0, 0 },
      1,
      0,
      0,
      0
   };
   const dxil_wrap_sampler_state *active_wrap_state =
      tex->sampler_index < states->n_texture_states ?
         &states->wrap_states[tex->sampler_index] : &default_wrap_state;

   b->cursor = nir_before_instr(instr);

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   nir_def *old_coord = tex->src[coord_index].src.ssa;
   params.ncoord_comp = tex->coord_components;
   if (tex->is_array)
      params.ncoord_comp -= 1;

   /* This helper for getting the texture size always uses LOD 0, and DirectX
    * doesn't support querying the texture size at another LOD. */
   nir_def *size0 = nir_get_texture_size(b, tex);

   params.lod = nir_imm_int(b, 0);

   if (active_wrap_state->last_level > 0) {
      lod_params p = {
         .aws = active_wrap_state,
         .max_bias = states->max_bias,
         .size = size0,
         .ncoord_comp = params.ncoord_comp
      };
      params.lod = evaluate_active_lod(b, tex, &p);

      /* Evaluate the actual level size */
      params.size = nir_i2f32(b, nir_imax(b, nir_ishr(b, size0, params.lod),
                                          nir_imm_int(b, 1)));
   } else {
      params.size = nir_i2f32(b, size0);
   }

   nir_def *new_coord = old_coord;
   if (!active_wrap_state->is_nonnormalized_coords) {
      /* Evaluate the integer lookup coordinates for the requested LOD; don't touch the
       * array index */
      if (!tex->is_array) {
         new_coord = nir_fmul(b, params.size, old_coord);
      } else {
         nir_def *array_index = nir_channel(b, old_coord, params.ncoord_comp);
         int mask = (1 << params.ncoord_comp) - 1;
         nir_def *coord = nir_fmul(b, nir_channels(b, params.size, mask),
                                   nir_channels(b, old_coord, mask));
         switch (params.ncoord_comp) {
         case 1:
            new_coord = nir_vec2(b, coord, array_index);
            break;
         case 2:
            new_coord = nir_vec3(b, nir_channel(b, coord, 0),
                                 nir_channel(b, coord, 1),
                                 array_index);
            break;
         default:
            unreachable("unsupported number of non-array coordinates");
         }
      }
   }

   nir_def *coord_help[3];
   for (int i = 0; i < params.ncoord_comp; ++i)
      coord_help[i] = nir_ffloor(b, nir_channel(b, new_coord, i));

   // Note: the array index needs to be rounded to nearest before clamping rather than floored
   if (tex->is_array)
      coord_help[params.ncoord_comp] = nir_fround_even(b, nir_channel(b, new_coord, params.ncoord_comp));

   /* Correct the texture coordinates for the offsets. */
   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index >= 0) {
      nir_def *offset = tex->src[offset_index].src.ssa;
      for (int i = 0; i < params.ncoord_comp; ++i)
         coord_help[i] = nir_fadd(b, coord_help[i], nir_i2f32(b, nir_channel(b, offset, i)));
   }

   nir_def *use_border_color = nir_imm_false(b);

   if (!active_wrap_state->skip_boundary_conditions) {

      for (int i = 0; i < params.ncoord_comp; ++i) {
         params.wrap[i] = wrap_coords(b, coord_help[i], active_wrap_state->wrap[i], nir_channel(b, params.size, i));
         use_border_color = nir_ior(b, use_border_color, params.wrap[i].use_border_color);
      }

      if (tex->is_array)
         params.wrap[params.ncoord_comp] =
            wrap_coords(b, coord_help[params.ncoord_comp],
                        DXIL_TEX_WRAP_CLAMP_TO_EDGE,
                        nir_i2f32(b, nir_channel(b, size0, params.ncoord_comp)));
   } else {
      /* When we emulate a cube map by using a texture array, the coordinates are always
       * in range, and we don't have to take care of boundary conditions */
      for (unsigned i = 0; i < 3; ++i) {
         params.wrap[i].coords = coord_help[i];
         params.wrap[i].use_border_color = nir_imm_false(b);
      }
   }

   const dxil_texture_swizzle_state one2one = {
      PIPE_SWIZZLE_X, PIPE_SWIZZLE_Y, PIPE_SWIZZLE_Z, PIPE_SWIZZLE_W
   };

   nir_if *border_if = nir_push_if(b, use_border_color);
   const dxil_texture_swizzle_state *swizzle =
      (states->tex_swizzles && tex->sampler_index < states->n_texture_states) ?
         &states->tex_swizzles[tex->sampler_index] : &one2one;

   nir_def *border_color = load_bordercolor(b, tex, active_wrap_state, swizzle);
   nir_if *border_else = nir_push_else(b, border_if);
   nir_def *sampler_color = load_texel(b, tex, &params);
   nir_pop_if(b, border_else);

   return nir_if_phi(b, border_color, sampler_color);
}

/* Sampling from integer textures is not allowed in DirectX, so we have
 * to use texel fetches instead. For this we have to scale the coordinates
 * to be integer based, evaluate the LOD the texel fetch has to be
 * applied on, and take care of the boundary conditions.
 */
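/* Minimal usage sketch (the array names, their size, and the max_lod_bias
 * value below are illustrative, not part of this file; a driver would fill
 * the arrays from its bound sampler and sampler-view state):
 *
 *    dxil_wrap_sampler_state wrap[PIPE_MAX_SAMPLERS] = {0};
 *    dxil_texture_swizzle_state swizzles[PIPE_MAX_SAMPLERS] = {0};
 *    // ... populate wrap[] and swizzles[] per sampler slot ...
 *    dxil_lower_sample_to_txf_for_integer_tex(nir, PIPE_MAX_SAMPLERS,
 *                                             wrap, swizzles, max_lod_bias);
 */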
bool
dxil_lower_sample_to_txf_for_integer_tex(nir_shader *s,
                                         unsigned n_texture_states,
                                         dxil_wrap_sampler_state *wrap_states,
                                         dxil_texture_swizzle_state *tex_swizzles,
                                         float max_bias)
{
   sampler_states states = { n_texture_states, wrap_states, tex_swizzles, max_bias };

   bool result =
      nir_shader_lower_instructions(s,
                                    lower_sample_to_txf_for_integer_tex_filter,
                                    lower_sample_to_txf_for_integer_tex_impl,
                                    &states);
   return result;
}